repo_name (string, lengths 6–130) | hexsha (list) | file_path (list) | code (list) | apis (list) |
---|---|---|---|---|
weihaosky/SMVmatching |
[
"814d00530026278f3582df3849ead46b3479cc57"
] |
[
"models/stackhourglass_prob.py"
] |
[
"from __future__ import print_function\nimport torch\nimport torch.nn as nn\nimport torch.utils.data\nfrom torch.autograd import Variable\nimport torch.nn.functional as F\nimport math\nfrom .submodule import *\n\n\nclass hourglass(nn.Module):\n def __init__(self, inplanes):\n super(hourglass, self).__init__()\n\n self.conv1 = nn.Sequential(convbn_3d(inplanes, inplanes*2, kernel_size=3, stride=2, pad=1),\n nn.ReLU(inplace=True))\n\n self.conv2 = convbn_3d(inplanes*2, inplanes*2, kernel_size=3, stride=1, pad=1)\n\n self.conv3 = nn.Sequential(convbn_3d(inplanes*2, inplanes*2, kernel_size=3, stride=2, pad=1),\n nn.ReLU(inplace=True))\n\n self.conv4 = nn.Sequential(convbn_3d(inplanes*2, inplanes*2, kernel_size=3, stride=1, pad=1),\n nn.ReLU(inplace=True))\n\n self.conv5 = nn.Sequential(nn.ConvTranspose3d(inplanes*2, inplanes*2, kernel_size=3, padding=1, output_padding=1, stride=2,bias=False),\n nn.BatchNorm3d(inplanes*2)) #+conv2\n\n self.conv6 = nn.Sequential(nn.ConvTranspose3d(inplanes*2, inplanes, kernel_size=3, padding=1, output_padding=1, stride=2,bias=False),\n nn.BatchNorm3d(inplanes)) #+x\n\n def forward(self, x ,presqu, postsqu):\n \n out = self.conv1(x) #in:1/4 out:1/8\n pre = self.conv2(out) #in:1/8 out:1/8\n if postsqu is not None:\n pre = F.relu(pre + postsqu, inplace=True)\n else:\n pre = F.relu(pre, inplace=True)\n\n out = self.conv3(pre) #in:1/8 out:1/16\n out = self.conv4(out) #in:1/16 out:1/16\n\n if presqu is not None:\n post = F.relu(self.conv5(out)+presqu, inplace=True) #in:1/16 out:1/8\n else:\n post = F.relu(self.conv5(out)+pre, inplace=True) \n\n out = self.conv6(post) #in:1/8 out:1/4\n\n return out, pre, post\n\nclass PSMNet(nn.Module):\n def __init__(self, maxdisp, prob_mode):\n super(PSMNet, self).__init__()\n self.maxdisp = maxdisp\n self.prob_mode = prob_mode\n if self.prob_mode == 1:\n self.var_regress = var_regression(self.maxdisp)\n\n self.feature_extraction = feature_extraction()\n\n self.dres0 = nn.Sequential(convbn_3d(64, 32, 3, 1, 1),\n nn.ReLU(inplace=True),\n convbn_3d(32, 32, 3, 1, 1),\n nn.ReLU(inplace=True))\n\n self.dres1 = nn.Sequential(convbn_3d(32, 32, 3, 1, 1),\n nn.ReLU(inplace=True),\n convbn_3d(32, 32, 3, 1, 1)) \n\n self.dres2 = hourglass(32)\n\n # self.dres3 = hourglass(32)\n\n self.dres4 = hourglass(32)\n\n self.classif1 = nn.Sequential(convbn_3d(32, 32, 3, 1, 1),\n nn.ReLU(inplace=True),\n nn.Conv3d(32, 1, kernel_size=3, padding=1, stride=1,bias=False))\n\n # self.classif2 = nn.Sequential(convbn_3d(32, 32, 3, 1, 1),\n # nn.ReLU(inplace=True),\n # nn.Conv3d(32, 1, kernel_size=3, padding=1, stride=1,bias=False))\n\n self.classif3 = nn.Sequential(convbn_3d(32, 32, 3, 1, 1),\n nn.ReLU(inplace=True),\n nn.Conv3d(32, 1, kernel_size=3, padding=1, stride=1,bias=False))\n\n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels\n m.weight.data.normal_(0, math.sqrt(2. / n))\n elif isinstance(m, nn.Conv3d):\n n = m.kernel_size[0] * m.kernel_size[1]*m.kernel_size[2] * m.out_channels\n m.weight.data.normal_(0, math.sqrt(2. 
/ n))\n elif isinstance(m, nn.BatchNorm2d):\n m.weight.data.fill_(1)\n m.bias.data.zero_()\n elif isinstance(m, nn.BatchNorm3d):\n m.weight.data.fill_(1)\n m.bias.data.zero_()\n elif isinstance(m, nn.Linear):\n m.bias.data.zero_()\n\n\n def forward(self, left, right, maxdisp=0):\n if maxdisp != 0:\n self.maxdisp = maxdisp\n self.prob3 = nn.Sequential(convbn(self.maxdisp, 32, 3, 1, 1, 1),\n nn.ReLU(inplace=True),\n nn.Conv2d(32, 1, kernel_size=3, padding=1, stride=1,bias=False))\n\n refimg_fea = self.feature_extraction(left)\n targetimg_fea = self.feature_extraction(right)\n\n\n #matching\n cost = Variable(torch.FloatTensor(refimg_fea.size()[0], refimg_fea.size()[1]*2, self.maxdisp/4, refimg_fea.size()[2], refimg_fea.size()[3]).zero_())\n if(refimg_fea.is_cuda):\n cost = cost.cuda()\n\n for i in range(self.maxdisp/4):\n if i > 0 :\n cost[:, :refimg_fea.size()[1], i, :,i:] = refimg_fea[:,:,:,i:]\n cost[:, refimg_fea.size()[1]:, i, :,i:] = targetimg_fea[:,:,:,:-i]\n else:\n cost[:, :refimg_fea.size()[1], i, :,:] = refimg_fea\n cost[:, refimg_fea.size()[1]:, i, :,:] = targetimg_fea\n cost = cost.contiguous()\n\n cost0 = self.dres0(cost)\n cost0 = self.dres1(cost0) + cost0\n\n out1, pre1, post1 = self.dres2(cost0, None, None) \n out1 = out1+cost0\n\n # out2, pre2, post2 = self.dres3(out1, pre1, post1) \n # out2 = out2+cost0\n\n out3, pre3, post3 = self.dres4(out1, pre1, post1) \n out3 = out3+cost0\n\n cost1 = self.classif1(out1)\n # cost2 = self.classif2(out2) + cost1\n cost3 = self.classif3(out3) + cost1\n\n # cost1 = F.interpolate(cost1, [self.maxdisp,left.size()[2],left.size()[3]], mode='trilinear', align_corners=False)\n # cost2 = F.interpolate(cost2, [self.maxdisp,left.size()[2],left.size()[3]], mode='trilinear', align_corners=False)\n\n # cost1 = torch.squeeze(cost1,1)\n # pred1 = F.softmax(cost1, dim=1)\n # pred1 = disparityregression(self.maxdisp)(pred1)\n\n # cost2 = torch.squeeze(cost2,1)\n # pred2 = F.softmax(cost2, dim=1)\n # pred2 = disparityregression(self.maxdisp)(pred2)\n\n cost3 = F.interpolate(cost3, [self.maxdisp,left.size()[2],left.size()[3]], mode='trilinear', align_corners=False)\n cost3 = torch.squeeze(cost3, 1)\n pred3_prob = F.softmax(cost3, dim=1)\n pred3 = disparityregression(self.maxdisp)(pred3_prob)\n\n prob_sum = 4 * F.avg_pool3d(F.pad(pred3_prob.unsqueeze(1), pad=(0,0,0,0,1,2)), (4,1,1), stride=1, padding=0).squeeze(1)\n disp_index = pred3.long()\n \n if self.prob_mode == 1:\n var3 = self.var_regress(cost3)\n var3 = torch.squeeze(var3, 1)\n elif self.prob_mode == 2:\n confidence = torch.gather(prob_sum, 1, disp_index.unsqueeze(1)).squeeze(1)\n var3 = 1 - confidence\n\n if self.training:\n return None, None, pred3, var3\n else:\n return pred3, var3, None, None\n"
] |
[
[
"torch.nn.functional.softmax",
"torch.nn.ConvTranspose3d",
"torch.nn.Conv2d",
"torch.nn.Conv3d",
"torch.nn.functional.relu",
"torch.nn.ReLU",
"torch.nn.BatchNorm3d",
"torch.squeeze"
]
] |
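For context, a minimal sketch of the concatenation-style cost volume that `PSMNet.forward` builds in this file, i.e. the loop that shifts the right-image features across disparity levels at 1/4 resolution. The standalone function and tensor names are illustrative, and it uses Python 3 integer division (`maxdisp // 4`) where the stored file writes `self.maxdisp/4`:

```python
import torch

def build_cost_volume(ref_fea, tgt_fea, maxdisp):
    """Concatenation cost volume over maxdisp // 4 disparity levels (features at 1/4 resolution)."""
    b, c, h, w = ref_fea.shape
    d = maxdisp // 4
    cost = ref_fea.new_zeros(b, 2 * c, d, h, w)
    for i in range(d):
        if i > 0:
            cost[:, :c, i, :, i:] = ref_fea[:, :, :, i:]    # left features over the valid window
            cost[:, c:, i, :, i:] = tgt_fea[:, :, :, :-i]   # right features displaced by i pixels
        else:
            cost[:, :c, i, :, :] = ref_fea
            cost[:, c:, i, :, :] = tgt_fea
    return cost.contiguous()
```

The network then regularises this 5-D volume with stacked 3-D hourglass blocks, upsamples it to the full disparity range, and applies a softmax over the disparity axis before `disparityregression`.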
Kouki706/Prob-Paper |
[
"01173d7f544cb274224a53ffc1ad1034a06084d6"
] |
[
"ProbPaper/Uniform.py"
] |
[
"#%%\nimport numpy as np\nimport math\nfrom scipy.stats import norm\nfrom scipy.optimize import curve_fit\nimport matplotlib.pyplot as plt\n\n# 回帰式\ndef func(x, a, b):\n f = a*x + b\n return f\n\n# データの読み込み\nQin=np.loadtxt('./Data.csv', delimiter=',',usecols=[0])\nQin = np.sort(Qin)\n\n# データサイズの取得\nmax = Qin.size\nprint(\"データ数\",max)\n\n# メジアンランク法\nPin=np.empty(max)\nfor i in range(max):\n Pin[i] = (i+0.7) / (max+0.4)\n\n# 重複する値を除く\nfor i in range(max-2,0,-1):\n if(Qin[i] == Qin[i+1]):\n Pin[i] = Pin[i+1]\n\n# データをファイル出力する\nData = [Qin,Pin]\nData = np.array(Data).T\nnp.savetxt(\"./tmp/Prob.dat\",Data,delimiter=\"\\t\")\n\n# 正規分布の値を取得\nppp=Pin\nqqq=Qin\n\n# 回帰直線\npopt, pcov = curve_fit(func,qqq,ppp)\nrr=np.corrcoef(qqq,ppp)\naa = popt[0]\nbb = popt[1]\n\n# 決定係数\nresiduals = ppp - func(qqq, popt[0],popt[1])\nrss = np.sum(residuals**2)\ntss = np.sum((ppp-np.mean(ppp))**2)\nr_squared = 1 - (rss / tss)\n\n# 図の書式\nplt.rcParams['font.family'] = 'Times New Roman'\nfig = plt.figure(figsize=(4,3)) # Figure\nax = fig.add_subplot() # Axes\nax.patch.set_facecolor('lavender') # subplotの背景色\nax.patch.set_alpha(0.2) # subplotの背景透明度\nax.spines['top'].set_linewidth(0.1)\nax.spines['right'].set_linewidth(0.1)\nax.spines['left'].set_linewidth(0.1)\nax.spines['bottom'].set_linewidth(0.1)\n\n# x軸の最大・最小\nxmin=qqq[0] - (qqq[max-1]-qqq[0])/100\nxmax=qqq[max-1] + (qqq[max-1]-qqq[0])/100\n\n# y軸の最大・最小\nymin=0.001\nymax=0.999\n\n# 図の描画範囲\nax.set_xlim([xmin,xmax])\nax.set_ylim([ymin,ymax])\n\n# 図の表示書式\n# ax.tick_params(labelbottom=False)\n# ax.tick_params(labelleft=False)\n# ax.tick_params(bottom=False)\nax.tick_params(direction = \"inout\", axis = \"x\", length=2, width=0)\nax.tick_params(direction = \"out\", axis = \"y\", length=2, width=0)\n\n# y軸目盛用\n_dy=np.array([0.001,0.1,0.2,0.3,0.4,0.5,0.6,0.7,0.8,0.9,0.999])\ndy=_dy\n_dy=_dy * 100\n\n# 水平軸の描画用\n_dy_tick = np.array([0.001,0.1,0.2,0.3,0.4,0.5,0.6,0.7,0.8,0.9,0.999])\ndy_tick=_dy_tick\nax.hlines(dy_tick, xmin, xmax, color='mediumpurple',linewidth=0.1)\nax.hlines(0.5, xmin, xmax, color='black',linewidth=0.1)\n\n# x軸の目盛\n_dx=np.empty(7)\n_dx[0] = qqq[0]\n_dx[6] = qqq[max-1]\n\n# x軸の表示目盛の計算\nddx = (_dx[6]-_dx[0])/6\nfor i in range(1,6,1):\n _dx[i] = _dx[0] + ddx * i\n\n# 鉛直軸の描画\nax.vlines(_dx, ymin, ymax, color='mediumpurple',linewidth=0.1)\n\n# x軸目盛\n# for i in range(7):\n# ax.text(_dx[i], ymin-0.1, str(round(_dx[i],2)), ha = 'center', va = 'top', fontsize=fs)\nax.get_xaxis().set_tick_params(pad=1)\nax.set_xticks(_dx)\nax.set_xticklabels(np.round(_dx,2),fontsize=5)\n\n# y軸目盛の値\n# for i in range(9):\n# ax.text(xmin- (xmax-xmin)/100, dy[i], str(_dy[i]), ha = 'right', va = 'center', fontsize=fs)\nax.get_yaxis().set_tick_params(pad=1)\nax.set_yticks(dy)\nax.set_yticklabels(_dy, fontsize = 5)\n\n# 値のプロット\nax.scatter(qqq,ppp,s = 2, alpha=0.8,linewidths=0.1,c=\"mediumslateblue\",ec = \"navy\" ,zorder=10)\nax.plot([xmin, xmax], [aa*xmin + bb, aa*xmax + bb], color='k', linestyle='-', linewidth=0.25, zorder=9)\n\n# 文字のプロット\nax.text(xmin, ymax + (ymax-ymin)/50, \"F(t) (%) Median Ranks\", ha = 'left', va = 'bottom', fontsize=5)\n\n# 有効データ数\nmax = np.unique(Qin).size\nprint(\"有効データ数 = \",max)\n\n# 平均・標準偏差\na = -bb/aa\nb = 1/aa + a\nmean = (a+b)/2\nvar = math.sqrt((b-a)**2/12)\nprint(f'境界 a = {a:10.6f}')\nprint(f'境界 b = {b:10.6f}')\nprint('平均 = {mean:10.6f}'.format(**locals()))\nprint('標準偏差 = {var:10.6f}'.format(**locals()))\n\nprint('相関係数 = {rr[0][1]:10.6f}'.format(**locals()))\nprint('決定係数 = {r_squared:10.6f}'.format(**locals()))\n\nplt.show()\n\n#%%\nfrom 
matplotlib.backends.backend_pdf import PdfPages\npdf = PdfPages('./img/Uniform.pdf')\npdf.savefig(fig,bbox_inches=\"tight\",pad_inches=0.02)\npdf.close()\n"
] |
[
[
"matplotlib.backends.backend_pdf.PdfPages",
"numpy.unique",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.show",
"numpy.sort",
"numpy.round",
"numpy.loadtxt",
"numpy.mean",
"numpy.savetxt",
"numpy.array",
"scipy.optimize.curve_fit",
"numpy.sum",
"numpy.empty",
"numpy.corrcoef"
]
] |
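A hedged sketch of the core fit in `ProbPaper/Uniform.py`: median-rank plotting positions are regressed against the sorted data with `scipy.optimize.curve_fit`, and uniform-distribution bounds are recovered from the fitted line. The random sample below is only a stand-in for the script's `Data.csv` column:

```python
import numpy as np
from scipy.optimize import curve_fit

def line(x, a, b):
    return a * x + b

data = np.sort(np.random.uniform(2.0, 5.0, size=50))       # placeholder for the CSV column
ranks = (np.arange(data.size) + 0.7) / (data.size + 0.4)    # median-rank positions, as in the script

popt, _ = curve_fit(line, data, ranks)
a, b = popt
lower, upper = -b / a, (1 - b) / a                          # F(lower) = 0, F(upper) = 1
print(lower, upper, (lower + upper) / 2, (upper - lower) / 12 ** 0.5)
```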
SriramRamesh/traffic-signaling-networks |
[
"17fa5ed5e3cc935b0baa354bbfd6e6cd3030da64"
] |
[
"scripts/plot_exit_rate.py"
] |
[
"import sys\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n# provide as argument the exrates.txt file, which contains raw number\n# of cars exiting every sink every second\ninpath = sys.argv[1]\n#sinks_exrates = np.loadtxt(sys.argv[1], skiprows=2)\nsinks_exrates = pd.read_csv(inpath, comment='#', index_col=0)\n\n# window length in seconds\nwindow = 60\nwindow_minutes = window/60\n\n# this is half the window in seconds\nradius = (window//2) \n\n# how often do we want the exit rate report? (seconds)\nfreq = 10\n\n# calculate avg number of cars per minute in \"window\", every \"freq\"\n# seconds; do this for each sink separately\n\nsavepath_fmt = inpath[:-4] + '_w{:03d}_s{:02d}_sink{{}}'.format(window, freq)\nfor sinkid in sinks_exrates.columns:\n\n timestamps = np.arange(1, sinks_exrates[sinkid].size, freq)\n exitrates = np.empty(timestamps.size)\n for ii, ts in enumerate(timestamps):\n start = 1 if ts <= radius else ts - radius\n end = sinks_exrates[sinkid].size - 1 if ts + radius > sinks_exrates[sinkid].size else ts + radius\n exitrates[ii]= sinks_exrates[sinkid].iloc[start:end].sum() / window_minutes\n\n np.savetxt(savepath_fmt.format(sinkid) + '.txt', np.vstack((timestamps,exitrates)).T, \n header='time_s,exit_rate', comments='', delimiter=',', fmt=['%.0f', '%.1f'])\n fig = plt.figure()\n ax = fig.add_subplot(111)\n ax.plot(timestamps, exitrates, 'k-o', ms=6)\n ax.grid()\n fig.suptitle('Sink {}'.format(sinkid))\n ax.set_title('Exit rate over a rolling {} min window, computed every {} seconds'.format(window_minutes, freq))\n ax.set_ylabel('Cars per minute')\n ax.set_xlabel('Time (seconds)')\n fig.savefig(savepath_fmt.format(sinkid) + '.png')\n plt.close(fig)\n #plt.show()\n\nprint('Saved all the plots.')\n"
] |
[
[
"pandas.read_csv",
"numpy.arange",
"numpy.vstack",
"matplotlib.pyplot.close",
"numpy.empty",
"matplotlib.pyplot.figure"
]
] |
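The script accumulates a centred window by hand; a pandas `rolling` window gives the same cars-per-minute figure more directly. The Poisson series below is a hypothetical stand-in for one sink's per-second exit counts from `exrates.txt`:

```python
import numpy as np
import pandas as pd

exits = pd.Series(np.random.poisson(0.5, size=600))   # invented per-second counts for one sink

window = 60                                            # seconds
cars_per_minute = exits.rolling(window, center=True, min_periods=1).sum() / (window / 60)
report = cars_per_minute.iloc[::10]                    # sample every 10 s, like the script's freq
print(report.head())
```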
idin/chronometry |
[
"e022a1d06d8e8be990483130bc314a189de2f149"
] |
[
"chronometry/estimate/Estimator.py"
] |
[
"\nfrom slytherin.hash import hash_object\nfrom slytherin.functions import get_function_arguments\nfrom ravenclaw.preprocessing import Polynomial, Normalizer\n\nfrom sklearn.linear_model import LinearRegression\nfrom sklearn.ensemble import RandomForestClassifier\nfrom pandas import DataFrame, concat\nfrom random import randint, random, choice\nfrom func_timeout import func_timeout, FunctionTimedOut\nimport matplotlib.pyplot as plt\nfrom numpy import where\n\nfrom .create_arguments import create_arguments\nfrom .Measurement import Measurement\nfrom ..time import get_elapsed\nfrom ..time import get_now\nfrom ..progress import ProgressBar\n\n\n# Estimator gets a single argument function and estimates the time it takes to run the function based on the argument\n# the function should accept an int larger than 0\nclass Estimator:\n\tdef __init__(self, function, args=None, unit='s', polynomial_degree=2, timeout=20):\n\t\tself._function = function\n\t\tself._function_arguments = get_function_arguments(function=function)\n\t\tself._unit = unit\n\t\tself._measurements = {}\n\t\tself._polynomial_degree = polynomial_degree\n\t\tself._model = None\n\t\tself._normalizer = None\n\t\tself._error_model = None\n\t\tself._max_x = None\n\t\tself._args = args\n\t\tself._timeout = timeout\n\t\tself._num_errors = 0\n\t\tself._num_regular_runs = 0\n\t\tself._num_timeouts = 0\n\t\tself._x_data_columns = {}\n\t\tself._error_x_data_columns = {}\n\n\t@staticmethod\n\tdef get_key(**kwargs):\n\t\treturn hash_object(kwargs)\n\n\tdef check_arguments(self, kwargs, method_name):\n\t\tunknown_arguments = [key for key in kwargs.keys() if key not in self._function_arguments]\n\t\tmissing_arguments = [key for key in self._function_arguments if key not in kwargs]\n\n\t\tif len(missing_arguments) == 1:\n\t\t\treturn f'{method_name}() is missing the argument \"{missing_arguments[0]}\"'\n\t\telif len(missing_arguments) > 1:\n\t\t\targuments_string = '\", \"'.join(missing_arguments)\n\t\t\treturn f'{method_name}() is missing arguments \"{arguments_string}\"'\n\n\t\tif len(unknown_arguments) == 0:\n\t\t\treturn False\n\t\telif len(unknown_arguments) == 1:\n\t\t\treturn f'{method_name}() got an unexpected argument \"{unknown_arguments[0]}\"'\n\t\telse:\n\t\t\targuments_string = '\", \"'.join(unknown_arguments)\n\t\t\treturn f'{method_name}() got unexpected arguments \"{arguments_string}\"'\n\n\tdef get_arguments(self, arguments, **kwargs):\n\t\tif arguments is None and len(kwargs) == 0:\n\t\t\traise ValueError('either arguments should be provided or kwargs!')\n\t\telif arguments is not None and len(kwargs) > 0:\n\t\t\traise ValueError('only one of arguments and kwargs should be provided!')\n\t\telif arguments is None:\n\t\t\targuments = kwargs\n\t\treturn arguments\n\n\tdef measure(self, timeout=None, arguments=None, **kwargs):\n\t\t\"\"\"\n\t\t:type timeout: int or float\n\t\t:type arguments: NoneType or dict\n\t\t:rtype: Measurement\n\t\t\"\"\"\n\n\t\tkwargs = self.get_arguments(arguments=arguments, **kwargs)\n\n\t\tif self.check_arguments(kwargs=kwargs, method_name='measure'):\n\t\t\traise TypeError(self.check_arguments(kwargs=kwargs, method_name='measure'))\n\n\t\tkey = self.get_key(**kwargs)\n\t\tif key in self._measurements:\n\t\t\treturn self._measurements[key]\n\t\telse:\n\t\t\tstart_time = get_now()\n\t\t\tif not timeout:\n\t\t\t\ttry:\n\t\t\t\t\tresult = self._function(**kwargs)\n\t\t\t\t\ttimeout_error = False\n\t\t\t\t\tother_error = False\n\t\t\t\t\tself._num_regular_runs += 1\n\n\t\t\t\texcept Exception as 
e:\n\t\t\t\t\tresult = None\n\t\t\t\t\ttimeout_error = False\n\t\t\t\t\tother_error = True\n\t\t\t\t\tself._num_errors += 1\n\n\t\t\telse:\n\t\t\t\tdef run_function():\n\t\t\t\t\treturn self._function(**kwargs)\n\n\t\t\t\ttry:\n\t\t\t\t\tresult = func_timeout(timeout, run_function)\n\t\t\t\t\ttimeout_error = False\n\t\t\t\t\tother_error = False\n\t\t\t\t\tself._num_regular_runs += 1\n\n\t\t\t\texcept FunctionTimedOut:\n\t\t\t\t\tresult = None\n\t\t\t\t\ttimeout_error = True\n\t\t\t\t\tother_error = False\n\t\t\t\t\tself._num_timeouts += 1\n\n\t\t\t\texcept Exception as e:\n\t\t\t\t\tresult = None\n\t\t\t\t\ttimeout_error = False\n\t\t\t\t\tother_error = True\n\t\t\t\t\tself._num_errors += 1\n\n\t\t\telapsed = get_elapsed(start=start_time, unit=self._unit)\n\t\t\tmeasurement = Measurement(\n\t\t\t\tx=kwargs, result=result, elapsed=elapsed, timeout_error=timeout_error, other_error=other_error\n\t\t\t)\n\t\t\tself._measurements[key] = measurement\n\t\t\tself._model = None\n\t\t\tself._normalizer = None\n\t\t\tself._error_model = None\n\t\t\tif self._max_x is None:\n\t\t\t\tself._max_x = kwargs\n\t\t\telse:\n\t\t\t\tself._max_x = {key: max(value, kwargs[key]) for key, value in self._max_x.items()}\n\t\t\treturn measurement\n\n\t@property\n\tdef data(self):\n\t\t\"\"\"\n\t\t:rtype: DataFrame\n\t\t\"\"\"\n\t\treturn DataFrame.from_records(\n\t\t\t[measurement.dictionary for measurement in self.measurements]\n\t\t)\n\n\t@property\n\tdef measurements(self):\n\t\t\"\"\"\n\t\t:rtype: list[Measurement]\n\t\t\"\"\"\n\t\tmeasurements = sorted(list(self._measurements.values()))\n\t\t# set the weights\n\t\tmin_elapsed = measurements[0].elapsed_time\n\t\tfor measurement in measurements:\n\t\t\tif min_elapsed > 0:\n\t\t\t\tmeasurement._weight = 1 + (measurement.elapsed_time / min_elapsed) ** 0.5\n\t\t\telse:\n\t\t\t\tmeasurement._weight = 1\n\t\treturn measurements\n\n\t@property\n\tdef num_measurements(self):\n\t\t\"\"\"\n\t\t:rtype: int\n\t\t\"\"\"\n\t\treturn len(self._measurements)\n\n\t@property\n\tdef num_errors(self):\n\t\treturn self._num_errors\n\n\t@property\n\tdef num_regular_runs(self):\n\t\treturn self._num_regular_runs\n\n\t@property\n\tdef num_timeouts(self):\n\t\treturn self._num_timeouts\n\n\t@property\n\tdef num_runs(self):\n\t\treturn self.num_errors + self.num_regular_runs\n\n\tdef get_x_data(self, x, degree=None):\n\t\t\"\"\"\n\t\t:type x: DataFrame or dict or list\n\t\t:type degree: NoneType or int\n\t\t:rtype: DataFrame\n\t\t\"\"\"\n\t\tif isinstance(x, dict):\n\t\t\tif all([isinstance(value, (list, tuple)) for value in x.values()]):\n\t\t\t\tdata = DataFrame(x)\n\t\t\telse:\n\t\t\t\tdata = DataFrame.from_records([x])\n\n\t\telif isinstance(x, list) and all([isinstance(element, dict) for element in x]):\n\t\t\tdata = DataFrame.from_records(x)\n\n\t\telif isinstance(x, list):\n\t\t\tdata = DataFrame({'x': x})\n\n\t\telse:\n\t\t\tdata = x\n\n\t\tdegree = degree or self._polynomial_degree\n\n\t\tif degree > 1:\n\t\t\tpolynomial = Polynomial(degree=degree)\n\t\t\tresult = polynomial.fit_transform(data=data)\n\n\t\telse:\n\t\t\tresult = data.copy()\n\n\t\tif degree not in self._x_data_columns:\n\t\t\tself._x_data_columns[degree] = list(result.columns)\n\t\treturn result[self._x_data_columns[degree]]\n\n\tdef get_error_x_data(self, x):\n\t\tdata = self.get_x_data(x=x, degree=1)\n\n\t\tfor column in self._function_arguments:\n\t\t\tdata[f'{column}_is_zero'] = data[column] == 0\n\t\t\tdata[f'{column}_is_positive'] = 1 * (data[column] > 0)\n\t\t\tdata[f'{column}_rounded'] = 
data[column].round()\n\t\t\tdata[f'{column}_is_int'] = data[f'{column}_rounded'] == data[column]\n\n\t\tif 1 not in self._error_x_data_columns:\n\t\t\tself._error_x_data_columns[1] = list(data.columns)\n\t\treturn data[self._error_x_data_columns[1]]\n\n\t@property\n\tdef training_x_y_weights(self):\n\t\t\"\"\"\n\t\t:rtype: tuple[DataFrame, list, list]\n\t\t\"\"\"\n\t\trecords = []\n\t\ty = []\n\t\tweights = []\n\t\tfor measurement in self.measurements:\n\t\t\tif not measurement.timeout_error and not measurement.other_error:\n\t\t\t\trecords.append(measurement.x)\n\t\t\t\ty.append(measurement.elapsed_time)\n\t\t\t\tweights.append(measurement.elapsed_time ** 2)\n\t\tx = self.get_x_data(DataFrame.from_records(records))\n\t\treturn x, y, weights\n\n\t@property\n\tdef error_training_x_y_weights(self):\n\t\t\"\"\"\n\t\t:rtype: tuple[DataFrame, list]\n\t\t\"\"\"\n\t\trecords = []\n\t\ty = []\n\t\tweights = []\n\t\terror_count = self.num_errors\n\t\tother_count = len(self._measurements) - error_count\n\n\t\tfor measurement in self._measurements.values():\n\t\t\tif not measurement.timeout_error:\n\t\t\t\trecords.append(measurement.x)\n\t\t\t\tif measurement.other_error:\n\t\t\t\t\ty.append(1)\n\t\t\t\t\tweights.append(other_count)\n\t\t\t\telse:\n\t\t\t\t\ty.append(0)\n\t\t\t\t\tweights.append(error_count)\n\t\tx = self.get_error_x_data(DataFrame.from_records(records))\n\t\treturn x, y, weights\n\n\t@property\n\tdef model(self):\n\t\t\"\"\"\n\t\t:rtype: LinearRegression\n\t\t\"\"\"\n\t\tif self._model is None:\n\t\t\tx, y, weights = self.training_x_y_weights\n\t\t\tself._model = LinearRegression()\n\t\t\tnormalized_x = self.normalizer.transform(X=x)\n\t\t\tself._model.fit(normalized_x, y, sample_weight=weights)\n\t\treturn self._model\n\n\t@property\n\tdef normalizer(self):\n\t\t\"\"\"\n\t\t:type Normalizer\n\t\t\"\"\"\n\t\tif self._normalizer is None:\n\t\t\tx, y, weights = self.training_x_y_weights\n\t\t\tself._normalizer = Normalizer()\n\t\t\tself._normalizer.fit(X=x)\n\t\treturn self._normalizer\n\n\t@property\n\tdef error_model(self):\n\t\t\"\"\"\n\t\t:rtype: RandomForestClassifier\n\t\t\"\"\"\n\t\tif self._error_model is None:\n\t\t\tx, y, weights = self.error_training_x_y_weights\n\t\t\tself._error_model = RandomForestClassifier()\n\t\t\tself._error_model.fit(x, y, sample_weight=weights)\n\t\treturn self._error_model\n\n\tdef is_in_measurements(self, **kwargs):\n\t\treturn self.get_key(**kwargs) in self._measurements\n\n\tdef predict(self, **kwargs):\n\t\terror_probability = self.predict_error_probability(**kwargs)\n\t\ttime_prediction = self.predict_time(**kwargs)\n\t\treturn {'error_probability': error_probability, 'time_prediction': time_prediction}\n\n\tdef predict_data(self, data):\n\t\t\"\"\"\n\t\t:type data: DataFrame\n\t\t:rtype: DataFrame\n\t\t\"\"\"\n\t\terror_probability = list(1 - self.error_model.predict_proba(self.get_error_x_data(data))[:, 0])\n\t\terror = self.error_model.predict(self.get_error_x_data(data))\n\t\tnormalized = self.normalizer.transform(self.get_x_data(data))\n\t\telapsed = self.model.predict(normalized)\n\t\treturn DataFrame({'error_probability': error_probability, 'error': error, 'time': elapsed})\n\n\tdef predict_time(self, **kwargs):\n\t\t\"\"\"\n\t\t:rtype: list or float or int\n\t\t\"\"\"\n\t\tif self.check_arguments(kwargs=kwargs, method_name='predict'):\n\t\t\traise TypeError(self.check_arguments(kwargs=kwargs, method_name='predict'))\n\n\t\tkey = self.get_key(**kwargs)\n\t\tif key in self._measurements:\n\t\t\treturn 
self._measurements[key].elapsed_time\n\n\t\tx_data = self.get_x_data(x=kwargs)\n\t\tnormalized = self.normalizer.transform(x_data)\n\t\tpredictions = self.model.predict(normalized)\n\t\tif x_data.shape[0] > 1:\n\t\t\treturn list(predictions)\n\t\telse:\n\t\t\treturn list(predictions)[0]\n\n\tdef predict_error(self, **kwargs):\n\t\t\"\"\"\n\t\t:rtype: list or float or int\n\t\t\"\"\"\n\t\tif self.check_arguments(kwargs=kwargs, method_name='predict_error'):\n\t\t\traise TypeError(self.check_arguments(kwargs=kwargs, method_name='predict_error'))\n\n\t\tkey = self.get_key(**kwargs)\n\t\tif key in self._measurements:\n\t\t\treturn 1 * self._measurements[key].other_error\n\n\t\tx_data = self.get_error_x_data(x=kwargs)\n\t\tpredictions = self.error_model.predict(x_data)\n\t\tif x_data.shape[0] > 1:\n\t\t\treturn list(predictions)\n\t\telse:\n\t\t\treturn list(predictions)[0]\n\n\tdef predict_error_probability(self, **kwargs):\n\t\t\"\"\"\n\t\t:rtype: list or float or int\n\t\t\"\"\"\n\t\tif self.check_arguments(kwargs=kwargs, method_name='predict_error'):\n\t\t\traise TypeError(self.check_arguments(kwargs=kwargs, method_name='predict_error'))\n\n\t\tkey = self.get_key(**kwargs)\n\t\tif key in self._measurements:\n\t\t\treturn 1 * self._measurements[key].other_error\n\n\t\tx_data = self.get_error_x_data(x=kwargs)\n\t\tpredictions = 1 - self.error_model.predict_proba(x_data)[:, 0]\n\t\tif x_data.shape[0] > 1:\n\t\t\treturn list(predictions)\n\t\telse:\n\t\t\treturn list(predictions)[0]\n\n\t@staticmethod\n\tdef choose_value(values):\n\t\tif isinstance(values, list):\n\t\t\treturn choice(values)\n\t\telif isinstance(values, tuple) and len(values) == 2:\n\t\t\tif isinstance(values[0], int) and isinstance(values[1], int):\n\t\t\t\treturn randint(values[0], values[1])\n\t\t\telse:\n\t\t\t\treturn min(values) + random() * (max(values) - min(values))\n\t\telse:\n\t\t\treturn values\n\n\tdef auto_explore(self, timeout=None, max_order=5):\n\t\ttimeout = timeout or self._timeout\n\n\t\tif self.num_measurements < 3:\n\t\t\tfor i in range(3):\n\t\t\t\tself.measure(timeout=timeout, arguments={key: i for key in self._function_arguments})\n\n\t\tfor main_key in self._function_arguments:\n\t\t\tfor order_of_magnitude in range(max_order):\n\t\t\t\targument_range = (-1 * 10 ** order_of_magnitude, 10 ** order_of_magnitude)\n\t\t\t\tprint(main_key, argument_range)\n\t\t\t\targuments = {key: argument_range if key == main_key else 1 for key in self._function_arguments}\n\t\t\t\tself.explore(\n\t\t\t\t\ttimeout=timeout,\n\t\t\t\t\tnum_rounds=10,\n\t\t\t\t\targuments=arguments\n\t\t\t\t)\n\n\t\tfor order_of_magnitude in range(max_order):\n\t\t\targument_range = (-1 * 10 ** order_of_magnitude, 10 ** order_of_magnitude)\n\t\t\tprint(argument_range)\n\t\t\tself.explore(\n\t\t\t\ttimeout=timeout,\n\t\t\t\tnum_rounds=10,\n\t\t\t\targuments={key: argument_range for key in self._function_arguments}\n\t\t\t)\n\n\tdef explore(self, timeout, num_rounds=1000, arguments=None, random_state=42, **kwargs):\n\t\tstart_time = get_now()\n\t\targuments = self.get_arguments(arguments=arguments, **kwargs)\n\n\t\tprogress_bar = ProgressBar(total=timeout)\n\t\telapsed = get_elapsed(start=start_time, unit=self._unit)\n\t\tprogress_bar.show(amount=elapsed, total=timeout)\n\n\t\tcandidates = create_arguments(num_rounds=num_rounds, arguments=arguments)\n\n\t\t# remove candidates that have already been tried\n\t\ttried_candidates = self.data.drop(columns=['time', 'result', 'timeout_error', 'other_error'])\n\t\tcandidates = concat([candidates, 
tried_candidates, tried_candidates])\n\t\tcandidates.drop_duplicates(keep=False, inplace=True)\n\n\t\tcandidates['_id_'] = range(candidates.shape[0])\n\t\tcandidates = candidates.sample(frac=1)\n\n\t\telapsed = get_elapsed(start=start_time, unit=self._unit)\n\t\tnum_points = num_errors = num_timeouts = max_elapsed = 0\n\t\tnum_candidates = candidates.shape[0]\n\n\t\twhile timeout > elapsed and num_candidates > 0:\n\t\t\tprogress_bar.show(\n\t\t\t\tamount=elapsed,\n\t\t\t\ttotal=timeout,\n\t\t\t\ttext=f'n:{num_points}, e:{num_errors}, o:{num_timeouts}, t:{round(max_elapsed, 2)}, c:{num_candidates}'\n\t\t\t)\n\t\t\tcandidates.reset_index(drop=True, inplace=True)\n\t\t\tpredictions = self.predict_data(data=candidates.drop(columns='_id_'))\n\n\t\t\tpredictions.reset_index(drop=True, inplace=True)\n\t\t\tpredictions['_id_'] = candidates['_id_']\n\n\t\t\t# if we are out of time, stop\n\t\t\telapsed = get_elapsed(start=start_time, unit=self._unit)\n\t\t\tremaining_time = timeout - elapsed\n\t\t\tif remaining_time < 0:\n\t\t\t\tbreak\n\n\t\t\tnext_candidates = candidates.copy()\n\n\t\t\t# if we have had too many errors, try to not go after errors\n\t\t\tif self.num_errors > self._num_regular_runs and predictions['error'].sum() < predictions.shape[0]:\n\t\t\t\tnext_candidates = candidates[predictions['error'] == 0]\n\t\t\t\tpredictions = predictions[predictions['error'] == 0]\n\n\t\t\t# only try the ones that are reasonable timewise\n\t\t\tif (predictions['time'] < remaining_time).sum() > 0:\n\t\t\t\tnext_candidates = next_candidates[predictions['time'] < remaining_time]\n\t\t\t\tpredictions = predictions[predictions['time'] < remaining_time]\n\t\t\telse:\n\t\t\t\t# there will be no candidate, let's at least try our best with the shortest elapsed time\n\t\t\t\tnext_candidates = next_candidates[predictions['time'] == predictions['time'].min()].head(1)\n\t\t\t\tpredictions = predictions[predictions['time'] == predictions['time'].min()].head(1)\n\n\t\t\t# stop if there are no candidates\n\t\t\tif next_candidates.shape[0] == 0:\n\t\t\t\tbreak\n\n\t\t\t# choose longest time:\n\t\t\t# best_candidate = next_candidates[predictions['time'] == predictions['time'].max()].head(1)\n\n\t\t\t# choose randomly\n\t\t\tbest_candidate = next_candidates.sample(n=1, random_state=random_state)\n\t\t\tx = best_candidate.to_dict('records')[0]\n\t\t\tbest_candidate_id = x['_id_']\n\t\t\tdel x['_id_']\n\t\t\tmeasurement = self.measure(timeout=timeout, **x)\n\n\t\t\t# remove the candidate that was measured\n\t\t\tcandidates = candidates[candidates['_id_'] != best_candidate_id]\n\t\t\tnum_candidates = candidates.shape[0]\n\n\t\t\tif measurement.timeout_error:\n\t\t\t\tnum_timeouts += 1\n\t\t\telif measurement.other_error:\n\t\t\t\tnum_errors += 1\n\t\t\telse:\n\t\t\t\tnum_points += 1\n\t\t\t\tmax_elapsed = max(max_elapsed, measurement.elapsed_time)\n\t\t\telapsed = get_elapsed(start=start_time, unit=self._unit)\n\n\t\tprogress_bar.show(\n\t\t\tamount=timeout + 1,\n\t\t\ttotal=timeout,\n\t\t\ttext=f'n:{num_points}, e:{num_errors}, o:{num_timeouts}, t:{round(max_elapsed, 2)}, c:{num_candidates}'\n\t\t)\n\n\tdef plot(self, x, ignore_errors=True, marker='.', linestyle='', ms=12):\n\t\tdata = self.data.copy()\n\t\tdata['error'] = where(data.timeout_error, 'timeout', where(data.other_error, 'error', 'normal'))\n\t\tdata.drop(columns=['timeout_error', 'other_error'], inplace=True)\n\n\t\tif ignore_errors:\n\t\t\tfig, ax = plt.subplots()\n\t\t\tax.plot(data[x], data['time'], marker=marker, linestyle=linestyle, 
ms=ms)\n\n\t\telse:\n\t\t\tgroups = data.groupby('error')\n\t\t\tfig, ax = plt.subplots()\n\t\t\tax.margins(0.05) # Optional, just adds 5% padding to the autoscaling\n\t\t\tfor name, group in groups:\n\t\t\t\tax.plot(group[x], group['time'], marker=marker, linestyle=linestyle, ms=ms, label=name)\n\t\t\tax.legend()\n\t\tplt.show()\n\t\treturn fig, ax\n"
] |
[
[
"pandas.concat",
"sklearn.ensemble.RandomForestClassifier",
"matplotlib.pyplot.subplots",
"pandas.DataFrame",
"sklearn.linear_model.LinearRegression",
"pandas.DataFrame.from_records",
"matplotlib.pyplot.show",
"numpy.where"
]
] |
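A reduced sketch of the idea behind `Estimator`: time the target function over a few argument values, expand the argument into polynomial features, and fit a weighted linear model to predict elapsed time. Here `work` is a toy function, sklearn's `PolynomialFeatures` stands in for ravenclaw's `Polynomial`, and the `elapsed ** 2` weights mirror `training_x_y_weights`:

```python
import time
import numpy as np
from sklearn.linear_model import LinearRegression
from sklearn.preprocessing import PolynomialFeatures

def work(n):
    # Toy stand-in for the function whose runtime is being estimated.
    return sum(i * i for i in range(n))

sizes = np.array([1_000, 5_000, 20_000, 50_000, 100_000])
elapsed = []
for n in sizes:
    start = time.perf_counter()
    work(int(n))
    elapsed.append(time.perf_counter() - start)

poly = PolynomialFeatures(degree=2, include_bias=False)
x = poly.fit_transform(sizes.reshape(-1, 1))
model = LinearRegression().fit(x, elapsed, sample_weight=np.array(elapsed) ** 2)
print(model.predict(poly.transform([[200_000]])))      # predicted seconds for an unseen size
```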
thejoeejoee/unit-brno-2018 |
[
"94b704cf09f1b29c3f6e620bd751682a0e744c44"
] |
[
"unit/detectors/hough_circle.py"
] |
[
"# coding=utf-8\nimport logging\nfrom collections import defaultdict, namedtuple\nfrom math import pi\nfrom typing import DefaultDict, Dict, Iterable, Tuple, Set, Callable\n\nimport numpy as np\nfrom matplotlib import pyplot as plt\n\nCircle = namedtuple('Circle', 'x y radius')\n\n\nclass HoughCircleDetector(object):\n \"\"\"\n With Hough's circle transform tries to detect main circled component on image.\n For each of bigger components are computed \"stats\" about radius probability.\n \"\"\"\n GRADIENT_THRESHOLD = 40\n RADIUS_RANGE = (10, 120)\n\n def __init__(\n self,\n image: np.ndarray,\n grads: np.ndarray,\n scale: int = 10,\n vote_threshold: int = 4\n ):\n self._scale = scale\n self._image = image[::scale, ::scale]\n self._grads = grads[::scale, ::scale]\n self._radius_range = self.RADIUS_RANGE[0] // scale, self.RADIUS_RANGE[1] // scale\n self._vote_threshold = vote_threshold\n\n @property\n def scale(self) -> int:\n return self._scale\n\n def detect(self) -> Iterable[Tuple[int, int, int, int]]:\n \"\"\"\n From loaded image detect all AABB boxes around particles.\n \"\"\"\n shape = self._grads.shape\n radius_shape_count = abs(self._radius_range[0] + self._radius_range[1]) # type: int\n # accumulator array\n accumulator = np.zeros((radius_shape_count, shape[0], shape[1])) # type: np.ndarray\n\n max_x, max_y = self._grads.shape[:2]\n # linspace for goniometric functions\n t = np.linspace(0, 2 * pi, self._vote_threshold * 3)\n\n over = defaultdict(set) # type: DefaultDict[Circle, set]\n\n gen_radius_f = self._generate_cone_radius_callable(\n max_x=max_x,\n max_y=max_y,\n accumulator=accumulator,\n over=over,\n coss=np.cos(t),\n sins=np.sin(t),\n )\n for rad in range(self._radius_range[0], self._radius_range[1]):\n # accumulate all needed radiuses\n logging.debug('Computing radius stats on level {}...'.format(rad))\n np.fromfunction(\n np.vectorize(gen_radius_f(rad)),\n shape=shape\n )\n\n groups = self._create_groups(over, self._grads)\n return self._generate_boxes(groups)\n\n def _create_groups(self, over: Dict[int, set], grads: np.ndarray) -> Dict[Circle, Set[Circle]]:\n \"\"\"\n From rated radiuses creates main components with their main subcomponents,\n using clustering and analytic geometry.\n \"\"\"\n entities = set()\n radius_count = len(over)\n logging.debug('Removing components on sides.')\n over = {\n radius: tuple(self._remove_image_edges_components(radius, circles)) for radius, circles in over.items()\n }\n\n main_components = self._place_main_components(over, radius_count)\n logging.debug('Placed {} main components.'.format(len(main_components)))\n main_components = {Circle(int(c.x), int(c.y), int(c.radius * 1.2)) for c in main_components}\n\n groups = self._place_minor_components(entities, main_components, over, radius_count)\n logging.debug('Minor components placed into {} groups.'.format(len(groups)))\n # color = iter(plt.cm.rainbow(np.linspace(0, 1, 100)))\n\n # plt.imshow(self._image.T)\n\n # self._debug_plot_components(color, groups)\n\n self._join_near_main_components(groups)\n logging.debug('Groups joined total to {}.'.format(len(groups)))\n\n # self._debug_plot_components(color, groups)\n\n # self._debug_plot_boxes(self._generate_boxes(groups))\n\n # plt.show()\n\n return groups\n\n def _generate_cone_radius_callable(self, max_x: int, max_y: int, accumulator: np.ndarray, over: Dict,\n coss: np.ndarray, sins: np.ndarray) -> Callable:\n \"\"\"\n Create callable for walking though bevel gradients arrays and accumulating them into array.\n \"\"\"\n\n def 
gen_radius_f(rad: int) -> Callable:\n def bucketer(x: int, y: int):\n x, y = int(x), int(y)\n if self._grads[x, y] < self.GRADIENT_THRESHOLD:\n return False\n\n def accumulate(row):\n aa = int(row[0])\n bb = int(row[1])\n if 0 <= aa < max_y and 0 <= bb < max_x:\n if not self._image[aa, bb]:\n return\n accumulator[rad, int(aa), int(bb)] += 1\n\n if accumulator[rad, int(aa), int(bb)] > self._vote_threshold:\n over[rad].add((aa, bb))\n\n try:\n accumulator[rad, int(aa) + 1, int(bb)] += .25\n except IndexError:\n pass\n try:\n accumulator[rad, int(aa) - 1, int(bb)] += .25\n except IndexError:\n pass\n try:\n accumulator[rad, int(aa), int(bb) + 1] += .25\n except IndexError:\n pass\n try:\n accumulator[rad, int(aa), int(bb) - 1] += .25\n except IndexError:\n pass\n\n a = x - coss * rad\n b = y - sins * rad\n np.apply_along_axis(accumulate, 1, np.dstack((a, b))[0])\n return True\n\n return bucketer\n\n return gen_radius_f\n\n def _join_near_main_components(self, groups: Dict[Circle, Set[Circle]]):\n \"\"\"\n Tries to join all near main components - based od distance and collisions.\n New created are added to groups and recurently added to possibility joins.\n \"\"\"\n to_process = set(groups.keys())\n\n while to_process:\n candidate = to_process.pop()\n\n for another in tuple(to_process):\n if candidate != another and self.is_too_near(candidate, another, 12) and self._can_join_components(\n candidate, another\n ):\n new_main = Circle(\n (another.x + candidate.x) / 2,\n (another.y + candidate.y) / 2,\n self.distance(another, candidate)\n )\n if candidate in groups:\n del groups[candidate]\n if another in groups:\n del groups[another]\n\n groups[new_main] = groups[another] | groups[candidate]\n to_process.add(new_main)\n break\n else:\n break\n\n def _place_minor_components(self, entities, main_components, over, radius_count):\n \"\"\"\n Tries place minor component into major (main) components - decision based on enclosing and distance.\n \"\"\"\n groups = defaultdict(set)\n for radius in sorted(over, reverse=False)[:radius_count // 2]:\n to_process = list(over.get(radius))\n\n while to_process:\n x, y = to_process.pop()\n candidate_circle = Circle(int(x), int(y), int(radius))\n\n if any(self.is_too_near(candidate_circle, c, ratio=-.75) for c in entities):\n continue\n for main in main_components:\n if self.is_inside(\n main=main,\n to_check=candidate_circle\n ) and self._can_join_components(\n c1=main,\n c2=candidate_circle\n ):\n groups[main].add(candidate_circle)\n continue\n return groups\n\n def _place_main_components(self, over, radius_count):\n \"\"\"\n Tries to place main components into image - decisions based on distance between others.\n \"\"\"\n main_components = set()\n for radius in sorted(over, reverse=True)[:int(radius_count // 1.25)]:\n to_process = list(over.get(radius))\n\n while to_process:\n x, y = to_process.pop()\n candidate_circle = Circle(int(x), int(y), int(radius))\n\n if any(self.is_too_near(candidate_circle, c, ratio=0.1) for c in main_components):\n continue\n\n main_components.add(candidate_circle)\n return main_components\n\n def _remove_image_edges_components(self, radius: int, circles: Iterable[Tuple[int, int]], ratio=1.35):\n \"\"\"\n Removes all components with sides outside image shape.\n \"\"\"\n max_x = self._image.shape[0]\n max_y = self._image.shape[1]\n for x, y in circles:\n if min((x, y, max_x - x, max_y - y)) > radius * ratio:\n yield (x, y)\n\n def _generate_boxes(self, groups: Dict[Circle, Set[Circle]], ratio=0.015):\n \"\"\"\n From groups of main 
components generates boxes from their minimal/maximal X/Y.\n \"\"\"\n side = self._image.shape[0]\n for main, components in groups.items():\n min_x = side\n min_y = side\n max_x = 0\n max_y = 0\n for c in components:\n max_x = max((max_x, c.x + c.radius))\n min_x = min((min_x, c.x - c.radius))\n max_y = max((max_y, c.y + c.radius))\n min_y = min((min_y, c.y - c.radius))\n if not (max_y > min_y) or not (max_x > min_x):\n continue\n\n yield (\n min_x - side * (ratio / 2),\n min_y - side * (ratio / 2),\n (max_x - min_x) + side * ratio,\n (max_y - min_y) + side * ratio\n )\n\n def _can_join_components(self, c1: Circle, c2: Circle):\n \"\"\"\n Detects, if two components can be joined - exists active full path on image between centers.\n \"\"\"\n u1 = (c2.x - c1.x)\n u2 = (c2.y - c1.y)\n a1 = c1.x\n a2 = c1.y\n side = self._image.shape[0]\n\n r = int(min(c1.radius, c2.radius) * 1)\n for rat in (.25, .5, .75, .1,):\n for t in np.linspace(0, 1, 30):\n x1 = int(round(a1 + t * u1))\n y1 = int(round(a2 + t * u2))\n x2 = int(round(a1 + r * rat + t * u1))\n y2 = int(round(a2 + r * rat + t * u2))\n x3 = int(round(a1 - r * rat + t * u1))\n y3 = int(round(a2 - r * rat + t * u2))\n\n twices = (x1, y1), (x2, y2), (x3, y3)\n\n def is_ok(x, y):\n return 0 <= x < side and 0 <= y < side\n\n if all((self._image[tw] < self.GRADIENT_THRESHOLD) for tw in twices if is_ok(*tw)):\n return False\n\n return True\n\n @staticmethod\n def distance(c1, c2):\n \"\"\"Returns distance between centers two circles.\"\"\"\n return ((c1.x - c2.x) ** 2 + (c1.y - c2.y) ** 2) ** .5\n\n @staticmethod\n def is_inside(main: Circle, to_check: Circle):\n \"\"\"Returns, if to_check Circle is inside of main Circle.\"\"\"\n return (((main.x - to_check.x) ** 2 + (main.y - to_check.y) ** 2) ** .5) < main.radius\n\n @classmethod\n def is_too_near(cls, c1: Circle, c2: Circle, ratio: float) -> bool:\n \"\"\"Returns, if two circles has overlap higher then ration of sum of their radiuses.\"\"\"\n dist = cls.distance(c1, c2)\n R = c1.radius + c2.radius\n return (dist - R) < ratio * R\n\n def _debug_plot_circles(self, over):\n plt.imshow(self._image)\n for rad, circles in over.items():\n for x, y in circles:\n self._debug_show_shape(plt.Circle((y, x), rad, fc='none', ec='red'))\n plt.axis('scaled')\n plt.show()\n\n def _debug_plot_boxes(self, boxes):\n for x, y, w, h in boxes:\n self._debug_show_shape(\n plt.Rectangle(\n (x, y),\n w,\n h,\n fc='none',\n ec='red',\n lw=2,\n linestyle='--'\n )\n )\n plt.axis('scaled')\n plt.show()\n\n def _debug_plot_components(self, color, groups):\n for main, entities in groups.items(): # type: Circle\n\n c = next(color)\n\n self._debug_show_shape(\n plt.Circle(\n (\n main.x,\n main.y # + side // self._scale\n ),\n main.radius,\n fc='none',\n ec=c,\n linestyle=':',\n lw=4\n )\n )\n\n for entity in entities:\n self._debug_show_shape(\n plt.Circle(\n (\n entity.x,\n entity.y # + side // self._scale\n ),\n entity.radius,\n fc='none',\n ec=c\n )\n )\n\n @staticmethod\n def _debug_show_shape(patch):\n ax = plt.gca()\n ax.add_patch(patch)\n"
] |
[
[
"matplotlib.pyplot.Rectangle",
"matplotlib.pyplot.gca",
"matplotlib.pyplot.imshow",
"numpy.linspace",
"numpy.cos",
"numpy.dstack",
"numpy.sin",
"matplotlib.pyplot.Circle",
"matplotlib.pyplot.axis",
"matplotlib.pyplot.show",
"numpy.zeros"
]
] |
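A condensed sketch of the per-radius voting that `HoughCircleDetector.detect` performs: each edge point casts votes for candidate circle centres at a fixed radius, and centres that accumulate enough votes become components. It omits the detector's gradient threshold, neighbour smoothing, and vote-threshold bookkeeping; the function name and sample points are invented:

```python
import numpy as np

def hough_circle_votes(edge_points, radius, shape, n_angles=36):
    """Accumulate centre votes for one radius from (x, y) edge points."""
    acc = np.zeros(shape)
    t = np.linspace(0, 2 * np.pi, n_angles, endpoint=False)
    for x, y in edge_points:
        a = np.round(x - np.cos(t) * radius).astype(int)
        b = np.round(y - np.sin(t) * radius).astype(int)
        ok = (a >= 0) & (a < shape[0]) & (b >= 0) & (b < shape[1])
        np.add.at(acc, (a[ok], b[ok]), 1)
    return acc

# Four points on a circle of radius 10 around (50, 30)
acc = hough_circle_votes([(40, 30), (60, 30), (50, 40), (50, 20)], radius=10, shape=(100, 100))
print(np.unravel_index(acc.argmax(), acc.shape))       # strongest centre candidate: (50, 30)
```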
jamorrison/wgms_kit_comparison |
[
"f0da12f368f91b6ab8b94aaef60574cf5ca09bcf"
] |
[
"analysis/analyze_the_data/cpg_questions/control_methylation/control_vectors_qc.py"
] |
[
"\"\"\"Create plots to show methylation for mitochondrial and control vector DNA.\"\"\"\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nimport pandas as pd\nimport numpy as np\nimport os\n\n# Colors to use for each sample\nCOLOR = {\n 10: '#D81B60', # Ftube{A,B}kapaBC\n 9: '#D81B60', # Ftube{A,B}kapaBCrep2\n 8: '#1E88E5', # Ftube{A,B}neb\n 7: '#1E88E5', # Ftube{A,B}nebRep2\n 6: '#A0522D', # Ftube{A,B}pbat\n 5: '#004D40', # Ftube{A,B}swift\n 4: '#004D40', # Ftube{A,B}swiftRep2\n 3: '#1E88E5', # Ftube{A,B}neb10ng\n 2: '#1E88E5', # Ftube{A,B}neb10ngRep2\n 1: '#004D40', # Ftube{A,B}swift10ng\n 0: '#004D40', # Ftube{A,B}swift10ngRep2\n}\n\ndef extract_sample_info(sample_str):\n \"\"\"Extract kit, sample, and technical replicate from sample_str.\n\n Inputs -\n sample_str - string from sample name\n Returns -\n tuple (kit, biological sample name, technical replicate)\n \"\"\"\n s = sample_str.replace('Ftube', '')\n\n # The biological sample in now the first character in name\n bio = s[0]\n\n # Extract what kit is in sample\n kit = ''\n if 'kapabc' in s.lower():\n kit = 'Kapa'\n elif 'pbat' in s.lower():\n kit = 'PBAT'\n elif 'neb' in s.lower():\n kit = 'NEB'\n elif 'swift' in s.lower():\n kit = 'Swift'\n\n # Determine if low or high input\n if '10ng' in s:\n kit = 'Low ' + kit\n\n # Determine technical replicate\n rep = '1'\n if 'rep2' in s.lower():\n rep = '2'\n\n if (bio not in ['A', 'B']) or (kit == ''):\n print('[extract_sample_info] ERROR: Incorrect entry')\n return ('', '', '')\n\n return (kit, bio, rep)\n\ndef import_file(fname, control=True, select_chr=None):\n \"\"\"Read file into DataFrame and add some extra columns.\n\n Inputs -\n fname - filename to process\n control - whether file is for control vectors (True) or\n human (False) [default: True]\n select_chr - select out chromosome from DataFrame\n Returns -\n DataFrame of data from file with some added columns\n \"\"\"\n df = pd.read_csv(\n fname,\n sep='\\t',\n header=None,\n names=['chr','start','end','beta','covg','context']\n )\n\n # Add sample column\n samp = os.path.basename(fname)\n samp = samp.replace('.cg.sorted.mergecg.bed.gz', '')\n samp = samp.replace('.controls', '')\n kit, bio, rep = extract_sample_info(samp)\n df['sample'] = kit + ' Samp. ' + bio\n df['Replicate'] = 'Rep. 
' + rep\n\n # Include column of which control vector the CpG is from, only for control\n # vectors\n if (control == True):\n df['vector'] = np.where(df['chr'] == 'J02459.1', 'lambda', 'pUC19')\n\n # Select only CpGs that occur on select_chr chromosome\n if select_chr != None:\n out = df[df.chr == select_chr]\n else:\n out = df\n\n return out\n\ndef create_plot_matplotlib(data, title, xlab, ylab, ynames, figname):\n \"\"\"Create violin plot of methylation data.\n\n Inputs -\n data - list of data to include in violinplot\n title - title of plot\n xlab - x-axis label of plot\n ylab - y-axis label of plot\n ynames - names of ticks on y-axis\n figname - name of output file for plot\n \"\"\"\n fig, ax = plt.subplots(figsize=(10,5))\n plt.tight_layout()\n\n parts = ax.violinplot(\n dataset = data,\n vert = False,\n showmeans = False,\n showmedians = False,\n showextrema = False\n )\n\n for idx, pc in enumerate(parts['bodies']):\n pc.set_facecolor(COLOR[idx])\n pc.set_edgecolor(COLOR[idx])\n pc.set_alpha(1)\n\n plt.title(title, fontsize=24)\n plt.xlabel(xlab, fontsize=20)\n plt.ylabel(ylab, fontsize=20)\n\n plt.xticks(\n [i for i in np.arange(0, 1.2, 0.2)],\n ['{:.1f}'.format(i) for i in np.arange(0, 1.2, 0.2)],\n fontsize=18\n )\n plt.yticks([i for i in np.arange(1,len(ynames)+1)], ynames, fontsize=18)\n\n plt.savefig(figname, bbox_inches='tight')\n plt.close('all')\n\ndef create_plot(data, title, xlab, ylab, figname):\n \"\"\"Create split violin plot of methylation data.\n\n Inputs -\n data - list of data to include in violinplot\n title - title of plot\n xlab - x-axis label of plot\n ylab - y-axis label of plot\n figname - name of output file for plot\n Returns -\n Nothing, plot saved to disk\n \"\"\"\n fig, ax = plt.subplots(figsize=(10,5))\n plt.tight_layout()\n\n sns.violinplot(data=data, x='beta', y='sample', hue='Replicate', cut=0,\n split=True, inner='quartile', linewidth=1, orient='h',\n palette={'Rep. 1': '#005596', 'Rep. 2': '#3fa294'})\n\n ax.legend(title='', ncol=2, bbox_to_anchor=(0.5,0.96), frameon=False,\n loc='lower center', fontsize=20)\n plt.xlim(-0.05, 1.05)\n\n plt.xticks(\n [i for i in np.arange(0, 1.2, 0.2)],\n ['{:.1f}'.format(i) for i in np.arange(0, 1.2, 0.2)],\n fontsize=18\n )\n plt.yticks(fontsize=18)\n\n plt.title(title, pad=40, fontsize=24)\n plt.xlabel(xlab, fontsize=20)\n plt.ylabel(ylab, fontsize=20)\n\n plt.savefig(figname, bbox_inches='tight')\n plt.close('all')\n\ndef lambda_puc_plots():\n \"\"\"Process control vector data into violin plots. 
\"\"\"\n # Useful variables\n path = '2019_11_07_FallopianTube_WGBS_Kit_Comparison/analysis/control_vectors/'\n appd = '.controls.cg.sorted.mergecg.bed.gz'\n\n # Load data into DataFrames\n ak1_hi = import_file(path+'FtubeAkapaBC'+appd)\n ak2_hi = import_file(path+'FtubeAkapaBCrep2'+appd)\n an1_hi = import_file(path+'FtubeAneb'+appd)\n an1_lo = import_file(path+'FtubeAneb10ng'+appd)\n an2_lo = import_file(path+'FtubeAneb10ngRep2'+appd)\n an2_hi = import_file(path+'FtubeAnebRep2'+appd)\n ap1_hi = import_file(path+'FtubeApbat'+appd)\n as1_hi = import_file(path+'FtubeAswift'+appd)\n as1_lo = import_file(path+'FtubeAswift10ng'+appd)\n as2_lo = import_file(path+'FtubeAswift10ngRep2'+appd)\n as2_hi = import_file(path+'FtubeAswiftRep2'+appd)\n\n bk1_hi = import_file(path+'FtubeBkapaBC'+appd)\n bk2_hi = import_file(path+'FtubeBkapaBCrep2'+appd)\n bn1_hi = import_file(path+'FtubeBneb'+appd)\n bn1_lo = import_file(path+'FtubeBneb10ng'+appd)\n bn2_lo = import_file(path+'FtubeBneb10ngRep2'+appd)\n bn2_hi = import_file(path+'FtubeBnebRep2'+appd)\n bp1_hi = import_file(path+'FtubeBpbat'+appd)\n bs1_hi = import_file(path+'FtubeBswift'+appd)\n bs1_lo = import_file(path+'FtubeBswift10ng'+appd)\n bs2_lo = import_file(path+'FtubeBswift10ngRep2'+appd)\n bs2_hi = import_file(path+'FtubeBswiftRep2'+appd)\n\n # Pull out lambdaphage data from files\n lamb = pd.concat(\n [ak1_hi[(ak1_hi.vector == 'lambda')],\n ak2_hi[(ak2_hi.vector == 'lambda')],\n bk1_hi[(bk1_hi.vector == 'lambda')],\n bk2_hi[(bk2_hi.vector == 'lambda')],\n an1_hi[(an1_hi.vector == 'lambda')],\n an2_hi[(an2_hi.vector == 'lambda')],\n bn1_hi[(bn1_hi.vector == 'lambda')],\n bn2_hi[(bn2_hi.vector == 'lambda')],\n ap1_hi[(ap1_hi.vector == 'lambda')],\n bp1_hi[(bp1_hi.vector == 'lambda')],\n as1_hi[(as1_hi.vector == 'lambda')],\n as2_hi[(as2_hi.vector == 'lambda')],\n bs1_hi[(bs1_hi.vector == 'lambda')],\n bs2_hi[(bs2_hi.vector == 'lambda')],\n an1_lo[(an1_lo.vector == 'lambda')],\n an2_lo[(an2_lo.vector == 'lambda')],\n bn1_lo[(bn1_lo.vector == 'lambda')],\n bn2_lo[(bn2_lo.vector == 'lambda')],\n as1_lo[(as1_lo.vector == 'lambda')],\n as2_lo[(as2_lo.vector == 'lambda')],\n bs1_lo[(bs1_lo.vector == 'lambda')],\n bs2_lo[(bs2_lo.vector == 'lambda')]]\n )\n\n # Pull out pUC19 data from files\n puck = pd.concat(\n [ak1_hi[(ak1_hi.vector == 'pUC19')],\n ak2_hi[(ak2_hi.vector == 'pUC19')],\n bk1_hi[(bk1_hi.vector == 'pUC19')],\n bk2_hi[(bk2_hi.vector == 'pUC19')],\n an1_hi[(an1_hi.vector == 'pUC19')],\n an2_hi[(an2_hi.vector == 'pUC19')],\n bn1_hi[(bn1_hi.vector == 'pUC19')],\n bn2_hi[(bn2_hi.vector == 'pUC19')],\n ap1_hi[(ap1_hi.vector == 'pUC19')],\n bp1_hi[(bp1_hi.vector == 'pUC19')],\n as1_hi[(as1_hi.vector == 'pUC19')],\n as2_hi[(as2_hi.vector == 'pUC19')],\n bs1_hi[(bs1_hi.vector == 'pUC19')],\n bs2_hi[(bs2_hi.vector == 'pUC19')],\n an1_lo[(an1_lo.vector == 'pUC19')],\n an2_lo[(an2_lo.vector == 'pUC19')],\n bn1_lo[(bn1_lo.vector == 'pUC19')],\n bn2_lo[(bn2_lo.vector == 'pUC19')],\n as1_lo[(as1_lo.vector == 'pUC19')],\n as2_lo[(as2_lo.vector == 'pUC19')],\n bs1_lo[(bs1_lo.vector == 'pUC19')],\n bs2_lo[(bs2_lo.vector == 'pUC19')]]\n )\n\n create_plot(\n lamb,\n 'Lambda Phage Control Retention',\n 'Percent Retained',\n '',\n 'lamb_control.pdf'\n )\n\n create_plot(\n puck,\n 'pUC19 Control Retention',\n 'Percent Retained',\n '',\n 'puck_control.pdf'\n )\n\ndef mitochondria_plots():\n \"\"\"Process mitochondrial DNA data into violin plots. 
\"\"\"\n # Useful variables\n path = '2019_11_07_FallopianTube_WGBS_Kit_Comparison/analysis/align/'\n appd = '.cg.sorted.mergecg.bed.gz'\n\n # Load files into DataFrames\n ak1_hi = import_file(path+'FtubeAkapaBC'+appd, control=False, select_chr='chrM')\n ak2_hi = import_file(path+'FtubeAkapaBCrep2'+appd, control=False, select_chr='chrM')\n an1_hi = import_file(path+'FtubeAneb'+appd, control=False, select_chr='chrM')\n an1_lo = import_file(path+'FtubeAneb10ng'+appd, control=False, select_chr='chrM')\n an2_lo = import_file(path+'FtubeAneb10ngRep2'+appd, control=False, select_chr='chrM')\n an2_hi = import_file(path+'FtubeAnebRep2'+appd, control=False, select_chr='chrM')\n ap1_hi = import_file(path+'FtubeApbat'+appd, control=False, select_chr='chrM')\n as1_hi = import_file(path+'FtubeAswift'+appd, control=False, select_chr='chrM')\n as1_lo = import_file(path+'FtubeAswift10ng'+appd, control=False, select_chr='chrM')\n as2_lo = import_file(path+'FtubeAswift10ngRep2'+appd, control=False, select_chr='chrM')\n as2_hi = import_file(path+'FtubeAswiftRep2'+appd, control=False, select_chr='chrM')\n\n bk1_hi = import_file(path+'FtubeBkapaBC'+appd, control=False, select_chr='chrM')\n bk2_hi = import_file(path+'FtubeBkapaBCrep2'+appd, control=False, select_chr='chrM')\n bn1_hi = import_file(path+'FtubeBneb'+appd, control=False, select_chr='chrM')\n bn1_lo = import_file(path+'FtubeBneb10ng'+appd, control=False, select_chr='chrM')\n bn2_lo = import_file(path+'FtubeBneb10ngRep2'+appd, control=False, select_chr='chrM')\n bn2_hi = import_file(path+'FtubeBnebRep2'+appd, control=False, select_chr='chrM')\n bp1_hi = import_file(path+'FtubeBpbat'+appd, control=False, select_chr='chrM')\n bs1_hi = import_file(path+'FtubeBswift'+appd, control=False, select_chr='chrM')\n bs1_lo = import_file(path+'FtubeBswift10ng'+appd, control=False, select_chr='chrM')\n bs2_lo = import_file(path+'FtubeBswift10ngRep2'+appd, control=False, select_chr='chrM')\n bs2_hi = import_file(path+'FtubeBswiftRep2'+appd, control=False, select_chr='chrM')\n\n # Mitochondrial beta values\n chrm = pd.concat(\n [ak1_hi,\n ak2_hi,\n bk1_hi,\n bk2_hi,\n an1_hi,\n an2_hi,\n bn1_hi,\n bn2_hi,\n ap1_hi,\n bp1_hi,\n as1_hi,\n as2_hi,\n bs1_hi,\n bs2_hi,\n an1_lo,\n an2_lo,\n bn1_lo,\n bn2_lo,\n as1_lo,\n as2_lo,\n bs1_lo,\n bs2_lo]\n )\n\n create_plot(\n chrm,\n 'Mitochondrial Retention',\n 'Percent Retained',\n '',\n 'chrM_control.pdf'\n )\n\nif __name__ == '__main__':\n lambda_puc_plots()\n mitochondria_plots()\n"
] |
[
[
"pandas.concat",
"matplotlib.pyplot.tight_layout",
"pandas.read_csv",
"matplotlib.pyplot.title",
"numpy.arange",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.xlim",
"matplotlib.pyplot.close",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.yticks",
"numpy.where",
"matplotlib.pyplot.ylabel"
]
] |
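A small sketch of the split violin layout used by `create_plot`, drawn on synthetic beta values in place of the merged CpG BED files; the column names (`beta`, `sample`, `Replicate`) mirror the ones the script adds:

```python
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt

rng = np.random.default_rng(0)
df = pd.DataFrame({
    'beta': np.clip(rng.normal(0.05, 0.05, 400), 0, 1),            # invented retention values
    'sample': rng.choice(['Kapa Samp. A', 'Swift Samp. A'], 400),
    'Replicate': rng.choice(['Rep. 1', 'Rep. 2'], 400),
})

fig, ax = plt.subplots(figsize=(10, 5))
sns.violinplot(data=df, x='beta', y='sample', hue='Replicate',
               cut=0, split=True, inner='quartile', orient='h', ax=ax)
plt.show()
```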
jesseerdmann/audiobonsai |
[
"ec1edcdbadc6b2aff3b743b5c42515f4d5638830"
] |
[
"stat_stuff.py"
] |
[
"import pandas as pd\n\nfrom ab_util import create_connection\nfrom datetime import datetime, timedelta\nfrom math import floor\n\nif __name__ == '__main__':\n db_conn = create_connection()\n today_date = datetime.now() # - timedelta(days=6)\n today_name = today_date.strftime(\"%Y%m%d\")\n weekago_date = datetime.now() - timedelta(days=7)\n weekago_name = weekago_date.strftime(\"%Y%m%d\")\n select_albums = 'SELECT spotify_uri, add_date, trade_rec from albums'\n select_artists = 'SELECT spotify_uri as artist_uri, add_date, orig_pop,' \\\n + ' orig_foll, current_pop, current_foll from artists'\n select_join = 'SELECT * from album_artists'\n select_today = 'SELECT * from pop_foll_{}'.format(today_name)\n select_weekago = 'SELECT * from pop_foll_{}'.format(weekago_name)\n album_df = pd.read_sql(select_albums, db_conn).set_index('spotify_uri')\n # print(album_df.head(10))\n artist_df = pd.read_sql(select_artists, db_conn).set_index('artist_uri')\n # print(artist_df.head(10))\n join_df = pd.read_sql(select_join, db_conn).set_index('artist_uri')\n # print(join_df.head(10))\n today_df = pd.read_sql(select_today, db_conn).set_index('artist_uri')\n # print(today_df.head(10))\n weekago_df = pd.read_sql(select_weekago, db_conn).set_index('artist_uri')\n # print(weekago_df.head(10))\n\n joined_df = artist_df.join(join_df).join(today_df, rsuffix='_today')\n joined_df = joined_df.join(weekago_df, rsuffix='_weekago')\n joined_df = joined_df.set_index('album_uri')\n joined_df = joined_df.join(album_df, rsuffix='_album')\n\n print(joined_df.head(10))\n joined_df['total_pop_diff'] = joined_df['current_pop'] - joined_df['orig_pop']\n joined_df['total_foll_diff'] = joined_df['current_foll'] - joined_df['orig_foll']\n joined_df['sixday_pop_diff'] = joined_df['pop_{}'.format(today_name)] - joined_df['pop_{}'.format(weekago_name)]\n joined_df['sixday_foll_diff'] = joined_df['foll_{}'.format(today_name)] - joined_df['foll_{}'.format(weekago_name)]\n joined_df['sixday_foll_diff_pct'] = (joined_df['sixday_foll_diff']/joined_df['pop_{}'.format(weekago_name)])*100\n joined_df['sixday_foll_diff_pct'] = joined_df['sixday_foll_diff_pct'].apply(lambda x: '{:.0f}'.format(x))\n print(joined_df.columns)\n # joined_df.to_csv('all_joined.csv')\n print('Total albums: {}'.format(len(joined_df)))\n total_pos = joined_df[joined_df['sixday_pop_diff'] > 0]\n total_pos = total_pos[total_pos['sixday_foll_diff'] > 0]\n print('Albums with total positive pop: {}'.format(len(total_pos)))\n print(total_pos.groupby('current_pop').count())\n # weekago_pop_pos = joined_df[joined_df['sixday_pop_diff'] > 3]\n # weekago_pop_pos = weekago_pop_pos.reset_index()\n # print(weekago_pop_pos.columns)\n # print(weekago_pop_pos.head(10))\n # weekago_pop_pos['url'] = weekago_pop_pos['index'].apply(lambda x: 'https://open.spotify.com/album/{}'.format(x[14:]))\n # print(weekago_pop_pos.head(10))\n # print('Albums with six day positive pop: {}'.format(len(weekago_pop_pos)))\n # print(weekago_pop_pos.groupby('sixday_pop_diff').count())\n # weekago_pop_pos.to_csv('all_joined.csv')\n trade_recs = total_pos[total_pos['trade_rec'] == 1]\n print(trade_recs.head(10))\n print('Albums with trade_recs: {}'.format(len(trade_recs)))\n print(trade_recs.groupby('current_pop').count())\n\n total_pos = total_pos[total_pos['current_pop'] > 20]\n total_pos = total_pos[total_pos['current_pop'] < 65]\n print('Albums with total positive pop with pop between 20 and 65: {}'.format(len(total_pos)))\n\n trade_recs = trade_recs[trade_recs['current_pop'] >= 20]\n trade_recs = 
trade_recs[trade_recs['current_pop'] <= 65]\n print('Albums with trade_recs with pop between 20 and 65: {}'.format(len(trade_recs)))\n # print(trade_recs.groupby('sixday_pop_diff').count())\n # print(trade_recs.groupby('sixday_foll_diff_pct').count())\n"
] |
[
[
"pandas.read_sql"
]
] |
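The script's core operation is chaining index joins across the SQL tables; a toy version with in-memory frames shows the pattern (column names follow the script, the data is invented):

```python
import pandas as pd

artists = pd.DataFrame({'artist_uri': ['a1', 'a2'],
                        'orig_pop': [10, 40], 'current_pop': [15, 38]}).set_index('artist_uri')
links = pd.DataFrame({'artist_uri': ['a1', 'a2'],
                      'album_uri': ['b1', 'b2']}).set_index('artist_uri')
albums = pd.DataFrame({'spotify_uri': ['b1', 'b2'],
                       'trade_rec': [1, 0]}).set_index('spotify_uri')

joined = artists.join(links).set_index('album_uri').join(albums, rsuffix='_album')
joined['total_pop_diff'] = joined['current_pop'] - joined['orig_pop']
print(joined[joined['total_pop_diff'] > 0])
```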
droubo/meta-level-analysis-of-anomaly-detectors |
[
"a64671365b6c98ad14fc82f3430d3082b0455a6c"
] |
[
"Analysis Scripts/cdf.py"
] |
[
"import numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\n\ndata = pd.read_csv(\"AUC_SCORES.csv\", delimiter=\";\")\ndatasets = data['DATASETS'].tolist()\nprint(datasets)\nn = len(data)\nscores = []\nfor i in range(n):\n scores.append(data.loc[i:i].to_numpy()[0][1:])\nfor i in range(len(datasets)):\n print(datasets[i])\noccs = []\nfor i in range(len(datasets)):\n tmp = 0\n for s in scores[i]:\n if(s < 0.6):\n tmp += 1\n \n print(tmp)\n occs.append(tmp)\n\n\nx = []\ny = []\n#for i in range(24):\n #occs.append(int(input()))\nfor i in range(13):\n y.append(occs.count(i) / 24)\n x.append(i)\ncdf=np.cumsum(y)\nprint(y)\nplt.plot(x,y, marker=\"o\",label=\"PMF\")\nplt.plot(x,cdf,marker=\"o\",label=\"CDF\")\nplt.xlim(0,12)\nplt.ylim(0,1.1)\n\ntmp = []\nfor i in range(0,11):\n tmp.append(i/10)\nplt.yticks(tmp)\nplt.axvline(x=6, ymax=0.45, color='red', linestyle='--')\nplt.hlines(y=0.5, xmin = 0, xmax= 6, color='red', linestyle='dashed')\ntmp = []\nfor i in range(0,13):\n tmp.append(i)\nplt.xticks(tmp)\nplt.xlabel(\"#Detectors that exhibit similar performance to the Random Classifier (AUC)\")\nplt.ylabel(\"Probability Values\")\nplt.legend()\nplt.show()\n"
] |
[
[
"matplotlib.pyplot.legend",
"pandas.read_csv",
"matplotlib.pyplot.axvline",
"matplotlib.pyplot.ylim",
"matplotlib.pyplot.hlines",
"numpy.cumsum",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.xlim",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.yticks",
"matplotlib.pyplot.show",
"matplotlib.pyplot.xticks",
"matplotlib.pyplot.ylabel"
]
] |
patrickphatnguyen/deepchem
|
[
"f310f0a8d9eeb804f5e04974edff10ba62efab63"
] |
[
"deepchem/feat/graph_features.py"
] |
[
"import numpy as np\nfrom rdkit import Chem\n\nimport deepchem as dc\nfrom deepchem.feat import Featurizer\nfrom deepchem.feat.atomic_coordinates import ComplexNeighborListFragmentAtomicCoordinates\nfrom deepchem.feat.mol_graphs import ConvMol, WeaveMol\nfrom deepchem.data import DiskDataset\nimport multiprocessing\nimport logging\n\n\ndef _featurize_complex(featurizer, mol_pdb_file, protein_pdb_file, log_message):\n logging.info(log_message)\n return featurizer._featurize_complex(mol_pdb_file, protein_pdb_file)\n\n\ndef one_of_k_encoding(x, allowable_set):\n if x not in allowable_set:\n raise Exception(\"input {0} not in allowable set{1}:\".format(\n x, allowable_set))\n return list(map(lambda s: x == s, allowable_set))\n\n\ndef one_of_k_encoding_unk(x, allowable_set):\n \"\"\"Maps inputs not in the allowable set to the last element.\"\"\"\n if x not in allowable_set:\n x = allowable_set[-1]\n return list(map(lambda s: x == s, allowable_set))\n\n\ndef get_intervals(l):\n \"\"\"For list of lists, gets the cumulative products of the lengths\"\"\"\n intervals = len(l) * [0]\n # Initalize with 1\n intervals[0] = 1\n for k in range(1, len(l)):\n intervals[k] = (len(l[k]) + 1) * intervals[k - 1]\n\n return intervals\n\n\ndef safe_index(l, e):\n \"\"\"Gets the index of e in l, providing an index of len(l) if not found\"\"\"\n try:\n return l.index(e)\n except:\n return len(l)\n\n\npossible_atom_list = [\n 'C', 'N', 'O', 'S', 'F', 'P', 'Cl', 'Mg', 'Na', 'Br', 'Fe', 'Ca', 'Cu',\n 'Mc', 'Pd', 'Pb', 'K', 'I', 'Al', 'Ni', 'Mn'\n]\npossible_numH_list = [0, 1, 2, 3, 4]\npossible_valence_list = [0, 1, 2, 3, 4, 5, 6]\npossible_formal_charge_list = [-3, -2, -1, 0, 1, 2, 3]\npossible_hybridization_list = [\n Chem.rdchem.HybridizationType.SP, Chem.rdchem.HybridizationType.SP2,\n Chem.rdchem.HybridizationType.SP3, Chem.rdchem.HybridizationType.SP3D,\n Chem.rdchem.HybridizationType.SP3D2\n]\npossible_number_radical_e_list = [0, 1, 2]\npossible_chirality_list = ['R', 'S']\n\nreference_lists = [\n possible_atom_list, possible_numH_list, possible_valence_list,\n possible_formal_charge_list, possible_number_radical_e_list,\n possible_hybridization_list, possible_chirality_list\n]\n\nintervals = get_intervals(reference_lists)\npossible_bond_stereo = [\"STEREONONE\", \"STEREOANY\", \"STEREOZ\", \"STEREOE\"]\nbond_fdim_base = 6\n\n\ndef get_feature_list(atom):\n features = 6 * [0]\n features[0] = safe_index(possible_atom_list, atom.GetSymbol())\n features[1] = safe_index(possible_numH_list, atom.GetTotalNumHs())\n features[2] = safe_index(possible_valence_list, atom.GetImplicitValence())\n features[3] = safe_index(possible_formal_charge_list, atom.GetFormalCharge())\n features[4] = safe_index(possible_number_radical_e_list,\n atom.GetNumRadicalElectrons())\n features[5] = safe_index(possible_hybridization_list, atom.GetHybridization())\n return features\n\n\ndef features_to_id(features, intervals):\n \"\"\"Convert list of features into index using spacings provided in intervals\"\"\"\n id = 0\n for k in range(len(intervals)):\n id += features[k] * intervals[k]\n\n # Allow 0 index to correspond to null molecule 1\n id = id + 1\n return id\n\n\ndef id_to_features(id, intervals):\n features = 6 * [0]\n\n # Correct for null\n id -= 1\n\n for k in range(0, 6 - 1):\n # print(6-k-1, id)\n features[6 - k - 1] = id // intervals[6 - k - 1]\n id -= features[6 - k - 1] * intervals[6 - k - 1]\n # Correct for last one\n features[0] = id\n return features\n\n\ndef atom_to_id(atom):\n \"\"\"Return a unique id corresponding to the 
atom type\"\"\"\n features = get_feature_list(atom)\n return features_to_id(features, intervals)\n\n\ndef atom_features(atom,\n bool_id_feat=False,\n explicit_H=False,\n use_chirality=False):\n if bool_id_feat:\n return np.array([atom_to_id(atom)])\n else:\n from rdkit import Chem\n results = one_of_k_encoding_unk(\n atom.GetSymbol(),\n [\n 'C',\n 'N',\n 'O',\n 'S',\n 'F',\n 'Si',\n 'P',\n 'Cl',\n 'Br',\n 'Mg',\n 'Na',\n 'Ca',\n 'Fe',\n 'As',\n 'Al',\n 'I',\n 'B',\n 'V',\n 'K',\n 'Tl',\n 'Yb',\n 'Sb',\n 'Sn',\n 'Ag',\n 'Pd',\n 'Co',\n 'Se',\n 'Ti',\n 'Zn',\n 'H', # H?\n 'Li',\n 'Ge',\n 'Cu',\n 'Au',\n 'Ni',\n 'Cd',\n 'In',\n 'Mn',\n 'Zr',\n 'Cr',\n 'Pt',\n 'Hg',\n 'Pb',\n 'Unknown'\n ]) + one_of_k_encoding(atom.GetDegree(),\n [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]) + \\\n one_of_k_encoding_unk(atom.GetImplicitValence(), [0, 1, 2, 3, 4, 5, 6]) + \\\n [atom.GetFormalCharge(), atom.GetNumRadicalElectrons()] + \\\n one_of_k_encoding_unk(atom.GetHybridization(), [\n Chem.rdchem.HybridizationType.SP, Chem.rdchem.HybridizationType.SP2,\n Chem.rdchem.HybridizationType.SP3, Chem.rdchem.HybridizationType.\n SP3D, Chem.rdchem.HybridizationType.SP3D2\n ]) + [atom.GetIsAromatic()]\n # In case of explicit hydrogen(QM8, QM9), avoid calling `GetTotalNumHs`\n if not explicit_H:\n results = results + one_of_k_encoding_unk(atom.GetTotalNumHs(),\n [0, 1, 2, 3, 4])\n if use_chirality:\n try:\n results = results + one_of_k_encoding_unk(\n atom.GetProp('_CIPCode'),\n ['R', 'S']) + [atom.HasProp('_ChiralityPossible')]\n except:\n results = results + [False, False\n ] + [atom.HasProp('_ChiralityPossible')]\n\n return np.array(results)\n\n\ndef bond_features(bond, use_chirality=False):\n from rdkit import Chem\n bt = bond.GetBondType()\n bond_feats = [\n bt == Chem.rdchem.BondType.SINGLE, bt == Chem.rdchem.BondType.DOUBLE,\n bt == Chem.rdchem.BondType.TRIPLE, bt == Chem.rdchem.BondType.AROMATIC,\n bond.GetIsConjugated(),\n bond.IsInRing()\n ]\n if use_chirality:\n bond_feats = bond_feats + one_of_k_encoding_unk(\n str(bond.GetStereo()), possible_bond_stereo)\n return np.array(bond_feats)\n\n\ndef pair_features(mol, edge_list, canon_adj_list, bt_len=6,\n graph_distance=True):\n if graph_distance:\n max_distance = 7\n else:\n max_distance = 1\n N = mol.GetNumAtoms()\n features = np.zeros((N, N, bt_len + max_distance + 1))\n num_atoms = mol.GetNumAtoms()\n rings = mol.GetRingInfo().AtomRings()\n for a1 in range(num_atoms):\n for a2 in canon_adj_list[a1]:\n # first `bt_len` features are bond features(if applicable)\n features[a1, a2, :bt_len] = np.asarray(\n edge_list[tuple(sorted((a1, a2)))], dtype=float)\n for ring in rings:\n if a1 in ring:\n # `bt_len`-th feature is if the pair of atoms are in the same ring\n features[a1, ring, bt_len] = 1\n features[a1, a1, bt_len] = 0.\n # graph distance between two atoms\n if graph_distance:\n distance = find_distance(\n a1, num_atoms, canon_adj_list, max_distance=max_distance)\n features[a1, :, bt_len + 1:] = distance\n # Euclidean distance between atoms\n if not graph_distance:\n coords = np.zeros((N, 3))\n for atom in range(N):\n pos = mol.GetConformer(0).GetAtomPosition(atom)\n coords[atom, :] = [pos.x, pos.y, pos.z]\n features[:, :, -1] = np.sqrt(np.sum(np.square(\n np.stack([coords] * N, axis=1) - \\\n np.stack([coords] * N, axis=0)), axis=2))\n\n return features\n\n\ndef find_distance(a1, num_atoms, canon_adj_list, max_distance=7):\n distance = np.zeros((num_atoms, max_distance))\n radial = 0\n # atoms `radial` bonds away from `a1`\n adj_list = set(canon_adj_list[a1])\n # 
atoms less than `radial` bonds away\n all_list = set([a1])\n while radial < max_distance:\n distance[list(adj_list), radial] = 1\n all_list.update(adj_list)\n # find atoms `radial`+1 bonds away\n next_adj = set()\n for adj in adj_list:\n next_adj.update(canon_adj_list[adj])\n adj_list = next_adj - all_list\n radial = radial + 1\n return distance\n\n\nclass ConvMolFeaturizer(Featurizer):\n name = ['conv_mol']\n\n def __init__(self, master_atom=False, use_chirality=False,\n atom_properties=[]):\n \"\"\"\n Parameters\n ----------\n master_atom: Boolean\n if true create a fake atom with bonds to every other atom.\n the initialization is the mean of the other atom features in\n the molecule. This technique is briefly discussed in\n Neural Message Passing for Quantum Chemistry\n https://arxiv.org/pdf/1704.01212.pdf\n use_chirality: Boolean\n if true then make the resulting atom features aware of the\n chirality of the molecules in question\n atom_properties: list of string or None\n properties in the RDKit Mol object to use as additional\n atom-level features in the larger molecular feature. If None,\n then no atom-level properties are used. Properties should be in the\n RDKit mol object should be in the form\n atom XXXXXXXX NAME\n where XXXXXXXX is a zero-padded 8 digit number coresponding to the\n zero-indexed atom index of each atom and NAME is the name of the property\n provided in atom_properties. So \"atom 00000000 sasa\" would be the\n name of the molecule level property in mol where the solvent\n accessible surface area of atom 0 would be stored.\n\n Since ConvMol is an object and not a numpy array, need to set dtype to\n object.\n \"\"\"\n self.dtype = object\n self.master_atom = master_atom\n self.use_chirality = use_chirality\n self.atom_properties = list(atom_properties)\n\n def _get_atom_properties(self, atom):\n \"\"\"\n For a given input RDKit atom return the values of the properties\n requested when initializing the featurize. 
See the __init__ of the\n class for a full description of the names of the properties\n\n Parameters\n ----------\n atom: RDKit.rdchem.Atom\n Atom to get the properties of\n returns a numpy lists of floats of the same size as self.atom_properties\n \"\"\"\n values = []\n for prop in self.atom_properties:\n mol_prop_name = str(\"atom %08d %s\" % (atom.GetIdx(), prop))\n try:\n values.append(float(atom.GetOwningMol().GetProp(mol_prop_name)))\n except KeyError:\n raise KeyError(\"No property %s found in %s in %s\" %\n (mol_prop_name, atom.GetOwningMol(), self))\n return np.array(values)\n\n def _featurize(self, mol):\n \"\"\"Encodes mol as a ConvMol object.\"\"\"\n # Get the node features\n idx_nodes = [(a.GetIdx(),\n np.concatenate((atom_features(\n a, use_chirality=self.use_chirality),\n self._get_atom_properties(a))))\n for a in mol.GetAtoms()]\n\n idx_nodes.sort() # Sort by ind to ensure same order as rd_kit\n idx, nodes = list(zip(*idx_nodes))\n\n # Stack nodes into an array\n nodes = np.vstack(nodes)\n if self.master_atom:\n master_atom_features = np.expand_dims(np.mean(nodes, axis=0), axis=0)\n nodes = np.concatenate([nodes, master_atom_features], axis=0)\n\n # Get bond lists with reverse edges included\n edge_list = [\n (b.GetBeginAtomIdx(), b.GetEndAtomIdx()) for b in mol.GetBonds()\n ]\n\n # Get canonical adjacency list\n canon_adj_list = [[] for mol_id in range(len(nodes))]\n for edge in edge_list:\n canon_adj_list[edge[0]].append(edge[1])\n canon_adj_list[edge[1]].append(edge[0])\n\n if self.master_atom:\n fake_atom_index = len(nodes) - 1\n for index in range(len(nodes) - 1):\n canon_adj_list[index].append(fake_atom_index)\n\n return ConvMol(nodes, canon_adj_list)\n\n def feature_length(self):\n return 75 + len(self.atom_properties)\n\n def __hash__(self):\n atom_properties = tuple(self.atom_properties)\n return hash((self.master_atom, self.use_chirality, atom_properties))\n\n def __eq__(self, other):\n if not isinstance(self, other.__class__):\n return False\n return self.master_atom == other.master_atom and \\\n self.use_chirality == other.use_chirality and \\\n tuple(self.atom_properties) == tuple(other.atom_properties)\n\n\nclass WeaveFeaturizer(Featurizer):\n name = ['weave_mol']\n\n def __init__(self, graph_distance=True, explicit_H=False,\n use_chirality=False):\n # Distance is either graph distance(True) or Euclidean distance(False,\n # only support datasets providing Cartesian coordinates)\n self.graph_distance = graph_distance\n # Set dtype\n self.dtype = object\n # If includes explicit hydrogens\n self.explicit_H = explicit_H\n # If uses use_chirality\n self.use_chirality = use_chirality\n if self.use_chirality:\n self.bt_len = bond_fdim_base + len(possible_bond_stereo)\n else:\n self.bt_len = bond_fdim_base\n\n def _featurize(self, mol):\n \"\"\"Encodes mol as a WeaveMol object.\"\"\"\n # Atom features\n idx_nodes = [(a.GetIdx(),\n atom_features(\n a,\n explicit_H=self.explicit_H,\n use_chirality=self.use_chirality))\n for a in mol.GetAtoms()]\n idx_nodes.sort() # Sort by ind to ensure same order as rd_kit\n idx, nodes = list(zip(*idx_nodes))\n\n # Stack nodes into an array\n nodes = np.vstack(nodes)\n\n # Get bond lists\n edge_list = {}\n for b in mol.GetBonds():\n edge_list[tuple(sorted([b.GetBeginAtomIdx(),\n b.GetEndAtomIdx()]))] = bond_features(\n b, use_chirality=self.use_chirality)\n\n # Get canonical adjacency list\n canon_adj_list = [[] for mol_id in range(len(nodes))]\n for edge in edge_list.keys():\n canon_adj_list[edge[0]].append(edge[1])\n 
canon_adj_list[edge[1]].append(edge[0])\n\n # Calculate pair features\n pairs = pair_features(\n mol,\n edge_list,\n canon_adj_list,\n bt_len=self.bt_len,\n graph_distance=self.graph_distance)\n\n return WeaveMol(nodes, pairs)\n\n\nclass AtomicConvFeaturizer(ComplexNeighborListFragmentAtomicCoordinates):\n \"\"\"This class computes the Atomic Convolution features\"\"\"\n\n # TODO (VIGS25): Complete the description\n\n name = ['atomic_conv']\n\n def __init__(self,\n labels,\n neighbor_cutoff,\n frag1_num_atoms=70,\n frag2_num_atoms=634,\n complex_num_atoms=701,\n max_num_neighbors=12,\n batch_size=24,\n atom_types=[\n 6, 7., 8., 9., 11., 12., 15., 16., 17., 20., 25., 30., 35.,\n 53., -1.\n ],\n radial=[[\n 1.5, 2.0, 2.5, 3.0, 3.5, 4.0, 4.5, 5.0, 5.5, 6.0, 6.5, 7.0,\n 7.5, 8.0, 8.5, 9.0, 9.5, 10.0, 10.5, 11.0, 11.5, 12.0\n ], [0.0, 4.0, 8.0], [0.4]],\n layer_sizes=[32, 32, 16],\n strip_hydrogens=True,\n learning_rate=0.001,\n epochs=10):\n \"\"\"\n Parameters\n\n labels: numpy.ndarray\n Labels which we want to predict using the model\n neighbor_cutoff: int\n TODO (VIGS25): Add description\n frag1_num_atoms: int\n Number of atoms in first fragment\n frag2_num_atoms: int\n Number of atoms in second fragment\n complex_num_atoms: int\n TODO (VIGS25) : Add description\n max_num_neighbors: int\n Maximum number of neighbors possible for an atom\n batch_size: int\n Batch size used for training and evaluation\n atom_types: list\n List of atoms recognized by model. Atoms are indicated by their\n nuclear numbers.\n radial: list\n TODO (VIGS25): Add description\n layer_sizes: list\n List of layer sizes for the AtomicConvolutional Network\n strip_hydrogens: bool\n Whether to remove hydrogens while computing neighbor features\n learning_rate: float\n Learning rate for training the model\n epochs: int\n Number of epochs to train the model for\n \"\"\"\n\n self.atomic_conv_model = dc.models.atomic_conv.AtomicConvModel(\n frag1_num_atoms=frag1_num_atoms,\n frag2_num_atoms=frag2_num_atoms,\n complex_num_atoms=complex_num_atoms,\n max_num_neighbors=max_num_neighbors,\n batch_size=batch_size,\n atom_types=atom_types,\n radial=radial,\n layer_sizes=layer_sizes,\n learning_rate=learning_rate)\n\n super(AtomicConvFeaturizer, self).__init__(\n frag1_num_atoms=frag1_num_atoms,\n frag2_num_atoms=frag2_num_atoms,\n complex_num_atoms=complex_num_atoms,\n max_num_neighbors=max_num_neighbors,\n neighbor_cutoff=neighbor_cutoff,\n strip_hydrogens=strip_hydrogens)\n\n self.epochs = epochs\n self.labels = labels\n\n def featurize_complexes(self, mol_files, protein_files):\n pool = multiprocessing.Pool()\n results = []\n for i, (mol_file, protein_pdb) in enumerate(zip(mol_files, protein_files)):\n log_message = \"Featurizing %d / %d\" % (i, len(mol_files))\n results.append(\n pool.apply_async(_featurize_complex,\n (self, mol_file, protein_pdb, log_message)))\n pool.close()\n features = []\n failures = []\n for ind, result in enumerate(results):\n new_features = result.get()\n # Handle loading failures which return None\n if new_features is not None:\n features.append(new_features)\n else:\n failures.append(ind)\n\n features = np.asarray(features)\n labels = np.delete(self.labels, failures)\n dataset = DiskDataset.from_numpy(features, labels)\n\n # Fit atomic conv model\n self.atomic_conv_model.fit(dataset, nb_epoch=self.epochs)\n\n # Add the Atomic Convolution layers to fetches\n layers_to_fetch = list()\n for layer in self.atomic_conv_model.layers.values():\n if isinstance(layer, dc.models.atomic_conv.AtomicConvolution):\n 
layers_to_fetch.append(layer)\n\n # Extract the atomic convolution features\n atomic_conv_features = list()\n feed_dict_generator = self.atomic_conv_model.default_generator(\n dataset=dataset, epochs=1)\n\n for feed_dict in self.atomic_conv_model._create_feed_dicts(\n feed_dict_generator, training=False):\n frag1_conv, frag2_conv, complex_conv = self.atomic_conv_model._run_graph(\n outputs=layers_to_fetch, feed_dict=feed_dict, training=False)\n concatenated = np.concatenate(\n [frag1_conv, frag2_conv, complex_conv], axis=1)\n atomic_conv_features.append(concatenated)\n\n batch_size = self.atomic_conv_model.batch_size\n\n if len(features) % batch_size != 0:\n num_batches = (len(features) // batch_size) + 1\n num_to_skip = num_batches * batch_size - len(features)\n else:\n num_to_skip = 0\n\n atomic_conv_features = np.asarray(atomic_conv_features)\n atomic_conv_features = atomic_conv_features[-num_to_skip:]\n atomic_conv_features = np.squeeze(atomic_conv_features)\n\n return atomic_conv_features, failures\n"
] |
[
[
"numpy.asarray",
"numpy.squeeze",
"numpy.stack",
"numpy.concatenate",
"numpy.delete",
"numpy.mean",
"numpy.array",
"numpy.zeros",
"numpy.vstack"
]
] |
lihaod/Deep_inpainting_localization
|
[
"f9841168f2e289347838e7de8e7206cba4516081"
] |
[
"utils/vgg_mfcn.py"
] |
[
"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport numpy as np\nimport collections\nimport tensorflow as tf\nfrom tensorflow.contrib import layers\nfrom tensorflow.contrib.framework.python.ops import arg_scope\nfrom tensorflow.contrib.layers.python.layers import layers as layers_lib\nfrom tensorflow.contrib.layers.python.layers import regularizers\nfrom tensorflow.contrib.layers.python.layers import utils as tf_utils\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import init_ops\nfrom tensorflow.python.ops import nn_ops\nfrom tensorflow.python.ops import variable_scope\nfrom tensorflow.python.ops import math_ops\n\nfrom .bilinear_upsample_weights import bilinear_upsample_weights\nfrom . import TensorflowUtils as utils\n\n\n\nMODEL_URL = 'http://www.vlfeat.org/matconvnet/models/beta16/imagenet-vgg-verydeep-16.mat'\nMODEL_DIR = 'data/vgg_model/'\n\ndef vgg_arg_scope(weight_decay=0.0005):\n \"\"\"Defines the VGG arg scope.\n\n Args:\n weight_decay: The l2 regularization coefficient.\n\n Returns:\n An arg_scope.\n \"\"\"\n with arg_scope(\n [layers.conv2d],\n activation_fn=None,\n weights_initializer=tf.contrib.layers.xavier_initializer(),\n weights_regularizer=regularizers.l2_regularizer(weight_decay),\n biases_initializer=init_ops.zeros_initializer(),\n padding='SAME') as arg_sc:\n return arg_sc\n\ndef vgg_mfcn(inputs, is_training, weight_decay=5e-4, dropout_keep_prob=0.5, first_no_subsample=6, no_pool=False, num_classes=2, init=True):\n model_data = utils.get_model_data(MODEL_DIR, MODEL_URL)\n mean_pixel = np.mean(model_data['normalization'][0][0][0], axis=(0, 1))\n weights = np.squeeze(model_data['layers'])\n\n layers_name = (\n 'conv1_1', 'relu1_1', 'conv1_2', 'relu1_2', 'pool1',\n 'conv2_1', 'relu2_1', 'conv2_2', 'relu2_2', 'pool2',\n 'conv3_1', 'relu3_1', 'conv3_2', 'relu3_2', 'conv3_3', 'relu3_3', 'pool3',\n 'conv4_1', 'relu4_1', 'conv4_2', 'relu4_2', 'conv4_3', 'relu4_3', 'pool4',\n 'conv5_1', 'relu5_1', 'conv5_2', 'relu5_2', 'conv5_3', 'relu5_3', 'pool5',\n )\n\n inputs_shape = tf.shape(inputs)\n processed_image = tf.subtract(inputs, mean_pixel)\n net = processed_image\n\n end_points = collections.OrderedDict()\n\n for i, name in enumerate(layers_name):\n kind = name[:4]\n if kind == 'conv':\n kernels, bias = weights[i][0][0][0][0]\n kernels = kernels.transpose(1,0,2,3)\n bias = bias.reshape(-1)\n # matconvnet: weights are [width, height, in_channels, out_channels]\n # tensorflow: weights are [height, width, in_channels, out_channels]\n if init:\n print(\"Setting up vgg initialized conv layer: {}\".format(name))\n kernels_init = tf.constant_initializer(kernels, verify_shape=True)\n bias_init = tf.constant_initializer(bias, verify_shape=True)\n else:\n kernels = tf.contrib.layers.xavier_initializer()\n bias = init_ops.zeros_initializer()\n idx = int(name[4])\n dilation_rate = 1 if idx < first_no_subsample+1 else 2**(idx-first_no_subsample)\n net = layers.conv2d(net, kernels.shape[-1], kernels.shape[0:2], weights_initializer=kernels_init, biases_initializer=bias_init, rate=dilation_rate, scope=name)\n net = layers_lib.batch_norm(net, is_training=is_training, activation_fn=nn_ops.relu, scope=name)\n elif kind == 'relu':\n continue\n elif kind == 'pool':\n idx = int(name[4])\n if idx < first_no_subsample:\n net = layers_lib.max_pool2d(net, kernel_size=2, stride=2, padding=\"SAME\", scope=name)\n else:\n if not no_pool:\n net = layers_lib.max_pool2d(net, kernel_size=2, stride=1, 
padding=\"SAME\", scope=name)\n end_points[name] = net \n \n net = layers.conv2d(net, 4096, [7, 7], activation_fn=nn_ops.relu, scope='conv6')\n net = layers_lib.dropout(net, dropout_keep_prob, is_training=is_training, scope='dropout6')\n\n net = layers.conv2d(net, 4096, [1, 1], activation_fn=nn_ops.relu, scope='conv7')\n net = layers_lib.dropout(net, dropout_keep_prob, is_training=is_training, scope='dropout7')\n\n net = layers.conv2d(net, num_classes, [1, 1], activation_fn=None, scope='conv8')\n\n def skip_connection(input):\n # now to upscale to actual image size\n with tf.variable_scope('32x_to_16x'):\n deconv_shape1 = tf.shape(end_points[\"pool4\"])\n conv_t11 = tf.nn.conv2d_transpose(input, \\\n tf.get_variable('bilinear_kernel1', dtype=tf.float32, shape=[4,4,2,2], \\\n initializer=tf.constant_initializer(bilinear_upsample_weights(2,num_classes,num_classes), verify_shape=True), \\\n regularizer=regularizers.l2_regularizer(weight_decay)), \\\n [deconv_shape1[0],deconv_shape1[1],deconv_shape1[2],num_classes], strides=[1, 2, 2, 1], padding=\"SAME\")\n conv_t12 = layers.conv2d(end_points[\"pool4\"], num_classes, [1, 1], activation_fn=None, scope='conv_skip1')\n fuse_1 = tf.add(conv_t11, conv_t12, name=\"fuse_1\")\n\n with tf.variable_scope('16x_to_8x'):\n deconv_shape2 = tf.shape(end_points[\"pool3\"])\n conv_t21 = tf.nn.conv2d_transpose(fuse_1, \\\n tf.get_variable('bilinear_kernel2', dtype=tf.float32, shape=[4,4,2,2], \\\n initializer=tf.constant_initializer(bilinear_upsample_weights(2,num_classes,num_classes), verify_shape=True), \\\n regularizer=regularizers.l2_regularizer(weight_decay)), \\\n [deconv_shape2[0],deconv_shape2[1],deconv_shape2[2],num_classes], strides=[1, 2, 2, 1], padding=\"SAME\")\n conv_t22 = layers.conv2d(end_points[\"pool3\"], num_classes, [1, 1], activation_fn=None, scope='conv_skip2')\n fuse_2 = tf.add(conv_t21, conv_t22, name=\"fuse_2\")\n\n with tf.variable_scope('8x_to_1x'):\n conv_t3 = tf.nn.conv2d_transpose(fuse_2, \\\n tf.get_variable('bilinear_kernel3', dtype=tf.float32, shape=[16,16,2,2], \\\n initializer=tf.constant_initializer(bilinear_upsample_weights(8,num_classes,num_classes), verify_shape=True), \\\n regularizer=regularizers.l2_regularizer(weight_decay)), \\\n [inputs_shape[0], inputs_shape[1], inputs_shape[2], num_classes], strides=[1, 8, 8, 1], padding=\"SAME\")\n\n return conv_t3\n\n with tf.variable_scope('mask_pred'):\n logits_msk = skip_connection(net)\n preds_msk = tf.cast(tf.argmax(logits_msk,3),tf.int32)\n preds_msk_map = tf.nn.softmax(logits_msk)[:,:,:,1]\n\n with tf.variable_scope('edge_pred'):\n logits_edg = skip_connection(net)\n preds_edg = tf.cast(tf.argmax(logits_edg,3),tf.int32)\n preds_edg_map = tf.nn.softmax(logits_edg)[:,:,:,1]\n\n return logits_msk, logits_edg, preds_msk, preds_edg, preds_msk_map, preds_edg_map\n\n\n\n"
] |
[
[
"tensorflow.nn.softmax",
"tensorflow.contrib.layers.python.layers.layers.max_pool2d",
"tensorflow.contrib.layers.python.layers.regularizers.l2_regularizer",
"tensorflow.shape",
"numpy.squeeze",
"tensorflow.contrib.layers.python.layers.layers.dropout",
"tensorflow.contrib.layers.python.layers.layers.batch_norm",
"tensorflow.python.ops.init_ops.zeros_initializer",
"tensorflow.subtract",
"tensorflow.constant_initializer",
"tensorflow.contrib.layers.conv2d",
"numpy.mean",
"tensorflow.add",
"tensorflow.contrib.layers.xavier_initializer",
"tensorflow.variable_scope",
"tensorflow.argmax"
]
] |
SuiMingYang/sales-message-classify
|
[
"1b9ce984e907b688096c2287ad80e495034b347c"
] |
[
"push_chatdata_api/chatdata.py"
] |
[
"#!flask/bin/python\nimport pandas as pd\nfrom sqlalchemy import create_engine\nfrom flask import Flask, jsonify\nfrom flask import request,Response\nimport requests\nimport json\n\napp = Flask(__name__)\n\[email protected]('/push_chatdata', methods=['POST'])\ndef push_chatdata():\n try:\n info_list=request.form[\"chat\"]\n #print(info_list.replace(' ',''))\n info_list=json.loads(info_list.replace(' ',''))\n try:\n df = pd.DataFrame(info_list)\n df.to_sql('chatdata',create_engine(\"\"),if_exists=\"append\",index=True)\n #df.to_sql('chatdata',create_engine(\"mysql+pymysql://root:[email protected]/businessdata?charset=utf8\"),if_exists=\"append\")\n return \"success\"#json.dumps({'status':'success'})\n except Exception as e:\n print(e)\n return \"error\"#json.dumps({'status':'error','msg':e})\n except Exception as e:\n print(e)\n\nif __name__ == '__main__':\n #app.run(debug=True)\n app.run(host='0.0.0.0',port=5577)"
] |
[
[
"pandas.DataFrame"
]
] |
steveschulze/skyportal
|
[
"8f6bec6de89ea774d8d88f0ef24e9d10f3fce151"
] |
[
"skyportal/handlers/api/color_mag.py"
] |
[
"import numpy as np\nfrom baselayer.app.access import auth_or_token\nfrom ..base import BaseHandler\n\nfrom ...models import (\n Obj,\n Annotation,\n)\n\n\ndef normalize_key(str):\n # convert the string to lowercase and remove underscores\n return str.lower().replace('_', '')\n\n\ndef get_color_mag(annotations, **kwargs):\n # please refer to `ObjColorMagHandler.get` below\n\n # ignore None inputs from e.g., query arguments\n inputs = {k: v for k, v in kwargs.items() if v is not None}\n\n catalog = inputs.get('catalog', 'gaia')\n mag_key = inputs.get('apparentMagKey', 'Mag_G')\n parallax_key = inputs.get('parallaxKey', 'Plx')\n absorption_key = inputs.get('absorptionKey', 'A_G')\n abs_mag_key = inputs.get('absoluteMagKey', None)\n blue_mag_key = inputs.get('blueMagKey', 'Mag_Bp')\n red_mag_key = inputs.get('redMagKey', 'Mag_Rp')\n color_key = inputs.get('colorKey', None)\n\n output = []\n\n for an in annotations:\n\n if not isinstance(an.data, dict):\n continue\n\n abs_mag = None\n color = None\n absorption = None\n origin = an.origin\n\n # go over all items in the data (e.g., different catalog matches)\n for key, xmatch in an.data.items():\n\n if not isinstance(xmatch, dict):\n continue\n\n # found the right catalog, but does it have the right keys?\n if normalize_key(key) == normalize_key(catalog):\n\n # get the absolute magnitude\n if abs_mag_key is not None: # get the absolute magnitude directly\n for k in xmatch.keys():\n if normalize_key(abs_mag_key) == normalize_key(k):\n abs_mag = xmatch[k] # found it!\n break # no need to scan the rest of the cross match\n else: # we need to look for the apparent magnitude and parallax\n mag = None\n plx = None\n for k in xmatch.keys():\n if normalize_key(mag_key) == normalize_key(k):\n mag = xmatch[k]\n if normalize_key(parallax_key) == normalize_key(k):\n plx = xmatch[k]\n if mag is not None and plx is not None:\n abs_mag = mag - 5 * np.log10(plx / 100)\n break # no need to scan the rest of the cross match\n\n # get the color data\n if color_key is not None: # get the color value directly\n for k in xmatch.keys():\n if normalize_key(color_key) == normalize_key(k):\n color = float(xmatch[k]) # found it!\n break # no need to scan the rest of the cross match\n else:\n blue = None\n red = None\n for k in xmatch.keys():\n if normalize_key(blue_mag_key) == normalize_key(k):\n blue = xmatch[k]\n if normalize_key(red_mag_key) == normalize_key(k):\n red = xmatch[k]\n if blue is not None and red is not None:\n # calculate the color between these two magnitudes\n color = float(blue) - float(red)\n break # no need to scan the rest of the cross match\n\n # only check this if given an absorption term\n if absorption_key is not None:\n for k in xmatch.keys():\n if normalize_key(absorption_key) == normalize_key(k):\n absorption = xmatch[k]\n break # no need to scan the rest of the cross match\n\n if (\n abs_mag is not None\n and not np.isnan(abs_mag)\n and color is not None\n and not np.isnan(color)\n ):\n\n if absorption is not None and not np.isnan(absorption):\n abs_mag = abs_mag + absorption # apply the absorption term\n\n output.append({'origin': origin, 'abs_mag': abs_mag, 'color': color})\n break # found all the data we need for this annotation/origin\n\n return output\n\n\nclass ObjColorMagHandler(BaseHandler):\n \"\"\"\n ---\n description: |\n get the color and absolute magnitude of a source\n based on cross-matches to some catalog (default is GAIA).\n parameters:\n - in: path\n name: obj_id\n required: true\n schema:\n type: string\n description: ID of 
the object to retrieve photometry for\n - in: query\n name: catalog\n required: false\n schema:\n type: string\n description: |\n The name of the data key, associated with a catalog cross match,\n from which the color-mag data should be retrieved.\n Default is GAIA. Ignores case and underscores.\n - in: query\n name: apparentMagKey\n required: false\n schema:\n type: string\n description: |\n The key inside the cross-match which is associated\n with the magnitude of the color-magnitude data.\n Will look for parallax data in addition to this magnitude\n in order to calculate the absolute magnitude of the object.\n Default is \"Mag_G\". Ignores case and underscores.\n - in: query\n name: parallaxKey\n required: false\n schema:\n type: string\n description: |\n The key inside the cross-match which is associated\n with the parallax of the source.\n Will look for magnitude data in addition to this parallax\n in order to calculate the absolute magnitude of the object.\n Default is \"Plx\". Ignores case and underscores.\n - in: query\n name: absorptionKey\n required: false\n schema:\n type: string\n description: |\n The key inside the cross-match which is associated\n with the source absorption term.\n Will add this term to the absolute magnitude calculated\n from apparent magnitude and parallax.\n Default is \"A_G\". Ignores case and underscores.\n - in: query\n name: absoluteMagKey\n required: false\n schema:\n type: string\n description: |\n The key inside the cross-match which is associated\n with the absolute magnitude of the color-magnitude data.\n If given, will override the \"apparentMagKey\", \"parallaxKey\"\n and \"absorptionKey\", and takes the magnitude directly from\n this key in the cross match dictionary.\n Default is None. Ignores case and underscores.\n - in: query\n name: blueMagKey\n required: false\n schema:\n type: string\n description: |\n The key inside the cross-match which is associated\n with the source magnitude in the shorter wavelength.\n Will add this term to the red magnitude to get the color.\n Default is \"Mag_Bp\". Ignores case and underscores.\n - in: query\n name: redMagKey\n required: false\n schema:\n type: string\n description: |\n The key inside the cross-match which is associated\n with the source magnitude in the longer wavelength.\n Will add this term to the blue magnitude to get the color.\n Default is \"Mag_Rp\". Ignores case and underscores.\n - in: query\n name: colorKey\n required: false\n schema:\n type: string\n description: |\n The key inside the cross-match which is associated\n with the color term of the color-magnitude data.\n If given, will override the \"blueMagKey\", and \"redMagKey\",\n taking the color directly from the associated dictionary value.\n Default is None. 
Ignores case and underscores.\n\n responses:\n 200:\n content:\n application/json:\n schema:\n allOf:\n - $ref: '#/components/schemas/Success'\n - type: array\n items:\n type: object\n properties:\n origin:\n type: string\n color:\n type: float\n abs_mag:\n type: float\n\n 400:\n content:\n application/json:\n schema: Error\n\n \"\"\"\n\n @auth_or_token\n def get(self, obj_id):\n obj = Obj.query.get(obj_id)\n if obj is None:\n return self.error('Invalid object id.')\n\n annotations = (\n Annotation.query_records_accessible_by(self.current_user)\n .filter(Annotation.obj_id == obj_id)\n .all()\n )\n\n catalog = self.get_query_argument('catalog', None) # \"GAIA\"\n mag_key = self.get_query_argument('apparentMagKey', None) # \"Mag_G\"\n parallax_key = self.get_query_argument('parallaxKey', None) # \"Plx\"\n absorption_key = self.get_query_argument('absorptionKey', None) # \"A_G\"\n abs_mag_key = self.get_query_argument('absoluteMagKey', None) # None\n blue_mag_key = self.get_query_argument('blueMagKey', None) # \"Mag_Bp\"\n red_mag_key = self.get_query_argument('redMagKey', None) # \"Mag_Rp\"\n color_key = self.get_query_argument('colorKey', None) # None\n\n output = get_color_mag(\n annotations,\n catalog=catalog,\n apparentMagKey=mag_key,\n parallaxKey=parallax_key,\n absorptionKey=absorption_key,\n absoluteMagKey=abs_mag_key,\n blueMagKey=blue_mag_key,\n redMagKey=red_mag_key,\n colorKey=color_key,\n )\n\n self.verify_and_commit()\n\n return self.success(data=output)\n"
] |
[
[
"numpy.isnan",
"numpy.log10"
]
] |
Rhushabh1/Mini-AI-Games
|
[
"a9478c1e8bb93ffecdc247b0cf3ba7e3416ff095"
] |
[
"envs/2048/engine_2048.py"
] |
[
"import pygame\nimport random\nimport numpy as np\n\nSCREEN_WIDTH = 800\nSCREEN_HEIGHT = 900\nGRID_SIZE = 20\n\n# fontcolor, bgcolor\nwhite = '#ffffff'\nBgColor = '#cccccc'\ncolors = {\n\t1: \t\t['#b3adad', '#b3adad'],\n\t2: \t\t['#776e64', '#eee4da'],\n\t4: \t\t['#776e64', '#ede0c8'],\n\t8: \t\t['#f9f6f2', '#f2b179'],\n\t16: \t['#f9f6f2', '#f59563'],\n\t32: \t['#f9f6f2', '#f67c5f'],\n\t64: \t['#f9f6f2', '#f65e3b'],\n\t128: \t['#f9f6f2', '#edcf72'],\n\t256: \t['#f9f6f2', '#edcc61'],\n\t512: \t['#f9f6f2', '#edc850'],\n\t1024: \t['#f9f6f2', '#edc53f'],\n\t2048: \t['#f9f6f2', '#edc22e'],\n}\n\n\nclass Engine_2048:\n\tdef __init__(self):\n\t\t'''using powers as nos.\n\t\tConvert powers (x) to (2^x) in draw function'''\n\t\tself.grid = np.zeros((GRID_SIZE, GRID_SIZE)).astype('int32')\n\t\t\n\t\tself.dir_dict = {0: [1, 1],\t\t\t# up \t\t(1, 1), \t(-1, 0)\n\t\t\t\t\t\t 1: [0, 1],\t\t\t# left \t\t(0, 1), \t(0, -1)\n\t\t\t\t\t\t 2: [1, -1],\t\t# down \t\t(1, -1), \t(1, 0)\n\t\t\t\t\t\t 3: [0, -1]}\t\t# right \t(0, -1), \t(0, 1)\n\n\t\tself.header = SCREEN_HEIGHT - SCREEN_WIDTH\n\t\tself.cell_width = SCREEN_WIDTH//GRID_SIZE\n\t\tself.cell_height = (SCREEN_HEIGHT-self.header)//GRID_SIZE\n\t\tself.margin = min(self.cell_width, self.cell_height)//25\n\t\tself.font = pygame.font.SysFont('Arial', min(self.cell_width, self.cell_height)//2)\n\t\tself.header_font = pygame.font.SysFont('Arial', self.header//2)\n\n\t\tself.has_moved = False\n\t\tself.action_called = False\n\t\tself.is_stuck = False\n\t\tself.reached_goal = False\n\t\tself.total_reward = 0\n\t\tself.merge_reward = 0\n\t\t# self.has_collided = False\n\t\t# self.has_merged = False\n\n\n\t\tself.insert_choice = [1, 2]\t\t\t# [2, 4, 8]\n\t\tself.insert_probs = [0.95, 0.05]\n\t\tself.insert_cell()\n\n\tdef check_stuck(self):\n\t\t'''stuck when the there are no possible moves left'''\n\t\txs, ys = np.where(self.grid==0)\n\t\tif xs.size>0:\n\t\t\treturn False\n\t\tfor i in range(4):\n\t\t\tdirn = self.dir_dict[i]\n\t\t\tsuccess = self.shift(dirn, test=True)\n\t\t\tif success:\n\t\t\t\treturn False\n\t\treturn True\n\n\tdef check_goal(self):\n\t\t'''check if 2048 is reached'''\n\t\txs, ys = np.where(self.grid==11)\n\t\tif xs.size>0:\n\t\t\treturn True\n\t\telse:\n\t\t\treturn False\n\n\tdef insert_cell(self):\n\t\t'''place a number from [2, 4, 8] randomly in an empty cell'''\n\t\txs, ys = np.where(self.grid==0)\n\t\tif xs.size==0:\n\t\t\treturn\n\t\ti = random.randrange(xs.size)\n\t\tn = np.random.choice(self.insert_choice, p=self.insert_probs)\n\t\tself.grid[xs[i], ys[i]] = n\n\n\tdef shift(self, dirn, test=False):\n\t\t'''test=True -> change self.grid\n\t\telse -> dont alter self.grid'''\n\t\tx, y = dirn\n\t\ttmp_grid = self.grid.copy()\n\t\tif x:\n\t\t\ttmp_grid = tmp_grid.T\n\t\ttmp_grid = tmp_grid[::1, ::y]\n\n\t\tif not test:\n\t\t\tself.merge_reward = 0\n\t\t# default = RIGHT \n\t\tfor i in range(self.grid.shape[0]):\n\t\t\trow = tmp_grid[i]\n\t\t\t# shift\n\t\t\t# [3, 0, 1, 0]\n\t\t\ttmp_list = [j for j in row if j!=0]\n\t\t\t# [3, 1]\n\t\t\ttmp_list.extend( [0]*(row.size - len(tmp_list)) )\n\t\t\t# [3, 1, 0, 0]\n\t\t\t# merge\n\t\t\t# [3, 1, 1, 1, 0]\n\t\t\tfor j in range(1, len(tmp_list)):\n\t\t\t\tif tmp_list[j]==tmp_list[j-1] and tmp_list[j]!=0:\n\t\t\t\t\tif not test:\n\t\t\t\t\t\tself.merge_reward += pow(2, tmp_list[j]+1)\n\t\t\t\t\ttmp_list[j-1]+=1\n\t\t\t\t\ttmp_list[j] = 0\n\t\t\t# shift\n\t\t\t# [3, 2, 0, 1, 0]\n\t\t\ttmp_list = [j for j in tmp_list if j!=0]\n\t\t\ttmp_list.extend( [0]*(row.size - len(tmp_list)) )\n\t\t\t# [3, 2, 1, 
0, 0]\n\n\t\t\ttmp_grid[i, :] = np.array(tmp_list)\n\n\t\tif not test:\n\t\t\tself.total_reward += self.merge_reward\n\n\t\ttmp_grid = tmp_grid[::1, ::y]\n\t\tif x:\n\t\t\ttmp_grid = tmp_grid.T\n\t\t\n\t\tif (self.grid==tmp_grid).all():\n\t\t\treturn False\n\t\tif not test:\n\t\t\tself.grid = tmp_grid\n\t\treturn True\n\n\t\t# # shreyas\n\t\t# test_grid = self.grid.copy()\n\t\t# for i in range(len(self.grid)):\n\t\t# \ttemp_x = i\n\t\t# \tfor j in range(len(self.grid)):\n\t\t# \t\ttemp_y = j\n\t\t# \t\t# print(i,j)\n\t\t# \t\tif(test_grid[i,j]==0):\n\t\t# \t\t\tcontinue\n\t\t# \t\ttemp = test_grid[i,j]\n\t\t# \t\twhile not (self.has_collided): #or self.has_merged):\n\t\t# \t\t\ttemp_x+=dirn[0]\n\t\t# \t\t\ttemp_y+=dirn[1]\n\n\t\t# \t\t\tif not (temp_x >= len(test_grid) or temp_x < 0 or temp_y >= len(test_grid) or temp_y < 0):\n\t\t# \t\t\t\tprint(1)\n\t\t\t\t\t\t\n\t\t# \t\t\t\tif (test_grid[temp_x,temp_y]!=0 and test_grid[temp_x,temp_y]!=test_grid[temp_x-dirn[0],temp_y-dirn[1]]):\n\t\t# \t\t\t\t\tprint(2)\n\t\t# \t\t\t\t\tcontinue\n\t\t\t\t\t\t\n\t\t# \t\t\t\telif (test_grid[temp_x,temp_y]==test_grid[temp_x-dirn[0],temp_y-dirn[1]] and test_grid[temp_x,temp_y]!=0):\n\t\t# \t\t\t\t\tprint(3)\n\t\t# \t\t\t\t\ttest_grid[temp_x,temp_y]+=1\n\t\t# \t\t\t\t\ttest_grid[temp_x-dirn[0],temp_y-dirn[1]]=0\n\t\t# \t\t\t\t\t#self.has_merged = True\n\t\t# \t\t\t\telse:\n\t\t# \t\t\t\t\tprint(4)\n\t\t# \t\t\t\t\ttest_grid[temp_x,temp_y] = temp\n\t\t# \t\t\t\t\ttest_grid[temp_x-dirn[0],temp_y-dirn[1]] = 0\n\t\t# \t\t\t\t\tprint(\"grid = \\n\",test_grid)\n\t\t# \t\t\t\t\tprint(\"temp_x,temp_y = \",temp_x,temp_y)\n\t\t# \t\t\t\t\tprint(\"temp_x-dirn[0],temp_y-dirn[1] = \",temp_x-dirn[0],temp_y-dirn[1])\n\t\t# \t\t\telse:\n\t\t# \t\t\t\tself.has_collided = True\n\n\t\t# \t\tself.has_collided = False\n\t\t# \t\t#self.has_merged = False\n\n\t\t# if(self.grid==test_grid).all():\n\t\t# \treturn False\n\t\t# if not (test):\n\t\t# \tself.grid = test_grid\n\t\t# return True\n\n\t\t# return merge successful bool\n\n\tdef move(self, action):\n\t\t'''shift the cells using input action'''\n\t\tdirn = self.dir_dict[action]\n\t\tself.action_called = self.shift(dirn)\n\t\tif self.action_called:\n\t\t\tself.insert_cell()\n\n\tdef update(self):\n\t\tif self.action_called:\n\t\t\tself.is_stuck = self.check_stuck()\n\t\t\tself.reached_goal = self.check_goal()\n\t\tself.has_moved = self.action_called\n\t\tself.action_called = False\n\n\n\t\t# dirn = self.dir_dict[self.direction]\n\t\t# for x in range(len(self.grid)):\n\t\t# \ttemp_x = x\n\t\t# \tfor y in range(len(self.grid)):\n\t\t# \t\ttemp_y = y\n\n\t\t# \t\twhile not (self.has_collided):\n\t\t# \t\t\tself.check_full(self.grid,x,y)\n\t\t# \t\t\tif not (self.is_full):\n\n\t\t# \t\t\t\ttemp_value = self.grid[temp_x,temp_y]\n\t\t# \t\t\t\ttemp_x+=dirn[0]\n\t\t# \t\t\t\ttemp_y+=dirn[1]\n\t\t# \t\t\t\tif temp_x >= len(self.grid) or temp_x < 0:\n\t\t# \t\t\t\t\tself.has_collided = True\n\t\t# \t\t\t\telif temp_y >= len(self.grid) or temp_y < 0:\n\t\t# \t\t\t\t\tself.has_collided = True\n\t\t# \t\t\t\telse:\n\t\t# \t\t\t\t\tif self.grid[x,y] == 0 or self.grid[temp_x,temp_y] != 0:\n\t\t# \t\t\t\t\t\tcontinue\n\t\t# \t\t\t\t\telse:\n\t\t# \t\t\t\t\t\tself.merge(self.grid,dirn)\n\t\t# \t\t\t\t\t\tself.grid[temp_x,temp_y] = temp_value\n\n\t\t# \t\tself.has_collided = False\n\n\t\t# self.placed(self.grid)\n\n\tdef draw(self, screen):\n\t\t'''draw the snake and food on the screen'''\n\t\tw, h, m, head = self.cell_width, self.cell_height, self.margin, 
self.header\n\n\t\tscreen.fill(pygame.Color(white))\n\t\tpygame.draw.rect(screen, pygame.Color(BgColor), pygame.Rect(0, head, GRID_SIZE*w, GRID_SIZE*h), 0)\n\n\t\ttext = self.header_font.render(str(self.total_reward), True, pygame.Color(colors[1][0]), None)\n\t\ttextRect = text.get_rect()\n\t\ttextRect.center = ( SCREEN_WIDTH//4, head//2)\n\t\tscreen.blit(text, textRect)\n\n\t\tfor i in range(self.grid.shape[0]):\n\t\t\tfor j in range(self.grid.shape[1]):\n\t\t\t\tval = pow(2, self.grid[i, j])\n\t\t\t\t\n\t\t\t\tpygame.draw.rect(screen, pygame.Color(colors[val][1]), pygame.Rect(j*w+m, i*h+m + head, w-2*m, h-2*m), 0)\n\n\t\t\t\ttext = self.font.render(str(val), True, pygame.Color(colors[val][0]), None)\n\t\t\t\ttextRect = text.get_rect()\n\t\t\t\ttextRect.center = ( j*w+(w//2), i*h+(h//2) + head)\n\t\t\t\tscreen.blit(text, textRect)\n\n\nclass Pygame2D:\n\tdef __init__(self, grid_size=20, mode='bot'):\n\t\t'''Initialise pygame and display attributes'''\n\t\tglobal GRID_SIZE\n\t\tGRID_SIZE = grid_size\n\t\tallowed_modes = ['bot', 'human']\n\t\tassert mode in allowed_modes, \"Wrong mode for gym env. Should be from ['bot', 'human']\"\n\n\t\tself.mode = mode\n\t\tself.done = False\n\t\tself.no_op_action = -1 \t\t# action which does nothing\n\t\tself.human_action = self.no_op_action\n\n\t\tpygame.init()\n\t\tself.screen = pygame.display.set_mode((SCREEN_WIDTH, SCREEN_HEIGHT))\n\t\tpygame.display.set_caption(\"2048\")\n\t\tself.clock = pygame.time.Clock()\n\t\tself.engine = Engine_2048()\n\t\tself.game_speed = 10\n\t\tif self.mode == 'human':\n\t\t\tself.game_speed = 60\n\n\t\tself.stuck_penalty = -100\n\t\tself.finish_reward = 1000\n\t\tself.move_penalty = -1\n\n\t\tself.description = \"========CONTROLS========\\\n\t\t\\nUp arrow\\t: move up\\\n\t\t\\nDown arrow\\t: move down\\\n\t\t\\nLeft arrow\\t: move left\\\n\t\t\\nRight arrow\\t: move right\\\n\t\t\\n========================\"\n\n\tdef get_human_action(self):\n\t\tassert self.mode == 'human', \"return_action() not usable without 'human' mode for gym env.\"\n\t\taction = self.human_action\n\t\tself.human_action = self.no_op_action \t\t# no input action\n\t\treturn action\n\n\tdef action(self, action):\n\t\t'''update state by taking action\n\t\t-1 -> stop at position (do nothing)\n\t\t0 -> up \t1 -> left 2 -> down 3 -> right'''\n\t\tif action != -1:\n\t\t\tself.engine.move(action)\n\t\tself.engine.update()\n\n\tdef evaluate(self):\n\t\t'''compute reward of the engine'''\n\t\treward = 0\n\t\t# reward += self.engine.merge_reward\n\t\tif self.engine.has_moved:\n\t\t\treward += self.move_penalty\n\t\tif self.engine.is_stuck:\n\t\t\treward += self.stuck_penalty\n\t\tif self.engine.reached_goal:\n\t\t\treward += self.finish_reward\n\t\treturn reward\n\n\tdef is_done(self):\n\t\t'''check for terminal condition or crash'''\n\t\tif self.engine.is_stuck or self.engine.reached_goal or self.done:\n\t\t\tself.done = False\n\t\t\treturn True\n\t\treturn False\n\n\tdef observe(self):\n\t\t'''return next state upon taking action'''\n\t\treturn self.engine.grid.flatten()\n\n\tdef view(self):\n\t\t'''render the state of the game on the screen'''\n\t\tfor event in pygame.event.get():\n\t\t\tif event.type == pygame.QUIT:\n\t\t\t\tself.done = True\n\t\t\tif event.type == pygame.KEYDOWN:\n\t\t\t\tif event.key == pygame.K_q or event.key == pygame.K_ESCAPE:\n\t\t\t\t\tself.done = True\n\n\t\tself.engine.draw(self.screen)\n\n\t\tpygame.display.flip()\n\t\tself.clock.tick(self.game_speed)\n\n\tdef run_game_loop(self):\n\t\tfor event in pygame.event.get():\n\t\t\tif 
event.type == pygame.QUIT:\n\t\t\t\tself.done = True\n\t\t\tif event.type == pygame.KEYDOWN:\n\t\t\t\tif event.key == pygame.K_q or event.key == pygame.K_ESCAPE:\n\t\t\t\t\tself.done = True\n\t\t\t\tif self.mode == 'human':\n\t\t\t\t\tif event.key == pygame.K_RETURN:\n\t\t\t\t\t\tself.done = True\n\t\t\t\t\telif event.key == pygame.K_UP:\n\t\t\t\t\t\tself.human_action = 0\n\t\t\t\t\telif event.key == pygame.K_LEFT:\n\t\t\t\t\t\tself.human_action = 1\n\t\t\t\t\telif event.key == pygame.K_DOWN:\n\t\t\t\t\t\tself.human_action = 2\n\t\t\t\t\telif event.key == pygame.K_RIGHT:\n\t\t\t\t\t\tself.human_action = 3\n\n\t\taction = self.get_human_action()\n\t\tself.action(action)\n\t\treward = self.evaluate()\n\t\tdone = self.is_done()\n\n\t\tself.engine.draw(self.screen)\n\t\tpygame.display.flip()\n\t\tself.clock.tick(self.game_speed)\n\n\t\t# if done:\n\t\t# \tpygame.time.wait(5*1000)\n\n\t\treturn reward, done\n\n\tdef close(self):\n\t\tpygame.display.quit()\n\t\tpygame.quit()\n"
] |
[
[
"numpy.array",
"numpy.where",
"numpy.zeros",
"numpy.random.choice"
]
] |
migperfer/crema
|
[
"0aabbfa4dd67bdc86ff628203795060942c45aac"
] |
[
"crema/models/chord.py"
] |
[
"#!/usr/bin/env python\n'''CREMA Chord model'''\n\nimport numpy as np\nfrom scipy.stats import gmean\nfrom librosa import time_to_frames\nimport mir_eval\n\nfrom .base import CremaModel\n\n\nSEMITONE_TO_SCALE_DEGREE = ['1', 'b2', '2', 'b3', '3',\n '4', 'b5', '5', 'b6', '6', 'b7', '7']\n\n\nclass ChordModel(CremaModel):\n\n def __init__(self):\n self._instantiate('chord')\n\n def predict(self, filename=None, y=None, sr=None, outputs=None, outputfile=None):\n '''Chord prediction\n\n Parameters\n ----------\n filename : str\n Path to the audio file to analyze\n\n y, sr : np.ndarray, number>0\n\n Audio signal in memory to analyze\n\n outputs : dict `{str: np.ndarray}`\n\n Pre-computed model outputs, as given by ``ChordModel.outputs``.\n\n .. note:: At least one of `filename`, `y, sr`, or `outputs`\n must be provided.\n\n Returns\n -------\n jams.Annotation, namespace='chord'\n\n The chord estimate for the given signal.\n\n Examples\n --------\n >>> import crema\n >>> import librosa\n >>> model = crema.models.chord.ChordModel()\n >>> chord_est = model.predict(filename=librosa.util.example_audio_file())\n >>> chord_est\n <Annotation(namespace='chord',\n time=0,\n duration=61.4,\n annotation_metadata=<AnnotationMetadata(...)>,\n data=<45 observations>,\n sandbox=<Sandbox(...)>)>\n >>> chord_est.to_dataframe().head(5)\n time duration value confidence\n 0 0.000000 0.092880 E:maj 0.336977\n 1 0.092880 0.464399 E:7 0.324255\n 2 0.557279 1.021678 E:min 0.448759\n 3 1.578957 2.693515 E:maj 0.501462\n 4 4.272472 1.486077 E:min 0.287264\n '''\n if outputs is None:\n outputs = self.outputs(filename=filename, y=y, sr=sr)\n\n output_key = self.model.output_names[0]\n pump_op = self.pump[output_key]\n\n ann = super(ChordModel, self).predict(y=y, sr=sr, filename=filename,\n outputs=outputs)\n\n bass_pred = outputs['chord_bass']\n\n if outputfile is not None:\n np.save(outputfile, outputs['chord_pitch'])\n\n # Handle inversion estimation\n for obs in ann.pop_data():\n start, end = time_to_frames([obs.time, obs.time + obs.duration],\n sr=pump_op.sr,\n hop_length=pump_op.hop_length)\n\n value = obs.value\n if obs.value not in ('N', 'X'):\n mean_bass = gmean(bass_pred[start:end+1])\n\n bass_pc = np.argmax(mean_bass)\n root_pc, pitches, _ = mir_eval.chord.encode(obs.value)\n\n bass_rel = 0\n if bass_pc < 12:\n bass_rel = np.mod(bass_pc - root_pc, 12)\n\n if bass_rel and pitches[bass_rel]:\n value = '{}/{}'.format(value,\n SEMITONE_TO_SCALE_DEGREE[bass_rel])\n\n ann.append(time=obs.time,\n duration=obs.duration,\n value=value,\n confidence=obs.confidence)\n\n return ann\n"
] |
[
[
"scipy.stats.gmean",
"numpy.argmax",
"numpy.mod",
"numpy.save"
]
] |
LegendaryDaim/arXiv2020-RIFE
|
[
"8b1c0679a1d088117ab854b537ec631450658b70"
] |
[
"inference_video_parallel.py"
] |
[
"import os\nimport cv2\nimport torch\nimport argparse\nimport numpy as np\nfrom tqdm import tqdm\nfrom torch.nn import functional as F\nimport warnings\nimport _thread\nimport skvideo.io\nfrom queue import Queue, Empty\n\nwarnings.filterwarnings(\"ignore\")\n\ndevice = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\nif torch.cuda.is_available():\n torch.set_grad_enabled(False)\n torch.backends.cudnn.enabled = True\n torch.backends.cudnn.benchmark = True\n\nparser = argparse.ArgumentParser(description='Interpolation for a pair of images')\nparser.add_argument('--video', dest='video', required=True)\nparser.add_argument('--skip', dest='skip', action='store_true', help='whether to remove static frames before processing')\nparser.add_argument('--fps', dest='fps', type=int, default=None)\nparser.add_argument('--png', dest='png', action='store_true', help='whether to output png format outputs')\nparser.add_argument('--ext', dest='ext', type=str, default='mp4', help='output video extension')\nparser.add_argument('--exp', dest='exp', type=int, default=1, help='interpolation exponent (default: 1)')\nargs = parser.parse_args()\nassert (args.exp in [1, 2, 3])\nargs.times = 2 ** args.exp\n\nfrom model.RIFE import Model\nmodel = Model()\nmodel.load_model('./train_log')\nmodel.eval()\nmodel.device()\n\nvideoCapture = cv2.VideoCapture(args.video)\nfps = videoCapture.get(cv2.CAP_PROP_FPS)\ntot_frame = videoCapture.get(cv2.CAP_PROP_FRAME_COUNT)\nvideoCapture.release()\nvideogen = skvideo.io.vreader(args.video)\nframe = next(videogen)\nh, w, _ = frame.shape\nif args.fps is None:\n args.fps = fps * args.times\nfourcc = cv2.VideoWriter_fourcc('m', 'p', '4', 'v')\nvideo_path_wo_ext, ext = os.path.splitext(args.video)\nif args.png:\n if not os.path.exists('output'):\n os.mkdir('output')\n vid_out = None\nelse:\n vid_out = cv2.VideoWriter('{}_{}X_{}fps.{}'.format(video_path_wo_ext, args.times, int(np.round(args.fps)), args.ext), fourcc, args.fps, (w, h))\n \ncnt = 0\nskip_frame = 1\nbuffer = Queue()\nbuffer.put(frame)\ndef write_frame(i0, infs, i1, p, user_args):\n global skip_frame, cnt\n for i in range(i0.shape[0]):\n l = len(infs)\n # A video transition occurs.\n if p[i] > 0.2:\n for j in range(len(infs)):\n infs[j][i] = i0[i]\n \n # Result was too similar to previous frame, skip if given.\n if p[i] < 5e-3 and user_args.skip:\n if skip_frame % 100 == 0:\n print(\"Warning: Your video has {} static frames, \"\n \"skipping them may change the duration of the generated video.\".format(skip_frame))\n skip_frame += 1\n continue\n \n # Write results. 
\n buffer.put(i0[i])\n for inf in infs:\n buffer.put(inf[i])\n\ndef clear_buffer(user_args, buffer): \n global cnt\n while True:\n item = buffer.get()\n if item is None:\n break\n if user_args.png:\n cv2.imwrite('output/{:0>7d}.png'.format(cnt), item[:, :, ::-1])\n cnt += 1\n else:\n vid_out.write(item[:, :, ::-1])\n\ndef make_inference(model, I0, I1, exp):\n middle = model.inference(I0, I1)\n if exp == 1:\n return [middle]\n first_half = make_inference(model, I0, middle, exp=exp - 1)\n second_half = make_inference(model, middle, I1, exp=exp - 1)\n return [*first_half, middle, *second_half]\n\n\nph = ((h - 1) // 32 + 1) * 32\npw = ((w - 1) // 32 + 1) * 32\npadding = (0, pw - w, 0, ph - h)\nprint('{}.{}, {} frames in total, {}FPS to {}FPS'.format(video_path_wo_ext, args.ext, tot_frame, fps, args.fps))\npbar = tqdm(total=tot_frame)\nimg_list = []\n_thread.start_new_thread(clear_buffer, (args, buffer))\ntot_frame -= 1\nfor frame in videogen:\n tot_frame -= 1\n img_list.append(frame)\n if len(img_list) == 5 or tot_frame == 0:\n imgs = torch.from_numpy(np.transpose(img_list, (0, 3, 1, 2))).to(device, non_blocking=True).float() / 255.\n I0 = imgs[:-1]\n I1 = imgs[1:]\n p = (F.interpolate(I0, (16, 16), mode='bilinear', align_corners=False)\n - F.interpolate(I1, (16, 16), mode='bilinear', align_corners=False)).abs()\n I0 = F.pad(I0, padding)\n I1 = F.pad(I1, padding)\n inferences = make_inference(model, I0, I1, exp=args.exp)\n I0 = np.array(img_list[:-1])\n I1 = np.array(img_list[1:])\n inferences = list(map(lambda x: ((x[:, :, :h, :w] * 255.).byte().cpu().detach().numpy().transpose(0, 2, 3, 1)), inferences))\n \n write_frame(I0, inferences, I1, p.mean(3).mean(2).mean(1), args)\n pbar.update(4)\n img_list = img_list[-1:]\nbuffer.put(img_list[0])\nimport time\nwhile(not buffer.empty()):\n time.sleep(0.1)\npbar.close()\nif not vid_out is None:\n vid_out.release()\n"
] |
[
[
"numpy.transpose",
"numpy.round",
"torch.set_grad_enabled",
"torch.cuda.is_available",
"torch.nn.functional.interpolate",
"numpy.array",
"torch.nn.functional.pad"
]
] |
JJavier98/TFG-Bebop-YOLO
|
[
"d56e307862c853fbea8bc1b0a634a0e65d7ba95c"
] |
[
"src/yolo3/model.py"
] |
[
"\"\"\"YOLO_v3 Model Defined in Keras.\"\"\"\n\nfrom functools import wraps\n\nimport numpy as np\nimport tensorflow as tf\nfrom keras import backend as K\nfrom keras.layers import Conv2D, Add, ZeroPadding2D, UpSampling2D, Concatenate\nfrom keras.layers.advanced_activations import LeakyReLU\nfrom keras.layers.normalization import BatchNormalization\nfrom keras.models import Model\nfrom keras.regularizers import l2\n\nfrom yolo3.utils import compose\n\n\n@wraps(Conv2D)\ndef DarknetConv2D(*args, **kwargs):\n \"\"\"Wrapper to set Darknet parameters for Convolution2D.\"\"\"\n darknet_conv_kwargs = {'kernel_regularizer': l2(5e-4)}\n darknet_conv_kwargs['padding'] = 'valid' if kwargs.get('strides')==(2,2) else 'same'\n darknet_conv_kwargs.update(kwargs)\n return Conv2D(*args, **darknet_conv_kwargs)\n\ndef DarknetConv2D_BN_Leaky(*args, **kwargs):\n \"\"\"Darknet Convolution2D followed by BatchNormalization and LeakyReLU.\"\"\"\n no_bias_kwargs = {'use_bias': False}\n no_bias_kwargs.update(kwargs)\n return compose(\n DarknetConv2D(*args, **no_bias_kwargs),\n BatchNormalization(),\n LeakyReLU(alpha=0.1))\n\ndef resblock_body(x, num_filters, num_blocks):\n '''A series of resblocks starting with a downsampling Convolution2D'''\n # Darknet uses left and top padding instead of 'same' mode\n x = ZeroPadding2D(((1,0),(1,0)))(x)\n x = DarknetConv2D_BN_Leaky(num_filters, (3,3), strides=(2,2))(x)\n for i in range(num_blocks):\n y = compose(\n DarknetConv2D_BN_Leaky(num_filters//2, (1,1)),\n DarknetConv2D_BN_Leaky(num_filters, (3,3)))(x)\n x = Add()([x,y])\n return x\n\ndef darknet_body(x):\n '''Darknent body having 52 Convolution2D layers'''\n x = DarknetConv2D_BN_Leaky(32, (3,3))(x)\n x = resblock_body(x, 64, 1)\n x = resblock_body(x, 128, 2)\n x = resblock_body(x, 256, 8)\n x = resblock_body(x, 512, 8)\n x = resblock_body(x, 1024, 4)\n return x\n\ndef make_last_layers(x, num_filters, out_filters):\n '''6 Conv2D_BN_Leaky layers followed by a Conv2D_linear layer'''\n x = compose(\n DarknetConv2D_BN_Leaky(num_filters, (1,1)),\n DarknetConv2D_BN_Leaky(num_filters*2, (3,3)),\n DarknetConv2D_BN_Leaky(num_filters, (1,1)),\n DarknetConv2D_BN_Leaky(num_filters*2, (3,3)),\n DarknetConv2D_BN_Leaky(num_filters, (1,1)))(x)\n y = compose(\n DarknetConv2D_BN_Leaky(num_filters*2, (3,3)),\n DarknetConv2D(out_filters, (1,1)))(x)\n return x, y\n\n\ndef yolo_body(inputs, num_anchors, num_classes):\n \"\"\"Create YOLO_V3 model CNN body in Keras.\"\"\"\n darknet = Model(inputs, darknet_body(inputs))\n x, y1 = make_last_layers(darknet.output, 512, num_anchors*(num_classes+5))\n\n x = compose(\n DarknetConv2D_BN_Leaky(256, (1,1)),\n UpSampling2D(2))(x)\n x = Concatenate()([x,darknet.layers[152].output])\n x, y2 = make_last_layers(x, 256, num_anchors*(num_classes+5))\n\n x = compose(\n DarknetConv2D_BN_Leaky(128, (1,1)),\n UpSampling2D(2))(x)\n x = Concatenate()([x,darknet.layers[92].output])\n x, y3 = make_last_layers(x, 128, num_anchors*(num_classes+5))\n\n return Model(inputs, [y1,y2,y3])\n\n\ndef yolo_head(feats, anchors, num_classes, input_shape):\n \"\"\"Convert final layer features to bounding box parameters.\"\"\"\n num_anchors = len(anchors)\n # Reshape to batch, height, width, num_anchors, box_params.\n anchors_tensor = K.reshape(K.constant(anchors), [1, 1, 1, num_anchors, 2])\n\n grid_shape = K.shape(feats)[1:3] # height, width\n grid_y = K.tile(K.reshape(K.arange(0, stop=grid_shape[0]), [-1, 1, 1, 1]),\n [1, grid_shape[1], 1, 1])\n grid_x = K.tile(K.reshape(K.arange(0, stop=grid_shape[1]), [1, -1, 1, 1]),\n 
[grid_shape[0], 1, 1, 1])\n grid = K.concatenate([grid_x, grid_y])\n grid = K.cast(grid, K.dtype(feats))\n\n feats = K.reshape(\n feats, [-1, grid_shape[0], grid_shape[1], num_anchors, num_classes + 5])\n\n box_xy = K.sigmoid(feats[..., :2])\n box_wh = K.exp(feats[..., 2:4])\n box_confidence = K.sigmoid(feats[..., 4:5])\n box_class_probs = K.sigmoid(feats[..., 5:])\n\n # Adjust preditions to each spatial grid point and anchor size.\n #box_xy = (box_xy + grid) / K.cast(grid_shape[::-1], K.dtype(feats))\n #box_wh = box_wh * anchors_tensor / K.cast(input_shape[::-1], K.dtype(feats))\n box_xy = (box_xy + grid) / K.cast(grid_shape[...,::-1], K.dtype(feats))\n box_wh = box_wh * anchors_tensor / K.cast(input_shape[...,::-1], K.dtype(feats))\n\n return box_xy, box_wh, box_confidence, box_class_probs\n\n\ndef yolo_correct_boxes(box_xy, box_wh, input_shape, image_shape):\n '''Get corrected boxes'''\n box_yx = box_xy[..., ::-1]\n box_hw = box_wh[..., ::-1]\n input_shape = K.cast(input_shape, K.dtype(box_yx))\n image_shape = K.cast(image_shape, K.dtype(box_yx))\n new_shape = K.round(image_shape * K.min(input_shape/image_shape))\n offset = (input_shape-new_shape)/2./input_shape\n scale = input_shape/new_shape\n box_yx = (box_yx - offset) * scale\n box_hw *= scale\n\n box_mins = box_yx - (box_hw / 2.)\n box_maxes = box_yx + (box_hw / 2.)\n boxes = K.concatenate([\n box_mins[..., 0:1], # y_min\n box_mins[..., 1:2], # x_min\n box_maxes[..., 0:1], # y_max\n box_maxes[..., 1:2] # x_max\n ])\n\n # Scale boxes back to original image shape.\n boxes *= K.concatenate([image_shape, image_shape])\n return boxes\n\n\ndef yolo_boxes_and_scores(feats, anchors, num_classes, input_shape, image_shape):\n '''Process Conv layer output'''\n box_xy, box_wh, box_confidence, box_class_probs = yolo_head(feats,\n anchors, num_classes, input_shape)\n boxes = yolo_correct_boxes(box_xy, box_wh, input_shape, image_shape)\n boxes = K.reshape(boxes, [-1, 4])\n box_scores = box_confidence * box_class_probs\n box_scores = K.reshape(box_scores, [-1, num_classes])\n return boxes, box_scores\n\n\ndef yolo_eval(yolo_outputs,\n anchors,\n num_classes,\n image_shape,\n max_boxes=20,\n score_threshold=.6,\n iou_threshold=.5):\n \"\"\"Evaluate YOLO model on given input and return filtered boxes.\"\"\"\n anchor_mask = [[6,7,8], [3,4,5], [0,1,2]]\n input_shape = K.shape(yolo_outputs[0])[1:3] * 32\n boxes = []\n box_scores = []\n for l in range(3):\n _boxes, _box_scores = yolo_boxes_and_scores(yolo_outputs[l],\n anchors[anchor_mask[l]], num_classes, input_shape, image_shape)\n boxes.append(_boxes)\n box_scores.append(_box_scores)\n boxes = K.concatenate(boxes, axis=0)\n box_scores = K.concatenate(box_scores, axis=0)\n\n mask = box_scores >= score_threshold\n max_boxes_tensor = K.constant(max_boxes, dtype='int32')\n boxes_ = []\n scores_ = []\n classes_ = []\n for c in range(num_classes):\n # TODO: use keras backend instead of tf.\n class_boxes = tf.boolean_mask(boxes, mask[:, c])\n class_box_scores = tf.boolean_mask(box_scores[:, c], mask[:, c])\n nms_index = tf.image.non_max_suppression(\n class_boxes, class_box_scores, max_boxes_tensor, iou_threshold=iou_threshold)\n class_boxes = K.gather(class_boxes, nms_index)\n class_box_scores = K.gather(class_box_scores, nms_index)\n classes = K.ones_like(class_box_scores, 'int32') * c\n boxes_.append(class_boxes)\n scores_.append(class_box_scores)\n classes_.append(classes)\n boxes_ = K.concatenate(boxes_, axis=0)\n scores_ = K.concatenate(scores_, axis=0)\n classes_ = K.concatenate(classes_, axis=0)\n\n 
return boxes_, scores_, classes_\n\n\ndef preprocess_true_boxes(true_boxes, input_shape, anchors, num_classes):\n '''Preprocess true boxes to training input format\n\n Parameters\n ----------\n true_boxes: array, shape=(m, T, 5)\n Absolute x_min, y_min, x_max, y_max, class_code reletive to input_shape.\n input_shape: array-like, hw, multiples of 32\n anchors: array, shape=(N, 2), wh\n num_classes: integer\n\n Returns\n -------\n y_true: list of array, shape like yolo_outputs, xywh are reletive value\n\n '''\n anchor_mask = [[6,7,8], [3,4,5], [0,1,2]]\n\n true_boxes = np.array(true_boxes, dtype='float32')\n input_shape = np.array(input_shape, dtype='int32')\n boxes_xy = (true_boxes[..., 0:2] + true_boxes[..., 2:4]) // 2\n boxes_wh = true_boxes[..., 2:4] - true_boxes[..., 0:2]\n true_boxes[..., 0:2] = boxes_xy/input_shape[::-1]\n true_boxes[..., 2:4] = boxes_wh/input_shape[::-1]\n\n m = true_boxes.shape[0]\n grid_shapes = [input_shape//{0:32, 1:16, 2:8}[l] for l in range(3)]\n y_true = [np.zeros((m,grid_shapes[l][0],grid_shapes[l][1],len(anchor_mask[l]),5+num_classes),\n dtype='float32') for l in range(3)]\n\n # Expand dim to apply broadcasting.\n anchors = np.expand_dims(anchors, 0)\n anchor_maxes = anchors / 2.\n anchor_mins = -anchor_maxes\n valid_mask = boxes_wh[..., 0]>0\n\n for b in range(m):\n # Discard zero rows.\n wh = boxes_wh[b, valid_mask[b]]\n # Expand dim to apply broadcasting.\n wh = np.expand_dims(wh, -2)\n box_maxes = wh / 2.\n box_mins = -box_maxes\n\n intersect_mins = np.maximum(box_mins, anchor_mins)\n intersect_maxes = np.minimum(box_maxes, anchor_maxes)\n intersect_wh = np.maximum(intersect_maxes - intersect_mins, 0.)\n intersect_area = intersect_wh[..., 0] * intersect_wh[..., 1]\n box_area = wh[..., 0] * wh[..., 1]\n anchor_area = anchors[..., 0] * anchors[..., 1]\n iou = intersect_area / (box_area + anchor_area - intersect_area)\n\n # Find best anchor for each true box\n best_anchor = np.argmax(iou, axis=-1)\n\n for t, n in enumerate(best_anchor):\n for l in range(3):\n if n in anchor_mask[l]:\n i = np.floor(true_boxes[b,t,0]*grid_shapes[l][1]).astype('int32')\n j = np.floor(true_boxes[b,t,1]*grid_shapes[l][0]).astype('int32')\n n = anchor_mask[l].index(n)\n c = true_boxes[b,t, 4].astype('int32')\n y_true[l][b, j, i, n, 0:4] = true_boxes[b,t, 0:4]\n y_true[l][b, j, i, n, 4] = 1\n y_true[l][b, j, i, n, 5+c] = 1\n break\n\n return y_true\n\ndef box_iou(b1, b2):\n '''Return iou tensor\n\n Parameters\n ----------\n b1: tensor, shape=(i1,...,iN, 4), xywh\n b2: tensor, shape=(j, 4), xywh\n\n Returns\n -------\n iou: tensor, shape=(i1,...,iN, j)\n\n '''\n\n # Expand dim to apply broadcasting.\n b1 = K.expand_dims(b1, -2)\n b1_xy = b1[..., :2]\n b1_wh = b1[..., 2:4]\n b1_wh_half = b1_wh/2.\n b1_mins = b1_xy - b1_wh_half\n b1_maxes = b1_xy + b1_wh_half\n\n # Expand dim to apply broadcasting.\n b2 = K.expand_dims(b2, 0)\n b2_xy = b2[..., :2]\n b2_wh = b2[..., 2:4]\n b2_wh_half = b2_wh/2.\n b2_mins = b2_xy - b2_wh_half\n b2_maxes = b2_xy + b2_wh_half\n\n intersect_mins = K.maximum(b1_mins, b2_mins)\n intersect_maxes = K.minimum(b1_maxes, b2_maxes)\n intersect_wh = K.maximum(intersect_maxes - intersect_mins, 0.)\n intersect_area = intersect_wh[..., 0] * intersect_wh[..., 1]\n b1_area = b1_wh[..., 0] * b1_wh[..., 1]\n b2_area = b2_wh[..., 0] * b2_wh[..., 1]\n iou = intersect_area / (b1_area + b2_area - intersect_area)\n\n return iou\n\n\n\ndef yolo_loss(args, anchors, num_classes, ignore_thresh=.5):\n '''Return yolo_loss tensor\n\n Parameters\n ----------\n yolo_outputs: list of 
tensor, the output of yolo_body\n y_true: list of array, the output of preprocess_true_boxes\n anchors: array, shape=(T, 2), wh\n num_classes: integer\n ignore_thresh: float, the iou threshold whether to ignore object confidence loss\n\n Returns\n -------\n loss: tensor, shape=(1,)\n\n '''\n yolo_outputs = args[:3]\n y_true = args[3:]\n anchor_mask = [[6,7,8], [3,4,5], [0,1,2]]\n input_shape = K.cast(K.shape(yolo_outputs[0])[1:3] * 32, K.dtype(y_true[0]))\n grid_shapes = [K.cast(K.shape(yolo_outputs[l])[1:3], K.dtype(y_true[0])) for l in range(3)]\n loss = 0\n m = K.shape(yolo_outputs[0])[0]\n\n for l in range(3):\n object_mask = y_true[l][..., 4:5]\n true_class_probs = y_true[l][..., 5:]\n\n pred_xy, pred_wh, pred_confidence, pred_class_probs = yolo_head(yolo_outputs[l],\n anchors[anchor_mask[l]], num_classes, input_shape)\n pred_box = K.concatenate([pred_xy, pred_wh])\n\n # Darknet box loss.\n xy_delta = (y_true[l][..., :2]-pred_xy)*grid_shapes[l][::-1]\n wh_delta = K.log(y_true[l][..., 2:4]) - K.log(pred_wh)\n # Avoid log(0)=-inf.\n wh_delta = K.switch(object_mask, wh_delta, K.zeros_like(wh_delta))\n box_delta = K.concatenate([xy_delta, wh_delta], axis=-1)\n box_delta_scale = 2 - y_true[l][...,2:3]*y_true[l][...,3:4]\n\n # Find ignore mask, iterate over each of batch.\n ignore_mask = tf.TensorArray(K.dtype(y_true[0]), size=1, dynamic_size=True)\n object_mask_bool = K.cast(object_mask, 'bool')\n def loop_body(b, ignore_mask):\n true_box = tf.boolean_mask(y_true[l][b,...,0:4], object_mask_bool[b,...,0])\n iou = box_iou(pred_box[b], true_box)\n best_iou = K.max(iou, axis=-1)\n ignore_mask = ignore_mask.write(b, K.cast(best_iou<ignore_thresh, K.dtype(true_box)))\n return b+1, ignore_mask\n _, ignore_mask = K.control_flow_ops.while_loop(lambda b,*args: b<m, loop_body, [0, ignore_mask])\n ignore_mask = ignore_mask.stack()\n ignore_mask = K.expand_dims(ignore_mask, -1)\n\n box_loss = object_mask * K.square(box_delta*box_delta_scale)\n confidence_loss = object_mask * K.square(1-pred_confidence) + \\\n (1-object_mask) * K.square(0-pred_confidence) * ignore_mask\n class_loss = object_mask * K.square(true_class_probs-pred_class_probs)\n loss += K.sum(box_loss) + K.sum(confidence_loss) + K.sum(class_loss)\n return loss / K.cast(m, K.dtype(loss))\n"
] |
[
[
"tensorflow.boolean_mask",
"numpy.expand_dims",
"numpy.maximum",
"numpy.minimum",
"tensorflow.image.non_max_suppression",
"numpy.argmax",
"numpy.floor",
"numpy.array"
]
] |
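The NumPy calls listed above all come from the anchor-matching step in `preprocess_true_boxes`; the following is a minimal, self-contained sketch of that IoU-based matching, with made-up box and anchor sizes used purely for illustration.

```python
import numpy as np

def best_anchors(boxes_wh, anchors_wh):
    """boxes_wh: (T, 2) box widths/heights; anchors_wh: (N, 2) anchor widths/heights."""
    wh = np.expand_dims(boxes_wh, -2)            # (T, 1, 2)
    anchors = np.expand_dims(anchors_wh, 0)      # (1, N, 2)
    # Boxes and anchors are centred at the origin, so the intersection is
    # taken between [-wh/2, wh/2] and [-anchors/2, anchors/2].
    inter_mins = np.maximum(-wh / 2., -anchors / 2.)
    inter_maxes = np.minimum(wh / 2., anchors / 2.)
    inter_wh = np.maximum(inter_maxes - inter_mins, 0.)
    inter_area = inter_wh[..., 0] * inter_wh[..., 1]
    box_area = wh[..., 0] * wh[..., 1]
    anchor_area = anchors[..., 0] * anchors[..., 1]
    iou = inter_area / (box_area + anchor_area - inter_area)
    return np.argmax(iou, axis=-1)               # index of the best anchor per box

boxes = np.array([[30., 40.], [120., 90.]])      # illustrative ground-truth sizes
anchors = np.array([[10., 13.], [33., 23.], [62., 45.], [116., 90.]])
print(best_anchors(boxes, anchors))              # [1 3]
```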
n0whereRuoxi/rl-starter-files
|
[
"b2ae68d544f4665a62b3d782c44008ef050e9b62"
] |
[
"gym_minigrid/wrappers.py"
] |
[
"import math\nimport operator\nfrom functools import reduce\n\nimport numpy as np\nimport gym\nfrom gym import error, spaces, utils\nfrom .minigrid import OBJECT_TO_IDX, COLOR_TO_IDX, STATE_TO_IDX\n\nclass ReseedWrapper(gym.core.Wrapper):\n \"\"\"\n Wrapper to always regenerate an environment with the same set of seeds.\n This can be used to force an environment to always keep the same\n configuration when reset.\n \"\"\"\n\n def __init__(self, env, seeds=[0], seed_idx=0):\n self.seeds = list(seeds)\n self.seed_idx = seed_idx\n super().__init__(env)\n\n def reset(self, **kwargs):\n seed = self.seeds[self.seed_idx]\n self.seed_idx = (self.seed_idx + 1) % len(self.seeds)\n self.env.seed(seed)\n return self.env.reset(**kwargs)\n\n def step(self, action):\n obs, reward, done, info = self.env.step(action)\n return obs, reward, done, info\n\nclass ActionBonus(gym.core.Wrapper):\n \"\"\"\n Wrapper which adds an exploration bonus.\n This is a reward to encourage exploration of less\n visited (state,action) pairs.\n \"\"\"\n\n def __init__(self, env):\n super().__init__(env)\n self.counts = {}\n\n def step(self, action):\n obs, reward, done, info = self.env.step(action)\n\n env = self.unwrapped\n tup = (tuple(env.agent_pos), env.agent_dir, action)\n\n # Get the count for this (s,a) pair\n pre_count = 0\n if tup in self.counts:\n pre_count = self.counts[tup]\n\n # Update the count for this (s,a) pair\n new_count = pre_count + 1\n self.counts[tup] = new_count\n\n bonus = 1 / math.sqrt(new_count)\n reward += bonus\n\n return obs, reward, done, info\n\n def reset(self, **kwargs):\n return self.env.reset(**kwargs)\n\nclass StateBonus(gym.core.Wrapper):\n \"\"\"\n Adds an exploration bonus based on which positions\n are visited on the grid.\n \"\"\"\n\n def __init__(self, env):\n super().__init__(env)\n self.counts = {}\n\n def step(self, action):\n obs, reward, done, info = self.env.step(action)\n\n # Tuple based on which we index the counts\n # We use the position after an update\n env = self.unwrapped\n tup = (tuple(env.agent_pos))\n\n # Get the count for this key\n pre_count = 0\n if tup in self.counts:\n pre_count = self.counts[tup]\n\n # Update the count for this key\n new_count = pre_count + 1\n self.counts[tup] = new_count\n\n bonus = 1 / math.sqrt(new_count)\n reward += bonus\n\n return obs, reward, done, info\n\n def reset(self, **kwargs):\n return self.env.reset(**kwargs)\n\nclass ImgObsWrapper(gym.core.ObservationWrapper):\n \"\"\"\n Use the image as the only observation output, no language/mission.\n \"\"\"\n\n def __init__(self, env):\n super().__init__(env)\n self.observation_space = env.observation_space.spaces['image']\n\n def observation(self, obs):\n return obs['image']\n\nclass OneHotPartialObsWrapper(gym.core.ObservationWrapper):\n \"\"\"\n Wrapper to get a one-hot encoding of a partially observable\n agent view as observation.\n \"\"\"\n\n def __init__(self, env, tile_size=8):\n super().__init__(env)\n\n self.tile_size = tile_size\n\n obs_shape = env.observation_space['image'].shape\n\n # Number of bits per cell\n num_bits = len(OBJECT_TO_IDX) + len(COLOR_TO_IDX) + len(STATE_TO_IDX)\n\n self.observation_space.spaces[\"image\"] = spaces.Box(\n low=0,\n high=255,\n shape=(obs_shape[0], obs_shape[1], num_bits),\n dtype='uint8'\n )\n\n def observation(self, obs):\n img = obs['image']\n out = np.zeros(self.observation_space.spaces['image'].shape, dtype='uint8')\n\n for i in range(img.shape[0]):\n for j in range(img.shape[1]):\n type = img[i, j, 0]\n color = img[i, j, 1]\n state = img[i, 
j, 2]\n\n out[i, j, type] = 1\n out[i, j, len(OBJECT_TO_IDX) + color] = 1\n out[i, j, len(OBJECT_TO_IDX) + len(COLOR_TO_IDX) + state] = 1\n\n return {\n 'mission': obs['mission'],\n 'image': out\n }\n\nclass RGBImgObsWrapper(gym.core.ObservationWrapper):\n \"\"\"\n Wrapper to use fully observable RGB image as the only observation output,\n no language/mission. This can be used to have the agent to solve the\n gridworld in pixel space.\n \"\"\"\n\n def __init__(self, env, tile_size=8):\n super().__init__(env)\n\n self.tile_size = tile_size\n\n self.observation_space.spaces['image'] = spaces.Box(\n low=0,\n high=255,\n shape=(self.env.width * tile_size, self.env.height * tile_size, 3),\n dtype='uint8'\n )\n\n def observation(self, obs):\n env = self.unwrapped\n\n rgb_img = env.render(\n mode='rgb_array',\n highlight=False,\n tile_size=self.tile_size\n )\n\n return {\n 'mission': obs['mission'],\n 'image': rgb_img\n }\n\n\nclass RGBImgPartialObsWrapper(gym.core.ObservationWrapper):\n \"\"\"\n Wrapper to use partially observable RGB image as the only observation output\n This can be used to have the agent to solve the gridworld in pixel space.\n \"\"\"\n\n def __init__(self, env, tile_size=8):\n super().__init__(env)\n\n self.tile_size = tile_size\n\n obs_shape = env.observation_space.spaces['image'].shape\n self.observation_space.spaces['image'] = spaces.Box(\n low=0,\n high=255,\n shape=(obs_shape[0] * tile_size, obs_shape[1] * tile_size, 3),\n dtype='uint8'\n )\n\n def observation(self, obs):\n env = self.unwrapped\n\n rgb_img_partial = env.get_obs_render(\n obs['image'],\n tile_size=self.tile_size\n )\n\n return {\n 'mission': obs['mission'],\n 'image': rgb_img_partial\n }\n\nclass FullyObsWrapper(gym.core.ObservationWrapper):\n \"\"\"\n Fully observable gridworld using a compact grid encoding\n \"\"\"\n\n def __init__(self, env):\n super().__init__(env)\n\n self.observation_space.spaces[\"image\"] = spaces.Box(\n low=0,\n high=255,\n shape=(self.env.width, self.env.height, 3), # number of cells\n dtype='uint8'\n )\n\n def observation(self, obs):\n env = self.unwrapped\n full_grid = env.grid.encode()\n full_grid[env.agent_pos[0]][env.agent_pos[1]] = np.array([\n OBJECT_TO_IDX['agent'],\n COLOR_TO_IDX['red'],\n env.agent_dir\n ])\n\n return {\n 'mission': obs['mission'],\n 'image': full_grid\n }\n\nclass FlatObsWrapper(gym.core.ObservationWrapper):\n \"\"\"\n Encode mission strings using a one-hot scheme,\n and combine these with observed images into one flat array\n \"\"\"\n\n def __init__(self, env, maxStrLen=96):\n super().__init__(env)\n\n self.maxStrLen = maxStrLen\n self.numCharCodes = 27\n\n imgSpace = env.observation_space.spaces['image']\n imgSize = reduce(operator.mul, imgSpace.shape, 1)\n\n self.observation_space = spaces.Box(\n low=0,\n high=255,\n shape=(imgSize + self.numCharCodes * self.maxStrLen,),\n dtype='uint8'\n )\n\n self.cachedStr = None\n self.cachedArray = None\n\n def observation(self, obs):\n image = obs['image']\n mission = obs['mission']\n\n # Cache the last-encoded mission string\n if mission != self.cachedStr:\n assert len(mission) <= self.maxStrLen, 'mission string too long ({} chars)'.format(len(mission))\n mission = mission.lower()\n\n strArray = np.zeros(shape=(self.maxStrLen, self.numCharCodes), dtype='float32')\n\n for idx, ch in enumerate(mission):\n if ch >= 'a' and ch <= 'z':\n chNo = ord(ch) - ord('a')\n elif ch == ' ':\n chNo = ord('z') - ord('a') + 1\n assert chNo < self.numCharCodes, '%s : %d' % (ch, chNo)\n strArray[idx, chNo] = 1\n\n 
self.cachedStr = mission\n self.cachedArray = strArray\n\n obs = np.concatenate((image.flatten(), self.cachedArray.flatten()))\n\n return obs\n\nclass ViewSizeWrapper(gym.core.Wrapper):\n \"\"\"\n Wrapper to customize the agent field of view size.\n This cannot be used with fully observable wrappers.\n \"\"\"\n\n def __init__(self, env, agent_view_size=7):\n super().__init__(env)\n\n assert agent_view_size % 2 == 1\n assert agent_view_size >= 3\n\n # Override default view size\n env.unwrapped.agent_view_size = agent_view_size\n\n # Compute observation space with specified view size\n observation_space = gym.spaces.Box(\n low=0,\n high=255,\n shape=(agent_view_size, agent_view_size, 3),\n dtype='uint8'\n )\n\n # Override the environment's observation space\n self.observation_space = spaces.Dict({\n 'image': observation_space\n })\n\n def reset(self, **kwargs):\n return self.env.reset(**kwargs)\n\n def step(self, action):\n return self.env.step(action)\n\nfrom .minigrid import Goal\nclass DirectionObsWrapper(gym.core.ObservationWrapper):\n \"\"\"\n Provides the slope/angular direction to the goal with the observations as modeled by (y2 - y2 )/( x2 - x1)\n type = {slope , angle}\n \"\"\"\n def __init__(self, env,type='slope'):\n super().__init__(env)\n self.goal_position = None\n self.type = type\n\n def reset(self):\n obs = self.env.reset()\n if not self.goal_position:\n self.goal_position = [x for x,y in enumerate(self.grid.grid) if isinstance(y,(Goal) ) ]\n if len(self.goal_position) >= 1: # in case there are multiple goals , needs to be handled for other env types\n self.goal_position = (int(self.goal_position[0]/self.height) , self.goal_position[0]%self.width)\n return obs\n\n def observation(self, obs):\n slope = np.divide( self.goal_position[1] - self.agent_pos[1] , self.goal_position[0] - self.agent_pos[0])\n obs['goal_direction'] = np.arctan( slope ) if self.type == 'angle' else slope\n return obs\n"
] |
[
[
"numpy.array",
"numpy.zeros",
"numpy.arctan",
"numpy.divide"
]
] |
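As a small illustration of the NumPy calls listed for this entry, the sketch below reproduces the slope/angle computation from `DirectionObsWrapper` with hypothetical agent and goal positions.

```python
import numpy as np

def goal_direction(goal_pos, agent_pos, mode='slope'):
    # (y2 - y1) / (x2 - x1), mirroring DirectionObsWrapper.observation
    slope = np.divide(goal_pos[1] - agent_pos[1], goal_pos[0] - agent_pos[0])
    return np.arctan(slope) if mode == 'angle' else slope

goal = np.array([6, 3])    # hypothetical goal cell
agent = np.array([2, 1])   # hypothetical agent cell
print(goal_direction(goal, agent))             # 0.5
print(goal_direction(goal, agent, 'angle'))    # ~0.4636 rad
```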
akhilmeleth/greyatom-python-for-data-science
|
[
"634530dc8bb25ea6283a53861bf701d94d1fb5e4"
] |
[
"Project:-Make-Sense-of-Census/code.py"
] |
[
"# --------------\n# Importing header files\r\nimport numpy as np\r\nimport warnings\r\n\r\nwarnings.filterwarnings('ignore')\r\n\r\n#New record\r\nnew_record=[[50, 9, 4, 1, 0, 0, 40, 0]]\r\n\r\n#Reading file\r\ndata = np.genfromtxt(path, delimiter=\",\", skip_header=1)\r\n\r\n#Code starts here\r\n#Loading data file and saving it into a new numpy array \r\ndata = np.genfromtxt(path, delimiter=\",\", skip_header=1)\r\n\r\nprint(data.shape)\r\n\r\n#Concatenating the new record to the existing numpy array\r\ncensus=np.concatenate((data, new_record),axis = 0)\r\n\r\nprint(census.shape)\r\n\r\n#Subsetting the array to include only 'Age' column\r\nage=census[:,0]\r\n\r\n#Finding the max value of age\r\nmax_age=age.max()\r\nprint(\"Max Age= \",max_age)\r\nprint(age)\r\n\r\n#Find the min value of age\r\nmin_age=age.min()\r\nprint(\"Min Age= \",min_age)\r\n\r\n#Find the mean of age\r\nage_mean=age.mean()\r\nprint(\"Age Average= \", age_mean)\r\n\r\n#Find the standard deviation of age\r\nage_std=age.std()\r\nprint(\"Age Standard Deviation= \",age_std)\r\n\r\n#Creating new subsets based on 'Age'\r\nrace_0=census[census[:,2]==0]\r\nrace_1=census[census[:,2]==1]\r\nrace_2=census[census[:,2]==2]\r\nrace_3=census[census[:,2]==3]\r\nrace_4=census[census[:,2]==4]\r\n\r\n\r\n#Finding the length of the above created subsets\r\nlen_0=len(race_0)\r\nlen_1=len(race_1)\r\nlen_2=len(race_2)\r\nlen_3=len(race_3)\r\nlen_4=len(race_4)\r\n\r\n#Printing the length of the above created subsets\r\nprint('Race_0: ', len_0)\r\nprint('Race_1: ', len_1)\r\nprint('Race_2: ', len_2)\r\nprint('Race_3: ', len_3)\r\nprint('Race_4: ', len_4)\r\n\r\n#Storing the different race lengths with appropriate indexes\r\nrace_list=[len_0, len_1,len_2, len_3, len_4]\r\n\r\n#Storing the race with minimum length into a variable \r\nminority_race=race_list.index(min(race_list))\r\n\r\n#Subsetting the array based on the age \r\nsenior_citizens=census[census[:,0]>60]\r\n\r\n#Calculating the sum of all the values of array\r\nworking_hours_sum=senior_citizens.sum(axis=0)[6]\r\n\r\n#Finding the length of the array\r\nsenior_citizens_len=len(senior_citizens)\r\n\r\n#Finding the average working hours\r\navg_working_hours=working_hours_sum/senior_citizens_len\r\n\r\n#Printing the average working hours\r\nprint((avg_working_hours))\r\n\r\n#Creating an array based on 'education' column\r\nhigh=census[census[:,1]>10]\r\n\r\n#Finding the average pay\r\navg_pay_high=high[:,7].mean()\r\n\r\n#Printing the average pay\r\nprint(avg_pay_high)\r\n\r\n#Creating an array based on 'education' column\r\nlow=census[census[:,1]<=10]\r\n\r\n#Finding the average pay\r\navg_pay_low=low[:,7].mean()\r\n\r\n#Printing the average pay\r\nprint(avg_pay_low)\r\n#Code ends here\n\n\n"
] |
[
[
"numpy.concatenate",
"numpy.genfromtxt"
]
] |
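The sketch below shows the same `genfromtxt` + `concatenate` pattern used in this entry, but on an in-memory CSV so it runs without the course-provided `path` file; the column names and values are made up.

```python
import io
import numpy as np

csv = io.StringIO("age,education,race\n39,13,4\n50,9,4\n")
data = np.genfromtxt(csv, delimiter=",", skip_header=1)   # shape (2, 3)

new_record = np.array([[28, 11, 2]])                      # one extra row
census = np.concatenate((data, new_record), axis=0)       # shape (3, 3)
print(census.shape, census[:, 0].max())                   # (3, 3) 50.0
```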
ricardodeazambuja/IJCNN2017-2
|
[
"817165185de6152041bbaf21cbad6d12fb58f064"
] |
[
"membrane_lowpass_md.py"
] |
[
"\nimport numpy\n\nclass membrane_lowpass(object):\n \n def __init__(self, Number_of_Neurons, tau):\n '''\n Initializes the neuron membranes.\n Number_of_Neurons: total number of neurons to be simulated\n tau: time constant (in seconds)\n '''\n self.neurons = numpy.zeros(Number_of_Neurons)\n self.times = numpy.zeros(Number_of_Neurons)\n self.tau = tau\n \n def process_spikes(self, spikes, current_time):\n '''\n Processes the received spikes at the current time updating their membrane values.\n spikes: list with the indexes of the neurons who spiked.\n current_time: the time the neurons spiked (float)\n '''\n delta_t = current_time-self.times[spikes] # Calculates the difference between the last time they spiked\n current_values = self.neurons[spikes]*numpy.exp(-delta_t/self.tau) # Calculates the current values\n self.times[spikes]=numpy.ones(len(spikes))*current_time # Updates the last time they spiked\n self.neurons[spikes] = current_values + numpy.ones(len(spikes)) # Updates the neuron membrane values\n\n def check_values(self, current_time):\n '''\n Returns the current membrane values at the specified time.\n current_time: time used to calculate the membrane values.\n '''\n delta_t = current_time-self.times # Calculates the time since last spike\n return self.neurons*numpy.exp(-delta_t/self.tau) # Calculates the current values\n "
] |
[
[
"numpy.exp",
"numpy.zeros"
]
] |
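A hypothetical usage sketch for the `membrane_lowpass` class in this entry, assuming the module is importable under the file name shown; the neuron count, time constant and spike times are illustrative only.

```python
from membrane_lowpass_md import membrane_lowpass  # assumed import path

m = membrane_lowpass(Number_of_Neurons=3, tau=0.02)   # 20 ms time constant
m.process_spikes([0, 2], current_time=0.010)          # neurons 0 and 2 spike at t = 10 ms
m.process_spikes([0], current_time=0.015)             # neuron 0 spikes again at t = 15 ms
print(m.check_values(current_time=0.020))             # exponentially decayed traces at t = 20 ms
```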
huotarim/huotarim-xgboost-li-ion-batteries
|
[
"a5c183be35aab4bd86924913d37d0ec7bc3a4220"
] |
[
"922_tune_learning_rate.py"
] |
[
"# battery 023, parallel cross validation\nfrom pandas import read_csv\nfrom sklearn.model_selection import train_test_split\nfrom xgboost import XGBRegressor\nfrom sklearn.model_selection import GridSearchCV\nfrom sklearn.model_selection import TimeSeriesSplit\nimport matplotlib\nmatplotlib.use('Agg')\nfrom matplotlib import pyplot\nimport numpy\n\n# Load data\nDIR = './'\nFILE = 'household_power_monthly.csv'\nfilename = DIR + FILE\n\n# Use column labels \nnames = ['Global_reactive_power','Voltage','Global_intensity',\n 'Sub_metering_1','Sub_metering_2','Sub_metering_3','Global_active_power']\n\ndataset = read_csv(filename, usecols=names) \narray = dataset.values\nX = array[:,0:len(names)-1]\ny = array[:,len(names)-1]\n\n# Split-out validation dataset to test and validation sets\ntest_size = 0.4\n# IMPORTANT: keep time series order by shuffle=False\nX_train, X_test, y_train, y_test = train_test_split(X, y,\ntest_size=test_size, shuffle=False)\n\n# Grid search\nmodel = XGBRegressor(nthread=-1)\nn_estimators = [50,100,200,300,400,500]\nlearning_rate = [0.0001, 0.001, 0.01, 0.1]\n\nparam_grid = dict(learning_rate=learning_rate, n_estimators=n_estimators)\ntscv = TimeSeriesSplit(n_splits=5) # 5 or 9 yields the same result (tscv splits look different, though)\n#grid_search = GridSearchCV(model, param_grid, scoring=\"explained_variance\", cv=tscv, n_jobs=-1)\ngrid_search = GridSearchCV(model, param_grid, scoring=\"explained_variance\", cv=tscv, n_jobs=-1)\ngrid_result = grid_search.fit(X_train, y_train)\n\n# summarize results\nprint(\"Best evaluated variance score: %f using %s\" % (grid_result.best_score_, grid_result.best_params_))\nmeans = grid_result.cv_results_['mean_test_score']\nstds = grid_result.cv_results_['std_test_score']\nparams = grid_result.cv_results_['params']\nfor mean, stdev, param in zip(means, stds, params):\n print(\"%f (%f) with: %r\" % (mean, stdev, param))\n\n# plot results\nscores = numpy.array(means).reshape(len(learning_rate), len(n_estimators))\nfor i, value in enumerate(learning_rate):\n pyplot.plot(n_estimators, scores[i], label='learning_rate: ' + str(value))\npyplot.legend()\npyplot.xlabel('n_estimators')\npyplot.ylabel('Explained variance')\npyplot.savefig('n_estimators_vs_learning_rate.png')\n# pyplot.show() # Matplotlib is currently using agg, which is a non-GUI backend, so cannot show the figure."
] |
[
[
"matplotlib.pyplot.legend",
"sklearn.model_selection.GridSearchCV",
"pandas.read_csv",
"matplotlib.use",
"sklearn.model_selection.train_test_split",
"matplotlib.pyplot.savefig",
"sklearn.model_selection.TimeSeriesSplit",
"matplotlib.pyplot.xlabel",
"numpy.array",
"matplotlib.pyplot.ylabel"
]
] |
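A reduced, self-contained sketch of the grid search in this entry, run on synthetic data; scikit-learn's `GradientBoostingRegressor` stands in for `XGBRegressor` here so the example carries no xgboost dependency.

```python
import numpy as np
from sklearn.ensemble import GradientBoostingRegressor
from sklearn.model_selection import GridSearchCV, TimeSeriesSplit, train_test_split

rng = np.random.default_rng(0)
X = rng.normal(size=(200, 6))                        # synthetic features
y = 2.0 * X[:, 0] + rng.normal(scale=0.1, size=200)  # synthetic target

# shuffle=False keeps chronological order, as in the script above
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.4, shuffle=False)

param_grid = {"learning_rate": [0.01, 0.1], "n_estimators": [50, 100]}
grid = GridSearchCV(GradientBoostingRegressor(),
                    param_grid,
                    scoring="explained_variance",
                    cv=TimeSeriesSplit(n_splits=5))
grid.fit(X_train, y_train)
print(grid.best_score_, grid.best_params_)
```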
asuberlin/price_wage_dynamics
|
[
"06ac51fa0b3ad7f1bb3e81540354b8aea866a693"
] |
[
"single/firm.py"
] |
[
"from scipy.optimize import minimize, minimize_scalar, basinhopping, curve_fit\nimport numpy as np\n\nclass Firm:\n def __init__(self, money, firmParameters, p_star, S_star, L_star, z_star, expectation):\n self.gamma = firmParameters['gamma']\n self.p_t = p_star\n self.zeta_0 = firmParameters['zeta_0']\n self.error = firmParameters['error']\n self.z_star = z_star\n self.z_t = z_star * (1 + self.error)\n self.zeta_t = self.zeta_0 * (1 + self.error)\n self.inertia = firmParameters['inertia']\n self.e1 = firmParameters['e1']\n self.e2 = firmParameters['e2']\n self.mF_t = L_star if money else firmParameters['mF_0']\n self.stock_t = 0\n self.stock_tp1 = self.stock_t\n self.expiration = firmParameters['expiration']\n self.SSP_t = S_star\n self.SS_t = S_star\n self.LD_t = L_star\n\n self.expectation = expectation # desired expectation function\n \n self.memoryLength = 5 # for memory expectation\n self.memory = self.initMemory() # for memory expectation\n\n # for memory expectation\n def initMemory(self):\n memory = np.array([np.zeros(self.memoryLength),np.zeros(self.memoryLength)])\n for i in range(int(self.memoryLength)):\n memory[0][i] = max(0.001,self.SS_t*(1+np.random.normal(loc = 0.0, scale = 0.05))) \n memory[1][i] = (self.z_star / memory[0][i] ** self.zeta_t) \n print(memory)\n return memory\n \n ### functions used in firm action calls\n \n # firm production function\n def rho(self, L, gamma):\n return L ** gamma\n\n # sugar demand function\n def phi(self, L, z, zeta, gamma): \n return z / self.rho(L, gamma) ** zeta\n \n # used in firm decides production \n def optimizeL(self, z, zeta, gamma, Lmax, stock): #firm decides labor for optimal production\n f = lambda L: (self.rho(L, gamma) + stock) * self.phi(L, z, zeta, gamma) - L\n res = minimize_scalar(f, bounds=(0, Lmax), method='bounded')\n return(res.x)\n\n ### firm action calls\n \n # given expected demand and expected labor supply, plan production of stuff. 
\n def decideProduction(self, verbose, money, SM_t, Lmax): \n if not money: \n optimum = self.optimizeL(self.z_t, self.zeta_t, self.gamma, Lmax, self.stock_t)\n budget = self.p_t * SM_t\n self.LD_tp1 = min(optimum, budget)\n self.w_tp1 = 1\n if verbose: print('Firm: optimum labor is {:.4f}, affordable is {:.4f}, so planned labor is {:.4f}.'.format(optimum, budget, self.LD_tp1))\n \n if money:\n optimum = self.optimizeL(self.z_t, self.zeta_t, self.gamma, Lmax, self.stock_t)\n budget = self.mF_t\n self.LD_tp1 = min(optimum, budget)\n self.w_tp1 = 1\n if verbose: print('Firm: optimum labor is {:.4f}, affordable is {:.4f}, so planned labor is {:.4f}.'.format(optimum, budget, self.LD_tp1))\n \n self.SSP_tp1 = self.rho(self.LD_tp1, self.gamma) \n self.p_tp1 = self.phi(self.LD_tp1, self.z_t, self.zeta_t, self.gamma)\n if verbose: print('Firm: planned production is {:.4f} and price is {:.4f}'.format(self.SSP_tp1, self.p_tp1))\n \n # given market labor result, produce stuff \n def produce(self, verbose, LM_tp1):\n self.SS_tp1 = self.rho(LM_tp1, self.gamma) + self.stock_t\n if verbose: print('Firm: Firm produces {:.4f}'.format(self.SS_tp1))\n \n # update stock\n def updateStock(self, verbose, SM_tp1):\n deltaS = self.SS_tp1 - SM_tp1\n if deltaS > 0: self.stock_tp1 = deltaS * (1 - self.expiration)\n if verbose: print('Firm: delta supply is {:.4f}, expiration is {:.4f}, \\\n initial stock is {:.4f} and new stock is {:.4f}'.format(deltaS, self.expiration, self.stock_t, self.stock_tp1))\n \n # update monetary holdings\n def updateLedger(self, verbose, SM_tp1, LM_tp1):\n self.mF_tp1 = self.mF_t + self.p_tp1 * SM_tp1 - LM_tp1\n if verbose: print('Firm: initial ledger balance {:.4f} and new ledger balance is {:.4f}'.format(self.mF_t, self.mF_tp1))\n \n # update stuff demand and labor supply function parameters\n def updateExpectations(self, verbose, money, SM_t, SM_tp1, Lmax, LS_t, LS_tp1):\n if verbose: print('Old p and new p are {:.4f} and {:.4f}, old S and new S are {:.4f} and {:.4f}'.format(self.p_t, self.p_tp1, SM_t, SM_tp1))\n self.z_tp1, self.zeta_tp1 = self.expectation(self.z_t, self.zeta_t, self.p_t, self.p_tp1, SM_t, SM_tp1, self.e1, self.e2, self.inertia, self.memory, self.memoryLength)\n if verbose: print('Firm: initial z and zeta: {:.4f} {:.4f}, and adjusted z and zeta: {:.4f} {:.4f}'.format(self.z_t, self.zeta_t, self.z_tp1, self.zeta_tp1))\n"
] |
[
[
"numpy.random.normal",
"scipy.optimize.minimize_scalar",
"numpy.zeros"
]
] |
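The bounded scalar optimisation inside `Firm.optimizeL` can be exercised on its own as below; the parameter values are illustrative assumptions, not values taken from `firmParameters`.

```python
from scipy.optimize import minimize_scalar

gamma, zeta, z, stock, Lmax = 0.5, 1.2, 1.0, 0.0, 10.0   # illustrative values

def objective(L):
    production = L ** gamma                  # rho(L, gamma)
    price = z / production ** zeta           # phi(L, z, zeta, gamma)
    return (production + stock) * price - L  # expression minimised in optimizeL

res = minimize_scalar(objective, bounds=(0, Lmax), method='bounded')
print(res.x)   # with these toy values the minimiser ends up at the Lmax bound
```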
affromero/SMILE
|
[
"931510d69b2e33f2fe633563833c50a7408f89ef"
] |
[
"metrics/arcface_resnet.py"
] |
[
"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on 18-5-21 下午5:26\n\n@author: ronghuaiyang\n\"\"\"\nimport torch\nimport torch.nn as nn\nimport math\nimport torch.utils.model_zoo as model_zoo\nimport torch.nn.utils.weight_norm as weight_norm\nimport torch.nn.functional as F\n\n# https://github.com/ronghuaiyang/arcface-pytorch\n\n# __all__ = ['ResNet', 'resnet18', 'resnet34', 'resnet50', 'resnet101',\n# 'resnet152']\n\n\nmodel_urls = {\n 'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth',\n 'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth',\n 'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth',\n 'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth',\n 'resnet152': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth',\n}\n\n\ndef conv3x3(in_planes, out_planes, stride=1):\n \"\"\"3x3 convolution with padding\"\"\"\n return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,\n padding=1, bias=False)\n\n\nclass BasicBlock(nn.Module):\n expansion = 1\n\n def __init__(self, inplanes, planes, stride=1, downsample=None):\n super(BasicBlock, self).__init__()\n self.conv1 = conv3x3(inplanes, planes, stride)\n self.bn1 = nn.BatchNorm2d(planes)\n self.relu = nn.ReLU(inplace=True)\n self.conv2 = conv3x3(planes, planes)\n self.bn2 = nn.BatchNorm2d(planes)\n self.downsample = downsample\n self.stride = stride\n\n def forward(self, x):\n residual = x\n\n out = self.conv1(x)\n out = self.bn1(out)\n out = self.relu(out)\n\n out = self.conv2(out)\n out = self.bn2(out)\n\n if self.downsample is not None:\n residual = self.downsample(x)\n\n out += residual\n out = self.relu(out)\n\n return out\n\n\nclass IRBlock(nn.Module):\n expansion = 1\n\n def __init__(self, inplanes, planes, stride=1, downsample=None, use_se=True):\n super(IRBlock, self).__init__()\n self.bn0 = nn.BatchNorm2d(inplanes)\n self.conv1 = conv3x3(inplanes, inplanes)\n self.bn1 = nn.BatchNorm2d(inplanes)\n self.prelu = nn.PReLU()\n self.conv2 = conv3x3(inplanes, planes, stride)\n self.bn2 = nn.BatchNorm2d(planes)\n self.downsample = downsample\n self.stride = stride\n self.use_se = use_se\n if self.use_se:\n self.se = SEBlock(planes)\n\n def forward(self, x):\n residual = x\n out = self.bn0(x)\n out = self.conv1(out)\n out = self.bn1(out)\n out = self.prelu(out)\n\n out = self.conv2(out)\n out = self.bn2(out)\n if self.use_se:\n out = self.se(out)\n\n if self.downsample is not None:\n residual = self.downsample(x)\n\n out += residual\n out = self.prelu(out)\n\n return out\n\n\nclass Bottleneck(nn.Module):\n expansion = 4\n\n def __init__(self, inplanes, planes, stride=1, downsample=None):\n super(Bottleneck, self).__init__()\n self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)\n self.bn1 = nn.BatchNorm2d(planes)\n self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,\n padding=1, bias=False)\n self.bn2 = nn.BatchNorm2d(planes)\n self.conv3 = nn.Conv2d(planes, planes * self.expansion, kernel_size=1, bias=False)\n self.bn3 = nn.BatchNorm2d(planes * self.expansion)\n self.relu = nn.ReLU(inplace=True)\n self.downsample = downsample\n self.stride = stride\n\n def forward(self, x):\n residual = x\n\n out = self.conv1(x)\n out = self.bn1(out)\n out = self.relu(out)\n\n out = self.conv2(out)\n out = self.bn2(out)\n out = self.relu(out)\n\n out = self.conv3(out)\n out = self.bn3(out)\n\n if self.downsample is not None:\n residual = self.downsample(x)\n\n out += residual\n out = self.relu(out)\n\n return out\n\n\nclass 
SEBlock(nn.Module):\n def __init__(self, channel, reduction=16):\n super(SEBlock, self).__init__()\n self.avg_pool = nn.AdaptiveAvgPool2d(1)\n self.fc = nn.Sequential(\n nn.Linear(channel, channel // reduction),\n nn.PReLU(),\n nn.Linear(channel // reduction, channel),\n nn.Sigmoid()\n )\n\n def forward(self, x):\n b, c, _, _ = x.size()\n y = self.avg_pool(x).view(b, c)\n y = self.fc(y).view(b, c, 1, 1)\n return x * y\n\n\nclass ResNetFace(nn.Module):\n def __init__(self, block, layers, use_se=True):\n self.inplanes = 64\n self.use_se = use_se\n super(ResNetFace, self).__init__()\n self.conv1 = nn.Conv2d(1, 64, kernel_size=3, padding=1, bias=False)\n self.bn1 = nn.BatchNorm2d(64)\n self.prelu = nn.PReLU()\n self.maxpool = nn.MaxPool2d(kernel_size=2, stride=2)\n self.layer1 = self._make_layer(block, 64, layers[0])\n self.layer2 = self._make_layer(block, 128, layers[1], stride=2)\n self.layer3 = self._make_layer(block, 256, layers[2], stride=2)\n self.layer4 = self._make_layer(block, 512, layers[3], stride=2)\n self.bn4 = nn.BatchNorm2d(512)\n self.dropout = nn.Dropout()\n self.fc5 = nn.Linear(512 * 8 * 8, 512)\n self.bn5 = nn.BatchNorm1d(512)\n\n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n nn.init.xavier_normal_(m.weight)\n elif isinstance(m, nn.BatchNorm2d) or isinstance(m, nn.BatchNorm1d):\n nn.init.constant_(m.weight, 1)\n nn.init.constant_(m.bias, 0)\n elif isinstance(m, nn.Linear):\n nn.init.xavier_normal_(m.weight)\n nn.init.constant_(m.bias, 0)\n\n def _make_layer(self, block, planes, blocks, stride=1):\n downsample = None\n if stride != 1 or self.inplanes != planes * block.expansion:\n downsample = nn.Sequential(\n nn.Conv2d(self.inplanes, planes * block.expansion,\n kernel_size=1, stride=stride, bias=False),\n nn.BatchNorm2d(planes * block.expansion),\n )\n layers = []\n layers.append(block(self.inplanes, planes, stride, downsample, use_se=self.use_se))\n self.inplanes = planes\n for i in range(1, blocks):\n layers.append(block(self.inplanes, planes, use_se=self.use_se))\n\n return nn.Sequential(*layers)\n\n def forward(self, x):\n x = self.conv1(x)\n x = self.bn1(x)\n x = self.prelu(x)\n x = self.maxpool(x)\n\n x = self.layer1(x)\n x = self.layer2(x)\n x = self.layer3(x)\n x = self.layer4(x)\n x = self.bn4(x)\n x = self.dropout(x)\n x = x.view(x.size(0), -1)\n x = self.fc5(x)\n x = self.bn5(x)\n\n return x\n\n\nclass ResNet(nn.Module):\n\n def __init__(self, block, layers):\n self.inplanes = 64\n super(ResNet, self).__init__()\n # self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3,\n # bias=False)\n self.conv1 = nn.Conv2d(1, 64, kernel_size=3, stride=1, padding=1,\n bias=False)\n self.bn1 = nn.BatchNorm2d(64)\n self.relu = nn.ReLU(inplace=True)\n # self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)\n self.layer1 = self._make_layer(block, 64, layers[0], stride=2)\n self.layer2 = self._make_layer(block, 128, layers[1], stride=2)\n self.layer3 = self._make_layer(block, 256, layers[2], stride=2)\n self.layer4 = self._make_layer(block, 512, layers[3], stride=2)\n # self.avgpool = nn.AvgPool2d(8, stride=1)\n # self.fc = nn.Linear(512 * block.expansion, num_classes)\n self.fc5 = nn.Linear(512 * 8 * 8, 512)\n\n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')\n elif isinstance(m, nn.BatchNorm2d):\n nn.init.constant_(m.weight, 1)\n nn.init.constant_(m.bias, 0)\n\n def _make_layer(self, block, planes, blocks, stride=1):\n downsample = None\n if stride != 1 or 
self.inplanes != planes * block.expansion:\n downsample = nn.Sequential(\n nn.Conv2d(self.inplanes, planes * block.expansion,\n kernel_size=1, stride=stride, bias=False),\n nn.BatchNorm2d(planes * block.expansion),\n )\n\n layers = []\n layers.append(block(self.inplanes, planes, stride, downsample))\n self.inplanes = planes * block.expansion\n for i in range(1, blocks):\n layers.append(block(self.inplanes, planes))\n\n return nn.Sequential(*layers)\n\n def forward(self, x):\n x = self.conv1(x)\n x = self.bn1(x)\n x = self.relu(x)\n # x = self.maxpool(x)\n\n x = self.layer1(x)\n x = self.layer2(x)\n x = self.layer3(x)\n x = self.layer4(x)\n # x = nn.AvgPool2d(kernel_size=x.size()[2:])(x)\n # x = self.avgpool(x)\n x = x.view(x.size(0), -1)\n x = self.fc5(x)\n\n return x\n\n\ndef resnet18(pretrained=False, **kwargs):\n \"\"\"Constructs a ResNet-18 model.\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet\n \"\"\"\n model = ResNet(BasicBlock, [2, 2, 2, 2], **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet18']))\n return model\n\n\ndef resnet34(pretrained=False, **kwargs):\n \"\"\"Constructs a ResNet-34 model.\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet\n \"\"\"\n model = ResNet(BasicBlock, [3, 4, 6, 3], **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet34']))\n return model\n\n\ndef resnet50(pretrained=False, **kwargs):\n \"\"\"Constructs a ResNet-50 model.\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet\n \"\"\"\n model = ResNet(Bottleneck, [3, 4, 6, 3], **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet50']))\n return model\n\n\ndef resnet101(pretrained=False, **kwargs):\n \"\"\"Constructs a ResNet-101 model.\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet\n \"\"\"\n model = ResNet(Bottleneck, [3, 4, 23, 3], **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet101']))\n return model\n\n\ndef resnet152(pretrained=False, **kwargs):\n \"\"\"Constructs a ResNet-152 model.\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet\n \"\"\"\n model = ResNet(Bottleneck, [3, 8, 36, 3], **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet152']))\n return model\n\n\ndef resnet_face18(use_se=True, **kwargs):\n model = ResNetFace(IRBlock, [2, 2, 2, 2], use_se=use_se, **kwargs)\n return model"
] |
[
[
"torch.nn.BatchNorm1d",
"torch.nn.Dropout",
"torch.nn.Sequential",
"torch.nn.init.constant_",
"torch.nn.PReLU",
"torch.nn.Conv2d",
"torch.nn.init.xavier_normal_",
"torch.nn.Sigmoid",
"torch.nn.MaxPool2d",
"torch.nn.Linear",
"torch.nn.AdaptiveAvgPool2d",
"torch.nn.BatchNorm2d",
"torch.nn.ReLU",
"torch.utils.model_zoo.load_url",
"torch.nn.init.kaiming_normal_"
]
] |
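A hypothetical smoke test for the ArcFace backbone in this entry, assuming the file is importable as `metrics.arcface_resnet`; `resnet_face18` expects single-channel 128x128 crops and returns a 512-dimensional embedding.

```python
import torch
from metrics.arcface_resnet import resnet_face18  # assumed import path

model = resnet_face18(use_se=True).eval()
with torch.no_grad():
    faces = torch.randn(2, 1, 128, 128)   # batch of two grayscale face crops
    embeddings = model(faces)
print(embeddings.shape)                   # torch.Size([2, 512])
```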
FujitsuLaboratories/CAC
|
[
"d12df8e47f61eaf7d7b0ed355e2d1aa296453f86"
] |
[
"cac/pruning/examples/resnet110/resnet110.py"
] |
[
"# resnet110.py COPYRIGHT Fujitsu Limited 2021\r\n\r\nimport torch\r\nimport torch.nn as nn\r\nimport torch.nn.functional as F\r\n\r\nclass LambdaLayer(nn.Module):\r\n def __init__(self, lambd):\r\n super(LambdaLayer, self).__init__()\r\n self.lambd = lambd\r\n\r\n def forward(self, x):\r\n return self.lambd(x)\r\n\r\n# To change \"channels for conv layer\" & \"nodes for fc layer\" by pruning, custum model is defined.\r\n# for CIFAR-10\r\nclass ResNet110(nn.Module):\r\n def __init__(\r\n self,\r\n num_classes=10,\r\n\r\n ch_conv1=16,\r\n\r\n ch_l10conv1=16,\r\n ch_l10conv2=16,\r\n ch_l11conv1=16,\r\n ch_l11conv2=16,\r\n ch_l12conv1=16,\r\n ch_l12conv2=16,\r\n ch_l13conv1=16,\r\n ch_l13conv2=16,\r\n ch_l14conv1=16,\r\n ch_l14conv2=16,\r\n ch_l15conv1=16,\r\n ch_l15conv2=16,\r\n ch_l16conv1=16,\r\n ch_l16conv2=16,\r\n ch_l17conv1=16,\r\n ch_l17conv2=16,\r\n ch_l18conv1=16,\r\n ch_l18conv2=16,\r\n ch_l19conv1=16,\r\n ch_l19conv2=16,\r\n ch_l110conv1=16,\r\n ch_l110conv2=16,\r\n ch_l111conv1=16,\r\n ch_l111conv2=16,\r\n ch_l112conv1=16,\r\n ch_l112conv2=16,\r\n ch_l113conv1=16,\r\n ch_l113conv2=16,\r\n ch_l114conv1=16,\r\n ch_l114conv2=16,\r\n ch_l115conv1=16,\r\n ch_l115conv2=16,\r\n ch_l116conv1=16,\r\n ch_l116conv2=16,\r\n ch_l117conv1=16,\r\n ch_l117conv2=16,\r\n\r\n ch_l20conv1=32,\r\n ch_l20conv2=32,\r\n ch_l21conv1=32,\r\n ch_l21conv2=32,\r\n ch_l22conv1=32,\r\n ch_l22conv2=32,\r\n ch_l23conv1=32,\r\n ch_l23conv2=32,\r\n ch_l24conv1=32,\r\n ch_l24conv2=32,\r\n ch_l25conv1=32,\r\n ch_l25conv2=32,\r\n ch_l26conv1=32,\r\n ch_l26conv2=32,\r\n ch_l27conv1=32,\r\n ch_l27conv2=32,\r\n ch_l28conv1=32,\r\n ch_l28conv2=32,\r\n ch_l29conv1=32,\r\n ch_l29conv2=32,\r\n ch_l210conv1=32,\r\n ch_l210conv2=32,\r\n ch_l211conv1=32,\r\n ch_l211conv2=32,\r\n ch_l212conv1=32,\r\n ch_l212conv2=32,\r\n ch_l213conv1=32,\r\n ch_l213conv2=32,\r\n ch_l214conv1=32,\r\n ch_l214conv2=32,\r\n ch_l215conv1=32,\r\n ch_l215conv2=32,\r\n ch_l216conv1=32,\r\n ch_l216conv2=32,\r\n ch_l217conv1=32,\r\n ch_l217conv2=32,\r\n\r\n ch_l30conv1=64,\r\n ch_l30conv2=64,\r\n ch_l31conv1=64,\r\n ch_l31conv2=64,\r\n ch_l32conv1=64,\r\n ch_l32conv2=64,\r\n ch_l33conv1=64,\r\n ch_l33conv2=64,\r\n ch_l34conv1=64,\r\n ch_l34conv2=64,\r\n ch_l35conv1=64,\r\n ch_l35conv2=64,\r\n ch_l36conv1=64,\r\n ch_l36conv2=64,\r\n ch_l37conv1=64,\r\n ch_l37conv2=64,\r\n ch_l38conv1=64,\r\n ch_l38conv2=64,\r\n ch_l39conv1=64,\r\n ch_l39conv2=64,\r\n ch_l310conv1=64,\r\n ch_l310conv2=64,\r\n ch_l311conv1=64,\r\n ch_l311conv2=64,\r\n ch_l312conv1=64,\r\n ch_l312conv2=64,\r\n ch_l313conv1=64,\r\n ch_l313conv2=64,\r\n ch_l314conv1=64,\r\n ch_l314conv2=64,\r\n ch_l315conv1=64,\r\n ch_l315conv2=64,\r\n ch_l316conv1=64,\r\n ch_l316conv2=64,\r\n ch_l317conv1=64,\r\n ch_l317conv2=64,\r\n ):\r\n super(ResNet110, self).__init__()\r\n self.conv1 = nn.Conv2d(3, ch_conv1, kernel_size=3, stride=1, padding=1, bias=False)\r\n self.bn1 = nn.BatchNorm2d(ch_conv1)\r\n\r\n # layer1-0\r\n self.l10_conv1 = nn.Conv2d(ch_conv1, ch_l10conv1, kernel_size=3, stride=1, padding=1, bias=False)\r\n self.l10_bn1 = nn.BatchNorm2d(ch_l10conv1)\r\n self.l10_conv2 = nn.Conv2d(ch_l10conv1, ch_l10conv2, kernel_size=3, stride=1, padding=1, bias=False)\r\n self.l10_bn2 = nn.BatchNorm2d(ch_l10conv2)\r\n # layer1-1\r\n self.l11_conv1 = nn.Conv2d(ch_l10conv2, ch_l11conv1, kernel_size=3, stride=1, padding=1, bias=False)\r\n self.l11_bn1 = nn.BatchNorm2d(ch_l11conv1)\r\n self.l11_conv2 = nn.Conv2d(ch_l11conv1, ch_l11conv2, kernel_size=3, stride=1, padding=1, bias=False)\r\n self.l11_bn2 = 
nn.BatchNorm2d(ch_l11conv2)\r\n # layer1-2\r\n self.l12_conv1 = nn.Conv2d(ch_l11conv2, ch_l12conv1, kernel_size=3, stride=1, padding=1, bias=False)\r\n self.l12_bn1 = nn.BatchNorm2d(ch_l12conv1)\r\n self.l12_conv2 = nn.Conv2d(ch_l12conv1, ch_l12conv2, kernel_size=3, stride=1, padding=1, bias=False)\r\n self.l12_bn2 = nn.BatchNorm2d(ch_l12conv2)\r\n # layer1-3\r\n self.l13_conv1 = nn.Conv2d(ch_l12conv2, ch_l13conv1, kernel_size=3, stride=1, padding=1, bias=False)\r\n self.l13_bn1 = nn.BatchNorm2d(ch_l13conv1)\r\n self.l13_conv2 = nn.Conv2d(ch_l13conv1, ch_l13conv2, kernel_size=3, stride=1, padding=1, bias=False)\r\n self.l13_bn2 = nn.BatchNorm2d(ch_l13conv2)\r\n #layer1-4\r\n self.l14_conv1 = nn.Conv2d(ch_l13conv2, ch_l14conv1, kernel_size=3, stride=1, padding=1, bias=False)\r\n self.l14_bn1 = nn.BatchNorm2d(ch_l14conv1)\r\n self.l14_conv2 = nn.Conv2d(ch_l14conv1, ch_l14conv2, kernel_size=3, stride=1, padding=1, bias=False)\r\n self.l14_bn2 = nn.BatchNorm2d(ch_l14conv2)\r\n #layer1-5\r\n self.l15_conv1 = nn.Conv2d(ch_l14conv2, ch_l15conv1, kernel_size=3, stride=1, padding=1, bias=False)\r\n self.l15_bn1 = nn.BatchNorm2d(ch_l15conv1)\r\n self.l15_conv2 = nn.Conv2d(ch_l15conv1, ch_l15conv2, kernel_size=3, stride=1, padding=1, bias=False)\r\n self.l15_bn2 = nn.BatchNorm2d(ch_l15conv2)\r\n #layer1-6\r\n self.l16_conv1 = nn.Conv2d(ch_l15conv2, ch_l16conv1, kernel_size=3, stride=1, padding=1, bias=False)\r\n self.l16_bn1 = nn.BatchNorm2d(ch_l16conv1)\r\n self.l16_conv2 = nn.Conv2d(ch_l16conv1, ch_l16conv2, kernel_size=3, stride=1, padding=1, bias=False)\r\n self.l16_bn2 = nn.BatchNorm2d(ch_l16conv2)\r\n #layer1-7\r\n self.l17_conv1 = nn.Conv2d(ch_l16conv2, ch_l17conv1, kernel_size=3, stride=1, padding=1, bias=False)\r\n self.l17_bn1 = nn.BatchNorm2d(ch_l17conv1)\r\n self.l17_conv2 = nn.Conv2d(ch_l17conv1, ch_l17conv2, kernel_size=3, stride=1, padding=1, bias=False)\r\n self.l17_bn2 = nn.BatchNorm2d(ch_l17conv2)\r\n #layer1-8\r\n self.l18_conv1 = nn.Conv2d(ch_l17conv2, ch_l18conv1, kernel_size=3, stride=1, padding=1, bias=False)\r\n self.l18_bn1 = nn.BatchNorm2d(ch_l18conv1)\r\n self.l18_conv2 = nn.Conv2d(ch_l18conv1, ch_l18conv2, kernel_size=3, stride=1, padding=1, bias=False)\r\n self.l18_bn2 = nn.BatchNorm2d(ch_l18conv2)\r\n #layer1-9\r\n self.l19_conv1 = nn.Conv2d(ch_l18conv2, ch_l19conv1, kernel_size=3, stride=1, padding=1, bias=False)\r\n self.l19_bn1 = nn.BatchNorm2d(ch_l19conv1)\r\n self.l19_conv2 = nn.Conv2d(ch_l19conv1, ch_l19conv2, kernel_size=3, stride=1, padding=1, bias=False)\r\n self.l19_bn2 = nn.BatchNorm2d(ch_l19conv2)\r\n #layer1-10 \r\n self.l110_conv1 = nn.Conv2d(ch_l19conv2, ch_l110conv1, kernel_size=3, stride=1, padding=1, bias=False)\r\n self.l110_bn1 = nn.BatchNorm2d(ch_l110conv1)\r\n self.l110_conv2 = nn.Conv2d(ch_l110conv1, ch_l110conv2, kernel_size=3, stride=1, padding=1, bias=False)\r\n self.l110_bn2 = nn.BatchNorm2d(ch_l110conv2)\r\n #layer1-11 \r\n self.l111_conv1 = nn.Conv2d(ch_l110conv2, ch_l111conv1, kernel_size=3, stride=1, padding=1, bias=False)\r\n self.l111_bn1 = nn.BatchNorm2d(ch_l111conv1)\r\n self.l111_conv2 = nn.Conv2d(ch_l111conv1, ch_l111conv2, kernel_size=3, stride=1, padding=1, bias=False)\r\n self.l111_bn2 = nn.BatchNorm2d(ch_l111conv2)\r\n #layer1-12\r\n self.l112_conv1 = nn.Conv2d(ch_l111conv2, ch_l112conv1, kernel_size=3, stride=1, padding=1, bias=False)\r\n self.l112_bn1 = nn.BatchNorm2d(ch_l112conv1)\r\n self.l112_conv2 = nn.Conv2d(ch_l112conv1, ch_l112conv2, kernel_size=3, stride=1, padding=1, bias=False)\r\n self.l112_bn2 = 
nn.BatchNorm2d(ch_l112conv2)\r\n #layer1-13\r\n self.l113_conv1 = nn.Conv2d(ch_l112conv2, ch_l113conv1, kernel_size=3, stride=1, padding=1, bias=False)\r\n self.l113_bn1 = nn.BatchNorm2d(ch_l113conv1)\r\n self.l113_conv2 = nn.Conv2d(ch_l113conv1, ch_l113conv2, kernel_size=3, stride=1, padding=1, bias=False)\r\n self.l113_bn2 = nn.BatchNorm2d(ch_l113conv2)\r\n #layer1-14 \r\n self.l114_conv1 = nn.Conv2d(ch_l113conv2, ch_l114conv1, kernel_size=3, stride=1, padding=1, bias=False)\r\n self.l114_bn1 = nn.BatchNorm2d(ch_l114conv1)\r\n self.l114_conv2 = nn.Conv2d(ch_l114conv1, ch_l114conv2, kernel_size=3, stride=1, padding=1, bias=False)\r\n self.l114_bn2 = nn.BatchNorm2d(ch_l114conv2)\r\n #layer1-15 \r\n self.l115_conv1 = nn.Conv2d(ch_l114conv2, ch_l115conv1, kernel_size=3, stride=1, padding=1, bias=False)\r\n self.l115_bn1 = nn.BatchNorm2d(ch_l115conv1)\r\n self.l115_conv2 = nn.Conv2d(ch_l115conv1, ch_l115conv2, kernel_size=3, stride=1, padding=1, bias=False)\r\n self.l115_bn2 = nn.BatchNorm2d(ch_l115conv2)\r\n #layer1-16 \r\n self.l116_conv1 = nn.Conv2d(ch_l115conv2, ch_l116conv1, kernel_size=3, stride=1, padding=1, bias=False)\r\n self.l116_bn1 = nn.BatchNorm2d(ch_l116conv1)\r\n self.l116_conv2 = nn.Conv2d(ch_l116conv1, ch_l116conv2, kernel_size=3, stride=1, padding=1, bias=False)\r\n self.l116_bn2 = nn.BatchNorm2d(ch_l116conv2)\r\n #layer1-17 \r\n self.l117_conv1 = nn.Conv2d(ch_l116conv2, ch_l117conv1, kernel_size=3, stride=1, padding=1, bias=False)\r\n self.l117_bn1 = nn.BatchNorm2d(ch_l117conv1)\r\n self.l117_conv2 = nn.Conv2d(ch_l117conv1, ch_l117conv2, kernel_size=3, stride=1, padding=1, bias=False)\r\n self.l117_bn2 = nn.BatchNorm2d(ch_l117conv2)\r\n\r\n # zero padding 1 : add zero to resize tensor 16 -> 32\r\n ch_diff12 = ch_l20conv2 - ch_l117conv2\r\n # just through input to output\r\n self.zeropad11 = LambdaLayer(lambda x: F.pad(x[:, :, :, :], (0, 0, 0, 0, 0, 0), \"constant\", 0))\r\n self.zeropad12 = LambdaLayer(lambda x: F.pad(x[:, :, :, :], (0, 0, 0, 0, 0, 0), \"constant\", 0))\r\n \r\n if ch_diff12 >0:\r\n if ch_diff12%2 ==0:\r\n self.zeropad11 = LambdaLayer(lambda x: \r\n F.pad(x[:, :, ::2, ::2], (0, 0, 0, 0, ch_diff12//2, ch_diff12//2), \"constant\", 0))\r\n else:\r\n self.zeropad11 = LambdaLayer(lambda x:\r\n F.pad(x[:, :, ::2, ::2], (0, 0, 0, 0, ch_diff12//2, (ch_diff12//2)+1), \"constant\", 0))\r\n elif ch_diff12 <0:\r\n ch_diff12 = ch_diff12 * -1.0\r\n if ch_diff12%2 ==0:\r\n self.zeropad12 = LambdaLayer(lambda x:\r\n F.pad(x[:, :, ::2, ::2], (0, 0, 0, 0, ch_diff12//2, ch_diff12//2), \"constant\", 0))\r\n else:\r\n self.zeropad12 = LambdaLayer(lambda x:\r\n F.pad(x[:, :, ::2, ::2], (0, 0, 0, 0, ch_diff12//2, (ch_diff12//2)+1), \"constant\", 0))\r\n\r\n # layer2-0\r\n self.l20_conv1 = nn.Conv2d(ch_l117conv2, ch_l20conv1, kernel_size=3, stride=2, padding=1, bias=False)\r\n self.l20_bn1 = nn.BatchNorm2d(ch_l20conv1)\r\n self.l20_conv2 = nn.Conv2d(ch_l20conv1, ch_l20conv2, kernel_size=3, stride=1, padding=1, bias=False)\r\n self.l20_bn2 = nn.BatchNorm2d(ch_l20conv2)\r\n # layer2-1\r\n self.l21_conv1 = nn.Conv2d(ch_l20conv2, ch_l21conv1, kernel_size=3, stride=1, padding=1, bias=False)\r\n self.l21_bn1 = nn.BatchNorm2d(ch_l21conv1)\r\n self.l21_conv2 = nn.Conv2d(ch_l21conv1, ch_l21conv2, kernel_size=3, stride=1, padding=1, bias=False)\r\n self.l21_bn2 = nn.BatchNorm2d(ch_l21conv2)\r\n # layer2-2\r\n self.l22_conv1 = nn.Conv2d(ch_l21conv2, ch_l22conv1, kernel_size=3, stride=1, padding=1, bias=False)\r\n self.l22_bn1 = nn.BatchNorm2d(ch_l22conv1)\r\n self.l22_conv2 = 
nn.Conv2d(ch_l22conv1, ch_l22conv2, kernel_size=3, stride=1, padding=1, bias=False)\r\n self.l22_bn2 = nn.BatchNorm2d(ch_l22conv2)\r\n # layer2-3\r\n self.l23_conv1 = nn.Conv2d(ch_l22conv2, ch_l23conv1, kernel_size=3, stride=1, padding=1, bias=False)\r\n self.l23_bn1 = nn.BatchNorm2d(ch_l23conv1)\r\n self.l23_conv2 = nn.Conv2d(ch_l23conv1, ch_l23conv2, kernel_size=3, stride=1, padding=1, bias=False)\r\n self.l23_bn2 = nn.BatchNorm2d(ch_l23conv2)\r\n #layer2-4\r\n self.l24_conv1 = nn.Conv2d(ch_l23conv2, ch_l24conv1, kernel_size=3, stride=1, padding=1, bias=False)\r\n self.l24_bn1 = nn.BatchNorm2d(ch_l24conv1)\r\n self.l24_conv2 = nn.Conv2d(ch_l24conv1, ch_l24conv2, kernel_size=3, stride=1, padding=1, bias=False)\r\n self.l24_bn2 = nn.BatchNorm2d(ch_l24conv2)\r\n #layer2-5\r\n self.l25_conv1 = nn.Conv2d(ch_l24conv2, ch_l25conv1, kernel_size=3, stride=1, padding=1, bias=False)\r\n self.l25_bn1 = nn.BatchNorm2d(ch_l25conv1)\r\n self.l25_conv2 = nn.Conv2d(ch_l25conv1, ch_l25conv2, kernel_size=3, stride=1, padding=1, bias=False)\r\n self.l25_bn2 = nn.BatchNorm2d(ch_l25conv2)\r\n #layer2-6\r\n self.l26_conv1 = nn.Conv2d(ch_l25conv2, ch_l26conv1, kernel_size=3, stride=1, padding=1, bias=False)\r\n self.l26_bn1 = nn.BatchNorm2d(ch_l26conv1)\r\n self.l26_conv2 = nn.Conv2d(ch_l26conv1, ch_l26conv2, kernel_size=3, stride=1, padding=1, bias=False)\r\n self.l26_bn2 = nn.BatchNorm2d(ch_l26conv2)\r\n #layer2-7\r\n self.l27_conv1 = nn.Conv2d(ch_l26conv2, ch_l27conv1, kernel_size=3, stride=1, padding=1, bias=False)\r\n self.l27_bn1 = nn.BatchNorm2d(ch_l27conv1)\r\n self.l27_conv2 = nn.Conv2d(ch_l27conv1, ch_l27conv2, kernel_size=3, stride=1, padding=1, bias=False)\r\n self.l27_bn2 = nn.BatchNorm2d(ch_l27conv2)\r\n #layer2-8\r\n self.l28_conv1 = nn.Conv2d(ch_l27conv2, ch_l28conv1, kernel_size=3, stride=1, padding=1, bias=False)\r\n self.l28_bn1 = nn.BatchNorm2d(ch_l28conv1)\r\n self.l28_conv2 = nn.Conv2d(ch_l28conv1, ch_l28conv2, kernel_size=3, stride=1, padding=1, bias=False)\r\n self.l28_bn2 = nn.BatchNorm2d(ch_l28conv2)\r\n #layer2-9\r\n self.l29_conv1 = nn.Conv2d(ch_l28conv2, ch_l29conv1, kernel_size=3, stride=1, padding=1, bias=False)\r\n self.l29_bn1 = nn.BatchNorm2d(ch_l29conv1)\r\n self.l29_conv2 = nn.Conv2d(ch_l29conv1, ch_l29conv2, kernel_size=3, stride=1, padding=1, bias=False)\r\n self.l29_bn2 = nn.BatchNorm2d(ch_l29conv2)\r\n #layer2-10\r\n self.l210_conv1 = nn.Conv2d(ch_l29conv2, ch_l210conv1, kernel_size=3, stride=1, padding=1, bias=False)\r\n self.l210_bn1 = nn.BatchNorm2d(ch_l210conv1)\r\n self.l210_conv2 = nn.Conv2d(ch_l210conv1, ch_l210conv2, kernel_size=3, stride=1, padding=1, bias=False)\r\n self.l210_bn2 = nn.BatchNorm2d(ch_l210conv2)\r\n #layer2-11\r\n self.l211_conv1 = nn.Conv2d(ch_l210conv2, ch_l211conv1, kernel_size=3, stride=1, padding=1, bias=False)\r\n self.l211_bn1 = nn.BatchNorm2d(ch_l211conv1)\r\n self.l211_conv2 = nn.Conv2d(ch_l211conv1, ch_l211conv2, kernel_size=3, stride=1, padding=1, bias=False)\r\n self.l211_bn2 = nn.BatchNorm2d(ch_l211conv2)\r\n #layer2-12\r\n self.l212_conv1 = nn.Conv2d(ch_l211conv2, ch_l212conv1, kernel_size=3, stride=1, padding=1, bias=False)\r\n self.l212_bn1 = nn.BatchNorm2d(ch_l212conv1)\r\n self.l212_conv2 = nn.Conv2d(ch_l212conv1, ch_l212conv2, kernel_size=3, stride=1, padding=1, bias=False)\r\n self.l212_bn2 = nn.BatchNorm2d(ch_l212conv2)\r\n #layer2-13\r\n self.l213_conv1 = nn.Conv2d(ch_l212conv2, ch_l213conv1, kernel_size=3, stride=1, padding=1, bias=False)\r\n self.l213_bn1 = nn.BatchNorm2d(ch_l213conv1)\r\n self.l213_conv2 = 
nn.Conv2d(ch_l213conv1, ch_l213conv2, kernel_size=3, stride=1, padding=1, bias=False)\r\n self.l213_bn2 = nn.BatchNorm2d(ch_l213conv2)\r\n #layer2-14\r\n self.l214_conv1 = nn.Conv2d(ch_l213conv2, ch_l214conv1, kernel_size=3, stride=1, padding=1, bias=False)\r\n self.l214_bn1 = nn.BatchNorm2d(ch_l214conv1)\r\n self.l214_conv2 = nn.Conv2d(ch_l214conv1, ch_l214conv2, kernel_size=3, stride=1, padding=1, bias=False)\r\n self.l214_bn2 = nn.BatchNorm2d(ch_l214conv2)\r\n #layer2-15\r\n self.l215_conv1 = nn.Conv2d(ch_l214conv2, ch_l215conv1, kernel_size=3, stride=1, padding=1, bias=False)\r\n self.l215_bn1 = nn.BatchNorm2d(ch_l215conv1)\r\n self.l215_conv2 = nn.Conv2d(ch_l215conv1, ch_l215conv2, kernel_size=3, stride=1, padding=1, bias=False)\r\n self.l215_bn2 = nn.BatchNorm2d(ch_l215conv2)\r\n #layer2-16\r\n self.l216_conv1 = nn.Conv2d(ch_l215conv2, ch_l216conv1, kernel_size=3, stride=1, padding=1, bias=False)\r\n self.l216_bn1 = nn.BatchNorm2d(ch_l216conv1)\r\n self.l216_conv2 = nn.Conv2d(ch_l216conv1, ch_l216conv2, kernel_size=3, stride=1, padding=1, bias=False)\r\n self.l216_bn2 = nn.BatchNorm2d(ch_l216conv2)\r\n #layer2-17\r\n self.l217_conv1 = nn.Conv2d(ch_l216conv2, ch_l217conv1, kernel_size=3, stride=1, padding=1, bias=False)\r\n self.l217_bn1 = nn.BatchNorm2d(ch_l217conv1)\r\n self.l217_conv2 = nn.Conv2d(ch_l217conv1, ch_l217conv2, kernel_size=3, stride=1, padding=1, bias=False)\r\n self.l217_bn2 = nn.BatchNorm2d(ch_l217conv2)\r\n\r\n\r\n # zero padding 2 : add zero to resize tensor 32 -> 64 \r\n ch_diff23 = ch_l30conv2 - ch_l217conv2\r\n # just through input to output\r\n self.zeropad21 = LambdaLayer(lambda x: F.pad(x[:, :, :, :], (0, 0, 0, 0, 0, 0), \"constant\", 0))\r\n self.zeropad22 = LambdaLayer(lambda x: F.pad(x[:, :, :, :], (0, 0, 0, 0, 0, 0), \"constant\", 0))\r\n \r\n #x.size ([mini-batch, out_ch, feature_map_size, feature_map_size])\r\n #x[:,:,::2,::2] : downsample input_feature_map to half\r\n if ch_diff23 >0:\r\n if ch_diff23%2 ==0:\r\n self.zeropad21 = LambdaLayer(lambda x:\r\n F.pad(x[:, :, ::2, ::2], (0, 0, 0, 0, ch_diff23//2, ch_diff23//2), \"constant\", 0))\r\n else:\r\n self.zeropad21 = LambdaLayer(lambda x:\r\n F.pad(x[:, :, ::2, ::2], (0, 0, 0, 0, ch_diff23//2, (ch_diff23//2)+1), \"constant\", 0))\r\n elif ch_diff23 <0:\r\n ch_diff23 = ch_diff23 * -1.0\r\n if ch_diff23%2 ==0:\r\n self.zeropad22 = LambdaLayer(lambda x:\r\n F.pad(x[:, :, ::2, ::2], (0, 0, 0, 0, ch_diff23//2, ch_diff23//2), \"constant\", 0))\r\n else:\r\n self.zeropad22 = LambdaLayer(lambda x:\r\n F.pad(x[:, :, ::2, ::2], (0, 0, 0, 0, ch_diff23//2, (ch_diff23//2)+1), \"constant\", 0))\r\n\r\n # layer3-0\r\n self.l30_conv1 = nn.Conv2d(ch_l217conv2, ch_l30conv1, kernel_size=3, stride=2, padding=1, bias=False)\r\n self.l30_bn1 = nn.BatchNorm2d(ch_l30conv1)\r\n self.l30_conv2 = nn.Conv2d(ch_l30conv1, ch_l30conv2, kernel_size=3, stride=1, padding=1, bias=False)\r\n self.l30_bn2 = nn.BatchNorm2d(ch_l30conv2)\r\n # layer3-1\r\n self.l31_conv1 = nn.Conv2d(ch_l30conv2, ch_l31conv1, kernel_size=3, stride=1, padding=1, bias=False)\r\n self.l31_bn1 = nn.BatchNorm2d(ch_l31conv1)\r\n self.l31_conv2 = nn.Conv2d(ch_l31conv1, ch_l31conv2, kernel_size=3, stride=1, padding=1, bias=False)\r\n self.l31_bn2 = nn.BatchNorm2d(ch_l31conv2)\r\n # layer3-2\r\n self.l32_conv1 = nn.Conv2d(ch_l31conv2, ch_l32conv1, kernel_size=3, stride=1, padding=1, bias=False)\r\n self.l32_bn1 = nn.BatchNorm2d(ch_l32conv1)\r\n self.l32_conv2 = nn.Conv2d(ch_l32conv1, ch_l32conv2, kernel_size=3, stride=1, padding=1, bias=False)\r\n self.l32_bn2 = 
nn.BatchNorm2d(ch_l32conv2)\r\n # layer3-3\r\n self.l33_conv1 = nn.Conv2d(ch_l32conv2, ch_l33conv1, kernel_size=3, stride=1, padding=1, bias=False)\r\n self.l33_bn1 = nn.BatchNorm2d(ch_l33conv1)\r\n self.l33_conv2 = nn.Conv2d(ch_l33conv1, ch_l33conv2, kernel_size=3, stride=1, padding=1, bias=False)\r\n self.l33_bn2 = nn.BatchNorm2d(ch_l33conv2)\r\n # layer3-4\r\n self.l34_conv1 = nn.Conv2d(ch_l33conv2, ch_l34conv1, kernel_size=3, stride=1, padding=1, bias=False)\r\n self.l34_bn1 = nn.BatchNorm2d(ch_l34conv1)\r\n self.l34_conv2 = nn.Conv2d(ch_l34conv1, ch_l34conv2, kernel_size=3, stride=1, padding=1, bias=False)\r\n self.l34_bn2 = nn.BatchNorm2d(ch_l34conv2)\r\n # layer3-5\r\n self.l35_conv1 = nn.Conv2d(ch_l34conv2, ch_l35conv1, kernel_size=3, stride=1, padding=1, bias=False)\r\n self.l35_bn1 = nn.BatchNorm2d(ch_l35conv1)\r\n self.l35_conv2 = nn.Conv2d(ch_l35conv1, ch_l35conv2, kernel_size=3, stride=1, padding=1, bias=False)\r\n self.l35_bn2 = nn.BatchNorm2d(ch_l35conv2)\r\n # layer3-6\r\n self.l36_conv1 = nn.Conv2d(ch_l35conv2, ch_l36conv1, kernel_size=3, stride=1, padding=1, bias=False)\r\n self.l36_bn1 = nn.BatchNorm2d(ch_l36conv1)\r\n self.l36_conv2 = nn.Conv2d(ch_l36conv1, ch_l36conv2, kernel_size=3, stride=1, padding=1, bias=False)\r\n self.l36_bn2 = nn.BatchNorm2d(ch_l36conv2)\r\n # layer3-7\r\n self.l37_conv1 = nn.Conv2d(ch_l36conv2, ch_l37conv1, kernel_size=3, stride=1, padding=1, bias=False)\r\n self.l37_bn1 = nn.BatchNorm2d(ch_l37conv1)\r\n self.l37_conv2 = nn.Conv2d(ch_l37conv1, ch_l37conv2, kernel_size=3, stride=1, padding=1, bias=False)\r\n self.l37_bn2 = nn.BatchNorm2d(ch_l37conv2)\r\n # layer3-8\r\n self.l38_conv1 = nn.Conv2d(ch_l37conv2, ch_l38conv1, kernel_size=3, stride=1, padding=1, bias=False)\r\n self.l38_bn1 = nn.BatchNorm2d(ch_l38conv1)\r\n self.l38_conv2 = nn.Conv2d(ch_l38conv1, ch_l38conv2, kernel_size=3, stride=1, padding=1, bias=False)\r\n self.l38_bn2 = nn.BatchNorm2d(ch_l38conv2)\r\n # layer3-9\r\n self.l39_conv1 = nn.Conv2d(ch_l38conv2, ch_l39conv1, kernel_size=3, stride=1, padding=1, bias=False)\r\n self.l39_bn1 = nn.BatchNorm2d(ch_l39conv1)\r\n self.l39_conv2 = nn.Conv2d(ch_l39conv1, ch_l39conv2, kernel_size=3, stride=1, padding=1, bias=False)\r\n self.l39_bn2 = nn.BatchNorm2d(ch_l39conv2)\r\n # layer3-10\r\n self.l310_conv1 = nn.Conv2d(ch_l39conv2, ch_l310conv1, kernel_size=3, stride=1, padding=1, bias=False)\r\n self.l310_bn1 = nn.BatchNorm2d(ch_l310conv1)\r\n self.l310_conv2 = nn.Conv2d(ch_l310conv1, ch_l310conv2, kernel_size=3, stride=1, padding=1, bias=False)\r\n self.l310_bn2 = nn.BatchNorm2d(ch_l310conv2)\r\n # layer3-11\r\n self.l311_conv1 = nn.Conv2d(ch_l310conv2, ch_l311conv1, kernel_size=3, stride=1, padding=1, bias=False)\r\n self.l311_bn1 = nn.BatchNorm2d(ch_l311conv1)\r\n self.l311_conv2 = nn.Conv2d(ch_l311conv1, ch_l311conv2, kernel_size=3, stride=1, padding=1, bias=False)\r\n self.l311_bn2 = nn.BatchNorm2d(ch_l311conv2)\r\n # layer3-12\r\n self.l312_conv1 = nn.Conv2d(ch_l311conv2, ch_l312conv1, kernel_size=3, stride=1, padding=1, bias=False)\r\n self.l312_bn1 = nn.BatchNorm2d(ch_l312conv1)\r\n self.l312_conv2 = nn.Conv2d(ch_l312conv1, ch_l312conv2, kernel_size=3, stride=1, padding=1, bias=False)\r\n self.l312_bn2 = nn.BatchNorm2d(ch_l312conv2)\r\n # layer3-13\r\n self.l313_conv1 = nn.Conv2d(ch_l312conv2, ch_l313conv1, kernel_size=3, stride=1, padding=1, bias=False)\r\n self.l313_bn1 = nn.BatchNorm2d(ch_l313conv1)\r\n self.l313_conv2 = nn.Conv2d(ch_l313conv1, ch_l313conv2, kernel_size=3, stride=1, padding=1, bias=False)\r\n self.l313_bn2 = 
nn.BatchNorm2d(ch_l313conv2)\r\n # layer3-14\r\n self.l314_conv1 = nn.Conv2d(ch_l313conv2, ch_l314conv1, kernel_size=3, stride=1, padding=1, bias=False)\r\n self.l314_bn1 = nn.BatchNorm2d(ch_l314conv1)\r\n self.l314_conv2 = nn.Conv2d(ch_l314conv1, ch_l314conv2, kernel_size=3, stride=1, padding=1, bias=False)\r\n self.l314_bn2 = nn.BatchNorm2d(ch_l314conv2)\r\n # layer3-15\r\n self.l315_conv1 = nn.Conv2d(ch_l314conv2, ch_l315conv1, kernel_size=3, stride=1, padding=1, bias=False)\r\n self.l315_bn1 = nn.BatchNorm2d(ch_l315conv1)\r\n self.l315_conv2 = nn.Conv2d(ch_l315conv1, ch_l315conv2, kernel_size=3, stride=1, padding=1, bias=False)\r\n self.l315_bn2 = nn.BatchNorm2d(ch_l315conv2)\r\n # layer3-16\r\n self.l316_conv1 = nn.Conv2d(ch_l315conv2, ch_l316conv1, kernel_size=3, stride=1, padding=1, bias=False)\r\n self.l316_bn1 = nn.BatchNorm2d(ch_l316conv1)\r\n self.l316_conv2 = nn.Conv2d(ch_l316conv1, ch_l316conv2, kernel_size=3, stride=1, padding=1, bias=False)\r\n self.l316_bn2 = nn.BatchNorm2d(ch_l316conv2)\r\n # layer3-17\r\n self.l317_conv1 = nn.Conv2d(ch_l316conv2, ch_l317conv1, kernel_size=3, stride=1, padding=1, bias=False)\r\n self.l317_bn1 = nn.BatchNorm2d(ch_l317conv1)\r\n self.l317_conv2 = nn.Conv2d(ch_l317conv1, ch_l317conv2, kernel_size=3, stride=1, padding=1, bias=False)\r\n self.l317_bn2 = nn.BatchNorm2d(ch_l317conv2)\r\n\r\n self.avgpool = nn.AdaptiveAvgPool2d((1, 1))\r\n self.linear = nn.Linear(ch_l317conv2, num_classes)\r\n\r\n def forward(self, x):\r\n x = F.relu(self.bn1(self.conv1(x))) \r\n\r\n # layer1-0\r\n identity = x\r\n x = F.relu(self.l10_bn1(self.l10_conv1(x)))\r\n x = self.l10_bn2(self.l10_conv2(x))\r\n x += identity\r\n x = F.relu(x)\r\n # layer1-1\r\n identity = x\r\n x = F.relu(self.l11_bn1(self.l11_conv1(x)))\r\n x = self.l11_bn2(self.l11_conv2(x))\r\n x += identity\r\n x = F.relu(x)\r\n # layer1-2\r\n identity = x\r\n x = F.relu(self.l12_bn1(self.l12_conv1(x)))\r\n x = self.l12_bn2(self.l12_conv2(x))\r\n x += identity\r\n x = F.relu(x)\r\n # layer1-3\r\n identity = x\r\n x = F.relu(self.l13_bn1(self.l13_conv1(x)))\r\n x = self.l13_bn2(self.l13_conv2(x))\r\n x += identity\r\n x = F.relu(x)\r\n # layer1-4\r\n identity = x\r\n x = F.relu(self.l14_bn1(self.l14_conv1(x)))\r\n x = self.l14_bn2(self.l14_conv2(x))\r\n x += identity\r\n x = F.relu(x)\r\n # layer1-5\r\n identity = x\r\n x = F.relu(self.l15_bn1(self.l15_conv1(x)))\r\n x = self.l15_bn2(self.l15_conv2(x))\r\n x += identity\r\n x = F.relu(x)\r\n # layer1-6\r\n identity = x\r\n x = F.relu(self.l16_bn1(self.l16_conv1(x)))\r\n x = self.l16_bn2(self.l16_conv2(x))\r\n x += identity\r\n x = F.relu(x)\r\n # layer1-7\r\n identity = x\r\n x = F.relu(self.l17_bn1(self.l17_conv1(x)))\r\n x = self.l17_bn2(self.l17_conv2(x))\r\n x += identity\r\n x = F.relu(x)\r\n # layer1-8\r\n identity = x\r\n x = F.relu(self.l18_bn1(self.l18_conv1(x)))\r\n x = self.l18_bn2(self.l18_conv2(x))\r\n x += identity\r\n x = F.relu(x)\r\n # layer1-9\r\n identity = x\r\n x = F.relu(self.l19_bn1(self.l19_conv1(x)))\r\n x = self.l19_bn2(self.l19_conv2(x))\r\n x += identity\r\n x = F.relu(x)\r\n # layer1-10\r\n identity = x\r\n x = F.relu(self.l110_bn1(self.l110_conv1(x)))\r\n x = self.l110_bn2(self.l110_conv2(x))\r\n x += identity\r\n x = F.relu(x)\r\n # layer1-11\r\n identity = x\r\n x = F.relu(self.l111_bn1(self.l111_conv1(x)))\r\n x = self.l111_bn2(self.l111_conv2(x))\r\n x += identity\r\n x = F.relu(x)\r\n # layer1-12\r\n identity = x\r\n x = F.relu(self.l112_bn1(self.l112_conv1(x)))\r\n x = self.l112_bn2(self.l112_conv2(x))\r\n x += 
identity\r\n x = F.relu(x)\r\n # layer1-13\r\n identity = x\r\n x = F.relu(self.l113_bn1(self.l113_conv1(x)))\r\n x = self.l113_bn2(self.l113_conv2(x))\r\n x += identity\r\n x = F.relu(x)\r\n # layer1-14\r\n identity = x\r\n x = F.relu(self.l114_bn1(self.l114_conv1(x)))\r\n x = self.l114_bn2(self.l114_conv2(x))\r\n x += identity\r\n x = F.relu(x)\r\n # layer1-15\r\n identity = x\r\n x = F.relu(self.l115_bn1(self.l115_conv1(x)))\r\n x = self.l115_bn2(self.l115_conv2(x))\r\n x += identity\r\n x = F.relu(x)\r\n # layer1-16\r\n identity = x\r\n x = F.relu(self.l116_bn1(self.l116_conv1(x)))\r\n x = self.l116_bn2(self.l116_conv2(x))\r\n x += identity\r\n x = F.relu(x)\r\n # layer1-17\r\n identity = x\r\n x = F.relu(self.l117_bn1(self.l117_conv1(x)))\r\n x = self.l117_bn2(self.l117_conv2(x))\r\n x += identity\r\n x = F.relu(x)\r\n\r\n\r\n # layer2-0\r\n identity = x\r\n x = F.relu(self.l20_bn1(self.l20_conv1(x)))\r\n x = self.l20_bn2(self.l20_conv2(x))\r\n x = self.zeropad12(x) # zero padding on main path\r\n x += self.zeropad11(identity) # zero padding on shortcut path\r\n x = F.relu(x)\r\n # layer2-1\r\n identity = x\r\n x = F.relu(self.l21_bn1(self.l21_conv1(x)))\r\n x = self.l21_bn2(self.l21_conv2(x))\r\n x += identity\r\n x = F.relu(x)\r\n # layer2-2\r\n identity = x\r\n x = F.relu(self.l22_bn1(self.l22_conv1(x)))\r\n x = self.l22_bn2(self.l22_conv2(x))\r\n x += identity\r\n x = F.relu(x)\r\n # layer2-3\r\n identity = x\r\n x = F.relu(self.l23_bn1(self.l23_conv1(x)))\r\n x = self.l23_bn2(self.l23_conv2(x))\r\n x += identity\r\n x = F.relu(x)\r\n # layer2-4\r\n identity = x\r\n x = F.relu(self.l24_bn1(self.l24_conv1(x)))\r\n x = self.l24_bn2(self.l24_conv2(x))\r\n x += identity\r\n x = F.relu(x)\r\n # layer2-5\r\n identity = x\r\n x = F.relu(self.l25_bn1(self.l25_conv1(x)))\r\n x = self.l25_bn2(self.l25_conv2(x))\r\n x += identity\r\n x = F.relu(x)\r\n # layer2-6\r\n identity = x\r\n x = F.relu(self.l26_bn1(self.l26_conv1(x)))\r\n x = self.l26_bn2(self.l26_conv2(x))\r\n x += identity\r\n x = F.relu(x)\r\n # layer2-7\r\n identity = x\r\n x = F.relu(self.l27_bn1(self.l27_conv1(x)))\r\n x = self.l27_bn2(self.l27_conv2(x))\r\n x += identity\r\n x = F.relu(x)\r\n # layer2-8\r\n identity = x\r\n x = F.relu(self.l28_bn1(self.l28_conv1(x)))\r\n x = self.l28_bn2(self.l28_conv2(x))\r\n x += identity\r\n x = F.relu(x)\r\n # layer2-9\r\n identity = x\r\n x = F.relu(self.l29_bn1(self.l29_conv1(x)))\r\n x = self.l29_bn2(self.l29_conv2(x))\r\n x += identity\r\n x = F.relu(x)\r\n # layer2-10\r\n identity = x\r\n x = F.relu(self.l210_bn1(self.l210_conv1(x)))\r\n x = self.l210_bn2(self.l210_conv2(x))\r\n x += identity\r\n x = F.relu(x)\r\n # layer2-11\r\n identity = x\r\n x = F.relu(self.l211_bn1(self.l211_conv1(x)))\r\n x = self.l211_bn2(self.l211_conv2(x))\r\n x += identity\r\n x = F.relu(x)\r\n # layer2-12\r\n identity = x\r\n x = F.relu(self.l212_bn1(self.l212_conv1(x)))\r\n x = self.l212_bn2(self.l212_conv2(x))\r\n x += identity\r\n x = F.relu(x)\r\n # layer2-13\r\n identity = x\r\n x = F.relu(self.l213_bn1(self.l213_conv1(x)))\r\n x = self.l213_bn2(self.l213_conv2(x))\r\n x += identity\r\n x = F.relu(x)\r\n # layer2-14\r\n identity = x\r\n x = F.relu(self.l214_bn1(self.l214_conv1(x)))\r\n x = self.l214_bn2(self.l214_conv2(x))\r\n x += identity\r\n x = F.relu(x)\r\n # layer2-15\r\n identity = x\r\n x = F.relu(self.l215_bn1(self.l215_conv1(x)))\r\n x = self.l215_bn2(self.l215_conv2(x))\r\n x += identity\r\n x = F.relu(x)\r\n # layer2-16\r\n identity = x\r\n x = F.relu(self.l216_bn1(self.l216_conv1(x)))\r\n 
x = self.l216_bn2(self.l216_conv2(x))\r\n x += identity\r\n x = F.relu(x)\r\n # layer2-17\r\n identity = x\r\n x = F.relu(self.l217_bn1(self.l217_conv1(x)))\r\n x = self.l217_bn2(self.l217_conv2(x))\r\n x += identity\r\n x = F.relu(x)\r\n\r\n\r\n # layer3-0 \r\n identity = x\r\n x = F.relu(self.l30_bn1(self.l30_conv1(x)))\r\n x = self.l30_bn2(self.l30_conv2(x))\r\n x = self.zeropad22(x) # zero padding on main path\r\n x+= self.zeropad21(identity) # zero padding on shortcut path\r\n x = F.relu(x)\r\n # layer3-1\r\n identity = x\r\n x = F.relu(self.l31_bn1(self.l31_conv1(x)))\r\n x = self.l31_bn2(self.l31_conv2(x))\r\n x += identity\r\n x = F.relu(x)\r\n # layer3-2\r\n identity = x\r\n x = F.relu(self.l32_bn1(self.l32_conv1(x)))\r\n x = self.l32_bn2(self.l32_conv2(x))\r\n x += identity\r\n x = F.relu(x)\r\n # layer3-3\r\n identity = x\r\n x = F.relu(self.l33_bn1(self.l33_conv1(x)))\r\n x = self.l33_bn2(self.l33_conv2(x))\r\n x += identity\r\n x = F.relu(x)\r\n # layer3-4\r\n identity = x\r\n x = F.relu(self.l34_bn1(self.l34_conv1(x)))\r\n x = self.l34_bn2(self.l34_conv2(x))\r\n x += identity\r\n x = F.relu(x)\r\n # layer3-5\r\n identity = x\r\n x = F.relu(self.l35_bn1(self.l35_conv1(x)))\r\n x = self.l35_bn2(self.l35_conv2(x))\r\n x += identity\r\n x = F.relu(x)\r\n # layer3-6\r\n identity = x\r\n x = F.relu(self.l36_bn1(self.l36_conv1(x)))\r\n x = self.l36_bn2(self.l36_conv2(x))\r\n x += identity\r\n x = F.relu(x)\r\n # layer3-7\r\n identity = x\r\n x = F.relu(self.l37_bn1(self.l37_conv1(x)))\r\n x = self.l37_bn2(self.l37_conv2(x))\r\n x += identity\r\n x = F.relu(x)\r\n # layer3-8\r\n identity = x\r\n x = F.relu(self.l38_bn1(self.l38_conv1(x)))\r\n x = self.l38_bn2(self.l38_conv2(x))\r\n x += identity\r\n x = F.relu(x)\r\n # layer3-9\r\n identity = x\r\n x = F.relu(self.l39_bn1(self.l39_conv1(x)))\r\n x = self.l39_bn2(self.l39_conv2(x))\r\n x += identity\r\n x = F.relu(x)\r\n # layer3-10\r\n identity = x\r\n x = F.relu(self.l310_bn1(self.l310_conv1(x)))\r\n x = self.l310_bn2(self.l310_conv2(x))\r\n x += identity\r\n x = F.relu(x)\r\n # layer3-11\r\n identity = x\r\n x = F.relu(self.l311_bn1(self.l311_conv1(x)))\r\n x = self.l311_bn2(self.l311_conv2(x))\r\n x += identity\r\n x = F.relu(x)\r\n # layer3-12\r\n identity = x\r\n x = F.relu(self.l312_bn1(self.l312_conv1(x)))\r\n x = self.l312_bn2(self.l312_conv2(x))\r\n x += identity\r\n x = F.relu(x)\r\n # layer3-13\r\n identity = x\r\n x = F.relu(self.l313_bn1(self.l313_conv1(x)))\r\n x = self.l313_bn2(self.l313_conv2(x))\r\n x += identity\r\n x = F.relu(x)\r\n # layer3-14\r\n identity = x\r\n x = F.relu(self.l314_bn1(self.l314_conv1(x)))\r\n x = self.l314_bn2(self.l314_conv2(x))\r\n x += identity\r\n x = F.relu(x)\r\n # layer3-15\r\n identity = x\r\n x = F.relu(self.l315_bn1(self.l315_conv1(x)))\r\n x = self.l315_bn2(self.l315_conv2(x))\r\n x += identity\r\n x = F.relu(x)\r\n # layer3-16\r\n identity = x\r\n x = F.relu(self.l316_bn1(self.l316_conv1(x)))\r\n x = self.l316_bn2(self.l316_conv2(x))\r\n x += identity\r\n x = F.relu(x)\r\n # layer3-17\r\n identity = x\r\n x = F.relu(self.l317_bn1(self.l317_conv1(x)))\r\n x = self.l317_bn2(self.l317_conv2(x))\r\n x += identity\r\n x = F.relu(x)\r\n \r\n x = F.avg_pool2d(x, x.size()[3])\r\n x = x.view(x.size(0),-1)\r\n x = self.linear(x)\r\n \r\n return x\r\n"
] |
[
[
"torch.nn.Conv2d",
"torch.nn.Linear",
"torch.nn.functional.relu",
"torch.nn.AdaptiveAvgPool2d",
"torch.nn.BatchNorm2d",
"torch.nn.functional.pad"
]
] |
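The record above builds its downsampling residual shortcuts as parameter-free, zero-padded identities (subsample spatially, pad the channel dimension). A minimal stand-alone sketch of that idea, assuming PyTorch; the function name and shapes are illustrative, not taken from the dataset row:

import torch
import torch.nn.functional as F

def zero_pad_shortcut(x, ch_diff):
    # Subsample H and W by 2, then zero-pad the channel dimension so the
    # parameter-free shortcut matches the shape of the strided main path.
    return F.pad(x[:, :, ::2, ::2],
                 (0, 0, 0, 0, ch_diff // 2, ch_diff - ch_diff // 2))

x = torch.randn(4, 32, 16, 16)
print(zero_pad_shortcut(x, 64 - 32).shape)  # torch.Size([4, 64, 8, 8])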
daviesje/21cmFAST
|
[
"f36885a813ace72f34c881d80473208d06e3829a"
] |
[
"src/py21cmfast/cli.py"
] |
[
"\"\"\"Module that contains the command line app.\"\"\"\nimport builtins\nimport click\nimport inspect\nimport logging\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport warnings\nimport yaml\nfrom os import path, remove\nfrom pathlib import Path\n\nfrom . import _cfg, cache_tools, global_params, plotting\nfrom . import wrapper as lib\n\n\ndef _get_config(config=None):\n if config is None:\n config = path.expanduser(path.join(\"~\", \".21cmfast\", \"runconfig_example.yml\"))\n\n with open(config) as f:\n cfg = yaml.load(f, Loader=yaml.FullLoader)\n\n return cfg\n\n\ndef _ctx_to_dct(args):\n dct = {}\n j = 0\n while j < len(args):\n arg = args[j]\n if \"=\" in arg:\n a = arg.split(\"=\")\n dct[a[0].replace(\"--\", \"\")] = a[-1]\n j += 1\n else:\n dct[arg.replace(\"--\", \"\")] = args[j + 1]\n j += 2\n\n return dct\n\n\ndef _update(obj, ctx):\n # Try to use the extra arguments as an override of config.\n kk = list(ctx.keys())\n for k in kk:\n # noinspection PyProtectedMember\n if hasattr(obj, k):\n try:\n val = getattr(obj, \"_\" + k)\n setattr(obj, \"_\" + k, type(val)(ctx[k]))\n ctx.pop(k)\n except (AttributeError, TypeError):\n try:\n val = getattr(obj, k)\n setattr(obj, k, type(val)(ctx[k]))\n ctx.pop(k)\n except AttributeError:\n pass\n\n\ndef _override(ctx, *param_dicts):\n # Try to use the extra arguments as an override of config.\n\n if ctx.args:\n ctx = _ctx_to_dct(ctx.args)\n for p in param_dicts:\n _update(p, ctx)\n\n # Also update globals, always.\n _update(global_params, ctx)\n if ctx:\n warnings.warn(\"The following arguments were not able to be set: %s\" % ctx)\n\n\nmain = click.Group()\n\n\[email protected](\n context_settings={ # Doing this allows arbitrary options to override config\n \"ignore_unknown_options\": True,\n \"allow_extra_args\": True,\n }\n)\[email protected](\n \"--config\",\n type=click.Path(exists=True, dir_okay=False),\n default=None,\n help=\"Path to the configuration file (default ~/.21cmfast/runconfig_single.yml)\",\n)\[email protected](\n \"--regen/--no-regen\",\n default=False,\n help=\"Whether to force regeneration of init/perturb files if they already exist.\",\n)\[email protected](\n \"--direc\",\n type=click.Path(exists=True, dir_okay=True),\n default=None,\n help=\"directory to write data and plots to -- must exist.\",\n)\[email protected](\n \"--seed\",\n type=int,\n default=None,\n help=\"specify a random seed for the initial conditions\",\n)\[email protected]_context\ndef init(ctx, config, regen, direc, seed):\n \"\"\"Run a single iteration of 21cmFAST init, saving results to file.\n\n Parameters\n ----------\n ctx :\n A parameter from the parent CLI function to be able to override config.\n config : str\n Path to the configuration file.\n regen : bool\n Whether to regenerate all data, even if found in cache.\n direc : str\n Where to search for cached items.\n seed : int\n Random seed used to generate data.\n \"\"\"\n cfg = _get_config(config)\n\n # Set user/cosmo params from config.\n user_params = lib.UserParams(**cfg.get(\"user_params\", {}))\n cosmo_params = lib.CosmoParams(**cfg.get(\"cosmo_params\", {}))\n\n _override(ctx, user_params, cosmo_params)\n\n lib.initial_conditions(\n user_params=user_params,\n cosmo_params=cosmo_params,\n regenerate=regen,\n write=True,\n direc=direc,\n random_seed=seed,\n )\n\n\[email protected](\n context_settings={ # Doing this allows arbitrary options to override config\n \"ignore_unknown_options\": True,\n \"allow_extra_args\": True,\n }\n)\[email protected](\"redshift\", type=float)\[email 
protected](\n \"--config\",\n type=click.Path(exists=True, dir_okay=False),\n default=None,\n help=\"Path to the configuration file (default ~/.21cmfast/runconfig_single.yml)\",\n)\[email protected](\n \"--regen/--no-regen\",\n default=False,\n help=\"Whether to force regeneration of init/perturb files if they already exist.\",\n)\[email protected](\n \"--direc\",\n type=click.Path(exists=True, dir_okay=True),\n default=None,\n help=\"directory to write data and plots to -- must exist.\",\n)\[email protected](\n \"--seed\",\n type=int,\n default=None,\n help=\"specify a random seed for the initial conditions\",\n)\[email protected]_context\ndef perturb(ctx, redshift, config, regen, direc, seed):\n \"\"\"Run 21cmFAST perturb_field at the specified redshift, saving results to file.\n\n Parameters\n ----------\n ctx :\n A parameter from the parent CLI function to be able to override config.\n redshift : float\n Redshift at which to generate perturbed field.\n config : str\n Path to the configuration file.\n regen : bool\n Whether to regenerate all data, even if found in cache.\n direc : str\n Where to search for cached items.\n seed : int\n Random seed used to generate data.\n \"\"\"\n cfg = _get_config(config)\n\n # Set user/cosmo params from config.\n user_params = lib.UserParams(**cfg.get(\"user_params\", {}))\n cosmo_params = lib.CosmoParams(**cfg.get(\"cosmo_params\", {}))\n\n _override(ctx, user_params, cosmo_params)\n\n lib.perturb_field(\n redshift=redshift,\n user_params=user_params,\n cosmo_params=cosmo_params,\n regenerate=regen,\n write=True,\n direc=direc,\n random_seed=seed,\n )\n\n\[email protected](\n context_settings={ # Doing this allows arbitrary options to override config\n \"ignore_unknown_options\": True,\n \"allow_extra_args\": True,\n }\n)\[email protected](\"redshift\", type=float)\[email protected](\n \"-p\",\n \"--prev_z\",\n type=float,\n default=None,\n help=\"Previous redshift (the spin temperature data must already exist for this redshift)\",\n)\[email protected](\n \"--config\",\n type=click.Path(exists=True, dir_okay=False),\n default=None,\n help=\"Path to the configuration file (default ~/.21cmfast/runconfig_single.yml)\",\n)\[email protected](\n \"--regen/--no-regen\",\n default=False,\n help=\"Whether to force regeneration of init/perturb files if they already exist.\",\n)\[email protected](\n \"--direc\",\n type=click.Path(exists=True, dir_okay=True),\n default=None,\n help=\"directory to write data and plots to -- must exist.\",\n)\[email protected](\n \"--seed\",\n type=int,\n default=None,\n help=\"specify a random seed for the initial conditions\",\n)\[email protected]_context\ndef spin(ctx, redshift, prev_z, config, regen, direc, seed):\n \"\"\"Run spin_temperature at the specified redshift, saving results to file.\n\n Parameters\n ----------\n ctx :\n A parameter from the parent CLI function to be able to override config.\n redshift : float\n The redshift to generate the field at.\n prev_z : float\n The redshift of a previous box from which to evolve to the current one.\n config : str\n Path to the configuration file.\n regen : bool\n Whether to regenerate all data, even if found in cache.\n direc : str\n Where to search for cached items.\n seed : int\n Random seed used to generate data.\n \"\"\"\n cfg = _get_config(config)\n\n # Set user/cosmo params from config.\n user_params = lib.UserParams(**cfg.get(\"user_params\", {}))\n cosmo_params = lib.CosmoParams(**cfg.get(\"cosmo_params\", {}))\n flag_options = lib.FlagOptions(\n 
**cfg.get(\"flag_options\", {}), USE_VELS_AUX=user_params.USE_RELATIVE_VELOCITIES\n )\n astro_params = lib.AstroParams(\n **cfg.get(\"astro_params\", {}), INHOMO_RECO=flag_options.INHOMO_RECO\n )\n\n _override(ctx, user_params, cosmo_params, astro_params, flag_options)\n\n lib.spin_temperature(\n redshift=redshift,\n astro_params=astro_params,\n flag_options=flag_options,\n previous_spin_temp=prev_z,\n user_params=user_params,\n cosmo_params=cosmo_params,\n regenerate=regen,\n write=True,\n direc=direc,\n random_seed=seed,\n )\n\n\[email protected](\n context_settings={ # Doing this allows arbitrary options to override config\n \"ignore_unknown_options\": True,\n \"allow_extra_args\": True,\n }\n)\[email protected](\"redshift\", type=float)\[email protected](\n \"-p\",\n \"--prev_z\",\n type=float,\n default=None,\n help=\"Previous redshift (the ionized box data must already exist for this redshift)\",\n)\[email protected](\n \"--config\",\n type=click.Path(exists=True, dir_okay=False),\n default=None,\n help=\"Path to the configuration file (default ~/.21cmfast/runconfig_single.yml)\",\n)\[email protected](\n \"--regen/--no-regen\",\n default=False,\n help=\"Whether to force regeneration of init/perturb files if they already exist.\",\n)\[email protected](\n \"--direc\",\n type=click.Path(exists=True, dir_okay=True),\n default=None,\n help=\"directory to write data and plots to -- must exist.\",\n)\[email protected](\n \"--seed\",\n type=int,\n default=None,\n help=\"specify a random seed for the initial conditions\",\n)\[email protected]_context\ndef ionize(ctx, redshift, prev_z, config, regen, direc, seed):\n \"\"\"Run 21cmFAST ionize_box at the specified redshift, saving results to file.\n\n Parameters\n ----------\n ctx :\n A parameter from the parent CLI function to be able to override config.\n redshift : float\n The redshift to generate the field at.\n prev_z : float\n The redshift of a previous box from which to evolve to the current one.\n config : str\n Path to the configuration file.\n regen : bool\n Whether to regenerate all data, even if found in cache.\n direc : str\n Where to search for cached items.\n seed : int\n Random seed used to generate data.\n \"\"\"\n cfg = _get_config(config)\n\n # Set user/cosmo params from config.\n user_params = lib.UserParams(**cfg.get(\"user_params\", {}))\n cosmo_params = lib.CosmoParams(**cfg.get(\"cosmo_params\", {}))\n flag_options = lib.FlagOptions(\n **cfg.get(\"flag_options\", {}), USE_VELS_AUX=user_params.USE_RELATIVE_VELOCITIES\n )\n astro_params = lib.AstroParams(\n **cfg.get(\"astro_params\", {}), INHOMO_RECO=flag_options.INHOMO_RECO\n )\n\n _override(ctx, user_params, cosmo_params, astro_params, flag_options)\n\n lib.ionize_box(\n redshift=redshift,\n astro_params=astro_params,\n flag_options=flag_options,\n previous_ionize_box=prev_z,\n user_params=user_params,\n cosmo_params=cosmo_params,\n regenerate=regen,\n write=True,\n direc=direc,\n random_seed=seed,\n )\n\n\[email protected](\n context_settings={ # Doing this allows arbitrary options to override config\n \"ignore_unknown_options\": True,\n \"allow_extra_args\": True,\n }\n)\[email protected](\"redshift\", type=str)\[email protected](\n \"--config\",\n type=click.Path(exists=True, dir_okay=False),\n default=None,\n help=\"Path to the configuration file (default ~/.21cmfast/runconfig_single.yml)\",\n)\[email protected](\n \"--out\",\n type=click.Path(dir_okay=True, file_okay=True),\n default=None,\n help=\"Path to output full Coeval simulation to (directory 
OK).\",\n)\[email protected](\n \"--regen/--no-regen\",\n default=False,\n help=\"Whether to force regeneration of init/perturb files if they already exist.\",\n)\[email protected](\n \"--direc\",\n type=click.Path(exists=True, dir_okay=True),\n default=None,\n help=\"cache directory\",\n)\[email protected](\n \"--seed\",\n type=int,\n default=None,\n help=\"specify a random seed for the initial conditions\",\n)\[email protected]_context\ndef coeval(ctx, redshift, config, out, regen, direc, seed):\n \"\"\"Efficiently generate coeval cubes at a given redshift.\n\n Parameters\n ----------\n ctx :\n A parameter from the parent CLI function to be able to override config.\n redshift : float\n The redshift to generate the field at.\n config : str\n Path to the configuration file.\n regen : bool\n Whether to regenerate all data, even if found in cache.\n direc : str\n Where to search for cached items.\n seed : int\n Random seed used to generate data.\n \"\"\"\n if out is not None:\n out = Path(out).absolute()\n if len(out.suffix) not in (2, 3) and not out.exists():\n out.mkdir()\n elif not out.parent.exists():\n out.parent.mkdir()\n\n try:\n redshift = [float(z.strip()) for z in redshift.split(\",\")]\n except TypeError:\n raise TypeError(\"redshift argument must be comma-separated list of values.\")\n\n cfg = _get_config(config)\n\n # Set user/cosmo params from config.\n user_params = lib.UserParams(**cfg.get(\"user_params\", {}))\n cosmo_params = lib.CosmoParams(**cfg.get(\"cosmo_params\", {}))\n flag_options = lib.FlagOptions(\n **cfg.get(\"flag_options\", {}), USE_VELS_AUX=user_params.USE_RELATIVE_VELOCITIES\n )\n astro_params = lib.AstroParams(\n **cfg.get(\"astro_params\", {}), INHOMO_RECO=flag_options.INHOMO_RECO\n )\n\n _override(ctx, user_params, cosmo_params, astro_params, flag_options)\n\n coeval = lib.run_coeval(\n redshift=redshift,\n astro_params=astro_params,\n flag_options=flag_options,\n user_params=user_params,\n cosmo_params=cosmo_params,\n regenerate=regen,\n write=True,\n direc=direc,\n random_seed=seed,\n )\n\n if out:\n for i, (z, c) in enumerate(zip(redshift, coeval)):\n if out.is_dir():\n fname = out / c.get_unique_filename()\n elif len(redshift) == 1:\n fname = out\n else:\n out = out.parent / f\"{out.name}_z{z}{out.suffix}\"\n c.save(fname)\n\n print(f\"Saved Coeval box to {fname}.\")\n\n\[email protected](\n context_settings={ # Doing this allows arbitrary options to override config\n \"ignore_unknown_options\": True,\n \"allow_extra_args\": True,\n }\n)\[email protected](\"redshift\", type=float)\[email protected](\n \"--config\",\n type=click.Path(exists=True, dir_okay=False),\n default=None,\n help=\"Path to the configuration file (default ~/.21cmfast/runconfig_single.yml)\",\n)\[email protected](\n \"--out\",\n type=click.Path(dir_okay=True, file_okay=True),\n default=None,\n help=\"Path to output full Lightcone to (directory OK).\",\n)\[email protected](\n \"--regen/--no-regen\",\n default=False,\n help=\"Whether to force regeneration of init/perturb files if they already exist.\",\n)\[email protected](\n \"--direc\",\n type=click.Path(exists=True, dir_okay=True),\n default=None,\n help=\"directory to write data and plots to -- must exist.\",\n)\[email protected](\n \"-X\",\n \"--max-z\",\n type=float,\n default=None,\n help=\"maximum redshift of the stored lightcone data\",\n)\[email protected](\n \"--seed\",\n type=int,\n default=None,\n help=\"specify a random seed for the initial conditions\",\n)\[email protected]_context\ndef lightcone(ctx, redshift, config, 
out, regen, direc, max_z, seed):\n \"\"\"Efficiently generate coeval cubes at a given redshift.\n\n Parameters\n ----------\n ctx :\n A parameter from the parent CLI function to be able to override config.\n redshift : float\n The redshift to generate the field at.\n config : str\n Path to the configuration file.\n regen : bool\n Whether to regenerate all data, even if found in cache.\n direc : str\n Where to search for cached items.\n max_z : float\n Maximum redshift to include in the produced lightcone.\n seed : int\n Random seed used to generate data.\n \"\"\"\n cfg = _get_config(config)\n\n if out is not None:\n out = Path(out).absolute()\n if len(out.suffix) not in (2, 3) and not out.exists():\n out.mkdir()\n elif not out.parent.exists():\n out.parent.mkdir()\n\n # Set user/cosmo params from config.\n user_params = lib.UserParams(**cfg.get(\"user_params\", {}))\n cosmo_params = lib.CosmoParams(**cfg.get(\"cosmo_params\", {}))\n flag_options = lib.FlagOptions(\n **cfg.get(\"flag_options\", {}), USE_VELS_AUX=user_params.USE_RELATIVE_VELOCITIES\n )\n astro_params = lib.AstroParams(\n **cfg.get(\"astro_params\", {}), INHOMO_RECO=flag_options.INHOMO_RECO\n )\n\n _override(ctx, user_params, cosmo_params, astro_params, flag_options)\n\n lc = lib.run_lightcone(\n redshift=redshift,\n max_redshift=max_z,\n astro_params=astro_params,\n flag_options=flag_options,\n user_params=user_params,\n cosmo_params=cosmo_params,\n regenerate=regen,\n write=True,\n direc=direc,\n random_seed=seed,\n )\n\n if out:\n fname = out / lc.get_unique_filename() if out.is_dir() else out\n lc.save(fname)\n\n print(f\"Saved Lightcone to {fname}.\")\n\n\ndef _query(direc=None, kind=None, md5=None, seed=None, clear=False):\n cls = list(\n cache_tools.query_cache(direc=direc, kind=kind, hsh=md5, seed=seed, show=False)\n )\n\n if not clear:\n print(\"%s Data Sets Found:\" % len(cls))\n print(\"------------------\")\n else:\n print(\"Removing %s data sets...\" % len(cls))\n\n for file, c in cls:\n if not clear:\n print(\" @ {%s}:\" % file)\n print(\" %s\" % str(c))\n\n print()\n\n else:\n direc = direc or path.expanduser(_cfg.config[\"direc\"])\n remove(path.join(direc, file))\n\n\[email protected]()\[email protected](\n \"-d\",\n \"--direc\",\n type=click.Path(exists=True, dir_okay=True),\n default=None,\n help=\"directory to write data and plots to -- must exist.\",\n)\[email protected](\"-k\", \"--kind\", type=str, default=None, help=\"filter by kind of data.\")\[email protected](\"-m\", \"--md5\", type=str, default=None, help=\"filter by md5 hsh\")\[email protected](\"-s\", \"--seed\", type=str, default=None, help=\"filter by random seed\")\[email protected](\n \"--clear/--no-clear\",\n default=False,\n help=\"remove all data sets returned by this query.\",\n)\ndef query(direc, kind, md5, seed, clear):\n \"\"\"Query the cache database.\n\n Parameters\n ----------\n direc : str\n Directory in which to search for cache items\n kind : str\n Filter output by kind of box (eg. 
InitialConditions)\n md5 : str\n Filter output by hsh\n seed : float\n Filter output by random seed.\n clear : bool\n Remove all data sets returned by the query.\n \"\"\"\n _query(direc, kind, md5, seed, clear)\n\n\[email protected]()\[email protected](\"param\", type=str)\[email protected](\"value\", type=str)\[email protected](\n \"-s\",\n \"--struct\",\n type=click.Choice([\"flag_options\", \"cosmo_params\", \"user_params\", \"astro_params\"]),\n default=\"flag_options\",\n help=\"struct in which the new feature exists\",\n)\[email protected](\n \"-t\",\n \"--vtype\",\n type=click.Choice([\"bool\", \"float\", \"int\"]),\n default=\"bool\",\n help=\"type of the new parameter\",\n)\[email protected](\n \"-l/-c\",\n \"--lightcone/--coeval\",\n default=True,\n help=\"whether to use a lightcone for comparison\",\n)\[email protected](\n \"-z\", \"--redshift\", type=float, default=6.0, help=\"redshift of the comparison boxes\"\n)\[email protected](\n \"-Z\",\n \"--max-redshift\",\n type=float,\n default=30,\n help=\"maximum redshift of the comparison lightcone\",\n)\[email protected](\"-r\", \"--random-seed\", type=int, default=12345, help=\"random seed to use\")\[email protected](\"-v\", \"--verbose\", count=True)\[email protected](\n \"-g/-G\",\n \"--regenerate/--cache\",\n default=True,\n help=\"whether to regenerate the boxes\",\n)\ndef pr_feature(\n param,\n value,\n struct,\n vtype,\n lightcone,\n redshift,\n max_redshift,\n random_seed,\n verbose,\n regenerate,\n):\n \"\"\"\n Create standard plots comparing a default simulation against a simulation with a new feature.\n\n The new feature is switched on by setting PARAM to VALUE.\n Plots are saved in the current directory, with the prefix \"pr_feature\".\n\n Parameters\n ----------\n param : str\n Name of the parameter to modify to \"switch on\" the feature.\n value : float\n Value to which to set it.\n struct : str\n The input parameter struct to which `param` belongs.\n vtype : str\n Type of the new parameter.\n lightcone : bool\n Whether the comparison should be done on a lightcone.\n redshift : float\n Redshift of comparison.\n max_redshift : float\n If using a lightcone, the maximum redshift in the lightcone to compare.\n random_seed : int\n Random seed at which to compare.\n verbose : int\n How verbose the output should be.\n regenerate : bool\n Whether to regenerate all data, even if it is in cache.\n \"\"\"\n import powerbox\n\n lvl = [logging.WARNING, logging.INFO, logging.DEBUG][verbose]\n logger = logging.getLogger(\"21cmFAST\")\n logger.setLevel(lvl)\n value = getattr(builtins, vtype)(value)\n\n structs = {\n \"user_params\": {\"HII_DIM\": 128, \"BOX_LEN\": 250},\n \"flag_options\": {\"USE_TS_FLUCT\": True},\n \"cosmo_params\": {},\n \"astro_params\": {},\n }\n\n if lightcone:\n print(\"Running default lightcone...\")\n lc_default = lib.run_lightcone(\n redshift=redshift,\n max_redshift=max_redshift,\n random_seed=random_seed,\n regenerate=regenerate,\n **structs,\n )\n structs[struct][param] = value\n\n print(\"Running lightcone with new feature...\")\n lc_new = lib.run_lightcone(\n redshift=redshift,\n max_redshift=max_redshift,\n random_seed=random_seed,\n regenerate=regenerate,\n **structs,\n )\n\n print(\"Plotting lightcone slices...\")\n for field in [\"brightness_temp\"]:\n fig, ax = plt.subplots(3, 1, sharex=True, sharey=True)\n\n vmin = -150\n vmax = 30\n\n plotting.lightcone_sliceplot(\n lc_default, ax=ax[0], fig=fig, vmin=vmin, vmax=vmax\n )\n ax[0].set_title(\"Default\")\n\n plotting.lightcone_sliceplot(\n lc_new, 
ax=ax[1], fig=fig, cbar=False, vmin=vmin, vmax=vmax\n )\n ax[1].set_title(\"New\")\n\n plotting.lightcone_sliceplot(\n lc_default, lightcone2=lc_new, cmap=\"bwr\", ax=ax[2], fig=fig\n )\n ax[2].set_title(\"Difference\")\n\n plt.savefig(f\"pr_feature_lighcone_2d_{field}.pdf\")\n\n def rms(x, axis=None):\n return np.sqrt(np.mean(x ** 2, axis=axis))\n\n print(\"Plotting lightcone history...\")\n fig, ax = plt.subplots(4, 1, sharex=True, gridspec_kw={\"hspace\": 0.05})\n ax[0].plot(lc_default.node_redshifts, lc_default.global_xHI, label=\"Default\")\n ax[0].plot(lc_new.node_redshifts, lc_new.global_xHI, label=\"New\")\n ax[0].set_ylabel(r\"$x_{\\rm HI}$\")\n ax[0].legend()\n\n ax[1].plot(\n lc_default.node_redshifts,\n lc_default.global_brightness_temp,\n label=\"Default\",\n )\n ax[1].plot(lc_new.node_redshifts, lc_new.global_brightness_temp, label=\"New\")\n ax[1].set_ylabel(\"$T_b$ [K]\")\n ax[3].set_xlabel(\"z\")\n\n rms_diff = rms(lc_default.brightness_temp, axis=(0, 1)) - rms(\n lc_new.brightness_temp, axis=(0, 1)\n )\n ax[2].plot(lc_default.lightcone_redshifts, rms_diff, label=\"RMS\")\n ax[2].plot(\n lc_new.node_redshifts,\n lc_default.global_xHI - lc_new.global_xHI,\n label=\"$x_{HI}$\",\n )\n ax[2].plot(\n lc_new.node_redshifts,\n lc_default.global_brightness_temp - lc_new.global_brightness_temp,\n label=\"$T_b$\",\n )\n ax[2].legend()\n ax[2].set_ylabel(\"Differences\")\n\n diff_rms = rms(lc_default.brightness_temp - lc_new.brightness_temp, axis=(0, 1))\n ax[3].plot(lc_default.lightcone_redshifts, diff_rms)\n ax[3].set_ylabel(\"RMS of Diff.\")\n\n plt.savefig(\"pr_feature_history.pdf\")\n\n print(\"Plotting power spectra history...\")\n p_default = []\n p_new = []\n z = []\n thickness = 200 # Mpc\n ncells = int(thickness / lc_new.cell_size)\n chunk_size = lc_new.cell_size * ncells\n start = 0\n print(ncells)\n while start + ncells <= lc_new.shape[-1]:\n pd, k = powerbox.get_power(\n lc_default.brightness_temp[:, :, start : start + ncells],\n lc_default.lightcone_dimensions[:2] + (chunk_size,),\n )\n p_default.append(pd)\n\n pn, k = powerbox.get_power(\n lc_new.brightness_temp[:, :, start : start + ncells],\n lc_new.lightcone_dimensions[:2] + (chunk_size,),\n )\n p_new.append(pn)\n z.append(lc_new.lightcone_redshifts[start])\n\n start += ncells\n\n p_default = np.array(p_default).T\n p_new = np.array(p_new).T\n\n fig, ax = plt.subplots(2, 1, sharex=True)\n ax[0].set_yscale(\"log\")\n\n inds = [\n np.where(np.abs(k - 0.1) == np.abs(k - 0.1).min())[0][0],\n np.where(np.abs(k - 0.2) == np.abs(k - 0.2).min())[0][0],\n np.where(np.abs(k - 0.5) == np.abs(k - 0.5).min())[0][0],\n np.where(np.abs(k - 1) == np.abs(k - 1).min())[0][0],\n ]\n\n for i, (pdef, pnew, kk) in enumerate(\n zip(p_default[inds], p_new[inds], k[inds])\n ):\n ax[0].plot(z, pdef, ls=\"--\", label=f\"k={kk:.2f}\", color=f\"C{i}\")\n ax[0].plot(z, pnew, ls=\"-\", color=f\"C{i}\")\n ax[1].plot(z, np.log10(pdef / pnew), ls=\"-\", color=f\"C{i}\")\n ax[1].set_xlabel(\"z\")\n ax[0].set_ylabel(r\"$\\Delta^2 [{\\rm mK}^2]$\")\n ax[1].set_ylabel(r\"log ratio of $\\Delta^2 [{\\rm mK}^2]$\")\n ax[0].legend()\n\n plt.savefig(\"pr_feature_power_history.pdf\")\n\n else:\n raise NotImplementedError()\n"
] |
[
[
"numpy.abs",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.savefig",
"numpy.log10",
"numpy.mean",
"numpy.array"
]
] |
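The CLI module above folds unknown command-line options into a plain dict so they can override config and global parameters. A small self-contained sketch of that parsing idea, with a hypothetical helper name and no click dependency:

def extra_args_to_dict(args):
    # Accepts both "--key=value" and "--key value" forms, mirroring the
    # two branches of the CLI's extra-argument handling.
    out, i = {}, 0
    while i < len(args):
        arg = args[i]
        if "=" in arg:
            key, _, val = arg.partition("=")
            out[key.lstrip("-")] = val
            i += 1
        else:
            out[arg.lstrip("-")] = args[i + 1]
            i += 2
    return out

print(extra_args_to_dict(["--HII_DIM=64", "--SIGMA_8", "0.81"]))
# {'HII_DIM': '64', 'SIGMA_8': '0.81'}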
Afafabdb/Instahawk
|
[
"43e380980968fdf9102dc439161fa2fe288e488a"
] |
[
"app.py"
] |
[
"import instaloader\nfrom flask import Flask, render_template, request, jsonify\nfrom instaloader import Profile, Post\nfrom vaderSentiment.vaderSentiment import SentimentIntensityAnalyzer\nimport numpy as np\n\n\napp = Flask(__name__)\nloader = instaloader.Instaloader()\n\n\[email protected]('/', methods=['GET'])\ndef home():\n return render_template('home.html')\n\ndef getSentiments(captions):\n if len(captions) > 0 and type(captions) == list:\n analyser = SentimentIntensityAnalyzer()\n neutral = []\n positive = []\n negative = []\n compound = []\n\n for caption in captions:\n neutral.append(analyser.polarity_scores(caption)['neu'])\n positive.append(analyser.polarity_scores(caption)['pos'])\n negative.append(analyser.polarity_scores(caption)['neg'])\n compound.append(analyser.polarity_scores(caption)['compound'])\n\n positive = np.array(positive)\n negative = np.array(negative)\n neutral = np.array(neutral)\n compound = np.array(compound)\n\n return {\n 'Neutral':round(neutral.mean(),2)*100.0,\n 'Positive':round(positive.mean(),2)*100.0,\n 'Negative':round(negative.mean(), 2) * 100.0,\n 'Compound':round(compound.mean(), 2) * 100.0\n }\n else:\n return '{\"Negative\":0.0,\"Neutral\":0.0,\"Overall\":0.0,\"Positive\":0.0}'\n\[email protected]('/checkPrivacy/', methods=['GET'])\ndef checkAccountPrivacy():\n username = request.args['username']\n profile = Profile.from_username(loader.context, username)\n privacy = str(profile.is_private)\n return privacy\n\n\n\[email protected]('/basicProfile/', methods=['GET'])\ndef getBasicPublicProfile():\n username = request.args['username']\n profile = Profile.from_username(loader.context, username)\n\n profileInformation = []\n profileInformation.append(profile.full_name)\n profileInformation.append(profile.get_profile_pic_url())\n\n\n information = {\n \"name\": profile.full_name,\n \"profilePicture\": profile.get_profile_pic_url(),\n \"bio\": profile.biography,\n \"followers\": profile.followers,\n \"following\": profile.followees,\n }\n\n return jsonify(information)\n\[email protected]('/deepProfile/', methods=['GET'])\ndef getDeepPublicProfile():\n username = request.args['username']\n profile = Profile.from_username(loader.context, username)\n\n profileInformation = []\n profileInformation.append(profile.full_name)\n profileInformation.append(profile.get_profile_pic_url())\n\n postsWithHashtags = 0\n likesTotal = 0\n videoPosts = 0\n\n captions = []\n for post in profile.get_posts():\n if post.caption != None:\n captions.append(post.caption)\n if post.caption_hashtags != None:\n postsWithHashtags = postsWithHashtags + 1\n if post.likes != None:\n likesTotal = likesTotal + post.likes\n if post.is_video:\n videoPosts = videoPosts + 1\n\n information = {\n \"name\": profile.full_name,\n \"profilePicture\": profile.get_profile_pic_url(),\n \"bio\": profile.biography,\n \"followers\": profile.followers,\n \"following\": profile.followees,\n \"totalProfileLikes\": likesTotal,\n \"totalVideoPosts\": videoPosts,\n \"postsWithHashtags\": postsWithHashtags,\n \"captionSentiments\": getSentiments(captions),\n }\n\n return jsonify(information)\n\n\n\n\n"
] |
[
[
"numpy.array"
]
] |
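The Flask app above averages VADER polarity scores over a profile's post captions. A reduced sketch of that aggregation, assuming the vaderSentiment package is installed; the function name and example captions are illustrative only:

import numpy as np
from vaderSentiment.vaderSentiment import SentimentIntensityAnalyzer

def mean_sentiments(captions):
    analyser = SentimentIntensityAnalyzer()
    scores = [analyser.polarity_scores(c) for c in captions]
    # Average each VADER component over all captions and report it in percent.
    return {key: round(float(np.mean([s[key] for s in scores])), 2) * 100.0
            for key in ("neu", "pos", "neg", "compound")}

print(mean_sentiments(["I love this photo!", "This is terrible."]))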
jakeatmsft/MachineLearningNotebooks
|
[
"f918280668e6d4f6ff98dc2c9ea04527b716fc60"
] |
[
"how-to-use-azureml/training-with-deep-learning/train-hyperparameter-tune-deploy-with-keras/keras_mnist.py"
] |
[
"# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License.\n\nimport numpy as np\nimport argparse\nimport os\nimport glob\n\nimport matplotlib.pyplot as plt\nimport tensorflow as tf\n\nfrom tensorflow.keras.models import Sequential, model_from_json\nfrom tensorflow.keras.layers import Dense\nfrom tensorflow.keras.optimizers import RMSprop\nfrom tensorflow.keras.callbacks import Callback\n\n\n\nfrom azureml.core import Run\nfrom utils import load_data, one_hot_encode\n\nprint(\"Tensorflow version:\", tf.__version__)\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--data-folder', type=str, dest='data_folder', default='data', help='data folder mounting point')\nparser.add_argument('--batch-size', type=int, dest='batch_size', default=50, help='mini batch size for training')\nparser.add_argument('--first-layer-neurons', type=int, dest='n_hidden_1', default=100,\n help='# of neurons in the first layer')\nparser.add_argument('--second-layer-neurons', type=int, dest='n_hidden_2', default=100,\n help='# of neurons in the second layer')\nparser.add_argument('--learning-rate', type=float, dest='learning_rate', default=0.001, help='learning rate')\n\nargs = parser.parse_args()\n\ndata_folder = args.data_folder\n\nprint('training dataset is stored here:', data_folder)\n\nX_train_path = glob.glob(os.path.join(data_folder, '**/train-images-idx3-ubyte.gz'), recursive=True)[0]\nX_test_path = glob.glob(os.path.join(data_folder, '**/t10k-images-idx3-ubyte.gz'), recursive=True)[0]\ny_train_path = glob.glob(os.path.join(data_folder, '**/train-labels-idx1-ubyte.gz'), recursive=True)[0]\ny_test_path = glob.glob(os.path.join(data_folder, '**/t10k-labels-idx1-ubyte.gz'), recursive=True)[0]\n\nX_train = load_data(X_train_path, False) / 255.0\nX_test = load_data(X_test_path, False) / 255.0\ny_train = load_data(y_train_path, True).reshape(-1)\ny_test = load_data(y_test_path, True).reshape(-1)\n\ntraining_set_size = X_train.shape[0]\n\nn_inputs = 28 * 28\nn_h1 = args.n_hidden_1\nn_h2 = args.n_hidden_2\nn_outputs = 10\nn_epochs = 20\nbatch_size = args.batch_size\nlearning_rate = args.learning_rate\n\ny_train = one_hot_encode(y_train, n_outputs)\ny_test = one_hot_encode(y_test, n_outputs)\nprint(X_train.shape, y_train.shape, X_test.shape, y_test.shape, sep='\\n')\n\n# Build a simple MLP model\nmodel = Sequential()\n# first hidden layer\nmodel.add(Dense(n_h1, activation='relu', input_shape=(n_inputs,)))\n# second hidden layer\nmodel.add(Dense(n_h2, activation='relu'))\n# output layer\nmodel.add(Dense(n_outputs, activation='softmax'))\n\nmodel.summary()\n\nmodel.compile(loss='categorical_crossentropy',\n optimizer=RMSprop(lr=learning_rate),\n metrics=['accuracy'])\n\n# start an Azure ML run\nrun = Run.get_context()\n\n\nclass LogRunMetrics(Callback):\n # callback at the end of every epoch\n def on_epoch_end(self, epoch, log):\n # log a value repeated which creates a list\n run.log('Loss', log['val_loss'])\n run.log('Accuracy', log['val_accuracy'])\n\n\nhistory = model.fit(X_train, y_train,\n batch_size=batch_size,\n epochs=n_epochs,\n verbose=2,\n validation_data=(X_test, y_test),\n callbacks=[LogRunMetrics()])\n\nscore = model.evaluate(X_test, y_test, verbose=0)\n\n# log a single value\nrun.log(\"Final test loss\", score[0])\nprint('Test loss:', score[0])\n\nrun.log('Final test accuracy', score[1])\nprint('Test accuracy:', score[1])\n\nplt.figure(figsize=(6, 3))\nplt.title('MNIST with Keras MLP ({} epochs)'.format(n_epochs), 
fontsize=14)\nplt.plot(history.history['val_accuracy'], 'b-', label='Accuracy', lw=4, alpha=0.5)\nplt.plot(history.history['val_loss'], 'r--', label='Loss', lw=4, alpha=0.5)\nplt.legend(fontsize=12)\nplt.grid(True)\n\n# log an image\nrun.log_image('Accuracy vs Loss', plot=plt)\n\n# create a ./outputs/model folder in the compute target\n# files saved in the \"./outputs\" folder are automatically uploaded into run history\nos.makedirs('./outputs/model', exist_ok=True)\n\n# serialize NN architecture to JSON\nmodel_json = model.to_json()\n# save model JSON\nwith open('./outputs/model/model.json', 'w') as f:\n f.write(model_json)\n# save model weights\nmodel.save_weights('./outputs/model/model.h5')\nprint(\"model saved in ./outputs/model folder\")\n"
] |
[
[
"matplotlib.pyplot.legend",
"tensorflow.keras.layers.Dense",
"tensorflow.keras.optimizers.RMSprop",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.grid",
"tensorflow.keras.models.Sequential",
"matplotlib.pyplot.figure"
]
] |
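The training script above logs validation loss and accuracy from a Keras callback at the end of every epoch. A dependency-free sketch of the same callback pattern, with the Azure ML run.log call swapped for a print; the class name is hypothetical:

import tensorflow as tf

class PrintValMetrics(tf.keras.callbacks.Callback):
    # Same per-epoch hook the script uses; values are printed instead of
    # being sent to an Azure ML run.
    def on_epoch_end(self, epoch, logs=None):
        logs = logs or {}
        print(f"epoch {epoch}: val_loss={logs.get('val_loss')}, "
              f"val_accuracy={logs.get('val_accuracy')}")

# Usage: model.fit(..., validation_data=(X_val, y_val), callbacks=[PrintValMetrics()])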
Tung-I/nips2019_template
|
[
"a1fcf35b7633d192d2706a533731cb8c457ac230"
] |
[
"src/runner/predictors/vipcup_seg_predictor.py"
] |
[
"import csv\nimport torch\nimport logging\nimport numpy as np\nimport nibabel as nib\nfrom tqdm import tqdm\n\nfrom src.data.transforms import compose\n\n\nclass VIPCUPSegPredictor(object):\n \"\"\"The predictor for the VIPCUP 2018 segmentation task.\n Args:\n data_dir (Path): The directory of the saved data.\n data_split_csv (str): The path of the training and validation data split csv file.\n preprocessings (list of Box): The preprocessing techniques applied to the testing data.\n transforms (list of Box): The preprocessing techniques applied to the data.\n sample_size (tuple): The window size of each sampled data.\n shift (tuple): The shift distance between two contiguous samples.\n device (torch.device): The device.\n net (BaseNet): The network architecture.\n metric_fns (list of torch.nn.Module): The metric functions.\n saved_dir (str): The directory to save the predicted videos, images and metrics (default: None).\n exported (bool): Whether to export the predicted video, images and metrics (default: False).\n \"\"\"\n def __init__(self, data_dir, data_split_csv, preprocessings, transforms, sample_size, shift,\n device, net, metric_fns, saved_dir=None, exported=None):\n self.data_dir = data_dir\n self.data_split_csv = data_split_csv\n self.preprocessings = compose(preprocessings)\n self.transforms = compose(transforms)\n self.sample_size = sample_size\n self.shift = shift\n self.device = device\n self.net = net\n self.metric_fns = metric_fns\n self.saved_dir = saved_dir\n self.exported = exported\n self.log = self._init_log()\n\n def _init_log(self):\n \"\"\"Initialize the log.\n Returns:\n log (dict): The initialized log.\n \"\"\"\n log = {}\n for metric in self.metric_fns:\n if metric.__class__.__name__ == 'Dice':\n for i in range(self.net.out_channels):\n log[f'Dice_{i}'] = 0\n elif metric.__class__.__name__ == 'FalseNegativeSize':\n for i in range(self.net.out_channels-1):\n log[f'FalseNegativeSize_{i+1}'] = []\n else:\n log[metric.__class__.__name__] = 0\n return log\n\n def _update_log(self, log, metrics):\n \"\"\"Update the log.\n Args:\n log (dict): The log to be updated.\n metrics (list of torch.Tensor): The computed metrics.\n \"\"\"\n for metric, _metric in zip(self.metric_fns, metrics):\n if metric.__class__.__name__ == 'Dice':\n for i, class_score in enumerate(_metric):\n log[f'Dice_{i}'] += class_score.item()\n elif metric.__class__.__name__ == 'FalseNegativeSize':\n for i, class_score in enumerate(_metric):\n log[f'FalseNegativeSize_{i+1}'] += class_score\n else:\n log[metric.__class__.__name__] += _metric.item()\n\n def predict(self):\n data_paths = []\n count = 0\n self.net.eval()\n # Create the testing data path list\n with open(self.data_split_csv, \"r\") as f:\n rows = csv.reader(f)\n for case_name, split_type in rows:\n if split_type == 'Validation':\n image_path = self.data_dir / f'{case_name[:5]}-{case_name[5:]}image.nii.gz'\n label_path = self.data_dir / f'{case_name[:5]}-{case_name[5:]}label_GTV1.nii.gz'\n data_paths.append([image_path, label_path])\n\n # Initital the list for saving metrics as a csv file\n header = ['name']\n for metric in self.metric_fns:\n if metric.__class__.__name__ == 'Dice':\n for i in range(self.net.out_channels):\n header += [f'Dice_{i}']\n elif metric.__class__.__name__ == 'FalseNegativeSize':\n # From class 1 to class n\n for i in range(self.net.out_channels-1):\n header += [f'FalseNegativeSize_{i+1}']\n else:\n header += [metric.__class__.__name__]\n results = [header]\n\n if self.exported:\n csv_path = self.saved_dir / 
'results.csv'\n output_dir = self.saved_dir / 'prediction'\n if not output_dir.is_dir():\n output_dir.mkdir(parents=True)\n\n trange = tqdm(data_paths, total=len(data_paths), desc='testing')\n for data_path in trange:\n image_path, label_path = data_path\n image, label = nib.load(str(image_path)).get_fdata(), nib.load(str(label_path)).get_fdata()\n data_shape = list(image.shape)\n\n image, label = self.preprocessings(image[..., None], label[..., None], normalize_tags=[True, False])\n image, label = self.transforms(image, label, dtypes=[torch.float, torch.long])\n image, label = image.permute(3, 2, 0, 1).contiguous().to(self.device), label.permute(3, 2, 0, 1).contiguous().to(self.device)\n prediction = torch.zeros(1, self.net.out_channels, *image.shape[1:], dtype=torch.float32).to(self.device)\n pixel_count = torch.zeros(1, self.net.out_channels, *image.shape[1:], dtype=torch.float32).to(self.device)\n\n # Get the coordinated of each sampled volume\n starts, ends = [], []\n for k in range(0, data_shape[2], self.shift[2]):\n for j in range(0, data_shape[1], self.shift[1]):\n for i in range(0, data_shape[0], self.shift[0]):\n ends.append([min(i+self.sample_size[0], data_shape[0]), \\\n min(j+self.sample_size[1], data_shape[1]), \\\n min(k+self.sample_size[2], data_shape[2])])\n starts.append([ends[-1][i]-self.sample_size[i] for i in range(len(data_shape))])\n\n # Get the prediction and calculate the average of the overlapped area\n for start, end in zip(starts, ends):\n input = image[:, start[2]:end[2], start[0]:end[0], start[1]:end[1]]\n with torch.no_grad():\n output = self.net(input.unsqueeze(dim=0))\n prediction[:, :, start[2]:end[2], start[0]:end[0], start[1]:end[1]] += output\n pixel_count[:, :, start[2]:end[2], start[0]:end[0], start[1]:end[1]] += 1.0\n prediction = prediction / pixel_count\n\n count += 1\n metrics = [metric(prediction, label.unsqueeze(dim=0)) for metric in self.metric_fns]\n self._update_log(self.log, metrics)\n\n # Export the prediction\n if self.exported:\n filename = str(image_path.parts[-1]).replace('image', 'pred')\n prediction = prediction.argmax(dim=1).squeeze().cpu().numpy().transpose(1, 2, 0)\n nib.save(nib.Nifti1Image(prediction, np.eye(4)), str(output_dir / filename))\n\n result = [filename]\n for metric, _metric in zip(self.metric_fns, metrics):\n if metric.__class__.__name__ == 'Dice':\n for i, class_score in enumerate(_metric):\n result.append(class_score.item())\n elif metric.__class__.__name__ == 'FalseNegativeSize':\n for i, class_score in enumerate(_metric):\n if len(class_score) == 0:\n result.append(0)\n else:\n result.append(np.mean(class_score))\n else:\n result.append(_metric.item())\n results.append([*result])\n\n dicts = {}\n for key, value in self.log.items():\n if 'FalseNegativeSize' in key:\n dicts[key] = f'{np.mean(value): .3f}' if len(value) > 0 else str(0.000)\n else:\n dicts[key] = f'{value / count: .3f}'\n trange.set_postfix(**dicts)\n\n for key in self.log:\n if 'FalseNegativeSize' in key:\n self.log[key] = np.mean(self.log[key]) if len(self.log[key]) > 0 else 0\n else:\n self.log[key] /= count\n\n if self.exported:\n with open(csv_path, 'w', newline='') as csvfile:\n writer = csv.writer(csvfile)\n writer.writerows(results)\n\n logging.info(f'Test log: {self.log}.')\n"
] |
[
[
"numpy.eye",
"torch.no_grad",
"numpy.mean",
"torch.zeros"
]
] |
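The predictor above tiles each volume with overlapping windows, accumulates the network output per voxel together with a hit count, and divides at the end. A 1-D sketch of that overlap-averaging scheme, assuming PyTorch; the helper name and toy inputs are illustrative:

import torch

def sliding_average(signal, window, shift, fn):
    out = torch.zeros_like(signal)
    hits = torch.zeros_like(signal)
    n = signal.shape[0]
    for start in range(0, n, shift):
        end = min(start + window, n)
        start = end - window          # clamp the final window to the boundary
        out[start:end] += fn(signal[start:end])
        hits[start:end] += 1.0
    return out / hits                 # average wherever windows overlapped

x = torch.arange(10, dtype=torch.float32)
print(sliding_average(x, window=4, shift=3, fn=lambda s: s * 2))  # equals 2 * x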
Velcon-Zheng/scPretrain
|
[
"e12081cbfeb9b014e7e780770ada3e816d2a856c"
] |
[
"fig.py"
] |
[
"import config\nimport re\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport matplotlib.ticker as ticker\n\nfrom tqdm import tqdm\nfrom matplotlib.pyplot import MultipleLocator\n\nfold=config.fold1\nfold2=config.fold2\n\nacc_pt,acc_ft=[[] for i in range(fold*fold2)],[[] for i in range(fold*fold2)]\nkappa_pt,kappa_ft=[[] for i in range(fold*fold2)],[[] for i in range(fold*fold2)]\nauprc_pt,auprc_ft=[[] for i in range(fold*fold2)],[[] for i in range(fold*fold2)]\nauroc_pt,auroc_ft=[[] for i in range(fold*fold2)],[[] for i in range(fold*fold2)]\n\n\n\nfor i in range(fold):\n for j in range(fold2):\n with open('result_out{m}_{n}.out'.format(m=str(i+10),n=str(j+1)),'r') as f:\n for l in f.readlines():\n line=re.split(r'[,,(,),\\s]',l)\n if line[0]=='pretrained:':\n acc_pt[i*fold2+j].append(float(line[1]))\n kappa_pt[i*fold2+j].append(float(line[3]))\n auprc_pt[i*fold2+j].append(float(line[6]))\n auroc_pt[i*fold2+j].append(float(line[8]))\n if line[0]=='no-pretrain:':\n acc_ft[i*fold2+j].append(float(line[1]))\n kappa_ft[i*fold2+j].append(float(line[3]))\n auprc_ft[i*fold2+j].append(float(line[6]))\n auroc_ft[i*fold2+j].append(float(line[8]))\n\n\nplt.figure(figsize=[5,5],dpi=600)\nx=np.linspace(0.6,1,100)\nplt.plot(x,x,color='grey',linestyle='--')\nline=np.zeros(l)+0.001\ncolor=['g' for i in range(l)]\nplt.scatter([np.mean([acc_ft[j][i] for j in range(fold)]) for i in range(l)],\n [np.mean([acc_pt[j][i] for j in range(fold)]) for i in range(l)],\n linewidths=line,c=color)\nfont={'size':30}\nplt.ylabel('scPretrain',fontdict=font)\nplt.xlabel('Without pre-training',fontdict=font)\nplt.gca().xaxis.set_major_locator(MultipleLocator(0.1))\nplt.xticks(fontsize=20)\nplt.yticks(fontsize=20)\nplt.title('Accuracy',fontsize=30)\nplt.savefig('figs/nn_acc.png')\n\nplt.figure(figsize=[5,5],dpi=600)\nx=np.linspace(0,1,100)\nplt.plot(x,x,color='grey',linestyle='--')\nl=len(acc_pt[0])\nline=np.zeros(l)+0.001\ncolor=['g' for i in range(l)]\nplt.scatter([np.mean([kappa_ft[j][i] for j in range(fold)]) for i in range(l)],\n [np.mean([kappa_pt[j][i] for j in range(fold)]) for i in range(l)],\n linewidths=line,c=color)\nfont={'size':30}\nplt.ylabel('scPretrain',fontdict=font)\nplt.xlabel('Without pre-training',fontdict=font)\nplt.xticks(fontsize=20)\nplt.yticks(fontsize=20)\nplt.title('Cohen\\'s Kappa',fontsize=30)\nplt.savefig('figs/nn_kappa.png')\n\nl=len(acc_pt[0])\nplt.figure(figsize=[5,5],dpi=600)\nx=np.linspace(0.3,1,100)\nplt.plot(x,x,color='grey',linestyle='--')\nline=np.zeros(l)+0.001\ncolor=['g' for i in range(l)]\nplt.scatter([np.mean([auprc_ft[j][i] for j in range(fold)]) for i in range(l)],\n [np.mean([auprc_pt[j][i] for j in range(fold)]) for i in range(l)],\n linewidths=line,c=color)\nfont={'size':30}\nplt.ylabel('scPretrain',fontdict=font)\nplt.xlabel('Without pre-training',fontdict=font)\nplt.xticks(fontsize=20)\nplt.yticks(fontsize=20)\nplt.title('AUPRC',fontsize=30)\nplt.savefig('figs/nn_auprc.png')\n\nplt.figure(figsize=[5,5],dpi=600)\nx=np.linspace(0.4,1,100)\nplt.plot(x,x,color='grey',linestyle='--')\nline=np.zeros(l)+0.001\ncolor=['g' for i in range(l)]\nplt.scatter([np.mean([auroc_ft[j][i] for j in range(fold)]) for i in range(l)],\n [np.mean([auroc_pt[j][i] for j in range(fold)]) for i in range(l)],\n linewidths=line,c=color)\nfont={'size':30}\nplt.ylabel('scPretrain',fontdict=font)\nplt.xlabel('Without pre-training',fontdict=font)\nplt.xticks(fontsize=20)\nplt.yticks(fontsize=20)\nplt.title('AUROC',fontsize=30)\nplt.savefig('figs/nn_auroc.png')\n\nwith open('svm.p','rb') as f:\n 
pt_auprc,pt_auroc,ft_auprc,ft_auroc=pickle.load(f)\n\nl=60\nplt.figure(figsize=[5,5],dpi=600)\nx=np.linspace(0.5,1,100)\nplt.plot(x,x,color='grey',linestyle='--')\nline=np.zeros(l)+0.001\ncolor=['g' for i in range(l)]\nplt.scatter(ft_auprc,pt_auprc,linewidths=line,c=color)\n\nfont={'size':30}\nplt.ylabel('scPretrain',fontdict=font)\nplt.xlabel('Without pre-training',fontdict=font)\nplt.yticks(fontsize=20)\nplt.xticks(fontsize=20)\nplt.title('LR-based AUPRC',fontsize=30)\nplt.gca().yaxis.set_major_formatter(ticker.FormatStrFormatter('%.1f'))\nplt.savefig('figs/svm_auprc.png')\n\nplt.figure(figsize=[5,5],dpi=600)\nx=np.linspace(0.7,1,100)\nplt.plot(x,x,color='grey',linestyle='--')\nline=np.zeros(l)+0.001\ncolor=['g' for i in range(l)]\nplt.scatter(ft_auroc,pt_auroc,linewidths=line,c=color)\nfont={'size':30}\nplt.ylabel('scPretrain',fontdict=font)\nplt.xlabel('Without pre-training',fontdict=font)\nplt.xticks([0.7,0.8,0.9,1],fontsize=20)\nplt.yticks([0.7,0.8,0.9,1],fontsize=20)\n#plt.gca().xaxis.set_major_locator(MultipleLocator(0.1))\nplt.title('SVM-based AUROC',fontsize=30)\nplt.gca().yaxis.set_major_formatter(ticker.FormatStrFormatter('%.1f'))\nplt.gca().xaxis.set_major_formatter(ticker.FormatStrFormatter('%.1f'))\nplt.savefig('figs/svm_auroc.png')"
] |
[
[
"matplotlib.pyplot.gca",
"matplotlib.pyplot.yticks",
"numpy.linspace",
"matplotlib.pyplot.title",
"matplotlib.pyplot.scatter",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.MultipleLocator",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.xticks",
"matplotlib.ticker.FormatStrFormatter",
"numpy.zeros",
"matplotlib.pyplot.ylabel"
]
] |
ZAKAUDD/LightNet
|
[
"58353b28d33e69cc877db878c4a888aabc2118ce"
] |
[
"modules/dense.py"
] |
[
"from collections import OrderedDict\n\nimport torch\nimport torch.nn as nn\n\nfrom .bn import ABN\n\n\nclass DenseModule(nn.Module):\n def __init__(self, in_chns, squeeze_ratio, out_chns, n_layers, dilate_sec=(1, 2, 4, 8, 16), norm_act=ABN):\n super(DenseModule, self).__init__()\n self.n_layers = n_layers\n self.mid_out = int(in_chns * squeeze_ratio)\n\n self.convs1 = nn.ModuleList()\n self.convs3 = nn.ModuleList()\n\n for idx in range(self.n_layers):\n dilate = dilate_sec[idx % len(dilate_sec)]\n self.last_channel = in_chns + idx * out_chns\n\n \"\"\"\n self.convs1.append(nn.Sequential(OrderedDict([\n (\"bn\", norm_act(self.last_channel)),\n (\"conv\", nn.Conv2d(self.last_channel, self.mid_out, 1, bias=False))\n ])))\n \"\"\"\n\n self.convs3.append(nn.Sequential(OrderedDict([\n (\"bn\", norm_act(self.last_channel)),\n (\"conv\", nn.Conv2d(self.last_channel, out_chns, kernel_size=3, stride=1,\n padding=dilate, dilation=dilate, bias=False))\n ])))\n\n @property\n def out_channels(self):\n return self.last_channel + 1\n\n def forward(self, x):\n inputs = [x]\n for i in range(self.n_layers):\n x = torch.cat(inputs, dim=1)\n # x = self.convs1[i](x)\n x = self.convs3[i](x)\n inputs += [x]\n\n return torch.cat(inputs, dim=1)\n\n\nclass DPDenseModule(nn.Module):\n def __init__(self, in_chns, squeeze_ratio, out_chns, n_layers, dilate_sec=(1, 2, 4, 8, 16), norm_act=ABN):\n super(DPDenseModule, self).__init__()\n self.n_layers = n_layers\n self.convs3 = nn.ModuleList()\n\n for idx in range(self.n_layers):\n dilate = dilate_sec[idx % len(dilate_sec)]\n self.last_channel = in_chns + idx * out_chns\n mid_out = int(self.last_channel * squeeze_ratio)\n\n self.convs3.append(nn.Sequential(OrderedDict([(\"bn.1\", norm_act(self.last_channel)),\n (\"conv_up\", nn.Conv2d(self.last_channel, mid_out,\n kernel_size=1, stride=1, padding=0,\n bias=False)),\n (\"bn.2\", norm_act(mid_out)),\n (\"dconv\", nn.Conv2d(mid_out, mid_out,\n kernel_size=3, stride=1, padding=dilate,\n groups=mid_out, dilation=dilate,\n bias=False)),\n (\"pconv\", nn.Conv2d(mid_out, out_chns,\n kernel_size=1, stride=1, padding=0,\n bias=False)),\n (\"dropout\", nn.Dropout2d(p=0.2, inplace=True))])))\n \"\"\"\n self.convs3.append(nn.Sequential(OrderedDict([(\"bn.1\", norm_act(self.last_channel)),\n (\"dconv\", nn.Conv2d(self.last_channel, self.last_channel,\n kernel_size=3, stride=1, padding=dilate,\n groups=self.last_channel, dilation=dilate,\n bias=False)),\n (\"pconv\", nn.Conv2d(self.last_channel, out_chns,\n kernel_size=1, stride=1, padding=0,\n bias=False)),\n (\"dropout\", nn.Dropout2d(p=0.2, inplace=True))])))\n \"\"\"\n @property\n def out_channels(self):\n return self.last_channel + 1\n\n def forward(self, x):\n inputs = [x]\n for i in range(self.n_layers):\n x = torch.cat(inputs, dim=1)\n x = self.convs3[i](x)\n inputs += [x]\n\n return torch.cat(inputs, dim=1)\n\n"
] |
[
[
"torch.nn.ModuleList",
"torch.nn.Conv2d",
"torch.nn.Dropout2d",
"torch.cat"
]
] |
LMEst-Rotor/ross
|
[
"33aaa4b32ff7c32465cb81840f3cb2d6b2cdd65a"
] |
[
"ross/fluid_flow/cylindrical.py"
] |
[
"import time\n\nimport numpy as np\nfrom numpy.linalg import pinv\nfrom scipy.optimize import curve_fit, minimize\n\nfrom ross.units import Q_, check_units\n\n\nclass THDCylindrical:\n \"\"\"This class calculates the pressure and temperature field in oil film of a cylindrical bearing, with two (2) pads. It is also possible to obtain the stiffness and damping coefficients.\n\n Parameters\n ----------\n Bearing Geometry\n ^^^^^^^^^^^^^^^^\n Describes the geometric characteristics.\n L : float, pint.Quantity\n Bearing length. Default unit is meter.\n R : float\n Rotor radius. The unit is meter.\n c_r : float\n Radial clearence between rotor and bearing. The unit is meter.\n betha_s : float\n Arc length of each pad. The unit is degree.\n\n\n Operation conditions\n ^^^^^^^^^^^^^^^^^^^^\n Describes the operation conditions of the bearing.\n speed : float, pint.Quantity\n Rotor rotational speed. Default unit is rad/s.\n Wx : Float\n Load in X direction. The unit is newton.\n Wy : Float\n Load in Y direction. The unit is newton.\n\n Fluid propierties\n ^^^^^^^^^^^^^^^^^\n Describes the fluid characteristics.\n mu_ref : float\n Fluid reference viscosity. The unit is Pa*s.\n rho : float, pint.Quantity\n Fluid density. Default unit is kg/m^3.\n k_t : Float\n Fluid thermal conductivity. The unit is J/(s*m*°C).\n Cp : float\n Fluid specific heat. The unit is J/(kg*°C).\n Treserv : float\n Oil reservoir temperature. The unit is celsius.\n fat_mixt : float\n Ratio of oil in Treserv temperature that mixes with the circulating oil.\n\n Viscosity interpolation\n ^^^^^^^^^^^^^^^^^^^^^^^\n Interpolation data required.\n T_muI : float\n Inferior limit temperature. The unit is celsius.\n T_muF : float\n Upper limit temperature. The unit is celsius.\n mu_I : float\n Inferior limit viscosity. The unit is Pa*s.\n mu_F : float\n Upper limit viscosity. The unit is Pa*s.\n\n Mesh discretization\n ^^^^^^^^^^^^^^^^^^^\n Describes the discretization of the bearing.\n ntheta : int\n Number of volumes along the direction theta (direction of flow).\n nz : int\n Number of volumes along the Z direction (axial direction).\n n_gap : int\n Number of volumes in recess zone.\n\n\n Returns\n -------\n A THDCylindrical object.\n\n References\n ----------\n .. [1] BARBOSA, J. S.; LOBATO, FRAN S.; CAMPANINE SICCHIERI, LEONARDO;CAVALINI JR, ALDEMIR AP. ; STEFFEN JR, VALDER. Determinação da Posição de Equilíbrio em Mancais Hidrodinâmicos Cilíndricos usando o Algoritmo de Evolução Diferencial. REVISTA CEREUS, v. 10, p. 224-239, 2018. ..\n .. [2] DANIEL, G.B. Desenvolvimento de um Modelo Termohidrodinâmico para Análise em Mancais Segmentados. Campinas: Faculdade de Engenharia Mecânica, Universidade Estadual de Campinas, 2012. Tese (Doutorado). ..\n .. [3] NICOLETTI, R., Efeitos Térmicos em Mancais Segmentados Híbridos – Teoria e Experimento. 1999. Dissertação de Mestrado. Universidade Estadual de Campinas, Campinas. ..\n\n Attributes\n ----------\n Pdim : array\n Dimensional pressure field. The unit is pascal.\n dPdz : array\n Differential pressure field in z direction.\n dPdy : array\n Differential pressure field in theta direction.\n Tdim : array\n Dimensional temperature field. The unit is celsius.\n Fhx : float\n Force in X direction. The unit is newton.\n Fhy : float\n Force in Y direction. 
The unit is newton.\n equilibrium_pos : array\n Array with excentricity ratio and attitude angle information.\n Its shape is: array([excentricity, angle])\n\n Examples\n --------\n >>> from ross.fluid_flow.cylindrical import cylindrical_bearing_example\n >>> x0 = [0.1,-0.1]\n >>> bearing = cylindrical_bearing_example()\n >>> bearing.run(x0)\n >>> bearing.equilibrium_pos\n array([ 0.58768737, -0.67319389])\n \"\"\"\n\n @check_units\n def __init__(\n self,\n L,\n R,\n c_r,\n n_theta,\n n_z,\n n_y,\n n_gap,\n betha_s,\n mu_ref,\n speed,\n Wx,\n Wy,\n k_t,\n Cp,\n rho,\n T_reserv,\n fat_mixt,\n T_muI,\n T_muF,\n mu_I,\n mu_F,\n sommerfeld_type=2,\n ):\n\n self.L = L\n self.R = R\n self.c_r = c_r\n self.n_theta = n_theta\n self.n_z = n_z\n self.n_y = n_y\n self.n_gap = n_gap\n self.mu_ref = mu_ref\n self.speed = speed\n self.Wx = Wx\n self.Wy = Wy\n self.k_t = k_t\n self.Cp = Cp\n self.rho = rho\n self.T_reserv = T_reserv\n self.fat_mixt = fat_mixt\n self.equilibrium_pos = None\n self.sommerfeld_type = sommerfeld_type\n\n if self.n_y == None:\n self.n_y = self.n_theta\n\n self.betha_s = betha_s * np.pi / 180\n\n self.n_pad = 2\n\n self.thetaI = 0\n self.thetaF = self.betha_s\n self.dtheta = (self.thetaF - self.thetaI) / (self.n_theta)\n\n ##\n # Dimensionless discretization variables\n\n self.dY = 1 / self.n_y\n self.dZ = 1 / self.n_z\n\n # Z-axis direction\n\n self.Z_I = 0\n self.Z_F = 1\n Z = np.zeros((self.n_z + 2))\n\n Z[0] = self.Z_I\n Z[self.n_z + 1] = self.Z_F\n Z[1 : self.n_z + 1] = np.arange(self.Z_I + 0.5 * self.dZ, self.Z_F, self.dZ)\n self.Z = Z\n\n # Dimensionalization\n\n self.dz = self.dZ * self.L\n self.dy = self.dY * self.betha_s * self.R\n\n self.Zdim = self.Z * L\n\n # Interpolation coefficients\n self.a, self.b = self._interpol(T_muI, T_muF, mu_I, mu_F)\n\n def _forces(self, x0, y0, xpt0, ypt0):\n \"\"\"Calculates the forces in Y and X direction.\n\n Parameters\n ----------\n x0 : array, float\n If the other parameters are None, x0 is an array with eccentricity ratio and attitude angle.\n Else, x0 is the position of the center of the rotor in the x-axis.\n y0 : float\n The position of the center of the rotor in the y-axis.\n xpt0 : float\n The speed of the center of the rotor in the x-axis.\n ypt0 : float\n The speed of the center of the rotor in the y-axis.\n\n\n Returns\n -------\n Fhx : float\n Force in X direction. The unit is newton.\n Fhy : float\n Force in Y direction. 
The unit is newton.\n \"\"\"\n if y0 is None and xpt0 is None and ypt0 is None:\n self.x0 = x0\n\n xr = self.x0[0] * self.c_r * np.cos(self.x0[1])\n yr = self.x0[0] * self.c_r * np.sin(self.x0[1])\n self.Y = yr / self.c_r\n self.X = xr / self.c_r\n\n self.Xpt = 0\n self.Ypt = 0\n else:\n self.X = x0 / self.c_r\n self.Y = y0 / self.c_r\n\n self.Xpt = xpt0 / (self.c_r * self.speed)\n self.Ypt = ypt0 / (self.c_r * self.speed)\n\n T_conv = 0.8 * self.T_reserv\n\n T_mist = self.T_reserv * np.ones(self.n_pad)\n\n while (T_mist[0] - T_conv) >= 1e-2:\n\n P = np.zeros((self.n_z, self.n_theta, self.n_pad))\n dPdy = np.zeros((self.n_z, self.n_theta, self.n_pad))\n dPdz = np.zeros((self.n_z, self.n_theta, self.n_pad))\n T = np.ones((self.n_z, self.n_theta, self.n_pad))\n T_new = np.ones((self.n_z, self.n_theta, self.n_pad)) * 1.2\n\n T_conv = T_mist[0]\n\n mi_new = 1.1 * np.ones((self.n_z, self.n_theta, self.n_pad))\n PP = np.zeros(((self.n_z), (2 * self.n_theta)))\n\n nk = (self.n_z) * (self.n_theta)\n\n Mat_coef = np.zeros((nk, nk))\n Mat_coef_T = np.zeros((nk, nk))\n b = np.zeros((nk, 1))\n b_T = np.zeros((nk, 1))\n\n for n_p in np.arange(self.n_pad):\n\n self.thetaI = (\n n_p * self.betha_s\n + self.dtheta * self.n_gap / 2\n + (n_p * self.dtheta * self.n_gap)\n )\n\n self.thetaF = self.thetaI + self.betha_s\n\n self.dtheta = (self.thetaF - self.thetaI) / (self.n_theta)\n\n if n_p == 0:\n Ytheta1 = np.arange(self.thetaI, self.thetaF, self.dtheta)\n else:\n Ytheta2 = np.arange(self.thetaI, self.thetaF, self.dtheta)\n\n T_ref = T_mist[n_p - 1]\n\n # Temperature convergence while\n\n while (\n np.linalg.norm(T_new[:, :, n_p] - T[:, :, n_p])\n / np.linalg.norm(T[:, :, n_p])\n >= 1e-3\n ):\n\n T_ref = T_mist[n_p - 1]\n\n mi = mi_new\n\n T[:, :, n_p] = T_new[:, :, n_p]\n\n ki = 0\n kj = 0\n k = 0\n\n # Solution of pressure field initialization\n\n for ii in np.arange((self.Z_I + 0.5 * self.dZ), self.Z_F, self.dZ):\n for jj in np.arange(\n self.thetaI + (self.dtheta / 2), self.thetaF, self.dtheta\n ):\n\n hP = 1 - self.X * np.cos(jj) - self.Y * np.sin(jj)\n he = (\n 1\n - self.X * np.cos(jj + 0.5 * self.dtheta)\n - self.Y * np.sin(jj + 0.5 * self.dtheta)\n )\n hw = (\n 1\n - self.X * np.cos(jj - 0.5 * self.dtheta)\n - self.Y * np.sin(jj - 0.5 * self.dtheta)\n )\n hn = hP\n hs = hn\n\n if kj == 0 and ki == 0:\n MI_e = 0.5 * (mi[ki, kj] + mi[ki, kj + 1])\n MI_w = mi[ki, kj]\n MI_s = mi[ki, kj]\n MI_n = 0.5 * (mi[ki, kj] + mi[ki + 1, kj])\n\n if kj == 0 and ki > 0 and ki < self.n_z - 1:\n MI_e = 0.5 * (mi[ki, kj] + mi[ki, kj + 1])\n MI_w = mi[ki, kj]\n MI_s = 0.5 * (mi[ki, kj] + mi[ki - 1, kj])\n MI_n = 0.5 * (mi[ki, kj] + mi[ki + 1, kj])\n\n if kj == 0 and ki == self.n_z - 1:\n MI_e = 0.5 * (mi[ki, kj] + mi[ki, kj + 1])\n MI_w = mi[ki, kj]\n MI_s = 0.5 * (mi[ki, kj] + mi[ki - 1, kj])\n MI_n = mi[ki, kj]\n\n if ki == 0 and kj > 0 and kj < self.n_theta - 1:\n MI_e = 0.5 * (mi[ki, kj] + mi[ki, kj + 1])\n MI_w = 0.5 * (mi[ki, kj] + mi[ki, kj - 1])\n MI_s = mi[ki, kj]\n MI_n = 0.5 * (mi[ki, kj] + mi[ki + 1, kj])\n\n if (\n kj > 0\n and kj < self.n_theta - 1\n and ki > 0\n and ki < self.n_z - 1\n ):\n MI_e = 0.5 * (mi[ki, kj] + mi[ki, kj + 1])\n MI_w = 0.5 * (mi[ki, kj] + mi[ki, kj - 1])\n MI_s = 0.5 * (mi[ki, kj] + mi[ki - 1, kj])\n MI_n = 0.5 * (mi[ki, kj] + mi[ki + 1, kj])\n\n if ki == self.n_z - 1 and kj > 0 and kj < self.n_theta - 1:\n MI_e = 0.5 * (mi[ki, kj] + mi[ki, kj + 1])\n MI_w = 0.5 * (mi[ki, kj] + mi[ki, kj - 1])\n MI_s = 0.5 * (mi[ki, kj] + mi[ki - 1, kj])\n MI_n = mi[ki, kj]\n\n if ki == 0 and kj 
== self.n_theta - 1:\n MI_e = mi[ki, kj]\n MI_w = 0.5 * (mi[ki, kj] + mi[ki, kj - 1])\n MI_s = mi[ki, kj]\n MI_n = 0.5 * (mi[ki, kj] + mi[ki + 1, kj])\n\n if kj == self.n_theta - 1 and ki > 0 and ki < self.n_z - 1:\n MI_e = mi[ki, kj]\n MI_w = 0.5 * (mi[ki, kj] + mi[ki, kj - 1])\n MI_s = 0.5 * (mi[ki, kj] + mi[ki - 1, kj])\n MI_n = 0.5 * (mi[ki, kj] + mi[ki + 1, kj])\n\n if kj == self.n_theta - 1 and ki == self.n_z - 1:\n MI_e = mi[ki, kj]\n MI_w = 0.5 * (mi[ki, kj] + mi[ki, kj - 1])\n MI_s = 0.5 * (mi[ki, kj] + mi[ki - 1, kj])\n MI_n = mi[ki, kj]\n\n CE = (self.dZ * he ** 3) / (\n 12 * MI_e[n_p] * self.dY * self.betha_s ** 2\n )\n CW = (self.dZ * hw ** 3) / (\n 12 * MI_w[n_p] * self.dY * self.betha_s ** 2\n )\n CN = (self.dY * (self.R ** 2) * hn ** 3) / (\n 12 * MI_n[n_p] * self.dZ * self.L ** 2\n )\n CS = (self.dY * (self.R ** 2) * hs ** 3) / (\n 12 * MI_s[n_p] * self.dZ * self.L ** 2\n )\n CP = -(CE + CW + CN + CS)\n\n B = (self.dZ / (2 * self.betha_s)) * (he - hw) - (\n (self.Ypt * np.cos(jj) + self.Xpt * np.sin(jj))\n * self.dy\n * self.dZ\n )\n\n k = k + 1\n b[k - 1, 0] = B\n\n if ki == 0 and kj == 0:\n Mat_coef[k - 1, k - 1] = CP - CS - CW\n Mat_coef[k - 1, k] = CE\n Mat_coef[k - 1, k + self.n_theta - 1] = CN\n\n elif kj == 0 and ki > 0 and ki < self.n_z - 1:\n Mat_coef[k - 1, k - 1] = CP - CW\n Mat_coef[k - 1, k] = CE\n Mat_coef[k - 1, k - self.n_theta - 1] = CS\n Mat_coef[k - 1, k + self.n_theta - 1] = CN\n\n elif kj == 0 and ki == self.n_z - 1:\n Mat_coef[k - 1, k - 1] = CP - CN - CW\n Mat_coef[k - 1, k] = CE\n Mat_coef[k - 1, k - self.n_theta - 1] = CS\n\n elif ki == 0 and kj > 0 and kj < self.n_y - 1:\n Mat_coef[k - 1, k - 1] = CP - CS\n Mat_coef[k - 1, k] = CE\n Mat_coef[k - 1, k - 2] = CW\n Mat_coef[k - 1, k + self.n_theta - 1] = CN\n\n elif (\n ki > 0\n and ki < self.n_z - 1\n and kj > 0\n and kj < self.n_y - 1\n ):\n Mat_coef[k - 1, k - 1] = CP\n Mat_coef[k - 1, k - 2] = CW\n Mat_coef[k - 1, k - self.n_theta - 1] = CS\n Mat_coef[k - 1, k + self.n_theta - 1] = CN\n Mat_coef[k - 1, k] = CE\n\n elif ki == self.n_z - 1 and kj > 0 and kj < self.n_y - 1:\n Mat_coef[k - 1, k - 1] = CP - CN\n Mat_coef[k - 1, k] = CE\n Mat_coef[k - 1, k - 2] = CW\n Mat_coef[k - 1, k - self.n_theta - 1] = CS\n\n elif ki == 0 and kj == self.n_y - 1:\n Mat_coef[k - 1, k - 1] = CP - CE - CS\n Mat_coef[k - 1, k - 2] = CW\n Mat_coef[k - 1, k + self.n_theta - 1] = CN\n\n elif kj == self.n_y - 1 and ki > 0 and ki < self.n_z - 1:\n Mat_coef[k - 1, k - 1] = CP - CE\n Mat_coef[k - 1, k - 2] = CW\n Mat_coef[k - 1, k - self.n_theta - 1] = CS\n Mat_coef[k - 1, k + self.n_theta - 1] = CN\n\n elif ki == self.n_z - 1 and kj == self.n_y - 1:\n Mat_coef[k - 1, k - 1] = CP - CE - CN\n Mat_coef[k - 1, k - 2] = CW\n Mat_coef[k - 1, k - self.n_theta - 1] = CS\n\n kj = kj + 1\n\n kj = 0\n ki = ki + 1\n\n # Solution of pressure field end\n\n p = np.linalg.solve(Mat_coef, b)\n cont = 0\n\n for i in np.arange(self.n_z):\n for j in np.arange(self.n_theta):\n\n P[i, j, n_p] = p[cont]\n cont = cont + 1\n\n if P[i, j, n_p] < 0:\n P[i, j, n_p] = 0\n\n # Dimensional pressure fied\n\n Pdim = (P * self.mu_ref * self.speed * (self.R ** 2)) / (\n self.c_r ** 2\n )\n\n ki = 0\n kj = 0\n k = 0\n\n # Solution of temperature field initialization\n\n for ii in np.arange(\n (self.Z_I + 0.5 * self.dZ), (self.Z_F), self.dZ\n ):\n for jj in np.arange(\n self.thetaI + (self.dtheta / 2), self.thetaF, self.dtheta\n ):\n\n # Pressure gradients\n\n if kj == 0 and ki == 0:\n dPdy[ki, kj, n_p] = (P[ki, kj + 1, n_p] - 0) / (\n 2 * self.dY\n )\n 
dPdz[ki, kj, n_p] = (P[ki + 1, kj, n_p] - 0) / (\n 2 * self.dZ\n )\n\n if kj == 0 and ki > 0 and ki < self.n_z - 1:\n dPdy[ki, kj, n_p] = (P[ki, kj + 1, n_p] - 0) / (\n 2 * self.dY\n )\n dPdz[ki, kj, n_p] = (\n P[ki + 1, kj, n_p] - P[ki - 1, kj, n_p]\n ) / (2 * self.dZ)\n\n if kj == 0 and ki == self.n_z - 1:\n dPdy[ki, kj, n_p] = (P[ki, kj + 1, n_p] - 0) / (\n 2 * self.dY\n )\n dPdz[ki, kj, n_p] = (0 - P[ki - 1, kj, n_p]) / (\n 2 * self.dZ\n )\n\n if ki == 0 and kj > 0 and kj < self.n_theta - 1:\n dPdy[ki, kj, n_p] = (\n P[ki, kj + 1, n_p] - P[ki, kj - 1, n_p]\n ) / (2 * self.dY)\n dPdz[ki, kj, n_p] = (P[ki + 1, kj, n_p] - 0) / (\n 2 * self.dZ\n )\n\n if (\n kj > 0\n and kj < self.n_theta - 1\n and ki > 0\n and ki < self.n_z - 1\n ):\n dPdy[ki, kj, n_p] = (\n P[ki, kj + 1, n_p] - P[ki, kj - 1, n_p]\n ) / (2 * self.dY)\n dPdz[ki, kj, n_p] = (\n P[ki + 1, kj, n_p] - P[ki - 1, kj, n_p]\n ) / (2 * self.dZ)\n\n if ki == self.n_z - 1 and kj > 0 and kj < self.n_theta - 1:\n dPdy[ki, kj, n_p] = (\n P[ki, kj + 1, n_p] - P[ki, kj - 1, n_p]\n ) / (2 * self.dY)\n dPdz[ki, kj, n_p] = (0 - P[ki - 1, kj, n_p]) / (\n 2 * self.dZ\n )\n\n if ki == 0 and kj == self.n_theta - 1:\n dPdy[ki, kj, n_p] = (0 - P[ki, kj - 1, n_p]) / (\n 2 * self.dY\n )\n dPdz[ki, kj, n_p] = (P[ki + 1, kj, n_p] - 0) / (\n 2 * self.dZ\n )\n\n if kj == self.n_theta - 1 and ki > 0 and ki < self.n_z - 1:\n dPdy[ki, kj, n_p] = (0 - P[ki, kj - 1, n_p]) / (\n 2 * self.dY\n )\n dPdz[ki, kj, n_p] = (\n P[ki + 1, kj, n_p] - P[ki - 1, kj, n_p]\n ) / (2 * self.dZ)\n\n if kj == self.n_theta - 1 and ki == self.n_z - 1:\n dPdy[ki, kj, n_p] = (0 - P[ki, kj - 1, n_p]) / (\n 2 * self.dY\n )\n dPdz[ki, kj, n_p] = (0 - P[ki - 1, kj, n_p]) / (\n 2 * self.dZ\n )\n\n HP = 1 - self.X * np.cos(jj) - self.Y * np.sin(jj)\n hpt = -self.Ypt * np.cos(jj) + self.Xpt * np.sin(jj)\n\n mi_p = mi[ki, kj, n_p]\n\n AE = -(self.k_t * HP * self.dZ) / (\n self.rho\n * self.Cp\n * self.speed\n * ((self.betha_s * self.R) ** 2)\n * self.dY\n )\n AW = (\n (\n ((HP ** 3) * dPdy[ki, kj, n_p] * self.dZ)\n / (12 * mi_p * (self.betha_s ** 2))\n )\n - ((HP) * self.dZ / (2 * self.betha_s))\n - (\n (self.k_t * HP * self.dZ)\n / (\n self.rho\n * self.Cp\n * self.speed\n * ((self.betha_s * self.R) ** 2)\n * self.dY\n )\n )\n )\n AN = -(self.k_t * HP * self.dY) / (\n self.rho\n * self.Cp\n * self.speed\n * (self.L ** 2)\n * self.dZ\n )\n AS = (\n (\n (self.R ** 2)\n * (HP ** 3)\n * dPdz[ki, kj, n_p]\n * self.dY\n )\n / (12 * (self.L ** 2) * mi_p)\n ) - (\n (self.k_t * HP * self.dY)\n / (\n self.rho\n * self.Cp\n * self.speed\n * (self.L ** 2)\n * self.dZ\n )\n )\n AP = -(AE + AW + AN + AS)\n\n auxb_T = (self.speed * self.mu_ref) / (\n self.rho * self.Cp * self.T_reserv * self.c_r\n )\n b_TG = (\n self.mu_ref\n * self.speed\n * (self.R ** 2)\n * self.dY\n * self.dZ\n * P[ki, kj, n_p]\n * hpt\n ) / (self.rho * self.Cp * self.T_reserv * (self.c_r ** 2))\n b_TH = (\n self.speed\n * self.mu_ref\n * (hpt ** 2)\n * 4\n * mi_p\n * self.dY\n * self.dZ\n ) / (self.rho * self.Cp * self.T_reserv * 3 * HP)\n b_TI = (\n auxb_T\n * (mi_p * (self.R ** 2) * self.dY * self.dZ)\n / (HP * self.c_r)\n )\n b_TJ = (\n auxb_T\n * (\n (self.R ** 2)\n * (HP ** 3)\n * (dPdy[ki, kj, n_p] ** 2)\n * self.dY\n * self.dZ\n )\n / (12 * self.c_r * (self.betha_s ** 2) * mi_p)\n )\n b_TK = (\n auxb_T\n * (\n (self.R ** 4)\n * (HP ** 3)\n * (dPdz[ki, kj, n_p] ** 2)\n * self.dY\n * self.dZ\n )\n / (12 * self.c_r * (self.L ** 2) * mi_p)\n )\n\n B_T = b_TG + b_TH + b_TI + b_TJ + b_TK\n\n k = k + 1\n\n b_T[k - 1, 0] = B_T\n\n 
if ki == 0 and kj == 0:\n Mat_coef_T[k - 1, k - 1] = AP + AS - AW\n Mat_coef_T[k - 1, k] = AE\n Mat_coef_T[k - 1, k + self.n_theta - 1] = AN\n b_T[k - 1, 0] = b_T[k - 1, 0] - 2 * AW * (\n T_ref / self.T_reserv\n )\n\n elif kj == 0 and ki > 0 and ki < self.n_z - 1:\n Mat_coef_T[k - 1, k - 1] = AP - AW\n Mat_coef_T[k - 1, k] = AE\n Mat_coef_T[k - 1, k - self.n_theta - 1] = AS\n Mat_coef_T[k - 1, k + self.n_theta - 1] = AN\n b_T[k - 1, 0] = b_T[k - 1, 0] - 2 * AW * (\n T_ref / self.T_reserv\n )\n\n elif kj == 0 and ki == self.n_z - 1:\n Mat_coef_T[k - 1, k - 1] = AP + AN - AW\n Mat_coef_T[k - 1, k] = AE\n Mat_coef_T[k - 1, k - self.n_theta - 1] = AS\n b_T[k - 1, 0] = b_T[k - 1, 0] - 2 * AW * (\n T_ref / self.T_reserv\n )\n\n elif ki == 0 and kj > 0 and kj < self.n_y - 1:\n Mat_coef_T[k - 1, k - 1] = AP + AS\n Mat_coef_T[k - 1, k] = AE\n Mat_coef_T[k - 1, k - 2] = AW\n Mat_coef_T[k - 1, k + self.n_theta - 1] = AN\n\n elif (\n ki > 0\n and ki < self.n_z - 1\n and kj > 0\n and kj < self.n_y - 1\n ):\n Mat_coef_T[k - 1, k - 1] = AP\n Mat_coef_T[k - 1, k - 2] = AW\n Mat_coef_T[k - 1, k - self.n_theta - 1] = AS\n Mat_coef_T[k - 1, k + self.n_theta - 1] = AN\n Mat_coef_T[k - 1, k] = AE\n\n elif ki == self.n_z - 1 and kj > 0 and kj < self.n_y - 1:\n Mat_coef_T[k - 1, k - 1] = AP + AN\n Mat_coef_T[k - 1, k] = AE\n Mat_coef_T[k - 1, k - 2] = AW\n Mat_coef_T[k - 1, k - self.n_theta - 1] = AS\n\n elif ki == 0 and kj == self.n_y - 1:\n Mat_coef_T[k - 1, k - 1] = AP + AE + AS\n Mat_coef_T[k - 1, k - 2] = AW\n Mat_coef_T[k - 1, k + self.n_theta - 1] = AN\n\n elif kj == self.n_y - 1 and ki > 0 and ki < self.n_z - 1:\n Mat_coef_T[k - 1, k - 1] = AP + AE\n Mat_coef_T[k - 1, k - 2] = AW\n Mat_coef_T[k - 1, k - self.n_theta - 1] = AS\n Mat_coef_T[k - 1, k + self.n_theta - 1] = AN\n\n elif ki == self.n_z - 1 and kj == self.n_y - 1:\n Mat_coef_T[k - 1, k - 1] = AP + AE + AN\n Mat_coef_T[k - 1, k - 2] = AW\n Mat_coef_T[k - 1, k - self.n_theta - 1] = AS\n\n kj = kj + 1\n\n kj = 0\n ki = ki + 1\n\n # Solution of temperature field end\n\n t = np.linalg.solve(Mat_coef_T, b_T)\n cont = 0\n\n for i in np.arange(self.n_z):\n for j in np.arange(self.n_theta):\n\n T_new[i, j, n_p] = t[cont]\n cont = cont + 1\n\n Tdim = T_new * self.T_reserv\n\n T_end = np.sum(Tdim[:, -1, n_p]) / self.n_z\n\n T_mist[n_p] = (\n self.fat_mixt * self.T_reserv + (1 - self.fat_mixt) * T_end\n )\n\n for i in np.arange(self.n_z):\n for j in np.arange(self.n_theta):\n\n mi_new[i, j, n_p] = (\n self.a * (Tdim[i, j, n_p]) ** self.b\n ) / self.mu_ref\n\n PP = np.zeros(((self.n_z), (2 * self.n_theta)))\n\n PP = np.concatenate((Pdim[:, :, 0], Pdim[:, :, 1]), axis=1)\n Ytheta = np.concatenate((Ytheta1, Ytheta2))\n\n auxF = np.zeros((2, len(Ytheta)))\n\n auxF[0, :] = np.cos(Ytheta)\n auxF[1, :] = np.sin(Ytheta)\n\n dA = self.dy * self.dz\n\n auxP = PP * dA\n\n vector_auxF_x = auxF[0, :]\n vector_auxF_y = auxF[1, :]\n\n auxFx = auxP * vector_auxF_x\n auxFy = auxP * vector_auxF_y\n\n fxj = -np.sum(auxFx)\n fyj = -np.sum(auxFy)\n\n Fhx = fxj\n Fhy = fyj\n self.Fhx = Fhx\n self.Fhy = Fhy\n return Fhx, Fhy\n\n def run(self, x, print_result=False, print_progress=False, print_time=False):\n \"\"\"This method runs the optimization to find the equilibrium position of the rotor's center.\n\n Parameters\n ----------\n x : array\n Array with eccentricity ratio and attitude angle\n print_progress : bool\n Set it True to print the score and forces on each iteration.\n False by default.\n \"\"\"\n args = print_progress\n t1 = time.time()\n res = minimize(\n 
self._score,\n x,\n args,\n method=\"Nelder-Mead\",\n tol=10e-3,\n options={\"maxiter\": 1000},\n )\n self.equilibrium_pos = res.x\n t2 = time.time()\n\n if print_result:\n print(res)\n\n if print_time:\n print(f\"Time Spent: {t2-t1} seconds\")\n\n def _interpol(self, T_muI, T_muF, mu_I, mu_F):\n \"\"\"\n\n Parameters\n ----------\n\n\n\n Returns\n -------\n\n \"\"\"\n\n def viscosity(x, a, b):\n return a * (x ** b)\n\n xdata = [T_muI, T_muF] # changed boundary conditions to avoid division by ]\n ydata = [mu_I, mu_F]\n\n popt, pcov = curve_fit(viscosity, xdata, ydata, p0=(6.0, -1.0))\n a, b = popt\n\n return a, b\n\n def coefficients(self, show_coef=False):\n \"\"\"Calculates the dynamic coefficients of stiffness \"k\" and damping \"c\". The formulation is based in application of virtual displacements and speeds on the rotor from its equilibrium position to determine the bearing stiffness and damping coefficients.\n\n Parameters\n ----------\n show_coef : bool\n Set it True, to print the calculated coefficients.\n False by default.\n\n Returns\n -------\n coefs : tuple\n Bearing stiffness and damping coefficients.\n Its shape is: ((kxx, kxy, kyx, kyy), (cxx, cxy, cyx, cyy))\n\n \"\"\"\n if self.equilibrium_pos is None:\n self.run([0.1, -0.1], True, True)\n self.coefficients()\n else:\n xeq = self.equilibrium_pos[0] * self.c_r * np.cos(self.equilibrium_pos[1])\n yeq = self.equilibrium_pos[0] * self.c_r * np.sin(self.equilibrium_pos[1])\n\n dE = 0.001\n epix = np.abs(dE * self.c_r * np.cos(self.equilibrium_pos[1]))\n epiy = np.abs(dE * self.c_r * np.sin(self.equilibrium_pos[1]))\n\n Va = self.speed * (self.R)\n epixpt = 0.000001 * np.abs(Va * np.sin(self.equilibrium_pos[1]))\n epiypt = 0.000001 * np.abs(Va * np.cos(self.equilibrium_pos[1]))\n\n Aux01 = self._forces(xeq + epix, yeq, 0, 0)\n Aux02 = self._forces(xeq - epix, yeq, 0, 0)\n Aux03 = self._forces(xeq, yeq + epiy, 0, 0)\n Aux04 = self._forces(xeq, yeq - epiy, 0, 0)\n\n Aux05 = self._forces(xeq, yeq, epixpt, 0)\n Aux06 = self._forces(xeq, yeq, -epixpt, 0)\n Aux07 = self._forces(xeq, yeq, 0, epiypt)\n Aux08 = self._forces(xeq, yeq, 0, -epiypt)\n\n # Ss = self.sommerfeld(Aux08[0],Aux08[1])\n\n Kxx = -self.sommerfeld(Aux01[0], Aux02[1]) * (\n (Aux01[0] - Aux02[0]) / (epix / self.c_r)\n )\n Kxy = -self.sommerfeld(Aux03[0], Aux04[1]) * (\n (Aux03[0] - Aux04[0]) / (epiy / self.c_r)\n )\n Kyx = -self.sommerfeld(Aux01[1], Aux02[1]) * (\n (Aux01[1] - Aux02[1]) / (epix / self.c_r)\n )\n Kyy = -self.sommerfeld(Aux03[1], Aux04[1]) * (\n (Aux03[1] - Aux04[1]) / (epiy / self.c_r)\n )\n\n Cxx = -self.sommerfeld(Aux05[0], Aux06[0]) * (\n (Aux05[0] - Aux06[0]) / (epixpt / self.c_r / self.speed)\n )\n Cxy = -self.sommerfeld(Aux07[0], Aux08[0]) * (\n (Aux07[0] - Aux08[0]) / (epiypt / self.c_r / self.speed)\n )\n Cyx = -self.sommerfeld(Aux05[1], Aux06[1]) * (\n (Aux05[1] - Aux06[1]) / (epixpt / self.c_r / self.speed)\n )\n Cyy = -self.sommerfeld(Aux07[1], Aux08[1]) * (\n (Aux07[1] - Aux08[1]) / (epiypt / self.c_r / self.speed)\n )\n\n kxx = (np.sqrt((self.Wx ** 2) + (self.Wy ** 2)) / self.c_r) * Kxx\n kxy = (np.sqrt((self.Wx ** 2) + (self.Wy ** 2)) / self.c_r) * Kxy\n kyx = (np.sqrt((self.Wx ** 2) + (self.Wy ** 2)) / self.c_r) * Kyx\n kyy = (np.sqrt((self.Wx ** 2) + (self.Wy ** 2)) / self.c_r) * Kyy\n\n cxx = (\n np.sqrt((self.Wx ** 2) + (self.Wy ** 2)) / (self.c_r * self.speed)\n ) * Cxx\n cxy = (\n np.sqrt((self.Wx ** 2) + (self.Wy ** 2)) / (self.c_r * self.speed)\n ) * Cxy\n cyx = (\n np.sqrt((self.Wx ** 2) + (self.Wy ** 2)) / (self.c_r * 
self.speed)\n ) * Cyx\n cyy = (\n np.sqrt((self.Wx ** 2) + (self.Wy ** 2)) / (self.c_r * self.speed)\n ) * Cyy\n\n if show_coef:\n print(f\"kxx = {kxx}\")\n print(f\"kxy = {kxy}\")\n print(f\"kyx = {kyx}\")\n print(f\"kyy = {kyy}\")\n\n print(f\"cxx = {cxx}\")\n print(f\"cxy = {cxy}\")\n print(f\"cyx = {cyx}\")\n print(f\"cyy = {cyy}\")\n\n coefs = ((kxx, kxy, kyx, kyy), (cxx, cxy, cyx, cyy))\n\n return coefs\n\n def _score(self, x, print_progress=False):\n \"\"\"This method used to set the objective function of minimize optimization.\n\n Parameters\n ==========\n score: float\n Balanced Force expression between the load aplied in bearing and the\n resultant force provide by oil film.\n\n Returns\n ========\n Score coefficient.\n\n \"\"\"\n Fhx, Fhy = self._forces(x, None, None, None)\n score = np.sqrt(((self.Wx + Fhx) ** 2) + ((self.Wy + Fhy) ** 2))\n if print_progress:\n print(f\"Score: \", score)\n print(\"============================================\")\n print(f\"Força na direção x: \", Fhx)\n print(\"============================================\")\n print(f\"Força na direção y: \", Fhy)\n print(\"\")\n\n return score\n\n def sommerfeld(self, force_x, force_y):\n \"\"\"Calculate the sommerfeld number. This dimensionless number is used to calculate the dynamic coeficients.\n\n Parameters\n ----------\n force_x : float\n Force in x direction. The unit is newton.\n force_y : float\n Force in y direction. The unit is newton.\n\n Returns\n -------\n Ss : float\n Sommerfeld number.\n \"\"\"\n if self.sommerfeld_type == 1:\n S = (self.mu_ref * ((self.R) ** 3) * self.L * self.speed) / (\n np.pi * (self.c_r ** 2) * np.sqrt((self.Wx ** 2) + (self.Wy ** 2))\n )\n\n elif self.sommerfeld_type == 2:\n S = 1 / (\n 2\n * ((self.L / (2 * self.R)) ** 2)\n * (np.sqrt((force_x ** 2) + (force_y ** 2)))\n )\n\n Ss = S * ((self.L / (2 * self.R)) ** 2)\n\n return Ss\n\n\ndef cylindrical_bearing_example():\n \"\"\"Create an example of a cylindrical bearing with termo hydrodynamic effects. This function returns pressure and temperature field and dynamic coefficient. The purpose is to make available a simple model so that a doctest can be written using it.\n Returns\n -------\n THDCylindrical : ross.THDCylindrical Object\n An instance of a termo-hydrodynamic cylendrical bearing model object.\n Examples\n --------\n >>> bearing = cylindrical_bearing_example()\n >>> bearing.L\n 0.263144\n \"\"\"\n\n bearing = THDCylindrical(\n L=0.263144,\n R=0.2,\n c_r=1.95e-4,\n n_theta=41,\n n_z=5,\n n_y=None,\n n_gap=1,\n betha_s=176,\n mu_ref=0.02,\n speed=Q_(900, \"RPM\"),\n Wx=0,\n Wy=-112814.91,\n k_t=0.15327,\n Cp=1915.24,\n rho=854.952,\n T_reserv=50,\n fat_mixt=0.52,\n T_muI=50,\n T_muF=80,\n mu_I=0.02,\n mu_F=0.01,\n sommerfeld_type=2,\n )\n\n return bearing\n"
] |
[
[
"numpy.linalg.solve",
"numpy.sqrt",
"numpy.arange",
"numpy.cos",
"numpy.linalg.norm",
"numpy.sin",
"numpy.concatenate",
"scipy.optimize.curve_fit",
"numpy.ones",
"scipy.optimize.minimize",
"numpy.zeros",
"numpy.sum"
]
] |
Zhangyongtao123/maskrcnn_benchmark
|
[
"059f04c26df2c1bd19dd7360ee2487ce3461da37"
] |
[
"tools/test_net.py"
] |
[
"# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.\n# Set up custom environment before nearly anything else is imported\n# NOTE: this should be the first import (no not reorder)\nfrom maskrcnn_benchmark.utils.env import setup_environment # noqa F401 isort:skip\n\nimport argparse\nimport os\n\nimport torch\nfrom maskrcnn_benchmark.config import cfg\nfrom maskrcnn_benchmark.data import make_data_loader\nfrom maskrcnn_benchmark.engine.inference import inference\nfrom maskrcnn_benchmark.modeling.detector import build_detection_model\nfrom maskrcnn_benchmark.utils.checkpoint import DetectronCheckpointer\nfrom maskrcnn_benchmark.utils.collect_env import collect_env_info\nfrom maskrcnn_benchmark.utils.comm import synchronize, get_rank\nfrom maskrcnn_benchmark.utils.logger import setup_logger\nfrom maskrcnn_benchmark.utils.miscellaneous import mkdir\n\n\n\n# Check if we can enable mixed-precision via apex.amp\nprint(torch.__version__)\nprint(torch.cuda.is_available())\nprint(torch.cuda.device_count())\nprint(torch.cuda.get_device_name(0))\nprint(torch.cuda.current_device())\n\n\ntry:\n from apex import amp\nexcept ImportError:\n raise ImportError('Use APEX for mixed precision via apex.amp')\n\n\ndef main():\n parser = argparse.ArgumentParser(description=\"PyTorch Object Detection Inference\")\n parser.add_argument(\n \"--config-file\",\n default=\"../configs/caffe2/e2e_mask_rcnn_R_50_FPN_1x_caffe2.yaml\",\n metavar=\"FILE\",\n help=\"path to config file\",\n )\n parser.add_argument(\"--local_rank\", type=int, default=0)\n parser.add_argument(\n \"opts\",\n help=\"Modify config options using the command-line\",\n default=None,\n nargs=argparse.REMAINDER,\n )\n\n args = parser.parse_args()\n\n num_gpus = int(os.environ[\"WORLD_SIZE\"]) if \"WORLD_SIZE\" in os.environ else 1\n distributed = num_gpus > 1\n\n if distributed:\n torch.cuda.set_device(args.local_rank)\n torch.distributed.init_process_group(\n backend=\"nccl\", init_method=\"env://\"\n )\n synchronize()\n\n cfg.merge_from_file(args.config_file)\n cfg.merge_from_list(args.opts)\n cfg.freeze()\n\n save_dir = \"\"\n logger = setup_logger(\"maskrcnn_benchmark\", save_dir, get_rank())\n logger.info(\"Using {} GPUs\".format(num_gpus))\n logger.info(cfg)\n\n logger.info(\"Collecting env info (might take some time)\")\n logger.info(\"\\n\" + collect_env_info())\n\n model = build_detection_model(cfg)\n model.to(cfg.MODEL.DEVICE)\n\n # Initialize mixed-precision if necessary\n use_mixed_precision = cfg.DTYPE == 'float16'\n amp_handle = amp.init(enabled=use_mixed_precision, verbose=cfg.AMP_VERBOSE)\n\n output_dir = cfg.OUTPUT_DIR\n checkpointer = DetectronCheckpointer(cfg, model, save_dir=output_dir)\n _ = checkpointer.load(cfg.MODEL.WEIGHT)\n\n iou_types = (\"bbox\",)\n if cfg.MODEL.MASK_ON:\n iou_types = iou_types + (\"segm\",)\n if cfg.MODEL.KEYPOINT_ON:\n iou_types = iou_types + (\"keypoints\",)\n output_folders = [None] * len(cfg.DATASETS.TEST)\n dataset_names = cfg.DATASETS.TEST\n if cfg.OUTPUT_DIR:\n for idx, dataset_name in enumerate(dataset_names):\n output_folder = os.path.join(cfg.OUTPUT_DIR, \"inference\", dataset_name)\n mkdir(output_folder)\n output_folders[idx] = output_folder\n data_loaders_val = make_data_loader(cfg, is_train=False, is_distributed=distributed)\n for output_folder, dataset_name, data_loader_val in zip(output_folders, dataset_names, data_loaders_val):\n inference(\n model,\n data_loader_val,\n dataset_name=dataset_name,\n iou_types=iou_types,\n box_only=False if cfg.MODEL.RETINANET_ON else 
cfg.MODEL.RPN_ONLY,\n device=cfg.MODEL.DEVICE,\n expected_results=cfg.TEST.EXPECTED_RESULTS,\n expected_results_sigma_tol=cfg.TEST.EXPECTED_RESULTS_SIGMA_TOL,\n output_folder=output_folder,\n )\n synchronize()\n\n\nif __name__ == \"__main__\":\n main()\n"
] |
[
[
"torch.distributed.init_process_group",
"torch.cuda.set_device",
"torch.cuda.current_device",
"torch.cuda.is_available",
"torch.cuda.get_device_name",
"torch.cuda.device_count"
]
] |
bwosh/DLR_CollaborationAndCompetition
|
[
"aee211990dbb889888df2397bdbd7e229acdc770"
] |
[
"utils/utils.py"
] |
[
"import numpy as np\nimport torch\n\ndef soft_update(local_model, target_model, tau):\n for target_param, local_param in zip(target_model.parameters(), local_model.parameters()):\n target_param.data.copy_(tau*local_param.data + (1.0-tau)*target_param.data)\n\ndef tensor(t, device):\n return torch.tensor(t, dtype=torch.float).to(device)\n\ndef hidden_init(layer):\n fan_in = layer.weight.data.size()[0]\n lim = 1. / np.sqrt(fan_in)\n return (-lim, lim)\n"
] |
[
[
"numpy.sqrt",
"torch.tensor"
]
] |
Ericknht/DataEngineer
|
[
"966dd45288ad9ec7bcea7fec5f198496dd54d0e9"
] |
[
"web_scrapper/transform/main.py"
] |
[
"import argparse\nimport hashlib\nimport nltk\nfrom nltk.corpus import stopwords\nimport logging\nlogging.basicConfig(level=logging.INFO)\nfrom urllib.parse import urlparse\n\nimport pandas as pd\n\n\nlogger = logging.getLogger(__name__)\n\n\ndef main(filename):\n logger.info('Starting cleaning process')\n\n df = _read_data(filename)\n newspaper_uid = _extract_newspaper_uid(filename)\n df = _add_newspaper_uid_column(df, newspaper_uid)\n df = _extract_host(df)\n df = _fill_missing_titles(df)\n df = _generate_uids_for_rows(df)\n df = _remove_new_lines_from_body(df)\n df = _add_tokenize(df)\n df = _remove_duplicate_entries(df, 'title')\n df = _drop_rows_with_missing_values(df)\n _save_data(df, filename)\n\n return df\n\n\ndef _read_data(filename):\n logger.info('Reading file {}'.format(filename))\n\n return pd.read_csv(filename)\n\n\ndef _extract_newspaper_uid(filename):\n logger.info('Extracting newspaper uid')\n newspaper_uid = filename.split('_')[0]\n\n logger.info('Newspaper uid detected: {}'.format(newspaper_uid))\n return newspaper_uid\n\n\ndef _add_newspaper_uid_column(df, newspaper_uid):\n logger.info('Filling newspaper_uid column with {}'.format(newspaper_uid))\n df['newspaper_uid'] = newspaper_uid\n\n return df\n\n\ndef _extract_host(df):\n logger.info('Extracting host from urls')\n df['host'] = df['url'].apply(lambda url: urlparse(url).netloc)\n\n return df\n\n\ndef _fill_missing_titles(df):\n logger.info('Filling missing titles')\n missing_title_mask = df['title'].isna()\n\n missing_titles = (df[missing_title_mask]['url']\n .str.extract(r'(?P<missing_titles>[^/]+)/$')\n .applymap(lambda title: title.split('-'))\n .applymap(lambda title_word_list: ' '.join(title_word_list))\n )\n\n df.loc[missing_title_mask, 'title'] = missing_titles.loc[:, 'missing_titles']\n\n return df\n\n\ndef _generate_uids_for_rows(df):\n logger.info('Generating uids for each row')\n uids = (df\n .apply(lambda row: hashlib.md5(bytes(row['url'].encode())), axis=1)\n .apply(lambda hash_object: hash_object.hexdigest())\n )\n df['uid'] = uids\n\n return df.set_index('uid')\n\n\ndef _remove_new_lines_from_body(df):\n logger.info('Remove new lines from body')\n\n stripped_body = (df\n .apply(lambda row: row['body'], axis=1)\n .apply(lambda body: list(body))\n .apply(lambda letters: list(map(lambda letter: letter.replace('\\n',''), letters)))\n .apply(lambda letters: ''.join(letters))\n )\n\n df['body'] = stripped_body\n\n return df\n\nstop_words = set(stopwords.words(('spanish')))\ndef _tokenize_columns(df, column_name):\n logger.info('Tokenize column: {}'.format(column_name))\n\n return (df\n .dropna()\n .apply(lambda row: nltk.word_tokenize(row[column_name]), axis=1)\n .apply(lambda tokens: list(filter(lambda token: token.isalpha(), tokens)))\n .apply(lambda tokens: list(map(lambda token: token.lower(), tokens)))\n .apply(lambda word_list: list(filter(lambda word: word not in stop_words, word_list)))\n .apply(lambda valid_word_list: len(valid_word_list))\n )\n\n\ndef _add_tokenize(df):\n logger.info('Add tokenize for title and body')\n\n df['n_tokens_title'] = _tokenize_columns(df, 'title')\n df['n_tokens_body'] = _tokenize_columns(df, 'body')\n\n return df\n\n\ndef _remove_duplicate_entries(df, column_name):\n logger.info('Removing duplicate entries')\n df.drop_duplicates(subset=[column_name], keep='first', inplace=True)\n\n return df\n\n\ndef _drop_rows_with_missing_values(df):\n logger.info('Dropping rows with missing values')\n return df.dropna()\n\ndef _save_data(df, filename):\n clean_filename = 
'clean_{}'.format(filename)\n logger.info('Saving data at location: {}'.format(clean_filename))\n df.to_csv(clean_filename)\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('filename',\n help='The path to the dirty data',\n type=str)\n\n args = parser.parse_args()\n\n df = main(args.filename)\n print(df)"
] |
[
[
"pandas.read_csv"
]
] |
aryavohra/ray
|
[
"6d884f1442c271329829ff0bceae588f3cd49d7d"
] |
[
"rllib/utils/exploration/ornstein_uhlenbeck_noise.py"
] |
[
"import numpy as np\nfrom typing import Optional, Union\n\nfrom ray.rllib.models.action_dist import ActionDistribution\nfrom ray.rllib.utils.annotations import override\nfrom ray.rllib.utils.exploration.gaussian_noise import GaussianNoise\nfrom ray.rllib.utils.framework import try_import_tf, try_import_torch, \\\n get_variable, TensorType\nfrom ray.rllib.utils.numpy import convert_to_numpy\nfrom ray.rllib.utils.schedules import Schedule\n\ntf1, tf, tfv = try_import_tf()\ntorch, _ = try_import_torch()\n\n\nclass OrnsteinUhlenbeckNoise(GaussianNoise):\n \"\"\"An exploration that adds Ornstein-Uhlenbeck noise to continuous actions.\n\n If explore=True, returns sampled actions plus a noise term X,\n which changes according to this formula:\n Xt+1 = -theta*Xt + sigma*N[0,stddev], where theta, sigma and stddev are\n constants. Also, some completely random period is possible at the\n beginning.\n If explore=False, returns the deterministic action.\n \"\"\"\n\n def __init__(self,\n action_space,\n *,\n framework: str,\n ou_theta: float = 0.15,\n ou_sigma: float = 0.2,\n ou_base_scale: float = 0.1,\n random_timesteps: int = 1000,\n initial_scale: float = 1.0,\n final_scale: float = 0.02,\n scale_timesteps: int = 10000,\n scale_schedule: Optional[Schedule] = None,\n **kwargs):\n \"\"\"Initializes an Ornstein-Uhlenbeck Exploration object.\n\n Args:\n action_space (Space): The gym action space used by the environment.\n ou_theta (float): The theta parameter of the Ornstein-Uhlenbeck\n process.\n ou_sigma (float): The sigma parameter of the Ornstein-Uhlenbeck\n process.\n ou_base_scale (float): A fixed scaling factor, by which all OU-\n noise is multiplied. NOTE: This is on top of the parent\n GaussianNoise's scaling.\n random_timesteps (int): The number of timesteps for which to act\n completely randomly. Only after this number of timesteps, the\n `self.scale` annealing process will start (see below).\n initial_scale (float): The initial scaling weight to multiply\n the noise with.\n final_scale (float): The final scaling weight to multiply\n the noise with.\n scale_timesteps (int): The timesteps over which to linearly anneal\n the scaling factor (after(!) 
having used random actions for\n `random_timesteps` steps.\n scale_schedule (Optional[Schedule]): An optional Schedule object\n to use (instead of constructing one from the given parameters).\n framework (Optional[str]): One of None, \"tf\", \"torch\".\n \"\"\"\n # The current OU-state value (gets updated each time, an eploration\n # action is computed).\n self.ou_state = get_variable(\n np.array(action_space.low.size * [.0], dtype=np.float32),\n framework=framework,\n tf_name=\"ou_state\",\n torch_tensor=True,\n device=None)\n\n super().__init__(\n action_space,\n framework=framework,\n random_timesteps=random_timesteps,\n initial_scale=initial_scale,\n final_scale=final_scale,\n scale_timesteps=scale_timesteps,\n scale_schedule=scale_schedule,\n stddev=1.0, # Force `self.stddev` to 1.0.\n **kwargs)\n self.ou_theta = ou_theta\n self.ou_sigma = ou_sigma\n self.ou_base_scale = ou_base_scale\n # Now that we know the device, move ou_state there, in case of PyTorch.\n if self.framework == \"torch\" and self.device is not None:\n self.ou_state = self.ou_state.to(self.device)\n\n @override(GaussianNoise)\n def _get_tf_exploration_action_op(self, action_dist: ActionDistribution,\n explore: Union[bool, TensorType],\n timestep: Union[int, TensorType]):\n ts = timestep if timestep is not None else self.last_timestep\n scale = self.scale_schedule(ts)\n\n # The deterministic actions (if explore=False).\n deterministic_actions = action_dist.deterministic_sample()\n\n # Apply base-scaled and time-annealed scaled OU-noise to\n # deterministic actions.\n gaussian_sample = tf.random.normal(\n shape=[self.action_space.low.size], stddev=self.stddev)\n ou_new = self.ou_theta * -self.ou_state + \\\n self.ou_sigma * gaussian_sample\n if self.framework in [\"tf2\", \"tfe\"]:\n self.ou_state.assign_add(ou_new)\n ou_state_new = self.ou_state\n else:\n ou_state_new = tf1.assign_add(self.ou_state, ou_new)\n high_m_low = self.action_space.high - self.action_space.low\n high_m_low = tf.where(\n tf.math.is_inf(high_m_low), tf.ones_like(high_m_low), high_m_low)\n noise = scale * self.ou_base_scale * ou_state_new * high_m_low\n stochastic_actions = tf.clip_by_value(\n deterministic_actions + noise,\n self.action_space.low * tf.ones_like(deterministic_actions),\n self.action_space.high * tf.ones_like(deterministic_actions))\n\n # Stochastic actions could either be: random OR action + noise.\n random_actions, _ = \\\n self.random_exploration.get_tf_exploration_action_op(\n action_dist, explore)\n exploration_actions = tf.cond(\n pred=tf.convert_to_tensor(ts < self.random_timesteps),\n true_fn=lambda: random_actions,\n false_fn=lambda: stochastic_actions)\n\n # Chose by `explore` (main exploration switch).\n action = tf.cond(\n pred=tf.constant(explore, dtype=tf.bool)\n if isinstance(explore, bool) else explore,\n true_fn=lambda: exploration_actions,\n false_fn=lambda: deterministic_actions)\n # Logp=always zero.\n batch_size = tf.shape(deterministic_actions)[0]\n logp = tf.zeros(shape=(batch_size, ), dtype=tf.float32)\n\n # Increment `last_timestep` by 1 (or set to `timestep`).\n if self.framework in [\"tf2\", \"tfe\"]:\n if timestep is None:\n self.last_timestep.assign_add(1)\n else:\n self.last_timestep.assign(timestep)\n else:\n assign_op = (tf1.assign_add(self.last_timestep, 1)\n if timestep is None else tf1.assign(\n self.last_timestep, timestep))\n with tf1.control_dependencies([assign_op, ou_state_new]):\n action = tf.identity(action)\n logp = tf.identity(logp)\n\n return action, logp\n\n @override(GaussianNoise)\n 
def _get_torch_exploration_action(self, action_dist: ActionDistribution,\n explore: bool,\n timestep: Union[int, TensorType]):\n # Set last timestep or (if not given) increase by one.\n self.last_timestep = timestep if timestep is not None else \\\n self.last_timestep + 1\n\n # Apply exploration.\n if explore:\n # Random exploration phase.\n if self.last_timestep < self.random_timesteps:\n action, _ = \\\n self.random_exploration.get_torch_exploration_action(\n action_dist, explore=True)\n # Apply base-scaled and time-annealed scaled OU-noise to\n # deterministic actions.\n else:\n det_actions = action_dist.deterministic_sample()\n scale = self.scale_schedule(self.last_timestep)\n gaussian_sample = scale * torch.normal(\n mean=torch.zeros(self.ou_state.size()), std=1.0) \\\n .to(self.device)\n ou_new = self.ou_theta * -self.ou_state + \\\n self.ou_sigma * gaussian_sample\n self.ou_state += ou_new\n high_m_low = torch.from_numpy(\n self.action_space.high - self.action_space.low). \\\n to(self.device)\n high_m_low = torch.where(\n torch.isinf(high_m_low),\n torch.ones_like(high_m_low).to(self.device), high_m_low)\n noise = scale * self.ou_base_scale * self.ou_state * high_m_low\n\n action = torch.min(\n torch.max(\n det_actions + noise,\n torch.tensor(\n self.action_space.low,\n dtype=torch.float32,\n device=self.device)),\n torch.tensor(\n self.action_space.high,\n dtype=torch.float32,\n device=self.device))\n\n # No exploration -> Return deterministic actions.\n else:\n action = action_dist.deterministic_sample()\n\n # Logp=always zero.\n logp = torch.zeros(\n (action.size()[0], ), dtype=torch.float32, device=self.device)\n\n return action, logp\n\n @override(GaussianNoise)\n def get_state(self, sess: Optional[\"tf.Session\"] = None):\n \"\"\"Returns the current scale value.\n\n Returns:\n Union[float,tf.Tensor[float]]: The current scale value.\n \"\"\"\n if sess:\n return sess.run(\n dict(self._tf_state_op, **{\n \"ou_state\": self.ou_state,\n }))\n\n state = super().get_state()\n return dict(\n state, **{\n \"ou_state\": convert_to_numpy(self.ou_state)\n if self.framework != \"tf\" else self.ou_state,\n })\n\n @override(GaussianNoise)\n def set_state(self, state: dict,\n sess: Optional[\"tf.Session\"] = None) -> None:\n if self.framework == \"tf\":\n self.ou_state.load(state[\"ou_state\"], session=sess)\n else:\n self.ou_state = state[\"ou_state\"]\n super().set_state(state, sess=sess)\n"
] |
[
[
"numpy.array"
]
] |
mahmlk/ENSF619.2_Assignments
|
[
"e1cff2159a23f3482e632cd460ac8d14c60f365e"
] |
[
"Final Project/Model/main.py"
] |
[
"from __future__ import print_function\nimport torch, os, copy, time, pickle\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\nimport torchvision\nfrom torchvision import datasets, transforms\nfrom torch.autograd import Variable\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom PIL import Image\nimport pandas as pd\nfrom torchvision.datasets.folder import IMG_EXTENSIONS\nfrom torchvision.datasets import ImageFolder\nfrom sklearn.metrics import confusion_matrix\nimport glob, pickle\nimport seaborn as sn\nimport argparse\nstart_time= time.time()\n\n\n\nparser = argparse.ArgumentParser(description='COVID-19 Detection from X-ray Images')\nparser.add_argument('--test_covid_path', type=str, default='./data/test/covid/',\n help='COVID-19 test samples directory')\nparser.add_argument('--test_non_covid_path', type=str, default='./data/test/non/',\n help='Non-COVID test samples directory')\nparser.add_argument('--trained_model_path', type=str, default='./covid_resnet18_epoch2.pt',\n help='The path and name of trained model')\n\nparser.add_argument('--cut_off_threshold', type=float, default= 0.2, \n help='cut-off threshold. Any sample with probability higher than this is considered COVID-19 (default: 0.2)')\nparser.add_argument('--batch_size', type=int, default=20, \n help='input batch size for training (default: 20)')\nparser.add_argument('--num_workers', type=int, default=0, \n help='number of workers to train (default: 0)')\n\n\nargs = parser.parse_args()\n\n\n\n\n\n\n###Utility function to find sensitivity and specificity for different cut-off thresholds\ndef find_sens_spec( covid_prob, noncovid_prob, thresh):\n sensitivity= (covid_prob >= thresh).sum() / (len(covid_prob)+1e-10)\n specificity= (noncovid_prob < thresh).sum() / (len(noncovid_prob)+1e-10)\n print(\"sensitivity= %.3f, specificity= %.3f\" %(sensitivity,specificity))\n return sensitivity, specificity\n \ndevice = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\n\n\n\nclass_names = ['covid','non']\n\n\n\n\n###Test on trained model\nmodel_name= args.trained_model_path\nmodel= torch.load(model_name, map_location='cpu') \nmodel.eval()\n\n\n\n\n###loading new images\nimsize= 224\nloader = transforms.Compose([transforms.Resize(imsize), \n transforms.CenterCrop(224), \n transforms.ToTensor(),\n transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])\n ])\n\ndef image_loader(image_name):\n \"\"\"load image, returns cuda tensor\"\"\"\n image = Image.open(image_name).convert(\"RGB\")\n image = loader(image).float()\n image = Variable(image, requires_grad=True)\n image = image.unsqueeze(0) #this is for VGG, may not be needed for ResNet\n return image\n\nsm = torch.nn.Softmax()\n\n\n\n###Get the predicted probabilities of all samples\ntest_covid = glob.glob(\"%s*\" %args.test_covid_path)\ntest_non = glob.glob(\"%s*\" %args.test_non_covid_path)\n\ncovid_pred= np.zeros([len(test_covid),1]).astype(int)\nnon_pred = np.zeros([len(test_non),1]).astype(int)\n\ncovid_prob= np.zeros([len(test_covid),1])\nnon_prob = np.zeros([len(test_non),1])\n\n\nfor i in range(len(test_covid)):\n cur_img= image_loader(test_covid[i])\n model_output= model(cur_img)\n cur_pred = model_output.max(1, keepdim=True)[1]\n cur_prob = sm(model_output)\n covid_prob[i,:]= cur_prob.data.numpy()[0,0]\n print(\"%03d Covid predicted label:%s\" %(i, class_names[int(cur_pred.data.numpy())]) )\n\n\nfor i in range(len(test_non)):\n cur_img= image_loader(test_non[i])\n model_output= model(cur_img)\n cur_pred = 
model_output.max(1, keepdim=True)[1] \n cur_prob = sm(model_output)\n non_prob[i,:]= cur_prob.data.numpy()[0,0]\n print(\"%03d Non-Covid predicted label:%s\" %(i, class_names[int(cur_pred.data.numpy())]) )\n\n\n\n#pickle.dump( covid_prob, open( \"./results/covid_prob_%s.p\" %model_name, \"wb\" ) )\n#pickle.dump( non_prob, open( \"./results/non_prob_%s.p\" %model_name, \"wb\" ) )\n\n\n\n####find sensitivity and specificity\nthresh= args.cut_off_threshold\nsensitivity_40, specificity= find_sens_spec( covid_prob, non_prob, thresh)\n\n\n\n####derive labels based on probabilities and cut-off threshold\ncovid_pred = np.where( covid_prob >thresh, 1, 0)\nnon_pred = np.where( non_prob >thresh, 1, 0)\n\n\n\n####derive confusion-matrix\ncovid_list= [int(covid_pred[i]) for i in range(len(covid_pred))]\ncovid_count = [(x, covid_list.count(x)) for x in set(covid_list)]\n\nnon_list= [int(non_pred[i]) for i in range(len(non_pred))]\nnon_count = [(x, non_list.count(x)) for x in set(non_list)]\n\ny_pred_list= covid_list+non_list\ny_test_list= [1 for i in range(len(covid_list))]+[0 for i in range(len(non_list))]\n\ny_pred= np.asarray(y_pred_list, dtype=np.int64)\ny_test= np.asarray(y_test_list, dtype=np.int64)\n\ncnf_matrix = confusion_matrix(y_test, y_pred)\nnp.set_printoptions(precision=2) \n\n#Plot normalized confusion matrix\ndf_cm = pd.DataFrame(cnf_matrix, index = [i for i in class_names],\n columns = [i for i in class_names])\n\n\nax = sn.heatmap(df_cm, cmap=plt.cm.Blues, annot=True, cbar=False, fmt='g', xticklabels= ['Non-COVID-19','COVID-19'], yticklabels= ['Non-COVID-19','COVID-19'])\nax.set_title(\"Confusion matrix\")\nplt.savefig('./confusion_matrix.png') #dpi = 200\n\n####plot the predicted probability distribution\nbins = np.linspace(0, 1, 25)\nplt.subplot(211)\nplt.hist(covid_prob, bins, color= 'blue', histtype = 'bar', label='Probabilities of COVID-19 Samples')\nplt.ylim([0,10])\nplt.legend(loc='upper center')\nplt.subplot(212)\nplt.hist(non_prob, bins, color= 'green', label='Probabilities of Non-COVID Samples')\nplt.legend(loc='upper center')\nplt.savefig('./scores_histogram.png') #dpi = 200\n\nend_time= time.time()\ntot_time= end_time- start_time\nprint(\"\\nTotal Time:\", tot_time)\n"
] |
[
[
"torch.nn.Softmax",
"matplotlib.pyplot.legend",
"numpy.linspace",
"torch.load",
"numpy.asarray",
"matplotlib.pyplot.ylim",
"numpy.set_printoptions",
"sklearn.metrics.confusion_matrix",
"pandas.DataFrame",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.subplot",
"torch.cuda.is_available",
"numpy.where",
"matplotlib.pyplot.hist",
"torch.autograd.Variable"
]
] |
GT4SD/-reinvent_models
|
[
"e1cf00d1b24fe5f39354e34829adc25460da84e2"
] |
[
"reinvent_models/link_invent/networks/attention_layer.py"
] |
[
"import math\r\n\r\nimport torch\r\nfrom torch import nn as tnn\r\n\r\n\r\nclass AttentionLayer(tnn.Module):\r\n\r\n def __init__(self, num_dimensions: int):\r\n super(AttentionLayer, self).__init__()\r\n\r\n self.num_dimensions = num_dimensions\r\n\r\n self._attention_linear = tnn.Sequential(\r\n tnn.Linear(self.num_dimensions*2, self.num_dimensions),\r\n tnn.Tanh()\r\n )\r\n\r\n def forward(self, padded_seqs: torch.Tensor, encoder_padded_seqs: torch.Tensor, decoder_mask: torch.Tensor) \\\r\n -> (torch.Tensor, torch.Tensor): # pylint: disable=arguments-differ\r\n \"\"\"\r\n Performs the forward pass.\r\n :param padded_seqs: A tensor with the output sequences (batch, seq_d, dim).\r\n :param encoder_padded_seqs: A tensor with the encoded input sequences (batch, seq_e, dim).\r\n :param decoder_mask: A tensor that represents the encoded input mask.\r\n :return : Two tensors: one with the modified logits and another with the attention weights.\r\n \"\"\"\r\n # scaled dot-product\r\n # (batch, seq_d, 1, dim)*(batch, 1, seq_e, dim) => (batch, seq_d, seq_e*)\r\n attention_weights = (padded_seqs.unsqueeze(dim=2)*encoder_padded_seqs.unsqueeze(dim=1))\\\r\n .sum(dim=3).div(math.sqrt(self.num_dimensions))\\\r\n .softmax(dim=2)\r\n # (batch, seq_d, seq_e*)@(batch, seq_e, dim) => (batch, seq_d, dim)\r\n attention_context = attention_weights.bmm(encoder_padded_seqs)\r\n\r\n return (self._attention_linear(torch.cat([padded_seqs, attention_context], dim=2))*decoder_mask,\r\n attention_weights)\r\n"
] |
[
[
"torch.nn.Linear",
"torch.cat",
"torch.nn.Tanh"
]
] |
tridungduong16/fairCE
|
[
"b13c72c253d875e68c0294b91aaddcbf93460d92"
] |
[
"src/counterfactual_explanation/flow_ssl/data/nlp_datasets.py"
] |
[
"import os.path\nimport numpy as np\nimport torch\nfrom torch.utils.data import Dataset\nimport subprocess\n\nGDRIVE_DOWNLOAD = lambda FID,FNAME: f\"wget --load-cookies /tmp/cookies.txt \\\"https://docs.google.com/uc?export=download&confirm=$(wget\\\n --quiet --save-cookies /tmp/cookies.txt --keep-session-cookies --no-check-certificate \\\n 'https://docs.google.com/uc?export=download&id={FID}' -O- | \\\n sed -rn 's/.*confirm=([0-9A-Za-z_]+).*/\\\\1\\\\n/p')&id={FID}\\\" -O {FNAME} && rm -rf /tmp/cookies.txt\"\n\nclass AG_News(Dataset):\n num_classes=4\n class_weights=None\n ignored_index=-100\n dim = 768\n train_fid,train_fname = '1k40hM-x91OPUwGXhWVU7NoCUOXXEuFR4','ag_news_train.npz'\n test_fid,test_fname = '1cPzyv6UW6Cau87fDj3hxe523UL4qw4EJ','ag_news_test.npz'\n def __init__(self, root=os.path.expanduser('~/datasets/AGNEWS/'), train=True, transform=None, target_transform=None):\n if not os.path.exists(root+self.train_fname):\n os.makedirs(root,exist_ok=True)\n subprocess.call(GDRIVE_DOWNLOAD(self.train_fid,self.train_fname),shell=True)\n subprocess.call(GDRIVE_DOWNLOAD(self.test_fid,self.test_fname),shell=True)\n subprocess.call(f'cp {self.train_fname} {root}',shell=True)\n subprocess.call(f'cp {self.test_fname} {root}',shell=True)\n\n if transform is not None:\n raise ValueError(\"Transform should be `None`\")\n if train:\n path = os.path.join(root, self.train_fname) \n else:\n path = os.path.join(root, self.test_fname) \n\n data_labels = np.load(path)\n data, labels = data_labels[\"encodings\"], data_labels[\"labels\"]\n data = torch.from_numpy(data)\n labels = torch.from_numpy(labels)\n if train:\n self.train_data = data\n self.train_labels = labels\n else:\n self.test_data = data\n self.test_labels = labels\n self.train = train\n\n def __len__(self):\n if self.train:\n return len(self.train_labels)\n else:\n return len(self.test_labels)\n\n def __getitem__(self, idx):\n if self.train:\n return self.train_data[idx], self.train_labels[idx]\n else:\n return self.test_data[idx], self.test_labels[idx]\n\n\n\nclass YAHOO(Dataset):\n num_classes=10\n class_weights=None\n ignored_index=-100\n dim = 768\n train_fid,train_fname = '1nkf9k3Cqfxxpk05c0yN0HVVtijrVWjde','yahoo_train.npz'\n test_fid,test_fname = '1Z20AEPvX_mVle_1SFCyrWYBiglmZ6_wv','yahoo_test.npz'\n def __init__(self, root=os.path.expanduser('~/datasets/YAHOO/'), train=True):\n super().__init__()\n if not os.path.exists(root+self.train_fname):\n os.makedirs(root,exist_ok=True)\n subprocess.call(GDRIVE_DOWNLOAD(self.train_fid,self.train_fname),shell=True)\n subprocess.call(GDRIVE_DOWNLOAD(self.test_fid,self.test_fname),shell=True)\n subprocess.call(f'cp {self.train_fname} {root}',shell=True)\n subprocess.call(f'cp {self.test_fname} {root}',shell=True)\n train_path = os.path.join(root, self.train_fname) \n test_path = os.path.join(root, self.test_fname)\n train_data = np.load(train_path)\n test_data = np.load(test_path)\n self.X_train, self.y_train = train_data[\"encodings\"], train_data[\"labels\"]\n self.X_test, self.y_test = test_data[\"encodings\"], test_data[\"labels\"]\n self.X_test = self.X_test[:10000]\n self.y_test = self.y_test[:10000]\n self.X = torch.from_numpy(self.X_train if train else self.X_test).float()\n self.Y = torch.from_numpy(self.y_train if train else self.y_test).long()\n\n def __len__(self):\n return self.X.shape[0]\n\n def __getitem__(self, idx):\n return self.X[idx],self.Y[idx]\n\n \n\n\n"
] |
[
[
"numpy.load",
"torch.from_numpy"
]
] |
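The `AG_News` and `YAHOO` loaders in this record reduce to the two APIs listed for it: `numpy.load` on an `.npz` archive whose keys are `encodings` and `labels`, followed by `torch.from_numpy` to turn those arrays into tensors. A minimal sketch of that shared pattern, assuming a toy file named `toy_agnews.npz`; the array shapes and the `DataLoader` wrapper are illustrative assumptions, not taken from the original repository:

```python
import numpy as np
import torch
from torch.utils.data import DataLoader, TensorDataset

# Write a small .npz with the same keys the loaders above expect
# ("encodings" and "labels"); the shapes are illustrative only.
encodings = np.random.randn(100, 768).astype(np.float32)
labels = np.random.randint(0, 4, size=100)
np.savez("toy_agnews.npz", encodings=encodings, labels=labels)

# Load it back the same way AG_News.__init__ does.
data = np.load("toy_agnews.npz")
X = torch.from_numpy(data["encodings"]).float()
y = torch.from_numpy(data["labels"]).long()

# Wrap the tensors for batched iteration.
loader = DataLoader(TensorDataset(X, y), batch_size=16, shuffle=True)
xb, yb = next(iter(loader))
print(xb.shape, yb.shape)  # torch.Size([16, 768]) torch.Size([16])
```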
ara-ta3/mnist
|
[
"5af3cc964145dc94cab3cc860819ea37717a346f"
] |
[
"python/mnist/functions.py"
] |
[
"import numpy as np\n\n\ndef cross_entropy_error(y, t):\n if y.ndim == 1:\n t = t.reshape(1, t.size)\n y = y.reshape(1, y.size)\n if t.size == y.size:\n t = t.argmax(axis=1)\n\n batch_size = y.shape[0]\n return -np.sum(np.log(y[np.arange(batch_size), t])) / batch_size\n\n\ndef numerical_gradient(f, x):\n h = 1e-4\n grad = np.zeros_like(x)\n\n it = np.nditer(x, flags=['multi_index'], op_flags=['readwrite'])\n while not it.finished:\n idx = it.multi_index\n tmp_val = x[idx]\n x[idx] = float(tmp_val) + h\n fxh1 = f(x)\n\n x[idx] = tmp_val - h\n fxh2 = f(x)\n grad[idx] = (fxh1 - fxh2) / (2*h)\n x[idx] = tmp_val\n it.iternext()\n\n return grad\n"
] |
[
[
"numpy.arange",
"numpy.zeros_like",
"numpy.nditer"
]
] |
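The `numerical_gradient` helper in this record walks every element of `x` with `np.nditer` and applies a central difference. A small sketch of how it can be checked against an analytic gradient: the routine below mirrors the snippet above, while the quadratic test function and tolerance are assumptions added for illustration.

```python
import numpy as np

def numerical_gradient(f, x):
    # Central-difference gradient, iterating element-wise with np.nditer
    # as in the functions.py snippet above.
    h = 1e-4
    grad = np.zeros_like(x)
    it = np.nditer(x, flags=['multi_index'], op_flags=['readwrite'])
    while not it.finished:
        idx = it.multi_index
        tmp = x[idx]
        x[idx] = tmp + h
        fxh1 = f(x)
        x[idx] = tmp - h
        fxh2 = f(x)
        grad[idx] = (fxh1 - fxh2) / (2 * h)
        x[idx] = tmp  # restore the original value
        it.iternext()
    return grad

# Check against the analytic gradient of f(x) = sum(x_i^2), which is 2x.
x = np.array([[1.0, -2.0], [0.5, 3.0]])
num = numerical_gradient(lambda v: np.sum(v ** 2), x)
print(np.allclose(num, 2 * x, atol=1e-6))  # True
```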
rwest/RMG-Py
|
[
"54e645b17a8e9a1652c9e60b9251444bc6c25e5f"
] |
[
"rmgpy/data/thermo.py"
] |
[
"#!/usr/bin/env python3\n\n###############################################################################\n# #\n# RMG - Reaction Mechanism Generator #\n# #\n# Copyright (c) 2002-2021 Prof. William H. Green ([email protected]), #\n# Prof. Richard H. West ([email protected]) and the RMG Team ([email protected]) #\n# #\n# Permission is hereby granted, free of charge, to any person obtaining a #\n# copy of this software and associated documentation files (the 'Software'), #\n# to deal in the Software without restriction, including without limitation #\n# the rights to use, copy, modify, merge, publish, distribute, sublicense, #\n# and/or sell copies of the Software, and to permit persons to whom the #\n# Software is furnished to do so, subject to the following conditions: #\n# #\n# The above copyright notice and this permission notice shall be included in #\n# all copies or substantial portions of the Software. #\n# #\n# THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, EXPRESS OR #\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, #\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE #\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER #\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING #\n# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER #\n# DEALINGS IN THE SOFTWARE. #\n# #\n###############################################################################\n\n\"\"\"\n\n\"\"\"\nimport itertools\nimport logging\nimport math\nimport os.path\nimport re\nimport time\nfrom copy import deepcopy\n\nimport numpy as np\n\nimport rmgpy.constants as constants\nimport rmgpy.molecule\nimport rmgpy.quantity\nfrom rmgpy.data.base import Database, Entry, make_logic_node, DatabaseError\nfrom rmgpy.ml.estimator import MLEstimator\nfrom rmgpy.molecule import Molecule, Bond, Group\nfrom rmgpy.species import Species\nfrom rmgpy.thermo import NASAPolynomial, NASA, ThermoData, Wilhoit\nfrom rmgpy.data.surface import MetalDatabase\nfrom rmgpy import settings\n\n#: This dictionary is used to add multiplicity to species label\n_multiplicity_labels = {1: 'S', 2: 'D', 3: 'T', 4: 'Q', 5: 'V'}\n\n\n################################################################################\n\ndef save_entry(f, entry):\n \"\"\"\n Write a Pythonic string representation of the given `entry` in the thermo\n database to the file object `f`.\n \"\"\"\n\n f.write('entry(\\n')\n f.write(' index = {0:d},\\n'.format(entry.index))\n f.write(' label = \"{0}\",\\n'.format(entry.label))\n\n if isinstance(entry.item, Molecule):\n f.write(' molecule = \\n')\n f.write('\"\"\"\\n')\n f.write(entry.item.to_adjacency_list(remove_h=False))\n f.write('\"\"\",\\n')\n elif isinstance(entry.item, Group):\n f.write(' group = \\n')\n f.write('\"\"\"\\n')\n f.write(entry.item.to_adjacency_list())\n f.write('\"\"\",\\n')\n else:\n f.write(' group = \"{0}\",\\n'.format(entry.item))\n\n if isinstance(entry.data, ThermoData):\n f.write(' thermo = ThermoData(\\n')\n f.write(' Tdata = {0!r},\\n'.format(entry.data.Tdata))\n f.write(' Cpdata = {0!r},\\n'.format(entry.data.Cpdata))\n f.write(' H298 = {0!r},\\n'.format(entry.data.H298))\n f.write(' S298 = {0!r},\\n'.format(entry.data.S298))\n if entry.data.Tmin is not None:\n f.write(' Tmin = {0!r},\\n'.format(entry.data.Tmin))\n if entry.data.Tmax is not None:\n f.write(' Tmax = {0!r},\\n'.format(entry.data.Tmax))\n f.write(' ),\\n')\n elif isinstance(entry.data, Wilhoit):\n f.write(' 
thermo = Wilhoit(\\n')\n f.write(' cp0 = {0!r},\\n'.format(entry.data.cp0))\n f.write(' cpInf = {0!r},\\n'.format(entry.data.cpInf))\n f.write(' a0 = {0:g},\\n'.format(entry.data.a0))\n f.write(' a1 = {0:g},\\n'.format(entry.data.a1))\n f.write(' a2 = {0:g},\\n'.format(entry.data.a2))\n f.write(' a3 = {0:g},\\n'.format(entry.data.a3))\n f.write(' B = {0!r},\\n'.format(entry.data.B))\n f.write(' H0 = {0!r},\\n'.format(entry.data.H0))\n f.write(' S0 = {0!r},\\n'.format(entry.data.S0))\n if entry.data.Tmin is not None:\n f.write(' Tmin = {0!r},\\n'.format(entry.data.Tmin))\n if entry.data.Tmax is not None:\n f.write(' Tmax = {0!r},\\n'.format(entry.data.Tmax))\n f.write(' ),\\n')\n elif isinstance(entry.data, NASA):\n f.write(' thermo = NASA(\\n')\n f.write(' polynomials = [\\n')\n for poly in entry.data.polynomials:\n f.write(' {0!r},\\n'.format(poly))\n f.write(' ],\\n')\n if entry.data.Tmin is not None:\n f.write(' Tmin = {0!r},\\n'.format(entry.data.Tmin))\n if entry.data.Tmax is not None:\n f.write(' Tmax = {0!r},\\n'.format(entry.data.Tmax))\n if entry.data.E0 is not None:\n f.write(' E0 = {0!r},\\n'.format(entry.data.E0))\n if entry.data.Cp0 is not None:\n f.write(' Cp0 = {0!r},\\n'.format(entry.data.Cp0))\n if entry.data.CpInf is not None:\n f.write(' CpInf = {0!r},\\n'.format(entry.data.CpInf))\n f.write(' ),\\n')\n else:\n f.write(' thermo = {0!r},\\n'.format(entry.data))\n\n if entry.reference is not None:\n f.write(' reference = {0!r},\\n'.format(entry.reference))\n if entry.reference_type != \"\":\n f.write(' referenceType = \"{0}\",\\n'.format(entry.reference_type))\n f.write(f' shortDesc = \"\"\"{entry.short_desc.strip()}\"\"\",\\n')\n f.write(f' longDesc = \\n\"\"\"\\n{entry.long_desc.strip()}\\n\"\"\",\\n')\n if entry.rank:\n f.write(\" rank = {0},\\n\".format(entry.rank))\n\n if entry.metal:\n f.write(' metal = \"{0}\",\\n'.format(entry.metal))\n if entry.facet:\n f.write(' facet = \"{0}\",\\n'.format(entry.facet))\n if entry.site:\n f.write(' site = \"{0}\",\\n'.format(entry.site))\n\n f.write(')\\n\\n')\n\n\ndef generate_old_library_entry(data):\n \"\"\"\n Return a list of values used to save entries to the old-style RMG\n thermo database based on the thermodynamics object `data`.\n \"\"\"\n if isinstance(data, ThermoData):\n return '{0:9g} {1:9g} {2:9g} {3:9g} {4:9g} {5:9g} {6:9g} {7:9g} {8:9g} {9:9g} {10:9g} {11:9g}'.format(\n data.H298.value_si / 4184.,\n data.S298.value_si / 4.184,\n data.Cpdata.value_si[0] / 4.184,\n data.Cpdata.value_si[1] / 4.184,\n data.Cpdata.value_si[2] / 4.184,\n data.Cpdata.value_si[3] / 4.184,\n data.Cpdata.value_si[4] / 4.184,\n data.Cpdata.value_si[5] / 4.184,\n data.Cpdata.value_si[6] / 4.184,\n data.H298.uncertainty / 4184.,\n data.S298.uncertainty / 4.184,\n max(data.Cpdata.uncertainty) / 4.184,\n )\n elif isinstance(data, str):\n return data\n else:\n return '{0:9g} {1:9g} {2:9g} {3:9g} {4:9g} {5:9g} {6:9g} {7:9g} {8:9g} {9:9g} {10:9g} {11:9g}'.format(\n data.get_enthalpy(298) / 4184.,\n data.get_entropy(298) / 4.184,\n data.get_heat_capacity(300) / 4.184,\n data.get_heat_capacity(400) / 4.184,\n data.get_heat_capacity(500) / 4.184,\n data.get_heat_capacity(600) / 4.184,\n data.get_heat_capacity(800) / 4.184,\n data.get_heat_capacity(1000) / 4.184,\n data.get_heat_capacity(1500) / 4.184,\n 0,\n 0,\n 0,\n )\n\n\ndef process_old_library_entry(data):\n \"\"\"\n Process a list of parameters `data` as read from an old-style RMG\n thermo database, returning the corresponding thermodynamics object.\n \"\"\"\n return ThermoData(\n Tdata=([300, 
400, 500, 600, 800, 1000, 1500], \"K\"),\n Cpdata=([float(d) for d in data[2:9]], \"cal/(mol*K)\", \"+|-\", float(data[11])),\n H298=(float(data[0]), \"kcal/mol\", \"+|-\", float(data[9])),\n S298=(float(data[1]), \"cal/(mol*K)\", \"+|-\", float(data[10])),\n )\n\n\ndef add_thermo_data(thermo_data1, thermo_data2, group_additivity=False, verbose=False):\n \"\"\"\n Add the thermodynamic data `thermo_data2` to the data `thermo_data1`,\n and return `thermo_data1`.\n\n If `group_additivity` is True, append comments related to group additivity estimation\n If `verbose` is False, omit the comments from a \"zero entry\", whose H298, S298, and Cp are all 0.\n If `verbose` is True, or thermo_data2 is not a zero entry, add thermo_data2.comment to thermo_data1.comment.\n \"\"\"\n if (len(thermo_data1.Tdata.value_si) != len(thermo_data2.Tdata.value_si) or\n any([T1 != T2 for T1, T2 in zip(thermo_data1.Tdata.value_si, thermo_data2.Tdata.value_si)])):\n raise ValueError('Cannot add these ThermoData objects due to their having different temperature points.')\n\n for i in range(thermo_data1.Tdata.value_si.shape[0]):\n thermo_data1.Cpdata.value_si[i] += thermo_data2.Cpdata.value_si[i]\n thermo_data1.H298.value_si += thermo_data2.H298.value_si\n thermo_data1.S298.value_si += thermo_data2.S298.value_si\n\n test_zero = sum(abs(value) for value in\n [thermo_data2.H298.value_si, thermo_data2.S298.value_si] + thermo_data2.Cpdata.value_si.tolist())\n # Used to check if all of the entries in thermo_data2 are zero\n\n if group_additivity:\n if verbose or test_zero != 0:\n # If verbose==True or test_zero!=0, add thermo_data2.comment to thermo_data1.comment.\n if thermo_data1.comment:\n thermo_data1.comment += ' + {0}'.format(thermo_data2.comment)\n else:\n thermo_data1.comment = 'Thermo group additivity estimation: ' + thermo_data2.comment\n\n return thermo_data1\n\n\ndef remove_thermo_data(thermo_data1, thermo_data2, group_additivity=False, verbose=False):\n \"\"\"\n Remove the thermodynamic data `thermo_data2` from the data `thermo_data1`,\n and return `thermo_data1`.\n If `verbose` is True, append ' - thermo_data2.comment' to the thermo_data1.comment.\n If `verbose` is False, remove the thermo_data2.comment from the thermo_data1.comment.\n \"\"\"\n if (len(thermo_data1.Tdata.value_si) != len(thermo_data2.Tdata.value_si) or\n any([T1 != T2 for T1, T2 in zip(thermo_data1.Tdata.value_si, thermo_data2.Tdata.value_si)])):\n raise ValueError('Cannot take the difference between these ThermoData objects due to their having different '\n 'temperature points.')\n\n for i in range(thermo_data1.Tdata.value_si.shape[0]):\n thermo_data1.Cpdata.value_si[i] -= thermo_data2.Cpdata.value_si[i]\n thermo_data1.H298.value_si -= thermo_data2.H298.value_si\n thermo_data1.S298.value_si -= thermo_data2.S298.value_si\n\n if group_additivity:\n if verbose:\n thermo_data1.comment += ' - {0}'.format(thermo_data2.comment)\n else:\n thermo_data1.comment = re.sub(re.escape(' + ' + thermo_data2.comment), '', thermo_data1.comment, 1)\n return thermo_data1\n\n\ndef average_thermo_data(thermo_data_list=None):\n \"\"\"\n Average a list of ThermoData values together.\n Sets uncertainty values to be the approximately the 95% confidence interval, equivalent to\n 2 standard deviations calculated using the sample standard variance:\n \n Uncertainty = 2s\n s = sqrt( sum(abs(x - x.mean())^2) / N - 1) where N is the number of values averaged\n \n Note that uncertainties are only computed when number of values is greater than 1.\n \"\"\"\n if thermo_data_list 
is None:\n thermo_data_list = []\n\n num_values = len(thermo_data_list)\n\n if num_values == 0:\n raise ValueError('No thermo data values were inputted to be averaged.')\n else:\n logging.debug('Averaging thermo data over {0} value(s).'.format(num_values))\n\n if num_values == 1:\n return deepcopy(thermo_data_list[0])\n\n else:\n averaged_thermo_data = deepcopy(thermo_data_list[0])\n for thermo_data in thermo_data_list[1:]:\n averaged_thermo_data = add_thermo_data(averaged_thermo_data, thermo_data)\n\n for i in range(averaged_thermo_data.Tdata.value_si.shape[0]):\n averaged_thermo_data.Cpdata.value_si[i] /= num_values\n\n cp_data = [thermo_data.Cpdata.value_si[i] for thermo_data in thermo_data_list]\n averaged_thermo_data.Cpdata.uncertainty[i] = 2 * np.std(cp_data, ddof=1)\n\n h_data = [thermo_data.H298.value_si for thermo_data in thermo_data_list]\n averaged_thermo_data.H298.value_si /= num_values\n averaged_thermo_data.H298.uncertainty_si = 2 * np.std(h_data, ddof=1)\n\n s_data = [thermo_data.S298.value_si for thermo_data in thermo_data_list]\n averaged_thermo_data.S298.value_si /= num_values\n averaged_thermo_data.S298.uncertainty_si = 2 * np.std(s_data, ddof=1)\n return averaged_thermo_data\n\n\ndef common_atoms(cycle1, cycle2):\n \"\"\"\n INPUT: two cycles with type: list of atoms\n OUTPUT: a set of common atoms\n \"\"\"\n set1 = set(cycle1)\n set2 = set(cycle2)\n return set1.intersection(set2)\n\n\ndef combine_cycles(cycle1, cycle2):\n \"\"\"\n INPUT: two cycles with type: list of atoms\n OUTPUT: a combined cycle with type: list of atoms\n \"\"\"\n set1 = set(cycle1)\n set2 = set(cycle2)\n return list(set1.union(set2))\n\n\ndef is_aromatic_ring(submol):\n \"\"\"\n This method takes a monoring submol (Molecule initialized with a list of atoms containing just \n the ring), and check if it is a aromatic ring.\n \"\"\"\n ring_size = len(submol.atoms)\n if ring_size not in [5, 6]:\n return False\n for ring_atom in submol.atoms:\n for bonded_atom, bond in ring_atom.edges.items():\n if bonded_atom in submol.atoms:\n if not bond.is_benzene():\n return False\n return True\n\n\ndef is_bicyclic(polyring):\n \"\"\"\n Given a polyring (a list of `Atom`s)\n returns True if it's a bicyclic, False otherwise\n \"\"\"\n submol, _ = convert_ring_to_sub_molecule(polyring)\n sssr = submol.get_smallest_set_of_smallest_rings()\n\n return len(sssr) == 2\n\n\ndef find_aromatic_bonds_from_sub_molecule(submol):\n \"\"\"\n This method finds all the aromatic bonds within a input submolecule and \n returns a set of unique aromatic bonds\n \"\"\"\n\n aromatic_bonds = []\n for atom in submol.atoms:\n bonds = submol.get_bonds(atom)\n for atom_j in bonds:\n if atom_j in submol.atoms:\n bond = bonds[atom_j]\n if bond.is_benzene():\n aromatic_bonds.append(bond)\n return set(aromatic_bonds)\n\n\ndef convert_ring_to_sub_molecule(ring):\n \"\"\"\n This function takes a ring structure (can either be monoring or polyring) to create a new \n submolecule with newly deep copied atoms\n\n Outputted submolecules may have incomplete valence and may cause errors with some Molecule.methods(), such\n as update_atomtypes() or update(). 
In the future we may consider using groups for the sub-molecules.\n \"\"\"\n\n atoms_mapping = {}\n for atom in ring:\n atoms_mapping[atom] = atom.copy() # this copy is deep copy of origin atom with empty edges\n\n mol0 = Molecule(atoms=list(atoms_mapping.values()))\n\n for atom in ring:\n for bonded_atom, bond in atom.edges.items():\n if bonded_atom in ring:\n if not mol0.has_bond(atoms_mapping[atom], atoms_mapping[bonded_atom]):\n mol0.add_bond(Bond(atoms_mapping[atom], atoms_mapping[bonded_atom], order=bond.order))\n\n mol0.update_multiplicity()\n mol0.update_connectivity_values()\n return mol0, atoms_mapping\n\n\ndef combine_two_rings_into_sub_molecule(ring1, ring2):\n \"\"\"\n This function combines 2 rings (with common atoms) to create a new \n submolecule with newly deep copied atoms\n \"\"\"\n\n assert len(common_atoms(ring1, ring2)) > 0, \"The two input rings don't have common atoms.\"\n\n atoms_mapping = {}\n for atom in ring1 + ring2:\n if atom not in atoms_mapping:\n atoms_mapping[atom] = atom.copy()\n\n mol0 = Molecule(atoms=list(atoms_mapping.values()))\n\n for atom in ring1:\n for bonded_atom, bond in atom.edges.items():\n if bonded_atom in ring1:\n if not mol0.has_bond(atoms_mapping[atom], atoms_mapping[bonded_atom]):\n mol0.add_bond(Bond(atoms_mapping[atom], atoms_mapping[bonded_atom], order=bond.order))\n\n for atom in ring2:\n for bonded_atom, bond in atom.edges.items():\n if bonded_atom in ring2:\n if not mol0.has_bond(atoms_mapping[atom], atoms_mapping[bonded_atom]):\n mol0.add_bond(Bond(atoms_mapping[atom], atoms_mapping[bonded_atom], order=bond.order))\n\n mol0.update_multiplicity()\n mol0.update_connectivity_values()\n\n return mol0, atoms_mapping\n\n\ndef get_copy_for_one_ring(ring):\n \"\"\"\n Make a copy of a single ring from a molecule.\n\n Returns a list of atoms.\n \"\"\"\n atoms_mapping = convert_ring_to_sub_molecule(ring)[1]\n\n ring_copy = [atoms_mapping[atom] for atom in ring]\n\n return ring_copy\n\n\ndef get_copy_from_two_rings_with_common_atoms(ring1, ring2):\n \"\"\"\n Make a copy of a two rings from a molecule and also generates the merged ring.\n\n Returns a copy of ring1, a copy of ring2, and the merged rings, each as a list of atoms.\n \"\"\"\n merged_ring, atoms_mapping = combine_two_rings_into_sub_molecule(ring1, ring2)\n\n ring1_copy = [atoms_mapping[atom] for atom in ring1]\n ring2_copy = [atoms_mapping[atom] for atom in ring2]\n\n return ring1_copy, ring2_copy, merged_ring\n\n\ndef is_ring_partial_matched(ring, matched_group):\n \"\"\"\n An example of ring partial match is tricyclic ring is matched by a bicyclic group\n usually because of not enough data in polycyclic tree. 
The method takes a matched group \n returned from descend_tree and the ring (a list of non-hydrogen atoms in the ring)\n \"\"\"\n # if matched group has less atoms than the target ring\n # it's surely a partial match\n if len(ring) > len(matched_group.atoms):\n return True\n else:\n submol_ring, _ = convert_ring_to_sub_molecule(ring)\n sssr = submol_ring.get_smallest_set_of_smallest_rings()\n sssr_grp = matched_group.get_smallest_set_of_smallest_rings()\n if sorted([len(sr) for sr in sssr]) == sorted([len(sr_grp) for sr_grp in sssr_grp]):\n return False\n else:\n return True\n\n\ndef bicyclic_decomposition_for_polyring(polyring):\n \"\"\"\n Decompose a polycyclic ring into all possible bicyclic combinations: `bicyclics_merged_from_ring_pair`\n and return a `ring_occurances_dict` that contains all single ring tuples as keys and the number of times\n they appear each bicyclic submolecule. These bicyclic and single rings are used \n later in the heuristic polycyclic thermo algorithm.\n \"\"\"\n\n submol, _ = convert_ring_to_sub_molecule(polyring)\n sssr = submol.get_deterministic_sssr()\n\n ring_pair_with_common_atoms_list = []\n ring_occurances_dict = {}\n\n # Initialize ringOccuranceDict\n for ring in sssr:\n ring_occurances_dict[tuple(ring)] = 0\n\n ring_num = len(sssr)\n for i in range(ring_num):\n for j in range(i + 1, ring_num):\n if common_atoms(sssr[i], sssr[j]):\n # Copy the SSSR's again because these ones are going to be merged into bicyclics\n # and manipulated (aromatic bonds have to be screened and changed to single if needed)\n sssr_i, sssr_j, merged_ring = get_copy_from_two_rings_with_common_atoms(sssr[i], sssr[j])\n ring_pair_with_common_atoms_list.append([sssr_i, sssr_j, merged_ring])\n # Save the single ring SSSRs that appear in bicyclics using the original copy\n # because they will be manipulated (differently) in _add_poly_ring_correction_thermo_data_from_heuristic\n ring_occurances_dict[tuple(sssr[i])] += 1\n ring_occurances_dict[tuple(sssr[j])] += 1\n\n bicyclics_merged_from_ring_pair = []\n # pre-process 2-ring cores\n for ringA, ringB, merged_ring in ring_pair_with_common_atoms_list:\n submol_a = Molecule(atoms=ringA)\n submol_b = Molecule(atoms=ringB)\n is_a_aromatic = is_aromatic_ring(submol_a)\n is_b_aromatic = is_aromatic_ring(submol_b)\n # if ringA and ringB are both aromatic or not aromatic\n # don't need to do anything extra\n if is_a_aromatic and is_b_aromatic:\n pass\n elif not is_a_aromatic and not is_b_aromatic:\n aromatic_bonds_in_a = find_aromatic_bonds_from_sub_molecule(submol_a)\n for aromaticBond_inA in aromatic_bonds_in_a:\n aromaticBond_inA.set_order_num(1)\n\n aromatic_bonds_in_b = find_aromatic_bonds_from_sub_molecule(submol_b)\n for aromaticBond_inB in aromatic_bonds_in_b:\n aromaticBond_inB.set_order_num(1)\n elif is_a_aromatic:\n aromatic_bonds_in_b = find_aromatic_bonds_from_sub_molecule(submol_b)\n for aromaticBond_inB in aromatic_bonds_in_b:\n # Make sure the aromatic bond in ringB is in ringA, and both ringB atoms are in ringA \n # If so, preserve the B bond status, otherwise change to single bond order\n if ((aromaticBond_inB.atom1 in submol_a.atoms) and\n (aromaticBond_inB.atom2 in submol_a.atoms) and\n (submol_a.has_bond(aromaticBond_inB.atom1, aromaticBond_inB.atom2))):\n pass\n else:\n aromaticBond_inB.set_order_num(1)\n else:\n aromatic_bonds_in_a = find_aromatic_bonds_from_sub_molecule(submol_a)\n for aromaticBond_inA in aromatic_bonds_in_a:\n if ((aromaticBond_inA.atom1 in submol_b.atoms) and\n (aromaticBond_inA.atom2 in 
submol_b.atoms) and\n (submol_b.has_bond(aromaticBond_inA.atom1, aromaticBond_inA.atom2))):\n pass\n else:\n aromaticBond_inA.set_order_num(1)\n merged_ring.saturate_unfilled_valence(update=True)\n bicyclics_merged_from_ring_pair.append(merged_ring)\n\n return bicyclics_merged_from_ring_pair, ring_occurances_dict\n\n\ndef split_bicyclic_into_single_rings(bicyclic_submol):\n \"\"\"\n Splits a given bicyclic submolecule into two individual single \n ring submolecules (a list of `Molecule`s ).\n \"\"\"\n sssr = bicyclic_submol.get_deterministic_sssr()\n\n return [convert_ring_to_sub_molecule(sssr[0])[0],\n convert_ring_to_sub_molecule(sssr[1])[0]]\n\n\ndef saturate_ring_bonds(ring_submol):\n \"\"\"\n Given a ring submolelcule (`Molecule`), makes a deep copy and converts non-single bonds \n into single bonds, returns a new saturated submolecule (`Molecule`)\n \"\"\"\n atoms_mapping = {}\n for atom in ring_submol.atoms:\n if atom not in atoms_mapping:\n atoms_mapping[atom] = atom.copy()\n\n mol0 = Molecule(atoms=list(atoms_mapping.values()))\n\n already_saturated = True\n for atom in ring_submol.atoms:\n for bonded_atom, bond in atom.edges.items():\n if bonded_atom in ring_submol.atoms:\n if bond.order > 1.0 and not bond.is_benzene():\n already_saturated = False\n if not mol0.has_bond(atoms_mapping[atom], atoms_mapping[bonded_atom]):\n bond_order = 1.0\n if bond.is_benzene():\n bond_order = 1.5\n mol0.add_bond(Bond(atoms_mapping[atom], atoms_mapping[bonded_atom], order=bond_order))\n\n mol0.saturate_unfilled_valence()\n mol0.update_atomtypes()\n mol0.update_multiplicity()\n mol0.update_connectivity_values()\n return mol0, already_saturated\n\n\n################################################################################\n\nclass ThermoDepository(Database):\n \"\"\"\n A class for working with the RMG thermodynamics depository.\n \"\"\"\n\n def __init__(self, label='', name='', short_desc='', long_desc='', metal=None, site=None, facet=None):\n Database.__init__(self, label=label, name=name, short_desc=short_desc, long_desc=long_desc, metal=metal, site=site, facet=facet)\n\n def load_entry(self, index, label, molecule, thermo, reference=None, referenceType='', shortDesc='', longDesc='',\n rank=None, metal=None, site=None, facet=None):\n \"\"\"\n Method for parsing entries in database files.\n Note that these argument names are retained for backward compatibility.\n \"\"\"\n entry = Entry(\n index=index,\n label=label,\n item=Molecule().from_adjacency_list(molecule),\n data=thermo,\n reference=reference,\n reference_type=referenceType,\n short_desc=shortDesc,\n long_desc=longDesc.strip(),\n rank=rank,\n metal=metal,\n site=site,\n facet=facet,\n )\n self.entries[label] = entry\n return entry\n\n def save_entry(self, f, entry):\n \"\"\"\n Write the given `entry` in the thermo database to the file object `f`.\n \"\"\"\n return save_entry(f, entry)\n\n\n################################################################################\n\nclass ThermoLibrary(Database):\n \"\"\"\n A class for working with a RMG thermodynamics library.\n \"\"\"\n\n def __init__(self, label='', name='', solvent=None, short_desc='', long_desc='', metal=None, site=None, facet=None):\n Database.__init__(self, label=label, name=name, short_desc=short_desc, long_desc=long_desc,\n metal=metal, site=site, facet=facet)\n\n def load_entry(self,\n index,\n label,\n molecule,\n thermo,\n reference=None,\n referenceType='',\n shortDesc='',\n longDesc='',\n rank=None,\n metal=None,\n facet=None,\n site=None,\n ):\n \"\"\"\n Method 
for parsing entries in database files.\n Note that these argument names are retained for backward compatibility.\n \"\"\"\n\n molecule = Molecule().from_adjacency_list(molecule)\n\n # Internal checks for adding entry to the thermo library\n if label in list(self.entries.keys()):\n raise DatabaseError('Found a duplicate molecule with label {0} in the thermo library {1}. '\n 'Please correct your library.'.format(label, self.name))\n\n for entry in self.entries.values():\n if molecule.is_isomorphic(entry.item):\n if molecule.multiplicity == entry.item.multiplicity:\n raise DatabaseError('Adjacency list and multiplicity of {0} matches that of '\n 'existing molecule {1} in thermo library {2}. Please '\n 'correct your library.'.format(label, entry.label, self.name))\n\n self.entries[label] = Entry(\n index=index,\n label=label,\n item=molecule,\n data=thermo,\n reference=reference,\n reference_type=referenceType,\n short_desc=shortDesc,\n long_desc=longDesc.strip(),\n rank=rank,\n metal=metal,\n facet=facet,\n site=site,\n )\n\n def save_entry(self, f, entry):\n \"\"\"\n Write the given `entry` in the thermo database to the file object `f`.\n \"\"\"\n return save_entry(f, entry)\n\n def generate_old_library_entry(self, data):\n \"\"\"\n Return a list of values used to save entries to the old-style RMG\n thermo database based on the thermodynamics object `data`.\n \"\"\"\n return generate_old_library_entry(data)\n\n def process_old_library_entry(self, data):\n \"\"\"\n Process a list of parameters `data` as read from an old-style RMG\n thermo database, returning the corresponding thermodynamics object.\n \"\"\"\n return process_old_library_entry(data)\n\n\n################################################################################\n\nclass ThermoGroups(Database):\n \"\"\"\n A class for working with an RMG thermodynamics group additivity database.\n \"\"\"\n\n def __init__(self, label='', name='', short_desc='', long_desc='', metal=None, site=None, facet=None):\n Database.__init__(self, label=label, name=name, short_desc=short_desc, long_desc=long_desc,\n metal=metal, site=site, facet=facet)\n\n def load_entry(self,\n index,\n label,\n group,\n thermo,\n reference=None,\n referenceType='',\n shortDesc='',\n longDesc='',\n rank=None,\n metal=None,\n facet=None,\n site=None,\n ):\n \"\"\"\n Method for parsing entries in database files.\n Note that these argument names are retained for backward compatibility.\n \"\"\"\n\n if (group[0:3].upper() == 'OR{' or\n group[0:4].upper() == 'AND{' or\n group[0:7].upper() == 'NOT OR{' or\n group[0:8].upper() == 'NOT AND{'):\n item = make_logic_node(group)\n else:\n item = Group().from_adjacency_list(group)\n self.entries[label] = Entry(\n index=index,\n label=label,\n item=item,\n data=thermo,\n reference=reference,\n reference_type=referenceType,\n short_desc=shortDesc,\n long_desc=longDesc.strip(),\n rank=rank,\n metal=metal,\n facet=facet,\n site=site,\n )\n\n def save_entry(self, f, entry):\n \"\"\"\n Write the given `entry` in the thermo database to the file object `f`.\n \"\"\"\n return save_entry(f, entry)\n\n def generate_old_library_entry(self, data):\n \"\"\"\n Return a list of values used to save entries to the old-style RMG\n thermo database based on the thermodynamics object `data`.\n \"\"\"\n\n return generate_old_library_entry(data)\n\n def process_old_library_entry(self, data):\n \"\"\"\n Process a list of parameters `data` as read from an old-style RMG\n thermo database, returning the corresponding thermodynamics object.\n \"\"\"\n return 
process_old_library_entry(data)\n\n def copy_data(self, source, destination):\n \"\"\"\n This method copys the ThermoData object and all meta data\n from source to destination\n Args:\n source: The entry for which data is being copied\n destination: The entry for which data is being overwritten\n\n \"\"\"\n destination.data = source.data\n destination.reference = source.reference\n destination.short_desc = source.short_desc\n destination.long_desc = source.long_desc\n destination.rank = source.rank\n destination.reference_type = source.reference_type\n destination.metal = source.metal\n destination.facet = source.facet\n destination.site = source.site\n\n def remove_group(self, group_to_remove):\n \"\"\"\n Removes a group that is in a tree from the database. For thermo\n groups we also, need to re-point any unicode thermo_data that may\n have pointed to the entry.\n\n Returns the removed group\n \"\"\"\n\n # First call base class method\n Database.remove_group(self, group_to_remove)\n\n parent_r = group_to_remove.parent\n\n # look for other pointers that point toward entry\n for entry in self.entries.values():\n if isinstance(entry.data, str):\n if entry.data == group_to_remove.label:\n # if the entryToRemove.data is also a pointer, then copy\n if isinstance(group_to_remove.data, str):\n entry.data = group_to_remove.data\n # if the parent points toward entry and the data is\n # not a base string, we need to copy the data to the parent\n elif entry is parent_r:\n self.copy_data(group_to_remove, parent_r)\n # otherwise, point toward entryToRemove's parent\n else:\n entry.data = str(parent_r.label)\n\n return group_to_remove\n\n\n################################################################################\n\nclass ThermoDatabase(object):\n \"\"\"\n A class for working with the RMG thermodynamics database.\n \"\"\"\n\n def __init__(self):\n self.depository = {}\n self.libraries = {}\n self.surface = {}\n self.groups = {}\n self.library_order = []\n self.local_context = {\n 'ThermoData': ThermoData,\n 'Wilhoit': Wilhoit,\n 'NASAPolynomial': NASAPolynomial,\n 'NASA': NASA,\n }\n self.global_context = {}\n\n # Use Pt111 binding energies as default\n self.binding_energies = {\n 'H': rmgpy.quantity.Energy(-2.75368,'eV/molecule'),\n 'C': rmgpy.quantity.Energy(-7.02516,'eV/molecule'),\n 'N': rmgpy.quantity.Energy(-4.63225,'eV/molecule'),\n 'O': rmgpy.quantity.Energy(-3.81153,'eV/molecule'),\n }\n\n def __reduce__(self):\n \"\"\"\n A helper function used when pickling a ThermoDatabase object.\n \"\"\"\n d = {\n 'depository': self.depository,\n 'libraries': self.libraries,\n 'groups': self.groups,\n 'library_order': self.library_order,\n 'surface' : self.surface,\n }\n return ThermoDatabase, (), d\n\n def __setstate__(self, d):\n \"\"\"\n A helper function used when unpickling a ThermoDatabase object.\n \"\"\"\n self.depository = d['depository']\n self.libraries = d['libraries']\n self.groups = d['groups']\n self.library_order = d['library_order']\n self.surface = d['surface']\n\n def load(self, path, libraries=None, depository=True, surface=False):\n \"\"\"\n Load the thermo database from the given `path` on disk, where `path`\n points to the top-level folder of the thermo database.\n \"\"\"\n if depository:\n self.load_depository(os.path.join(path, 'depository'))\n else:\n self.depository = {}\n self.load_libraries(os.path.join(path, 'libraries'), libraries)\n self.load_groups(os.path.join(path, 'groups'))\n if surface:\n self.load_surface()\n\n def load_depository(self, path):\n \"\"\"\n Load 
the thermo database from the given `path` on disk, where `path`\n points to the top-level folder of the thermo database.\n \"\"\"\n self.depository = {\n 'stable': ThermoDepository().load(os.path.join(path, 'stable.py'),\n self.local_context, self.global_context),\n 'radical': ThermoDepository().load(os.path.join(path, 'radical.py'),\n self.local_context, self.global_context)\n }\n\n def load_libraries(self, path, libraries=None):\n \"\"\"\n Load the thermo database from the given `path` on disk, where `path`\n points to the top-level folder of the thermo database.\n \n If no libraries are given, all are loaded.\n \"\"\"\n self.libraries = {}\n self.library_order = []\n if libraries is None:\n for (root, dirs, files) in os.walk(os.path.join(path)):\n for f in files:\n name, ext = os.path.splitext(f)\n if ext.lower() == '.py':\n logging.info('Loading thermodynamics library from {0} in {1}...'.format(f, root))\n library = ThermoLibrary()\n library.load(os.path.join(root, f), self.local_context, self.global_context)\n library.label = os.path.splitext(f)[0]\n self.libraries[library.label] = library\n self.library_order.append(library.label)\n\n else:\n for libraryName in libraries:\n f = libraryName + '.py'\n if os.path.exists(os.path.join(path, f)):\n logging.info('Loading thermodynamics library from {0} in {1}...'.format(f, path))\n library = ThermoLibrary()\n library.load(os.path.join(path, f), self.local_context, self.global_context)\n library.label = os.path.splitext(f)[0]\n self.libraries[library.label] = library\n self.library_order.append(library.label)\n else:\n if libraryName == \"KlippensteinH2O2\":\n logging.info(\n '\\n** Note: The thermo library KlippensteinH2O2 was replaced and is no longer available '\n 'in RMG. For H2 combustion chemistry consider using the BurkeH2O2 library instead\\n')\n raise DatabaseError('Library {} not found in {}... 
Please check if your library is '\n 'correctly placed'.format(libraryName, path))\n\n def load_surface(self):\n \"\"\"\n Load the metal database from the given `path` on disk, where `path`\n points to the top-level folder of the thermo database.\n \"\"\"\n MetalDB = MetalDatabase()\n MetalDB.load(os.path.join(settings['database.directory'], 'surface'))\n\n self.surface = {\n 'metal': MetalDB\n }\n\n def load_groups(self, path):\n \"\"\"\n Load the thermo database from the given `path` on disk, where `path`\n points to the top-level folder of the thermo database.\n \"\"\"\n logging.info('Loading thermodynamics group database from {0}...'.format(path))\n categories = [\n 'group',\n 'ring',\n 'radical',\n 'polycyclic',\n 'other',\n 'longDistanceInteraction_cyclic',\n 'longDistanceInteraction_noncyclic',\n 'adsorptionPt111',\n ]\n self.groups = {\n category: ThermoGroups(label=category).load(os.path.join(path, category + '.py'),\n self.local_context, self.global_context)\n for category in categories\n }\n\n self.record_ring_generic_nodes()\n self.record_polycylic_generic_nodes()\n\n def save(self, path):\n \"\"\"\n Save the thermo database to the given `path` on disk, where `path`\n points to the top-level folder of the thermo database.\n \"\"\"\n path = os.path.abspath(path)\n if not os.path.exists(path):\n os.mkdir(path)\n self.save_depository(os.path.join(path, 'depository'))\n self.save_libraries(os.path.join(path, 'libraries'))\n self.save_groups(os.path.join(path, 'groups'))\n self.save_surface(os.path.join(path, 'surface'))\n\n def save_depository(self, path):\n \"\"\"\n Save the thermo depository to the given `path` on disk, where `path`\n points to the top-level folder of the thermo depository.\n \"\"\"\n if not os.path.exists(path):\n os.mkdir(path)\n for depo in self.depository.keys():\n self.depository[depo].save(os.path.join(path, depo + '.py'))\n\n def save_libraries(self, path):\n \"\"\"\n Save the thermo libraries to the given `path` on disk, where `path`\n points to the top-level folder of the thermo libraries.\n \"\"\"\n if not os.path.exists(path):\n os.mkdir(path)\n for library in self.libraries.values():\n library.save(os.path.join(path, '{0}.py'.format(library.label)))\n\n def save_groups(self, path):\n \"\"\"\n Save the thermo groups to the given `path` on disk, where `path`\n points to the top-level folder of the thermo groups.\n \"\"\"\n if not os.path.exists(path):\n os.mkdir(path)\n for group in self.groups.keys():\n self.groups[group].save(os.path.join(path, group + '.py'))\n\n def save_surface(self, path):\n \"\"\"\n Save the metal library to the given `path` on disk, where `path`\n points to the top-level folder of the metal library.\n \"\"\"\n\n if not os.path.exists(path):\n os.mkdir(path)\n for library in self.surface.keys():\n self.surface[library].save(os.path.join(path, library + '.py'))\n\n def load_old(self, path):\n \"\"\"\n Load the old RMG thermo database from the given `path` on disk, where\n `path` points to the top-level folder of the old RMG database.\n \"\"\"\n # The old database does not have a depository, so create an empty one\n self.depository = {}\n self.depository['stable'] = ThermoDepository(label='stable', name='Stable Molecules')\n self.depository['radical'] = ThermoDepository(label='radical', name='Radical Molecules')\n\n for (root, dirs, files) in os.walk(os.path.join(path, 'thermo_libraries')):\n if (os.path.exists(os.path.join(root, 'Dictionary.txt')) and\n os.path.exists(os.path.join(root, 'Library.txt'))):\n library = 
ThermoLibrary(label=os.path.basename(root), name=os.path.basename(root))\n library.load_old(\n dictstr=os.path.join(root, 'Dictionary.txt'),\n treestr='',\n libstr=os.path.join(root, 'Library.txt'),\n num_parameters=12,\n num_labels=1,\n pattern=False,\n )\n library.label = os.path.basename(root)\n self.libraries[library.label] = library\n\n self.groups = {}\n self.groups['group'] = ThermoGroups(label='group', name='Functional Group Additivity Values').load_old(\n dictstr=os.path.join(path, 'thermo_groups', 'Group_Dictionary.txt'),\n treestr=os.path.join(path, 'thermo_groups', 'Group_Tree.txt'),\n libstr=os.path.join(path, 'thermo_groups', 'Group_Library.txt'),\n num_parameters=12,\n num_labels=1,\n pattern=True,\n )\n self.groups['gauche'] = ThermoGroups(label='gauche', name='Gauche Interaction Corrections').load_old(\n dictstr=os.path.join(path, 'thermo_groups', 'Gauche_Dictionary.txt'),\n treestr=os.path.join(path, 'thermo_groups', 'Gauche_Tree.txt'),\n libstr=os.path.join(path, 'thermo_groups', 'Gauche_Library.txt'),\n num_parameters=12,\n num_labels=1,\n pattern=True,\n )\n self.groups['int15'] = ThermoGroups(label='int15', name='1,5-Interaction Corrections').load_old(\n dictstr=os.path.join(path, 'thermo_groups', '15_Dictionary.txt'),\n treestr=os.path.join(path, 'thermo_groups', '15_Tree.txt'),\n libstr=os.path.join(path, 'thermo_groups', '15_Library.txt'),\n num_parameters=12,\n num_labels=1,\n pattern=True,\n )\n self.groups['radical'] = ThermoGroups(label='radical', name='Radical Corrections').load_old(\n dictstr=os.path.join(path, 'thermo_groups', 'Radical_Dictionary.txt'),\n treestr=os.path.join(path, 'thermo_groups', 'Radical_Tree.txt'),\n libstr=os.path.join(path, 'thermo_groups', 'Radical_Library.txt'),\n num_parameters=12,\n num_labels=1,\n pattern=True,\n )\n self.groups['ring'] = ThermoGroups(label='ring', name='Ring Corrections').load_old(\n dictstr=os.path.join(path, 'thermo_groups', 'Ring_Dictionary.txt'),\n treestr=os.path.join(path, 'thermo_groups', 'Ring_Tree.txt'),\n libstr=os.path.join(path, 'thermo_groups', 'Ring_Library.txt'),\n num_parameters=12,\n num_labels=1,\n pattern=True,\n )\n self.groups['polycyclic'] = ThermoGroups(label='other', name='Polycyclic Ring Corrections').load_old(\n dictstr=os.path.join(path, 'thermo_groups', 'Polycyclic_Dictionary.txt'),\n treestr=os.path.join(path, 'thermo_groups', 'Polycyclic_Tree.txt'),\n libstr=os.path.join(path, 'thermo_groups', 'Polycyclic_Library.txt'),\n num_parameters=12,\n num_labels=1,\n pattern=True,\n )\n self.groups['other'] = ThermoGroups(label='other', name='Other Corrections').load_old(\n dictstr=os.path.join(path, 'thermo_groups', 'Other_Dictionary.txt'),\n treestr=os.path.join(path, 'thermo_groups', 'Other_Tree.txt'),\n libstr=os.path.join(path, 'thermo_groups', 'Other_Library.txt'),\n num_parameters=12,\n num_labels=1,\n pattern=True,\n )\n\n def prune_heteroatoms(self, allowed=None):\n \"\"\"\n Remove all species from thermo libraries that contain atoms other than those allowed.\n \n This is useful before saving the database for use in RMG-Java\n \"\"\"\n if allowed is None:\n allowed = ['C', 'H', 'O', 'S']\n allowed_elements = [rmgpy.molecule.element.get_element(label) for label in allowed]\n for library in self.libraries.values():\n logging.info(\"Removing hetoroatoms from thermo library '{0}'\".format(library.name))\n to_delete = []\n for entry in library.entries.values():\n for atom in entry.item.atoms:\n if atom.element not in allowed_elements:\n to_delete.append(entry.label)\n break\n for label in 
to_delete:\n logging.info(\" {0}\".format(label))\n library.entries.pop(label)\n\n def save_old(self, path):\n \"\"\"\n Save the old RMG thermo database to the given `path` on disk, where\n `path` points to the top-level folder of the old RMG database.\n \"\"\"\n\n # Depository not used in old database, so it is not saved\n\n libraries_path = os.path.join(path, 'thermo_libraries')\n if not os.path.exists(libraries_path):\n os.mkdir(libraries_path)\n for library in self.libraries.values():\n library_path = os.path.join(libraries_path, library.label)\n if not os.path.exists(library_path):\n os.mkdir(library_path)\n library.save_old(\n dictstr=os.path.join(library_path, 'Dictionary.txt'),\n treestr='',\n libstr=os.path.join(library_path, 'Library.txt'),\n )\n\n groups_path = os.path.join(path, 'thermo_groups')\n if not os.path.exists(groups_path):\n os.mkdir(groups_path)\n self.groups['group'].save_old(\n dictstr=os.path.join(groups_path, 'Group_Dictionary.txt'),\n treestr=os.path.join(groups_path, 'Group_Tree.txt'),\n libstr=os.path.join(groups_path, 'Group_Library.txt'),\n )\n self.groups['gauche'].save_old(\n dictstr=os.path.join(groups_path, 'Gauche_Dictionary.txt'),\n treestr=os.path.join(groups_path, 'Gauche_Tree.txt'),\n libstr=os.path.join(groups_path, 'Gauche_Library.txt'),\n )\n self.groups['int15'].save_old(\n dictstr=os.path.join(groups_path, '15_Dictionary.txt'),\n treestr=os.path.join(groups_path, '15_Tree.txt'),\n libstr=os.path.join(groups_path, '15_Library.txt'),\n )\n self.groups['radical'].save_old(\n dictstr=os.path.join(groups_path, 'Radical_Dictionary.txt'),\n treestr=os.path.join(groups_path, 'Radical_Tree.txt'),\n libstr=os.path.join(groups_path, 'Radical_Library.txt'),\n )\n self.groups['ring'].save_old(\n dictstr=os.path.join(groups_path, 'Ring_Dictionary.txt'),\n treestr=os.path.join(groups_path, 'Ring_Tree.txt'),\n libstr=os.path.join(groups_path, 'Ring_Library.txt'),\n )\n self.groups['polycyclic'].save_old(\n dictstr=os.path.join(groups_path, 'Polycyclic_Dictionary.txt'),\n treestr=os.path.join(groups_path, 'Polycyclic_Tree.txt'),\n libstr=os.path.join(groups_path, 'Polycyclic_Library.txt'),\n )\n self.groups['other'].save_old(\n dictstr=os.path.join(groups_path, 'Other_Dictionary.txt'),\n treestr=os.path.join(groups_path, 'Other_Tree.txt'),\n libstr=os.path.join(groups_path, 'Other_Library.txt'),\n )\n\n def record_polycylic_generic_nodes(self):\n \"\"\"\n Identify generic nodes in tree for polycyclic groups.\n Saves them as a list in the `generic_nodes` attribute\n in the polycyclic :class:`ThermoGroups` object, which\n must be pre-loaded.\n\n Necessary for polycyclic heuristic.\n \"\"\"\n self.groups['polycyclic'].generic_nodes = ['PolycyclicRing']\n for label, entry in self.groups['polycyclic'].entries.items():\n if isinstance(entry.data, ThermoData):\n continue\n self.groups['polycyclic'].generic_nodes.append(label)\n\n def record_ring_generic_nodes(self):\n \"\"\"\n Identify generic nodes in tree for ring groups.\n Saves them as a list in the `generic_nodes` attribute\n in the ring :class:`ThermoGroups` object, which\n must be pre-loaded.\n\n Necessary for polycyclic heuristic.\n \"\"\"\n self.groups['ring'].generic_nodes = ['Ring']\n for label, entry in self.groups['ring'].entries.items():\n if isinstance(entry.data, ThermoData):\n continue\n self.groups['ring'].generic_nodes.append(label)\n\n def get_thermo_data(self, species, metal_to_scale_to=None, training_set=None):\n \"\"\"\n Return the thermodynamic parameters for a given :class:`Species`\n object 
`species`. This function first searches the loaded libraries\n in order, returning the first match found, before falling back to\n estimation via machine learning and then group additivity.\n \n The method corrects for symmetry when the molecule uses machine\n learning or group additivity. Libraries and direct QM calculations\n are already corrected.\n\n If either metal to scale to or from is not specified, assume the binding energies given in the input file\n \n Returns: ThermoData\n \"\"\"\n from rmgpy.rmg.input import get_input\n\n thermo0 = self.get_thermo_data_from_libraries(species)\n\n if thermo0 is not None: # was able to find thermodata in the loaded libraries\n if len(thermo0) != 3:\n raise RuntimeError(\"thermo0 should be a tuple (thermo_data, library, entry), not {0}\".format(thermo0))\n entry = thermo0[2]\n thermo0 = thermo0[0]\n\n if species.contains_surface_site():\n if entry.metal is not None:\n if entry.facet is not None:\n db_label = entry.metal + entry.facet\n thermo0 = self.correct_binding_energy(thermo0, species, metal_to_scale_from=db_label,\n metal_to_scale_to=metal_to_scale_to)\n else: # no facet was given\n thermo0 = self.correct_binding_energy(thermo0, species, metal_to_scale_from=entry.metal, metal_to_scale_to=metal_to_scale_to)\n else: # assume the thermo came from pt 111\n thermo0 = self.correct_binding_energy(thermo0, species, metal_to_scale_from=None, metal_to_scale_to=metal_to_scale_to)\n return thermo0\n\n if species.contains_surface_site():\n try:\n thermo0 = self.get_thermo_data_for_surface_species(species)\n thermo0 = self.correct_binding_energy(thermo0, species, metal_to_scale_from=\"Pt111\", metal_to_scale_to=metal_to_scale_to) # group adsorption values come from Pt111\n return thermo0\n except:\n logging.error(\"Error attempting to get thermo for species %s with structure \\n%s\", \n species, species.molecule[0].to_adjacency_list())\n raise\n\n try:\n quantum_mechanics = get_input('quantum_mechanics')\n except Exception:\n logging.debug('Quantum Mechanics DB could not be found.')\n quantum_mechanics = None\n\n try:\n ml_estimator, ml_settings = get_input('ml_estimator')\n except Exception:\n logging.debug('ML estimator could not be found.')\n ml_estimator, ml_settings = None, None\n\n if quantum_mechanics:\n original_molecule = species.molecule[0]\n if quantum_mechanics.settings.onlyCyclics and not original_molecule.is_cyclic():\n pass\n else: # try a QM calculation\n if original_molecule.get_radical_count() > quantum_mechanics.settings.maxRadicalNumber:\n # Too many radicals for direct calculation: use HBI.\n logging.info(\"{0} radicals on {1} exceeds limit of {2}. 
Using HBI method.\".format(\n original_molecule.get_radical_count(),\n species.label,\n quantum_mechanics.settings.maxRadicalNumber,\n ))\n\n # Need to estimate thermo via each resonance isomer\n thermo = []\n for molecule in species.molecule:\n molecule.clear_labeled_atoms()\n # Try to see if the saturated molecule can be found in the libraries\n tdata = self.estimate_radical_thermo_via_hbi(molecule, self.get_thermo_data_from_libraries)\n priority = 1\n if tdata is None:\n # Then attempt quantum mechanics job on the saturated molecule\n tdata = self.estimate_radical_thermo_via_hbi(molecule, quantum_mechanics.get_thermo_data)\n priority = 2\n if tdata is None:\n # Fall back to group additivity\n tdata = self.estimate_thermo_via_group_additivity(molecule)\n priority = 3\n\n thermo.append((priority, tdata.get_enthalpy(298.), molecule, tdata))\n\n if len(thermo) > 1:\n # Sort thermo first by the priority, then by the most stable H298 value\n thermo = sorted(thermo, key=lambda x: (x[0], x[1]))\n for i, therm in enumerate(thermo):\n logging.debug(\"Resonance isomer {0} {1} gives H298={2:.0f} J/mol\"\n \"\".format(i + 1, therm[2].to_smiles(), therm[1]))\n # Save resonance isomers reordered by their thermo\n species.molecule = [item[2] for item in thermo]\n original_molecule = species.molecule[0]\n thermo0 = thermo[0][3]\n\n # update entropy by symmetry correction\n thermo0.S298.value_si -= constants.R * math.log(species.get_symmetry_number())\n\n else: # Not too many radicals: do a direct calculation.\n thermo0 = quantum_mechanics.get_thermo_data(original_molecule) # returns None if it fails\n\n if thermo0 is None:\n # First try finding stable species in libraries and using HBI\n for mol in species.molecule:\n if mol.reactive:\n original_molecule = mol\n break\n else:\n for mol in species.molecule:\n logging.info(mol.to_adjacency_list())\n logging.info('reactive = {0}'.format(mol.reactive))\n logging.info('\\n')\n raise ValueError('Could not process a species with no reactive structures')\n if original_molecule.get_radical_count() > 0:\n # If the molecule is a radical, check if any of the saturated forms are in the libraries\n # first and perform an HBI correction on them\n thermo = []\n for molecule in species.molecule:\n if molecule.reactive:\n molecule.clear_labeled_atoms()\n # First see if the saturated molecule is in the libaries\n tdata = self.estimate_radical_thermo_via_hbi(molecule, self.get_thermo_data_from_libraries)\n if tdata:\n thermo.append((tdata.get_enthalpy(298.), molecule, tdata))\n\n if thermo:\n # Sort thermo by the most stable H298 value when choosing between thermoLibrary values\n thermo = sorted(thermo, key=lambda x: x[0])\n # Sort thermo by the structure reactive attribute, with `reactive=True` structures first\n thermo.sort(key=lambda x: x[1].reactive, reverse=True)\n for i, therm in enumerate(thermo):\n if therm[1].reactive:\n logging.debug(\"Resonance isomer {0} {1} gives H298={2:.0f} J/mol\"\n \"\".format(i + 1, therm[1].to_smiles(), therm[0]))\n else:\n logging.debug(\"Non-reactive resonance isomer {0} {1} gives H298={2:.0f} J/mol\"\n \"\".format(i + 1, therm[1].to_smiles(), therm[0]))\n # Save resonance isomers reordered by their thermo\n new_mol_list = [item[1] for item in thermo]\n if len(new_mol_list) < len(species.molecule):\n new_mol_list.extend([mol for mol in species.molecule if mol not in new_mol_list])\n species.molecule = new_mol_list\n thermo0 = thermo[0][2]\n\n if thermo0 is None:\n # If we still don't have thermo, use ML to estimate it, but\n # only if 
the molecule is made up of H, C, N, and O atoms and\n # is not a singlet carbene. ML settings are checked in\n # `self.get_thermo_data_from_ml`.\n if (ml_estimator is not None\n and all(a.element.number in {1, 6, 7, 8} for a in species.molecule[0].atoms)\n and species.molecule[0].get_singlet_carbene_count() == 0):\n thermo0 = self.get_thermo_data_from_ml(species,\n ml_estimator,\n ml_settings)\n\n if thermo0 is None:\n # And lastly, resort back to group additivity to determine thermo for molecule\n thermo0 = self.get_thermo_data_from_groups(species)\n\n # Update entropy by symmetry correction (not included in trained ML model)\n thermo0.S298.value_si -= constants.R * math.log(species.get_symmetry_number())\n\n # Make sure to calculate Cp0 and CpInf if it wasn't done already\n find_cp0_and_cpinf(species, thermo0)\n\n # Return the resulting thermo parameters\n return thermo0\n\n def set_binding_energies(self, binding_energies='Pt111'):\n \"\"\"\n Sets and stores the atomic binding energies specified in the input file.\n\n All adsorbates will be scaled to use these elemental binding energies.\n\n Args:\n binding_energies (dict, optional): the desired binding energies with\n elements as keys and binding energy/unit tuples (or Energy \n quantities) as values\n\n Returns:\n None, stores result in self.binding_energies\n \"\"\"\n \n if isinstance(binding_energies, str):\n if not self.surface:\n self.load_surface()\n binding_energies = self.surface['metal'].find_binding_energies(binding_energies)\n\n for element, energy in binding_energies.items():\n binding_energies[element] = rmgpy.quantity.Energy(energy)\n\n self.binding_energies = binding_energies\n\n def correct_binding_energy(self, thermo, species, metal_to_scale_from=None, metal_to_scale_to=None):\n \"\"\"\n Changes the provided thermo, by applying a linear scaling relation\n to correct the adsorption energy.\n\n :param thermo: starting thermo data\n :param species: the species (which is an adsorbate)\n :param metal_to_scale_from: the metal you want to scale from (string eg. 
'Pt111' or None)\n :param metal_to_scale_to: the metal you want to scale to (string e.g 'Pt111' or None)\n :return: corrected thermo\n \"\"\"\n\n if metal_to_scale_from == metal_to_scale_to:\n return thermo\n\n if metal_to_scale_to is None:\n metal_to_scale_to_binding_energies = self.binding_energies\n else:\n metal_to_scale_to_binding_energies = self.surface['metal'].find_binding_energies(metal_to_scale_to)\n\n if metal_to_scale_from is None:\n metal_to_scale_from_binding_energies = self.binding_energies\n else:\n metal_to_scale_from_binding_energies = self.surface['metal'].find_binding_energies(metal_to_scale_from)\n\n delta_atomic_adsorption_energy = {\n 'C': rmgpy.quantity.Energy(0.0, 'eV/molecule'),\n 'H': rmgpy.quantity.Energy(0.0, 'eV/molecule'),\n 'O': rmgpy.quantity.Energy(0.0, 'eV/molecule'),\n 'N': rmgpy.quantity.Energy(0.0, 'eV/molecule'),\n }\n\n for element, delta_energy in delta_atomic_adsorption_energy.items():\n delta_energy.value_si = metal_to_scale_to_binding_energies[element].value_si - metal_to_scale_from_binding_energies[element].value_si\n\n if all(-0.01 < v.value_si < 0.01 for v in delta_atomic_adsorption_energy.values()):\n return thermo\n\n molecule = species.molecule[0]\n # only want/need to do one resonance structure\n surface_sites = []\n for atom in molecule.atoms:\n if atom.is_surface_site():\n surface_sites.append(atom)\n normalized_bonds = {'C': 0., 'O': 0., 'N': 0., 'H': 0.}\n max_bond_order = {'C': 4., 'O': 2., 'N': 3., 'H': 1.}\n for site in surface_sites:\n numbonds = len(site.bonds)\n if numbonds == 0:\n # vanDerWaals\n pass\n else:\n assert len(site.bonds) == 1, \"Each surface site can only be bonded to 1 atom\"\n bonded_atom = list(site.bonds.keys())[0]\n bond = site.bonds[bonded_atom]\n if bond.is_single():\n bond_order = 1.\n elif bond.is_double():\n bond_order = 2.\n elif bond.is_triple():\n bond_order = 3.\n elif bond.is_quadruple():\n bond_order = 4.\n else:\n raise NotImplementedError(\"Unsupported bond order {0} for binding energy \"\n \"correction.\".format(bond.order))\n\n normalized_bonds[bonded_atom.symbol] += bond_order / max_bond_order[bonded_atom.symbol]\n\n if not isinstance(thermo, ThermoData):\n thermo = thermo.to_thermo_data()\n find_cp0_and_cpinf(species, thermo)\n\n # now edit the adsorptionThermo using LSR\n comments = []\n for element in 'CHON':\n if normalized_bonds[element]:\n change_in_binding_energy = delta_atomic_adsorption_energy[element].value_si * normalized_bonds[element]\n thermo.H298.value_si += change_in_binding_energy\n comments.append(f'{normalized_bonds[element]:.2f}{element}')\n thermo.comment += \" Binding energy corrected by LSR ({}) from {}\".format('+'.join(comments), metal_to_scale_from)\n return thermo\n\n def get_thermo_data_for_surface_species(self, species):\n \"\"\"\n Get the thermo data for an adsorbed species,\n by desorbing it, finding the thermo of the gas-phase\n species, then adding an adsorption correction that\n is found from the groups/adsorption tree.\n Does not apply linear scaling relationship.\n \n Returns a :class:`ThermoData` object, with no Cp0 or CpInf\n \"\"\"\n\n if species.is_surface_site():\n raise DatabaseError(\"Can't estimate thermo of vacant site. 
Should be in library (and should be 0).\")\n\n logging.debug(\"Trying to generate thermo for surface species using first of %d resonance isomer(s):\",\n len(species.molecule))\n molecule = species.molecule[0]\n logging.debug(\"Before removing from surface:\\n\" + molecule.to_adjacency_list())\n # only want/need to do one resonance structure,\n # because will need to regenerate others in gas phase\n dummy_molecule = molecule.copy(deep=True)\n sites_to_remove = []\n adsorbed_atoms = []\n for atom in dummy_molecule.atoms:\n if atom.is_surface_site():\n sites_to_remove.append(atom)\n for site in sites_to_remove:\n numbonds = len(site.bonds)\n if numbonds == 0:\n # vanDerWaals\n pass\n else:\n assert len(site.bonds) == 1, \"Each surface site can only be bonded to 1 atom\"\n bonded_atom = list(site.bonds.keys())[0]\n adsorbed_atoms.append(bonded_atom)\n bond = site.bonds[bonded_atom]\n dummy_molecule.remove_bond(bond)\n if bond.is_single():\n bonded_atom.increment_radical()\n elif bond.is_double():\n bonded_atom.increment_radical()\n bonded_atom.increment_radical()\n elif bond.is_triple():\n bonded_atom.increment_radical()\n bonded_atom.increment_lone_pairs()\n elif bond.is_quadruple():\n bonded_atom.increment_radical()\n bonded_atom.increment_radical()\n bonded_atom.increment_lone_pairs()\n else:\n raise NotImplementedError(\"Can't remove surface bond of type {}\".format(bond.order))\n dummy_molecule.remove_atom(site)\n\n dummy_molecules = [dummy_molecule.copy(deep=True)]\n if len(adsorbed_atoms) == 2:\n # Bidentate adsorption.\n # Try to turn adjacent biradical into a bond.\n try:\n bond = adsorbed_atoms[0].bonds[adsorbed_atoms[1]]\n except KeyError:\n pass # the two adsorbed atoms are not bonded to each other\n else:\n if bond.order < 3:\n bond.increment_order()\n adsorbed_atoms[0].decrement_radical()\n adsorbed_atoms[1].decrement_radical()\n dummy_molecules.append(dummy_molecule.copy(deep=True))\n if (adsorbed_atoms[0].radical_electrons and\n adsorbed_atoms[1].radical_electrons and\n bond.order < 3):\n # There are still spare adjacenct radicals, so do it again\n bond.increment_order()\n adsorbed_atoms[0].decrement_radical()\n adsorbed_atoms[1].decrement_radical()\n dummy_molecules.append(dummy_molecule.copy(deep=True))\n if (adsorbed_atoms[0].lone_pairs and\n adsorbed_atoms[1].lone_pairs and \n bond.order < 3):\n # X#C-C#X will end up with .:C-C:. in gas phase\n # and we want to get to .C#C. 
but not :C=C:\n bond.increment_order()\n adsorbed_atoms[0].decrement_lone_pairs()\n adsorbed_atoms[0].increment_radical()\n adsorbed_atoms[1].decrement_lone_pairs()\n adsorbed_atoms[1].increment_radical()\n dummy_molecules.append(dummy_molecule.copy(deep=True))\n #For bidentate CO because we want C[-1]#O[+1] but not .C#O.\n if (bond.order == 3 and adsorbed_atoms[0].radical_electrons and \n adsorbed_atoms[1].radical_electrons and \n (adsorbed_atoms[0].lone_pairs or adsorbed_atoms[1].lone_pairs)):\n adsorbed_atoms[0].decrement_radical()\n adsorbed_atoms[1].decrement_radical()\n if adsorbed_atoms[0].lone_pairs:\n adsorbed_atoms[1].increment_lone_pairs()\n else:\n adsorbed_atoms[0].increment_lone_pairs()\n dummy_molecules.append(dummy_molecule.copy(deep=True))\n\n for dummy_molecule in dummy_molecules[:]:\n try:\n dummy_molecule.update_connectivity_values()\n dummy_molecule.update()\n except:\n dummy_molecules.remove(dummy_molecule)\n logging.debug(f\"Removing {dummy_molecule} from possible structure list:\\n{dummy_molecule.to_adjacency_list()}\")\n else:\n logging.debug(\"After removing from surface:\\n\" + dummy_molecule.to_adjacency_list())\n\n if len(dummy_molecules) == 0:\n raise RuntimeError(f\"Cannot get thermo for gas-phase molecule. No valid dummy molecules from original molecule:\\n{molecule.to_adjacency_list()}\")\n\n \n # if len(molecule) > 1, it will assume all resonance structures have already been generated when it tries to generate them, so evaluate each configuration separately and pick the lowest energy one by H298 value\n gas_phase_species_from_libraries = []\n gas_phase_species_estimates = []\n for dummy_molecule in dummy_molecules:\n dummy_species = Species()\n dummy_species.molecule = [dummy_molecule]\n dummy_species.generate_resonance_structures()\n dummy_species.thermo = self.get_thermo_data(dummy_species)\n if dummy_species.thermo.label:\n gas_phase_species_from_libraries.append(dummy_species)\n else:\n gas_phase_species_estimates.append(dummy_species)\n\n # define the comparison function to find the lowest energy\n def lowest_energy(species):\n if hasattr(species.thermo, 'H298'):\n return species.thermo.H298.value_si\n else:\n return species.thermo.get_enthalpy(298.0)\n\n if gas_phase_species_from_libraries:\n species = min(gas_phase_species_from_libraries, key=lowest_energy)\n else:\n species = min(gas_phase_species_estimates, key=lowest_energy)\n\n thermo = species.thermo\n thermo.comment = f\"Gas phase thermo for {thermo.label or species.molecule[0].to_smiles()} from {thermo.comment}. 
Adsorption correction:\"\n logging.debug(\"Using thermo from gas phase for species {}\\n\".format(species.label) + repr(thermo))\n\n if not isinstance(thermo, ThermoData):\n thermo = thermo.to_thermo_data()\n find_cp0_and_cpinf(species, thermo)\n\n # Get the adsorption energy\n # Create the ThermoData object\n adsorption_thermo = ThermoData(\n Tdata=([300, 400, 500, 600, 800, 1000, 1500], \"K\"),\n Cpdata=([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], \"J/(mol*K)\"),\n H298=(0.0, \"kJ/mol\"),\n S298=(0.0, \"J/(mol*K)\"),\n )\n try:\n self._add_group_thermo_data(adsorption_thermo, self.groups['adsorptionPt111'], molecule, {})\n except (KeyError, DatabaseError):\n logging.error(\"Couldn't find in adsorption thermo database:\")\n logging.error(molecule)\n logging.error(molecule.to_adjacency_list())\n raise\n\n # (group_additivity=True means it appends the comments)\n add_thermo_data(thermo, adsorption_thermo, group_additivity=True)\n\n if thermo.label:\n thermo.label += 'X' * len(adsorbed_atoms)\n\n find_cp0_and_cpinf(species, thermo)\n return thermo\n\n def get_thermo_data_from_libraries(self, species, training_set=None):\n \"\"\"\n Return the thermodynamic parameters for a given :class:`Species`\n object `species`. This function first searches the loaded libraries\n in order, returning the first match found, before failing and returning None.\n `training_set` is used to identify if function is called during training set or not.\n During training set calculation we want to use gas phase thermo to not affect reverse\n rate calculation.\n \n Returns: ThermoData or None\n \"\"\"\n import rmgpy.rmg.main\n thermo_data = None\n\n # chatelak 11/15/14: modification to introduce liquid phase thermo libraries\n library_list = deepcopy(self.library_order) # copy the value to not affect initial object\n\n if rmgpy.rmg.main.solvent is not None:\n liq_libraries = []\n # Liquid phase simulation part:\n # This bloc \"for\": Identify liquid phase libraries and store them in liq_libraries\n for iterLib in library_list:\n if self.libraries[iterLib].solvent:\n liq_libraries.append(iterLib)\n # Check in liq_libraries if thermo for species exists and return the first match.\n # Only if function not called by training_set\n if liq_libraries and training_set is None:\n for label in liq_libraries:\n thermo_data = self.get_thermo_data_from_library(species, self.libraries[label])\n if thermo_data is not None:\n if len(thermo_data) != 3:\n raise RuntimeError(\"thermo_data should be a tuple (thermo_data, library, entry), \"\n \"not {0}\".format(thermo_data))\n # Watch out comments changed: this is used later to apply solvation or not on\n # species matching thermo. 
If required, Modify this carefully.\n thermo_data[0].comment += 'Liquid thermo library: ' + label\n return thermo_data\n # Remove liq_libraries from library_list if:\n # called by training set (training_set=True) or if no thermo found in liqLibrairies\n # if no liquid library found this does nothing.\n for libIter in liq_libraries:\n library_list.remove(libIter)\n\n # Condition to execute this part: gas phase simulation or training set or liquid phase simulation with:\n # noliquid libraries found or no matching species found in liquid libraries\n # If gas phase simulation library_list = self.library_order (just like before modifications) and they are\n # all gas phase, already checked by checkLibrairies function in database.load()\n # Check the libraries in order; return the first successful match\n for label in library_list:\n thermo_data = self.get_thermo_data_from_library(species, self.libraries[label])\n if thermo_data is not None:\n if len(thermo_data) != 3:\n raise RuntimeError(\"thermo_data should be a tuple (thermo_data, library, entry), \"\n \"not {0}\".format(thermo_data))\n if rmgpy.rmg.main.solvent is not None and training_set is None:\n thermo_data[0].comment += 'Thermo library corrected for liquid phase: ' + label\n else:\n thermo_data[0].comment += 'Thermo library: ' + label\n return thermo_data\n\n return None\n\n def get_all_thermo_data(self, species):\n \"\"\"\n Return all possible sets of thermodynamic parameters for a given\n :class:`Species` object `species`. The hits from the depository come\n first, then the libraries (in order), and then the group additivity\n estimate. This method is useful for a generic search job.\n \n Returns: a list of tuples (ThermoData, source, entry) \n (Source is a library or depository, or None)\n \"\"\"\n thermo_data_list = []\n # Data from depository comes first\n thermo_data_list.extend(self.get_thermo_data_from_depository(species))\n # Data from libraries comes second\n for label in self.library_order:\n data = self.get_thermo_data_from_library(species, self.libraries[label])\n if data:\n if len(data) != 3:\n raise RuntimeError(\"data should be a tuple (thermo_data, library, entry), \"\n \"not {0}\".format(data))\n data[0].comment += label\n thermo_data_list.append(data)\n\n # Last entry is always the estimate from group additivity\n # Make it a tuple\n # Distinguish surface species, as orignial get_thermo_data_from_groups does\n # not work for surface sites or surface species\n if species.is_surface_site():\n # Cannot estimate thermo of vacant site. Thermo stores in library\n pass\n elif species.contains_surface_site():\n try:\n # Estimate thermo of surface species based on modfied GA method\n data = (self.get_thermo_data_for_surface_species(species), None, None)\n except DatabaseError:\n # We don't have a GAV estimate, e.g. unsupported element\n pass\n else:\n thermo_data_list.append(data)\n else:\n try:\n data = (self.get_thermo_data_from_groups(species), None, None)\n except DatabaseError:\n # We don't have a GAV estimate, e.g. unsupported element\n pass\n else:\n # update group activity for symmetry\n data[0].S298.value_si -= constants.R * math.log(species.get_symmetry_number())\n thermo_data_list.append(data)\n # Return all of the resulting thermo parameters\n return thermo_data_list\n\n def get_thermo_data_from_depository(self, species):\n \"\"\"\n Return all possible sets of thermodynamic parameters for a given\n :class:`Species` object `species` from the depository. 
If no\n depository is loaded, a :class:`DatabaseError` is raised.\n \n Returns: a list of tuples (thermo_data, depository, entry) without any Cp0 or CpInf data.\n \"\"\"\n items = []\n for entry in self.depository['stable'].entries.values():\n for molecule in species.molecule:\n if molecule.is_isomorphic(entry.item):\n items.append((deepcopy(entry.data), self.depository['stable'], entry))\n break\n for entry in self.depository['radical'].entries.values():\n for molecule in species.molecule:\n if molecule.is_isomorphic(entry.item):\n items.append((deepcopy(entry.data), self.depository['radical'], entry))\n break\n return items\n\n def get_thermo_data_from_library(self, species, library):\n \"\"\"\n Return the set of thermodynamic parameters corresponding to a given\n :class:`Species` object `species` from the specified thermodynamics\n `library`. If `library` is a string, the list of libraries is searched\n for a library with that name. If no match is found in that library,\n ``None`` is returned. If no corresponding library is found, a\n :class:`DatabaseError` is raised.\n \n Returns a tuple: (ThermoData, library, entry) or None.\n \"\"\"\n match = None\n for entry in library.entries.values():\n for molecule in species.molecule:\n if molecule.is_isomorphic(entry.item) and entry.data is not None:\n thermo_data = deepcopy(entry.data)\n thermo_data.label = entry.label\n find_cp0_and_cpinf(species, thermo_data)\n match = (thermo_data, library, entry)\n break\n if match is not None:\n break\n if match is not None:\n # Move the matched molecule to the first position in the list\n species.molecule.remove(molecule)\n species.molecule.insert(0, molecule)\n return match\n\n def get_thermo_data_from_groups(self, species):\n \"\"\"\n Return the set of thermodynamic parameters corresponding to a given\n :class:`Species` object `species` by estimation using the group\n additivity values. If no group additivity values are loaded, a\n :class:`DatabaseError` is raised.\n \n The resonance isomer (molecule) with the lowest H298 is used, and as a side-effect\n the resonance isomers (items in `species.molecule` list) are sorted in ascending order.\n \n This does not account for symmetry. The method calling this sould correct for it.\n \n Returns: ThermoData\n \"\"\"\n thermo = []\n for molecule in species.molecule:\n molecule.clear_labeled_atoms()\n molecule.update_atomtypes()\n tdata = self.estimate_thermo_via_group_additivity(molecule)\n thermo.append(tdata)\n\n indices = self.prioritize_thermo(species, thermo)\n\n species.molecule = [species.molecule[ind] for ind in indices]\n\n thermo_data = thermo[indices[0]]\n find_cp0_and_cpinf(species, thermo_data)\n return thermo_data\n\n def get_thermo_data_from_ml(self, species, ml_estimator, ml_settings):\n \"\"\"\n Return the set of thermodynamic parameters corresponding to a\n given :class:`Species` object `species` by estimation using the\n ML estimator. Also compare the estimated uncertainties to the\n user-defined cutoffs. If any of the uncertainties are larger\n than their corresponding cutoffs, return None. 
Also check all\n other options in `ml_settings`.\n\n For HBI, the resonance isomer with the lowest H298 is used and\n the resonance isomers in species are sorted in ascending order.\n\n The entropy is not corrected for the symmetry of the molecule.\n This should be done later by the calling function.\n \"\"\"\n molecule = species.molecule[0]\n\n min_heavy = ml_settings['min_heavy_atoms'] or 1\n max_heavy = ml_settings['max_heavy_atoms'] or np.inf\n min_carbon = ml_settings['min_carbon_atoms'] or 0\n max_carbon = ml_settings['max_carbon_atoms'] or np.inf\n min_oxygen = ml_settings['min_oxygen_atoms'] or 0\n max_oxygen = ml_settings['max_oxygen_atoms'] or np.inf\n min_nitrogen = ml_settings['min_nitrogen_atoms'] or 0\n max_nitrogen = ml_settings['max_nitrogen_atoms'] or np.inf\n\n element_count = molecule.get_element_count()\n n_heavy = sum(count for element, count in element_count.items() if element != 'H')\n\n if not (min_heavy <= n_heavy <= max_heavy):\n return None\n if not (min_carbon <= element_count.get('C', 0) <= max_carbon):\n return None\n if not (min_oxygen <= element_count.get('O', 0) <= max_oxygen):\n return None\n if not (min_nitrogen <= element_count.get('N', 0) <= max_nitrogen):\n return None\n if ml_settings['only_heterocyclics'] and not molecule.is_heterocyclic():\n return None\n if ml_settings['only_cyclics'] and not molecule.is_cyclic():\n return None\n min_cycle_overlap = ml_settings['min_cycle_overlap']\n if min_cycle_overlap > 0 and molecule.get_max_cycle_overlap() < min_cycle_overlap:\n return None\n\n if molecule.is_radical():\n thermo = [self.estimate_radical_thermo_via_hbi(mol, ml_estimator.get_thermo_data) for mol in species.molecule]\n H298 = np.array([tdata.H298.value_si for tdata in thermo])\n indices = H298.argsort()\n species.molecule = [species.molecule[ind] for ind in indices]\n thermo0 = thermo[indices[0]]\n else:\n thermo0 = ml_estimator.get_thermo_data_for_species(species)\n\n # The keys for this dictionary should match the keys in\n # `RMG.ml_uncertainty_cutoffs`. Use a temperature-weighted\n # average to estimate uncertainty for Cp.\n uncertainties = dict(\n H298=thermo0.H298.uncertainty_si,\n S298=thermo0.S298.uncertainty_si,\n Cp=np.average(thermo0.Cpdata.uncertainty_si, weights=thermo0.Tdata.value_si)\n )\n\n ml_uncertainty_cutoffs = ml_settings['uncertainty_cutoffs']\n if any(uncertainties[p] > ml_uncertainty_cutoffs[p].value_si for p in ml_uncertainty_cutoffs):\n return None\n else:\n return thermo0\n\n def prioritize_thermo(self, species, thermo_data_list):\n \"\"\"\n Use some metrics to reorder a list of thermo data from best to worst.\n Return a list of indices with the desired order associated with the index of thermo from the data list.\n \"\"\"\n if len(species.molecule) > 1:\n # Go further only if there is more than one isomer\n if species.molecule[0].is_cyclic():\n # Special treatment for cyclic compounds\n entries = []\n for i, thermo in enumerate(thermo_data_list):\n ring_groups, polycyclic_groups = self.get_ring_groups_from_comments(thermo)\n\n # Use rank as a metric for prioritizing thermo. 
\n # The smaller the rank, the better.\n sum_rank = np.sum(\n [3 if entry.rank is None else entry.rank for entry in ring_groups + polycyclic_groups])\n\n # Also use number of aromatic rings as a metric, more aromatic rings is better\n # Group values are generally fitted to the most aromatic resonance structure\n num_arom_rings = species.molecule[i].count_aromatic_rings()\n\n entries.append((i, thermo, sum_rank, -num_arom_rings))\n\n # Sort first by number of aromatic rings, then rank, then by enthalpy at 298 K\n entries.sort(key=lambda entry: (entry[3], entry[2], entry[1].get_enthalpy(298.)))\n indices = [entry[0] for entry in entries]\n else:\n # For noncyclics, default to original algorithm of ordering thermo based on the most stable enthalpy\n H298 = np.array([t.get_enthalpy(298.) for t in thermo_data_list])\n indices = H298.argsort().tolist()\n # Sort indices again by the Molecule.has_charge()\n indices.sort(key=lambda index: species.molecule[index].has_charge(), reverse=False)\n # Sort indices again by the Molecule.reactive flag\n indices.sort(key=lambda index: species.molecule[index].reactive, reverse=True)\n else:\n indices = [0]\n return indices\n\n def estimate_radical_thermo_via_hbi(self, molecule, stable_thermo_estimator):\n \"\"\"\n Estimate the thermodynamics of a radical by saturating it,\n applying the provided stable_thermo_estimator method on the saturated species,\n then applying hydrogen bond increment corrections for the radical\n site(s) and correcting for the symmetry.\n \n No entropy is included in the returning term.\n This should be done later by the calling function.\n \"\"\"\n if not molecule.is_radical():\n raise ValueError(\"Method only valid for radicals.\")\n\n saturated_struct = molecule.copy(deep=True)\n added = saturated_struct.saturate_radicals()\n saturated_struct.props['saturated'] = True\n\n # Get thermo estimate for saturated form of structure\n if stable_thermo_estimator == self.get_thermo_data_from_libraries:\n # Get data from libraries\n saturated_spec = Species(molecule=[saturated_struct])\n thermo_data_sat = stable_thermo_estimator(saturated_spec)\n if thermo_data_sat:\n if len(thermo_data_sat) != 3:\n raise RuntimeError(\"thermo_data should be a tuple (thermo_data, library, entry), \"\n \"not {0}\".format(thermo_data_sat))\n thermo_data_sat = thermo_data_sat[0]\n else:\n thermo_data_sat = stable_thermo_estimator(saturated_struct)\n\n if thermo_data_sat is None:\n # We couldn't get thermo for the saturated species from libraries, ml, or qm\n # However, if we were trying group additivity, this could be a problem\n if stable_thermo_estimator == self.compute_group_additivity_thermo:\n logging.info(\"Thermo data of saturated {0} of molecule {1} is None.\".format(saturated_struct, molecule))\n return None\n\n # Convert to ThermoData object if necessary in order to add and subtract from enthalpy and entropy values\n if not isinstance(thermo_data_sat, ThermoData):\n thermo_data_sat = thermo_data_sat.to_thermo_data()\n\n if not (stable_thermo_estimator == self.compute_group_additivity_thermo\n or isinstance(stable_thermo_estimator.__self__, MLEstimator)):\n # remove the symmetry contribution to the entropy of the saturated molecule\n # assumes that the thermo data comes from QMTP or from a thermolibrary\n thermo_data_sat.S298.value_si += constants.R * math.log(saturated_struct.get_symmetry_number())\n\n thermo_data = thermo_data_sat\n\n # For each radical site, get radical correction\n # Only one radical site should be considered at a time; all others\n 
# should be saturated with hydrogen atoms\n for atom in added:\n # Remove the added hydrogen atoms and bond and restore the radical\n for H, bond in added[atom]:\n saturated_struct.remove_bond(bond)\n saturated_struct.remove_atom(H)\n atom.increment_radical()\n saturated_struct.update()\n try:\n self._add_group_thermo_data(thermo_data, self.groups['radical'], saturated_struct, {'*': atom})\n except KeyError:\n logging.error(\"Couldn't find in radical thermo database:\")\n logging.error(molecule)\n logging.error(molecule.to_adjacency_list())\n raise\n # Re-saturate\n for H, bond in added[atom]:\n saturated_struct.add_atom(H)\n saturated_struct.add_bond(bond)\n atom.decrement_radical()\n # Subtract the enthalpy of the added hydrogens\n for H, bond in added[atom]:\n thermo_data.H298.value_si -= 52.103 * 4184\n\n # Remove all of the interactions of the saturated structure. Then add the interactions of the radical.\n # Take C1=CC=C([O])C(O)=C1 as an example, we need to remove the interation of OH-OH, then add the interaction of Oj-OH.\n # For now, we only apply this part to cyclic structure because we only have radical interaction data for aromatic radical.\n if saturated_struct.is_cyclic():\n sssr = saturated_struct.get_smallest_set_of_smallest_rings()\n for ring in sssr:\n for atomPair in itertools.permutations(ring, 2):\n try:\n self._remove_group_thermo_data(thermo_data, self.groups['longDistanceInteraction_cyclic'],\n saturated_struct, {'*1': atomPair[0], '*2': atomPair[1]})\n except KeyError:\n pass\n sssr = molecule.get_smallest_set_of_smallest_rings()\n for ring in sssr:\n for atomPair in itertools.permutations(ring, 2):\n try:\n self._add_group_thermo_data(thermo_data, self.groups['longDistanceInteraction_cyclic'], molecule,\n {'*1': atomPair[0], '*2': atomPair[1]})\n except KeyError:\n pass\n\n # prevents the original thermo species name being used for the HBI corrected radical in species generation\n thermo_data.label = ''\n\n return thermo_data\n\n def estimate_thermo_via_group_additivity(self, molecule):\n \"\"\"\n Return the set of thermodynamic parameters corresponding to a given\n :class:`Molecule` object ``molecule`` using the group additivity values\n method. If no group additivity values are loaded, a :class:`DatabaseError`\n is raised.\n\n The entropy is not corrected for the symmetry of the molecule,\n this should be done later by the calling function.\n \"\"\"\n # For thermo estimation we need the atoms to already be sorted because we\n # iterate over them; if the order changes during the iteration then we\n # will probably not visit the right atoms, and so will get the thermo wrong.\n molecule.sort_atoms()\n\n if molecule.is_radical():\n thermo_data = self.estimate_radical_thermo_via_hbi(molecule, self.compute_group_additivity_thermo)\n else:\n thermo_data = self.compute_group_additivity_thermo(molecule)\n return thermo_data\n\n def compute_group_additivity_thermo(self, molecule):\n \"\"\"\n Return the set of thermodynamic parameters corresponding to a given\n :class:`Molecule` object ``molecule`` using the group additivity values\n method. 
If no group additivity values are loaded, a :class:`DatabaseError`\n is raised.\n\n The entropy is not corrected for the symmetry of the molecule,\n this should be done later by the calling function.\n \"\"\"\n\n assert not molecule.is_radical(), \"This method is only for saturated non-radical species.\"\n # For thermo estimation we need the atoms to already be sorted because we\n # iterate over them; if the order changes during the iteration then we\n # will probably not visit the right atoms, and so will get the thermo wrong.\n molecule.sort_atoms()\n\n # Create the ThermoData object\n thermo_data = ThermoData(\n Tdata=([300, 400, 500, 600, 800, 1000, 1500], \"K\"),\n Cpdata=([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], \"J/(mol*K)\"),\n H298=(0.0, \"kJ/mol\"),\n S298=(0.0, \"J/(mol*K)\"),\n )\n\n cyclic = molecule.is_cyclic()\n # Generate estimates of the thermodynamics parameters\n for atom in molecule.atoms:\n # Iterate over atoms and skip hydogens and halogens (since there are no groups centered on these atomtypes)\n if atom.is_non_hydrogen() and not atom.is_halogen():\n # Get initial thermo estimate from main group database\n data_added = False\n try:\n data_added = self._add_group_thermo_data(thermo_data, self.groups['group'], molecule, {'*': atom})[1]\n except KeyError:\n logging.error(\"Couldn't find in main thermo database:\")\n logging.error(molecule)\n logging.error(molecule.to_adjacency_list())\n raise\n if not data_added:\n neighbors = ''.join(sorted([atom2.atomtype.label for atom2 in atom.edges.keys()\n if atom2.atomtype.label != 'H']))\n neighbors += 'H' * len(['H' for atom2 in atom.edges.keys() if atom2.atomtype.label == 'H'])\n if atom.atomtype.label == 'Cb':\n neighbors = neighbors.replace('Cb', '')\n group_str = f'{atom.atomtype.label}-{neighbors}'\n if group_str not in ['O2d-CO', 'S2d-CS']:\n if thermo_data.comment:\n thermo_data.comment += f' + missing({group_str})'\n else:\n thermo_data.comment = f'Thermo group additivity estimation: missing({group_str})'\n # Correct for gauche and 1,5- interactions\n # Pair atom with its 1st and 2nd nonHydrogen neighbors, \n # Then match the pair with the entries in the database longDistanceInteraction_noncyclic.py\n # Currently we only have gauche(1,4) and 1,5 interactions in that file. \n # If you want to add more corrections for longer distance, please call get_nth_neighbor() method accordingly.\n # Potentially we could include other.py in this database, but it's a little confusing how to label atoms for the entries in other.py\n if not molecule.is_atom_in_cycle(atom):\n for atom_2 in molecule.get_nth_neighbor([atom], [1, 2]):\n if not molecule.is_atom_in_cycle(atom_2):\n # This is the correction for noncyclic structure. If `atom` or `atom_2` is in a cycle, do not apply this correction.\n # Note that previously we do not do gauche for cyclic molecule, which is unreasonable for cyclic molecule with a long tail.\n try:\n self._add_group_thermo_data(thermo_data, self.groups['longDistanceInteraction_noncyclic'],\n molecule, {'*1': atom, '*2': atom_2})\n except KeyError:\n pass\n try:\n self._add_group_thermo_data(thermo_data, self.groups['other'], molecule, {'*': atom})\n except KeyError:\n pass\n\n # Do long distance interaction correction for cyclic molecule. \n # First get smallest set of smallest rings. 
\n # Then for every single ring, generate the atom pairs by itertools.permutation.\n # Finally match the atom pair with the database.\n # WIPWIPWIPWIPWIPWIPWIP ######################################### WIPWIPWIPWIPWIPWIPWIP\n # WIP: For now, in the database, if an entry describes the interaction between same groups, \n # it will be halved because it will be counted twice here. \n # Alternatively we could keep all the entries as their full values by using combinations instead of permutations here.\n # In that case, we need to add more lines to match from reverse side when we didn't hit the most specific level from the forward side.\n # PS: by saying 'forward side', I mean {'*1':atomPair[0], '*2':atomPair[1]}. So the following is the reverse side '{'*1':atomPair[1], '*2':atomPair[0]}'\n # In my opinion, it's cleaner to do it in the current way.\n # WIPWIPWIPWIPWIPWIPWIP ######################################### WIPWIPWIPWIPWIPWIPWIP\n if cyclic:\n sssr = molecule.get_smallest_set_of_smallest_rings()\n for ring in sssr:\n for atomPair in itertools.permutations(ring, 2):\n try:\n self._add_group_thermo_data(thermo_data, self.groups['longDistanceInteraction_cyclic'], molecule,\n {'*1': atomPair[0], '*2': atomPair[1]})\n except KeyError:\n pass\n\n # Do ring corrections separately because we only want to match\n # each ring one time\n\n if cyclic:\n monorings, polyrings = molecule.get_disparate_cycles()\n for ring in monorings:\n # Make a temporary structure containing only the atoms in the ring\n # NB. if any of the ring corrections depend on ligands not in the ring, they will not be found!\n try:\n self._add_ring_correction_thermo_data_from_tree(thermo_data, self.groups['ring'], molecule, ring)\n except KeyError:\n logging.error(\"Couldn't find a match in the monocyclic ring database even though \"\n \"monocyclic rings were found.\")\n logging.error(molecule)\n logging.error(molecule.to_adjacency_list())\n raise\n for polyring in polyrings:\n # Make a temporary structure containing only the atoms in the ring\n # NB. 
if any of the ring corrections depend on ligands not in the ring, they will not be found!\n try:\n self._add_polycyclic_correction_thermo_data(thermo_data, molecule, polyring)\n except KeyError:\n logging.error(\"Couldn't find a match in the polycyclic ring database even though \"\n \"polycyclic rings were found.\")\n logging.error(molecule)\n logging.error(molecule.to_adjacency_list())\n raise\n\n return thermo_data\n\n def _add_polycyclic_correction_thermo_data(self, thermo_data, molecule, polyring):\n \"\"\"\n INPUT: `polyring` as a list of `Atom` forming a polycyclic ring\n OUTPUT: if the input `polyring` can be fully matched in polycyclic database, the correction\n will be directly added to `thermo_data`; otherwise, a heuristic approach will\n be applied.\n \"\"\"\n # look up polycylic tree directly\n matched_group_thermodata, matched_group, is_partial_match = self._add_ring_correction_thermo_data_from_tree(\n None, self.groups['polycyclic'], molecule, polyring)\n\n # if partial match (non-H atoms number same between \n # polycylic ring in molecule and match group)\n # otherwise, apply heuristic algorithm\n if not is_partial_match:\n if is_bicyclic(polyring) and matched_group.label in self.groups['polycyclic'].generic_nodes:\n # apply secondary decompostion formula\n # to get a estimated_group_thermodata\n estimated_bicyclic_thermodata = self.get_bicyclic_correction_thermo_data_from_heuristic(polyring)\n if not estimated_bicyclic_thermodata:\n estimated_bicyclic_thermodata = matched_group_thermodata\n thermo_data = add_thermo_data(thermo_data, estimated_bicyclic_thermodata, group_additivity=True,\n verbose=True)\n else:\n # keep matched_group_thermodata as is\n thermo_data = add_thermo_data(thermo_data, matched_group_thermodata, group_additivity=True, verbose=True)\n # By setting verbose=True, we turn on the comments of polycyclic correction to pass the unittest.\n # Typically this comment is very short and also very helpful to check if the ring correction is calculated correctly.\n else:\n self._add_poly_ring_correction_thermo_data_from_heuristic(thermo_data, polyring)\n\n def _add_poly_ring_correction_thermo_data_from_heuristic(self, thermo_data, polyring):\n \"\"\"\n INPUT: `polyring` as a list of `Atom` forming a polycyclic ring, which can \n only be partially matched.\n OUTPUT: `polyring` will be decomposed into a combination of 2-ring polycyclics\n and each one will be looked up from polycyclic database. 
The heuristic formula \n is \"polyring thermo correction = sum of correction of all 2-ring sub-polycyclics - \n overlapped single-ring correction\"; the calculated polyring thermo correction \n will be finally added to input `thermo_data`.\n \"\"\"\n\n # polyring decomposition\n bicyclics_merged_from_ring_pair, ring_occurrences_dict = bicyclic_decomposition_for_polyring(polyring)\n\n # loop over 2-ring cores\n for bicyclic in bicyclics_merged_from_ring_pair:\n matched_group_thermodata, matched_group, _ = self._add_ring_correction_thermo_data_from_tree(\n None, self.groups['polycyclic'], bicyclic, bicyclic.atoms)\n\n if matched_group.label in self.groups['polycyclic'].generic_nodes:\n # apply secondary decompostion formula\n # to get a estimated_group_thermodata\n estimated_bicyclic_thermodata = self.get_bicyclic_correction_thermo_data_from_heuristic(bicyclic.atoms)\n if not estimated_bicyclic_thermodata:\n estimated_bicyclic_thermodata = matched_group_thermodata\n thermo_data = add_thermo_data(thermo_data, estimated_bicyclic_thermodata, group_additivity=True,\n verbose=True)\n else:\n # keep matched_group_thermodata as is\n thermo_data = add_thermo_data(thermo_data, matched_group_thermodata, group_additivity=True, verbose=True)\n\n # loop over 1-ring \n for singleRingTuple, occurrence in ring_occurrences_dict.items():\n single_ring = list(singleRingTuple)\n\n if occurrence >= 2:\n submol, _ = convert_ring_to_sub_molecule(single_ring)\n\n if not is_aromatic_ring(submol):\n aromatic_bonds = find_aromatic_bonds_from_sub_molecule(submol)\n for aromaticBond in aromatic_bonds:\n aromaticBond.set_order_num(1)\n\n submol.saturate_unfilled_valence()\n single_ring_thermodata = self._add_ring_correction_thermo_data_from_tree(\n None, self.groups['ring'], submol, submol.atoms)[0]\n\n else:\n submol.update()\n single_ring_thermodata = self._add_ring_correction_thermo_data_from_tree(\n None, self.groups['ring'], submol, submol.atoms)[0]\n for _ in range(occurrence - 1):\n thermo_data = remove_thermo_data(thermo_data, single_ring_thermodata, True, True)\n # By setting verbose=True, we turn on the comments of polycyclic correction to pass the unittest.\n # Typically this comment is very short and also very helpful to check if the ring correction is calculated correctly.\n\n def get_bicyclic_correction_thermo_data_from_heuristic(self, bicyclic):\n\n # saturate if the bicyclic has unsaturated bonds\n # otherwise return None\n bicyclic_submol = convert_ring_to_sub_molecule(bicyclic)[0]\n saturated_bicyclic_submol, already_saturated = saturate_ring_bonds(bicyclic_submol)\n\n if already_saturated:\n return None\n # split bicyclic into two single ring submols\n single_ring_submols = split_bicyclic_into_single_rings(bicyclic_submol)\n\n # split saturated bicyclic into two single ring submols\n saturated_single_ring_submols = split_bicyclic_into_single_rings(saturated_bicyclic_submol)\n\n # apply formula: \n # bicyclic correction ~= saturated bicyclic correction - \n # saturated single ring corrections + single ring corrections\n\n estimated_bicyclic_thermo_data = ThermoData(\n Tdata=([300, 400, 500, 600, 800, 1000, 1500], \"K\"),\n Cpdata=([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], \"J/(mol*K)\"),\n H298=(0.0, \"kJ/mol\"),\n S298=(0.0, \"J/(mol*K)\")\n )\n\n saturated_bicyclic_thermo_data = self._add_ring_correction_thermo_data_from_tree(\n None, self.groups['polycyclic'], saturated_bicyclic_submol, saturated_bicyclic_submol.atoms)[0]\n\n estimated_bicyclic_thermo_data = 
add_thermo_data(estimated_bicyclic_thermo_data,\n saturated_bicyclic_thermo_data,\n group_additivity=True)\n\n estimated_bicyclic_thermo_data.comment = \"Estimated bicyclic component: \" + \\\n saturated_bicyclic_thermo_data.comment\n\n for submol in saturated_single_ring_submols:\n\n if not is_aromatic_ring(submol):\n aromatic_bonds = find_aromatic_bonds_from_sub_molecule(submol)\n for aromatic_bond in aromatic_bonds:\n aromatic_bond.set_order_num(1)\n\n submol.saturate_unfilled_valence()\n single_ring_thermo_data = self._add_ring_correction_thermo_data_from_tree(\n None, self.groups['ring'], submol, submol.atoms)[0]\n\n else:\n submol.update()\n single_ring_thermo_data = self._add_ring_correction_thermo_data_from_tree(\n None, self.groups['ring'], submol, submol.atoms)[0]\n estimated_bicyclic_thermo_data = remove_thermo_data(estimated_bicyclic_thermo_data,\n single_ring_thermo_data,\n group_additivity=True, verbose=True)\n\n for submol in single_ring_submols:\n\n if not is_aromatic_ring(submol):\n aromatic_bonds = find_aromatic_bonds_from_sub_molecule(submol)\n for aromatic_bond in aromatic_bonds:\n aromatic_bond.set_order_num(1)\n\n submol.saturate_unfilled_valence()\n single_ring_thermo_data = self._add_ring_correction_thermo_data_from_tree(\n None, self.groups['ring'], submol, submol.atoms)[0]\n\n else:\n submol.update()\n single_ring_thermo_data = self._add_ring_correction_thermo_data_from_tree(\n None, self.groups['ring'], submol, submol.atoms)[0]\n\n estimated_bicyclic_thermo_data = add_thermo_data(estimated_bicyclic_thermo_data,\n single_ring_thermo_data, group_additivity=True, verbose=True)\n\n return estimated_bicyclic_thermo_data\n\n def _add_ring_correction_thermo_data_from_tree(self, thermo_data, ring_database, molecule, ring):\n \"\"\"\n Determine the ring correction group additivity thermodynamic data for the given\n `ring` in the `molecule`, and add it to the existing thermo data\n `thermo_data`.\n Also returns the matched ring group from the database from which the data originated.\n \"\"\"\n matched_ring_entries = []\n # label each atom in the ring individually to try to match the group\n # for each ring, save only the ring that is matches the most specific leaf in the tree.\n for atom in ring:\n atoms = {'*': atom}\n entry = ring_database.descend_tree(molecule, atoms)\n matched_ring_entries.append(entry)\n\n if matched_ring_entries is []:\n raise KeyError('Node not found in database.')\n # Decide which group to keep\n is_partial_match = True\n complete_matched_groups = [entry for entry in matched_ring_entries\n if not is_ring_partial_matched(ring, entry.item)]\n\n if complete_matched_groups:\n is_partial_match = False\n matched_ring_entries = complete_matched_groups\n\n depth_list = [len(ring_database.ancestors(entry)) for entry in matched_ring_entries]\n most_specific_match_indices = [i for i, x in enumerate(depth_list) if x == max(depth_list)]\n\n most_specific_matched_entries = [matched_ring_entries[idx] for idx in most_specific_match_indices]\n if len(set(most_specific_matched_entries)) != 1:\n logging.debug('More than one type of node was found to be most specific for this ring.')\n logging.debug('This is either due to a database error in the ring or polycyclic groups, '\n 'or a partial match between the group and the full ring.')\n logging.debug(most_specific_matched_entries)\n\n # Condense the number of most specific groups down to one\n most_specific_matched_entry = matched_ring_entries[most_specific_match_indices[0]]\n\n node = most_specific_matched_entry\n\n 
if node is None:\n raise DatabaseError('Unable to determine thermo parameters for {0}: no data for {1} or '\n 'any of its ancestors.'.format(molecule, mostSpecificGroup))\n\n while node is not None and node.data is None:\n # do average of its children\n success, averaged_thermo_data = self._average_children_thermo(node)\n if success:\n node.data = averaged_thermo_data\n else:\n node = node.parent\n\n data = node.data\n comment = node.label\n while isinstance(data, str) and data is not None:\n for entry in ring_database.entries.values():\n if entry.label == data:\n data = entry.data\n comment = entry.label\n node = entry\n break\n data.comment = '{0}({1})'.format(ring_database.label, comment)\n\n if thermo_data is None:\n return data, node, is_partial_match\n else:\n return add_thermo_data(thermo_data, data, group_additivity=True, verbose=True), node, is_partial_match\n # By setting verbose=True, we turn on the comments of ring correction to pass the unittest.\n # Typically this comment is very short and also very helpful to check if the ring correction is calculated correctly.\n\n def _average_children_thermo(self, node):\n \"\"\"\n Use children's thermo data to guess thermo data of parent `node` \n that doesn't have thermo data built-in in tree yet. \n For `node` has children that have thermo data, return success flag \n `True` and the average thermo data.\n For `node` whose children that all have no thermo data, return flag\n `False` and None for the thermo data.\n \"\"\"\n if not node.children:\n if node.data is None:\n return False, None\n else:\n return True, node.data\n else:\n children_thermo_data_list = []\n for child in node.children:\n if child.data is None:\n success, child_thermo_data_average = self._average_children_thermo(child)\n if success:\n children_thermo_data_list.append(child_thermo_data_average)\n else:\n children_thermo_data_list.append(child.data)\n if children_thermo_data_list:\n return True, average_thermo_data(children_thermo_data_list)\n else:\n return False, None\n\n def _add_group_thermo_data(self, thermo_data, database, molecule, atom):\n \"\"\"\n Determine the group additivity thermodynamic data for the atom ``atom``\n in the structure ``molecule``, and add it to the existing thermo data\n ``thermo_data``.\n The parameter ``atom`` is a dictionary of label-atom pairs like {'*',atom}\n\n Returns:\n tuple: The combined ThermoData object and a bool flag indicating whether new data was added to it.\n \"\"\"\n node0 = database.descend_tree(molecule, atom, None)\n if node0 is None:\n raise KeyError(f'Node not found for atom {atom} in molecule {molecule} in thermo database {database.label}.')\n\n # It's possible (and allowed) that items in the tree may not be in the\n # library, in which case we need to fall up the tree until we find an\n # ancestor that has an entry in the library\n node = node0\n while node is not None and node.data is None:\n node = node.parent\n if node is None:\n raise DatabaseError(f'Unable to determine thermo parameters for atom {atom} in molecule {molecule}: '\n f'no data for node {node0} or any of its ancestors in database {database.label}.')\n\n data = node.data\n comment = node.label\n loop_count = 0\n while isinstance(data, str):\n loop_count += 1\n if loop_count > 100:\n raise DatabaseError(\"Maximum iterations reached while following thermo group data pointers. A circular\"\n f\" reference may exist. 
Last node was {node.label} pointing to group called {data} in \"\n f\"database {database.label}\")\n\n for entry in database.entries.values():\n if entry.label == data:\n data = entry.data\n comment = entry.label\n break\n else:\n raise DatabaseError(f\"Node {node.label} points to a non-existing group called {data} \"\n f\"in database {database.label}\")\n data.comment = f'{database.label}({comment})'\n\n # This code prints the hierarchy of the found node; useful for debugging\n # result = ''\n # while node is not None:\n # result = ' -> ' + node.label + result\n # node = node.parent\n # print result[4:]\n\n if thermo_data is None:\n return data, False\n else:\n if data.is_all_zeros():\n return thermo_data, False\n return add_thermo_data(thermo_data, data, group_additivity=True), True\n\n def _remove_group_thermo_data(self, thermo_data, database, molecule, atom):\n \"\"\"\n Based on the _add_group_thermo_data method. Just replace the last line with 'return remove_thermo_data()'.\n Determine the group additivity thermodynamic data for the atom `atom` in the structure `structure`,\n and REMOVE it from the existing thermo data `thermo_data`.\n \"\"\"\n node0 = database.descend_tree(molecule, atom, None)\n if node0 is None:\n raise KeyError(f'Node not found for atom {atom} in molecule {molecule} in thermo database {database.label}.')\n\n # It's possible (and allowed) that items in the tree may not be in the\n # library, in which case we need to fall up the tree until we find an\n # ancestor that has an entry in the library\n node = node0\n while node.data is None and node is not None:\n node = node.parent\n if node is None:\n raise DatabaseError(f'Unable to determine thermo parameters for atom {atom} in molecule {molecule}: '\n f'no data for node {node0} or any of its ancestors in database {database.label}.')\n\n data = node.data\n comment = node.label\n loop_count = 0\n while isinstance(data, str):\n loop_count += 1\n if loop_count > 100:\n raise DatabaseError(\"Maximum iterations reached while following thermo group data pointers. A circular\"\n f\" reference may exist. 
Last node was {node.label} pointing to group called {data} in \"\n f\"database {database.label}\")\n for entry in database.entries.values():\n if entry.label == data:\n data = entry.data\n comment = entry.label\n break\n else:\n raise DatabaseError(f\"Node {node.label} points to a non-existing group called {data} \"\n f\"in database {database.label}\")\n data.comment = f'{database.label}({comment})'\n\n # This code prints the hierarchy of the found node; useful for debugging\n # result = ''\n # while node is not None:\n # result = ' -> ' + node.label + result\n # node = node.parent\n # print result[4:]\n\n if thermo_data is None:\n return data\n else:\n return remove_thermo_data(thermo_data, data, True)\n\n def get_ring_groups_from_comments(self, thermo_data):\n \"\"\"\n Takes a string of comments from group additivity estimation, and extracts the ring and polycyclic ring groups\n from them, returning them as lists.\n \"\"\"\n tokens = thermo_data.comment.split()\n ring_groups = []\n polycyclic_groups = []\n regex = r\"\\((.*)\\)\" # only hit outermost parentheses\n for token in tokens:\n if token.startswith('ring'):\n split_tokens = re.split(regex, token)\n assert len(split_tokens) == 3, 'token: {}'.format(token)\n group_label = split_tokens[1]\n ring_groups.append(self.groups['ring'].entries[group_label])\n if token.startswith('polycyclic'):\n split_tokens = re.split(regex, token)\n assert len(split_tokens) == 3, 'token: {}'.format(token)\n group_label = split_tokens[1]\n polycyclic_groups.append(self.groups['polycyclic'].entries[group_label])\n\n return ring_groups, polycyclic_groups\n\n def extract_source_from_comments(self, species):\n \"\"\"\n `species`: A species object containing thermo data and thermo data comments\n \n Parses the verbose string of comments from the thermo data of the species object,\n and extracts the thermo sources.\n\n Returns a dictionary with keys of either 'Library', 'QM', and/or 'GAV'.\n Commonly, species thermo are estimated using only one of these sources.\n However, a radical can be estimated with more than one type of source, for \n instance a saturated library value and a GAV HBI correction, or a QM saturated value\n and a GAV HBI correction. 
\n \n source = {'Library': String_Name_of_Library_Used,\n 'QM': String_of_Method_Used,\n 'GAV': Dictionary_of_Groups_Used \n }\n \n The Dictionary_of_Groups_Used looks like \n {'groupType':[List of tuples containing (Entry, Weight)]\n \"\"\"\n comment = species.thermo.comment\n tokens = comment.split()\n\n source = {}\n\n if comment.startswith('Thermo library'):\n # Store name of the library source, which is the 3rd token in the comments\n source['Library'] = tokens[2]\n\n elif comment.startswith('QM'):\n # Store the level of the calculation, which is the 2nd token in the comments\n source['QM'] = tokens[1]\n\n # Check for group additivity contributions to the thermo in this species \n\n # The contribution of the groups can be either additive or substracting\n # after changes to the polycyclic algorithm\n\n comment = comment.replace(' + ', ' +')\n comment = comment.replace(' - ', ' -')\n tokens = comment.split()\n\n groups = {}\n group_types = list(self.groups.keys())\n\n regex = r\"\\((.*)\\)\" # only hit outermost parentheses\n for token in tokens:\n weight = 1 # default contribution is additive\n if token.startswith('+'):\n token = token[1:]\n elif token.startswith('-'):\n weight = -1\n token = token[1:]\n for groupType in group_types:\n if token.startswith(groupType + '(') and token.endswith(')'):\n split_tokens = re.split(regex, token)\n group_label = split_tokens[1]\n group_entry = self.groups[groupType].entries[group_label]\n # Use dictionary to combine into weights when necessary\n if groupType not in groups:\n groups[groupType] = {group_entry: weight}\n else:\n if group_entry in groups[groupType]:\n groups[groupType][group_entry] += weight\n else:\n groups[groupType][group_entry] = weight\n break\n\n if groups:\n # Indicate that group additivity is used when it is either an HBI correction\n # onto a thermo library or QM value, or if the entire molecule is estimated using group additivity\n # Save the groups into the source dictionary\n\n # Convert groups back into tuples \n for groupType, groupDict in groups.items():\n groups[groupType] = list(groupDict.items())\n\n source['GAV'] = groups\n\n # Perform a sanity check that this molecule is estimated by at least one method\n if not list(source.keys()):\n raise ValueError('Species {0} thermo appears to not be estimated using any methods.'.format(species))\n\n return source\n\n\nclass ThermoCentralDatabaseInterface(object):\n \"\"\"\n A class for interfacing with RMG online thermo central database.\n \"\"\"\n\n def __init__(self, host, port, username, password, application):\n self.host = host\n self.port = port\n self.username = username\n self.password = password\n self.application = application\n self.client = self.connect()\n\n def connect(self):\n\n import pymongo\n\n remote_address = 'mongodb://{0}:{1}@{2}/thermoCentralDB'.format(self.username,\n self.password,\n self.host)\n client = pymongo.MongoClient(remote_address,\n self.port,\n serverSelectionTimeoutMS=2000)\n try:\n client.server_info()\n logging.info(\"\\nConnection success to RMG Thermo Central Database!\\n\")\n return client\n\n except (pymongo.errors.ServerSelectionTimeoutError,\n pymongo.errors.OperationFailure):\n logging.info(\"\\nConnection failure to RMG Thermo Central Database...\")\n logging.info(\"This RMG job still can run but cannot utilize data from central database.\\n\")\n return None\n\n def satisfy_registration_requirements(self, species, thermo, thermodb):\n \"\"\"\n Given a species, check if it's allowed to register in \n central thermo database.\n\n 
Requirements for now: \n cyclic, \n its thermo is estimated by GAV and no exact match/use heuristics\n \"\"\"\n if not species.molecule[0].is_cyclic():\n return False\n\n gav_keywords = 'Thermo group additivity estimation'\n if isinstance(thermo, ThermoData) and thermo.comment.startswith(gav_keywords):\n ring_groups, polycyclic_groups = thermodb.get_ring_groups_from_comments(thermo)\n\n # use GAV generic node to estimate thermo\n for group in ring_groups + polycyclic_groups:\n if group.label in thermodb.groups['ring'].generic_nodes + thermodb.groups['polycyclic'].generic_nodes:\n return True\n\n # used some heuristic way to estimate thermo\n if \") - ring(\" in thermo.comment:\n return True\n else:\n return False\n else:\n return False\n\n def register_in_central_thermo_db(self, species):\n\n # choose registration table\n db = getattr(self.client, 'thermoCentralDB')\n registration_table = getattr(db, 'registration_table')\n results_table = getattr(db, 'results_table')\n\n # prepare registration entry\n try:\n aug_inchi = species.get_augmented_inchi()\n\n # check if it's registered before or\n # already have available data in results_table\n registered_entries = list(registration_table.find({\"aug_inchi\": aug_inchi}))\n finished_entries = list(results_table.find({\"aug_inchi\": aug_inchi}))\n\n if len(registered_entries) + len(finished_entries) > 0:\n return\n\n smiles_input = species.molecule[0].to_smiles()\n status = 'pending'\n species_registration_entry = {'aug_inchi': aug_inchi,\n 'SMILES_input': smiles_input,\n 'radical_number': species.molecule[0].get_radical_count(),\n 'status': status,\n 'user': self.username,\n 'application': self.application,\n 'timestamp': time.time()\n }\n\n registration_table.insert(species_registration_entry)\n\n except ValueError:\n logging.info('Fail to generate inchi/smiles for species below:\\n{0}'.format(species.to_adjacency_list()))\n\n\ndef find_cp0_and_cpinf(species, heat_capacity):\n \"\"\"\n Calculate the Cp0 and CpInf values, and add them to the HeatCapacityModel object.\n \"\"\"\n if heat_capacity.Cp0 is None:\n cp_0 = species.calculate_cp0()\n heat_capacity.Cp0 = (cp_0, \"J/(mol*K)\")\n if heat_capacity.CpInf is None:\n cp_inf = species.calculate_cpinf()\n heat_capacity.CpInf = (cp_inf, \"J/(mol*K)\")\n"
] |
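The correct_binding_energy routine in the thermo code above applies a linear scaling relationship (LSR): each element bonded to a surface site contributes the change in its atomic binding energy between the source and target metals, weighted by the surface bond order divided by that element's maximum bond order (C: 4, O: 2, N: 3, H: 1), and the weighted sum is added to H298. The following is a minimal standalone sketch of that arithmetic only, not RMG-Py's implementation; the metal binding energies and the single C-X bond in the example are illustrative placeholders.

MAX_BOND_ORDER = {'C': 4.0, 'O': 2.0, 'N': 3.0, 'H': 1.0}

def lsr_h298_shift(surface_bonds, be_from, be_to):
    """surface_bonds: list of (element, bond_order) pairs for atom-surface bonds.
    be_from / be_to: atomic binding energies (eV) on the source and target metals.
    Returns the enthalpy shift (eV) to add to the adsorbate's H298."""
    shift = 0.0
    for element, order in surface_bonds:
        # fraction of the element's maximum valence that is bonded to the surface
        normalized = order / MAX_BOND_ORDER[element]
        shift += (be_to[element] - be_from[element]) * normalized
    return shift

# Example: a species adsorbed through a single C-X bond, scaled between two
# hypothetical metals (placeholder binding energies in eV):
be_from = {'C': -6.75, 'O': -3.81, 'N': -4.63, 'H': -2.75}
be_to   = {'C': -6.79, 'O': -4.99, 'N': -5.78, 'H': -2.89}
print(lsr_h298_shift([('C', 1.0)], be_from, be_to))   # approx. -0.01 eV shift in H298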
[
[
"numpy.std",
"numpy.array",
"numpy.sum",
"numpy.average"
]
] |
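The api list above (numpy.std, numpy.array, numpy.sum, numpy.average) reflects how the thermo estimator in this file leans on numpy: resonance-isomer thermo is ordered by H298 via array/argsort (prioritize_thermo), group ranks are combined with sum, and the ML path takes a temperature-weighted average of Cp uncertainties (get_thermo_data_from_ml); numpy.std presumably appears in a part of the file not shown here. A minimal sketch with made-up numbers, assuming nothing about the real data:

import numpy as np

# Order three hypothetical resonance isomers by enthalpy at 298 K (kJ/mol):
h298 = np.array([120.3, 98.7, 105.2])
print(h298.argsort().tolist())            # -> [1, 2, 0]: most stable isomer first

# Combine group ranks, treating a missing rank as 3 (as prioritize_thermo does):
ranks = [2, 3, None]
print(np.sum([3 if r is None else r for r in ranks]))   # -> 8

# Temperature-weighted average of Cp uncertainties, as in the ML cutoff check:
t_data = np.array([300, 400, 500, 600, 800, 1000, 1500], dtype=float)   # K
cp_unc = np.array([2.1, 2.0, 1.9, 1.9, 2.2, 2.5, 3.0])                  # J/(mol*K)
print(np.average(cp_unc, weights=t_data))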
SanjibSarkarU/iver-rf-ac-test
|
[
"5b42711c1efe4ec99e8349c37431097aa86e279d"
] |
[
"MRC_Iver.py"
] |
[
"from time import monotonic\r\nimport time\r\n# import geopy.distance\r\nimport serial\r\nimport pandas as pd\r\nimport numpy as np\r\n# import checkSum\r\nimport re\r\nimport warnings\r\nimport tkinter as tk\r\nfrom geographiclib.geodesic import Geodesic\r\nimport numpy.polynomial.polynomial as poly\r\nimport pymap3d as pm\r\nfrom scipy import stats\r\n\r\n# import matplotlib.pyplot as plt\r\n# import rasterio\r\n# from rasterio.plot import show\r\n# from tkinter import filedialog\r\n\r\nroot = tk.Tk()\r\nroot.withdraw()\r\n\r\n\r\ndef check_sum(instruction):\r\n \"\"\" Remove any newlines and $ and calculate checksum \"\"\"\r\n if re.search(\"\\n$\", instruction):\r\n instruction = instruction[:-1]\r\n if re.search(\"\\r$\", instruction):\r\n instruction = instruction[:-1]\r\n if re.search(\"\\$\", instruction):\r\n instruction = instruction[1:]\r\n nmeadata, cksum = re.split('\\*', instruction)\r\n calc_cksum = 0\r\n for s in nmeadata:\r\n calc_cksum ^= ord(s)\r\n \"\"\" Return the calculated checksum \"\"\"\r\n return '{:02X}'.format(calc_cksum)\r\n\r\n\r\ndef received_stream(stream):\r\n if stream == '':\r\n # print(\" Waiting\")\r\n return 'None'\r\n else:\r\n if re.search(\"ACK\", stream):\r\n acknowledgement = {'8': 'osdAck', '16': 'omwAck'}\r\n return acknowledgement[stream.split(';')[-1].split(',')[1]]\r\n elif re.search(\"OSI\", stream):\r\n return 'osi'\r\n elif re.search(\"OSD\", stream):\r\n return 'osd'\r\n elif re.search(\"OMW\", stream):\r\n return 'omw'\r\n else:\r\n return 'not known keyword'\r\n\r\n\r\ndef omw_ack(stream):\r\n # print(\"omw_ack section: \", stream)\r\n rec_chksm = stream.split('*')[-1][0:3]\r\n cal_chksm = check_sum(stream.split(';')[-1][1:-2])\r\n if int(rec_chksm, 16) == int(cal_chksm, 16):\r\n # print('Right checksum')\r\n if int(stream.split(',')[2]) == 0:\r\n # print('The IVER has acknowledged the OMW command without an error')\r\n return 0\r\n else:\r\n # print(stream)\r\n print('The IVER has raised an error to execute the OMW command')\r\n return 1\r\n \r\n else:\r\n print('wrong checksum')\r\n print('Received checkSum: ' + rec_chksm + 'Calculated checkSum: ' + cal_chksm)\r\n\r\n\r\ndef osi(stream):\r\n try:\r\n # print(stream)\r\n if int(stream.split('*')[-1], 16) == int(check_sum(stream.split(';')[-1][1:-2]), 16):\r\n # print('Right checkSum')\r\n stream = stream.split(',')\r\n mode = {'N': 'Normal_UVC', 'S': 'Stopped', 'P': 'Parking',\r\n 'M': 'Manual_Override', 'mP': 'Manual_parking',\r\n 'A': 'Servo', 'W': 'Waypoint'}\r\n # print('Mode : {}'.format(mode[stream[2]]))\r\n # print('NextWp: ', stream[3])\r\n # print('Latitude: ', stream[4])\r\n # print('Longitude: ', stream[5])\r\n # print('Speed: {} Knots'.format(stream[6]))\r\n # print(\"Distance to next WP: {} meters\".format(stream[7]))\r\n # print('Battery percent: ', stream[16])\r\n # osi_return = (stream[3], stream[4], stream[5], stream[6], stream[7], stream[16])\r\n osi_return = {'NextWp': stream[3], 'Latitude': float(stream[4]), 'Longitude': float(stream[5]),\r\n 'Speed': float(stream[6]), 'DistanceToNxtWP': float(stream[7]), 'Battery': float(stream[16])}\r\n # return mode[stream[2]], stream[3]\r\n return osi_return\r\n else:\r\n print('Wrong checkSum')\r\n print(\"Received checkSum: \" + str(stream.split('*')[-1]) + 'Calculated checksum is : ' + str(\r\n check_sum(stream.split(';')[-1][1:-2])))\r\n print(\" Wrong CheckSum: \", stream)\r\n # osi_return = {'NextWp': 0, 'Latitude': 00.00000, 'Longitude': 00.00000,\r\n # 'Speed': 0.00, 'DistanceToNxtWP': 0.00, 'Battery': 0.00}\r\n return 
None\r\n except Exception as osi_exception:\r\n print(\"Error: \", osi_exception)\r\n # osi_return = {'NextWp': 0, 'Latitude': 00.00000, 'Longitude': 00.00000,\r\n # 'Speed': 0.00, 'DistanceToNxtWP': 0.00, 'Battery': 0.00}\r\n return None\r\n\r\n\r\ndef osd():\r\n ins_osd = 'OSD,,,S,,,,,*'\r\n instruction = ins_osd + check_sum(ins_osd)\r\n return instruction\r\n\r\n\r\ndef osd_req_recvd(stream):\r\n stream = stream.strip()\r\n # print(stream)\r\n if int(stream.split('*')[-1], 16) == int(check_sum(stream.split(';')[-1][1:-2]), 16):\r\n # print(\" right Check Sum\")\r\n return 0\r\n else:\r\n print(\"wrong CheckSum\")\r\n return 1\r\n\r\n\r\n# def osd_3089():\r\n# ins_osd_3089 = 'OSD,,,S,,,,,*'\r\n# instruction = ins_osd_3089 + check_sum(ins_osd_3089)\r\n# return instruction\r\n\r\n\r\n# def osd_3072():\r\n# ins_osd_3072 = 'OSD,,,S,,,,,*'\r\n# instruction = ins_osd_3072 + check_sum(ins_osd_3072)\r\n# return instruction\r\n\r\n\r\ndef osd_ack(stream):\r\n # print(\"osd_ack section: \", stream)\r\n rec_chksm = stream.split('*')[-1][0:3]\r\n cal_chksm = check_sum(stream.split(';')[-1][1:-2])\r\n if int(rec_chksm, 16) == int(cal_chksm, 16):\r\n # print('Right checksum')\r\n if int(stream.split(',')[2]) == 0:\r\n # print('The IVER has acknowledged the OSD command without an error.')\r\n return 0\r\n else:\r\n print('The IVER has raised an error to execute the OSD command')\r\n return 1\r\n \r\n else:\r\n print('wrong checksum')\r\n print('Received checkSum: ' + rec_chksm + 'Calculated checkSum: ' + cal_chksm)\r\n\r\n\r\ndef omw_stop():\r\n ins_omw_stop = 'OMW,STOP*'\r\n instruction = ins_omw_stop + check_sum(ins_omw_stop)\r\n return instruction\r\n\r\n\r\ndef omw_req_recvd(stream):\r\n # assert f\"{stream} is not string\"\r\n # $AC;Iver3-3089;$OMW,30.35197,-89.62897,0.0,,10,4.0,0, *64\r\n stream = stream.strip()\r\n # print(stream)\r\n if int(stream.split('*')[-1], 16) == int(check_sum(stream.split(';')[-1][1:-2]), 16):\r\n # print(\" right Check Sum\")\r\n return 0\r\n else:\r\n print(\"OMW Request Received: wrong CheckSum\")\r\n return 1\r\n\r\n\r\ndef wamv_gpgll(stream):\r\n if int((stream.split('*')[-1]), 16) == int(check_sum(stream.split('*')[0] + '*'), 16):\r\n # print(\"right CheckSum\")\r\n return 0\r\n else:\r\n print(\"Wamv_gpgll: Wrong CheckSum: received checkSum{}, calculated checkSum{}\".format(\r\n stream.split('*')[-1], check_sum(stream.split('*')[0] + '*')))\r\n return 1\r\n\r\n\r\n'''\r\ndef dd_ddm_nmea_lat(coordinates):\r\n # coordinates = float\r\n coordinates = str(coordinates)\r\n if re.search('-', coordinates):\r\n coordinates = coordinates.strip('-')\r\n coordinates = coordinates.split('.')\r\n co_return = ''.join((coordinates[0] + '.' + str(int(coordinates[-1]) * 60)).split('.'))\r\n co_return = co_return[:4] + '.' + co_return[4:]\r\n return co_return\r\n\r\n\r\ndef dd_ddm_nmea_lng(coordinates):\r\n # coordinates = float\r\n coordinates = str(coordinates)\r\n coordinates = '{}'.format(coordinates[1:] if coordinates.startswith('-') else coordinates)\r\n # if re.search('-', coordinates):\r\n # coordinates = coordinates.strip('-')\r\n coordinates = coordinates.split('.')\r\n co_return = ''.join((coordinates[0] + '.' + str(int(coordinates[-1]) * 60)).split('.'))\r\n co_return = co_return.zfill(len(co_return) + 1)\r\n co_return = (co_return[:4] + '.' + co_return[4:])\r\n # print(co_return)\r\n return co_return\r\n\r\n\r\ndef ddm_dd_nmea_lat(coordinates):\r\n coordinates = ''.join(str(coordinates).split('.'))\r\n co_return = ''.join(coordinates[:2] + '.' 
+ str(int(int(coordinates[2:]) / 60)))\r\n return co_return\r\n\r\n\r\ndef ddm_dd_nmea_lng(coordinates):\r\n coordinates = str(coordinates)\r\n coordinates = '{}'.format(coordinates[1:] if coordinates.startswith('0') else coordinates)\r\n # print((coordinates))\r\n coordinates = ''.join(coordinates.split('.'))\r\n co_return = coordinates[:2] + '.' + ''.join(str(float(coordinates[2:]) / 60).split('.'))\r\n # print(co_return)\r\n return co_return\r\n'''\r\n\r\n\r\n# ddm = degree, decimal minutes, dd = degree decimal\r\ndef ddm2dd(coordinates):\r\n \"\"\" Convert degree, decimal minutes to degree decimal; return 'Lat_dd': float(lat_dd), 'Lng_dd': float(lng_dd)}\r\n Input Ex.: ['3020.1186383580', 'N', '0894.5222887340', 'W'],\r\n return: {'Lat_dd': float(lat_dd), 'Lng_dd': float(lng_dd)} \"\"\"\r\n lat, lat_direction, lng, lng_direction = coordinates[0], coordinates[1], coordinates[2], coordinates[3]\r\n lat = ''.join(lat.split('.'))\r\n lat_ddm = lat[:2] + '.' + str(int(int(lat[2:]) / 60))\r\n lat_dd = '{}'.format('-' + lat_ddm if lat_direction == 'S' else lat_ddm)\r\n lng_ddm = ''.join('{}'.format(lng[1:] if lng.startswith('0') else lng).split('.'))\r\n is_zero = lng_ddm[2:].startswith('0')\r\n lng_ddm = lng_ddm[:2] + '.' + (str(int(int(lng_ddm[2:]) / 60)) if not is_zero else ('0' + str(int(int(lng_ddm[2:]) / 60))))\r\n lng_dd = '{}'.format('-' + lng_ddm if lng_direction == 'W' else lng_ddm)\r\n dd = {'Lat_dd': float(lat_dd), 'Lng_dd': float(lng_dd)}\r\n return dd\r\n\r\n\r\ndef dd2ddm(coordinates):\r\n \"\"\" Convert degree decimal to degree decimal minute;\"\"\"\r\n # lat, lng = ''.join(str(coordinates[0]).split('.')), ''.join(str(coordinates[1]).split('.'))\r\n lat, lng = ''.join(str(coordinates[0]).split('.')), str(coordinates[1])\r\n lat = lat[:2] + ''.join(str(int(int(lat[2:]) * 60)).split('.'))\r\n lat = lat[:4] + '.' + lat[4:]\r\n lat_ddm = lat\r\n lng_ddm = '{}'.format(lng[1:] if lng.startswith('-') else lng)\r\n lng_ddm = lng_ddm.split('.')\r\n is_zero = lng_ddm[1].startswith('0')\r\n # lng_ddm = lng_ddm[:2] + ''.join(str((int(int(lng_ddm[2:]) * 60))).split('.'))\r\n lng_ddm = lng_ddm[0] + '{}'.format('0' + str(int(lng_ddm[1])*60) if is_zero else str(int(lng_ddm[1])*60))\r\n lng_ddm = lng_ddm.zfill(len(lng_ddm) + 1)\r\n lng_ddm = lng_ddm[:4] + '.' 
+ lng_ddm[4:]\r\n ddm = {'Lat_ddm': lat_ddm, 'N_S': 'S' if lat.startswith('-') else 'N',\r\n 'Lng_ddm': lng_ddm, 'E_W': 'W' if lng.startswith('-') else 'E'}\r\n return ddm\r\n\r\n\r\ndef speed_ha_coordinates(coordinate1_withtimestamp, coordinate2_withtimestamp):\r\n # ['30.35059', '-89.62995', '104139'] # example coordinate_withtimestamp\r\n geod = Geodesic(6378388, 1 / 297.0)\r\n co1 = coordinate1_withtimestamp[0:2]\r\n co1_time = coordinate1_withtimestamp[-1]\r\n co2 = coordinate2_withtimestamp[0:2]\r\n co2_time = coordinate2_withtimestamp[-1]\r\n # l = geod.InverseLine(lat1, lng1, lat2, lng2)\r\n d = geod.Inverse(float(co1[0]), float(co1[1]), float(co2[0]), float(co2[1]))\r\n # print(d)\r\n distance = d['s12']\r\n ha = d['azi2']\r\n time_diff = (int(co2_time[0:2]) * 3600 + int(co2_time[2:4]) * 60 + int(co2_time[4:])) - \\\r\n (int(co1_time[0:2]) * 3600 + int(co1_time[2:4]) * 60 + int(co1_time[4:]))\r\n # print(time_diff)\r\n # speed = distance / time_diff\r\n # result = {'speed': speed, 'ha': ha, 'dis12': distance}\r\n try:\r\n speed = distance / time_diff\r\n result = {'speed': speed, 'ha': ha, 'dis12': distance}\r\n except ZeroDivisionError:\r\n result = {'speed': 0, 'ha': ha, 'dis12': distance}\r\n return result\r\n\r\n\r\ndef distance_in_m(coordinate_1, coordinate_2):\r\n return str(round(geopy.distance.GeodesicDistance(coordinate_1, coordinate_2).m, 1))\r\n\r\n\r\ndef haSpeed_ply(df):\r\n \"\"\" fitting ha & speed, example input: [[30.35158, -89.6296, '104511'], [30.3516, -89.62957, '104513'],\r\n [30.35162, -89.62954, '104515']] \"\"\"\r\n df = pd.DataFrame(df, columns=['lat', 'lon', 't'])\r\n # df['dt'] = df.t.diff()\r\n df['latlng'] = df[['lat', 'lon', 't']].values.tolist()\r\n ha, speed = [], []\r\n for i in range(len(df.latlng) - 1):\r\n h = speed_ha_coordinates(df.latlng[i], df.latlng[i + 1])\r\n # ha.append(h['ha'] + 360 if h['ha'] < 0 else np.nan if h['ha'] == 0 else h['ha'])\r\n ha.append(np.absolute(h['ha']) if h['ha'] < 0 else np.nan if h['ha'] == 0 else h['ha'])\r\n speed.append(h['speed'])\r\n ha.append(0)\r\n speed.append(0)\r\n df['speed'] = speed\r\n df['ha'] = ha\r\n # print(df)\r\n df = df[df['speed'].notna()]\r\n df = df[df['ha'].notna()]\r\n df = df.drop(df.index[len(df) - 1])\r\n # df['speed_'] = df['speed'].apply(lambda x: np.abs(x - df['speed'].mean()) / df['speed'].std())\r\n # print(df)\r\n deg = 5\r\n np_speed = df['speed'].to_numpy(dtype=np.float32)\r\n np_ha = df['ha'].to_numpy(dtype=np.float32)\r\n np_t = df['t'].to_numpy(dtype=np.float32)\r\n x = np.linspace(0, len(np_ha), len(np_ha))\r\n warnings.simplefilter('ignore', np.RankWarning)\r\n model_ha = np.poly1d(np.polyfit(x, np_ha, deg=deg))\r\n line_speed = np.linspace(x[0], x[-1], num=len(x) * 10)\r\n predict_ha = model_ha(len(np_ha))\r\n \r\n x = np.linspace(0, len(np_speed), len(np_speed))\r\n model_speed = np.poly1d(np.polyfit(x, np_speed, deg=deg))\r\n line_speed = np.linspace(x[0], x[-1], num=len(x) * 10)\r\n p_speed = model_speed(len(np_speed))\r\n \r\n coefs = poly.polyfit(x, np_speed, 4)\r\n x_new = np.linspace(x[0], x[-1], num=len(x) * 10)\r\n ffit = poly.polyval(x_new, coefs)\r\n result = {'speed': p_speed, 'ha': predict_ha}\r\n return result\r\n\r\n\r\ndef point_on_line(a, b, p):\r\n ap = p - a\r\n ab = b - a\r\n result = a + np.dot(ap, ab) / np.dot(ab, ab) * ab\r\n return result\r\n\r\n\r\ndef coordinate_fit(df, deg=1):\r\n \"\"\" Fitting coordinates; takes coordinates and return 2 coordinates with timestamp. 
Return\r\n [[lat1, lng1, t1], [lat2, lng2, t2]\"\"\"\r\n df = pd.DataFrame(df, columns=['lat', 'lon', 't'])\r\n df['h'] = df.apply(lambda h: 0, axis=1)\r\n ' Convert to cartesian coordinate'\r\n x_c, y_c, Az = [], [], []\r\n for i in range(len(df.lat)):\r\n l, n, a = pm.geodetic2enu(df.lat[i], df.lon[i], df.h[i], df.lat[0], df.lon[0], df.h[0], ell=None, deg=True)\r\n x_c.append(l)\r\n y_c.append(n)\r\n df['x_c'], df['y_c'] = x_c, y_c\r\n # df['Azm'] = Az\r\n m, n = df['x_c'], df['y_c']\r\n p1, p2 = np.array([df.x_c[0], df.y_c[0]]), np.array([df.x_c[len(df.x_c) - 1], df.y_c[len(df.x_c) - 1]])\r\n \r\n ' Apply linear regression'\r\n slope, intercept, r_value, p_value, std_err = stats.linregress(m, n)\r\n \r\n # def linefitline(x):\r\n # \treturn intercept + slope * x\r\n #\r\n # line = linefitline(m)\r\n line = list(map(lambda b: intercept + slope * b, m))\r\n df['line'] = line\r\n 'perpendicular on the '\r\n a = np.array([df['x_c'][0], df['line'][0]])\r\n b = np.array([df.x_c[len(df['x_c']) - 1], df.line[len(df['line']) - 1]])\r\n x_1, y_1 = point_on_line(a, b, p1)\r\n x_2, y_2 = point_on_line(a, b, p2)\r\n 'Back to lat lng'\r\n lat1, lng1, _h1 = pm.enu2geodetic(x_1, y_1, df.h[0], df.lat[0], df.lon[0], df.h[0], ell=None, deg=True)\r\n lat2, lng2, _h2 = pm.enu2geodetic(x_2, y_2, df.h[0], df.lat[0], df.lon[0], df.h[0], ell=None, deg=True)\r\n result = [[lat1, lng1, df.t[0]], [lat2, lng2, df.t[len(df.t) - 1]]]\r\n \r\n # x = np.linspace(df.lon[0], df.lon[len(df['lon']) - 1], num=len(df['lon']) * 15)\r\n # ffit = poly.polyval(x, poly.polyfit(df['lon'], df['lat'], deg=deg))\r\n # lat, lng = ffit[-1], df.lon[len(df['lon']) - 1]\r\n # # result = {'lat': lat, 'lng': lng, 't': df.t[len(df['t']) - 1]}\r\n # result = [lat, lng, df.t[len(df['t']) - 1]]\r\n return result\r\n\r\n\r\ndef iver_status(iver='3089', port_rf='com7', port_ac='com4', time_out=1, time_wait_ac=14):\r\n try:\r\n osi_return = {'NextWp': 0, 'Latitude': 00.00000, 'Longitude': 00.00000,\r\n 'Speed': 0.00, 'DistanceToNxtWP': 0.00, 'Battery': 0.00}\r\n # time_wait = 14 # fetch this data from iverVariables.txt, we get responses after 12 sec.\r\n t_start = monotonic()\r\n count = 0\r\n try:\r\n ser_rf = serial.Serial(port_rf, baudrate=9600, bytesize=8, parity='N', stopbits=1, timeout=time_out,\r\n xonxoff=0)\r\n ser_rf.reset_output_buffer()\r\n except Exception as e_rf:\r\n print(\"I am in the RF com port exception block.\", e_rf)\r\n print(\"Will send through ACOMM...... \")\r\n ser_rf_open = 'notOpen'\r\n else:\r\n ser_rf_open = 'open'\r\n try:\r\n ser_ac = serial.Serial(port_ac, baudrate=9600, bytesize=8, parity='N', stopbits=1, timeout=time_out,\r\n xonxoff=0)\r\n ser_ac.reset_output_buffer()\r\n except Exception as e_ac:\r\n print(\"I am in the AC com port exception block. 
\", e_ac)\r\n ser_ac_open = 'notOpen'\r\n else:\r\n ser_ac_open = 'open'\r\n break_innerloop = 'no'\r\n while ser_rf_open == 'open' or ser_ac_open == 'open':\r\n try:\r\n time.sleep(1)\r\n if ser_rf_open == 'open':\r\n inst_snd = '$AC;Iver3-' + iver + ';' + '$' + osd() + '\\r\\n'\r\n ser_rf.reset_output_buffer()\r\n ser_rf.write(inst_snd.encode())\r\n frm_iver = ser_rf.readline().decode()\r\n frm_iver_OSDAck = ser_rf.readline().decode()\r\n if ser_rf_open == 'open' and len(frm_iver) >= 1: # if rf com port is open and responded through rf comm\r\n if received_stream(frm_iver_OSDAck) == 'osdAck' and osd_ack(frm_iver_OSDAck) == 0:\r\n if received_stream(frm_iver) == 'osi':\r\n osi_return = osi(frm_iver)\r\n break\r\n else:\r\n count += 1\r\n # something other than osdAck and osi\r\n elif ser_ac_open == 'open' and (ser_rf_open == 'notOpen' or len(frm_iver) < 1) and \\\r\n monotonic() - t_start >= time_wait_ac: # send osd through ac comm\r\n # print(\"Sent\")\r\n inst_snd = '$AC;Iver3-' + iver + ';' + '$' + osd() + '\\r\\n'\r\n ser_ac.write(inst_snd.encode())\r\n print(\"Sent\")\r\n i = 0\r\n print(\"waiting for a response from the Iver...\")\r\n while True:\r\n frm_iver = ser_ac.readline().decode()\r\n frm_iver_OSDAck = ser_ac.readline().decode()\r\n # print(frm_iver, frm_iver_OSDAck)\r\n if received_stream(frm_iver_OSDAck) == 'osdAck' and osd_ack(frm_iver_OSDAck) == 0:\r\n if received_stream(frm_iver) == 'osi':\r\n osi_return = osi(frm_iver)\r\n # print(osi_return)\r\n t_start = monotonic()\r\n i = 0\r\n break_innerloop = 'yes'\r\n break\r\n else:\r\n print(i)\r\n i += 1\r\n if i == 15: # wait 15 sec to get the response from the iver through AC;can fetch data from file\r\n break\r\n else:\r\n continue\r\n else:\r\n if break_innerloop == 'yes':\r\n break\r\n else:\r\n continue\r\n except Exception as loop:\r\n print(\"I am in the exception loop block.\", loop)\r\n ser_rf.reset_input_buffer()\r\n ser_ac.reset_input_buffer()\r\n continue\r\n \r\n return osi_return\r\n except Exception as iverStatus:\r\n return osi_return\r\n\r\n# def listen_iver(iver='3089', read_port='b', port_rf='com7', port_ac='com6', time_out=1):\r\n# try:\r\n# if read_port == 'b':\r\n# try:\r\n# ser_rf = serial.Serial(port_rf, baudrate=9600, bytesize=8, parity='N', stopbits=1, timeout=time_out,\r\n# xonxoff=0)\r\n# ser_rf.reset_output_buffer()\r\n# except Exception as e_rf:\r\n# print(\"I am in the RF com port exception block.\", e_rf)\r\n# ser_rf_open = 'notOpen'\r\n# else:\r\n# ser_rf_open = 'open'\r\n# try:\r\n# ser_ac = serial.Serial(port_ac, baudrate=9600, bytesize=8, parity='N', stopbits=1, timeout=time_out,\r\n# xonxoff=0)\r\n# ser_ac.reset_output_buffer()\r\n# except Exception as e_ac:\r\n# print(\"I am in the AC com port exception block. \", e_ac)\r\n# ser_ac_open = 'notOpen'\r\n# else:\r\n# ser_ac_open = 'open'\r\n# elif read_port == 'r':\r\n# print(\"Rread RF only: \")\r\n# elif read_port == 'a':\r\n# print(\" Reading ACom port\")\r\n# else:\r\n# print(\"Wrong request\")\r\n# except Exception as e_listen:\r\n# print(\"Error: \", e_listen)\r\n# return \"None\"\r\n"
] |
[
[
"numpy.dot",
"numpy.polyfit",
"numpy.absolute",
"numpy.polynomial.polynomial.polyfit",
"pandas.DataFrame",
"scipy.stats.linregress",
"numpy.polynomial.polynomial.polyval",
"numpy.array"
]
] |
ralgara/pointer-generator
|
[
"4c5ebec95af14847218326175a1a31b0232a74aa"
] |
[
"batcher.py"
] |
[
"# Copyright 2016 The TensorFlow Authors. All Rights Reserved.\n# Modifications Copyright 2017 Abigail See\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\n\"\"\"This file contains code to process data into batches\"\"\"\n\nimport queue\nfrom random import shuffle\nfrom threading import Thread\nimport time\nimport numpy as np\nimport tensorflow as tf\nimport data\n\n\nclass Example(object):\n \"\"\"Class representing a train/val/test example for text summarization.\"\"\"\n\n def __init__(self, article, abstract_sentences, vocab, hps):\n \"\"\"Initializes the Example, performing tokenization and truncation to produce the encoder, decoder and target sequences, which are stored in self.\n\n Args:\n article: source text; a string. each token is separated by a single space.\n abstract_sentences: list of strings, one per abstract sentence. In each sentence, each token is separated by a single space.\n vocab: Vocabulary object\n hps: hyperparameters\n \"\"\"\n self.hps = hps\n\n # Get ids of special tokens\n start_decoding = vocab.word2id(data.START_DECODING)\n stop_decoding = vocab.word2id(data.STOP_DECODING)\n\n # Process the article\n article_words = article.split()\n if len(article_words) > hps.max_enc_steps:\n article_words = article_words[:hps.max_enc_steps]\n self.enc_len = len(article_words) # store the length after truncation but before padding\n self.enc_input = [vocab.word2id(w) for w in article_words] # list of word ids; OOVs are represented by the id for UNK token\n\n # Process the abstract\n abstract = ' '.join(abstract_sentences) # string\n abstract_words = abstract.split() # list of strings\n abs_ids = [vocab.word2id(w) for w in abstract_words] # list of word ids; OOVs are represented by the id for UNK token\n\n # Get the decoder input sequence and target sequence\n self.dec_input, self.target = self.get_dec_inp_targ_seqs(abs_ids, hps.max_dec_steps, start_decoding, stop_decoding)\n self.dec_len = len(self.dec_input)\n\n # If using pointer-generator mode, we need to store some extra info\n if hps.pointer_gen:\n # Store a version of the enc_input where in-article OOVs are represented by their temporary OOV id; also store the in-article OOVs words themselves\n self.enc_input_extend_vocab, self.article_oovs = data.article2ids(article_words, vocab)\n\n # Get a verison of the reference summary where in-article OOVs are represented by their temporary article OOV id\n abs_ids_extend_vocab = data.abstract2ids(abstract_words, vocab, self.article_oovs)\n\n # Overwrite decoder target sequence so it uses the temp article OOV ids\n _, self.target = self.get_dec_inp_targ_seqs(abs_ids_extend_vocab, hps.max_dec_steps, start_decoding, stop_decoding)\n\n # Store the original strings\n self.original_article = article\n self.original_abstract = abstract\n self.original_abstract_sents = abstract_sentences\n\n\n def get_dec_inp_targ_seqs(self, sequence, max_len, start_id, stop_id):\n \"\"\"Given the reference summary as 
a sequence of tokens, return the input sequence for the decoder, and the target sequence which we will use to calculate loss. The sequence will be truncated if it is longer than max_len. The input sequence must start with the start_id and the target sequence must end with the stop_id (but not if it's been truncated).\n\n Args:\n sequence: List of ids (integers)\n max_len: integer\n start_id: integer\n stop_id: integer\n\n Returns:\n inp: sequence length <=max_len starting with start_id\n target: sequence same length as input, ending with stop_id only if there was no truncation\n \"\"\"\n inp = [start_id] + sequence[:]\n target = sequence[:]\n if len(inp) > max_len: # truncate\n inp = inp[:max_len]\n target = target[:max_len] # no end_token\n else: # no truncation\n target.append(stop_id) # end token\n assert len(inp) == len(target)\n return inp, target\n\n\n def pad_decoder_inp_targ(self, max_len, pad_id):\n \"\"\"Pad decoder input and target sequences with pad_id up to max_len.\"\"\"\n while len(self.dec_input) < max_len:\n self.dec_input.append(pad_id)\n while len(self.target) < max_len:\n self.target.append(pad_id)\n\n\n def pad_encoder_input(self, max_len, pad_id):\n \"\"\"Pad the encoder input sequence with pad_id up to max_len.\"\"\"\n while len(self.enc_input) < max_len:\n self.enc_input.append(pad_id)\n if self.hps.pointer_gen:\n while len(self.enc_input_extend_vocab) < max_len:\n self.enc_input_extend_vocab.append(pad_id)\n\n\nclass Batch(object):\n \"\"\"Class representing a minibatch of train/val/test examples for text summarization.\"\"\"\n\n def __init__(self, example_list, hps, vocab):\n \"\"\"Turns the example_list into a Batch object.\n\n Args:\n example_list: List of Example objects\n hps: hyperparameters\n vocab: Vocabulary object\n \"\"\"\n self.pad_id = vocab.word2id(data.PAD_TOKEN) # id of the PAD token used to pad sequences\n self.init_encoder_seq(example_list, hps) # initialize the input to the encoder\n self.init_decoder_seq(example_list, hps) # initialize the input and targets for the decoder\n self.store_orig_strings(example_list) # store the original strings\n\n def init_encoder_seq(self, example_list, hps):\n \"\"\"Initializes the following:\n self.enc_batch:\n numpy array of shape (batch_size, <=max_enc_steps) containing integer ids (all OOVs represented by UNK id), padded to length of longest sequence in the batch\n self.enc_lens:\n numpy array of shape (batch_size) containing integers. The (truncated) length of each encoder input sequence (pre-padding).\n self.enc_padding_mask:\n numpy array of shape (batch_size, <=max_enc_steps), containing 1s and 0s. 
1s correspond to real tokens in enc_batch and target_batch; 0s correspond to padding.\n\n If hps.pointer_gen, additionally initializes the following:\n self.max_art_oovs:\n maximum number of in-article OOVs in the batch\n self.art_oovs:\n list of list of in-article OOVs (strings), for each example in the batch\n self.enc_batch_extend_vocab:\n Same as self.enc_batch, but in-article OOVs are represented by their temporary article OOV number.\n \"\"\"\n # Determine the maximum length of the encoder input sequence in this batch\n max_enc_seq_len = max([ex.enc_len for ex in example_list])\n\n # Pad the encoder input sequences up to the length of the longest sequence\n for ex in example_list:\n ex.pad_encoder_input(max_enc_seq_len, self.pad_id)\n\n # Initialize the numpy arrays\n # Note: our enc_batch can have different length (second dimension) for each batch because we use dynamic_rnn for the encoder.\n self.enc_batch = np.zeros((hps.batch_size, max_enc_seq_len), dtype=np.int32)\n self.enc_lens = np.zeros((hps.batch_size), dtype=np.int32)\n self.enc_padding_mask = np.zeros((hps.batch_size, max_enc_seq_len), dtype=np.float32)\n\n # Fill in the numpy arrays\n for i, ex in enumerate(example_list):\n self.enc_batch[i, :] = ex.enc_input[:]\n self.enc_lens[i] = ex.enc_len\n for j in range(ex.enc_len):\n self.enc_padding_mask[i][j] = 1\n\n # For pointer-generator mode, need to store some extra info\n if hps.pointer_gen:\n # Determine the max number of in-article OOVs in this batch\n self.max_art_oovs = max([len(ex.article_oovs) for ex in example_list])\n # Store the in-article OOVs themselves\n self.art_oovs = [ex.article_oovs for ex in example_list]\n # Store the version of the enc_batch that uses the article OOV ids\n self.enc_batch_extend_vocab = np.zeros((hps.batch_size, max_enc_seq_len), dtype=np.int32)\n for i, ex in enumerate(example_list):\n self.enc_batch_extend_vocab[i, :] = ex.enc_input_extend_vocab[:]\n\n def init_decoder_seq(self, example_list, hps):\n \"\"\"Initializes the following:\n self.dec_batch:\n numpy array of shape (batch_size, max_dec_steps), containing integer ids as input for the decoder, padded to max_dec_steps length.\n self.target_batch:\n numpy array of shape (batch_size, max_dec_steps), containing integer ids for the target sequence, padded to max_dec_steps length.\n self.dec_padding_mask:\n numpy array of shape (batch_size, max_dec_steps), containing 1s and 0s. 1s correspond to real tokens in dec_batch and target_batch; 0s correspond to padding.\n \"\"\"\n # Pad the inputs and targets\n for ex in example_list:\n ex.pad_decoder_inp_targ(hps.max_dec_steps, self.pad_id)\n\n # Initialize the numpy arrays.\n # Note: our decoder inputs and targets must be the same length for each batch (second dimension = max_dec_steps) because we do not use a dynamic_rnn for decoding. 
However I believe this is possible, or will soon be possible, with Tensorflow 1.0, in which case it may be best to upgrade to that.\n self.dec_batch = np.zeros((hps.batch_size, hps.max_dec_steps), dtype=np.int32)\n self.target_batch = np.zeros((hps.batch_size, hps.max_dec_steps), dtype=np.int32)\n self.dec_padding_mask = np.zeros((hps.batch_size, hps.max_dec_steps), dtype=np.float32)\n\n # Fill in the numpy arrays\n for i, ex in enumerate(example_list):\n self.dec_batch[i, :] = ex.dec_input[:]\n self.target_batch[i, :] = ex.target[:]\n for j in range(ex.dec_len):\n self.dec_padding_mask[i][j] = 1\n\n def store_orig_strings(self, example_list):\n \"\"\"Store the original article and abstract strings in the Batch object\"\"\"\n self.original_articles = [ex.original_article for ex in example_list] # list of lists\n self.original_abstracts = [ex.original_abstract for ex in example_list] # list of lists\n self.original_abstracts_sents = [ex.original_abstract_sents for ex in example_list] # list of list of lists\n\n\nclass Batcher(object):\n \"\"\"A class to generate minibatches of data. Buckets examples together based on length of the encoder sequence.\"\"\"\n\n BATCH_QUEUE_MAX = 100 # max number of batches the batch_queue can hold\n\n def __init__(self, data_path, vocab, hps, single_pass):\n \"\"\"Initialize the batcher. Start threads that process the data into batches.\n\n Args:\n data_path: tf.Example filepattern.\n vocab: Vocabulary object\n hps: hyperparameters\n single_pass: If True, run through the dataset exactly once (useful for when you want to run evaluation on the dev or test set). Otherwise generate random batches indefinitely (useful for training).\n \"\"\"\n self._data_path = data_path\n self._vocab = vocab\n self._hps = hps\n self._single_pass = single_pass\n\n # Initialize a queue of Batches waiting to be used, and a queue of Examples waiting to be batched\n self._batch_queue = queue.Queue(self.BATCH_QUEUE_MAX)\n self._example_queue = queue.Queue(self.BATCH_QUEUE_MAX * self._hps.batch_size)\n\n # Different settings depending on whether we're in single_pass mode or not\n if single_pass:\n self._num_example_q_threads = 1 # just one thread, so we read through the dataset just once\n self._num_batch_q_threads = 1 # just one thread to batch examples\n self._bucketing_cache_size = 1 # only load one batch's worth of examples before bucketing; this essentially means no bucketing\n self._finished_reading = False # this will tell us when we're finished reading the dataset\n else:\n self._num_example_q_threads = 16 # num threads to fill example queue\n self._num_batch_q_threads = 4 # num threads to fill batch queue\n self._bucketing_cache_size = 100 # how many batches-worth of examples to load into cache before bucketing\n\n # Start the threads that load the queues\n self._example_q_threads = []\n for _ in range(self._num_example_q_threads):\n self._example_q_threads.append(Thread(target=self.fill_example_queue))\n self._example_q_threads[-1].daemon = True\n self._example_q_threads[-1].start()\n self._batch_q_threads = []\n for _ in range(self._num_batch_q_threads):\n self._batch_q_threads.append(Thread(target=self.fill_batch_queue))\n self._batch_q_threads[-1].daemon = True\n self._batch_q_threads[-1].start()\n\n # Start a thread that watches the other threads and restarts them if they're dead\n if not single_pass: # We don't want a watcher in single_pass mode because the threads shouldn't run forever\n self._watch_thread = Thread(target=self.watch_threads)\n self._watch_thread.daemon = 
True\n self._watch_thread.start()\n\n\n def next_batch(self):\n \"\"\"Return a Batch from the batch queue.\n\n If mode='decode' then each batch contains a single example repeated beam_size-many times; this is necessary for beam search.\n\n Returns:\n batch: a Batch object, or None if we're in single_pass mode and we've exhausted the dataset.\n \"\"\"\n # If the batch queue is empty, print a warning\n if self._batch_queue.qsize() == 0:\n tf.logging.warning('Bucket input queue is empty when calling next_batch. Bucket queue size: %i, Input queue size: %i', self._batch_queue.qsize(), self._example_queue.qsize())\n if self._single_pass and self._finished_reading:\n tf.logging.info(\"Finished reading dataset in single_pass mode.\")\n return None\n\n batch = self._batch_queue.get() # get the next Batch\n return batch\n\n def fill_example_queue(self):\n \"\"\"Reads data from file and processes into Examples which are then placed into the example queue.\"\"\"\n\n input_gen = self.text_generator(data.example_generator(self._data_path, self._single_pass))\n\n while True:\n try:\n (article, abstract) = next(input_gen) # read the next example from file. article and abstract are both strings.\n except StopIteration: # if there are no more examples:\n tf.logging.info(\"The example generator for this example queue filling thread has exhausted data.\")\n if self._single_pass:\n tf.logging.info(\"single_pass mode is on, so we've finished reading dataset. This thread is stopping.\")\n self._finished_reading = True\n break\n else:\n raise Exception(\"single_pass mode is off but the example generator is out of data; error.\")\n\n abstract_sentences = [sent.strip() for sent in data.abstract2sents(abstract)] # Use the <s> and </s> tags in abstract to get a list of sentences.\n example = Example(article, abstract_sentences, self._vocab, self._hps) # Process into an Example.\n self._example_queue.put(example) # place the Example in the example queue.\n\n\n def fill_batch_queue(self):\n \"\"\"Takes Examples out of example queue, sorts them by encoder sequence length, processes into Batches and places them in the batch queue.\n\n In decode mode, makes batches that each contain a single example repeated.\n \"\"\"\n while True:\n if self._hps.mode != 'decode':\n # Get bucketing_cache_size-many batches of Examples into a list, then sort\n inputs = []\n for _ in range(self._hps.batch_size * self._bucketing_cache_size):\n inputs.append(self._example_queue.get())\n inputs = sorted(inputs, key=lambda inp: inp.enc_len) # sort by length of encoder sequence\n\n # Group the sorted Examples into batches, optionally shuffle the batches, and place in the batch queue.\n batches = []\n for i in range(0, len(inputs), self._hps.batch_size):\n batches.append(inputs[i:i + self._hps.batch_size])\n if not self._single_pass:\n shuffle(batches)\n for b in batches: # each b is a list of Example objects\n self._batch_queue.put(Batch(b, self._hps, self._vocab))\n\n else: # beam search decode mode\n ex = self._example_queue.get()\n b = [ex for _ in range(self._hps.batch_size)]\n self._batch_queue.put(Batch(b, self._hps, self._vocab))\n\n\n def watch_threads(self):\n \"\"\"Watch example queue and batch queue threads and restart if dead.\"\"\"\n while True:\n time.sleep(60)\n for idx,t in enumerate(self._example_q_threads):\n if not t.is_alive(): # if the thread is dead\n tf.logging.error('Found example queue thread dead. 
Restarting.')\n new_t = Thread(target=self.fill_example_queue)\n self._example_q_threads[idx] = new_t\n new_t.daemon = True\n new_t.start()\n for idx,t in enumerate(self._batch_q_threads):\n if not t.is_alive(): # if the thread is dead\n tf.logging.error('Found batch queue thread dead. Restarting.')\n new_t = Thread(target=self.fill_batch_queue)\n self._batch_q_threads[idx] = new_t\n new_t.daemon = True\n new_t.start()\n\n\n def text_generator(self, example_generator):\n \"\"\"Generates article and abstract text from tf.Example.\n\n Args:\n example_generator: a generator of tf.Examples from file. See data.example_generator\"\"\"\n while True:\n e = next(example_generator) # e is a tf.Example\n try:\n article_text = e.features.feature['article'].bytes_list.value[0] # the article text was saved under the key 'article' in the data files\n abstract_text = e.features.feature['abstract'].bytes_list.value[0] # the abstract text was saved under the key 'abstract' in the data files\n except ValueError:\n tf.logging.error('Failed to get article or abstract from example')\n continue\n if len(article_text)==0: # See https://github.com/abisee/pointer-generator/issues/1\n tf.logging.warning('Found an example with empty article text. Skipping it.')\n else:\n yield (article_text, abstract_text)\n"
] |
[
[
"tensorflow.logging.error",
"tensorflow.logging.warning",
"numpy.zeros",
"tensorflow.logging.info"
]
] |
sudhaveturi/repo
|
[
"7510d31fe27a0e759e12850f281619ce415dd279"
] |
[
"software/metax/MatrixManager.py"
] |
[
"import pandas\nimport numpy\nimport Exceptions\n\ndef load_matrix_manager(path):\n d = pandas.read_table(path, sep=\"\\s+\")\n m = MatrixManager(d)\n return m\n\n\nclass MatrixManager(object):\n def __init__(self, d):\n _validate(d)\n self.data = _build_data(d)\n\n def get(self, gene, snps=None, strict=True):\n return _get(self.data, gene, snps, strict)\n\n def n_snps(self,gene):\n if not gene in self.data:\n return numpy.nan\n snps = self.data[gene]\n snps = _non_na(snps)\n snps = {x[CDTF.RSID1] for x in snps}\n return len(snps)\n\nclass CDTF(object):\n GENE=0\n RSID1=1\n RSID2=2\n VALUE=3\n\n K_GENE = \"GENE\"\n K_RSID1 = \"RSID1\"\n K_RSID2 = \"RSID2\"\n K_VALUE = \"VALUE\"\n\ndef _validate(d):\n processed_genes = set()\n last_gene = None\n genes = d[CDTF.K_GENE]\n for g in genes:\n if g != last_gene:\n if g in processed_genes:\n msg = \"Snp Matrix Entries for genes must be contiguous but %s was found in two different places\" % (g)\n raise Exceptions.InvalidInputFormat(msg)\n processed_genes.add(g)\n last_gene = g\n\n if numpy.any(d.duplicated()):\n msg = \"Duplicated SNP entries found\"\n raise Exceptions.InvalidInputFormat(msg)\n\ndef _build_data(d):\n d = d.fillna(\"NA\")\n d.GENE = pandas.Categorical(d.GENE, d.GENE.drop_duplicates()) # speed things up!\n d = zip(d[CDTF.K_GENE].values, d[CDTF.K_RSID1].values, d[CDTF.K_RSID2].values, d[CDTF.K_VALUE].values)\n r = {}\n for t in d:\n gene = t[0]\n if not gene in r:\n r[gene] = []\n r[gene].append(t)\n return r\n\ndef _get(d, gene, snps_whitelist=None, strict=True):\n if not gene in d:\n return None,None\n\n d = d[gene]\n\n if snps_whitelist is not None:\n g, r1, r2, v = zip(*d)\n snps = set(r1)\n snps_whitelist = set(snps_whitelist)\n if strict:\n extra = {x for x in snps_whitelist if not x in snps}\n if len(extra):\n msg = \"SNPs in whitelist not in matrix for %s:%s\"%(gene,extra)\n raise Exceptions.InvalidArguments(msg)\n d = [x for x in d if x[CDTF.RSID1] in snps_whitelist]\n\n _s = set()\n snps = []\n entries = {}\n for row in d:\n rsid1 = row[CDTF.RSID1]\n rsid2 = row[CDTF.RSID2]\n if not rsid1 in entries: entries[rsid1] = {}\n if not rsid2 in entries: entries[rsid2] = {}\n value = row[CDTF.VALUE]\n if value == \"NA\":continue\n entries[rsid1][rsid2] = value\n entries[rsid2][rsid1] = value\n if not rsid1 in _s:\n _s.add(rsid1)\n snps.append(rsid1)\n\n snps = list(snps)\n rows = []\n for snp_i in snps:\n row = []\n rows.append(row)\n for snp_j in snps:\n row.append(entries[snp_i][snp_j])\n covariance_matrix = numpy.matrix(rows)\n return snps, covariance_matrix\n\ndef _non_na(snps_data):\n return [x for x in snps_data if x[CDTF.VALUE] != \"NA\"]\n"
] |
[
[
"numpy.matrix",
"pandas.read_table"
]
] |
DK-Jang/human_motion_manifold
|
[
"dd3b603b892d66685204909c8818f3e1621ab7dc"
] |
[
"result2bvh.py"
] |
[
"import sys\nimport os\nimport numpy as np\nimport h5py\nsys.path.append('./utils_motion')\nfrom Animation import Animation, positions_global\nfrom Quaternions import Quaternions\nfrom BVH import save\nfrom skeleton import Skeleton\nimport argparse\n\noffsets = np.array([\n [ 0. , 0. , 0. ],\n [-132.948591, 0. , 0. ],\n [ 0. , -442.894612, 0. ],\n [ 0. , -454.206447, 0. ],\n [ 0. , 0. , 162.767078],\n [ 0. , 0. , 74.999437],\n [ 132.948826, 0. , 0. ],\n [ 0. , -442.894413, 0. ],\n [ 0. , -454.20659 , 0. ],\n [ 0. , 0. , 162.767426],\n [ 0. , 0. , 74.999948],\n [ 0. , 0.1 , 0. ],\n [ 0. , 233.383263, 0. ],\n [ 0. , 257.077681, 0. ],\n [ 0. , 121.134938, 0. ],\n [ 0. , 115.002227, 0. ],\n [ 0. , 257.077681, 0. ],\n [ 0. , 151.034226, 0. ],\n [ 0. , 278.882773, 0. ],\n [ 0. , 251.733451, 0. ],\n [ 0. , 0. , 0. ],\n [ 0. , 0. , 99.999627],\n [ 0. , 100.000188, 0. ],\n [ 0. , 0. , 0. ],\n [ 0. , 257.077681, 0. ],\n [ 0. , 151.031437, 0. ],\n [ 0. , 278.892924, 0. ],\n [ 0. , 251.72868 , 0. ],\n [ 0. , 0. , 0. ],\n [ 0. , 0. , 99.999888],\n [ 0. , 137.499922, 0. ],\n [ 0. , 0. , 0. ]\n ], dtype='float64') * 0.01\n\nparents = np.array([-1, 0, 1, 2, 3, 4, 0, 6, 7, 8, 9, 0, 11, 12, 13, 14, 12,\n 16, 17, 18, 19, 20, 19, 22, 12, 24, 25, 26, 27, 28, 27, 30], dtype='int64')\njoints_left = np.array([1, 2, 3, 4, 5, 24, 25, 26, 27, 28, 29, 30, 31], dtype='int64')\njoints_right = np.array([6, 7, 8, 9, 10, 16, 17, 18, 19, 20, 21, 22, 23], dtype='int64')\n\norients = Quaternions.id(1)\norients_final = np.array([[1,0,0,0]]).repeat(len(offsets), axis=0)\norients.qs = np.append(orients.qs, orients_final, axis=0)\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('--bvh_dir', \n type=str, \n default='./pretrained/output/recon/bvh')\n parser.add_argument('--hdf5_path', \n type=str, \n default='./pretrained/output/recon/m_recon.hdf5')\n args = parser.parse_args()\n\n file_dir = args.bvh_dir\n for jj in range(60): # # of test motions: 60\n with h5py.File(args.hdf5_path, 'r') as h5f:\n rotations = h5f['batch{0}'.format(jj + 1)][:] # (fnum, n_joint, 4)\n rotations = rotations[:-10] # drop the last few frames\n fnum = rotations.shape[0]\n positions = offsets[np.newaxis].repeat(fnum, axis=0)\n \n rotations_Quat = Quaternions(rotations)\n anim = Animation(rotations_Quat, positions, orients, offsets, parents)\n\n xyz = positions_global(anim)\n height_offset = np.min(xyz[:, :, 1]) # Min height\n positions[:, :, 1] -= height_offset\n anim.positions = positions\n\n filename = 'batch{0}.bvh'.format(jj+1)\n filepath = os.path.join(file_dir, filename)\n\n try:\n if not(os.path.isdir(file_dir)):\n print(\"Creating directory: {}\".format(file_dir))\n os.makedirs(file_dir)\n except OSError:\n pass\n\n save(filepath, anim, frametime=1.0/24.0)\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n"
] |
[
[
"numpy.append",
"numpy.array",
"numpy.min"
]
] |
adeeconometrics/s_distributions
|
[
"a29b1db324d7ee7f63498c8ed1888ab6ead849d8"
] |
[
"source/univariate/test/BetaPrime.py"
] |
[
"try:\r\n from scipy.special import beta as _beta, betainc as _betainc\r\n import numpy as _np\r\n from typing import Union, Dict, List\r\n from math import sqrt as _sqrt\r\n from univariate._base import SemiInfinite\r\nexcept Exception as e:\r\n print(f\"some modules are missing {e}\")\r\n\r\n\r\nclass BetaPrime(SemiInfinite):\r\n \"\"\"\r\n This class contains methods concerning Beta prime Distirbution [#]_ .\r\n\r\n .. math:: \r\n \\\\text{BetaPrime}(x;\\\\alpha,\\\\beta) = \\\\frac{x^{\\\\alpha -1}(1+x)^{-\\\\alpha -\\\\beta}}{\\\\text{B}(\\\\alpha ,\\\\beta )}\r\n\r\n Args:\r\n\r\n alpha(float): shape parameter where alpha > 0\r\n beta(float): shape parameter where beta > 0\r\n x(float): random variable where x >= 0\r\n\r\n Reference:\r\n .. [#] Wikipedia contributors. (2020, October 8). Beta prime distribution. https://en.wikipedia.org/w/index.php?title=Beta_prime_distribution&oldid=982458594\r\n \"\"\"\r\n\r\n def __init__(self, alpha: float, beta: float):\r\n if alpha < 0:\r\n raise ValueError(\r\n 'alpha parameter(shape) should be a positive number.')\r\n if beta < 0:\r\n raise ValueError(\r\n 'beta parameter(shape) should be a positive number.')\r\n\r\n self.alpha = alpha\r\n self.beta = beta\r\n\r\n def pdf(self, x: Union[List[float], _np.ndarray, float]) -> Union[float, _np.ndarray]:\r\n \"\"\"\r\n Args:\r\n x (Union[List[float], numpy.ndarray, float]): random variable(s)\r\n\r\n Raises:\r\n ValueError: when there exist a value of x less than 0\r\n\r\n Returns:\r\n Union[float, numpy.ndarray]: evaluation of pdf at x\r\n \"\"\"\r\n a = self.alpha\r\n b = self.beta\r\n\r\n if isinstance(x, (_np.ndarray, List)):\r\n if not type(x) is _np.ndarray:\r\n x = _np.array(x)\r\n if _np.any(x < 0):\r\n raise ValueError('random variable should not be less then 0.')\r\n return _np.power(x, a-1)*_np.power(1+x, -a-b)/_beta(a, b)\r\n\r\n if x < 0:\r\n raise ValueError('random variable should not be less then 0.')\r\n return pow(x, a-1)*pow(1+x, -a-b)/_beta(a, b)\r\n\r\n def cdf(self, x: Union[List[float], _np.ndarray, float]) -> Union[float, _np.ndarray]:\r\n \"\"\"\r\n Args:\r\n x (Union[List[float], numpy.ndarray, float]): data point(s) of interest\r\n\r\n Raises:\r\n ValueError: when there exist a value of x less than 0\r\n\r\n Returns:\r\n Union[float, numpy.ndarray]: evaluation of cdf at x\r\n \"\"\"\r\n a = self.alpha\r\n b = self.beta\r\n\r\n if isinstance(x, (_np.ndarray, List)):\r\n if not type(x) is _np.ndarray:\r\n x = _np.array(x)\r\n if _np.any(x < 0):\r\n raise ValueError(\r\n 'evaluation of cdf is not supported for values less than 0')\r\n return _betainc(a, b, x/(1+x))\r\n\r\n return _betainc(a, b, x/(1+x))\r\n\r\n def mean(self) -> Union[float, str]:\r\n \"\"\"\r\n Returns: Mean of the Beta prime distribution.\r\n \"\"\"\r\n if self.beta > 1:\r\n return self.alpha/(self.beta-1)\r\n return \"Undefined.\"\r\n\r\n def median(self) -> str:\r\n \"\"\"\r\n Returns: Median of the Beta prime distribution.\r\n \"\"\"\r\n # warning: not yet validated.\r\n return \"Undefined.\"\r\n\r\n def mode(self) -> float:\r\n \"\"\"\r\n Returns: Mode of the Beta prime distribution.\r\n \"\"\"\r\n if self.alpha >= 1:\r\n return (self.alpha+1)/(self.beta+1)\r\n return 0.0\r\n\r\n def var(self) -> Union[float, str]:\r\n \"\"\"\r\n Returns: Variance of the Beta prime distribution.\r\n \"\"\"\r\n alpha = self.alpha\r\n beta = self.beta\r\n if beta > 2:\r\n return (alpha*(alpha+beta-1))/((beta-2)*(beta-1)**2)\r\n return \"Undefined.\"\r\n\r\n def std(self) -> Union[float, str]:\r\n \"\"\"\r\n Returns: 
Standard deviation of the Log logistic distribution\r\n \"\"\"\r\n var = self.var()\r\n if type(var) is str:\r\n return \"Undefined.\"\r\n return _sqrt(var)\r\n\r\n def skewness(self) -> Union[float, str]:\r\n \"\"\"\r\n Returns: Skewness of the Beta prime distribution.\r\n \"\"\"\r\n alpha = self.alpha\r\n beta = self.beta\r\n if beta > 3:\r\n scale = (2*(2*alpha+beta-1))/(beta-3)\r\n return scale*_sqrt((beta-2)/(alpha*(alpha+beta-1)))\r\n return \"Undefined.\"\r\n\r\n def kurtosis(self) -> str:\r\n \"\"\"\r\n Returns: Kurtosis of the Beta prime distribution.\r\n \"\"\"\r\n return \"Undefined.\"\r\n\r\n def entropy(self):\r\n \"\"\"\r\n Returns: differential entropy of the Beta prime distribution.\r\n\r\n Reference: Park, S.Y. & Bera, A.K.(2009). Maximum entropy autoregressive conditional heteroskedasticity model. Elsivier.\r\n link: http://wise.xmu.edu.cn/uploadfiles/paper-masterdownload/2009519932327055475115776.pdf\r\n \"\"\"\r\n return NotImplemented\r\n\r\n def summary(self) -> Dict[str, Union[float, str]]:\r\n \"\"\"\r\n Returns:\r\n Dictionary of BetaPrime distirbution moments. This includes standard deviation. \r\n \"\"\"\r\n return {\r\n 'mean': self.mean(), 'median': self.median(), 'mode': self.mode(),\r\n 'var': self.var(), 'std': self.std(), 'skewness': self.skewness(), 'kurtosis': self.kurtosis()\r\n }\r\n"
] |
[
[
"numpy.power",
"numpy.any",
"numpy.array",
"scipy.special.betainc",
"scipy.special.beta"
]
] |
Bahrd/AppliedPythonology
|
[
"65d7bd665eba823c3319c3efdc5a5047ddfa534d"
] |
[
"auxiliary.py"
] |
[
"## Auxiliary routines for image processing algorithms\nimport numpy as np; from numpy.linalg import inv\nimport matplotlib.pyplot as plt; from matplotlib.colors import LinearSegmentedColormap as lscm\n\n#Image presentation\ndef displayImages(images, titles = '', cmp = 'gray', show = True):\n if type(images) is tuple or type(images) is list: \n number = len(images)\n fig = plt.figure(figsize = (number * 3, 3)); fig.tight_layout()\n for p, (image, title) in enumerate(zip(images, titles)):\n sb = plt.subplot(1, number, p + 1)\n sb.set_xticks([]); sb.set_yticks([])\n plt.title(title); plt.imshow(image, cmap = cmp)\n else:\n sb = plt.subplot(1, 1, 1)\n sb.set_xticks([]); sb.set_yticks([])\n plt.title(titles); plt.imshow(images, cmap = cmp)\n if show: plt.show()\n\ndef displayPlots(plots, titles):\n for p, (pl, ttl) in enumerate(zip(plots, titles)):\n plt.subplot(1, len(plots), p + 1)\n plt.title(ttl); plt.plot(pl)\n plt.show()\n\ndef displayPlotsXY(plots, titles):\n for p, ((x, y), ttl) in enumerate(zip(plots, titles)):\n plt.subplot(1, len(plots), p + 1)\n plt.title(ttl); plt.plot(x, y)\n plt.show()\n\n# Image dissection presentation (the channels and the resulting image)\ndef displayChannels(images, channels, rows = 1, cols = 4, title = 'RGB'):\n for image in images:\n for p, c in enumerate(channels):\n sb = plt.subplot(rows, cols, p + 1)\n sb.set_xticks([]); sb.set_yticks([])\n cmp = lscm.from_list('_', ['black', c])\n plt.title(c); plt.imshow(image[..., p], cmp)\n sb = plt.subplot(rows, cols, rows * cols)\n sb.set_xticks([]); sb.set_yticks([])\n plt.title(title); plt.imshow(image)\n plt.show()\n\n# CFA filter mask (replication of a single CFA segment into a whole sensor mask)\ndef CFA(masks, X):\n return np.dstack([np.tile(mask, X) for mask in masks])\n\nJPG_QT_Y = [[16, 11, 10, 16, 24, 40, 51, 61],\n [12, 12, 14, 19, 26, 58, 60, 55],\n [14, 13, 16, 24, 40, 57, 69, 56],\n [14, 17, 22, 29, 51, 87, 80, 62],\n [18, 22, 37, 56, 68, 109, 103, 77],\n [24, 35, 55, 64, 81, 104, 113, 92],\n [49, 64, 78, 87, 103, 121, 120, 101],\n [72, 92, 95, 98, 112, 100, 103, 99]]\n\nJPG_QT_CbCr = [[17, 18, 24, 47, 99, 99, 99, 99], \n [18, 21, 26, 66, 99, 99, 99, 99], \n [24, 26, 56, 99, 99, 99, 99, 99], \n [47, 66, 99, 99, 99, 99, 99, 99], \n [99, 99, 99, 99, 99, 99, 99, 99],\n [99, 99, 99, 99, 99, 99, 99, 99], \n [99, 99, 99, 99, 99, 99, 99, 99], \n [99, 99, 99, 99, 99, 99, 99, 99]]\n\n## Irréversible Color Transform (ICT)\nRGB2YCbCr = [[ .299, .587, .114],\n [-.168736, -.331264, .5],\n [ .5, -.418688, -.081312]]\nYCbCr2RGB = inv(np.array(RGB2YCbCr))\n\n## Reversible Color Transform (RCT)\ndef RCT(R, G, B): \n Y, Cb, Cr = int(np.floor((R + 2*G + B)/4)), B - G, R - G\n return (Y, Cb, Cr)\n\ndef invRCT(Y, Cb, Cr): \n G = Y - int(np.floor((Cb + Cr)/4))\n R, B = Cr + G, Cb + G\n return (R, G, B)\n\n\n## A decorative fun... See: https://www.geeksforgeeks.org/decorators-in-python/\nfrom time import time as TT\ndef ITT(f):\n\tdef time_warper_wrapper(*args, **kwargs): \n\t\tbegin = TT() # from time import time as TT\n\t\tr = f(*args, **kwargs) \n\t\tend = TT()\n\t\tprint(f'{f.__name__} evaluated in {round(end - begin)}s')\n\t\treturn r\n\treturn time_warper_wrapper\n"
] |
[
[
"matplotlib.pyplot.imshow",
"matplotlib.pyplot.title",
"numpy.tile",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.subplot",
"numpy.floor",
"matplotlib.colors.LinearSegmentedColormap.from_list",
"numpy.array",
"matplotlib.pyplot.show",
"matplotlib.pyplot.figure"
]
] |
rabernat/oceanspy
|
[
"9bd58f8529cb0fa865393c057ad7498e4f99681d"
] |
[
"oceanspy/plot.py"
] |
[
"\"\"\"\nPlot using OceanDataset objects.\n\"\"\"\n# TODO: add test that check squeezing!\n\nimport xarray as _xr\nimport oceanspy as _ospy\nimport numpy as _np\nimport warnings as _warnings\nimport copy as _copy\nimport functools as _functools\n\nfrom . import compute as _compute\n\ntry: \n import matplotlib as _matplotlib\n _matplotlib.use('agg')\n import matplotlib.pyplot as _plt\nexcept ImportError: pass\ntry:\n import cartopy.crs as _ccrs\nexcept ImportError: pass\n\ndef TS_diagram(od, \n Tlim = None,\n Slim = None,\n dens = None,\n meanAxes = None,\n colorName = None,\n plotFreez = True,\n ax = None,\n cmap_kwargs = None,\n contour_kwargs = None,\n clabel_kwargs = None,\n cutout_kwargs = None,\n **kwargs):\n \n \"\"\"\n Plot temperature-salinity diagram.\n \n Parameters\n ----------\n od: OceanDataset\n oceandataset to check for missing variables\n Tlim: array_like with 2 elements\n Temperature limits on the y axis.\n If None, uses the min and max value.\n Slim: array_like with 2 elements\n Salinity limits on the x axis.\n If None, uses the min and max value.\n meanAxes: 1D array_like, str, or None\n List of axes over which to apply weighted mean.\n If None, don't average. \n dens: xarray.DataArray\n DataArray corresponding to density used for isopycnals.\n Must contain coordinates (Temp, S)\n In None, it will be inferred.\n colorName: str, None\n Name of the variable to use to color (e.g., Temp).\n If None, uses plot insted of scatter (much faster)\n plotFreez: bool\n If True, plot freezing line in blue.\n ax: matplotlib.pyplot.axes\n If None, uses the current axis.\n cmap_kwargs: dict\n Keyword arguments for the colormap (same used by xarray)\n contour_kwargs: dict\n Keyword arguments for matplotlib.pytplot.contour (isopycnals)\n clabel_kwargs: dict\n Keyword arguments for matplotlib.pytplot.clabel (isopycnals) \n cutout_kwargs: dict\n Keyword arguments for subsample.cutout\n **kwargs:\n If colorName is None: Kewyword arguments for matplotlib.pytplot.plot()\n Otherwise, kewyword arguments for matplotlib.pytplot.scatter()\n \n Returns\n -------\n Axes object\n \n See also\n --------\n subsample.coutout\n animate.TS_diagram\n \n References\n ----------\n http://xarray.pydata.org/en/stable/plotting.html#introduction\n \"\"\"\n \n # Check parameters\n if not isinstance(od, _ospy.OceanDataset):\n raise TypeError('`od` must be OceanDataset')\n \n if Tlim is not None:\n Tlim = _np.asarray(Tlim)\n if Tlim.size!=2: raise TypeError('`Tlim` must contain 2 elements')\n Tlim = Tlim.reshape(2)\n \n if Slim is not None:\n Slim = _np.asarray(Slim)\n if Slim.size!=2: raise TypeError('`Slim` must contain 2 elements')\n Slim = Slim.reshape(2)\n \n if dens is not None:\n if not isinstance(dens, _xr.DataArray):\n raise TypeError('`dens` must be xarray.DataArray')\n elif not set(['Temp', 'S']).issubset(dens.coords):\n raise ValueError('`dens` must have coordinates (Temp, S)') \n \n if not isinstance(colorName, (type(None), str)):\n raise TypeError('`colorName` must be str or None')\n \n if not isinstance(plotFreez, bool):\n raise TypeError('`plotFreez` must be bool')\n \n if not isinstance(ax, (type(None), _plt.Axes)):\n raise TypeError('`ax` must be matplotlib.pyplot.Axes')\n \n if not isinstance(cmap_kwargs, (type(None), dict)):\n raise TypeError('`cmap_kwargs` must be None or dict')\n \n if not isinstance(contour_kwargs, (type(None), dict)):\n raise TypeError('`contour_kwargs` must be None or dict')\n \n if not isinstance(clabel_kwargs, (type(None), dict)):\n raise TypeError('`clabel_kwargs` must be 
None or dict')\n \n if not isinstance(cutout_kwargs, (type(None), dict)):\n raise TypeError('`cutout_kwargs` must be None or dict')\n \n # Handle kwargs\n if cmap_kwargs is None: cmap_kwargs = {}\n if contour_kwargs is None: contour_kwargs = {}\n if clabel_kwargs is None: clabel_kwargs = {}\n if cutout_kwargs is None: cutout_kwargs = {}\n \n # Cutout first\n if len(cutout_kwargs)!=0:\n od = od.subsample.cutout(**cutout_kwargs)\n \n # Check and extract T and S\n varList = ['Temp', 'S']\n od = _compute._add_missing_variables(od, varList)\n \n # Compute mean\n if meanAxes is not None:\n mean_ds = _compute.weighted_mean(od, varNameList= ['Temp', 'S'], axesList=meanAxes, storeWeights=False, aliased=False)\n T = mean_ds['w_mean_Temp'].rename('Temp')\n S = mean_ds['w_mean_S'].rename('S')\n lost_dims = list(set(od._ds['Temp'].dims)-set(T.dims))\n else:\n T = od._ds['Temp']\n S = od._ds['S']\n lost_dims = []\n \n # Extract color field, and interpolate if needed\n if colorName is not None:\n \n # Add missing variables (use private)\n _colorName = _compute._rename_aliased(od, colorName)\n od = _compute._add_missing_variables(od, _colorName)\n\n # Extract color (use public)\n color = od.dataset[colorName]\n if meanAxes is not None:\n mean_ds = _compute.weighted_mean(od, varNameList= [_colorName], axesList=meanAxes, storeWeights=False, aliased=False)\n color = mean_ds['w_mean_'+_colorName].rename(_colorName)\n else:\n color = od.dataset[colorName]\n grid = od.grid\n dims2interp = [dim for dim in color.dims if dim not in T.dims]\n \n # Interpolation \n for dim in dims2interp:\n for axis in od.grid.axes.keys():\n if dim in [od.grid.axes[axis].coords[k].name for k in od.grid.axes[axis].coords.keys()]: \n print('Interpolating [{}] along [{}]-axis.'.format(colorName, axis))\n attrs = color.attrs\n color = grid.interp(color, axis, to='center', boundary='fill', fill_value=_np.nan)\n color.attrs = attrs\n \n # Broadcast, in case color has different dimensions\n T, S, color = _xr.broadcast(T, S, color)\n \n # Compute density\n T = T.persist()\n S = S.persist()\n if Tlim is None:\n Tlim = [T.min().values, T.max().values]\n if Slim is None:\n Slim = [S.min().values, S.max().values]\n \n if dens is None:\n print('Isopycnals: ', end='')\n t, s = _xr.broadcast(_xr.DataArray(_np.linspace(Tlim[0], Tlim[-1], 100), dims= ('t')),\n _xr.DataArray(_np.linspace(Slim[0], Slim[-1], 100), dims= ('s')))\n odSigma0 = _ospy.OceanDataset(_xr.Dataset({'Temp': t, 'S': s})).set_parameters(od.parameters)\n odSigma0 = odSigma0.compute.potential_density_anomaly()\n odSigma0._ds = odSigma0._ds.set_coords(['Temp', 'S'])\n \n # Freezing point\n paramsList = ['tempFrz0', 'dTempFrz_dS']\n params2use = {par:od.parameters[par] for par in od.parameters if par in paramsList}\n tempFrz0 = params2use['tempFrz0']\n dTempFrz_dS = params2use['dTempFrz_dS']\n freez_point = tempFrz0 + odSigma0._ds['S']*dTempFrz_dS\n \n # Extract Density\n dens = odSigma0._ds['Sigma0'].where(odSigma0._ds['Temp']>freez_point) \n\n # Extract temp and salinity\n t = dens['Temp']\n s = dens['S']\n \n # Create axis\n if ax is None: ax = _plt.gca()\n \n # Use plot if colorless (faster!), otherwise use scatter\n if colorName is None:\n default_kwargs = {'color': 'k', 'linestyle': 'None', 'marker': '.'}\n kwargs = {**default_kwargs, **kwargs}\n ax.plot(S.values.flatten(), T.values.flatten(), **kwargs)\n else:\n # Mask points out of axes\n color = color.where(_np.logical_and(T>min(Tlim), T<max(Tlim)))\n color = color.where(_np.logical_and(S>min(Slim), T<max(Slim)))\n color = 
color.stack(all_dims=color.dims)\n c = color.values\n # Create colorbar (stolen from xarray)\n cmap_kwargs['plot_data'] = c\n cmap_params = _xr.plot.utils._determine_cmap_params(**cmap_kwargs)\n extend = cmap_params.pop('extend')\n _ = cmap_params.pop('levels')\n kwargs = {**cmap_params, **kwargs}\n # Scatter\n sc = ax.scatter(S.values.flatten(), T.values.flatten(), c=c, **kwargs)\n cbar = _plt.colorbar(sc, label=_xr.plot.utils.label_from_attrs(color), extend=extend)\n \n # Plot isopycnals\n default_contour_kwargs = {'colors': 'gray'}\n contour_kwargs = {**default_contour_kwargs, **contour_kwargs}\n CS = ax.contour(s.values, t.values, dens.values, **contour_kwargs)\n ax.clabel(CS, **clabel_kwargs)\n \n # Plot freezing point\n if plotFreez:\n paramsList = ['tempFrz0', 'dTempFrz_dS']\n params2use = {par:od.parameters[par] for par in od.parameters if par in paramsList}\n tempFrz0 = params2use['tempFrz0']\n dTempFrz_dS = params2use['dTempFrz_dS']\n s = _np.unique(s.values.flatten())\n ax.plot(s, tempFrz0 +s*dTempFrz_dS, 'b')\n \n # Set labels and limits\n ax.set_xlabel(_xr.plot.utils.label_from_attrs(S))\n ax.set_ylabel(_xr.plot.utils.label_from_attrs(T))\n ax.set_xlim(Slim)\n ax.set_ylim(Tlim)\n \n # Set title\n title = ''\n T = od._ds['Temp']\n for dim in list(T.dims):\n dim2rem = [d for d in T.dims if len(T[d])==1 and d!=dim]\n tit0 = T.squeeze(dim2rem).drop(dim2rem).isel({dim: 0})._title_for_slice()\n tit1 = T.squeeze(dim2rem).drop(dim2rem).isel({dim: -1})._title_for_slice()\n \n if tit0==tit1:\n tit = tit0\n else:\n if dim in lost_dims:\n tit0 = tit0.replace(dim+' = ', dim+': mean from ')\n else:\n tit0 = tit0.replace(dim+' = ', dim+': from ')\n tit1 = tit1.replace(dim+' = ', ' to ')\n tit = tit0 + tit1\n if title=='':\n title = title + tit\n else:\n title = title + '\\n' + tit\n ax.set_title(title)\n _plt.tight_layout()\n \n return ax\n\n\ndef time_series(od, \n varName, \n meanAxes = False, \n intAxes = False,\n cutout_kwargs = None,\n **kwargs):\n \n \"\"\"\n Plot time series.\n \n Parameters\n ----------\n od: OceanDataset\n oceandataset to check for missing variables\n varName: str, None\n Name of the variable to plot.\n meanAxes: 1D array_like, str, or bool\n List of axes over which to apply mean.\n If True, set meanAxes=od.grid_coords (excluding time, mooring, station).\n If False, does not apply mean.\n intAxes: 1D array_like, str, or bool\n List of axes over which to integrate.\n Integration is performed after mean.\n If True, set intAxes=od.grid_coords (excluding time, mooring, station).\n If False, does not apply int.\n cutout_kwargs: dict\n Keyword arguments for subsample.cutout\n **kwargs:\n Kewyword arguments for xarray.plot.line\n \n Returns\n -------\n Axes object\n \n See also\n --------\n subsample.coutout\n \n References\n ----------\n http://xarray.pydata.org/en/stable/generated/xarray.plot.line.html#xarray.plot.line\n \"\"\"\n \n \n # Check parameters\n if not isinstance(od, _ospy.OceanDataset):\n raise TypeError('`od` must be OceanDataset')\n \n if not isinstance(varName, str):\n raise TypeError('`varName` must be str')\n \n meanAxes, intAxes = _check_mean_and_int_axes(od=od, \n meanAxes=meanAxes, intAxes=intAxes, \n exclude=['time', 'mooring', 'station'])\n \n if not isinstance(cutout_kwargs, (dict, type(None))):\n raise ValueError('`cutout_kwargs` must be dict or None')\n \n # Handle kwargs\n if cutout_kwargs is None: cutout_kwargs = {}\n \n # Cutout first\n if len(cutout_kwargs)!=0:\n od = od.subsample.cutout(**cutout_kwargs)\n \n # Variable name \n _varName = 
_compute._rename_aliased(od, varName)\n od = _compute._add_missing_variables(od, _varName)\n \n # Get time name\n time_name = [dim for dim in od.grid_coords['time'] if dim in od.dataset[varName].dims][0]\n \n # Mean and sum\n da, varName = _compute_mean_and_int(od, varName, meanAxes, intAxes)\n\n # Check\n if len(da.shape)>2:\n dims = list(da.dims)\n dims.remove(time_name)\n raise ValueError('Timeseries containing multiple dimension other than time: {}'.format(dims))\n \n # Plot\n _ = da.plot.line(**{'x': time_name, **kwargs})\n _plt.tight_layout()\n \n return _plt.gca()\n\n\n\ndef horizontal_section(od, \n varName, \n plotType = 'pcolormesh',\n use_coords = True,\n contourName = None,\n meanAxes = False, \n intAxes = False,\n contour_kwargs = None,\n clabel_kwargs = None,\n cutout_kwargs = None,\n **kwargs):\n \n \"\"\"\n Plot horizontal sections.\n \n Parameters\n ----------\n od: OceanDataset\n oceandataset to check for missing variables\n varName: str, None\n Name of the variable to plot.\n plotType: str \n 2D plot type: {'contourf', 'contour', 'imshow', 'pcolormesh'}\n use_coords: bool\n If True, use coordinates for x and y axis (e.g., XC and YC).\n If False, use dimensions for x and y axis (e.g., X and Y)\n contourName: str, None\n Name of the variable to contour on top.\n meanAxes: 1D array_like, str, or bool\n List of axes over which to apply mean.\n If True, set meanAxes=od.grid_coords (excluding X, Y).\n If False, does not apply mean.\n intAxes: 1D array_like, str, or bool\n List of axes over which to integrate.\n Integration is performed after mean.\n If True, set intAxes=od.grid_coords (excluding X, Y).\n If False, does not apply int.\n contour_kwargs: dict\n Keyword arguments for xarray.plot.contour\n clabel_kwargs: dict\n Keyword arguments for matplotlib.pyplot.clabel\n cutout_kwargs: dict\n Keyword arguments for subsample.cutout\n **kwargs:\n Kewyword arguments for xarray.plot.['plotType']\n \n Returns\n -------\n Axes or FacetGrid object\n \n See also\n --------\n subsample.coutout\n animate.horizontal_section\n \n References\n ----------\n http://xarray.pydata.org/en/stable/plotting.html\n \"\"\"\n \n # Check parameters\n if 'mooring' in od.grid_coords or 'station' in od.grid_coords:\n raise ValueError('`od` can not be a mooring or a survey')\n \n if not isinstance(od, _ospy.OceanDataset):\n raise TypeError('`od` must be OceanDataset')\n \n if not isinstance(varName, str):\n raise TypeError('`varName` must be str')\n \n plotTypes = ['contourf', 'contour', 'imshow', 'pcolormesh']\n if not isinstance(plotType, str):\n raise TypeError('`plotType` must be str')\n elif plotType not in plotTypes:\n raise TypeError('plotType [{}] not available. 
Options are: {}'.format(plotType, plotTypes))\n \n if not isinstance(use_coords, bool):\n raise TypeError('`use_coords` must be bool')\n \n if not isinstance(contourName, (type(None), str)):\n raise TypeError('`contourName` must be str or None')\n \n meanAxes, intAxes = _check_mean_and_int_axes(od=od, \n meanAxes=meanAxes, intAxes=intAxes, \n exclude=['X', 'Y'])\n \n if not isinstance(contour_kwargs, (type(None), dict)):\n raise TypeError('`contour_kwargs` must be None or dict')\n \n if not isinstance(clabel_kwargs, (type(None), dict)):\n raise TypeError('`clabel_kwargs` must be None or dict')\n \n if not isinstance(cutout_kwargs, (type(None), dict)):\n raise TypeError('`cutout_kwargs` must be None or dict')\n \n # Handle kwargs\n if contour_kwargs is None: contour_kwargs = {}\n if clabel_kwargs is None: clabel_kwargs = {}\n if cutout_kwargs is None: cutout_kwargs = {}\n \n # Cutout first\n if len(cutout_kwargs)!=0:\n od = od.subsample.cutout(**cutout_kwargs)\n \n # Check variables and add\n listName = [varName]\n if contourName is not None: listName = listName + [contourName]\n _listName = _compute._rename_aliased(od, listName)\n od = _compute._add_missing_variables(od, _listName)\n \n # Apply mean and sum\n da, varName = _compute_mean_and_int(od, varName, meanAxes, intAxes)\n \n # SQUEEZE! Otherwise animation don't show up because xarray make a faceted plot\n da = da.squeeze()\n \n # Get dimension names\n X_name = [dim for dim in od.grid_coords['X'] if dim in da.dims][0]\n Y_name = [dim for dim in od.grid_coords['Y'] if dim in da.dims][0]\n \n # CONTOURNAME\n if contourName is not None: \n\n # Apply mean and sum\n da_contour, contourName = _compute_mean_and_int(od, contourName, meanAxes, intAxes)\n \n # SQUEEZE! Otherwise animation don't show up because xarray make a faceted plot\n da_contour = da_contour.squeeze()\n \n # Get dimension names\n X_name_cont = [dim for dim in od.grid_coords['X'] if dim in da_contour.dims][0]\n Y_name_cont = [dim for dim in od.grid_coords['Y'] if dim in da_contour.dims][0] \n \n # Check dimensions\n dims = list(da.dims)\n dims.remove(X_name)\n dims.remove(Y_name)\n \n # Use coordinates\n if use_coords:\n if X_name=='X' and Y_name=='Y': \n point = 'C'\n elif X_name=='Xp1' and Y_name=='Y':\n point = 'U'\n elif X_name=='X' and Y_name=='Yp1':\n point = 'V'\n elif X_name=='Xp1' and Y_name=='Yp1':\n point = 'G'\n X_name = 'X'+point\n Y_name = 'Y'+point\n\n if contourName is not None:\n if X_name_cont=='X' and Y_name_cont=='Y': \n point_cont = 'C'\n elif X_name_cont=='Xp1' and Y_name_cont=='Y':\n point_cont = 'U'\n elif X_name_cont=='X' and Y_name_cont=='Yp1':\n point_cont = 'V'\n elif X_name_cont=='Xp1' and Y_name_cont=='Yp1':\n point_cont = 'G'\n X_name_cont = 'X'+point_cont\n Y_name_cont = 'Y'+point_cont\n \n # Pop from kwargs\n ax = kwargs.pop('ax', None)\n col = kwargs.pop('col', None)\n col_wrap = kwargs.pop('col_wrap', None)\n subplot_kws = kwargs.pop('subplot_kws', None)\n transform = kwargs.pop('transform', None)\n \n if len(dims)==0:\n # Single plot: \n # Add ax\n if ax is None:\n ax = _plt.axes(projection=od.projection);\n elif od.projection is not None and not hasattr(ax, 'projection'):\n od = od.set_projection(None)\n _warnings.warn(\"\\nSwitching projection off.\"\n \"If ax is passed, it needs to be initialiazed with a projection.\"\n \"\\nE.g., fig, ax=plt.subplots(1, 1, subplot_kw={'projection': od.projection}\", stacklevel=2)\n \n kwargs['ax'] = ax\n \n elif len(dims)==1:\n \n # Multiple plots:\n extra_name = dims[0]\n \n # TODO: For some reason, 
faceting and cartopy are not working very nice with our configurations\n # Drop it for now, but we need to explore it more\n if od.projection is not None:\n _warnings.warn(\"\\nSwitch projection off.\"\n \" This function currently does not support faceting for projected plots.\", stacklevel=2)\n od = od.set_projection(None)\n transform = None\n \n # Add col\n if col is None:\n col = extra_name\n kwargs['col'] = col\n kwargs['col_wrap'] = col_wrap\n \n # Add projection\n if isinstance(subplot_kws, dict):\n projection = subplot_kws.pop('projection', None)\n if projection is None:\n projection = od.projection\n subplot_kws['projection'] = projection\n else:\n subplot_kws = {'projection': od.projection}\n kwargs['subplot_kws'] = subplot_kws\n \n # Add transform\n if transform is None and od.projection is not None:\n kwargs['transform'] = _ccrs.PlateCarree()\n \n # Plot\n args = {'x': X_name, 'y': Y_name, **kwargs}\n plotfunc = eval('_xr.plot.'+plotType)\n p = plotfunc(da, **args)\n\n # Contour\n if contourName is not None: \n ax = args.pop('ax', None)\n transform = args.pop('transform', None)\n subplot_kws = args.pop('subplot_kws', None)\n args = {'x': X_name_cont, 'y': Y_name_cont, 'ax': ax, 'transform': transform, 'subplot_kws': subplot_kws, 'colors': 'gray', 'add_labels': False, **contour_kwargs}\n if ax is not None:\n cont = da_contour.plot.contour(**args, **clabel_kwargs)\n _plt.clabel(cont)\n else:\n for i, thisax in enumerate(p.axes.flat):\n if extra_name in da_contour.dims:\n da_contour_i = da_contour.isel({extra_name: i}).squeeze()\n else:\n da_contour_i = da_contour\n cont = da_contour_i.plot.contour(**{**args, 'ax': thisax})\n _plt.clabel(cont, **clabel_kwargs)\n \n # Labels and return\n add_labels = kwargs.pop('add_labels', None)\n if ax is not None: \n if add_labels is not False:\n try:\n gl = ax.gridlines(crs=transform, draw_labels=True)\n gl.xlabels_top = False\n gl.ylabels_right = False\n except: pass\n if od.projection is None:\n _plt.tight_layout()\n return ax\n else: \n return p\n\n\n\ndef vertical_section(od, \n varName, \n plotType = 'pcolormesh',\n use_dist = True,\n subsampMethod = None,\n contourName = None,\n meanAxes = False, \n intAxes = False,\n contour_kwargs = None,\n clabel_kwargs = None,\n subsamp_kwargs = None,\n cutout_kwargs = None, \n **kwargs):\n \"\"\"\n Plot vertical section.\n \n Parameters\n ----------\n od: OceanDataset\n oceandataset to check for missing variables\n varName: str, None\n Name of the variable to plot.\n plotType: str \n 2D plot type: {'contourf', 'contour', 'imshow', 'pcolormesh'}\n use_dist: bool\n If True, use distances for x axis.\n If False, use mooring or station.\n subsampMethod: str, None\n Subsample methods: {'mooring_array', 'survey_station'}\n contourName: str, None\n Name of the variable to contour on top.\n meanAxes: 1D array_like, str, or bool\n List of axes over which to apply mean.\n If True, set meanAxes=od.grid_coords (time only).\n If False, does not apply mean.\n intAxes: 1D array_like, str, or bool\n List of axes over which to integrate.\n Integration is performed after mean.\n If True, set intAxes=od.grid_coords (time only).\n If False, does not apply int.\n contour_kwargs: dict\n Keyword arguments for xarray.plot.contour\n clabel_kwargs: dict\n Keyword arguments for matplotlib.pyplot.clabel\n subsamp_kwargs: dict\n Keyword arguments for subsample.mooring_array or subsample.survey_stations\n cutout_kwargs: dict\n Keyword arguments for subsample.cutout\n **kwargs:\n Kewyword arguments for xarray.plot.['plotType']\n 
\n Returns\n -------\n Axes or FacetGrid object\n \n See also\n --------\n subsample.coutout\n subsample.mooring_array\n subsample.survey_stations\n animate.vertical_section\n \n References\n ----------\n http://xarray.pydata.org/en/stable/plotting.html\n \"\"\"\n \n # Check parameters\n if not isinstance(od, _ospy.OceanDataset):\n raise TypeError('`od` must be OceanDataset')\n \n if not isinstance(varName, str):\n raise TypeError('`varName` must be str')\n \n plotTypes = ['contourf', 'contour', 'imshow', 'pcolormesh']\n if not isinstance(plotType, str):\n raise TypeError('`plotType` must be str')\n elif plotType not in plotTypes:\n raise TypeError('plotType [{}] not available. Options are: {}'.format(plotType, plotTypes))\n \n if not isinstance(use_dist, bool):\n raise TypeError('`use_dist` must be bool')\n \n if subsampMethod is not None:\n subsampMethods = ['mooring_array', 'survey_stations']\n if not isinstance(subsampMethod, str):\n raise TypeError('`subsampMethod` must be str or None')\n elif subsampMethod not in subsampMethods:\n raise TypeError('subsampMethod [{}] not available. Options are: {}'.format(subsampMethod, subsampMethods))\n \n if not isinstance(contourName, (type(None), str)):\n raise TypeError('`contourName` must be str or None')\n \n meanAxes, intAxes = _check_mean_and_int_axes(od=od, \n meanAxes=meanAxes, intAxes=intAxes, \n exclude=['mooring', 'station', 'X', 'Y', 'Z']) \n \n if not isinstance(contour_kwargs, (type(None), dict)):\n raise TypeError('`contour_kwargs` must be None or dict')\n \n if not isinstance(clabel_kwargs, (type(None), dict)):\n raise TypeError('`clabel_kwargs` must be None or dict')\n \n if not isinstance(subsamp_kwargs, (type(None), dict)):\n raise TypeError('`subsamp_kwargs` must be None or dict')\n \n # Handle kwargs\n if contour_kwargs is None: contour_kwargs = {}\n if clabel_kwargs is None: clabel_kwargs = {}\n if cutout_kwargs is None: cutout_kwargs = {} \n \n # For animation purposes.\n # TODO: take out useless variables?\n if len(cutout_kwargs)!=0: \n od = od.subsample.cutout(**cutout_kwargs)\n \n # Subsample first\n if subsamp_kwargs is not None:\n if subsampMethod=='mooring_array':\n od = od.subsample.mooring_array(**subsamp_kwargs)\n elif subsampMethod=='survey_stations':\n od = od.subsample.survey_stations(**subsamp_kwargs)\n \n # Check variables and add\n listName = [varName]\n if contourName is not None: listName = listName + [contourName]\n _listName = _compute._rename_aliased(od, listName)\n od = _compute._add_missing_variables(od, _listName)\n \n # Apply mean and sum\n da = od.dataset[varName]\n if 'time' in da.dims or 'time_midp' in da.dims:\n da, varName = _compute_mean_and_int(od, varName, meanAxes, intAxes)\n \n # SQUEEZE! 
Otherwise animation don't show up because xarray make a faceted plot\n da = da.squeeze()\n time_coords = {timeName: da[timeName] for timeName in ['time', 'time_midp'] if timeName in da.coords}\n if 'mooring' in od.grid_coords:\n if 'Xp1' in da.dims:\n print('Regridding [{}] along [{}]-axis.'.format(varName, 'X'))\n da_attrs = da.attrs\n da = od.grid.interp(da, 'X')\n da.attrs = da_attrs\n if 'Yp1' in da.dims:\n print('Regridding [{}] along [{}]-axis.'.format(varName, 'Y'))\n da_attrs = da.attrs\n da = od.grid.interp(da, 'Y')\n da.attrs = da_attrs\n hor_name = [dim for dim in od.grid_coords['mooring'] if dim in da.dims][0]\n da = da.assign_coords(**time_coords)\n if hor_name+'_dist' in od._ds.coords:\n da = da.assign_coords(**{hor_name+'_dist': od._ds[hor_name+'_dist']})\n for toRem in ['X', 'Y', 'Xp1', 'Yp1']:\n if toRem in da.coords: da=da.drop(toRem)\n elif 'station' in od.grid_coords:\n hor_name = [dim for dim in od.grid_coords['station'] if dim in da.dims][0]\n else: \n raise ValueError('The oceandataset must be subsampled using mooring or survey')\n ver_name = [dim for dim in od.grid_coords['Z'] if dim in da.dims][0]\n da = da.squeeze()\n \n # CONTOURNAME\n if contourName is not None: \n \n # Apply mean and sum\n da_contour = od.dataset[contourName]\n if 'time' in da_contour.dims or 'time_midp' in da_contour.dims:\n da_contour, contourName = _compute_mean_and_int(od, contourName, meanAxes, intAxes)\n \n # SQUEEZE! Otherwise animation don't show up because xarray make a faceted plot\n da_contour = da_contour.squeeze()\n\n # Get dimension names\n # TODO: make interpolation work with aliases\n if 'mooring' in od.grid_coords:\n if 'Xp1' in da_contour.dims:\n print('Regridding [{}] along [{}]-axis.'.format(contourName, 'X'))\n da_contour_attrs = da_contour.attrs\n da_contour = od.grid.interp(da_contour, 'X')\n da_contour.attrs = da_contour_attrs\n if 'Yp1' in da.dims:\n print('Regridding [{}] along [{}]-axis.'.format(contourName, 'Y'))\n da_contour_attrs = da_contour.attrs\n da_contour = od.grid.interp(da_contour, 'Y')\n da_contour.attrs = da_contour_attrs\n hor_name_cont = [dim for dim in od.grid_coords['mooring'] if dim in da_contour.dims][0]\n if hor_name+'_dist' in od._ds.coords:\n da_contour = da_contour.assign_coords(**{hor_name+'_dist': od._ds[hor_name+'_dist']})\n for toRem in ['X', 'Y', 'Xp1', 'Yp1']:\n if toRem in da_contour.coords: da_contour=da_contour.drop(toRem)\n elif 'station' in od.grid_coords:\n hor_name_cont = [dim for dim in od.grid_coords['station'] if dim in da_contour.dims][0]\n ver_name_cont = [dim for dim in od.grid_coords['Z'] if dim in da_contour.dims][0]\n da_contour = da_contour.squeeze()\n \n # Check dimensions\n dims = list(da.dims)\n dims.remove(hor_name)\n dims.remove(ver_name)\n \n # Use distances\n if use_dist:\n if contourName is None:\n if hor_name+'_dist' in da.coords: \n hor_name=hor_name+'_dist'\n else:\n if hor_name+'_dist' in da.coords and hor_name_cont+'_dist' in da_contour.coords:\n hor_name =hor_name+'_dist'\n hor_name_cont =hor_name_cont+'_dist'\n \n # Pop from kwargs\n ax = kwargs.pop('ax', None)\n col = kwargs.pop('col', None)\n \n if len(dims)==0:\n # Single plot: \n # Add ax\n if ax is None:\n ax = _plt.axes();\n kwargs['ax'] = ax\n \n elif len(dims)==1:\n \n # Multiple plots:\n extra_name = dims[0]\n \n # Add col\n if col is None:\n col = extra_name\n kwargs['col'] = col\n \n # Plot\n args = {'x': hor_name, 'y': ver_name, **kwargs}\n plotfunc = eval('_xr.plot.'+plotType)\n p = plotfunc(da, **args)\n \n # Contour\n if contourName is not 
None: \n ax = args.pop('ax', None)\n args = {'x': hor_name_cont, 'y': ver_name_cont, 'ax': ax, 'colors': 'gray', 'add_labels': False, **contour_kwargs}\n if ax is not None:\n cont = da_contour.plot.contour(**args, **clabel_kwargs)\n _plt.clabel(cont)\n else:\n for i, thisax in enumerate(p.axes.flat):\n if extra_name in da_contour.dims:\n da_contour_i = da_contour.isel({extra_name: i}).squeeze()\n else:\n da_contour_i = da_contour\n cont = da_contour_i.plot.contour(**{**args, 'ax': thisax})\n _plt.clabel(cont, **clabel_kwargs)\n \n \n # Return\n if ax is not None: \n _plt.tight_layout()\n return ax\n else: \n return p\n\n \n \nclass _plotMethdos(object):\n \"\"\"\n Enables use of oceanspy.plot functions as attributes on a OceanDataset.\n For example, OceanDataset.plot.TS_diagram\n \"\"\"\n \n def __init__(self, od):\n self._od = od\n\n @_functools.wraps(TS_diagram)\n def TS_diagram(self, **kwargs):\n return TS_diagram(self._od, **kwargs)\n \n @_functools.wraps(time_series)\n def time_series(self, **kwargs):\n return time_series(self._od, **kwargs)\n \n @_functools.wraps(horizontal_section)\n def horizontal_section(self, **kwargs):\n return horizontal_section(self._od, **kwargs)\n \n @_functools.wraps(vertical_section)\n def vertical_section(self, **kwargs):\n return vertical_section(self._od, **kwargs)\n\n \n# TODO: document this private functions! \n \ndef _check_mean_and_int_axes(od, meanAxes, intAxes, exclude):\n\n if (meanAxes is True and intAxes is not False) or (intAxes is True and meanAxes is not False):\n raise ValueError('If one between `meanAxes` and `intAxes` is True, the other must be False')\n\n if not isinstance(meanAxes, bool):\n meanAxes = _np.asarray(meanAxes, dtype='str')\n if meanAxes.ndim == 0: meanAxes = meanAxes.reshape(1)\n elif meanAxes.ndim >1: raise TypeError('Invalid `meanAxes`')\n axis_error = [axis for axis in meanAxes if axis not in od.grid_coords]\n if len(axis_error)!=0:\n raise ValueError('{} are not in od.grid_coords and can not be averaged'.format(axis_error))\n elif 'time' in meanAxes:\n raise ValueError('`time` can not be in `meanAxes`')\n elif meanAxes is True:\n meanAxes = [coord for coord in od.grid_coords if coord not in exclude]\n else:\n meanAxes = []\n\n if not isinstance(intAxes, bool):\n intAxes = _np.asarray(intAxes, dtype='str')\n if intAxes.ndim == 0: intAxes = intAxes.reshape(1)\n elif intAxes.ndim >1: raise TypeError('Invalid `meanAxes`')\n axis_error = [axis for axis in intAxes if axis not in od.grid_coords]\n if len(axis_error)!=0:\n raise ValueError('{} are not in od.grid_coords and can not be averaged'.format(axis_error))\n elif 'time' in intAxes:\n raise ValueError('`time` can not be in `intAxes`')\n elif intAxes is True:\n intAxes = [coord for coord in od.grid_coords if coord not in exclude]\n else:\n intAxes = [] \n \n if len(intAxes)>0 and len(meanAxes)>0:\n if set(meanAxes).issubset(intAxes) or set(intAxes).issubset(meanAxes):\n raise ValueError('`meanAxes` and `intAxes` can not contain the same Axes')\n \n return meanAxes, intAxes\n\n\ndef _compute_mean_and_int(od, varName, meanAxes, intAxes):\n # Mean and sum\n if len(meanAxes)!=0:\n ds = _compute.weighted_mean(od, varNameList= [varName], axesList=meanAxes, storeWeights=False)\n for var in ds.data_vars:\n varName = var\n od = od.merge_into_oceandataset(ds)\n \n if len(intAxes)!=0:\n ds = _compute.integral(od, varNameList= [varName], axesList=intAxes)\n for var in ds.data_vars:\n varName = var\n od = od.merge_into_oceandataset(ds)\n \n # Extract da\n da = od.dataset[varName]\n 
return da, varName"
] |
[
[
"matplotlib.pyplot.gca",
"matplotlib.pyplot.tight_layout",
"matplotlib.pyplot.clabel",
"numpy.linspace",
"numpy.asarray",
"matplotlib.use",
"matplotlib.pyplot.axes"
]
] |
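The oceanspy plot module in the record above exposes its module-level functions (`TS_diagram`, `time_series`, `horizontal_section`, `vertical_section`) as methods on the dataset through a small accessor class wrapped with `functools.wraps`. A minimal sketch of that pattern, using hypothetical `Dataset` and `plot_series` names in place of the oceanspy classes:

```python
import functools

def plot_series(ds, scale=1.0):
    """Module-level plotting helper that takes the dataset as its first argument."""
    return [v * scale for v in ds.values]

class _PlotMethods(object):
    """Accessor that binds the module-level helpers to one dataset instance."""
    def __init__(self, ds):
        self._ds = ds

    @functools.wraps(plot_series)   # keep the wrapped function's name and docstring
    def plot_series(self, **kwargs):
        # resolves to the module-level plot_series, passing the bound dataset first
        return plot_series(self._ds, **kwargs)

class Dataset(object):
    def __init__(self, values):
        self.values = values

    @property
    def plot(self):
        return _PlotMethods(self)

ds = Dataset([1, 2, 3])
print(ds.plot.plot_series(scale=10))   # [10, 20, 30], same as plot_series(ds, scale=10)
```

Because of `functools.wraps`, `help(ds.plot.plot_series)` shows the module-level function's docstring, so the accessor adds no documentation burden.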
robotcreating2020-1/cola_with_crane_x7_ros
|
[
"bf9f9df35925703a0b625fd3dda3b88e80d1f48a"
] |
[
"cola_examples/scripts/find_red.py"
] |
[
"#!/usr/bin/env python \n# -*- coding: utf-8 -*-\nimport sys\nimport rospy\nimport cv2\nimport numpy as np\nfrom std_msgs.msg import String\nfrom std_msgs.msg import Int32\nfrom sensor_msgs.msg import Image\nfrom cv_bridge import CvBridge, CvBridgeError\n\nclass image_converter:\n def __init__(self):\n self.image_pub = rospy.Publisher(\"image_topic\", Image, queue_size=1)\n self.bridge = CvBridge()\n self.image_sub = rospy.Subscriber(\"/camera/color/image_raw\",Image,self.callback)\n\n def callback(self,data):\n pub = rospy.Publisher(\"bottle_size\", Int32, queue_size=1)\n try:\n cv_image = self.bridge.imgmsg_to_cv2(data, \"bgr8\")\n except CvBridgeError as e:\n print(e)\n\n # RGB表色系からHSV表色系に変換 \n hsv_image = cv2.cvtColor(cv_image, cv2.COLOR_BGR2HSV)\n\n # しきい値の設定1(ここでは赤を抽出) \n color_min = np.array([0,200,50])\n color_max = np.array([30,255,255])\n\n # マスク画像を生成1 \n color_mask1 = cv2.inRange(hsv_image, color_min, color_max)\n \n # しきい値の設定2\n color_min = np.array([150,200,50])\n color_max = np.array([179,255,255])\n\n # マスク画像を生成2 \n color_mask2 = cv2.inRange(hsv_image, color_min, color_max)\n \n # 赤色のマスク\n mask = color_mask1 + color_mask2\n\n # 画像配列のビット毎の倫理席。マスク画像だけが抽出される。 \n cv_image2 = cv2.bitwise_and(cv_image, cv_image, mask = mask)\n\n # マスクした画像からグレースケールへ変換\n gray_image = cv2.cvtColor(cv_image2, cv2.COLOR_BGR2GRAY)\n\n # グレースケールから白黒に変換\n ret,thresh = cv2.threshold(gray_image, 0, 255, cv2.THRESH_OTSU)\n\n # ボトルの輪郭検出\n labels, contours, hierarchy = cv2.findContours(thresh, cv2.RETR_LIST, cv2.CHAIN_APPROX_NONE)\n\n for i in range(0, len(contours)):\n if len(contours[i]) > 0:\n\n # 小さいものは排除\n if cv2.contourArea(contours[i]) < 500:\n continue\n\n # 検出された輪郭線を代入、矩形外枠の座標を計算、矩形を描画\n rect = contours[i]\n x, y, w, h = cv2.boundingRect(rect)\n cv2.rectangle(thresh, (x, y), (x + w, y + h), (255, 0, 0), 10)\n area_size = w * h\n if area_size > 10000:\n print(area_size)\n pub.publish(area_size)\n\n \n #ウインドウのサイズを変更 \n #cv_half_image = cv2.resize(cv_image, (0,0),fx=0.5, fy=0.5)\n #cv_half_image2 = cv2.resize(cv_image2, (0,0),fx=0.5,fy=0.5);\n #cv_half_image3 = cv2.resize(gray_image, (0,0),fx=0.5,fy=0.5);\n #cv_half_image4 = cv2.resize(thresh, (0,0),fx=0.5,fy=0.5);\n\n # ウインドウ表示 \n cv2.imshow(\"Origin Image\", cv_image)\n #cv2.imshow(\"Result Image\", cv_half_image2)\n #cv2.imshow(\"gray Image\", cv_half_image3)\n cv2.imshow(\"bounding\", thresh)\n cv2.waitKey(3)\n\n try:\n self.image_pub.publish(self.bridge.cv2_to_imgmsg(cv_image2, \"bgr8\"))\n except CvBridgeError as e:\n print(e)\n\ndef main(args):\n ic = image_converter()\n rospy.init_node('image_converter', anonymous=True)\n try:\n rospy.spin()\n except KeyboardInterrupt:\n print(\"Shutting down\")\n cv2.destroyAllWindows()\n\nif __name__ == '__main__':\n sub = rospy.Subscriber(\"/bottle_color\", Image, main)\n main(sys.argv)\n pub = rospy.Publisher(\"bottle_size\", Int32, queue_size=1)\n rospy.spin()\n"
] |
[
[
"numpy.array"
]
] |
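The find_red.py record above thresholds red in two separate hue bands because red wraps around both ends of OpenCV's 0-179 hue scale, then combines the masks before contour detection. A standalone sketch of that masking step on a synthetic image (no ROS; `cv2.bitwise_or` is used in place of the `+` in the original, which is equivalent here since the two hue bands never overlap):

```python
import cv2
import numpy as np

# Synthetic BGR image: left half pure red, right half pure green.
img = np.zeros((100, 200, 3), dtype=np.uint8)
img[:, :100] = (0, 0, 255)    # red in BGR
img[:, 100:] = (0, 255, 0)    # green in BGR

hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)

# Red spans both ends of OpenCV's hue range (0-179), so two masks are needed.
mask_low = cv2.inRange(hsv, np.array([0, 100, 50]), np.array([10, 255, 255]))
mask_high = cv2.inRange(hsv, np.array([170, 100, 50]), np.array([179, 255, 255]))
mask = cv2.bitwise_or(mask_low, mask_high)

# Keep only the red pixels, then count them.
red_only = cv2.bitwise_and(img, img, mask=mask)
print("red pixels:", int(np.count_nonzero(mask)))   # 100 * 100 = 10000
```

The contour/bounding-box stage in the original then runs on the grayscale of `red_only` after Otsu thresholding, exactly as in the callback.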
lorenzobasile/DeepRobust
|
[
"3f56dcc45f1fed788423d32cc179c26513416e2e"
] |
[
"deeprobust/image/defense/pgdtraining.py"
] |
[
"\"\"\"\nReference:\nMądry, A., Makelov, A., Schmidt, L., Tsipras, D., & Vladu, A. (2017).\nTowards Deep Learning Models Resistant to Adversarial Attacks.\nstat, 1050, 9.\n\"\"\"\n\nimport os\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\nfrom torchvision import datasets, transforms\nimport torch.nn.functional as F\n\nimport numpy as np\nfrom PIL import Image\nfrom deeprobust.image.attack.pgd import PGD\nfrom deeprobust.image.netmodels.CNN import Net\nfrom deeprobust.image.defense.base_defense import BaseDefense\n\n\nclass PGDtraining(BaseDefense):\n def __init__(self, model, device):\n if not torch.cuda.is_available():\n print('CUDA not availiable, using cpu...')\n self.device = 'cpu'\n else:\n self.device = device\n\n self.model = model\n\n def generate(self, train_loader, test_loader, **kwargs):\n \"\"\"\n Pgd defense process:\n \"\"\"\n self.parse_params(**kwargs)\n\n torch.manual_seed(100)\n device = torch.device(self.device)\n\n optimizer = optim.Adam(self.model.parameters(), self.lr)\n scheduler = optim.lr_scheduler.ExponentialLR(optimizer=optimizer, gamma=self.gamma)\n\n\n save_model = True\n for epoch in range(1, self.epoch + 1):\n print(epoch, flush = True)\n self.train(self.device, train_loader, optimizer, epoch, scheduler)\n self.test(self.model, self.device, test_loader)\n\n if (self.save_model and epoch % 10 == 0):\n if os.path.isdir('./' + str(self.save_dir)):\n torch.save(self.model.state_dict(), str(self.save_dir) + \"/\" + self.save_name)\n print(\"model saved in \" + './' + str(self.save_dir))\n else:\n print(\"make new directory and save model in \" + './' + str(self.save_dir))\n os.mkdir('./' + str(self.save_dir))\n torch.save(self.model.state_dict(), './' + str(self.save_dir) +\"/\" + self.save_name)\n return self.model\n\n def parse_params(self,\n epoch = 100,\n save_dir = \"./defense_models\",\n save_name = \"mnist_pgdtraining_0.3.pt\",\n save_model = True,\n epsilon = 0.3,\n num_steps = 40,\n perturb_step_size = 0.01,\n lr = 5e-4,\n momentum = 0.1,\n gamma = 0.99):\n \"\"\"\n :param epoch : int\n - pgd training epoch\n :param save_dir : str\n - directory path to save model\n :param epsilon : float\n - perturb constraint of pgd adversary example used to train defense model\n :param num_steps : int\n - the perturb\n :param perturb_step_size : float\n - step_size\n :param lr : float\n - learning rate for adversary training process\n :param momentum : float\n - parameter for optimizer in training process\n :param gamma : float\n - parameter for lr decay in training process\n \"\"\"\n self.epoch = epoch\n self.save_model = True\n self.save_dir = save_dir\n self.save_name = save_name\n self.epsilon = epsilon\n self.num_steps = num_steps\n self.perturb_step_size = perturb_step_size\n self.lr = lr\n self.momentum = momentum\n self.gamma = gamma\n\n def train(self, device, train_loader, optimizer, epoch, scheduler):\n \"\"\"\n Training process.\n \"\"\"\n self.model.train()\n correct = 0\n bs = train_loader.batch_size\n\n for batch_idx, (data, target) in enumerate(train_loader):\n\n optimizer.zero_grad()\n\n data, target = data.to(device), target.to(device)\n\n data_adv, output = self.adv_data(data, target, ep = self.epsilon, num_steps = self.num_steps, perturb_step_size = self.perturb_step_size)\n loss = self.calculate_loss(output, target)\n\n loss.backward()\n optimizer.step()\n\n pred = output.argmax(dim = 1, keepdim = True)\n correct += pred.eq(target.view_as(pred)).sum().item()\n\n #print every 10\n if batch_idx % 10 == 0:\n print('Train Epoch: {} 
[{}/{} ({:.0f}%)]\\tLoss: {:.6f}\\tAccuracy:{:.2f}%'.format(\n epoch, batch_idx * len(data), len(train_loader.dataset),\n 100. * batch_idx / len(train_loader), loss.item(), 100 * correct/(10*bs)))\n correct = 0\n scheduler.step()\n\n\n def test(self, model, device, test_loader):\n \"\"\"\n Testing process.\n\n \"\"\"\n model.eval()\n\n test_loss = 0\n correct = 0\n test_loss_adv = 0\n correct_adv = 0\n for data, target in test_loader:\n data, target = data.to(device), target.to(device)\n\n # print clean accuracy\n output = model(data)\n test_loss += F.nll_loss(output, target, reduction='sum').item() # sum up batch loss\n pred = output.argmax(dim = 1, keepdim = True) # get the index of the max log-probability\n correct += pred.eq(target.view_as(pred)).sum().item()\n\n # print adversarial accuracy\n data_adv, output_adv = self.adv_data(data, target, ep = self.epsilon, num_steps = self.num_steps)\n\n test_loss_adv += self.calculate_loss(output_adv, target, redmode = 'sum').item() # sum up batch loss\n pred_adv = output_adv.argmax(dim = 1, keepdim = True) # get the index of the max log-probability\n correct_adv += pred_adv.eq(target.view_as(pred_adv)).sum().item()\n\n test_loss /= len(test_loader.dataset)\n test_loss_adv /= len(test_loader.dataset)\n\n print('\\nTest set: Clean loss: {:.3f}, Clean Accuracy: {}/{} ({:.0f}%)\\n'.format(\n test_loss, correct, len(test_loader.dataset),\n 100. * correct / len(test_loader.dataset)))\n\n print('\\nTest set: Adv loss: {:.3f}, Adv Accuracy: {}/{} ({:.0f}%)\\n'.format(\n test_loss_adv, correct_adv, len(test_loader.dataset),\n 100. * correct_adv / len(test_loader.dataset)))\n\n def adv_data(self, data, output, ep = 0.3, num_steps = 40, perturb_step_size = 0.01):\n \"\"\"\n Generate input(adversarial) data for training.\n \"\"\"\n\n adversary = PGD(self.model)\n data_adv = adversary.generate(data, output.flatten(), epsilon = ep, num_steps = num_steps, step_size = perturb_step_size)\n output = self.model(data_adv)\n\n return data_adv, output\n\n def calculate_loss(self, output, target, redmode = 'mean'):\n \"\"\"\n Calculate loss for training.\n \"\"\"\n\n loss = F.nll_loss(output, target, reduction = redmode)\n return loss\n\n"
] |
[
[
"torch.nn.functional.nll_loss",
"torch.manual_seed",
"torch.optim.lr_scheduler.ExponentialLR",
"torch.cuda.is_available",
"torch.device"
]
] |
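The PGDtraining record above delegates attack generation to deeprobust's `PGD.generate(...)` each training step. A minimal sketch of the L-infinity projected-gradient loop that call stands for (plain PyTorch, not the deeprobust API; the model is assumed to return log-probabilities so `nll_loss` applies, matching the defense's loss):

```python
import torch
import torch.nn.functional as F

def pgd_attack(model, x, y, epsilon=0.3, step_size=0.01, num_steps=40):
    """L-inf PGD: repeated signed-gradient ascent steps, projected back into the epsilon ball."""
    x_adv = x.clone().detach()
    for _ in range(num_steps):
        x_adv.requires_grad_(True)
        loss = F.nll_loss(model(x_adv), y)
        grad = torch.autograd.grad(loss, x_adv)[0]
        with torch.no_grad():
            x_adv = x_adv + step_size * grad.sign()                        # ascend the loss
            x_adv = torch.max(torch.min(x_adv, x + epsilon), x - epsilon)  # project to the L-inf ball
            x_adv = x_adv.clamp(0.0, 1.0)                                  # keep a valid pixel range
    return x_adv.detach()
```

Adversarial training then runs the usual forward/backward pass on `model(x_adv)` instead of the clean batch, which is what `adv_data` feeds into `calculate_loss` in the code above.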
vatj/integer_polyomino
|
[
"ffa7f53bb17e43c53ea387bfc3cf8a6c917037c4"
] |
[
"notebooks/example_genome_metrics_to_hdf.py"
] |
[
"#!/usr/bin/env python\n# coding: utf-8\n\n# In[1]:\n\n\nimport sys, os\nos.nice(0)\n\n\n# In[2]:\n\n\nimport pandas as pd\nimport numpy as np\nimport glob\n\n\n# In[3]:\n\n\nimport integer_polyomino.assembly as ipa\nsys.path.append(os.path.join(os.getcwd(), \"..\", \"src\", \"integer_polyomino\", \"scripts\"))\nimport hdf\n\n\n# In[4]:\n\n\ndata_dir = os.path.join(os.getcwd(), \"..\", \"data\", \"V\" + ipa.__version__)\nif not os.path.exists(data_dir):\n raise ValueError(\"Specify an existing directory\")\n\n\n# In[5]:\n\n\n# file_name = 'GenomeMetrics_N3_C6_T25_B150_Cx8_J10_D1_S0.txt'\ncurrent_files = glob.glob(os.path.join(data_dir,'*.txt'))\nfile_names = [file.rsplit('/')[-1] for file in current_files]\ngenome_files = [file_name for file_name in file_names if (\"GenomeMetrics_\" in file_name)]\nduplicate_files = [file_name for file_name in file_names if (\"GenomeMetricsDuplicate\" in file_name)]\nfile_hdf = 'Processed_GenomeMetrics.h5'\n\n\n# In[6]:\n\n\ngenome_files = ['GenomeMetrics_N4_C6_T25_B200_Cx8_J1000_D1_S0.txt',\n 'GenomeMetrics_N3_C8_T25_B150_Cx10_J1000_D1_S0.txt',\n 'GenomeMetrics_N3_C6_T25_B150_Cx8_J1000_D1_S0.txt',\n 'GenomeMetrics_N2_C6_T25_B100_Cx8_J1000_D1_S0.txt']\nduplicate_files = ['GenomeMetricsDuplicate_N4_C8_T25_B200_Cx10_J1000_D1_S0.txt',\n 'GenomeMetricsDuplicate_N4_C6_T25_B200_Cx8_J1000_D1_S0.txt',\n 'GenomeMetricsDuplicate_N3_C6_T25_B150_Cx8_J1000_D1_S0.txt']\n\n\n# In[7]:\n\n\npd.set_option('io.hdf.default_format', 'table')\n\n\n# In[8]:\n\n\nwith pd.HDFStore(os.path.join(data_dir, file_hdf)) as store:\n hdf.write_to_hdf(data_dir, genome_files, store, 'genome', False)\n\n\n# In[11]:\n\n\nwith pd.HDFStore(os.path.join(data_dir, file_hdf)) as store:\n hdf.write_to_hdf(data_dir, duplicate_files, store, 'genome', True)\n\n\n# In[ ]:\n\n\n\n\n"
] |
[
[
"pandas.set_option"
]
] |
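The notebook record above writes its genome metrics through `pd.HDFStore` after switching the default format to 'table'. A small sketch of why that setting matters, on a toy frame with hypothetical keys (the 'table' format requires the PyTables package and, unlike the 'fixed' format, supports appending and on-disk queries):

```python
import pandas as pd

pd.set_option("io.hdf.default_format", "table")   # appendable, queryable HDF5 tables

df = pd.DataFrame({"generation": [0, 1, 2], "fitness": [0.1, 0.4, 0.9]})

with pd.HDFStore("processed_metrics.h5") as store:
    store.put("genome/run0", df, data_columns=True)       # create the table; make columns queryable
    store.append("genome/run0", df)                       # table format allows appending more rows
    print(store.select("genome/run0", "fitness > 0.3"))   # query on disk without loading everything
```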
javiermcebrian/glcapsnet
|
[
"07b1093c922461024d8049161e646bd1847eccbb"
] |
[
"configs/02_gf/config.py"
] |
[
"import numpy as np\nimport pandas as pd\nimport sys\nimport os\nsys.path.append(os.path.abspath('../'))\n\nfrom utils.functions import kld, margin_loss, spread_loss, mean_squared_error, sum_squared_errors, information_gain_wrapper, normalized_scanpath_saliency, cc, similarity, auc_judd, auc_borji_wrapper, sAUC_wrapper, sNSS_wrapper\nfrom utils.callbacks import VariableScheduler\nfrom keras.optimizers import Adam\nfrom keras.callbacks import LearningRateScheduler\nimport keras.backend as K\nimport tensorflow as tf\nimport cv2\n\n# --- GLOBAL --- #\nsetup = 'experiments/v0'\npath_output = os.path.join('/home/javier/TFM/results', setup)\npath_features = '/home/javier/TFM/heavy/features'\npath_conditions = '/home/javier/TFM/heavy/DREYEVE_DATA/dr(eye)ve_design.txt'\npath_gt = '/home/javier/TFM/heavy/gt'\n###\npath_checkpoints = 'checkpoints'\npath_logs = 'logs'\npath_tests = 'tests'\npath_predictions = 'predictions'\npath_rgb = 'rgb'\npath_of = 'of'\npath_segmentation = 'segmentation_probabilities'\ntotal_videos = 74\ntotal_frames_each_video = 7500 # GT has 7500\nh, w = 112, 112\nh_gt, w_gt = 112, 112\n\n# SUBSETS\nregistry_ids = pd.read_csv('/home/javier/TFM/results/registries/registry.csv')\n# Train, val, test, predict\nmask_train_val = registry_ids['video_id'].isin(np.arange(1, 37 + 1))\nmask_val = registry_ids['frame_id'].isin(np.arange(3500 + 1, 4000 + 1))\n# Train\nregistry_ids_train = registry_ids[mask_train_val & ~mask_val]\n# Val\nregistry_ids_val = registry_ids[mask_train_val & mask_val]\n# Test\nregistry_ids_test = registry_ids[~mask_train_val]\n# Predict\nregistry_ids_predict = registry_ids_test\n\n# TRAIN + TEST + PREDICT\nuse_multiprocessing = True # True is thread safe when workers > 0\nworkers = 8\nmax_queue_size = 32\n\n# TRAIN\nmonitor = 'val_loss'\nmode = 'min'\nfilename_save = 'weights.h5'\nsave_best_only = True\nlr = 0.0001\nlr_decay = 0.99\nbatch_size = 8 # single-feature: 32. 
multi-feature: 8\nepochs = 50\ninitial_epoch = 0\nsteps_per_epoch = 512 # Number of train batches: if None it takes all\nvalidation_steps = 512 # Number of val batches: if None it takes all\noptimizer = Adam(lr = lr)\ncustom_callbacks = [LearningRateScheduler(schedule = lambda epoch: lr * (lr_decay ** epoch))]\ndata_augmentation_config = {}\n\n# COMPILE\nloss = kld\nloss_weights = None\nmetrics = [kld]\n\n# TEST\nsteps = len(registry_ids_test) # Number of test batches: if None it takes all [TEST BATCH_SIZE IS 1]\n\n# PREDICT\nsteps_pred = len(registry_ids_predict)\nshuffle_pred = True\ndo_pipeline_predictions = True # Losses + Metrics + VAM.png\ndo_pipeline_hidden = False # Data\nlayer_names = []\nop_names = []\n\n# CAPSNET (design params: TRAIN + TEST + PREDICT)\nload_weights_by_name = True\nfreeze_loaded_weights = False\npretrain_config = [\n 'path_to_pretrained_weights_rgb.h5',\n 'path_to_pretrained_weights_of.h5',\n 'path_to_pretrained_weights_seg.h5'\n]\nfusion_config = [\n {'op': 'Concatenate', 'params': {'name': 'fusion_concat', 'axis': -1}},\n {'op': 'Conv2D', 'params': {'name': 'fusion_conv1', 'filters': 576, 'kernel_size': 3, 'padding': 'same', 'activation': 'relu'}},\n {'op': 'Conv2D', 'params': {'name': 'fusion_conv2', 'filters': 96, 'kernel_size': 3, 'padding': 'same', 'activation': 'relu'}},\n {'op': 'Conv2D', 'params': {'name': 'fusion_conv3', 'filters': 48, 'kernel_size': 3, 'padding': 'same', 'activation': 'relu'}},\n {'op': 'Conv2D', 'params': {'name': 'fusion_conv4', 'filters': 24, 'kernel_size': 3, 'padding': 'same', 'activation': 'relu'}},\n {'op': 'BilinearUpsampling', 'params': {'name': 'fusion_bilinearupsampling1', 'output_size': (55, 55)}},\n {'op': 'Conv2D', 'params': {'name': 'fusion_conv5', 'filters': 12, 'kernel_size': 3, 'padding': 'same', 'activation': 'relu'}},\n {'op': 'BilinearUpsampling', 'params': {'name': 'fusion_bilinearupsampling2', 'output_size': (112, 112)}},\n {'op': 'Conv2D', 'params': {'name': 'decoded', 'filters': 1, 'kernel_size': 3, 'padding': 'same', 'activation': 'linear'}}\n]\ncapsnet_config = {\n 'inputs': {'rgb': {'norm': 'mean_3std_clip'}, 'of': {'norm': 'mean_3std_clip'}, 'seg': {'norm': 'probability'}},\n 'branch': {\n 'shortcuts': {},\n 'blocks': [\n {'op': 'Conv2D', 'params': {'name': 'branch_conv1', 'filters': 96, 'kernel_size': 7, 'padding': 'same', 'strides': 1, 'activation': 'relu'}},\n {'op': 'MaxPooling2D', 'params': {'name': 'branch_maxpool1', 'pool_size': 3, 'strides': 2}},\n {'op': 'Dropout', 'params': {'name': 'branch_dropout1', 'rate': 0.5}},\n {'op': 'Conv2D', 'params': {'name': 'branch_conv2', 'filters': 256, 'kernel_size': 5, 'padding': 'same', 'strides': 1, 'activation': 'relu'}},\n {'op': 'MaxPooling2D', 'params': {'name': 'branch_maxpool2', 'pool_size': 3, 'strides': 2}},\n {'op': 'Dropout', 'params': {'name': 'branch_dropout2', 'rate': 0.5}},\n {'op': 'Conv2D', 'params': {'name': 'branch_conv3', 'filters': 512, 'kernel_size': 3, 'padding': 'same', 'strides': 1, 'activation': 'relu'}},\n {'op': 'Conv2D', 'params': {'name': 'branch_conv4', 'filters': 512, 'kernel_size': 5, 'padding': 'same', 'strides': 1, 'activation': 'relu'}},\n {'op': 'Conv2D', 'params': {'name': 'branch_conv5', 'filters': 512, 'kernel_size': 5, 'padding': 'same', 'strides': 1, 'activation': 'relu'}}\n ]\n }\n}\n"
] |
[
[
"numpy.arange",
"pandas.read_csv"
]
] |
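The config record above carves its train/val/test subsets out of a single registry purely with boolean masks built from `isin` over `np.arange` id ranges. A toy sketch of that split logic with hypothetical ids but the same masking expressions:

```python
import numpy as np
import pandas as pd

registry = pd.DataFrame({
    "video_id": [1, 10, 37, 38, 74],
    "frame_id": [100, 3600, 3999, 200, 5000],
})

mask_train_val = registry["video_id"].isin(np.arange(1, 37 + 1))      # videos 1..37
mask_val = registry["frame_id"].isin(np.arange(3500 + 1, 4000 + 1))   # frames 3501..4000

train = registry[mask_train_val & ~mask_val]
val = registry[mask_train_val & mask_val]
test = registry[~mask_train_val]
print(len(train), len(val), len(test))   # 1 2 2
```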
eman-ramadan/deepcache_netai2018
|
[
"b13474dc677f4b13095498baa7b8be05f9c2b3d0"
] |
[
"RequestsGeneration/requestAnalysis.py"
] |
[
"\"\"\"\n DeepCache\n \nDeepCache is distributed under the following BSD 3-Clause License:\n\nCopyright(c) 2019\n University of Minensota - Twin Cities\n Authors: Arvind Narayanan, Saurabh Verma, Eman Ramadan, Pariya Babaie, and Zhi-Li Zhang\n\nAll rights reserved.\n\nRedistribution and use in source and binary forms, with or without\nmodification, are permitted provided that the following conditions are met:\n\n1. Redistributions of source code must retain the above copyright notice, this\n list of conditions and the following disclaimer.\n\n2. Redistributions in binary form must reproduce the above copyright notice,\n this list of conditions and the following disclaimer in the documentation\n and/or other materials provided with the distribution.\n\n3. Neither the name of the copyright holder nor the names of its\n contributors may be used to endorse or promote products derived from\n this software without specific prior written permission.\n\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\nAND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\nIMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\nDISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE\nFOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\nDAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\nSERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\nCAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\nOR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\nOF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n\n@author: Eman Ramadan ([email protected])\n\n\nDESCRIPTION:\n This code analyzes the generated requests and extracts the properties of each object such as: when it was first\n introduced, its frequency, its lifespan. It also extracts the number of active unique objects requested in each\n hour.\n\n\nPREPROCESSING_SCRIPTS:\n Need to run any of these scripts before running the requestAnalysis.py script\n 1. generateSyntheticDataset.py: generates a synthetic dataset\n 2. 
generateMediSynDataset.py: generates a synthetic dataset according to MediSyn paper\n\n\nINPUT:\n The input directory is '../Datasets':\n 1- REQUESTFILENAME: The request file to be analyzed @Line 41\n 2- FORCE_GENERATE_BINS: a flag to force the regeneration of the bin file, by default it is False\n 3- FORCE_GENERATE_PROPERTIES: a flag to force the regeneration of the object properties, by default it is False\n\n\nOUTPUT:\n The output files are generated in '../Datasets' directory:\n 1- {RequestFile}_bins.csv: which indicates the number of unique objects in each hour.\n Format: {binID, uniqueObjNum, binMinRequestTime, binMaxRequestTime}\n 2- {RequestFile}_properties.csv: which includes the properties of each object.\n Format: {object_ID, frequency, lifeSpan, minRequestTime, maxRequestTime, start_day, end_day}\n\"\"\"\n\nfrom __future__ import print_function\nimport pandas as pd\nimport numpy as np\nimport os\n\nFORCE_GENERATE_BINS = False\nFORCE_GENERATE_PROPERTIES = False\n\nREQDIR = '../Datasets/'\nREQUESTFILENAME = 'mediSynDataset_x2_O3488.csv' #'syntheticDataset_O50.csv'\n\nREQUESTPATH = REQDIR + REQUESTFILENAME\nBINFILENAME = REQDIR + REQUESTFILENAME[:-4] + '_bins.csv'\nPROPERTIES_FILENAME = REQDIR + REQUESTFILENAME[:-4] + '_properties.csv'\nBIN_SECONDS_WIDTH = 3600\n\n# Load Requests File\nprint('Loading Request File ...')\nreqdf = pd.read_csv(REQUESTPATH, sep=',')\nprint('Sorting Request File by time ...')\nreqdf.sort_values(by=['request_time'], inplace=True)\nprint('Request File Sorted')\n\n# get all 1-hour intervals/bins\nif not os.path.isfile(BINFILENAME) or FORCE_GENERATE_BINS:\n bins = np.arange(np.ceil(reqdf.request_time.min()), np.ceil(reqdf.request_time.max()), BIN_SECONDS_WIDTH)\n print('Starting binning process ...')\n reqdf['binID'] = pd.cut(reqdf['request_time'], bins, labels=np.arange(0, len(bins)-1))\n\n grp = reqdf.groupby(['binID']).agg({'object_ID': {'uniqueObjNum': lambda x: x.nunique()},\n 'request_time': ['min', 'max']})\n grp.reset_index(level=0, inplace=True)\n # clean up columns\n cols = list()\n for k in grp.columns:\n if k[1] == '':\n cols.append(k[0])\n else:\n cols.append(k[1])\n grp.columns = cols\n\n filtered = grp.dropna()\n filtered[\"uniqueObjNum\"] = filtered[\"uniqueObjNum\"].apply(int)\n filtered.rename(columns={'min': 'binMinRequestTime'}, inplace=True)\n filtered.rename(columns={'max': 'binMaxRequestTime'}, inplace=True)\n filtered.to_csv(BINFILENAME, index=False)\n del filtered\n\n\nif not os.path.isfile(PROPERTIES_FILENAME) or FORCE_GENERATE_PROPERTIES:\n # Calculate object frequency\n print('Calculating Object Frequency')\n objfreqdf = (reqdf['object_ID'].value_counts()).to_frame()\n objfreqdf.rename(columns={'object_ID': 'frequency'}, inplace=True)\n objfreqdf['object_ID'] = objfreqdf.index\n\n # Calculate object lifespan\n print('Calculating Object LifeSpan & Introduction Day')\n reqdf.sort_values(by=['object_ID'], inplace=True)\n objLifespandf = reqdf.groupby(['object_ID']).agg({'request_time': ['min', 'max']})\n objLifespandf.columns = ['_'.join(col).strip() for col in objLifespandf.columns.values]\n objLifespandf.rename(columns={'request_time_min': 'minRequestTime'}, inplace=True)\n objLifespandf.rename(columns={'request_time_max': 'maxRequestTime'}, inplace=True)\n objLifespandf['object_ID'] = objLifespandf.index\n objLifespandf['lifeSpan'] = (objLifespandf['maxRequestTime'] - objLifespandf['minRequestTime'])/86400\n min_request_time = reqdf['request_time'].min()\n objLifespandf['start_day'] = (objLifespandf['minRequestTime'] - 
min_request_time) / 86400\n objLifespandf['end_day'] = (objLifespandf['maxRequestTime'] - min_request_time) / 86400\n objLifespandf[\"start_day\"] = objLifespandf[\"start_day\"].apply(int)\n objLifespandf[\"end_day\"] = objLifespandf[\"end_day\"].apply(int)\n objLifespandf.sort_values('start_day', ascending=True, inplace=True)\n objLifespandf.index.names = ['index']\n\n # Save the properties of the objects\n mergeddf = pd.merge(objfreqdf, objLifespandf, on='object_ID')\n mergeddf = mergeddf[['object_ID', 'frequency', 'lifeSpan', 'minRequestTime', 'maxRequestTime', 'start_day',\n 'end_day']]\n mergeddf.sort_values('start_day', ascending=True, inplace=True)\n mergeddf.to_csv(PROPERTIES_FILENAME, index=False)\n\n print('Properties File Saved')\n del objfreqdf\n del objLifespandf\n del mergeddf\n\ndel reqdf\nprint('Done')\n"
] |
[
[
"pandas.merge",
"pandas.read_csv"
]
] |
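The requestAnalysis record above buckets requests into one-hour windows with `pd.cut` and then aggregates per bin. A compact sketch of the same binning step on a toy log, using pandas' named-aggregation syntax rather than the nested-dict form in the original (the nested form is deprecated in recent pandas):

```python
import numpy as np
import pandas as pd

# Toy request log: timestamps in seconds, object ids.
reqdf = pd.DataFrame({
    "request_time": [10, 500, 3700, 4000, 7300],
    "object_ID": ["a", "b", "a", "c", "a"],
})

BIN_SECONDS_WIDTH = 3600
edges = np.arange(0, reqdf.request_time.max() + BIN_SECONDS_WIDTH, BIN_SECONDS_WIDTH)
reqdf["binID"] = pd.cut(reqdf["request_time"], edges, labels=np.arange(len(edges) - 1))

bins = reqdf.groupby("binID").agg(
    uniqueObjNum=("object_ID", "nunique"),          # active unique objects per hour
    binMinRequestTime=("request_time", "min"),
    binMaxRequestTime=("request_time", "max"),
).dropna()
print(bins)
```

The per-object properties file in the original follows the same recipe, grouping by `object_ID` instead of `binID` and deriving lifespan and start/end day from the min/max request times.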
Erica233/mlflow-project-best-practices
|
[
"3208af250fb91f44c635031db694b92a16d31729"
] |
[
"old_main.py"
] |
[
"# pylint: disable=no-name-in-module\n# pylint: disable=no-self-argument\n\nfrom fastapi import FastAPI\nimport uvicorn\nimport mlflow\nimport pandas as pd\nfrom pydantic import BaseModel\nfrom fastapi.responses import JSONResponse\nfrom fastapi.encoders import jsonable_encoder\n\nclass Story(BaseModel):\n text: str\n\n#real news\n#\"text\": \"they are all treasonous lying narcissistical sociopathic bastardswhen trump says they need to drain the swamp or whateverhes not wrong but whos going to have hes back when hes dredging the filth out of the swamp that is political washington and all the corporate skid marks along with the corrupt banksters and especially the fed reservewho\"\ndef predict(text):\n print(f\"Accepted payload: {text}\")\n my_data = {\n \"author\": {0: \"Jacques de Seingalt\"},\n \"published\": {0: \"2016-11-24T03:01:28.961+02:00\"},\n \"title\": {0: \"an exploration of finding meaning in life through logotherapy\"},\n \"text\": {0: text},\n \"language\": {0: \"english\"},\n \"site_url\": {0: \"returnofkings.com\"},\n \"main_img_url\": {0: \"No Image URL\"},\n \"type\": {0: \"hate\"},\n \"title_without_stopwords\": {0: \"ugly truth six leftist heroes\"},\n \"text_without_stopwords\": {0: \"saker message current saker messages russia celebrates unity day liberation moscow polish roman papists army views november comments scotts corner scott national unity day first celebrated november commemorates popular uprising lead prince dmitry pozharsky meat merchant kuzma minin ejected alien occupying forces polish roman papists army moscow november generally end time troubles foreign interventions russia name alludes idea classes russian society willingly united preserve russian statehood demise seemed inevitable even though neither tsar patriarch guide recently episode made russian movie minin pozharsky liberation moscow triptych russian land artist yuri pantyukhin russia muscovites celebrate unity day capital river dance simferopol crimea russia putin patriarch kirill bless new monument vladimir great nov president vladimir putin unveiled new monument russias first christian leader vladimir great moscow friday opening ceremony took place meters kremlin walls coincided russian national unity day vladimir putin russian president russian holiness respected muscovites dear friends greet congratulate opening monument saint equaltoapostles prince vladimir big significant event moscow whole country russian compatriots symbolic held national unity day centre capital near walls ancient kremlin heart russia vladimir putin russian president russian strong moral support cohesion unity helped ancestors overcome difficulties live win glory fatherland strengthen power greatness generation generation today duty stand together modern threats challenges basing spiritual precepts invaluable traditions unity concord move forward ensuring continuity thousandyear history patriarch kirill moscow russia russian monument prince vladimir symbol unity peoples farther peoples historical rus currently living within borders many states monument farther may everywhere children live contradiction bad children forget father essential saker trenches emerging multipolar world first comment leave reply click get info formatting leave name field empty want post anonymous preferable choose name becomes clear said email address mandatory either website automatically checks spam please refer moderation policies details check make sure comment mistakenly marked spam takes time effort please patient comment appears thanks 
replies comment maximum formating examples use writingbbold textb results bold text iitalic texti results italic text also combine two formating tags example get bolditalic textememphasized textem results emphasized text strongstrong textstrong results strong text qa quote textq results quote text quotation marks added automatically citea phrase block text needs citedcite results phrase block text needs cited blockquotea heavier version quoting block textblockquote results heavier version quoting block text span several lines use possibilities appropriately meant help create follow discussions better way assist grasping content value comment quickly last leasta hrefhttplinkaddresscomname linka results name link need use special character paragraphs need anymore write like paragraphs separated live preview appears automatically start typing text area show comment look like send think confusing ignore code write like search articles\"},\n \"hasImage\": {0: 0},\n }\n data = pd.DataFrame(data=my_data)\n result = loaded_model.predict(pd.DataFrame(data))\n return result\n\n\n# Load model as a PyFuncModel.\nloaded_model = mlflow.pyfunc.load_model('model')\napp = FastAPI()\n\[email protected](\"/predict\")\nasync def predict_story(story: Story):\n print(f\"predict_story accepted json payload: {story}\")\n result = predict(story.text)\n print(f\"The result is the following payload: {result}\")\n payload = {\"FakeNewsTrueFalse\": result.tolist()[0]}\n json_compatible_item_data = jsonable_encoder(payload)\n return JSONResponse(content=json_compatible_item_data)\n\[email protected](\"/\")\nasync def root():\n return {\"message\": \"Hello Model\"}\n\[email protected](\"/add/{num1}/{num2}\")\nasync def add(num1: int, num2: int):\n \"\"\"Add two numbers together\"\"\"\n\n total = num1 + num2\n return {\"total\": total}\n\n\nif __name__ == '__main__':\n uvicorn.run(app, port=8080, host='0.0.0.0')"
] |
[
[
"pandas.DataFrame"
]
] |
sushmita-mishra/e-comm-chatobot-luis
|
[
"ede7c98e75bcac1ff1c55396dc24392d33ce0ee5"
] |
[
"dialogs/operations/createorder_dialog.py"
] |
[
"from botbuilder.dialogs import (\r\n ComponentDialog,\r\n WaterfallDialog,\r\n WaterfallStepContext,\r\n DialogTurnResult,\r\n)\r\nfrom botbuilder.dialogs.prompts import TextPrompt, PromptOptions, ChoicePrompt, ConfirmPrompt\r\nfrom botbuilder.core import MessageFactory\r\nfrom botbuilder.schema import InputHints\r\n\r\nfrom .completeorder_dialog import CompleteOrderDialog\r\nfrom order_details import OrderDetails\r\n\r\nimport pandas as pd\r\nimport string\r\nimport random\r\nimport orderApp\r\nfrom datetime import date\r\n\r\nimport luisApp\r\n\r\n\r\nclass CreateOrderDialog(ComponentDialog):\r\n def __init__(self, completeorder_dialog: CompleteOrderDialog, dialog_id:str = None):\r\n super(CreateOrderDialog, self).__init__(dialog_id or CreateOrderDialog.__name__)\r\n\r\n self.add_dialog(TextPrompt(TextPrompt.__name__))\r\n self.add_dialog(ChoicePrompt(ChoicePrompt.__name__))\r\n self.add_dialog(ConfirmPrompt(ConfirmPrompt.__name__))\r\n\r\n self._completeorder_dialog_id = completeorder_dialog.id\r\n self.add_dialog(completeorder_dialog)\r\n\r\n self.add_dialog(\r\n WaterfallDialog(\r\n WaterfallDialog.__name__,\r\n [self.order_step, self.act_step, self.completeorder_step, self.summary_step]\r\n )\r\n )\r\n\r\n self.initial_dialog_id = WaterfallDialog.__name__\r\n\r\n async def order_step(self, step_context: WaterfallStepContext) -> DialogTurnResult:\r\n message_text = \"Please provide the order details.\"\r\n prompt_message = MessageFactory.text(\r\n message_text, message_text, InputHints.expecting_input)\r\n return await step_context.prompt(\r\n TextPrompt.__name__, PromptOptions(prompt=prompt_message)\r\n )\r\n\r\n \r\n async def act_step(self, step_context: WaterfallStepContext) -> DialogTurnResult:\r\n user_details = step_context.options\r\n user_id = user_details.user_id\r\n order_desc = str(step_context.result)\r\n\r\n user_details.orders_list = []\r\n\r\n\r\n data = luisApp.getLuisResponse(order_desc)\r\n\r\n \r\n orders = data['prediction']['entities']['order']\r\n\r\n for order in orders:\r\n obj_order = OrderDetails()\r\n if 'item_name' in order:\r\n obj_order.item_name = order['item_name'][0]\r\n if 'item_quantity' in order:\r\n obj_order.item_quantity = order['item_quantity'][0] \r\n if 'item_flavour' in order:\r\n obj_order.item_flavour = order['item_flavour'][0]\r\n if 'item_size' in order:\r\n obj = order['item_size'][0]\r\n size_value = None\r\n size_unit = None\r\n if 'size_value' in obj:\r\n size_value = str(obj['size_value'][0]) + \" \"\r\n if 'size_unit' in obj:\r\n size_unit = obj['size_unit'][0]\r\n obj_order.item_size = size_value + size_unit \r\n\r\n user_details.orders_list.append(obj_order)\r\n\r\n for o in user_details.orders_list:\r\n print(\"item name : \", o.item_name)\r\n print(\"item quantity : \", o.item_quantity)\r\n print(\"item flavour : \", o.item_flavour)\r\n print(\"item size : \", o.item_size)\r\n\r\n user_details.current_order = 0\r\n\r\n \r\n\r\n return await step_context.next(user_details)\r\n\r\n\r\n async def completeorder_step(self, step_context: WaterfallStepContext) -> DialogTurnResult:\r\n user_details = step_context.options\r\n\r\n if len(user_details.orders_list) > 0:\r\n \r\n\r\n return await step_context.begin_dialog(self._completeorder_dialog_id, user_details)\r\n \r\n else:\r\n return await step_context.next(user_details)\r\n\r\n\r\n\r\n async def summary_step(self, step_context: WaterfallStepContext) -> DialogTurnResult:\r\n\r\n user_details = step_context.options\r\n user_id = user_details.user_id\r\n #generate order id\r\n N = 
4\r\n order_id = ''.join(random.choices(string.digits, k= N))\r\n order_id = 'ord' + order_id\r\n order_date =date.today()\r\n order_status = \"Order Received\"\r\n\r\n df = pd.DataFrame()\r\n items = []\r\n\r\n \r\n\r\n print(\"print the data:\")\r\n for o in user_details.orders_list:\r\n order = o.item_quantity + \" \" + o.item_flavour + \" \" + o.item_name \r\n if o.item_size:\r\n order = order + \" \" + o.item_size\r\n print(order)\r\n\r\n items.append(order)\r\n\r\n df['order_description'] = items\r\n df['user_id'] = user_id\r\n df['order_id'] = order_id\r\n df['order_status'] = order_status\r\n df['creation_date'] = order_date\r\n \r\n\r\n orderApp.addOrders(df)\r\n\r\n msg_text = (\"Your order number is \" + order_id + \". Here are the orders provided-\")\r\n\r\n msg = MessageFactory.text(\r\n msg_text, msg_text, InputHints.ignoring_input\r\n )\r\n await step_context.context.send_activity(msg)\r\n\r\n for i in range(0, len(items)):\r\n msg_text = (items[i])\r\n msg = MessageFactory.text(msg_text, msg_text, InputHints.ignoring_input)\r\n await step_context.context.send_activity(msg)\r\n \r\n\r\n return await step_context.end_dialog(user_details)"
] |
[
[
"pandas.DataFrame"
]
] |
WHSnyder/tensorflow
|
[
"a4821531c61821b1442417694aaf93821f07045a"
] |
[
"tensorflow/python/eager/context.py"
] |
[
"# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"State management for eager execution.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport collections\nimport contextlib\nimport copy\nimport os\nimport random\nimport threading\n\nfrom absl import logging\nimport numpy as np\nimport six\n\nfrom tensorflow.core.framework import function_pb2\nfrom tensorflow.core.protobuf import config_pb2\nfrom tensorflow.core.protobuf import rewriter_config_pb2\nfrom tensorflow.python import pywrap_tfe\nfrom tensorflow.python import tf2\nfrom tensorflow.python.client import pywrap_tf_session\nfrom tensorflow.python.eager import executor\nfrom tensorflow.python.eager import monitoring\nfrom tensorflow.python.framework import c_api_util\nfrom tensorflow.python.framework import device as pydev\nfrom tensorflow.python.util import compat\nfrom tensorflow.python.util import is_in_graph_mode\nfrom tensorflow.python.util import tf_contextlib\nfrom tensorflow.python.util.tf_export import tf_export\n\nGRAPH_MODE = 0\nEAGER_MODE = 1\n\ndefault_execution_mode = EAGER_MODE if tf2.enabled() else GRAPH_MODE\n\n# Cache from (old_device_name, partial_new_device_name) -> (new_device_name,\n# new_device_spec).\n# Note that we do not protect this with a lock and instead rely on python's GIL\n# and the idempotent nature of writes to provide thread safety.\n_device_parsing_cache = {}\n_starting_device_spec = pydev.DeviceSpec.from_string(\"\")\n\n_MAXINT32 = 2**31 - 1\n\nDEVICE_PLACEMENT_EXPLICIT = pywrap_tfe.TFE_DEVICE_PLACEMENT_EXPLICIT\nDEVICE_PLACEMENT_WARN = pywrap_tfe.TFE_DEVICE_PLACEMENT_WARN\nDEVICE_PLACEMENT_SILENT = pywrap_tfe.TFE_DEVICE_PLACEMENT_SILENT\nDEVICE_PLACEMENT_SILENT_FOR_INT32 = (\n pywrap_tfe.TFE_DEVICE_PLACEMENT_SILENT_FOR_INT32)\n\nSYNC = 0\nASYNC = 1\n\nMIRRORING_NONE = pywrap_tfe.TFE_MIRRORING_NONE\nMIRRORING_ALL = pywrap_tfe.TFE_MIRRORING_ALL\n\n_KEEP_ALIVE_SECS = 600\n\n_python_eager_context_create_counter = monitoring.Counter(\n \"/tensorflow/api/python/eager_context_create_counter\",\n \"Counter for number of eager contexts created in Python.\")\n\n\nclass _EagerTensorCache(object):\n \"\"\"Simple cache which evicts items based on length in a FIFO manner.\"\"\"\n\n def __init__(self, max_items=256, max_tensor_size=10000):\n self._data = collections.OrderedDict()\n self._max_items = max_items\n self._max_tensor_size = max_tensor_size\n\n def put(self, key, value):\n if value._num_elements() > self._max_tensor_size: # pylint: disable=protected-access\n return\n\n self._data[key] = value\n\n if len(self._data) > self._max_items:\n self._data.popitem(last=False)\n\n def get(self, key):\n return self._data.get(key, None)\n\n def flush(self):\n self._data = {}\n\n\nclass FunctionCallOptions(object):\n \"\"\"Options applied at call sites of eager functions.\n\n Eager functions are 
functions decorated with tf.contrib.eager.defun.\n \"\"\"\n\n def __init__(self, executor_type=None, config_proto=None):\n \"\"\"Constructor.\n\n Args:\n executor_type: (optional) name of the executor to be used to execute the\n eager function. If None or an empty string, the default Tensorflow\n executor will be used.\n config_proto: (optional) a `config_pb2.ConfigProto` proto or\n a serialized string of that proto.\n The config used by Grappler when optimizing the function graph.\n Each concrete function is optimized the first time is called. Changing\n config_proto after the first call has no effect.\n If config_proto is None, an empty RewriterConfig will be used.\n \"\"\"\n self.config_proto_serialized = config_proto\n self.executor_type = executor_type\n\n @property\n def executor_type(self):\n return self._executor_type\n\n @executor_type.setter\n def executor_type(self, executor_type):\n self._executor_type = executor_type\n\n @property\n def config_proto_serialized(self):\n return self._config_proto_serialized\n\n @config_proto_serialized.setter\n def config_proto_serialized(self, config):\n if isinstance(config, config_pb2.ConfigProto):\n self._config_proto_serialized = config.SerializeToString(\n deterministic=True)\n elif isinstance(config, str):\n self._config_proto_serialized = config\n elif config is None:\n self._config_proto_serialized = (\n config_pb2.ConfigProto().SerializeToString())\n else:\n raise ValueError(\"the rewriter config must be either a \"\n \"config_pb2.ConfigProto, or a serialized string of that \"\n \"proto or None. got: {}\".format(type(config)))\n\n\n# Map from context_id (an int) to _TensorCaches.\n# Dicts are thread safe in CPython.\n# TODO(iga): Remove this once TensorCaches are moved to C++.\n_tensor_caches_map = {}\n\n\nclass _TensorCaches(threading.local):\n \"\"\"Thread local tensor caches.\"\"\"\n\n def __init__(self):\n super(_TensorCaches, self).__init__()\n self._ones_rank_cache = None\n self._zeros_cache = None\n\n @property\n def ones_rank_cache(self):\n if not self._ones_rank_cache:\n self._ones_rank_cache = _EagerTensorCache()\n return self._ones_rank_cache\n\n @property\n def zeros_cache(self):\n if not self._zeros_cache:\n self._zeros_cache = _EagerTensorCache()\n return self._zeros_cache\n\n\nclass _ThreadLocalData(threading.local):\n \"\"\"Thread local storage for the eager context.\"\"\"\n\n def __init__(self):\n super(_ThreadLocalData, self).__init__()\n self.device_spec = _starting_device_spec\n self.device_name = \"\"\n self.is_eager = default_execution_mode == EAGER_MODE\n self.scope_name = \"\"\n self.function_call_options = None\n self.executor = None\n self.op_callbacks = []\n self.invoking_op_callbacks = False\n\n\nContextSwitch = collections.namedtuple(\n \"ContextSwitch\", [\"is_building_function\", \"enter_context_fn\",\n \"device_stack\"])\n\n\n# `_ContextSwitchStack` is a `threading.local` to match the semantics of\n# ``DefaultGraphStack`, which is also a `threading.local`.\nclass _ContextSwitchStack(threading.local):\n \"\"\"A thread-local stack of context switches.\"\"\"\n\n def __init__(self, eager):\n super(_ContextSwitchStack, self).__init__()\n self.stack = []\n if eager:\n # Initialize the stack with a pointer to enter the eager context; this\n # ensures that the fact that eager execution was enabled is propagated\n # across threads, since (1) `enable_eager_execution` modifies a\n # process-level flag (`default_execution_mode`) and (2) `__init__` is\n # called each time a threading.local object is used in a 
separate thread.\n self.push(is_building_function=False, enter_context_fn=eager_mode,\n device_stack=None)\n\n def push(self, is_building_function, enter_context_fn, device_stack):\n \"\"\"Push metadata about a context switch onto the stack.\n\n A context switch can take any one of the two forms: installing a graph as\n the default graph, or entering the eager context. For each context switch,\n we record whether or not the entered context is building a function.\n\n Args:\n is_building_function: (bool.) Whether the context is building a function.\n enter_context_fn: (function.) A callable that executes the context switch.\n For example, `graph.as_default` or `eager_mode`.\n device_stack: If applicable, the device function stack for this\n graph. When breaking out of graphs in init_scope, the innermost nonempty\n device stack is used. Eager contexts put `None` here and the value is\n never used.\n \"\"\"\n\n self.stack.append(\n ContextSwitch(is_building_function, enter_context_fn, device_stack))\n\n def pop(self):\n \"\"\"Pop the stack.\"\"\"\n\n self.stack.pop()\n\n\n@tf_export(\"config.LogicalDevice\")\nclass LogicalDevice(\n collections.namedtuple(\"LogicalDevice\", [\"name\", \"device_type\"])):\n \"\"\"Abstraction for a logical device initialized by the runtime.\n\n A `tf.config.LogicalDevice` corresponds to an initialized logical device on a\n `tf.config.PhysicalDevice` or a remote device visible to the cluster. Tensors\n and operations can be placed on a specific logical device by calling\n `tf.device` with a specified `tf.config.LogicalDevice`.\n\n Fields:\n name: The fully qualified name of the device. Can be used for Op or function\n placement.\n device_type: String declaring the type of device such as \"CPU\" or \"GPU\".\n \"\"\"\n pass\n\n\n@tf_export(\"config.LogicalDeviceConfiguration\",\n \"config.experimental.VirtualDeviceConfiguration\")\nclass LogicalDeviceConfiguration(\n collections.namedtuple(\"LogicalDeviceConfiguration\",\n [\"memory_limit\", \"experimental_priority\"])):\n \"\"\"Configuration class for a logical devices.\n\n The class specifies the parameters to configure a `tf.config.PhysicalDevice`\n as it is initialized to a `tf.config.LogicalDevice` during runtime\n initialization. Not all fields are valid for all device types.\n\n See `tf.config.get_logical_device_configuration` and\n `tf.config.set_logical_device_configuration` for usage examples.\n\n Fields:\n memory_limit: (optional) Maximum memory (in MB) to allocate on the virtual\n device. Currently only supported for GPUs.\n experimental_priority: (optional) Priority to assign to a virtual device.\n Lower values have higher priorities and 0 is the default.\n Within a physical GPU, the GPU scheduler will prioritize ops on virtual\n devices with higher priority. Currently only supported for Nvidia GPUs.\n \"\"\"\n\n def __new__(cls, memory_limit=None, experimental_priority=None):\n return super(LogicalDeviceConfiguration,\n cls).__new__(cls, memory_limit, experimental_priority)\n\n\n@tf_export(\"config.PhysicalDevice\")\nclass PhysicalDevice(\n collections.namedtuple(\"PhysicalDevice\", [\"name\", \"device_type\"])):\n \"\"\"Abstraction for a locally visible physical device.\n\n TensorFlow can utilize various devices such as the CPU or multiple GPUs\n for computation. 
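The `LogicalDeviceConfiguration` docstring above defers to `tf.config.set_logical_device_configuration` for usage. A hedged sketch of that pattern (illustrative only; the two 2048 MB limits are arbitrary example values, and a GPU is assumed to be present):

```python
import tensorflow as tf

# Sketch: split the first visible GPU (if any) into two logical devices with
# example memory limits. This must run before the eager runtime initializes.
gpus = tf.config.list_physical_devices("GPU")
if gpus:
    tf.config.set_logical_device_configuration(
        gpus[0],
        [tf.config.LogicalDeviceConfiguration(memory_limit=2048),
         tf.config.LogicalDeviceConfiguration(memory_limit=2048)])
    logical_gpus = tf.config.list_logical_devices("GPU")
    print(len(gpus), "physical GPU ->", len(logical_gpus), "logical GPUs")
```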
Before initializing a local device for use, the user can\n customize certain properties of the device such as it's visibility or memory\n configuration.\n\n Once a visible `tf.config.PhysicalDevice` is initialized one or more\n `tf.config.LogicalDevice` objects are created. Use\n `tf.config.set_visible_devices` to configure the visibility of a physical\n device and `tf.config.set_logical_device_configuration` to configure multiple\n `tf.config.LogicalDevice` objects for a `tf.config.PhysicalDevice`. This is\n useful when separation between models is needed or to simulate a multi-device\n environment.\n\n Fields:\n name: Unique identifier for device.\n device_type: String declaring the type of device such as \"CPU\" or \"GPU\".\n \"\"\"\n pass\n\n\nclass _AtomicCounter(object):\n \"\"\"A simple atomic counter.\"\"\"\n\n def __init__(self):\n self._value = 0\n self._lock = threading.Lock()\n\n def increment_and_get(self):\n with self._lock:\n self._value += 1\n return self._value\n\n\n_context_id_counter = _AtomicCounter()\n\n\nclass _TensorCacheDeleter(object):\n \"\"\"Deletes tensor caches for a given context.\"\"\"\n\n def __init__(self, context_id):\n self._context_id = context_id\n\n def __del__(self):\n if _tensor_caches_map is None:\n return\n if self._context_id in _tensor_caches_map:\n del _tensor_caches_map[self._context_id]\n\n\n# If the below import is made available through the BUILD rule, then this\n# function is overridden and will instead return True and cause Tensorflow\n# graphs to run with TFRT.\ndef is_tfrt_enabled():\n return None\n\n\ntry:\n from tensorflow.python.framework.is_tfrt_test_true import is_tfrt_enabled # pylint: disable=g-import-not-at-top\nexcept: # pylint: disable=bare-except\n pass\n\n\n# TODO(agarwal): rename to EagerContext / EagerRuntime ?\n# TODO(agarwal): consider keeping the corresponding Graph here.\nclass Context(object):\n \"\"\"Environment in which eager operations execute.\"\"\"\n\n # TODO(agarwal): create and link in some documentation for `execution_mode`.\n # pylint: disable=redefined-outer-name\n def __init__(self,\n config=None,\n device_policy=None,\n execution_mode=None,\n server_def=None):\n \"\"\"Creates a new Context.\n\n Args:\n config: (Optional.) A `ConfigProto` protocol buffer with configuration\n options for the Context. Note that a lot of these options may be\n currently unimplemented or irrelevant when eager execution is enabled.\n device_policy: (Optional.) What policy to use when trying to run an\n operation on a device with inputs which are not on that device.\n When set to None, an appropriate value will be picked automatically.\n The value picked may change between TensorFlow releases.\n\n Defaults to DEVICE_PLACEMENT_SILENT.\n Valid values:\n - DEVICE_PLACEMENT_EXPLICIT: raises an error if the placement is\n not correct.\n - DEVICE_PLACEMENT_WARN: copies the tensors which are not on the\n right device but raises a warning.\n - DEVICE_PLACEMENT_SILENT: silently copies the tensors. This might\n hide performance problems.\n - DEVICE_PLACEMENT_SILENT_FOR_INT32: silently copies int32 tensors,\n raising errors on the other ones.\n execution_mode: (Optional.) Policy controlling how operations dispatched\n are actually executed. When set to None, an appropriate value will be\n picked automatically. The value picked may change between TensorFlow\n releases.\n Valid values:\n - SYNC: executes each operation synchronously.\n - ASYNC: executes each operation asynchronously. 
These\n operations may return \"non-ready\" handles.\n server_def: (Optional.) A tensorflow::ServerDef proto.\n Enables execution on remote devices. GrpcServers need to be started by\n creating an identical server_def to this, and setting the appropriate\n task_indexes, so that the servers can communicate. It will then be\n possible to execute operations on remote devices.\n\n Raises:\n ValueError: If execution_mode is not valid.\n \"\"\"\n # This _id is used only to index the tensor caches.\n # TODO(iga): Remove this when tensor caches are moved to C++.\n self._id = _context_id_counter.increment_and_get()\n self._tensor_cache_deleter = _TensorCacheDeleter(self._id)\n _tensor_caches_map[self._id] = _TensorCaches()\n\n self._config = config\n self._thread_local_data = _ThreadLocalData()\n self._context_switches = _ContextSwitchStack(self.executing_eagerly())\n self._context_handle = None\n self._context_devices = None\n self._seed = None\n self._initialize_lock = threading.Lock()\n self._initialized = False\n if device_policy is None:\n device_policy = DEVICE_PLACEMENT_SILENT\n self._device_policy = device_policy\n self._mirroring_policy = None\n if execution_mode not in (None, SYNC, ASYNC):\n raise ValueError(\n \"execution_mode should be None/SYNC/ASYNC. Got %s\" % execution_mode)\n if execution_mode is None:\n execution_mode = SYNC\n self._default_is_async = execution_mode == ASYNC\n self._lazy_remote_inputs_copy = None\n self._use_tfrt = is_tfrt_enabled()\n self._server_def = server_def\n self._collective_ops_server_def = None\n self._collective_leader = None\n self._collective_scoped_allocator_enabled_ops = None\n self._collective_use_nccl_communication = None\n self._collective_device_filters = None\n\n self._device_lock = threading.Lock()\n self._physical_devices = None\n self._physical_device_to_index = None\n self._visible_device_list = []\n self._memory_growth_map = None\n self._virtual_device_map = {}\n\n # Values set after construction\n self._optimizer_jit = None\n self._intra_op_parallelism_threads = None\n self._inter_op_parallelism_threads = None\n self._soft_device_placement = None\n self._log_device_placement = None\n self._enable_mlir_bridge = None\n self._enable_mlir_graph_optimization = None\n self._optimizer_experimental_options = {}\n\n _python_eager_context_create_counter.get_cell().increase_by(1)\n # pylint: enable=redefined-outer-name\n\n def _set_global_seed(self, seed):\n \"\"\"Set a global eager mode seed for random ops.\"\"\"\n self._seed = seed\n # `random.Random(seed)` needs `seed` to be hashable, while values of type\n # e.g. `np.int64` or `np.ndarray` are not. 
We use `int(...)` to convert them\n # to int.\n try:\n hash(seed)\n except TypeError:\n seed = int(np.array(seed))\n self._rng = random.Random(seed)\n # Also clear the kernel cache, to reset any existing seeds\n if self._context_handle is not None:\n pywrap_tfe.TFE_ContextClearCaches(self._context_handle)\n\n def _internal_operation_seed(self):\n \"\"\"Returns a fake operation seed.\n\n In eager mode, user shouldn't set or depend on operation seed.\n Here, we generate a random seed based on global seed to make\n operation's randomness different and depend on the global seed.\n\n Returns:\n A fake operation seed based on global seed.\n \"\"\"\n return self._rng.randint(0, _MAXINT32)\n\n def _initialize_logical_devices(self):\n \"\"\"Helper to initialize devices.\"\"\"\n # Store list of devices\n logical_devices = []\n context_devices = []\n device_list = pywrap_tfe.TFE_ContextListDevices(self._context_handle)\n try:\n self._num_gpus = 0\n for i in range(pywrap_tfe.TF_DeviceListCount(device_list)):\n dev_name = pywrap_tfe.TF_DeviceListName(device_list, i)\n context_devices.append(pydev.canonical_name(dev_name))\n spec = pydev.DeviceSpec.from_string(dev_name)\n # If the job is localhost, we assume that the cluster has not yet been\n # configured and thus clear the job, replica & task.\n if spec.job == \"localhost\":\n spec = spec.replace(job=None, replica=None, task=None)\n logical_devices.append(\n LogicalDevice(name=spec.to_string(), device_type=spec.device_type))\n dev_type = pywrap_tfe.TF_DeviceListType(device_list, i)\n if dev_type == \"GPU\":\n self._num_gpus += 1\n\n finally:\n self._logical_devices = logical_devices\n self._context_devices = context_devices\n pywrap_tfe.TF_DeleteDeviceList(device_list)\n\n def ensure_initialized(self):\n \"\"\"Initialize handle and devices if not already done so.\"\"\"\n if self._initialized:\n return\n with self._initialize_lock:\n if self._initialized:\n return\n assert self._context_devices is None\n opts = pywrap_tfe.TFE_NewContextOptions()\n try:\n config_str = self.config.SerializeToString()\n pywrap_tfe.TFE_ContextOptionsSetConfig(opts, config_str)\n if self._device_policy is not None:\n pywrap_tfe.TFE_ContextOptionsSetDevicePlacementPolicy(\n opts, self._device_policy)\n if self._mirroring_policy is not None:\n pywrap_tfe.TFE_ContextOptionsSetMirroringPolicy(\n opts, self._mirroring_policy)\n if self._default_is_async == ASYNC:\n pywrap_tfe.TFE_ContextOptionsSetAsync(opts, True)\n if self._lazy_remote_inputs_copy is not None:\n pywrap_tfe.TFE_ContextOptionsSetLazyRemoteInputsCopy(\n opts, self._lazy_remote_inputs_copy)\n if self._use_tfrt is not None:\n pywrap_tfe.TFE_ContextOptionsSetTfrt(opts, self._use_tfrt)\n context_handle = pywrap_tfe.TFE_NewContext(opts)\n finally:\n pywrap_tfe.TFE_DeleteContextOptions(opts)\n assert not (self._server_def and self._collective_ops_server_def), (\n \"Cannot enable remote execution as well as collective ops at the \"\n \"moment. 
If this is important to you, please file an issue.\")\n if self._server_def is not None:\n server_def_str = self._server_def.SerializeToString()\n pywrap_tfe.TFE_ContextSetServerDef(context_handle, _KEEP_ALIVE_SECS,\n server_def_str)\n elif self._collective_ops_server_def is not None:\n server_def_str = self._collective_ops_server_def.SerializeToString()\n pywrap_tfe.TFE_EnableCollectiveOps(context_handle, server_def_str)\n\n self._context_handle = context_handle\n self._initialize_logical_devices()\n self._initialized = True\n\n def _clear_caches(self):\n self.ones_rank_cache().flush()\n self.zeros_cache().flush()\n pywrap_tfe.TFE_ClearScalarCache()\n\n def get_server_def(self):\n return self._server_def\n\n def set_server_def(self, server_def, keep_alive_secs=_KEEP_ALIVE_SECS):\n \"\"\"Allow setting a server_def on the context.\n\n When a server def is replaced, it effectively clears a bunch of caches\n within the context. If you attempt to use a tensor object that was pointing\n to a tensor on the remote device, it will raise an error.\n\n Args:\n server_def: A tensorflow::ServerDef proto.\n Enables execution on remote devices.\n keep_alive_secs: Num. seconds after which the remote end will hang up.\n As long as the client is still alive, the server state for the context\n will be kept alive. If the client is killed (or there is some failure),\n the server will clean up its context keep_alive_secs after the final RPC\n it receives.\n\n Raises:\n ValueError: if server_def is None.\n \"\"\"\n if not server_def:\n raise ValueError(\"server_def is None.\")\n\n self._server_def = server_def\n\n if self._context_handle:\n server_def_str = server_def.SerializeToString()\n pywrap_tfe.TFE_ContextSetServerDef(self._context_handle, keep_alive_secs,\n server_def_str)\n self._initialize_logical_devices()\n\n # Clear all the caches in case there are remote tensors in them.\n self._clear_caches()\n\n def update_server_def(self, server_def, keep_alive_secs=_KEEP_ALIVE_SECS):\n \"\"\"Update a server_def on the context.\n\n Args:\n server_def: A tensorflow::ServerDef proto. Enables execution on remote\n devices.\n keep_alive_secs: Num. seconds after which the remote end will hang up. As\n long as the client is still alive, the server state for the context will\n be kept alive. If the client is killed (or there is some failure), the\n server will clean up its context keep_alive_secs after the final RPC it\n receives.\n\n Raises:\n ValueError: if server_def is None.\n \"\"\"\n if not server_def:\n raise ValueError(\"server_def is None.\")\n\n self._server_def = server_def\n\n if self._context_handle:\n server_def_str = server_def.SerializeToString()\n pywrap_tfe.TFE_ContextUpdateServerDef(self._context_handle,\n keep_alive_secs, server_def_str)\n self._initialize_logical_devices()\n\n self._clear_caches()\n\n def check_alive(self, worker_name):\n \"\"\"Checks whether a remote worker is alive or not.\n\n Args:\n worker_name: a string representing the remote worker. 
It must be a fully\n specified name like \"/job:worker/replica:0/task:0\".\n\n Returns:\n a boolean indicating whether the remote worker is alive or not.\n\n Raises:\n ValueError: if context is not initialized.\n \"\"\"\n # TODO(yuefengz): support checking multiple workers.\n if self._context_handle:\n return pywrap_tfe.TFE_ContextCheckAlive(self._context_handle, worker_name)\n else:\n raise ValueError(\"Context is not initialized.\")\n\n def sync_executors(self):\n \"\"\"Sync both local executors and the ones on remote workers.\n\n In async execution mode, local function calls can return before the\n coresponding remote op/function execution requests are completed. Calling\n this method creates a synchronization barrier for remote executors. It only\n returns when all remote pending nodes are finished, potentially with errors\n if any remote executors are in error state.\n\n Raises:\n ValueError: if context is not initialized.\n \"\"\"\n if self._context_handle:\n pywrap_tfe.TFE_ContextSyncExecutors(self._context_handle)\n else:\n raise ValueError(\"Context is not initialized.\")\n\n def clear_executor_errors(self):\n \"\"\"Clear errors in both local executors and remote workers.\n\n After receiving errors from remote workers, additional requests on the fly\n could further taint the status on the remote workers due to the async nature\n of remote execution. Calling this method block on waiting for all pending\n nodes in remote executors to finish and clear their error statuses.\n\n Raises:\n ValueError: if context is not initialized.\n \"\"\"\n if self._context_handle:\n pywrap_tfe.TFE_ContextClearExecutors(self._context_handle)\n else:\n raise ValueError(\"Context is not initialized.\")\n\n def enable_collective_ops(self, server_def):\n \"\"\"Enable distributed collective ops with an appropriate server_def.\n\n Args:\n server_def: A tensorflow::ServerDef proto. Enables execution on remote\n devices.\n\n Raises:\n ValueError: if server_def is None.\n RuntimeError: if this method is not called at program startup.\n \"\"\"\n if not server_def:\n raise ValueError(\"server_def is None.\")\n\n self._collective_ops_server_def = server_def\n\n # TODO(b/129298253): Allow creating datasets/tensors before enabling\n # collective ops.\n if self._context_handle is not None:\n logging.warning(\"Enabling collective ops after program startup may cause \"\n \"error when accessing previously created tensors.\")\n with self._initialize_lock:\n assert self._initialized\n server_def_str = self._collective_ops_server_def.SerializeToString()\n pywrap_tfe.TFE_EnableCollectiveOps(self._context_handle, server_def_str)\n self._initialize_logical_devices()\n self._clear_caches()\n\n def configure_collective_ops(\n self,\n collective_leader=\"\",\n scoped_allocator_enabled_ops=(\"CollectiveReduce\",),\n use_nccl_communication=False,\n device_filters=None):\n \"\"\"Configure collective ops.\n\n Collective group leader is necessary for collective ops to run, other\n configurations are mainly for the purpose of performance.\n\n Args:\n collective_leader: a device string for collective leader, e.g.\n \"/job:worker/replica:0/task:0\"; empty string means local execution of\n collective ops.\n scoped_allocator_enabled_ops: a tuple or a list of op names for scoped\n allocator to run with.\n use_nccl_communication: whether to use nccl communication for collective\n ops.\n device_filters: a tuple or a list of device strings. 
If set, corresponding\n task can only see the devices filtered by these device filters.\n\n Raises:\n RuntimeError: if this method is not called at program startup.\n \"\"\"\n if self._collective_leader is not None:\n if (self._collective_leader != collective_leader or\n self._collective_scoped_allocator_enabled_ops !=\n scoped_allocator_enabled_ops or\n self._collective_use_nccl_communication != use_nccl_communication or\n self._collective_device_filters != device_filters):\n raise ValueError(\"Collective ops are already configured.\")\n else:\n return\n\n if self._context_handle is not None:\n raise RuntimeError(\"Collective ops must be configured at program startup\")\n\n self._collective_leader = collective_leader\n self._collective_scoped_allocator_enabled_ops = scoped_allocator_enabled_ops\n self._collective_use_nccl_communication = use_nccl_communication\n self._collective_device_filters = device_filters\n\n @property\n def _handle(self):\n if self._context_handle is None:\n raise AssertionError(\"Context must be initialized first.\")\n\n return self._context_handle\n\n @property\n def _devices(self):\n if self._context_devices is None:\n raise AssertionError(\"Context must be initialized first.\")\n\n return self._context_devices\n\n def __str__(self):\n if self._context_handle is None:\n return \"Eager TensorFlow Context. Devices currently uninitialized.\"\n else:\n devices = self._devices\n lines = [\"Eager TensorFlow Context with %d devices\" % (len(devices))]\n for i, d in enumerate(devices):\n lines.append(\" Device %d: %s\" % (i, d))\n return \"\\n\".join(lines)\n\n @tf_contextlib.contextmanager\n def _mode(self, mode):\n \"\"\"A context manager to allow setting the mode to EAGER/GRAPH.\"\"\"\n ctx = self._thread_local_data\n old_is_eager = ctx.is_eager\n ctx.is_eager = mode == EAGER_MODE\n if mode == EAGER_MODE:\n # Entering graph mode does not provide us with sufficient information to\n # record a context switch; graph-based context switches are only logged\n # when a graph is registered as the default graph.\n self.context_switches.push(False, eager_mode, None)\n try:\n yield\n finally:\n ctx.is_eager = old_is_eager\n if mode == EAGER_MODE:\n self.context_switches.pop()\n\n def executing_eagerly(self):\n \"\"\"Returns True if current thread has eager executing enabled.\"\"\"\n return self._thread_local_data.is_eager\n\n def ones_rank_cache(self):\n \"\"\"Per-device cache for scalars.\"\"\"\n return _tensor_caches_map[self._id].ones_rank_cache\n\n def zeros_cache(self):\n \"\"\"Per-device cache for scalars.\"\"\"\n return _tensor_caches_map[self._id].zeros_cache\n\n @property\n def scope_name(self):\n \"\"\"Returns scope name for the current thread.\"\"\"\n return self._thread_local_data.scope_name\n\n @scope_name.setter\n def scope_name(self, s):\n \"\"\"Sets scope name for the current thread.\"\"\"\n self._thread_local_data.scope_name = s\n\n @property\n def device_name(self):\n \"\"\"Returns the device name for the current thread.\"\"\"\n return self._thread_local_data.device_name\n\n @property\n def device_spec(self):\n \"\"\"Returns the device spec for the current thread.\"\"\"\n return self._thread_local_data.device_spec\n\n def _set_device(self, device_name, device_spec):\n self._thread_local_data.device_name = device_name\n self._thread_local_data.device_spec = device_spec\n\n def device(self, name):\n \"\"\"Context-manager to force placement of operations and Tensors on a device.\n\n Args:\n name: Name of the device or None to get default placement.\n\n Returns:\n 
Context manager that forces device placement.\n\n Raises:\n ValueError: If name is not a string or is an invalid device name.\n RuntimeError: If device scopes are not properly nested.\n \"\"\"\n if isinstance(name, LogicalDevice):\n name = name.name\n elif pydev.is_device_spec(name):\n name = name.to_string()\n return _EagerDeviceContext(self, name)\n\n def devices(self):\n \"\"\"List of the names of devices available to execute operations.\"\"\"\n return self._devices\n\n def host_address_space(self):\n self.ensure_initialized()\n with c_api_util.tf_buffer() as buffer_:\n pywrap_tfe.TFE_HostAddressSpace(self._context_handle, buffer_)\n address_space = pywrap_tf_session.TF_GetBuffer(buffer_).decode(\"utf-8\")\n return address_space\n\n # TODO(fishx): remove this property.\n @property\n def execution_mode(self):\n \"\"\"Gets execution mode for current thread.\"\"\"\n return ASYNC if self.is_async() else SYNC\n\n @execution_mode.setter\n def execution_mode(self, mode):\n \"\"\"Sets execution mode for current thread.\"\"\"\n if mode not in (None, SYNC, ASYNC):\n raise ValueError(\n \"Execution mode should be None/SYNC/ASYNC. Got %s\" % mode)\n\n if mode is None:\n mode = SYNC\n\n enable_async = (mode == ASYNC)\n if self.is_async() != enable_async:\n # Only set the execution mode if the context has already been initialized\n if self._context_handle is not None:\n self.executor.wait()\n executor_new = executor.new_executor(enable_async)\n self._thread_local_data.executor = executor_new\n pywrap_tfe.TFE_ContextSetExecutorForThread(self._context_handle,\n executor_new.handle())\n else:\n self._default_is_async = enable_async\n\n def is_async(self):\n if self._context_handle is not None:\n return self.executor.is_async()\n else:\n return self._default_is_async\n\n @property\n def executor(self):\n ensure_initialized()\n return executor.Executor(\n pywrap_tfe.TFE_ContextGetExecutorForThread(self._context_handle))\n\n @executor.setter\n def executor(self, e):\n ensure_initialized()\n pywrap_tfe.TFE_ContextSetExecutorForThread(self._context_handle, e.handle())\n\n @property\n def config(self):\n \"\"\"Return the ConfigProto with all runtime deltas applied.\"\"\"\n # Ensure physical devices have been discovered and config has been imported\n self._initialize_physical_devices()\n\n config = config_pb2.ConfigProto()\n if self._config is not None:\n config.CopyFrom(self._config)\n\n if self._optimizer_jit is not None:\n config.graph_options.optimizer_options.global_jit_level = (\n config_pb2.OptimizerOptions.ON_1\n if self._optimizer_jit else config_pb2.OptimizerOptions.OFF)\n if self._intra_op_parallelism_threads is not None:\n config.intra_op_parallelism_threads = self._intra_op_parallelism_threads\n if self._inter_op_parallelism_threads is not None:\n config.inter_op_parallelism_threads = self._inter_op_parallelism_threads\n\n if self._soft_device_placement is not None:\n config.allow_soft_placement = self._soft_device_placement\n else:\n config.allow_soft_placement = self.executing_eagerly()\n\n if self._log_device_placement is not None:\n config.log_device_placement = self._log_device_placement\n\n if self._enable_mlir_bridge is not None:\n config.experimental.enable_mlir_bridge = self._enable_mlir_bridge\n if self._enable_mlir_graph_optimization is not None:\n config.experimental.enable_mlir_graph_optimization = (\n self._enable_mlir_graph_optimization)\n\n def rewriter_toggle(option):\n toggle = self._optimizer_experimental_options.get(option, None)\n if toggle is None:\n return\n\n 
setattr(config.graph_options.rewrite_options,\n option,\n (rewriter_config_pb2.RewriterConfig.ON\n if toggle else rewriter_config_pb2.RewriterConfig.OFF))\n\n def rewriter_bool(option):\n toggle = self._optimizer_experimental_options.get(option, None)\n if toggle is None:\n return\n\n setattr(config.graph_options.rewrite_options,\n option,\n toggle)\n\n rewriter_toggle(\"layout_optimizer\")\n rewriter_toggle(\"constant_folding\")\n rewriter_toggle(\"shape_optimization\")\n rewriter_toggle(\"remapping\")\n rewriter_toggle(\"arithmetic_optimization\")\n rewriter_toggle(\"dependency_optimization\")\n rewriter_toggle(\"loop_optimization\")\n rewriter_toggle(\"function_optimization\")\n rewriter_toggle(\"debug_stripper\")\n rewriter_bool(\"disable_model_pruning\")\n rewriter_toggle(\"scoped_allocator_optimization\")\n rewriter_toggle(\"pin_to_host_optimization\")\n rewriter_toggle(\"implementation_selector\")\n rewriter_toggle(\"auto_mixed_precision\")\n rewriter_bool(\"disable_meta_optimizer\")\n nodes = self._optimizer_experimental_options.get(\"min_graph_nodes\", None)\n if nodes is not None:\n config.graph_options.rewrite_options.min_graph_nodes = nodes\n\n # Compute device counts\n config.device_count[\"CPU\"] = 0\n config.device_count[\"GPU\"] = 0\n for dev in self._physical_devices:\n if dev not in self._visible_device_list:\n continue\n\n virtual_devices = self._virtual_device_map.get(dev)\n if virtual_devices is None:\n config.device_count[dev.device_type] += 1\n else:\n config.device_count[dev.device_type] += len(virtual_devices)\n\n # Configure gpu_options\n gpu_options = self._compute_gpu_options()\n config.gpu_options.MergeFrom(gpu_options)\n\n # Configure collective ops\n if self._collective_leader:\n config.experimental.collective_group_leader = self._collective_leader\n if self._collective_scoped_allocator_enabled_ops:\n rewrite_options = config.graph_options.rewrite_options\n rewrite_options.scoped_allocator_optimization = (\n rewriter_config_pb2.RewriterConfig.ON)\n del rewrite_options.scoped_allocator_opts.enable_op[:]\n for op in self._collective_scoped_allocator_enabled_ops:\n rewrite_options.scoped_allocator_opts.enable_op.append(op)\n if self._collective_use_nccl_communication:\n config.experimental.collective_nccl = True\n if self._collective_device_filters:\n del config.device_filters[:]\n for f in self._collective_device_filters:\n config.device_filters.append(f)\n\n return config\n\n def _compute_gpu_options(self):\n \"\"\"Build the GPUOptions proto.\"\"\"\n visible_device_list = []\n virtual_devices = []\n gpu_index = -1\n memory_growths = set()\n for dev in self.list_physical_devices(\"GPU\"):\n gpu_index += 1\n\n if dev not in self._visible_device_list:\n continue\n\n growth = self._memory_growth_map[dev]\n memory_growths.add(growth)\n visible_device_list.append(str(gpu_index))\n\n if self._virtual_device_map:\n vdevs = self._virtual_device_map.get(dev, [])\n device_limits = []\n priority = []\n for virt_dev in vdevs:\n device_limits.append(virt_dev.memory_limit)\n if virt_dev.experimental_priority is not None:\n priority.append(virt_dev.experimental_priority)\n # If priority is specified, it must be specified for all virtual\n # devices.\n if priority and len(device_limits) != len(priority):\n raise ValueError(\"priority must be specified for all virtual devices\")\n\n virtual_devices.append(\n config_pb2.GPUOptions.Experimental.VirtualDevices(\n memory_limit_mb=device_limits, priority=priority))\n\n # Only compute growth if virtual devices have not been 
configured and we\n # have GPUs\n if not virtual_devices and memory_growths:\n if len(memory_growths) > 1:\n raise ValueError(\"Memory growth cannot differ between GPU devices\")\n allow_growth = memory_growths.pop()\n else:\n allow_growth = None\n\n return config_pb2.GPUOptions(\n allow_growth=allow_growth,\n visible_device_list=\",\".join(visible_device_list),\n experimental=config_pb2.GPUOptions.Experimental(\n virtual_devices=virtual_devices))\n\n @property\n def function_call_options(self):\n \"\"\"Returns function call options for current thread.\n\n Note that the returned object is still referenced by the eager context.\n\n Returns: the FunctionCallOptions for current thread.\n \"\"\"\n if self._thread_local_data.function_call_options is None:\n config = self.config\n\n # Default to soft placement for functions unless specified\n if self._soft_device_placement is None:\n config.allow_soft_placement = True\n self._thread_local_data.function_call_options = FunctionCallOptions(\n config_proto=config)\n\n return self._thread_local_data.function_call_options\n\n @function_call_options.setter\n def function_call_options(self, options):\n \"\"\"Returns function call options for current thread.\"\"\"\n self._thread_local_data.function_call_options = options\n\n def num_gpus(self):\n \"\"\"The number of GPUs available to execute operations.\"\"\"\n self.ensure_initialized()\n return self._num_gpus\n\n def add_function(self, fn):\n \"\"\"Add a function definition to the context.\n\n Once added, the function (identified by its name) can be executed like any\n other operation.\n\n Args:\n fn: A wrapped TF_Function (returned from TF_GraphToFunction_wrapper).\n \"\"\"\n self.ensure_initialized()\n pywrap_tfe.TFE_ContextAddFunction(self._handle, fn)\n\n def add_function_def(self, fdef):\n \"\"\"Add a function definition to the context.\n\n Once added, the function (identified by its name) can be executed like any\n other operation.\n\n Args:\n fdef: A FunctionDef protocol buffer message.\n \"\"\"\n self.ensure_initialized()\n fdef_string = fdef.SerializeToString()\n pywrap_tfe.TFE_ContextAddFunctionDef(self._handle, fdef_string,\n len(fdef_string))\n\n def get_function_def(self, name):\n \"\"\"Get a function definition from the context.\n\n Args:\n name: function signature name.\n\n Returns:\n The requested FunctionDef.\n\n Raises:\n tf.errors.NotFoundError: if name is not the name of a registered function.\n \"\"\"\n with c_api_util.tf_buffer() as buffer_:\n pywrap_tfe.TFE_ContextGetFunctionDef(self._handle, name, buffer_)\n proto_data = pywrap_tf_session.TF_GetBuffer(buffer_)\n function_def = function_pb2.FunctionDef()\n function_def.ParseFromString(proto_data)\n\n return function_def\n\n def register_custom_device(self, device_capsule, device_name,\n device_info_capsule):\n \"\"\"Calls TFE_RegisterCustomDevice. 
See the non-member function.\"\"\"\n self.ensure_initialized()\n pywrap_tfe.TFE_Py_RegisterCustomDevice(self._handle, device_capsule,\n device_name, device_info_capsule)\n\n def pack_eager_tensors(self, tensors):\n \"\"\"Pack multiple `EagerTensor`s of the same dtype and shape.\n\n Args:\n tensors: a list of EagerTensors to pack.\n\n Returns:\n A packed EagerTensor.\n \"\"\"\n self.ensure_initialized()\n if self._lazy_remote_inputs_copy is not None and (\n not self._lazy_remote_inputs_copy):\n raise ValueError(\"Packing eager tensors is not supported when \"\n \"lazy_remote_inputs_copy is disabled.\")\n return pywrap_tfe.TFE_Py_PackEagerTensors(self._handle, tensors)\n\n def remove_function(self, name):\n \"\"\"Remove a function from the context.\n\n Once removed, the function cannot be executed anymore.\n\n Args:\n name: function signature name.\n \"\"\"\n self.ensure_initialized()\n pywrap_tfe.TFE_ContextRemoveFunction(self._handle, name)\n\n def has_function(self, name):\n \"\"\"Check if a function `name` is registered.\"\"\"\n self.ensure_initialized()\n return bool(pywrap_tfe.TFE_ContextHasFunction(self._handle, name))\n\n def add_op_callback(self, callback):\n \"\"\"Add a post-op callback to the context.\n\n A post-op callback is invoked immediately after an eager operation or\n function has finished execution or after a op has been added to a graph,\n providing access to the op's type, name input and output tensors. Multiple\n op callbacks can be added, in which case the callbacks will be invoked in\n the order in which they are added.\n\n Args:\n callback: a callable of the signature\n `f(op_type, inputs, attrs, outputs, op_name=None, graph=None)`.\n See doc strings in `op_callbacks.py` for details on the function\n signature and its semantics.\n \"\"\"\n if callback not in self._thread_local_data.op_callbacks:\n self._thread_local_data.op_callbacks.append(callback)\n\n def remove_op_callback(self, callback):\n \"\"\"Remove an already-registered op callback.\n\n Args:\n callback: The op callback to be removed.\n\n Raises:\n KeyError: If `callback` is not already registered.\n \"\"\"\n if callback not in self._thread_local_data.op_callbacks:\n raise KeyError(\n \"The specified op callback has not been registered, \"\n \"and hence cannot be removed.\")\n del self._thread_local_data.op_callbacks[\n self._thread_local_data.op_callbacks.index(callback)]\n\n @property\n def op_callbacks(self):\n return self._thread_local_data.op_callbacks\n\n @property\n def invoking_op_callbacks(self):\n return self._thread_local_data.invoking_op_callbacks\n\n @invoking_op_callbacks.setter\n def invoking_op_callbacks(self, value):\n self._thread_local_data.invoking_op_callbacks = value\n\n def _initialize_physical_devices(self):\n \"\"\"Get local devices visible to the system.\"\"\"\n # We lazy initialize self._physical_devices since we do not want to do this\n # the constructor since the backend may not be initialized yet.\n with self._device_lock:\n if self._physical_devices is not None:\n return\n\n devs = pywrap_tfe.TF_ListPhysicalDevices()\n self._physical_devices = [\n PhysicalDevice(name=d.decode(),\n device_type=d.decode().split(\":\")[1]) for d in devs]\n self._physical_device_to_index = {\n p: i for i, p in enumerate(self._physical_devices)\n }\n\n # Construct the visible device list from all physical devices but ignore\n # XLA devices\n self._visible_device_list = [\n d for d in self._physical_devices\n if not d.device_type.startswith(\"XLA\")\n ]\n self._memory_growth_map = {\n d: None for d 
in self._physical_devices if d.device_type == \"GPU\"\n }\n\n # Import device settings that may have been passed into the constructor\n self._import_config()\n\n def list_physical_devices(self, device_type=None):\n \"\"\"List local devices visible to the system.\n\n This API allows a client to query the devices before they have been\n initialized by the eager runtime. Additionally a user can filter by device\n type, to get only CPUs or GPUs.\n\n Args:\n device_type: Optional device type to limit results to\n\n Returns:\n List of PhysicalDevice objects.\n \"\"\"\n self._initialize_physical_devices()\n\n if device_type is None:\n return list(self._physical_devices)\n\n return [d for d in self._physical_devices if d.device_type == device_type]\n\n def get_device_details(self, device): # pylint: disable=redefined-outer-name\n \"\"\"Returns details about a physical devices.\n\n Args:\n device: A `tf.config.PhysicalDevice` returned by\n `tf.config.list_physical_devices` or `tf.config.get_visible_devices`.\n\n Returns:\n A dict with string keys.\n \"\"\"\n if not isinstance(device, PhysicalDevice):\n raise ValueError(\"device must be a tf.config.PhysicalDevice, but got: \"\n \"%s\" % (device,))\n if (self._physical_device_to_index is None or\n device not in self._physical_device_to_index):\n raise ValueError(\"The PhysicalDevice must be one obtained from \"\n \"calling `tf.config.list_physical_devices`, but got: \"\n \"%s\" % (device,))\n index = self._physical_device_to_index[device]\n details = pywrap_tfe.TF_GetDeviceDetails(index)\n\n # Change compute_capability from a string to a tuple\n if \"compute_capability\" in details:\n try:\n major, minor = details[\"compute_capability\"].split(\".\")\n details[\"compute_capability\"] = (int(major), int(minor))\n except ValueError:\n raise RuntimeError(\"Device returned compute capability an in invalid \"\n \"format: %s\" % details[\"compute_capability\"])\n return details\n\n def _import_config(self):\n \"\"\"Import config if passed in during construction.\n\n If Context was created with a ConfigProto such as when calling\n tf.compat.v1.enable_eager_execution(), then we need to pull out the\n various pieces we might be replacing and import then into our internal\n class representation.\n \"\"\"\n if self._config is None:\n return\n\n num_cpus = self._config.device_count.get(\"CPU\", 1)\n if num_cpus != 1:\n cpus = [d for d in self._physical_devices if d.device_type == \"CPU\"]\n if num_cpus == 0:\n self.set_visible_devices([], \"CPU\")\n elif num_cpus > 1:\n self.set_logical_device_configuration(\n cpus[0], [LogicalDeviceConfiguration() for _ in range(num_cpus)])\n\n # Parse GPU options\n gpus = [d for d in self._physical_devices if d.device_type == \"GPU\"]\n\n # If there are no GPUs detected, simply ignore all the GPU options passed in\n # rather than doing any validation checks.\n if not gpus:\n return\n\n gpu_count = self._config.device_count.get(\"GPU\", None)\n\n visible_gpus = []\n # TODO(gjn): Handle importing existing virtual GPU configuration\n visible_indices = self._config.gpu_options.visible_device_list\n if visible_indices:\n for index in visible_indices.split(\",\"):\n if int(index) >= len(gpus):\n raise ValueError(\"Invalid visible device index: %s\" % index)\n visible_gpus.append(gpus[int(index)])\n else:\n visible_gpus = gpus\n\n if gpu_count is not None:\n visible_gpus = visible_gpus[:gpu_count]\n\n self.set_visible_devices(visible_gpus, \"GPU\")\n\n def list_logical_devices(self, device_type=None):\n \"\"\"Return logical 
devices.\"\"\"\n self.ensure_initialized()\n if device_type is None:\n return list(self._logical_devices)\n\n return [d for d in self._logical_devices if d.device_type == device_type]\n\n def get_visible_devices(self, device_type=None):\n \"\"\"Get the list of visible devices.\"\"\"\n self._initialize_physical_devices()\n\n if device_type is None:\n return list(self._visible_device_list)\n\n return [\n d for d in self._visible_device_list if d.device_type == device_type\n ]\n\n def set_visible_devices(self, devices, device_type=None):\n \"\"\"Set the list of visible devices.\"\"\"\n self._initialize_physical_devices()\n\n if not isinstance(devices, list):\n devices = [devices]\n\n for d in devices:\n if d not in self._physical_devices:\n raise ValueError(\"Unrecognized device: %s\" % repr(d))\n if device_type is not None and d.device_type != device_type:\n raise ValueError(\"Unrecognized device: %s\" % repr(d))\n\n visible_device_list = []\n if device_type is not None:\n visible_device_list = [\n d for d in self._visible_device_list if d.device_type != device_type\n ]\n\n visible_device_list += devices\n\n if self._visible_device_list == visible_device_list:\n return\n\n if self._context_handle is not None:\n raise RuntimeError(\n \"Visible devices cannot be modified after being initialized\")\n\n self._visible_device_list = visible_device_list\n\n def get_memory_growth(self, dev):\n \"\"\"Get if memory growth is enabled for a PhysicalDevice.\"\"\"\n self._initialize_physical_devices()\n\n if dev not in self._physical_devices:\n raise ValueError(\"Unrecognized device: %s\" % repr(dev))\n\n return self._memory_growth_map[dev]\n\n def set_memory_growth(self, dev, enable):\n \"\"\"Set if memory growth should be enabled for a PhysicalDevice.\"\"\"\n self._initialize_physical_devices()\n\n if dev not in self._physical_devices:\n raise ValueError(\"Unrecognized device: %s\" % repr(dev))\n\n if dev in self._virtual_device_map:\n raise ValueError(\n \"Cannot set memory growth on device when virtual devices configured\")\n\n if dev.device_type != \"GPU\":\n raise ValueError(\"Cannot set memory growth on non-GPU devices\")\n\n if self._memory_growth_map.get(dev) == enable:\n return\n\n if self._context_handle is not None:\n raise RuntimeError(\n \"Physical devices cannot be modified after being initialized\")\n\n self._memory_growth_map[dev] = enable\n\n def get_logical_device_configuration(self, dev):\n \"\"\"Get the virtual device configuration for a PhysicalDevice.\"\"\"\n self._initialize_physical_devices()\n\n if dev not in self._physical_devices:\n raise ValueError(\"Unrecognized device: %s\" % repr(dev))\n\n return self._virtual_device_map.get(dev)\n\n def set_logical_device_configuration(self, dev, virtual_devices):\n \"\"\"Set the virtual device configuration for a PhysicalDevice.\"\"\"\n self._initialize_physical_devices()\n\n if dev not in self._physical_devices:\n raise ValueError(\"Unrecognized device: %s\" % repr(dev))\n\n if dev.device_type == \"CPU\":\n for vdev in virtual_devices:\n if vdev.memory_limit is not None:\n raise ValueError(\"Setting memory limit on CPU virtual devices is \"\n \"currently not supported\")\n if vdev.experimental_priority is not None:\n raise ValueError(\"Setting experimental_priority on CPU virtual \"\n \" devices is currently not supported\")\n elif dev.device_type == \"GPU\":\n for vdev in virtual_devices:\n if vdev.memory_limit is None:\n raise ValueError(\n \"Setting memory limit is required for GPU virtual devices\")\n else:\n raise 
ValueError(\"Virtual devices are not supported for %s\" %\n dev.device_type)\n\n if self._virtual_device_map.get(dev) == virtual_devices:\n return\n\n if self._context_handle is not None:\n raise RuntimeError(\n \"Virtual devices cannot be modified after being initialized\")\n\n self._virtual_device_map[dev] = virtual_devices\n\n @property\n def enable_mlir_bridge(self):\n return self._enable_mlir_bridge\n\n @property\n def enable_mlir_graph_optimization(self):\n return self._enable_mlir_graph_optimization\n\n @enable_mlir_bridge.setter\n def enable_mlir_bridge(self, enabled):\n self._enable_mlir_bridge = enabled\n self._thread_local_data.function_call_options = None\n\n @enable_mlir_graph_optimization.setter\n def enable_mlir_graph_optimization(self, enabled):\n self._enable_mlir_graph_optimization = enabled\n self._thread_local_data.function_call_options = None\n\n @property\n def optimizer_jit(self):\n level = self.config.graph_options.optimizer_options.global_jit_level\n return (level == config_pb2.OptimizerOptions.ON_1 or\n level == config_pb2.OptimizerOptions.ON_2)\n\n @optimizer_jit.setter\n def optimizer_jit(self, enabled):\n self._optimizer_jit = enabled\n\n self._thread_local_data.function_call_options = None\n\n def get_optimizer_experimental_options(self):\n \"\"\"Get experimental options for the optimizer.\n\n Returns:\n Dictionary of current option values\n \"\"\"\n rewrite_options = self.config.graph_options.rewrite_options\n options = {}\n\n def rewriter_toggle(option):\n attr = getattr(rewrite_options, option)\n if attr != 0:\n options[option] = (attr == rewriter_config_pb2.RewriterConfig.ON)\n\n def rewriter_bool(option):\n options[option] = getattr(rewrite_options, option)\n\n rewriter_toggle(\"layout_optimizer\")\n rewriter_toggle(\"constant_folding\")\n rewriter_toggle(\"shape_optimization\")\n rewriter_toggle(\"remapping\")\n rewriter_toggle(\"arithmetic_optimization\")\n rewriter_toggle(\"dependency_optimization\")\n rewriter_toggle(\"loop_optimization\")\n rewriter_toggle(\"function_optimization\")\n rewriter_toggle(\"debug_stripper\")\n rewriter_bool(\"disable_model_pruning\")\n rewriter_toggle(\"scoped_allocator_optimization\")\n rewriter_toggle(\"pin_to_host_optimization\")\n rewriter_toggle(\"implementation_selector\")\n rewriter_toggle(\"auto_mixed_precision\")\n rewriter_bool(\"disable_meta_optimizer\")\n\n if rewrite_options.min_graph_nodes != 0:\n options[\"min_graph_nodes\"] = rewrite_options.min_graph_nodes\n\n return options\n\n def set_optimizer_experimental_options(self, options):\n \"\"\"Set experimental options for the optimizer.\n\n Args:\n options: Dictionary of options to modify\n \"\"\"\n self._optimizer_experimental_options.update(options)\n\n self._thread_local_data.function_call_options = None\n\n @property\n def intra_op_parallelism_threads(self):\n return self.config.intra_op_parallelism_threads\n\n @intra_op_parallelism_threads.setter\n def intra_op_parallelism_threads(self, num_threads):\n if self._intra_op_parallelism_threads == num_threads:\n return\n\n if self._context_handle is not None:\n raise RuntimeError(\n \"Intra op parallelism cannot be modified after initialization.\")\n\n self._intra_op_parallelism_threads = num_threads\n\n @property\n def inter_op_parallelism_threads(self):\n return self.config.inter_op_parallelism_threads\n\n @inter_op_parallelism_threads.setter\n def inter_op_parallelism_threads(self, num_threads):\n if self._inter_op_parallelism_threads == num_threads:\n return\n\n if self._context_handle is not None:\n 
raise RuntimeError(\n \"Inter op parallelism cannot be modified after initialization.\")\n\n self._inter_op_parallelism_threads = num_threads\n\n @property\n def soft_device_placement(self):\n return self.config.allow_soft_placement\n\n @soft_device_placement.setter\n def soft_device_placement(self, enable):\n if self._context_handle is not None:\n pywrap_tfe.TFE_ContextSetSoftDevicePlacement(self._handle, enable)\n\n self._soft_device_placement = enable\n self._thread_local_data.function_call_options = None\n\n @property\n def log_device_placement(self):\n return self.config.log_device_placement\n\n @log_device_placement.setter\n def log_device_placement(self, enable):\n if self._context_handle is not None:\n pywrap_tfe.TFE_ContextSetLogDevicePlacement(self._handle, enable)\n\n self._log_device_placement = enable\n self._thread_local_data.function_call_options = None\n\n @property\n def device_policy(self):\n # Only get the policy from the context if it has already been initialized\n if self._context_handle is not None:\n return pywrap_tfe.TFE_ContextGetDevicePlacementPolicy(self._handle)\n\n return self._device_policy\n\n @device_policy.setter\n def device_policy(self, policy):\n if policy is None:\n policy = DEVICE_PLACEMENT_SILENT\n\n if self._device_policy != policy:\n self._device_policy = policy\n\n # Only set the policy if the context has already been initialized\n if self._context_handle is not None:\n pywrap_tfe.TFE_ContextSetThreadLocalDevicePlacementPolicy(\n self._handle, self._device_policy)\n\n @property\n def mirroring_policy(self):\n # Only get the policy from the context if it has already been initialized\n if self._context_handle is not None:\n return pywrap_tfe.TFE_ContextGetMirroringPolicy(self._handle)\n\n return self._mirroring_policy\n\n @mirroring_policy.setter\n def mirroring_policy(self, policy):\n if policy is None:\n policy = MIRRORING_NONE\n\n if self._mirroring_policy is None or self._mirroring_policy != policy:\n self._mirroring_policy = policy\n\n # Only set the policy if the context has already been initialized\n if self._context_handle is not None:\n pywrap_tfe.TFE_ContextSetThreadLocalMirroringPolicy(\n self._handle, self._mirroring_policy)\n\n @property\n def lazy_remote_inputs_copy(self):\n return self._lazy_remote_inputs_copy\n\n @lazy_remote_inputs_copy.setter\n def lazy_remote_inputs_copy(self, lazy_copy):\n \"\"\"Sets whether to copy remote inputs lazily for functions.\"\"\"\n if not isinstance(lazy_copy, bool):\n raise ValueError(\"Expecting a boolean but got %s\" % type(lazy_copy))\n\n if self._lazy_remote_inputs_copy != lazy_copy:\n if self._initialized:\n raise ValueError(\n \"lazy_remote_inputs_copy should be set before being initialized.\")\n self._lazy_remote_inputs_copy = lazy_copy\n\n @property\n def use_tfrt(self):\n return self._use_tfrt\n\n @use_tfrt.setter\n def use_tfrt(self, tfrt):\n \"\"\"Sets whether to use TFRT.\"\"\"\n if not isinstance(tfrt, bool):\n raise ValueError(\"Expecting a boolean but got %s\" % type(tfrt))\n\n if self._use_tfrt != tfrt:\n if self._initialized:\n raise ValueError(\"use_tfrt should be set before being initialized.\")\n self._use_tfrt = tfrt\n\n def enable_run_metadata(self):\n \"\"\"Enables tracing of op execution via RunMetadata.\n\n To retrieve the accumulated metadata call context.export_run_metadata()\n and to stop tracing call context.disable_run_metadata().\n \"\"\"\n self.ensure_initialized()\n pywrap_tfe.TFE_ContextEnableRunMetadata(self._handle)\n\n def disable_run_metadata(self):\n 
\"\"\"Disables tracing of op execution via RunMetadata.\"\"\"\n if not self._context_handle:\n return\n pywrap_tfe.TFE_ContextDisableRunMetadata(self._context_handle)\n\n def enable_graph_collection(self):\n \"\"\"Enables graph collection of executed functions.\n\n To retrieve the accumulated graphs call context.export_run_metadata()\n and to stop collecting graphs call context.disable_graph_collection().\n \"\"\"\n self.ensure_initialized()\n pywrap_tfe.TFE_ContextEnableGraphCollection(self._handle)\n\n def disable_graph_collection(self):\n \"\"\"Disables graph collection of executed functions.\"\"\"\n if not self._context_handle:\n return\n pywrap_tfe.TFE_ContextDisableGraphCollection(self._context_handle)\n\n def export_run_metadata(self):\n \"\"\"Returns a RunMetadata proto with accumulated information.\n\n The returned protocol buffer contains information since the most recent call\n to either enable_run_metadata or export_run_metadata.\n\n Returns:\n A RunMetadata protocol buffer. Or None if not enabled.\n \"\"\"\n if not self._context_handle:\n return None\n with c_api_util.tf_buffer() as buffer_:\n pywrap_tfe.TFE_ContextExportRunMetadata(self._context_handle, buffer_)\n proto_data = pywrap_tf_session.TF_GetBuffer(buffer_)\n run_metadata = config_pb2.RunMetadata()\n run_metadata.ParseFromString(compat.as_bytes(proto_data))\n return run_metadata\n\n @property\n def context_switches(self):\n \"\"\"Returns a stack of context switches.\"\"\"\n return self._context_switches\n\n def start_step(self):\n pywrap_tfe.TFE_ContextStartStep(self._handle)\n\n def end_step(self):\n pywrap_tfe.TFE_ContextEndStep(self._handle)\n\n\nclass _EagerDeviceContext(object):\n \"\"\"Context-manager forcing placement of ops and Tensors on a device.\"\"\"\n\n def __init__(self, ctx, device_name):\n self._device_name = device_name\n self._ctx = ctx\n self._stack = []\n\n def __enter__(self):\n ctx = self._ctx\n old_device_name = ctx.device_name\n old_device_spec = ctx.device_spec\n new_device_name = self._device_name\n cache_key = (old_device_name, new_device_name)\n try:\n new_device_name, new_device_spec = _device_parsing_cache[cache_key]\n except TypeError:\n # Error while trying to compute the cache key.\n raise ValueError(\"Expecting a string device name. Got %s(%s)\" %\n (type(new_device_name), new_device_name))\n except KeyError:\n # Handle a cache miss.\n if new_device_name is not None:\n if not isinstance(new_device_name, six.string_types):\n raise ValueError(\"Expecting a string device name. 
Got %s(%s)\" %\n (type(new_device_name), new_device_name))\n device_spec = pydev.DeviceSpec.from_string(new_device_name)\n if old_device_name:\n new_device_spec = copy.copy(old_device_spec)\n else:\n ctx.ensure_initialized()\n new_device_spec = pydev.DeviceSpec.from_string(\n ctx._context_devices[0]) # pylint: disable=protected-access\n new_device_spec = new_device_spec.make_merged_spec(device_spec)\n else:\n new_device_spec = pydev.DeviceSpec.from_string(\"\")\n new_device_name = new_device_spec.to_string()\n _device_parsing_cache[cache_key] = (new_device_name, new_device_spec)\n\n ctx._set_device(new_device_name, new_device_spec) # pylint: disable=protected-access\n self._stack.append((old_device_name, old_device_spec, new_device_spec))\n\n def __exit__(self, *ex_info):\n ctx = self._ctx\n old_device_name, old_device_spec, new_device_spec = self._stack[-1]\n if ctx.device_spec is not new_device_spec:\n raise RuntimeError(\n \"Exiting device scope without proper scope nesting\")\n del self._stack[-1]\n ctx._set_device(old_device_name, old_device_spec) # pylint: disable=protected-access\n\n\n# Do not set directly. Use _set_context.\n_context = None\n_context_lock = threading.Lock()\n\n\ndef _set_context_locked(ctx):\n global _context\n pywrap_tfe.TFE_Py_SetEagerContext(ctx)\n _context = ctx\n\n\ndef _set_context(ctx):\n with _context_lock:\n _set_context_locked(ctx)\n\n\ndef _create_context():\n with _context_lock:\n if _context is None:\n ctx = Context()\n _set_context_locked(ctx)\n\n\ndef _reset_context():\n \"\"\"Clears and re-initializes the singleton context.\n\n Should only be used for testing.\n \"\"\"\n global _context\n with _context_lock:\n if _context is not None:\n _context._clear_caches()\n _context = None\n _create_context()\n pywrap_tfe.TFE_ClearScalarCache()\n\n\ndef context():\n \"\"\"Returns a singleton context object.\"\"\"\n if _context is None:\n _create_context()\n return _context\n\n\ndef context_safe():\n \"\"\"Returns current context (or None if one hasn't been initialized).\"\"\"\n return _context\n\n\ndef ensure_initialized():\n \"\"\"Initialize the context.\"\"\"\n context().ensure_initialized()\n\n\ndef set_global_seed(seed):\n \"\"\"Sets the eager mode seed.\"\"\"\n context()._set_global_seed(seed) # pylint: disable=protected-access\n\n\ndef global_seed():\n \"\"\"Returns the eager mode seed.\"\"\"\n return context()._seed # pylint: disable=protected-access\n\n\ndef internal_operation_seed():\n \"\"\"Returns the operation seed generated based on global seed.\"\"\"\n return context()._internal_operation_seed() # pylint: disable=protected-access\n\n\n@tf_export(\"executing_eagerly\", v1=[])\ndef executing_eagerly():\n \"\"\"Checks whether the current thread has eager execution enabled.\n\n Eager execution is enabled by default and this API returns `True`\n in most of cases. However, this API might return `False` in the following use\n cases.\n\n * Executing inside `tf.function`, unless under `tf.init_scope` or\n `tf.config.run_functions_eagerly(True)` is previously called.\n * Executing inside a transformation function for `tf.dataset`.\n * `tf.compat.v1.disable_eager_execution()` is called.\n\n General case:\n\n >>> print(tf.executing_eagerly())\n True\n\n Inside `tf.function`:\n\n >>> @tf.function\n ... def fn():\n ... with tf.init_scope():\n ... print(tf.executing_eagerly())\n ... 
print(tf.executing_eagerly())\n >>> fn()\n True\n False\n\n Inside `tf.function` after\n\n `tf.config.run_functions_eagerly(True)` is called:\n >>> tf.config.run_functions_eagerly(True)\n >>> @tf.function\n ... def fn():\n ... with tf.init_scope():\n ... print(tf.executing_eagerly())\n ... print(tf.executing_eagerly())\n >>> fn()\n True\n True\n >>> tf.config.run_functions_eagerly(False)\n\n Inside a transformation function for `tf.dataset`:\n\n >>> def data_fn(x):\n ... print(tf.executing_eagerly())\n ... return x\n >>> dataset = tf.data.Dataset.range(100)\n >>> dataset = dataset.map(data_fn)\n False\n\n Returns:\n `True` if the current thread has eager execution enabled.\n \"\"\"\n ctx = context_safe()\n if ctx is None:\n return default_execution_mode == EAGER_MODE\n\n return ctx.executing_eagerly()\n\n\n@tf_export(v1=[\"executing_eagerly\"])\ndef executing_eagerly_v1():\n \"\"\"Checks whether the current thread has eager execution enabled.\n\n Eager execution is typically enabled via\n `tf.compat.v1.enable_eager_execution`, but may also be enabled within the\n context of a Python function via tf.contrib.eager.py_func.\n\n When eager execution is enabled, returns `True` in most cases. However,\n this API might return `False` in the following use cases.\n\n * Executing inside `tf.function`, unless under `tf.init_scope` or\n `tf.config.run_functions_eagerly(True)` is previously called.\n * Executing inside a transformation function for `tf.dataset`.\n * `tf.compat.v1.disable_eager_execution()` is called.\n\n >>> tf.compat.v1.enable_eager_execution()\n\n General case:\n\n >>> print(tf.executing_eagerly())\n True\n\n Inside `tf.function`:\n\n >>> @tf.function\n ... def fn():\n ... with tf.init_scope():\n ... print(tf.executing_eagerly())\n ... print(tf.executing_eagerly())\n >>> fn()\n True\n False\n\n Inside `tf.function`\n after `tf.config.run_functions_eagerly(True)` is called:\n\n >>> tf.config.run_functions_eagerly(True)\n >>> @tf.function\n ... def fn():\n ... with tf.init_scope():\n ... print(tf.executing_eagerly())\n ... print(tf.executing_eagerly())\n >>> fn()\n True\n True\n >>> tf.config.run_functions_eagerly(False)\n\n Inside a transformation function for `tf.dataset`:\n\n >>> def data_fn(x):\n ... print(tf.executing_eagerly())\n ... return x\n >>> dataset = tf.data.Dataset.range(100)\n >>> dataset = dataset.map(data_fn)\n False\n\n Returns:\n `True` if the current thread has eager execution enabled.\n \"\"\"\n return executing_eagerly()\n\n\ndef in_eager_mode():\n \"\"\"Use executing_eagerly() instead. This function will be removed.\"\"\"\n return executing_eagerly()\n\n\ndef shared_name(name=None):\n \"\"\"Returns the anonymous shared name GUID if no shared name is specified.\n\n In eager mode we need to use a unique shared name to avoid spurious sharing\n issues. 
The runtime generates a unique name on our behalf when the reserved\n GUID is used as a shared name.\n\n Args:\n name: Optional shared name\n\n Returns:\n Eager compatible shared name.\n \"\"\"\n if name or not executing_eagerly():\n return name\n\n # Ensure a unique name when eager execution is enabled to avoid spurious\n # sharing issues.\n return \"cd2c89b7-88b7-44c8-ad83-06c2a9158347\"\n\n\ndef graph_mode():\n \"\"\"Context-manager to disable eager execution for the current thread.\"\"\"\n return context()._mode(GRAPH_MODE) # pylint: disable=protected-access\n\n\ndef eager_mode():\n \"\"\"Context-manager to enable eager execution for the current thread.\"\"\"\n return context()._mode(EAGER_MODE) # pylint: disable=protected-access\n\n\ndef scope_name():\n \"\"\"Name of the current scope.\"\"\"\n return context().scope_name\n\n\ndef device(name):\n \"\"\"Context-manager to force placement of operations and Tensors on a device.\n\n Example:\n ```python\n with tf.device('gpu:0'):\n with tf.device('cpu:0'):\n shape = tf.constant([], dtype=tf.int32)\n x = tf.random.truncated_normal(shape, tf.float32)\n ```\n will ensure that the `shape` Tensor is on CPU but the `truncated_normal`\n operation runs on GPU 0.\n\n Args:\n name: Name of the device (see context().devices()), or None to\n perform automatic placement.\n\n Returns:\n Context manager for setting the device.\n \"\"\"\n ensure_initialized()\n return context().device(name)\n\n\n@tf_export(\"debugging.get_log_device_placement\")\ndef get_log_device_placement():\n \"\"\"Get if device placements are logged.\n\n Returns:\n If device placements are logged.\n \"\"\"\n return context().log_device_placement\n\n\n@tf_export(\"debugging.set_log_device_placement\")\ndef set_log_device_placement(enabled):\n \"\"\"Set if device placements should be logged.\n\n Args:\n enabled: Whether to enabled device placement logging.\n \"\"\"\n context().log_device_placement = enabled\n\n\n@tf_contextlib.contextmanager\ndef device_policy(policy):\n \"\"\"Context manager for setting device placement policy for current thread.\"\"\"\n ctx = context()\n old_policy = ctx.device_policy\n try:\n ctx.device_policy = policy\n yield\n finally:\n ctx.device_policy = old_policy\n\n\n@tf_contextlib.contextmanager\ndef mirroring_policy(policy):\n \"\"\"Context manager for setting mirroring policy for current thread.\"\"\"\n ctx = context()\n old_policy = ctx.mirroring_policy\n try:\n ctx.mirroring_policy = policy\n yield\n finally:\n ctx.mirroring_policy = old_policy\n\n\ndef set_execution_mode(mode):\n \"\"\"Sets execution mode for the current thread.\"\"\"\n context().execution_mode = mode\n\n\n# TODO(fishx): remove this method.\n@tf_contextlib.contextmanager\ndef execution_mode(mode):\n \"\"\"Context manager for setting execution mode for current thread.\"\"\"\n if mode is None:\n yield\n else:\n ctx = context()\n executor_new = executor.new_executor(mode == ASYNC)\n executor_old = ctx.executor\n try:\n executor_old.wait()\n ctx.executor = executor_new\n yield\n finally:\n ctx.executor = executor_old\n executor_new.wait()\n\n\n@tf_contextlib.contextmanager\ndef executor_scope(e):\n \"\"\"Context manager for changing executor for current thread.\n\n Args:\n e: A Executor to execute eager ops under this scope. 
Setting it to None will\n switch back to use the default executor for the context.\n\n Yields:\n Context manager for setting the executor for current thread.\n \"\"\"\n ctx = context()\n executor_old = ctx.executor\n try:\n ctx.executor = e\n yield\n finally:\n ctx.executor = executor_old\n\n\n@tf_export(\"experimental.function_executor_type\")\n@tf_contextlib.contextmanager\ndef function_executor_type(executor_type):\n \"\"\"Context manager for setting the executor of eager defined functions.\n\n Eager defined functions are functions decorated by tf.contrib.eager.defun.\n\n Args:\n executor_type: a string for the name of the executor to be used to execute\n functions defined by tf.contrib.eager.defun.\n\n Yields:\n Context manager for setting the executor of eager defined functions.\n \"\"\"\n current_options = context().function_call_options\n old_options = copy.copy(current_options)\n try:\n current_options.executor_type = executor_type\n yield\n finally:\n context().function_call_options = old_options\n\n\ndef is_async():\n \"\"\"Returns true if current thread is in async mode.\"\"\"\n return context().is_async()\n\n\ndef num_gpus():\n \"\"\"Get the number of available GPU devices.\n\n Returns:\n The number of available GPU devices.\n \"\"\"\n return context().num_gpus()\n\n\ndef enable_run_metadata():\n \"\"\"Enables tracing of op execution via RunMetadata.\n\n To retrieve the accumulated metadata call context.export_run_metadata()\n and to stop tracing call context.disable_run_metadata().\n \"\"\"\n context().enable_run_metadata()\n\n\ndef disable_run_metadata():\n \"\"\"Disables tracing of op execution via RunMetadata.\"\"\"\n context().disable_run_metadata()\n\n\ndef enable_graph_collection():\n \"\"\"Enables graph collection of executed functions.\n\n To retrieve the accumulated graphs call context.export_run_metadata()\n and to stop collecting graphs call context.disable_graph_collection().\n \"\"\"\n context().enable_graph_collection()\n\n\ndef disable_graph_collection():\n \"\"\"Disables graph collection of executed functions.\"\"\"\n context().disable_graph_collection()\n\n\ndef export_run_metadata():\n \"\"\"Returns a RunMetadata proto with accumulated information.\n\n The returned protocol buffer contains information since the most recent call\n to either enable_run_metadata or export_run_metadata.\n\n Returns:\n A RunMetadata protocol buffer.\n \"\"\"\n return context().export_run_metadata()\n\n\[email protected]\ndef collect_graphs(optimized=True):\n \"\"\"Collects a flat list of pre- or post-optimization graphs.\n\n The collected graphs include device placements, which can be useful for\n testing.\n\n Usage:\n\n ```\n @def_function.function\n def f(x):\n return x + constant_op.constant(1.)\n\n with context.collect_graphs() as graphs:\n with ops.device(\"CPU:0\"):\n f(constant_op.constant(1.))\n\n graph, = graphs # `graph` contains a single GraphDef for inspection\n ```\n\n Args:\n optimized: whether to collect optimized graphs or non-optimized graphs\n Yields:\n A list of GraphDefs, populated when the context manager exits.\n \"\"\"\n ctx = context()\n ctx.enable_graph_collection()\n try:\n graphs = []\n yield graphs\n metadata = ctx.export_run_metadata()\n finally:\n ctx.disable_graph_collection()\n for graph in metadata.function_graphs:\n if optimized:\n graphs.append(graph.post_optimization_graph)\n else:\n graphs.append(graph.pre_optimization_graph)\n\n\ndef get_server_def():\n return context().get_server_def()\n\n\ndef set_server_def(server_def):\n 
context().set_server_def(server_def)\n\n\ndef update_server_def(server_def):\n context().update_server_def(server_def)\n\n\ndef check_alive(worker_name):\n return context().check_alive(worker_name)\n\n\n@tf_export(\"experimental.async_scope\")\n@tf_contextlib.contextmanager\ndef async_scope():\n \"\"\"Context manager for grouping async operations.\n\n Ops/function calls inside the scope can return before finishing the actual\n execution. When exiting the async scope, a synchronization barrier will be\n automatically added to ensure the completion of all async op and function\n execution, potentially raising exceptions if async execution results in\n an error state.\n\n Users may write the following code to asynchronuously invoke `train_step_fn`\n and log the `loss` metric for every `num_steps` steps in a training loop.\n `train_step_fn` internally consumes data using `iterator.get_next()`, and may\n throw OutOfRangeError when running out of data. In the case:\n\n ```\n try:\n with tf.experimental.async_scope():\n for _ in range(num_steps):\n # Step function updates the metric `loss` internally\n train_step_fn()\n except tf.errors.OutOfRangeError:\n tf.experimental.async_clear_error()\n logging.info('loss =', loss.numpy())\n ```\n\n Yields:\n Context manager for grouping async operations.\n \"\"\"\n # TODO(haoyuzhang): replace env var once we have a config method to turn on\n # and off async streaming RPC\n remote_async_env_var = \"TF_ENABLE_EAGER_CLIENT_STREAMING_ENQUEUE\"\n old_policy = os.environ.get(remote_async_env_var)\n try:\n os.environ[remote_async_env_var] = str(True)\n yield\n # Note: sync local and remote executors iff the async block does not raise\n # an exception. Triggering sync after an exception may lead to derived\n # runtime errors and unexpected exception types.\n context().sync_executors()\n finally:\n if old_policy is None:\n del os.environ[remote_async_env_var]\n else:\n os.environ[remote_async_env_var] = old_policy\n\n\ndef async_wait():\n \"\"\"Sync all async operations and raise any errors during execution.\n\n In async execution mode, an op/function call can return before finishing the\n actual execution. Calling this method creates a synchronization barrier for\n all async op and function execution. It only returns when all pending nodes\n are finished, potentially raising exceptions if async execution results in\n an error state.\n \"\"\"\n context().sync_executors()\n\n\n@tf_export(\"experimental.async_clear_error\")\ndef async_clear_error():\n \"\"\"Clear pending operations and error statuses in async execution.\n\n In async execution mode, an error in op/function execution can lead to errors\n in subsequent ops/functions that are scheduled but not yet executed. 
Calling\n this method clears all pending operations and reset the async execution state.\n\n Example:\n\n ```\n while True:\n try:\n # Step function updates the metric `loss` internally\n train_step_fn()\n except tf.errors.OutOfRangeError:\n tf.experimental.async_clear_error()\n break\n logging.info('loss =', loss.numpy())\n ```\n \"\"\"\n context().clear_executor_errors()\n\n\ndef add_function(fdef):\n \"\"\"Add a function definition to the context.\"\"\"\n context().add_function(fdef)\n\n\ndef remove_function(name):\n \"\"\"Remove a function from the context.\"\"\"\n context().remove_function(name)\n\n\ndef get_function_def(name):\n return context().get_function_def(name)\n\n\ndef register_custom_device(device_capsule, device_name, device_info_capsule):\n \"\"\"Calls TFE_RegisterCustomDevice to register a custom device with Python.\n\n Enables using C extensions specifying a custom device from Python. See the\n experimental eager C API in tensorflow/c/eager/c_api_experimental.h for\n details.\n\n Note that custom devices are not currently supported inside `tf.function`s.\n\n Args:\n device_capsule: A PyCapsule with the name set to 'TFE_CustomDevice'\n containing a pointer to a TFE_CustomDevice struct. The capsule retains\n ownership of the memory.\n device_name: A string indicating the name to register the custom device\n under, e.g. '/job:localhost/replica:0/task:0/device:CUSTOM:0'. It may\n subsequently be passed to `with tf.device(...):`.\n device_info_capsule: A PyCapsule with the name set to\n 'TFE_CustomDevice_DeviceInfo' containing a pointer to a device-specific\n struct with the initial state of the custom device (the void* device_info\n argument to TFE_RegisterCustomDevice). This method takes ownership of the\n memory and clears the capsule destructor.\n \"\"\"\n context().register_custom_device(device_capsule, device_name,\n device_info_capsule)\n\n\n# Not every user creates a Context via context.context()\n# (for example, enable_eager_execution in python/framework/ops.py),\n# but they do all import this file. Note that IS_IN_GRAPH_MODE and\n# in_graph_mode are both parameterless functions.\ndef _tmp_in_graph_mode():\n if context_safe() is None:\n # Context not yet initialized. Assume graph mode following the\n # default implementation in `is_in_graph_mode`.\n return True\n return not executing_eagerly()\n\n\nis_in_graph_mode.IS_IN_GRAPH_MODE = _tmp_in_graph_mode\n"
] |
[
[
"tensorflow.core.protobuf.config_pb2.RunMetadata",
"tensorflow.python.pywrap_tfe.TFE_ContextSetThreadLocalMirroringPolicy",
"tensorflow.python.pywrap_tfe.TFE_ContextOptionsSetDevicePlacementPolicy",
"tensorflow.python.pywrap_tfe.TFE_ContextGetExecutorForThread",
"tensorflow.python.pywrap_tfe.TFE_ContextGetFunctionDef",
"tensorflow.python.pywrap_tfe.TFE_NewContext",
"tensorflow.python.pywrap_tfe.TFE_ContextOptionsSetAsync",
"tensorflow.python.framework.device.DeviceSpec.from_string",
"tensorflow.python.pywrap_tfe.TF_DeviceListName",
"tensorflow.python.pywrap_tfe.TFE_ContextUpdateServerDef",
"tensorflow.python.pywrap_tfe.TFE_ContextGetDevicePlacementPolicy",
"tensorflow.python.pywrap_tfe.TFE_ContextOptionsSetTfrt",
"tensorflow.python.pywrap_tfe.TFE_ContextSetLogDevicePlacement",
"tensorflow.python.pywrap_tfe.TFE_Py_PackEagerTensors",
"tensorflow.python.pywrap_tfe.TFE_ContextSetServerDef",
"tensorflow.python.pywrap_tfe.TFE_ContextEnableRunMetadata",
"tensorflow.python.pywrap_tfe.TFE_ContextHasFunction",
"tensorflow.python.pywrap_tfe.TFE_Py_RegisterCustomDevice",
"tensorflow.python.framework.device.is_device_spec",
"tensorflow.python.pywrap_tfe.TF_DeleteDeviceList",
"tensorflow.python.util.tf_export.tf_export",
"tensorflow.python.pywrap_tfe.TFE_ContextDisableGraphCollection",
"tensorflow.python.pywrap_tfe.TFE_ContextClearExecutors",
"tensorflow.python.pywrap_tfe.TFE_HostAddressSpace",
"tensorflow.python.pywrap_tfe.TFE_ContextEnableGraphCollection",
"tensorflow.python.eager.monitoring.Counter",
"tensorflow.python.pywrap_tfe.TF_DeviceListType",
"tensorflow.python.pywrap_tfe.TFE_Py_SetEagerContext",
"tensorflow.core.protobuf.config_pb2.GPUOptions.Experimental.VirtualDevices",
"tensorflow.python.pywrap_tfe.TFE_ContextStartStep",
"tensorflow.python.pywrap_tfe.TF_ListPhysicalDevices",
"tensorflow.python.pywrap_tfe.TFE_ContextOptionsSetMirroringPolicy",
"tensorflow.python.pywrap_tfe.TFE_ContextListDevices",
"tensorflow.python.framework.device.canonical_name",
"tensorflow.python.pywrap_tfe.TFE_ContextGetMirroringPolicy",
"tensorflow.python.pywrap_tfe.TFE_ContextAddFunction",
"tensorflow.python.pywrap_tfe.TF_GetDeviceDetails",
"tensorflow.python.pywrap_tfe.TFE_ContextCheckAlive",
"tensorflow.python.pywrap_tfe.TFE_ContextSetSoftDevicePlacement",
"tensorflow.python.framework.c_api_util.tf_buffer",
"numpy.array",
"tensorflow.python.pywrap_tfe.TFE_ContextRemoveFunction",
"tensorflow.core.framework.function_pb2.FunctionDef",
"tensorflow.python.pywrap_tfe.TFE_ContextClearCaches",
"tensorflow.python.pywrap_tfe.TFE_ContextSyncExecutors",
"tensorflow.python.eager.executor.new_executor",
"tensorflow.python.pywrap_tfe.TFE_ContextOptionsSetConfig",
"tensorflow.python.framework.is_tfrt_test_true.is_tfrt_enabled",
"tensorflow.python.pywrap_tfe.TFE_ContextDisableRunMetadata",
"tensorflow.python.pywrap_tfe.TFE_ContextEndStep",
"tensorflow.python.pywrap_tfe.TFE_ContextOptionsSetLazyRemoteInputsCopy",
"tensorflow.python.pywrap_tfe.TFE_NewContextOptions",
"tensorflow.python.util.compat.as_bytes",
"tensorflow.python.pywrap_tfe.TFE_ContextExportRunMetadata",
"tensorflow.python.pywrap_tfe.TF_DeviceListCount",
"tensorflow.python.pywrap_tfe.TFE_DeleteContextOptions",
"tensorflow.python.tf2.enabled",
"tensorflow.python.client.pywrap_tf_session.TF_GetBuffer",
"tensorflow.core.protobuf.config_pb2.GPUOptions.Experimental",
"tensorflow.python.pywrap_tfe.TFE_EnableCollectiveOps",
"tensorflow.python.pywrap_tfe.TFE_ClearScalarCache",
"tensorflow.core.protobuf.config_pb2.ConfigProto",
"tensorflow.python.pywrap_tfe.TFE_ContextSetThreadLocalDevicePlacementPolicy"
]
] |
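The `executing_eagerly` docstrings in the eager context module above spell out when the flag flips; a minimal sketch of that behaviour, assuming a stock TensorFlow 2.x install rather than this vendored source, is:

    import tensorflow as tf

    # Eager execution is the default in TF 2.x.
    print(tf.executing_eagerly())                       # True

    @tf.function
    def fn():
        with tf.init_scope():
            # init_scope lifts execution out of the function-building graph.
            print("init_scope:", tf.executing_eagerly())  # True
        # The traced function body runs in graph mode by default.
        print("body:", tf.executing_eagerly())            # False

    fn()

Calling `tf.config.run_functions_eagerly(True)` before `fn()` makes both prints report `True`, as the docstring's second example shows.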
hassanmohsin/2d-image-classification
|
[
"7b52a203fa305d6064fe938d380c94067dc696ee"
] |
[
"train/utils.py"
] |
[
"import os\n\nimport torch\nimport torch.functional as F\nimport torch.nn as nn\n\n\nclass FocalLoss(nn.Module):\n def __init__(self, alpha=0.25, gamma=3):\n super(FocalLoss, self).__init__()\n self.alpha = alpha\n self.gamma = gamma\n\n def forward(self, inputs, targets):\n bce_loss = F.binary_cross_entropy_with_logits(inputs, targets, reduction='none')\n pt = torch.exp(-bce_loss) # prevents nans when probability 0\n focal_loss = self.alpha * (1 - pt) ** self.gamma * bce_loss\n return focal_loss.mean()\n\n\ndef get_mean_std(loader):\n channels_sum, channels_squared_sum, num_batches = 0, 0, 0\n\n for _, data, _ in loader:\n channels_sum += torch.mean(data, dim=(0, 2, 3))\n channels_squared_sum += torch.mean(data ** 2, dim=(0, 2, 3))\n num_batches += 1\n\n mean = channels_sum / num_batches\n std = (channels_squared_sum / num_batches - mean ** 2) ** 0.5\n\n return mean, std\n\n\n# TODO: Enable loading epoch, loss and accuracy also\ndef load_checkpoint(model, optimizer, filename='checkpoint-best.pth.tar'):\n # Note: Input model & optimizer should be pre-defined. This routine only updates their states.\n start_epoch = 1\n best_f2 = 0.\n if os.path.isfile(filename):\n print(\"=> loading checkpoint '{}'\".format(filename))\n checkpoint = torch.load(filename)\n start_epoch = checkpoint['epoch']\n best_f2 = checkpoint['best_f2']\n model.load_state_dict(checkpoint['state_dict'])\n optimizer.load_state_dict(checkpoint['optimizer'])\n print(\"=> loaded checkpoint '{}' (epoch {})\".format(filename, checkpoint['epoch']))\n else:\n print(\"=> no checkpoint found at '{}'\".format(filename))\n\n return model, optimizer, start_epoch, best_f2\n\n\ndef save_checkpoint(state, is_best, filename):\n weight_dir = os.path.dirname(filename)\n \"\"\"Save checkpoint if a new best is achieved\"\"\"\n if is_best:\n print(\"=> Saving a new best\")\n torch.save(state, os.path.join(weight_dir, \"checkpoint-best.pth.tar\")) # save checkpoint\n else:\n print(\"=> Validation Accuracy did not improve\")\n torch.save(state, filename)\n\n\ndef binary_acc(y_pred, y_test):\n y_pred_tag = torch.round(torch.sigmoid(y_pred))\n\n correct_results_sum = (y_pred_tag == y_test).sum().float()\n acc = correct_results_sum / y_test.shape[0]\n acc = torch.round(acc * 100)\n\n return acc\n\n\ndef score(y_pred, y_test):\n predicted_classes = torch.round(torch.sigmoid(y_pred))\n predicted_true = torch.sum(predicted_classes == 1).float()\n target_true = torch.sum(y_test == 1).float()\n correct_true = torch.sum(predicted_classes == y_test * predicted_classes == 1).float()\n\n recall = correct_true / target_true\n precision = correct_true / predicted_true\n f1_score = 2 * precision * recall / (precision + recall)\n\n correct_results_sum = (predicted_classes == y_test).sum().float()\n acc = correct_results_sum / y_test.shape[0]\n acc = torch.round(acc * 100)\n\n return acc, precision, recall, f1_score\n\n\ndef optimizer_to(optim, device):\n for param in optim.state.values():\n # Not sure there are any global tensors in the state dict\n if isinstance(param, torch.Tensor):\n param.data = param.data.to(device)\n if param._grad is not None:\n param._grad.data = param._grad.data.to(device)\n elif isinstance(param, dict):\n for subparam in param.values():\n if isinstance(subparam, torch.Tensor):\n subparam.data = subparam.data.to(device)\n if subparam._grad is not None:\n subparam._grad.data = subparam._grad.data.to(device)\n"
] |
[
[
"torch.mean",
"torch.sigmoid",
"torch.load",
"torch.round",
"torch.sum",
"torch.exp",
"torch.functional.binary_cross_entropy_with_logits",
"torch.save"
]
] |
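The `FocalLoss` above wraps a standard sigmoid focal loss; a self-contained sketch of exercising it follows. Note that `binary_cross_entropy_with_logits` is provided by `torch.nn.functional`, so the sketch imports that module (the file's `import torch.functional as F` would not expose it); the toy logits and targets are made up for illustration.

    import torch
    import torch.nn as nn
    import torch.nn.functional as F

    class FocalLoss(nn.Module):
        """Binary focal loss on raw logits, as defined in train/utils.py."""
        def __init__(self, alpha=0.25, gamma=3):
            super().__init__()
            self.alpha = alpha
            self.gamma = gamma

        def forward(self, inputs, targets):
            bce = F.binary_cross_entropy_with_logits(inputs, targets, reduction="none")
            pt = torch.exp(-bce)                     # probability assigned to the true class
            return (self.alpha * (1 - pt) ** self.gamma * bce).mean()

    logits = torch.randn(8)                          # raw scores for 8 samples
    targets = torch.randint(0, 2, (8,)).float()      # binary ground truth
    print(FocalLoss()(logits, targets))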
JonathanShor/DoubletDetection
|
[
"fd08d1c0310a78a63438ddff0693a1fba0ad50e4"
] |
[
"tests/test_package.py"
] |
[
"import numpy as np\n\nimport doubletdetection\n\n\ndef test_classifier():\n\n counts = np.random.poisson(size=(500, 100))\n\n # no phenograph\n clf = doubletdetection.BoostClassifier(n_iters=2, use_phenograph=False, standard_scaling=True)\n clf.fit(counts).predict(p_thresh=1e-16, voter_thresh=0.5)\n clf.doublet_score()\n\n # with phenograph\n clf = doubletdetection.BoostClassifier(n_iters=2, use_phenograph=True, standard_scaling=True)\n clf.fit(counts).predict(p_thresh=1e-16, voter_thresh=0.5)\n clf.doublet_score()\n\n doubletdetection.plot.convergence(clf, show=False, p_thresh=1e-16, voter_thresh=0.5)\n doubletdetection.plot.threshold(clf, show=False, p_step=6)\n"
] |
[
[
"numpy.random.poisson"
]
] |
shahrukhx01/siamese-nn-semantic-text-similarity
|
[
"df15c4f1473f8ab3ff1dbfb1c4a888817c684ecd"
] |
[
"siamese_sts/siamese_net/siamese_lstm.py"
] |
[
"import torch\nimport torch.nn as nn\nfrom torch.autograd import Variable\nfrom siamese_sts.utils.utils import similarity_score\n\n\"\"\"\nWrapper class using Pytorch nn.Module to create the architecture for our \nbinary classification model\n\"\"\"\n\n\nclass SiameseLSTM(nn.Module):\n def __init__(\n self,\n batch_size: int,\n output_size: int,\n hidden_size: int,\n vocab_size: int,\n embedding_size: int,\n embedding_weights: torch.TensorType,\n lstm_layers: int,\n device: str,\n ):\n super(SiameseLSTM, self).__init__()\n \"\"\"\n Initializes model layers and loads pre-trained embeddings from task 1\n \"\"\"\n ## model hyper parameters\n self.batch_size = batch_size\n self.output_size = output_size\n self.hidden_size = hidden_size\n self.vocab_size = vocab_size\n self.embedding_size = embedding_size\n self.lstm_layers = lstm_layers\n self.device = device\n\n ## model layers\n # initializing the look-up table.\n self.word_embeddings = nn.Embedding(vocab_size, embedding_size)\n\n # assigning the look-up table to the pre-trained fasttext word embeddings.\n self.word_embeddings.weight = nn.Parameter(\n embedding_weights.to(self.device), requires_grad=True\n )\n\n self.lstm = nn.LSTM(embedding_size, hidden_size, num_layers=lstm_layers)\n\n def init_hidden(self, batch_size):\n \"\"\"\n Initializes hidden and context weight matrix before each\n forward pass through LSTM\n \"\"\"\n return (\n Variable(\n torch.zeros(self.lstm_layers, batch_size, self.hidden_size).to(\n self.device\n )\n ),\n Variable(torch.zeros(self.lstm_layers, batch_size, self.hidden_size)).to(\n self.device\n ),\n )\n\n def forward_once(self, batch, lengths):\n # embedded input of shape = (batch_size, sequence_len, embedding_size)\n embeddings = self.word_embeddings(batch)\n\n # permute embedded input to shape = (sequence_len, batch_size, embedding_size)\n embeddings = embeddings.permute(1, 0, 2)\n\n # perform forward pass of LSTM\n output, (final_hidden_state, final_cell_state) = self.lstm(\n embeddings, self.hidden\n )\n\n return final_hidden_state[-1]\n\n def forward(self, sent1_batch, sent2_batch, sent1_lengths, sent2_lengths):\n \"\"\"\n Performs the forward pass for each batch\n \"\"\"\n ## init context and hidden weights for lstm cell\n self.hidden = self.init_hidden(sent1_batch.size(0))\n\n self.sent1_out = self.forward_once(sent1_batch, sent1_lengths)\n self.sent2_out = self.forward_once(sent2_batch, sent2_lengths)\n similarity = similarity_score(self.sent1_out, self.sent2_out)\n return similarity\n"
] |
[
[
"torch.zeros",
"torch.nn.Embedding",
"torch.nn.LSTM"
]
] |
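A minimal driving sketch for the `SiameseLSTM` above, assuming the `siamese_sts` package is importable; the vocabulary size, sequence length and random embedding matrix are stand-ins for the real pre-trained weights:

    import torch
    from siamese_sts.siamese_net.siamese_lstm import SiameseLSTM

    vocab_size, embedding_size, hidden_size = 1000, 50, 64
    model = SiameseLSTM(
        batch_size=4, output_size=1, hidden_size=hidden_size,
        vocab_size=vocab_size, embedding_size=embedding_size,
        embedding_weights=torch.randn(vocab_size, embedding_size),  # stand-in for fasttext vectors
        lstm_layers=1, device="cpu",
    )

    sent1 = torch.randint(0, vocab_size, (4, 12))    # (batch, seq_len) token ids
    sent2 = torch.randint(0, vocab_size, (4, 12))
    lengths = torch.full((4,), 12)
    similarity = model(sent1, sent2, lengths, lengths)  # similarity_score() of the two encodings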
thunlp/VisualDS
|
[
"855b5e0904b9bd6a8d6c30e4ea9156c5967e1143"
] |
[
"maskrcnn_benchmark/structures/bounding_box.py"
] |
[
"# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.\nimport torch\n\n# transpose\nFLIP_LEFT_RIGHT = 0\nFLIP_TOP_BOTTOM = 1\n\n\nclass BoxList(object):\n \"\"\"\n This class represents a set of bounding boxes.\n The bounding boxes are represented as a Nx4 Tensor.\n In order to uniquely determine the bounding boxes with respect\n to an image, we also store the corresponding image dimensions.\n They can contain extra information that is specific to each bounding box, such as\n labels.\n \"\"\"\n\n def __init__(self, bbox, image_size, mode=\"xyxy\"):\n device = bbox.device if isinstance(bbox, torch.Tensor) else torch.device(\"cpu\")\n bbox = torch.as_tensor(bbox, dtype=torch.float32, device=device)\n if bbox.ndimension() != 2:\n raise ValueError(\n \"bbox should have 2 dimensions, got {}\".format(bbox.ndimension())\n )\n if bbox.size(-1) != 4:\n raise ValueError(\n \"last dimension of bbox should have a \"\n \"size of 4, got {}\".format(bbox.size(-1))\n )\n if mode not in (\"xyxy\", \"xywh\"):\n raise ValueError(\"mode should be 'xyxy' or 'xywh'\")\n\n self.bbox = bbox\n self.size = image_size # (image_width, image_height)\n self.mode = mode\n self.extra_fields = {}\n self.triplet_extra_fields = [] # e.g. relation field, which is not the same size as object bboxes and should not respond to __getitem__ slicing v[item]\n\n def add_field(self, field, field_data, is_triplet=False):\n # if field in self.extra_fields:\n # print('{} is already in extra_fields. Try to replace with new data. '.format(field))\n self.extra_fields[field] = field_data\n if is_triplet:\n self.triplet_extra_fields.append(field)\n\n def get_field(self, field):\n return self.extra_fields[field]\n\n def has_field(self, field):\n return field in self.extra_fields\n\n def fields(self):\n return list(self.extra_fields.keys())\n\n def _copy_extra_fields(self, bbox):\n for k, v in bbox.extra_fields.items():\n self.extra_fields[k] = v\n\n def convert(self, mode):\n if mode not in (\"xyxy\", \"xywh\"):\n raise ValueError(\"mode should be 'xyxy' or 'xywh'\")\n if mode == self.mode:\n return self\n # we only have two modes, so don't need to check\n # self.mode\n xmin, ymin, xmax, ymax = self._split_into_xyxy()\n if mode == \"xyxy\":\n bbox = torch.cat((xmin, ymin, xmax, ymax), dim=-1)\n bbox = BoxList(bbox, self.size, mode=mode)\n else:\n TO_REMOVE = 1\n bbox = torch.cat(\n (xmin, ymin, xmax - xmin + TO_REMOVE, ymax - ymin + TO_REMOVE), dim=-1\n )\n bbox = BoxList(bbox, self.size, mode=mode)\n bbox._copy_extra_fields(self)\n return bbox\n\n def _split_into_xyxy(self):\n if self.mode == \"xyxy\":\n xmin, ymin, xmax, ymax = self.bbox.split(1, dim=-1)\n return xmin, ymin, xmax, ymax\n elif self.mode == \"xywh\":\n TO_REMOVE = 1\n xmin, ymin, w, h = self.bbox.split(1, dim=-1)\n return (\n xmin,\n ymin,\n xmin + (w - TO_REMOVE).clamp(min=0),\n ymin + (h - TO_REMOVE).clamp(min=0),\n )\n else:\n raise RuntimeError(\"Should not be here\")\n\n def resize(self, size, *args, **kwargs):\n \"\"\"\n Returns a resized copy of this bounding box\n\n :param size: The requested size in pixels, as a 2-tuple:\n (width, height).\n \"\"\"\n\n ratios = tuple(float(s) / float(s_orig) for s, s_orig in zip(size, self.size))\n if ratios[0] == ratios[1]:\n ratio = ratios[0]\n scaled_box = self.bbox * ratio\n bbox = BoxList(scaled_box, size, mode=self.mode)\n # bbox._copy_extra_fields(self)\n for k, v in self.extra_fields.items():\n if not isinstance(v, torch.Tensor) and hasattr(v, \"resize\"):\n v = v.resize(size, *args, **kwargs)\n if k in 
self.triplet_extra_fields:\n bbox.add_field(k, v, is_triplet=True)\n else:\n bbox.add_field(k, v)\n return bbox\n\n ratio_width, ratio_height = ratios\n xmin, ymin, xmax, ymax = self._split_into_xyxy()\n scaled_xmin = xmin * ratio_width\n scaled_xmax = xmax * ratio_width\n scaled_ymin = ymin * ratio_height\n scaled_ymax = ymax * ratio_height\n scaled_box = torch.cat(\n (scaled_xmin, scaled_ymin, scaled_xmax, scaled_ymax), dim=-1\n )\n bbox = BoxList(scaled_box, size, mode=\"xyxy\")\n # bbox._copy_extra_fields(self)\n for k, v in self.extra_fields.items():\n if not isinstance(v, torch.Tensor) and hasattr(v, \"resize\"):\n v = v.resize(size, *args, **kwargs)\n if k in self.triplet_extra_fields:\n bbox.add_field(k, v, is_triplet=True)\n else:\n bbox.add_field(k, v)\n\n return bbox.convert(self.mode)\n\n def transpose(self, method):\n \"\"\"\n Transpose bounding box (flip or rotate in 90 degree steps)\n :param method: One of :py:attr:`PIL.Image.FLIP_LEFT_RIGHT`,\n :py:attr:`PIL.Image.FLIP_TOP_BOTTOM`, :py:attr:`PIL.Image.ROTATE_90`,\n :py:attr:`PIL.Image.ROTATE_180`, :py:attr:`PIL.Image.ROTATE_270`,\n :py:attr:`PIL.Image.TRANSPOSE` or :py:attr:`PIL.Image.TRANSVERSE`.\n \"\"\"\n if method not in (FLIP_LEFT_RIGHT, FLIP_TOP_BOTTOM):\n raise NotImplementedError(\n \"Only FLIP_LEFT_RIGHT and FLIP_TOP_BOTTOM implemented\"\n )\n\n image_width, image_height = self.size\n xmin, ymin, xmax, ymax = self._split_into_xyxy()\n if method == FLIP_LEFT_RIGHT:\n TO_REMOVE = 1\n transposed_xmin = image_width - xmax - TO_REMOVE\n transposed_xmax = image_width - xmin - TO_REMOVE\n transposed_ymin = ymin\n transposed_ymax = ymax\n elif method == FLIP_TOP_BOTTOM:\n transposed_xmin = xmin\n transposed_xmax = xmax\n transposed_ymin = image_height - ymax\n transposed_ymax = image_height - ymin\n\n transposed_boxes = torch.cat(\n (transposed_xmin, transposed_ymin, transposed_xmax, transposed_ymax), dim=-1\n )\n bbox = BoxList(transposed_boxes, self.size, mode=\"xyxy\")\n # bbox._copy_extra_fields(self)\n for k, v in self.extra_fields.items():\n if not isinstance(v, torch.Tensor):\n v = v.transpose(method)\n if k in self.triplet_extra_fields:\n bbox.add_field(k, v, is_triplet=True)\n else:\n bbox.add_field(k, v)\n return bbox.convert(self.mode)\n\n def crop(self, box):\n \"\"\"\n Cropss a rectangular region from this bounding box. 
The box is a\n 4-tuple defining the left, upper, right, and lower pixel\n coordinate.\n \"\"\"\n xmin, ymin, xmax, ymax = self._split_into_xyxy()\n w, h = box[2] - box[0], box[3] - box[1]\n cropped_xmin = (xmin - box[0]).clamp(min=0, max=w)\n cropped_ymin = (ymin - box[1]).clamp(min=0, max=h)\n cropped_xmax = (xmax - box[0]).clamp(min=0, max=w)\n cropped_ymax = (ymax - box[1]).clamp(min=0, max=h)\n\n # TODO should I filter empty boxes here?\n if False:\n is_empty = (cropped_xmin == cropped_xmax) | (cropped_ymin == cropped_ymax)\n\n cropped_box = torch.cat(\n (cropped_xmin, cropped_ymin, cropped_xmax, cropped_ymax), dim=-1\n )\n bbox = BoxList(cropped_box, (w, h), mode=\"xyxy\")\n # bbox._copy_extra_fields(self)\n for k, v in self.extra_fields.items():\n if not isinstance(v, torch.Tensor):\n v = v.crop(box)\n if k in self.triplet_extra_fields:\n bbox.add_field(k, v, is_triplet=True)\n else:\n bbox.add_field(k, v)\n return bbox.convert(self.mode)\n\n # Tensor-like methods\n\n def to(self, device):\n bbox = BoxList(self.bbox.to(device), self.size, self.mode)\n for k, v in self.extra_fields.items():\n if hasattr(v, \"to\"):\n v = v.to(device)\n if k in self.triplet_extra_fields:\n bbox.add_field(k, v, is_triplet=True)\n else:\n bbox.add_field(k, v)\n return bbox\n\n def __getitem__(self, item):\n bbox = BoxList(self.bbox[item], self.size, self.mode)\n for k, v in self.extra_fields.items():\n if k in self.triplet_extra_fields:\n bbox.add_field(k, v[item][:,item], is_triplet=True)\n else:\n bbox.add_field(k, v[item])\n return bbox\n\n def __len__(self):\n return self.bbox.shape[0]\n\n def clip_to_image(self, remove_empty=True):\n TO_REMOVE = 1\n self.bbox[:, 0].clamp_(min=0, max=self.size[0] - TO_REMOVE)\n self.bbox[:, 1].clamp_(min=0, max=self.size[1] - TO_REMOVE)\n self.bbox[:, 2].clamp_(min=0, max=self.size[0] - TO_REMOVE)\n self.bbox[:, 3].clamp_(min=0, max=self.size[1] - TO_REMOVE)\n if remove_empty:\n box = self.bbox\n keep = (box[:, 3] > box[:, 1]) & (box[:, 2] > box[:, 0])\n return self[keep]\n return self\n\n def area(self):\n box = self.bbox\n if self.mode == \"xyxy\":\n TO_REMOVE = 1\n area = (box[:, 2] - box[:, 0] + TO_REMOVE) * (box[:, 3] - box[:, 1] + TO_REMOVE)\n elif self.mode == \"xywh\":\n area = box[:, 2] * box[:, 3]\n else:\n raise RuntimeError(\"Should not be here\")\n\n return area\n\n def copy(self):\n return BoxList(self.bbox, self.size, self.mode)\n\n def copy_with_fields(self, fields, skip_missing=False):\n bbox = BoxList(self.bbox, self.size, self.mode)\n if not isinstance(fields, (list, tuple)):\n fields = [fields]\n for field in fields:\n if self.has_field(field):\n if field in self.triplet_extra_fields:\n bbox.add_field(field, self.get_field(field), is_triplet=True)\n else:\n bbox.add_field(field, self.get_field(field))\n elif not skip_missing:\n raise KeyError(\"Field '{}' not found in {}\".format(field, self))\n return bbox\n\n def __repr__(self):\n s = self.__class__.__name__ + \"(\"\n s += \"num_boxes={}, \".format(len(self))\n s += \"image_width={}, \".format(self.size[0])\n s += \"image_height={}, \".format(self.size[1])\n s += \"mode={})\".format(self.mode)\n return s\n\n\nif __name__ == \"__main__\":\n bbox = BoxList([[0, 0, 10, 10], [0, 0, 5, 5]], (10, 10))\n s_bbox = bbox.resize((5, 5))\n print(s_bbox)\n print(s_bbox.bbox)\n\n t_bbox = bbox.transpose(0)\n print(t_bbox)\n print(t_bbox.bbox)\n"
] |
[
[
"torch.device",
"torch.cat",
"torch.as_tensor"
]
] |
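Beyond the resize/transpose demo in the module's `__main__` block, the extra-field machinery and mode conversion can be exercised as in the sketch below, assuming `maskrcnn_benchmark` is importable:

    import torch
    from maskrcnn_benchmark.structures.bounding_box import BoxList

    boxes = BoxList(torch.tensor([[0., 0., 9., 9.], [2., 2., 6., 8.]]),
                    image_size=(10, 10), mode="xyxy")
    boxes.add_field("labels", torch.tensor([1, 2]))        # per-box extra data
    print(boxes.convert("xywh").bbox)                      # widths/heights follow the TO_REMOVE=1 convention
    print(boxes[torch.tensor([1])].get_field("labels"))    # fields are sliced together with the boxes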
ehgh/COVID-19-case-estimation-and-policy-effects
|
[
"afc2001a0f29caf1e6e0ae9311983752ac7efff8"
] |
[
"Linear Regression and Scenario Generation/code/Linear Regression.py"
] |
[
"# policy contains the information on which day the policy was introduced\r\n# policy_lift contains information on which day the policy was relaxed\r\n# 1000: the policy was not introduced (relaxed) in according to the file (policy/ policy_lift)\r\n# we use data from February 18, 2020 to May 7, 2020 to estimate the coefficients \r\n# each number in the file policy and policy_lift files refers to the number of days from Dec 1, 2019\r\n# - for example, Feb 15 is represented by 76\r\n\r\nimport pandas as pd\r\nimport numpy as np\r\nfrom sklearn.linear_model import Lasso\r\nfrom sklearn.model_selection import train_test_split\r\nimport scipy\r\nfrom scipy.stats import iqr\r\n\r\n\r\ndef RegressionModels():\r\n # uploading the data\r\n policy = pd.read_csv(\"./policy.csv\")\r\n policy_lift = pd.read_csv(\"./policy_lift.csv\")\r\n popularity = np.load(\"./popularity_germany.npy\")\r\n weather = pd.read_csv(\"./weather.csv\")\r\n trend = pd.read_csv(\"./trend.csv\") # cumulative trend numbers\r\n PTV = pd.read_csv(\"./PTV.csv\")\r\n\r\n X = []\r\n Y = []\r\n\r\n cols = ['Border Closure', 'Initial Business closure','Educational Facilities Closed',\r\n 'Non-essential Services Closed',\r\n 'Stay at Home Order', 'Contact Restriction',\r\n 'Retails Closed','Trend','Tmax','PTV'] \r\n\r\n # going ove 16 states\r\n for j in range(16):\r\n \r\n # going over the time slots in the study\r\n for t in range(popularity.shape[1]):\r\n c = []\r\n \r\n # adding the policy if yes or no\r\n # 79 added to t represent the shift in day number (as we count the number of days from Dec1, 2019)\r\n\r\n for p in range(7): \r\n po = 0\r\n if t+79 >= policy.iloc[j,1+p]: \r\n po = 1\r\n if t+79 > policy_lift.iloc[j,1+p]:\r\n po = 0\r\n c.append(po) \r\n\r\n c.append(trend.iloc[t,j+1]) \r\n c.append(weather.iloc[t,j+1]) \r\n c.append(PTV.iloc[t,1]) \r\n\r\n X.append(c)\r\n # we store the values for three outcomes - predict commnunity mobility in parks and recreation,\r\n # transit stations and workplace (to estimate mobility as used in the paper, and to adjust for car and train movement)\r\n Y.append([popularity[j,t,0],popularity[j,t,3],popularity[j,t,4]])\r\n\r\n x = pd.DataFrame(X,columns=cols)\r\n y = pd.DataFrame(Y,columns=['mobility','train','car'])\r\n\r\n\r\n # Lasso Regression Models\r\n\r\n model_mobility = Lasso(alpha=0.25).fit(x,y['mobility'])\r\n model_train = Lasso(alpha=0.25).fit(x,y['train'])\r\n model_car = Lasso(alpha=0.25).fit(x,y['car'])\r\n\r\n # getting the coefficients\r\n coeff = pd.DataFrame(model2.coef_,columns=['Lasso'])\r\n coeff['Variables'] = cols\r\n\r\n\r\n # Bootstrapping\r\n\r\n for j in range(1000):\r\n # bootstrapping with 90% data\r\n X_train, X_test, y_train, y_test = train_test_split(x, y['mobility'], test_size=0.1, random_state=j)\r\n model_temp = Lasso(alpha=0.25).fit(X_train,y_train)\r\n coeff[str(j)] = model_temp.coef_ \r\n\r\n # estimating the IQR from Bootstrapping\r\n\r\n IQR = np.zeros((3,10)) # 25 percentile, mean and 75th percentile for the 10 predictor variables\r\n\r\n for k in range(10):\r\n sample_standard_error = scipy.stats.sem(coeff.values[k,2:])\r\n q75, q25 = np.percentile(coeff.values[k,2:], [75 ,25])\r\n\r\n IQR[0,k] = q25\r\n IQR[1,k] = coeff['Lasso'].values[k]\r\n IQR[2,k] = q75\r\n \r\n return(model_mobility,model_train,model_car,coeff,IQR)"
] |
[
[
"pandas.read_csv",
"sklearn.model_selection.train_test_split",
"pandas.DataFrame",
"numpy.percentile",
"sklearn.linear_model.Lasso",
"numpy.load",
"scipy.stats.sem",
"numpy.zeros"
]
] |
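The regression function above fits Lasso models and bootstraps their coefficients to obtain an interquartile range; below is a self-contained sketch of that pattern on synthetic data (the real script reads the policy, weather and mobility files). Note that the original builds `coeff` from `model2.coef_`, a name not defined in the function; the sketch uses the fitted model directly.

    import numpy as np
    import pandas as pd
    from sklearn.linear_model import Lasso
    from sklearn.model_selection import train_test_split

    rng = np.random.default_rng(0)
    X = pd.DataFrame(rng.normal(size=(400, 10)), columns=[f"x{i}" for i in range(10)])
    y = X @ rng.normal(size=10) + rng.normal(scale=0.1, size=400)

    model = Lasso(alpha=0.25).fit(X, y)
    coeff = pd.DataFrame({"Lasso": model.coef_, "Variables": X.columns})

    boot = []
    for j in range(200):                      # bootstrap on 90% splits, as in the script
        X_tr, _, y_tr, _ = train_test_split(X, y, test_size=0.1, random_state=j)
        boot.append(Lasso(alpha=0.25).fit(X_tr, y_tr).coef_)
    boot = np.array(boot)

    q25, q75 = np.percentile(boot, [25, 75], axis=0)
    IQR = np.vstack([q25, model.coef_, q75])  # 25th percentile, point estimate, 75th percentile
    print(IQR.shape)                          # (3, 10)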
manideep2510/Lipreading-Keras
|
[
"d5b41a62599a68e6506cbf8d25779f770505f6b7"
] |
[
"aligns.py"
] |
[
"import numpy as np\n\nclass Align(object):\n def __init__(self, absolute_max_string_len=128, label_func=None):\n self.label_func = label_func\n self.absolute_max_string_len = absolute_max_string_len\n\n def from_file(self, path):\n with open(path, 'r') as f:\n lines = f.readlines()\n align = [(float((y[1]))*25, float((y[2]))*25, y[0]) for y in [x.strip().split(\" \") for x in lines[4:]]]\n self.build(align)\n return self\n\n def from_array(self, align):\n self.build(align)\n return self\n\n def build(self, align):\n self.align = align\n self.sentence = self.get_sentence(align)\n self.label = self.get_label(self.sentence)\n self.padded_label = self.get_padded_label(self.label)\n\n def strip(self, align, items):\n return [sub for sub in align if sub[2] not in items]\n\n def get_sentence(self, align):\n return \" \".join([y[-1] for y in align if y[0]/25 <4.8] )\n\n def get_label(self, sentence):\n return self.label_func(sentence)\n\n def get_padded_label(self, label):\n padding = np.ones((self.absolute_max_string_len-len(label))) * -1\n return np.concatenate((np.array(label), padding), axis=0)\n\n @property\n def word_length(self):\n return len(self.sentence.split(\" \"))\n\n @property\n def sentence_length(self):\n return len(self.sentence)\n\n @property\n def label_length(self):\n return len(self.label)\n"
] |
[
[
"numpy.array"
]
] |
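A short sketch of feeding the `Align` class above, assuming `aligns.py` from this row is on the path; the `label_func` here is a toy character-to-integer mapping and the alignment tuples are made up:

    from aligns import Align

    def label_func(sentence):
        # Toy labeller: a..z -> 0..25, space -> 26.
        return [26 if c == " " else ord(c) - ord("a") for c in sentence]

    align = Align(absolute_max_string_len=32, label_func=label_func)
    # (start_frame, end_frame, word); words starting at or after frame 120 (4.8 s * 25 fps) are dropped.
    align.from_array([(0, 10, "bin"), (11, 30, "blue"), (31, 50, "now")])
    print(align.sentence)               # "bin blue now"
    print(align.padded_label.shape)     # (32,) -- label values padded with -1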
jfkcooper/SScanSS-2
|
[
"ae50c1d065732a7742eaf1a7b9a9349907c29f8a"
] |
[
"sscanss/core/instrument/robotics.py"
] |
[
"from enum import Enum, unique\nimport logging\nimport math\nimport time\nimport nlopt\nimport numpy as np\nfrom PyQt5 import QtCore\nfrom ..geometry.mesh import MeshGroup\nfrom ..math.constants import VECTOR_EPS\nfrom ..math.matrix import Matrix44\nfrom ..math.misc import trunc\nfrom ..math.transform import (rotation_btw_vectors, angle_axis_btw_vectors, rigid_transform, xyz_eulers_from_matrix,\n matrix_to_angle_axis)\nfrom ..math.quaternion import Quaternion, QuaternionVectorPair\nfrom ..math.vector import Vector3\n\n\nclass SerialManipulator:\n \"\"\"Creates a serial, open loop kinematic chain or serial robot which is used for\n positioning other objects. The manipulator consists mainly of a set of links connected end to end\n via joints. The base of the robot can be controlled by setting the base matrix and an extra offset\n can be added after the last link by setting the tool matrix\n\n :param name: name of the manipulator\n :type name: str\n :param links: list of link objects\n :type links: List[Link]\n :param base: base matrix. None sets base to an identity matrix\n :type base: Union[Matrix44, None]\n :param tool: tool matrix. None sets tool to an identity matrix\n :type tool: Union[Matrix44, None]\n :param base_mesh: mesh object for the base of the manipulator\n :type base_mesh: Union[Mesh, None]\n :param custom_order: order of joint if order is different from kinematic order\n :type custom_order: List[int]\n \"\"\"\n def __init__(self, name, links, base=None, tool=None, base_mesh=None, custom_order=None):\n self.name = name\n self.links = links\n self.base = Matrix44.identity() if base is None else base\n self.default_base = self.base\n self.tool = Matrix44.identity() if tool is None else tool\n self.base_mesh = base_mesh\n self.order = custom_order if custom_order is not None else list(range(len(links)))\n self.revolute_index = [link.type == link.Type.Revolute for link in links]\n\n def fkine(self, q, start_index=0, end_index=None, include_base=True, ignore_locks=False, set_point=True):\n \"\"\"Moves the manipulator to specified configuration and returns the forward kinematics\n transformation matrix of the manipulator. The transformation matrix can be computed for a subset\n of links i.e a start index to end index\n\n :param q: list of joint offsets to move to. The length must be equal to number of links\n :type q: List[float]\n :param start_index: index to start\n :type start_index: int\n :param end_index: index to end. 
None sets end_index to index of last link\n :type end_index: Union[int, None]\n :param include_base: indicates that base matrix should be included\n :type include_base: bool\n :param ignore_locks: indicates that joint locks should be ignored\n :type ignore_locks: bool\n :param set_point: indicates that given configuration, q is a set_point\n :type set_point: bool\n :return: Forward kinematic transformation matrix\n :rtype: Matrix44\n \"\"\"\n link_count = self.link_count\n\n start = 0 if start_index < 0 else start_index\n end = link_count if end_index is None or end_index > link_count else end_index\n\n base = self.base if include_base and start == 0 else Matrix44.identity()\n tool = self.tool if end == link_count else Matrix44.identity()\n\n qs = QuaternionVectorPair.identity()\n for i in range(start, end):\n self.links[i].move(q[i], ignore_locks, set_point)\n qs *= self.links[i].quaternion_vector_pair\n\n return base @ qs.toMatrix() @ tool\n\n def fromUserFormat(self, q):\n \"\"\"Converts the joint offset from user defined format to kinematic order\n\n :param q: list of joint offsets in user format. The length must be equal to number of links\n :type q: List[float]\n :return: list of joint offsets in kinematic order.\n :rtype: List[float]\n \"\"\"\n conf = np.zeros(self.link_count)\n conf[self.order] = q\n conf[self.revolute_index] = np.radians(conf[self.revolute_index])\n\n return conf.tolist()\n\n def toUserFormat(self, q):\n \"\"\"Converts the joint offsets from kinematic order to user defined format\n\n :param q: list of joint offsets in kinematic order. The length must be equal to number of links\n :type q: List[float]\n :return: list of joint offsets in user format.\n :rtype: List[float]\n \"\"\"\n conf = np.copy(q)\n conf[self.revolute_index] = np.degrees(conf[self.revolute_index])\n conf = conf[self.order]\n\n return conf.tolist()\n\n def resetOffsets(self):\n \"\"\"Resets the link offsets to the default\"\"\"\n for link in self.links:\n link.reset()\n\n def reset(self):\n \"\"\"Resets the base matrix, link offsets, locks, and limits to their default values\"\"\"\n self.base = self.default_base\n for link in self.links:\n link.reset()\n link.locked = False\n link.ignore_limits = False\n\n @property\n def link_count(self):\n \"\"\"Gets the number of links in manipulator\n\n :return: number of links\n :rtype: int\n \"\"\"\n return len(self.links)\n\n @property\n def set_points(self):\n \"\"\"Gets the expected configuration (set-point for all links) of the manipulator.\n This is useful when the animating the manipulator in that case the actual configuration\n differs from the set-point or final configuration.\n\n :return: expected configuration\n :rtype: list[float]\n \"\"\"\n return [link.set_point for link in self.links]\n\n @set_points.setter\n def set_points(self, q):\n \"\"\"Sets the expected configuration (set_points) of the manipulator\n\n :param q: expected configuration\n :type q: list[float]\n \"\"\"\n for offset, link in zip(q, self.links):\n link.set_point = offset\n\n @property\n def configuration(self):\n \"\"\"Gets the current configuration (joint offsets for all links) of the manipulator\n\n :return: current configuration\n :rtype: list[float]\n \"\"\"\n return [link.offset for link in self.links]\n\n @property\n def pose(self):\n \"\"\"Gets the pose of the end effector of the manipulator\n\n :return: transformation matrix\n :rtype: Matrix44\n \"\"\"\n qs = QuaternionVectorPair.identity()\n for link in self.links:\n qs *= link.quaternion_vector_pair\n\n return 
self.base @ qs.toMatrix() @ self.tool\n\n def model(self, matrix=None):\n \"\"\"Generates the 3d model of the manipulator and transforms it with specified matrix.\n\n :param matrix: transformation matrix\n :type matrix: Union[Matrix44, None]\n :return: 3D model of manipulator\n :rtype: MeshGroup\n \"\"\"\n model = MeshGroup()\n\n if matrix is None:\n base_matrix = self.base\n else:\n base_matrix = matrix @ self.base\n\n if self.base_mesh is not None:\n model.addMesh(self.base_mesh, base_matrix)\n\n qs = QuaternionVectorPair.identity()\n joint_pos = Vector3()\n for link in self.links:\n qs *= link.quaternion_vector_pair\n m = Matrix44.identity()\n m[0:3, 0:3] = qs.quaternion.toMatrix() @ link.up_matrix\n m[0:3, 3] = joint_pos if link.type == Link.Type.Revolute else qs.vector\n\n m = base_matrix @ m\n if link.mesh is not None:\n model.addMesh(link.mesh, m)\n\n joint_pos = qs.vector\n\n return model\n\n\nclass Link:\n \"\"\"This class represents a link/joint that belongs to a serial manipulator.\n The joint could be revolute or prismatic. The link is represented using the Quaternion-vector\n kinematic notation.\n\n :param name: name of the link\n :type name: str\n :param axis: axis of rotation or translation for joint i\n :type axis: List[float]\n :param vector: vector from joint i to joint i+1\n :type vector: List[float]\n :param joint_type: joint type\n :type joint_type: Link.Type\n :param lower_limit: lower limit of joint\n :type lower_limit: float\n :param upper_limit: upper limit of joint\n :type upper_limit: float\n :param default_offset: default joint offset\n :type default_offset: float\n :param mesh: mesh object for the base\n :type mesh: Mesh\n \"\"\"\n @unique\n class Type(Enum):\n \"\"\"Type of joint based on allowed motion\"\"\"\n Revolute = 'revolute'\n Prismatic = 'prismatic'\n\n def __init__(self, name, axis, vector, joint_type, lower_limit, upper_limit, default_offset, mesh=None):\n self.joint_axis = Vector3(axis)\n\n if self.joint_axis.length < 0.00001:\n raise ValueError('The joint axis cannot be a zero vector.')\n\n self.quaternion = Quaternion.fromAxisAngle(self.joint_axis, 0.0)\n self.vector = Vector3(vector)\n self.home = Vector3(vector)\n self.type = joint_type\n self.lower_limit = lower_limit\n self.upper_limit = upper_limit\n self.default_offset = default_offset\n self.set_point = default_offset\n self.up_matrix = rotation_btw_vectors(Vector3([0., 0., 1.]), self.joint_axis)\n self.mesh = mesh\n self.name = name\n self.locked = False\n self.ignore_limits = False # This just stores state it does not affect behaviour\n self.reset()\n\n def move(self, offset, ignore_locks=False, set_point=True):\n \"\"\"Moves the link by the specified offset\n\n :param offset: joint offset\n :type offset: float\n :param ignore_locks: indicates that joint locks should be ignored\n :type ignore_locks: bool\n :param set_point: indicates that given offset is a set_point\n :type set_point: bool\n \"\"\"\n if self.locked and not ignore_locks:\n return\n\n self.offset = offset\n self.set_point = offset if set_point else self.set_point\n if self.type == Link.Type.Revolute:\n self.quaternion = Quaternion.fromAxisAngle(self.joint_axis, offset)\n self.vector = self.quaternion.rotate(self.home)\n else:\n self.vector = self.home + self.joint_axis * offset\n\n def reset(self):\n \"\"\"Moves the link to it default offset\"\"\"\n self.move(self.default_offset, True)\n\n @property\n def transformation_matrix(self):\n \"\"\"Gets the pose of the link as homogeneous matrix\n\n :return: pose of the link\n 
:rtype: Matrix44\n \"\"\"\n return self.quaternion_vector_pair.toMatrix()\n\n @property\n def quaternion_vector_pair(self):\n \"\"\"Gets the pose of the link as quaternion vector\n\n :return: pose of the link\n :rtype: QuaternionVectorPair\n \"\"\"\n return QuaternionVectorPair(self.quaternion, self.vector)\n\n\ndef joint_space_trajectory(start_pose, stop_pose, step):\n \"\"\"Generates a trajectory from a start to end configuration.\n\n :param start_pose: inclusive start joint configuration/offsets\n :type start_pose: List[float]\n :param stop_pose: inclusive stop joint configuration/offsets\n :type stop_pose: List[float]\n :param step: number of steps\n :type step: int\n :return: array of configurations defining the trajectory\n :rtype: numpy.ndarray\n \"\"\"\n dof = len(start_pose)\n trajectory = np.zeros((step, dof))\n\n for i in range(dof):\n t = cubic_polynomial_trajectory(start_pose[i], stop_pose[i], step=step)\n trajectory[:, i] = t\n\n return trajectory\n\n\ndef cubic_polynomial_trajectory(p0, p1, step=100):\n \"\"\"Generates a trajectory from p0 to p1 using a cubic polynomial.\n\n :param p0: inclusive start value\n :type p0: float\n :param p1: inclusive stop value\n :type p1: float\n :param step: number of steps\n :type step: int\n :return: offsets in the trajectory\n :rtype: numpy.ndarray\n \"\"\"\n\n t0 = 0.0\n tf = step\n t = np.linspace(t0, tf, step)\n\n t0_2 = t0 * t0\n t0_3 = t0_2 * t0\n\n tf_2 = tf * tf\n tf_3 = tf_2 * tf\n\n m = [[1.0, t0, t0_2, t0_3], [0.0, 1.0, 2 * t0, 3 * t0_2], [1.0, tf, tf_2, tf_3], [0.0, 1.0, 2 * tf, 3 * tf_2]]\n\n v0 = v1 = 0.0\n b = [p0, v0, p1, v1]\n a = np.dot(np.linalg.inv(m), b)\n\n pd = np.polyval(a[::-1], t)\n\n return pd\n\n\nclass Sequence(QtCore.QObject):\n \"\"\"Creates an animation from start to end joint configuration\n\n :param frames: function to generate frame at each way point\n :type frames: Callable[numpy.ndarray, Any]\n :param start: inclusive start joint configuration/offsets\n :type start: List[float]\n :param stop: inclusive stop joint configuration/offsets\n :type stop: List[float]\n :param duration: time duration in milliseconds\n :type duration: int\n :param step: number of steps\n :type step: int\n \"\"\"\n frame_changed = QtCore.pyqtSignal()\n\n def __init__(self, frames, start, stop, duration, step):\n super().__init__()\n self.start_time = 0\n self.timer_id = 0\n self.current_frame = 0\n\n self.step = step\n self.duration = duration\n self.frames = frames\n self.trajectory = joint_space_trajectory(start, stop, step)\n\n def start(self):\n \"\"\"Starts the animation\"\"\"\n self.timer_id = self.startTimer(self.duration // self.step, QtCore.Qt.PreciseTimer)\n self.start_time = time.perf_counter()\n self.updateFrame()\n\n def stop(self):\n \"\"\"Stops the animation\"\"\"\n self.killTimer(self.timer_id)\n self.timer_id = 0\n\n if self.current_frame < self.step - 1:\n self.setFrame(self.step - 1)\n\n def isRunning(self):\n \"\"\"Indicates if the animation is running\n\n :return: indicates if the animation is running\n :rtype: bool\n \"\"\"\n if self.timer_id != 0:\n return True\n\n return False\n\n def setFrame(self, index):\n \"\"\"Sets current frame to the frame with given index. 
The current frame will be\n set to the last frame if the given index is out of bounds\n\n :param index: frame index\n :type index: int\n \"\"\"\n if self.current_frame == index:\n return\n\n self.current_frame = index if -1 < index < self.step else self.step - 1\n self.frames(self.trajectory[self.current_frame, :])\n self.frame_changed.emit()\n\n def updateFrame(self):\n \"\"\"Updates the current frame to match the current time\"\"\"\n percent = (time.perf_counter() - self.start_time) * 1000 / self.duration\n percent = max(0.0, min(percent, 1.0))\n self.setFrame(math.ceil(percent * self.step) - 1)\n\n def timerEvent(self, _event):\n if self.current_frame >= self.step - 1:\n self.stop()\n\n self.updateFrame()\n\n\nclass IKResult:\n \"\"\"Data class for the inverse kinematics result\n\n :param q: final configuration\n :type q: Union[List[float], numpy.ndarray]\n :param status: solver status\n :type status: IKSolver.Status\n :param pos_err: 3D position error\n :type pos_err: Union[List[float], numpy.ndarray]\n :param orient_err: 3D orientation error\n :type orient_err: Union[List[float], numpy.ndarray]\n :param pos_err_ok: flag indicates if position error is within tolerance\n :type pos_err_ok: bool\n :param orient_err_ok: flag indicates if orientation error is within tolerance\n :type orient_err_ok: bool\n \"\"\"\n def __init__(self, q, status, pos_err, orient_err, pos_err_ok, orient_err_ok):\n self.q = q\n self.status = status\n self.position_error = pos_err\n self.position_converged = pos_err_ok\n self.orientation_error = orient_err\n self.orientation_converged = orient_err_ok\n\n\nclass IKSolver:\n \"\"\"General inverse kinematics solver for serial robots. Inverse kinematics is framed as an optimization\n problem and solved using randomized global optimizer with local optimization step to refine result.\n\n :param robot: robot used in the solver\n :type robot: PositioningStack\n \"\"\"\n @unique\n class Status(Enum):\n \"\"\"Status of IK solver\"\"\"\n Converged = 0\n NotConverged = 1\n HardwareLimit = 2\n Unreachable = 3\n DeformedVectors = 4\n Failed = 5\n\n def __init__(self, robot):\n self.robot = robot\n\n def unbounds(self):\n \"\"\"Returns the unbounded limit for the robot\n\n :return: array of lower and upp limit for each joint in the robot\n :rtype: np.array(Tuple[float, float])\n \"\"\"\n\n return np.array([(-100000, 100000) if link.type == link.Type.Prismatic else (-2 * np.pi, 2 * np.pi)\n for link in self.robot.links])\n\n def __createOptimizer(self, n, tolerance, lower_bounds, upper_bounds, local_max_eval, global_max_eval):\n \"\"\"Creates an optimizer to find joint configuration that achieves specified tolerance, the number of joints\n could be less than the number of joints in robot if some joints are locked. 
Since its not possible to change\n the optimizer size after creation, re-creating the optimizer was the simplest way to accommodate locked joints\n\n :param n: number of joints configuration\n :type n: int\n :param tolerance: stopping criterion for optimizer\n :type tolerance: float\n :param lower_bounds: lower joint bounds\n :type lower_bounds: numpy.ndarray\n :param upper_bounds: upper joint bounds\n :type upper_bounds: numpy.ndarray\n :param local_max_eval: number of evaluations for local optimization\n :type local_max_eval: int\n :param global_max_eval: number of evaluations for global optimization\n :type global_max_eval: int\n \"\"\"\n nlopt.srand(10)\n self.optimizer = nlopt.opt(nlopt.G_MLSL, n)\n self.optimizer.set_lower_bounds(lower_bounds)\n self.optimizer.set_upper_bounds(upper_bounds)\n self.optimizer.set_min_objective(self.objective)\n self.optimizer.set_stopval(tolerance)\n self.optimizer.set_maxeval(global_max_eval)\n self.optimizer.set_ftol_abs(1e-6)\n\n opt = nlopt.opt(nlopt.LD_SLSQP, n)\n opt.set_maxeval(local_max_eval)\n opt.set_ftol_abs(1e-6)\n self.optimizer.set_local_optimizer(opt)\n\n def __gradient(self, q, epsilon, f0):\n \"\"\"Computes the gradient of objective function at configuration q using finite difference\n\n :param q: joint configuration candidate\n :type q: numpy.ndarray\n :param epsilon: increment used to determine gradient\n :type epsilon: float\n :param f0: objective error\n :type f0: float\n :return: approximate gradient\n :rtype: numpy.ndarray\n \"\"\"\n grad = np.zeros((len(q), ))\n ei = np.zeros((len(q), ))\n for k in range(len(q)):\n ei[k] = 1.0\n d = epsilon * ei\n grad[k] = (self.objective(q + d, np.array([])) - f0) / d[k]\n ei[k] = 0.0\n return grad\n\n def objective(self, q, gradient):\n \"\"\"Optimization objective\n\n :param q: joint configuration candidate\n :type q: numpy.ndarray\n :param gradient: gradient\n :type gradient: numpy.ndarray\n :return: objective error\n :rtype: float\n \"\"\"\n conf = self.start.copy()\n conf[self.active_joints] = q\n matrix = self.robot.fkine(conf) @ self.robot.tool_link\n\n residuals = np.zeros(6)\n residuals[0:3] = self.target_position - (matrix[0:3, 0:3] @ self.current_position + matrix[0:3, 3])\n\n if self.current_orientation.shape[0] == 1:\n v1 = matrix[0:3, 0:3] @ self.current_orientation[0]\n v2 = self.target_orientation[0]\n angle, axis = angle_axis_btw_vectors(v1, v2)\n else:\n v1 = np.append(self.current_orientation @ matrix[0:3, 0:3].transpose(), [0., 0., 0.]).reshape(-1, 3)\n v2 = np.append(self.target_orientation, [0., 0., 0.]).reshape(-1, 3)\n result = rigid_transform(v1, v2)\n angle, axis = matrix_to_angle_axis(result.matrix)\n\n residuals[3:6] = math.degrees(angle) * axis\n error = np.dot(residuals, residuals)\n if error < self.best_result:\n self.best_result = error\n self.best_conf = conf\n\n if gradient.size > 0:\n gradient[:] = self.__gradient(q, 1e-6, error)\n\n return error\n\n def solve(self,\n current_pose,\n target_pose,\n start=None,\n tol=(1e-2, 1.0),\n bounded=True,\n local_max_eval=1000,\n global_max_eval=100):\n \"\"\"Finds the configuration that moves current pose to target pose within specified tolerance.\n\n :param current_pose: current position and vector orientation\n :type current_pose: Tuple[numpy.ndarray, numpy.ndarray]\n :param target_pose: target position and vector orientation\n :type target_pose: Tuple[numpy.ndarray, numpy.ndarray]\n :param start: starting joint configuration if None current configuration is used\n :type start: Union[None, numpy.ndarray]\n :param 
tol: position and orientation convergence tolerance\n :type tol: Tuple[float, float]\n :param bounded: indicates if joint bounds should be used\n :type bounded: bool\n :param local_max_eval: number of evaluations for local optimization\n :type local_max_eval: int\n :param global_max_eval: number of evaluations for global optimization\n :type global_max_eval: int\n :return: result from the inverse kinematics optimization\n :rtype: IKResult\n \"\"\"\n self.status = IKSolver.Status.NotConverged\n\n self.tolerance = tol\n stop_eval_tol = min(tol)**2\n self.target_position, self.target_orientation = target_pose\n self.current_position, self.current_orientation = current_pose\n\n self.best_conf = np.array(self.robot.set_points, dtype=float)\n self.best_result = np.inf\n\n self.start = self.best_conf if start is None else start\n self.active_joints = [not link.locked for link in self.robot.links]\n q0 = self.start[self.active_joints]\n\n # Using very large value to simulate unbounded joints\n bounds = self.unbounds()\n\n if bounded:\n active_limits = [not link.ignore_limits for link in self.robot.links]\n real_bounds = np.array([(link.lower_limit, link.upper_limit) for link in self.robot.links])\n bounds[active_limits] = real_bounds[active_limits]\n\n lower_bounds, upper_bounds = list(zip(*bounds[self.active_joints]))\n\n q0 = np.clip(q0, lower_bounds, upper_bounds) # ensure starting config is bounded avoids crash\n\n try:\n self.__createOptimizer(q0.size, stop_eval_tol, lower_bounds, upper_bounds, local_max_eval, global_max_eval)\n self.optimizer.optimize(q0)\n except nlopt.RoundoffLimited:\n logging.exception(\"Roundoff Error occurred during inverse kinematics\")\n except RuntimeError:\n self.status = IKSolver.Status.Failed\n logging.exception(\"Unknown runtime error occurred during inverse kinematics\")\n\n best_conf = self.best_conf\n residual_error = self.computeResidualError()\n match = 0\n if self.current_orientation.shape[0] > 1:\n v1 = np.append(self.current_orientation, [0., 0., 0.]).reshape(-1, 3)\n v2 = np.append(self.target_orientation, [0., 0., 0.]).reshape(-1, 3)\n result = rigid_transform(v1, v2)\n match = result.total\n\n if np.isfinite(best_conf).all():\n if residual_error[2] and residual_error[3]:\n self.status = IKSolver.Status.DeformedVectors if match > VECTOR_EPS else IKSolver.Status.Converged\n elif not self.reachabilityCheck():\n self.status = IKSolver.Status.Unreachable\n elif bounded and self.jointLimitCheck(self.best_conf[self.active_joints], stop_eval_tol,\n local_max_eval // 10, global_max_eval // 10):\n self.status = IKSolver.Status.HardwareLimit\n else:\n self.status = IKSolver.Status.NotConverged\n\n return IKResult(best_conf, self.status, *residual_error)\n\n def jointLimitCheck(self, q0, stop_eval_tol, local_max_eval, global_max_eval):\n \"\"\"Checks if the simulation fails because of joint limits. 
This runs the simulation without\n joint limits to check if non convergence is because of joint limits\n\n :param q0: starting configuration\n :type q0: numpy.ndarray\n :param stop_eval_tol: stopping tolerance\n :type stop_eval_tol: float\n :param local_max_eval: number of evaluations for local optimization\n :type local_max_eval: int\n :param global_max_eval: number of evaluations for global optimization\n :type global_max_eval: int\n :return: indicates if the ik solution converged without bounds\n :rtype: bool\n \"\"\"\n bounds = self.unbounds()\n lower_bounds, upper_bounds = list(zip(*bounds[self.active_joints]))\n try:\n self.__createOptimizer(q0.size, stop_eval_tol, lower_bounds, upper_bounds, local_max_eval, global_max_eval)\n self.optimizer.optimize(q0)\n except nlopt.RoundoffLimited:\n logging.exception(\"Roundoff Error occurred during checkJointLimit\")\n except RuntimeError:\n logging.exception(\"Unknown runtime error occurred during checkJointLimit\")\n\n residual_error = self.computeResidualError()\n if residual_error[2] and residual_error[3]:\n return True\n\n return False\n\n def reachabilityCheck(self):\n \"\"\"Checks if the orientation can be reached by the positioning system. This assumes that positioners\n with more than one non-parallel revolute joints can achieve any orientation\n\n :return: indicates that the orientation can be reached by the positioning system\n :rtype: bool\n \"\"\"\n revolute_axis = None\n for index, link in enumerate(self.robot.links):\n if not self.active_joints[index]:\n continue\n\n if link.type == link.Type.Prismatic:\n continue\n\n if revolute_axis is None:\n revolute_axis = link.joint_axis\n else:\n if (1 - abs(np.dot(revolute_axis, link.joint_axis))) > np.radians(self.tolerance[1]):\n return True\n\n if self.current_orientation.shape[0] == 1:\n v1 = self.current_orientation[0]\n v2 = self.target_orientation[0]\n angle, axis = angle_axis_btw_vectors(v1, v2)\n else:\n v1 = np.append(self.current_orientation, [0., 0., 0.]).reshape(-1, 3)\n v2 = np.append(self.target_orientation, [0., 0., 0.]).reshape(-1, 3)\n result = rigid_transform(v1, v2)\n angle, axis = matrix_to_angle_axis(result.matrix)\n\n if revolute_axis is None and abs(angle) < 1e-2:\n return True\n\n if revolute_axis is not None and (1 - abs(np.dot(revolute_axis, axis))) < np.radians(self.tolerance[1]):\n return True\n\n return False\n\n def computeResidualError(self):\n \"\"\"Computes the residual error and checks converges, the result is a tuple in the format\n [position_error, orientation_error, position_error_flag, orient_error_flag]\n\n :return: 3D position and orientation error and flags indicating convergence\n :rtype: Tuple[numpy.ndarray, numpy.ndarray, bool, bool]\n \"\"\"\n matrix = self.robot.fkine(self.best_conf) @ self.robot.tool_link\n position_error = self.target_position - (matrix[0:3, 0:3] @ self.current_position + matrix[0:3, 3])\n position_error_good = trunc(np.linalg.norm(position_error), 3) <= self.tolerance[0]\n\n if self.current_orientation.shape[0] == 1:\n v1 = matrix[0:3, 0:3] @ self.current_orientation[0]\n v2 = self.target_orientation[0]\n matrix = rotation_btw_vectors(v1, v2)\n else:\n v1 = np.append(self.current_orientation @ matrix[0:3, 0:3].transpose(), [0., 0., 0.]).reshape(-1, 3)\n v2 = np.append(self.target_orientation, [0., 0., 0.]).reshape(-1, 3)\n matrix = rigid_transform(v1, v2).matrix\n\n orientation_error = np.degrees(xyz_eulers_from_matrix(matrix))\n orient_error_good = trunc(np.linalg.norm(orientation_error), 3) <= self.tolerance[1]\n\n 
return position_error, orientation_error, position_error_good, orient_error_good\n"
] |
[
[
"numpy.dot",
"numpy.radians",
"numpy.linspace",
"numpy.clip",
"numpy.linalg.inv",
"numpy.isfinite",
"numpy.degrees",
"numpy.linalg.norm",
"numpy.copy",
"numpy.append",
"numpy.array",
"numpy.polyval",
"numpy.zeros"
]
] |
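The inverse-kinematics cell above approximates the objective's gradient in `__gradient` by perturbing one joint at a time with a one-sided finite difference. A minimal standalone sketch of that idea follows, assuming only a scalar-valued `objective(q)`; the function name `finite_difference_gradient` and the toy quadratic objective are illustrative, and the NLopt- and robot-specific parts are omitted.

```python
import numpy as np

def finite_difference_gradient(objective, q, epsilon=1e-6):
    """One-sided finite-difference gradient of a scalar objective at q."""
    f0 = objective(q)                   # baseline value, reused for every component
    grad = np.zeros(len(q))
    for k in range(len(q)):
        d = np.zeros(len(q))
        d[k] = epsilon                  # perturb a single coordinate (joint)
        grad[k] = (objective(q + d) - f0) / epsilon
    return grad

# Toy usage: the gradient of q.q at q is 2q.
if __name__ == "__main__":
    q = np.array([0.5, -1.0, 2.0])
    grad = finite_difference_gradient(lambda x: float(np.dot(x, x)), q)
    print(grad)  # approximately [1.0, -2.0, 4.0]
```

Reusing the baseline value `f0` for every component, as the cell above also does, keeps the cost at n + 1 objective evaluations per gradient instead of 2n.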
phykn/smatrix
|
[
"d23655a5c96500e5a5b8905ff3733baaf89dffdb"
] |
[
"helper.py"
] |
[
"import numpy as np\r\n\r\ndef constant_refractive_index(n, ws):\r\n '''\r\n Args:\r\n n (float): constant refractive index\r\n ws (float list): wavenumber\r\n Return:\r\n refractive_index (complex array): refractive index\r\n '''\r\n refractive_index = complex(n, 0)\r\n refractive_index = refractive_index + np.array(ws) * 0\r\n return refractive_index\r\n\r\ndef set_layer(refractive_index, thickness=0, coherence=True):\r\n '''\r\n Args:\r\n refractive_index (complex array): complex refractive index\r\n thickness (float): thickness of layer (unit:cm)\r\n coherence (boolean): coherence in layer\r\n Return:\r\n layer information (dictionary)\r\n ''' \r\n return {'refractive_index': refractive_index, 'thickness': thickness, 'coherence': coherence}"
] |
[
[
"numpy.array"
]
] |
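Because the `helper.py` cell above defines `constant_refractive_index` and `set_layer` in full, a short usage sketch is possible. The two helpers are restated here so the snippet runs on its own; the wavenumber grid, the index value 3.42, and the variable names are illustrative, not taken from the repository.

```python
import numpy as np

# Restated from the helper.py cell above, for a self-contained run.
def constant_refractive_index(n, ws):
    refractive_index = complex(n, 0)
    return refractive_index + np.array(ws) * 0   # broadcast to the wavenumber grid

def set_layer(refractive_index, thickness=0, coherence=True):
    return {'refractive_index': refractive_index,
            'thickness': thickness,
            'coherence': coherence}

ws = np.linspace(100, 4000, 5)                   # illustrative wavenumbers (cm^-1)
n_layer = constant_refractive_index(3.42, ws)    # dispersionless, real-valued index
layer = set_layer(n_layer, thickness=0.05, coherence=True)
print(layer['refractive_index'].shape, layer['thickness'])  # (5,) 0.05
```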
JRaidal/Bayesian2D
|
[
"67d130d56566e68075317f6f520ac6607604f8fe"
] |
[
"Bayesian2D/tests/test_acquisition.py"
] |
[
"import unittest\nfrom Bayesian2D.tools import acquisition\nimport numpy as np\nfrom sklearn.gaussian_process import GaussianProcessRegressor\nfrom sklearn import preprocessing\nfrom sklearn.gaussian_process.kernels import Matern\n\nclass TestAcquisition(unittest.TestCase):\n\n\tdef setUp(self):\n\t\tself.XY = np.array([[1, 2], [3, 4]])\n\t\tself.x_bounds = ([-10, 10])\n\t\tself.y_bounds = ([-10, 10])\n\t\tself.e = 0.5\n\t\tself.max_min = 'minimum' \n\t\tself.model = GaussianProcessRegressor(kernel = Matern(), alpha = 1e-10)\n\n\t\t\n\tdef tearDown(self):\n\t\tself.XY = 0\n\t\tself.x_bounds = 0\n\t\tself.y_bounds = 0\n\t\tself.e = 0\n\t\tself.max_min = '' \n\t\t\n\tdef test_output_float(self):\n\t\t\n\t\tx, y = acquisition(self.XY, self.x_bounds, self.y_bounds, self.e, self.model, self.max_min)\n\n\t\tself.assertEqual(type(x), float and type(y), float)\n\n"
] |
[
[
"numpy.array",
"sklearn.gaussian_process.kernels.Matern"
]
] |
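One detail worth noting in the test cell above: `assertEqual(type(x), float and type(y), float)` evaluates `float and type(y)` first, which yields `type(y)`, so the call compares `type(x)` with `type(y)` and passes `float` as the assertion message rather than checking both values against `float`. A sketch of per-value type assertions follows; the class name and the stubbed return values stand in for the real `acquisition(...)` call, which needs the model and bounds built in the original `setUp`.

```python
import unittest

class TestAcquisitionOutputs(unittest.TestCase):
    def test_output_float(self):
        # Stand-in for acquisition(XY, x_bounds, y_bounds, e, model, max_min),
        # which is assumed to return two Python floats as in the original test.
        x, y = 1.23, 4.56
        self.assertIsInstance(x, float)   # each output checked independently
        self.assertIsInstance(y, float)

if __name__ == "__main__":
    unittest.main()
```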
austiezr/pandas
|
[
"4c8d66ecfe2b13607afd254443979b1ff842b6c1"
] |
[
"pandas/core/internals/blocks.py"
] |
[
"from datetime import datetime, timedelta\nimport inspect\nimport re\nfrom typing import TYPE_CHECKING, Any, List, Optional, Type, Union, cast\nimport warnings\n\nimport numpy as np\n\nfrom pandas._libs import NaT, algos as libalgos, internals as libinternals, lib, writers\nfrom pandas._libs.internals import BlockPlacement\nfrom pandas._libs.tslibs import conversion\nfrom pandas._libs.tslibs.timezones import tz_compare\nfrom pandas._typing import ArrayLike, Scalar, Shape\nfrom pandas.util._validators import validate_bool_kwarg\n\nfrom pandas.core.dtypes.cast import (\n astype_nansafe,\n convert_scalar_for_putitemlike,\n find_common_type,\n infer_dtype_from,\n infer_dtype_from_scalar,\n maybe_box_datetimelike,\n maybe_downcast_numeric,\n maybe_downcast_to_dtype,\n maybe_infer_dtype_type,\n maybe_promote,\n maybe_upcast,\n soft_convert_objects,\n)\nfrom pandas.core.dtypes.common import (\n DT64NS_DTYPE,\n TD64NS_DTYPE,\n is_bool_dtype,\n is_categorical_dtype,\n is_datetime64_any_dtype,\n is_datetime64_dtype,\n is_datetime64tz_dtype,\n is_dtype_equal,\n is_extension_array_dtype,\n is_float,\n is_float_dtype,\n is_integer,\n is_integer_dtype,\n is_interval_dtype,\n is_list_like,\n is_object_dtype,\n is_period_dtype,\n is_re,\n is_re_compilable,\n is_sparse,\n is_timedelta64_dtype,\n pandas_dtype,\n)\nfrom pandas.core.dtypes.dtypes import ExtensionDtype\nfrom pandas.core.dtypes.generic import (\n ABCDataFrame,\n ABCIndexClass,\n ABCPandasArray,\n ABCSeries,\n)\nfrom pandas.core.dtypes.missing import is_valid_nat_for_dtype, isna, isna_compat\n\nimport pandas.core.algorithms as algos\nfrom pandas.core.array_algos.replace import compare_or_regex_search, replace_regex\nfrom pandas.core.array_algos.transforms import shift\nfrom pandas.core.arrays import (\n Categorical,\n DatetimeArray,\n ExtensionArray,\n PandasArray,\n PandasDtype,\n TimedeltaArray,\n)\nfrom pandas.core.base import PandasObject\nimport pandas.core.common as com\nfrom pandas.core.construction import extract_array\nfrom pandas.core.indexers import (\n check_setitem_lengths,\n is_empty_indexer,\n is_scalar_indexer,\n)\nimport pandas.core.missing as missing\nfrom pandas.core.nanops import nanpercentile\n\nif TYPE_CHECKING:\n from pandas import Index\n\n\nclass Block(PandasObject):\n \"\"\"\n Canonical n-dimensional unit of homogeneous dtype contained in a pandas\n data structure\n\n Index-ignorant; let the container take care of that\n \"\"\"\n\n values: Union[np.ndarray, ExtensionArray]\n\n __slots__ = [\"_mgr_locs\", \"values\", \"ndim\"]\n is_numeric = False\n is_float = False\n is_integer = False\n is_complex = False\n is_datetime = False\n is_datetimetz = False\n is_timedelta = False\n is_bool = False\n is_object = False\n is_extension = False\n _can_hold_na = False\n _can_consolidate = True\n _validate_ndim = True\n\n @classmethod\n def _simple_new(\n cls, values: ArrayLike, placement: BlockPlacement, ndim: int\n ) -> \"Block\":\n \"\"\"\n Fastpath constructor, does *no* validation\n \"\"\"\n obj = object.__new__(cls)\n obj.ndim = ndim\n obj.values = values\n obj._mgr_locs = placement\n return obj\n\n def __init__(self, values, placement, ndim: int):\n \"\"\"\n Parameters\n ----------\n values : np.ndarray or ExtensionArray\n placement : BlockPlacement (or castable)\n ndim : int\n 1 for SingleBlockManager/Series, 2 for BlockManager/DataFrame\n \"\"\"\n # TODO(EA2D): ndim will be unnecessary with 2D EAs\n self.ndim = self._check_ndim(values, ndim)\n self.mgr_locs = placement\n self.values = self._maybe_coerce_values(values)\n\n 
if self._validate_ndim and self.ndim and len(self.mgr_locs) != len(self.values):\n raise ValueError(\n f\"Wrong number of items passed {len(self.values)}, \"\n f\"placement implies {len(self.mgr_locs)}\"\n )\n\n def _maybe_coerce_values(self, values):\n \"\"\"\n Ensure we have correctly-typed values.\n\n Parameters\n ----------\n values : np.ndarray, ExtensionArray, Index\n\n Returns\n -------\n np.ndarray or ExtensionArray\n \"\"\"\n return values\n\n def _check_ndim(self, values, ndim):\n \"\"\"\n ndim inference and validation.\n\n Infers ndim from 'values' if not provided to __init__.\n Validates that values.ndim and ndim are consistent if and only if\n the class variable '_validate_ndim' is True.\n\n Parameters\n ----------\n values : array-like\n ndim : int or None\n\n Returns\n -------\n ndim : int\n\n Raises\n ------\n ValueError : the number of dimensions do not match\n \"\"\"\n if ndim is None:\n ndim = values.ndim\n\n if self._validate_ndim and values.ndim != ndim:\n raise ValueError(\n \"Wrong number of dimensions. \"\n f\"values.ndim != ndim [{values.ndim} != {ndim}]\"\n )\n return ndim\n\n @property\n def _holder(self):\n \"\"\"\n The array-like that can hold the underlying values.\n\n None for 'Block', overridden by subclasses that don't\n use an ndarray.\n \"\"\"\n return None\n\n @property\n def _consolidate_key(self):\n return self._can_consolidate, self.dtype.name\n\n @property\n def is_view(self) -> bool:\n \"\"\" return a boolean if I am possibly a view \"\"\"\n values = self.values\n values = cast(np.ndarray, values)\n return values.base is not None\n\n @property\n def is_categorical(self) -> bool:\n return self._holder is Categorical\n\n @property\n def is_datelike(self) -> bool:\n \"\"\" return True if I am a non-datelike \"\"\"\n return self.is_datetime or self.is_timedelta\n\n def external_values(self):\n \"\"\"\n The array that Series.values returns (public attribute).\n\n This has some historical constraints, and is overridden in block\n subclasses to return the correct array (e.g. period returns\n object ndarray and datetimetz a datetime64[ns] ndarray instead of\n proper extension array).\n \"\"\"\n return self.values\n\n def internal_values(self):\n \"\"\"\n The array that Series._values returns (internal values).\n \"\"\"\n return self.values\n\n def array_values(self) -> ExtensionArray:\n \"\"\"\n The array that Series.array returns. 
Always an ExtensionArray.\n \"\"\"\n return PandasArray(self.values)\n\n def get_values(self, dtype=None):\n \"\"\"\n return an internal format, currently just the ndarray\n this is often overridden to handle to_dense like operations\n \"\"\"\n if is_object_dtype(dtype):\n return self.values.astype(object)\n return self.values\n\n def get_block_values_for_json(self) -> np.ndarray:\n \"\"\"\n This is used in the JSON C code.\n \"\"\"\n # TODO(EA2D): reshape will be unnecessary with 2D EAs\n return np.asarray(self.values).reshape(self.shape)\n\n @property\n def fill_value(self):\n return np.nan\n\n @property\n def mgr_locs(self):\n return self._mgr_locs\n\n @mgr_locs.setter\n def mgr_locs(self, new_mgr_locs):\n if not isinstance(new_mgr_locs, libinternals.BlockPlacement):\n new_mgr_locs = libinternals.BlockPlacement(new_mgr_locs)\n\n self._mgr_locs = new_mgr_locs\n\n def make_block(self, values, placement=None) -> \"Block\":\n \"\"\"\n Create a new block, with type inference propagate any values that are\n not specified\n \"\"\"\n if placement is None:\n placement = self.mgr_locs\n if self.is_extension:\n values = _block_shape(values, ndim=self.ndim)\n\n return make_block(values, placement=placement, ndim=self.ndim)\n\n def make_block_same_class(self, values, placement=None, ndim=None):\n \"\"\" Wrap given values in a block of same type as self. \"\"\"\n if placement is None:\n placement = self.mgr_locs\n if ndim is None:\n ndim = self.ndim\n return type(self)(values, placement=placement, ndim=ndim)\n\n def __repr__(self) -> str:\n # don't want to print out all of the items here\n name = type(self).__name__\n if self.ndim == 1:\n result = f\"{name}: {len(self)} dtype: {self.dtype}\"\n else:\n\n shape = \" x \".join(str(s) for s in self.shape)\n result = f\"{name}: {self.mgr_locs.indexer}, {shape}, dtype: {self.dtype}\"\n\n return result\n\n def __len__(self) -> int:\n return len(self.values)\n\n def __getstate__(self):\n return self.mgr_locs.indexer, self.values\n\n def __setstate__(self, state):\n self.mgr_locs = libinternals.BlockPlacement(state[0])\n self.values = state[1]\n self.ndim = self.values.ndim\n\n def _slice(self, slicer):\n \"\"\" return a slice of my values \"\"\"\n\n return self.values[slicer]\n\n def getitem_block(self, slicer, new_mgr_locs=None):\n \"\"\"\n Perform __getitem__-like, return result as block.\n\n As of now, only supports slices that preserve dimensionality.\n \"\"\"\n if new_mgr_locs is None:\n axis0_slicer = slicer[0] if isinstance(slicer, tuple) else slicer\n new_mgr_locs = self.mgr_locs[axis0_slicer]\n elif not isinstance(new_mgr_locs, BlockPlacement):\n new_mgr_locs = BlockPlacement(new_mgr_locs)\n\n new_values = self._slice(slicer)\n\n if self._validate_ndim and new_values.ndim != self.ndim:\n raise ValueError(\"Only same dim slicing is allowed\")\n\n return type(self)._simple_new(new_values, new_mgr_locs, self.ndim)\n\n @property\n def shape(self):\n return self.values.shape\n\n @property\n def dtype(self):\n return self.values.dtype\n\n def iget(self, i):\n return self.values[i]\n\n def set_inplace(self, locs, values):\n \"\"\"\n Modify block values in-place with new item value.\n\n Notes\n -----\n `set` never creates a new array or new Block, whereas `setitem` _may_\n create a new array and always creates a new Block.\n \"\"\"\n self.values[locs] = values\n\n def delete(self, loc) -> None:\n \"\"\"\n Delete given loc(-s) from block in-place.\n \"\"\"\n self.values = np.delete(self.values, loc, 0)\n self.mgr_locs = self.mgr_locs.delete(loc)\n\n def 
apply(self, func, **kwargs) -> List[\"Block\"]:\n \"\"\"\n apply the function to my values; return a block if we are not\n one\n \"\"\"\n with np.errstate(all=\"ignore\"):\n result = func(self.values, **kwargs)\n\n return self._split_op_result(result)\n\n def reduce(self, func, ignore_failures: bool = False) -> List[\"Block\"]:\n # We will apply the function and reshape the result into a single-row\n # Block with the same mgr_locs; squeezing will be done at a higher level\n assert self.ndim == 2\n\n try:\n result = func(self.values)\n except (TypeError, NotImplementedError):\n if ignore_failures:\n return []\n raise\n\n if np.ndim(result) == 0:\n # TODO(EA2D): special case not needed with 2D EAs\n res_values = np.array([[result]])\n else:\n res_values = result.reshape(-1, 1)\n\n nb = self.make_block(res_values)\n return [nb]\n\n def _split_op_result(self, result) -> List[\"Block\"]:\n # See also: split_and_operate\n if is_extension_array_dtype(result) and result.ndim > 1:\n # TODO(EA2D): unnecessary with 2D EAs\n # if we get a 2D ExtensionArray, we need to split it into 1D pieces\n nbs = []\n for i, loc in enumerate(self.mgr_locs):\n vals = result[i]\n block = self.make_block(values=vals, placement=[loc])\n nbs.append(block)\n return nbs\n\n if not isinstance(result, Block):\n result = self.make_block(result)\n\n return [result]\n\n def fillna(\n self, value, limit=None, inplace: bool = False, downcast=None\n ) -> List[\"Block\"]:\n \"\"\"\n fillna on the block with the value. If we fail, then convert to\n ObjectBlock and try again\n \"\"\"\n inplace = validate_bool_kwarg(inplace, \"inplace\")\n\n mask = isna(self.values)\n mask = _extract_bool_array(mask)\n if limit is not None:\n limit = libalgos.validate_limit(None, limit=limit)\n mask[mask.cumsum(self.ndim - 1) > limit] = False\n\n if not self._can_hold_na:\n if inplace:\n return [self]\n else:\n return [self.copy()]\n\n if self._can_hold_element(value):\n nb = self if inplace else self.copy()\n nb._putmask_simple(mask, value)\n # TODO: should be nb._maybe_downcast?\n return self._maybe_downcast([nb], downcast)\n\n # we can't process the value, but nothing to do\n if not mask.any():\n return [self] if inplace else [self.copy()]\n\n # operate column-by-column\n def f(mask, val, idx):\n block = self.coerce_to_target_dtype(value)\n\n # slice out our block\n if idx is not None:\n # i.e. self.ndim == 2\n block = block.getitem_block(slice(idx, idx + 1))\n return block.fillna(value, limit=limit, inplace=inplace, downcast=None)\n\n return self.split_and_operate(None, f, inplace)\n\n def _split(self) -> List[\"Block\"]:\n \"\"\"\n Split a block into a list of single-column blocks.\n \"\"\"\n assert self.ndim == 2\n\n new_blocks = []\n for i, ref_loc in enumerate(self.mgr_locs):\n vals = self.values[slice(i, i + 1)]\n\n nb = self.make_block(vals, [ref_loc])\n new_blocks.append(nb)\n return new_blocks\n\n def split_and_operate(\n self, mask, f, inplace: bool, ignore_failures: bool = False\n ) -> List[\"Block\"]:\n \"\"\"\n split the block per-column, and apply the callable f\n per-column, return a new block for each. 
Handle\n masking which will not change a block unless needed.\n\n Parameters\n ----------\n mask : 2-d boolean mask\n f : callable accepting (1d-mask, 1d values, indexer)\n inplace : bool\n ignore_failures : bool, default False\n\n Returns\n -------\n list of blocks\n \"\"\"\n if mask is None:\n mask = np.broadcast_to(True, shape=self.shape)\n\n new_values = self.values\n\n def make_a_block(nv, ref_loc):\n if isinstance(nv, list):\n assert len(nv) == 1, nv\n assert isinstance(nv[0], Block)\n block = nv[0]\n else:\n # Put back the dimension that was taken from it and make\n # a block out of the result.\n nv = _block_shape(nv, ndim=self.ndim)\n block = self.make_block(values=nv, placement=ref_loc)\n return block\n\n # ndim == 1\n if self.ndim == 1:\n if mask.any():\n nv = f(mask, new_values, None)\n else:\n nv = new_values if inplace else new_values.copy()\n block = make_a_block(nv, self.mgr_locs)\n return [block]\n\n # ndim > 1\n new_blocks = []\n for i, ref_loc in enumerate(self.mgr_locs):\n m = mask[i]\n v = new_values[i]\n\n # need a new block\n if m.any() or m.size == 0:\n # Apply our function; we may ignore_failures if this is a\n # reduction that is dropping nuisance columns GH#37827\n try:\n nv = f(m, v, i)\n except TypeError:\n if ignore_failures:\n continue\n else:\n raise\n else:\n nv = v if inplace else v.copy()\n\n block = make_a_block(nv, [ref_loc])\n new_blocks.append(block)\n\n return new_blocks\n\n def _maybe_downcast(self, blocks: List[\"Block\"], downcast=None) -> List[\"Block\"]:\n\n # no need to downcast our float\n # unless indicated\n if downcast is None and (self.is_float or self.is_datelike):\n return blocks\n\n return extend_blocks([b.downcast(downcast) for b in blocks])\n\n def downcast(self, dtypes=None) -> List[\"Block\"]:\n \"\"\" try to downcast each item to the dict of dtypes if present \"\"\"\n # turn it off completely\n if dtypes is False:\n return [self]\n\n values = self.values\n\n if self.ndim == 1:\n\n # try to cast all non-floats here\n if dtypes is None:\n dtypes = \"infer\"\n\n nv = maybe_downcast_to_dtype(values, dtypes)\n return [self.make_block(nv)]\n\n # ndim > 1\n if dtypes is None:\n return [self]\n\n if not (dtypes == \"infer\" or isinstance(dtypes, dict)):\n raise ValueError(\n \"downcast must have a dictionary or 'infer' as its argument\"\n )\n elif dtypes != \"infer\":\n raise AssertionError(\"dtypes as dict is not supported yet\")\n\n # operate column-by-column\n # this is expensive as it splits the blocks items-by-item\n def f(mask, val, idx):\n val = maybe_downcast_to_dtype(val, dtype=\"infer\")\n return val\n\n return self.split_and_operate(None, f, False)\n\n def astype(self, dtype, copy: bool = False, errors: str = \"raise\"):\n \"\"\"\n Coerce to the new dtype.\n\n Parameters\n ----------\n dtype : str, dtype convertible\n copy : bool, default False\n copy if indicated\n errors : str, {'raise', 'ignore'}, default 'ignore'\n - ``raise`` : allow exceptions to be raised\n - ``ignore`` : suppress exceptions. On error return original object\n\n Returns\n -------\n Block\n \"\"\"\n errors_legal_values = (\"raise\", \"ignore\")\n\n if errors not in errors_legal_values:\n invalid_arg = (\n \"Expected value of kwarg 'errors' to be one of \"\n f\"{list(errors_legal_values)}. Supplied value is '{errors}'\"\n )\n raise ValueError(invalid_arg)\n\n if inspect.isclass(dtype) and issubclass(dtype, ExtensionDtype):\n msg = (\n f\"Expected an instance of {dtype.__name__}, \"\n \"but got the class instead. 
Try instantiating 'dtype'.\"\n )\n raise TypeError(msg)\n\n if dtype is not None:\n dtype = pandas_dtype(dtype)\n\n # may need to convert to categorical\n if is_categorical_dtype(dtype):\n\n if is_categorical_dtype(self.values.dtype):\n # GH 10696/18593: update an existing categorical efficiently\n return self.make_block(self.values.astype(dtype, copy=copy))\n\n return self.make_block(Categorical(self.values, dtype=dtype))\n\n dtype = pandas_dtype(dtype)\n\n # astype processing\n if is_dtype_equal(self.dtype, dtype):\n if copy:\n return self.copy()\n return self\n\n # force the copy here\n if self.is_extension:\n try:\n values = self.values.astype(dtype)\n except (ValueError, TypeError):\n if errors == \"ignore\":\n values = self.values\n else:\n raise\n else:\n if issubclass(dtype.type, str):\n\n # use native type formatting for datetime/tz/timedelta\n if self.is_datelike:\n values = self.to_native_types().values\n\n # astype formatting\n else:\n # Because we have neither is_extension nor is_datelike,\n # self.values already has the correct shape\n values = self.values\n\n else:\n values = self.get_values(dtype=dtype)\n\n # _astype_nansafe works fine with 1-d only\n vals1d = values.ravel()\n try:\n values = astype_nansafe(vals1d, dtype, copy=True)\n except (ValueError, TypeError):\n # e.g. astype_nansafe can fail on object-dtype of strings\n # trying to convert to float\n if errors == \"raise\":\n raise\n newb = self.copy() if copy else self\n return newb\n\n # TODO(EA2D): special case not needed with 2D EAs\n if isinstance(values, np.ndarray):\n values = values.reshape(self.shape)\n\n newb = self.make_block(values)\n\n if newb.is_numeric and self.is_numeric:\n if newb.shape != self.shape:\n raise TypeError(\n f\"cannot set astype for copy = [{copy}] for dtype \"\n f\"({self.dtype.name} [{self.shape}]) to different shape \"\n f\"({newb.dtype.name} [{newb.shape}])\"\n )\n return newb\n\n def convert(\n self,\n copy: bool = True,\n datetime: bool = True,\n numeric: bool = True,\n timedelta: bool = True,\n ) -> List[\"Block\"]:\n \"\"\"\n attempt to coerce any object types to better types return a copy\n of the block (if copy = True) by definition we are not an ObjectBlock\n here!\n \"\"\"\n return [self.copy()] if copy else [self]\n\n def _can_hold_element(self, element: Any) -> bool:\n \"\"\" require the same dtype as ourselves \"\"\"\n dtype = self.values.dtype.type\n tipo = maybe_infer_dtype_type(element)\n if tipo is not None:\n return issubclass(tipo.type, dtype)\n return isinstance(element, dtype)\n\n def should_store(self, value: ArrayLike) -> bool:\n \"\"\"\n Should we set self.values[indexer] = value inplace or do we need to cast?\n\n Parameters\n ----------\n value : np.ndarray or ExtensionArray\n\n Returns\n -------\n bool\n \"\"\"\n return is_dtype_equal(value.dtype, self.dtype)\n\n def to_native_types(self, na_rep=\"nan\", quoting=None, **kwargs):\n \"\"\" convert to our native types format \"\"\"\n values = self.values\n\n mask = isna(values)\n itemsize = writers.word_len(na_rep)\n\n if not self.is_object and not quoting and itemsize:\n values = values.astype(str)\n if values.dtype.itemsize / np.dtype(\"U1\").itemsize < itemsize:\n # enlarge for the na_rep\n values = values.astype(f\"<U{itemsize}\")\n else:\n values = np.array(values, dtype=\"object\")\n\n values[mask] = na_rep\n return self.make_block(values)\n\n # block actions #\n def copy(self, deep: bool = True):\n \"\"\" copy constructor \"\"\"\n values = self.values\n if deep:\n values = values.copy()\n return 
self.make_block_same_class(values, ndim=self.ndim)\n\n def replace(\n self,\n to_replace,\n value,\n inplace: bool = False,\n regex: bool = False,\n ) -> List[\"Block\"]:\n \"\"\"\n replace the to_replace value with value, possible to create new\n blocks here this is just a call to putmask. regex is not used here.\n It is used in ObjectBlocks. It is here for API compatibility.\n \"\"\"\n inplace = validate_bool_kwarg(inplace, \"inplace\")\n original_to_replace = to_replace\n\n if not self._can_hold_element(to_replace):\n # We cannot hold `to_replace`, so we know immediately that\n # replacing it is a no-op.\n # Note: If to_replace were a list, NDFrame.replace would call\n # replace_list instead of replace.\n return [self] if inplace else [self.copy()]\n\n values = self.values\n if lib.is_scalar(to_replace) and isinstance(values, np.ndarray):\n # The only non-DatetimeLike class that also has a non-trivial\n # try_coerce_args is ObjectBlock, but that overrides replace,\n # so does not get here.\n to_replace = convert_scalar_for_putitemlike(to_replace, values.dtype)\n\n mask = missing.mask_missing(values, to_replace)\n if not mask.any():\n # Note: we get here with test_replace_extension_other incorrectly\n # bc _can_hold_element is incorrect.\n return [self] if inplace else [self.copy()]\n\n if not self._can_hold_element(value):\n blk = self.astype(object)\n return blk.replace(\n to_replace=original_to_replace,\n value=value,\n inplace=True,\n regex=regex,\n )\n\n blk = self if inplace else self.copy()\n blk._putmask_simple(mask, value)\n blocks = blk.convert(numeric=False, copy=not inplace)\n return blocks\n\n def _replace_regex(\n self,\n to_replace,\n value,\n inplace: bool = False,\n convert: bool = True,\n mask=None,\n ) -> List[\"Block\"]:\n \"\"\"\n Replace elements by the given value.\n\n Parameters\n ----------\n to_replace : object or pattern\n Scalar to replace or regular expression to match.\n value : object\n Replacement object.\n inplace : bool, default False\n Perform inplace modification.\n convert : bool, default True\n If true, try to coerce any object types to better types.\n mask : array-like of bool, optional\n True indicate corresponding element is ignored.\n\n Returns\n -------\n List[Block]\n \"\"\"\n if not self._can_hold_element(to_replace):\n # i.e. 
only ObjectBlock, but could in principle include a\n # String ExtensionBlock\n return [self] if inplace else [self.copy()]\n\n rx = re.compile(to_replace)\n\n new_values = self.values if inplace else self.values.copy()\n replace_regex(new_values, rx, value, mask)\n\n block = self.make_block(new_values)\n if convert:\n nbs = block.convert(numeric=False)\n else:\n nbs = [block]\n return nbs\n\n def _replace_list(\n self,\n src_list: List[Any],\n dest_list: List[Any],\n inplace: bool = False,\n regex: bool = False,\n ) -> List[\"Block\"]:\n \"\"\"\n See BlockManager._replace_list docstring.\n \"\"\"\n # Exclude anything that we know we won't contain\n pairs = [\n (x, y) for x, y in zip(src_list, dest_list) if self._can_hold_element(x)\n ]\n if not len(pairs):\n # shortcut, nothing to replace\n return [self] if inplace else [self.copy()]\n\n src_len = len(pairs) - 1\n\n def comp(s: Scalar, mask: np.ndarray, regex: bool = False) -> np.ndarray:\n \"\"\"\n Generate a bool array by perform an equality check, or perform\n an element-wise regular expression matching\n \"\"\"\n if isna(s):\n return ~mask\n\n s = maybe_box_datetimelike(s)\n return compare_or_regex_search(self.values, s, regex, mask)\n\n if self.is_object:\n # Calculate the mask once, prior to the call of comp\n # in order to avoid repeating the same computations\n mask = ~isna(self.values)\n masks = [comp(s[0], mask, regex) for s in pairs]\n else:\n # GH#38086 faster if we know we dont need to check for regex\n masks = [missing.mask_missing(self.values, s[0]) for s in pairs]\n\n masks = [_extract_bool_array(x) for x in masks]\n\n rb = [self if inplace else self.copy()]\n for i, (src, dest) in enumerate(pairs):\n new_rb: List[\"Block\"] = []\n for blk in rb:\n m = masks[i]\n convert = i == src_len # only convert once at the end\n result = blk._replace_coerce(\n to_replace=src,\n value=dest,\n mask=m,\n inplace=inplace,\n regex=regex,\n )\n if convert and blk.is_object:\n result = extend_blocks(\n [b.convert(numeric=False, copy=True) for b in result]\n )\n new_rb.extend(result)\n rb = new_rb\n return rb\n\n def setitem(self, indexer, value):\n \"\"\"\n Attempt self.values[indexer] = value, possibly creating a new array.\n\n Parameters\n ----------\n indexer : tuple, list-like, array-like, slice\n The subset of self.values to set\n value : object\n The value being set\n\n Returns\n -------\n Block\n\n Notes\n -----\n `indexer` is a direct slice/positional indexer. `value` must\n be a compatible shape.\n \"\"\"\n transpose = self.ndim == 2\n\n if isinstance(indexer, np.ndarray) and indexer.ndim > self.ndim:\n raise ValueError(f\"Cannot set values with ndim > {self.ndim}\")\n\n # coerce None values, if appropriate\n if value is None:\n if self.is_numeric:\n value = np.nan\n\n # coerce if block dtype can store value\n values = self.values\n if self._can_hold_element(value):\n # We only get here for non-Extension Blocks, so _try_coerce_args\n # is only relevant for DatetimeBlock and TimedeltaBlock\n if lib.is_scalar(value):\n value = convert_scalar_for_putitemlike(value, values.dtype)\n\n else:\n # current dtype cannot store value, coerce to common dtype\n\n if hasattr(value, \"dtype\"):\n dtype = value.dtype\n\n elif lib.is_scalar(value) and not isna(value):\n dtype, _ = infer_dtype_from_scalar(value, pandas_dtype=True)\n\n else:\n # e.g. 
we are bool dtype and value is nan\n # TODO: watch out for case with listlike value and scalar/empty indexer\n dtype, _ = maybe_promote(np.array(value).dtype)\n return self.astype(dtype).setitem(indexer, value)\n\n dtype = find_common_type([values.dtype, dtype])\n assert not is_dtype_equal(self.dtype, dtype)\n # otherwise should have _can_hold_element\n\n return self.astype(dtype).setitem(indexer, value)\n\n # value must be storable at this moment\n if is_extension_array_dtype(getattr(value, \"dtype\", None)):\n # We need to be careful not to allow through strings that\n # can be parsed to EADtypes\n is_ea_value = True\n arr_value = value\n else:\n is_ea_value = False\n arr_value = np.array(value)\n\n if transpose:\n values = values.T\n\n # length checking\n check_setitem_lengths(indexer, value, values)\n exact_match = (\n len(arr_value.shape)\n and arr_value.shape[0] == values.shape[0]\n and arr_value.size == values.size\n )\n if is_empty_indexer(indexer, arr_value):\n # GH#8669 empty indexers\n pass\n\n elif is_scalar_indexer(indexer, self.ndim):\n # setting a single element for each dim and with a rhs that could\n # be e.g. a list; see GH#6043\n values[indexer] = value\n\n elif exact_match and is_categorical_dtype(arr_value.dtype):\n # GH25495 - If the current dtype is not categorical,\n # we need to create a new categorical block\n values[indexer] = value\n return self.make_block(Categorical(self.values, dtype=arr_value.dtype))\n\n elif exact_match and is_ea_value:\n # GH#32395 if we're going to replace the values entirely, just\n # substitute in the new array\n return self.make_block(arr_value)\n\n # if we are an exact match (ex-broadcasting),\n # then use the resultant dtype\n elif exact_match:\n # We are setting _all_ of the array's values, so can cast to new dtype\n values[indexer] = value\n\n values = values.astype(arr_value.dtype, copy=False)\n\n # set\n else:\n values[indexer] = value\n\n if transpose:\n values = values.T\n block = self.make_block(values)\n return block\n\n def _putmask_simple(self, mask: np.ndarray, value: Any):\n \"\"\"\n Like putmask but\n\n a) we do not cast on failure\n b) we do not handle repeating or truncating like numpy.\n\n Parameters\n ----------\n mask : np.ndarray[bool]\n We assume _extract_bool_array has already been called.\n value : Any\n We assume self._can_hold_element(value)\n \"\"\"\n values = self.values\n\n if lib.is_scalar(value) and isinstance(values, np.ndarray):\n value = convert_scalar_for_putitemlike(value, values.dtype)\n\n if self.is_extension or (self.is_object and not lib.is_scalar(value)):\n # GH#19266 using np.putmask gives unexpected results with listlike value\n if is_list_like(value) and len(value) == len(values):\n values[mask] = value[mask]\n else:\n values[mask] = value\n else:\n # GH#37833 np.putmask is more performant than __setitem__\n np.putmask(values, mask, value)\n\n def putmask(\n self, mask, new, inplace: bool = False, axis: int = 0, transpose: bool = False\n ) -> List[\"Block\"]:\n \"\"\"\n putmask the data to the block; it is possible that we may create a\n new dtype of block\n\n Return the resulting block(s).\n\n Parameters\n ----------\n mask : np.ndarray[bool], SparseArray[bool], or BooleanArray\n new : a ndarray/object\n inplace : bool, default False\n Perform inplace modification.\n axis : int\n transpose : bool, default False\n Set to True if self is stored with axes reversed.\n\n Returns\n -------\n List[Block]\n \"\"\"\n mask = _extract_bool_array(mask)\n assert not isinstance(new, (ABCIndexClass, 
ABCSeries, ABCDataFrame))\n\n new_values = self.values # delay copy if possible.\n # if we are passed a scalar None, convert it here\n if not is_list_like(new) and isna(new) and not self.is_object:\n # FIXME: make sure we have compatible NA\n new = self.fill_value\n\n if self._can_hold_element(new):\n # We only get here for non-Extension Blocks, so _try_coerce_args\n # is only relevant for DatetimeBlock and TimedeltaBlock\n if lib.is_scalar(new):\n new = convert_scalar_for_putitemlike(new, self.values.dtype)\n\n if transpose:\n new_values = new_values.T\n\n # If the default repeat behavior in np.putmask would go in the\n # wrong direction, then explicitly repeat and reshape new instead\n if getattr(new, \"ndim\", 0) >= 1:\n if self.ndim - 1 == new.ndim and axis == 1:\n new = np.repeat(new, new_values.shape[-1]).reshape(self.shape)\n new = new.astype(new_values.dtype)\n\n if new_values is self.values and not inplace:\n new_values = new_values.copy()\n # we require exact matches between the len of the\n # values we are setting (or is compat). np.putmask\n # doesn't check this and will simply truncate / pad\n # the output, but we want sane error messages\n #\n # TODO: this prob needs some better checking\n # for 2D cases\n if (\n is_list_like(new)\n and np.any(mask[mask])\n and getattr(new, \"ndim\", 1) == 1\n ):\n if mask[mask].shape[-1] == len(new):\n # GH 30567\n # If length of ``new`` is less than the length of ``new_values``,\n # `np.putmask` would first repeat the ``new`` array and then\n # assign the masked values hence produces incorrect result.\n # `np.place` on the other hand uses the ``new`` values at it is\n # to place in the masked locations of ``new_values``\n np.place(new_values, mask, new)\n elif mask.shape[-1] == len(new) or len(new) == 1:\n np.putmask(new_values, mask, new)\n else:\n raise ValueError(\"cannot assign mismatch length to masked array\")\n else:\n np.putmask(new_values, mask, new)\n\n # maybe upcast me\n elif mask.any():\n if transpose:\n mask = mask.T\n if isinstance(new, np.ndarray):\n new = new.T\n axis = new_values.ndim - axis - 1\n\n # Pseudo-broadcast\n if getattr(new, \"ndim\", 0) >= 1:\n if self.ndim - 1 == new.ndim:\n new_shape = list(new.shape)\n new_shape.insert(axis, 1)\n new = new.reshape(tuple(new_shape))\n\n # operate column-by-column\n def f(mask, val, idx):\n\n if idx is None:\n # ndim==1 case.\n n = new\n else:\n\n if isinstance(new, np.ndarray):\n n = np.squeeze(new[idx % new.shape[0]])\n else:\n n = np.array(new)\n\n # type of the new block\n dtype, _ = maybe_promote(n.dtype)\n\n # we need to explicitly astype here to make a copy\n n = n.astype(dtype)\n\n nv = _putmask_smart(val, mask, n)\n return nv\n\n new_blocks = self.split_and_operate(mask, f, inplace)\n return new_blocks\n\n if inplace:\n return [self]\n\n if transpose:\n if new_values is None:\n new_values = self.values if inplace else self.values.copy()\n new_values = new_values.T\n\n return [self.make_block(new_values)]\n\n def coerce_to_target_dtype(self, other):\n \"\"\"\n coerce the current block to a dtype compat for other\n we will return a block, possibly object, and not raise\n\n we can also safely try to coerce to the same dtype\n and will receive the same block\n \"\"\"\n # if we cannot then coerce to object\n dtype, _ = infer_dtype_from(other, pandas_dtype=True)\n\n if is_dtype_equal(self.dtype, dtype):\n return self\n\n if self.is_bool or is_object_dtype(dtype) or is_bool_dtype(dtype):\n # we don't upcast to bool\n return self.astype(object)\n\n elif (self.is_float or 
self.is_complex) and (\n is_integer_dtype(dtype) or is_float_dtype(dtype)\n ):\n # don't coerce float/complex to int\n return self\n\n elif self.is_datetime or is_datetime64_any_dtype(dtype):\n # The is_dtype_equal check above ensures that at most one of\n # these two conditions hold, so we must cast to object.\n return self.astype(object)\n\n elif self.is_timedelta or is_timedelta64_dtype(dtype):\n # The is_dtype_equal check above ensures that at most one of\n # these two conditions hold, so we must cast to object.\n return self.astype(object)\n\n try:\n return self.astype(dtype)\n except (ValueError, TypeError, OverflowError):\n return self.astype(object)\n\n def interpolate(\n self,\n method: str = \"pad\",\n axis: int = 0,\n index: Optional[\"Index\"] = None,\n inplace: bool = False,\n limit: Optional[int] = None,\n limit_direction: str = \"forward\",\n limit_area: Optional[str] = None,\n fill_value: Optional[Any] = None,\n coerce: bool = False,\n downcast: Optional[str] = None,\n **kwargs,\n ):\n\n inplace = validate_bool_kwarg(inplace, \"inplace\")\n\n if not self._can_hold_na:\n # If there are no NAs, then interpolate is a no-op\n return self if inplace else self.copy()\n\n # a fill na type method\n try:\n m = missing.clean_fill_method(method)\n except ValueError:\n m = None\n\n if m is not None:\n if fill_value is not None:\n # similar to validate_fillna_kwargs\n raise ValueError(\"Cannot pass both fill_value and method\")\n\n return self._interpolate_with_fill(\n method=m,\n axis=axis,\n inplace=inplace,\n limit=limit,\n limit_area=limit_area,\n downcast=downcast,\n )\n # validate the interp method\n m = missing.clean_interp_method(method, **kwargs)\n\n assert index is not None # for mypy\n\n return self._interpolate(\n method=m,\n index=index,\n axis=axis,\n limit=limit,\n limit_direction=limit_direction,\n limit_area=limit_area,\n fill_value=fill_value,\n inplace=inplace,\n downcast=downcast,\n **kwargs,\n )\n\n def _interpolate_with_fill(\n self,\n method: str = \"pad\",\n axis: int = 0,\n inplace: bool = False,\n limit: Optional[int] = None,\n limit_area: Optional[str] = None,\n downcast: Optional[str] = None,\n ) -> List[\"Block\"]:\n \"\"\" fillna but using the interpolate machinery \"\"\"\n inplace = validate_bool_kwarg(inplace, \"inplace\")\n\n assert self._can_hold_na # checked by caller\n\n values = self.values if inplace else self.values.copy()\n\n values = missing.interpolate_2d(\n values,\n method=method,\n axis=axis,\n limit=limit,\n limit_area=limit_area,\n )\n\n blocks = [self.make_block_same_class(values, ndim=self.ndim)]\n return self._maybe_downcast(blocks, downcast)\n\n def _interpolate(\n self,\n method: str,\n index: \"Index\",\n fill_value: Optional[Any] = None,\n axis: int = 0,\n limit: Optional[int] = None,\n limit_direction: str = \"forward\",\n limit_area: Optional[str] = None,\n inplace: bool = False,\n downcast: Optional[str] = None,\n **kwargs,\n ) -> List[\"Block\"]:\n \"\"\" interpolate using scipy wrappers \"\"\"\n inplace = validate_bool_kwarg(inplace, \"inplace\")\n data = self.values if inplace else self.values.copy()\n\n # only deal with floats\n if not self.is_float:\n if not self.is_integer:\n return [self]\n data = data.astype(np.float64)\n\n if fill_value is None:\n fill_value = self.fill_value\n\n if method in (\"krogh\", \"piecewise_polynomial\", \"pchip\"):\n if not index.is_monotonic:\n raise ValueError(\n f\"{method} interpolation requires that the index be monotonic.\"\n )\n # process 1-d slices in the axis direction\n\n def 
func(yvalues: np.ndarray) -> np.ndarray:\n\n # process a 1-d slice, returning it\n # should the axis argument be handled below in apply_along_axis?\n # i.e. not an arg to missing.interpolate_1d\n return missing.interpolate_1d(\n xvalues=index,\n yvalues=yvalues,\n method=method,\n limit=limit,\n limit_direction=limit_direction,\n limit_area=limit_area,\n fill_value=fill_value,\n bounds_error=False,\n **kwargs,\n )\n\n # interp each column independently\n interp_values = np.apply_along_axis(func, axis, data)\n\n blocks = [self.make_block_same_class(interp_values)]\n return self._maybe_downcast(blocks, downcast)\n\n def take_nd(self, indexer, axis: int, new_mgr_locs=None, fill_value=lib.no_default):\n \"\"\"\n Take values according to indexer and return them as a block.bb\n\n \"\"\"\n # algos.take_nd dispatches for DatetimeTZBlock, CategoricalBlock\n # so need to preserve types\n # sparse is treated like an ndarray, but needs .get_values() shaping\n\n values = self.values\n\n if fill_value is lib.no_default:\n fill_value = self.fill_value\n allow_fill = False\n else:\n allow_fill = True\n\n new_values = algos.take_nd(\n values, indexer, axis=axis, allow_fill=allow_fill, fill_value=fill_value\n )\n\n # Called from three places in managers, all of which satisfy\n # this assertion\n assert not (axis == 0 and new_mgr_locs is None)\n if new_mgr_locs is None:\n new_mgr_locs = self.mgr_locs\n\n if not is_dtype_equal(new_values.dtype, self.dtype):\n return self.make_block(new_values, new_mgr_locs)\n else:\n return self.make_block_same_class(new_values, new_mgr_locs)\n\n def diff(self, n: int, axis: int = 1) -> List[\"Block\"]:\n \"\"\" return block for the diff of the values \"\"\"\n new_values = algos.diff(self.values, n, axis=axis, stacklevel=7)\n return [self.make_block(values=new_values)]\n\n def shift(self, periods: int, axis: int = 0, fill_value=None):\n \"\"\" shift the block by periods, possibly upcast \"\"\"\n # convert integer to float if necessary. need to do a lot more than\n # that, handle boolean etc also\n new_values, fill_value = maybe_upcast(self.values, fill_value)\n\n new_values = shift(new_values, periods, axis, fill_value)\n\n return [self.make_block(new_values)]\n\n def where(\n self, other, cond, errors=\"raise\", try_cast: bool = False, axis: int = 0\n ) -> List[\"Block\"]:\n \"\"\"\n evaluate the block; return result block(s) from the result\n\n Parameters\n ----------\n other : a ndarray/object\n cond : np.ndarray[bool], SparseArray[bool], or BooleanArray\n errors : str, {'raise', 'ignore'}, default 'raise'\n - ``raise`` : allow exceptions to be raised\n - ``ignore`` : suppress exceptions. 
On error return original object\n try_cast: bool, default False\n axis : int, default 0\n\n Returns\n -------\n List[Block]\n \"\"\"\n import pandas.core.computation.expressions as expressions\n\n cond = _extract_bool_array(cond)\n assert not isinstance(other, (ABCIndexClass, ABCSeries, ABCDataFrame))\n\n assert errors in [\"raise\", \"ignore\"]\n transpose = self.ndim == 2\n\n values = self.values\n orig_other = other\n if transpose:\n values = values.T\n\n # If the default broadcasting would go in the wrong direction, then\n # explicitly reshape other instead\n if getattr(other, \"ndim\", 0) >= 1:\n if values.ndim - 1 == other.ndim and axis == 1:\n other = other.reshape(tuple(other.shape + (1,)))\n elif transpose and values.ndim == self.ndim - 1:\n # TODO(EA2D): not neceesssary with 2D EAs\n cond = cond.T\n\n if not hasattr(cond, \"shape\"):\n raise ValueError(\"where must have a condition that is ndarray like\")\n\n if cond.ravel(\"K\").all():\n result = values\n else:\n # see if we can operate on the entire block, or need item-by-item\n # or if we are a single block (ndim == 1)\n if (\n (self.is_integer or self.is_bool)\n and lib.is_float(other)\n and np.isnan(other)\n ):\n # GH#3733 special case to avoid object-dtype casting\n # and go through numexpr path instead.\n # In integer case, np.where will cast to floats\n pass\n elif not self._can_hold_element(other):\n # we cannot coerce, return a compat dtype\n # we are explicitly ignoring errors\n block = self.coerce_to_target_dtype(other)\n blocks = block.where(\n orig_other, cond, errors=errors, try_cast=try_cast, axis=axis\n )\n return self._maybe_downcast(blocks, \"infer\")\n\n if not (\n (self.is_integer or self.is_bool)\n and lib.is_float(other)\n and np.isnan(other)\n ):\n # convert datetime to datetime64, timedelta to timedelta64\n other = convert_scalar_for_putitemlike(other, values.dtype)\n\n # By the time we get here, we should have all Series/Index\n # args extracted to ndarray\n result = expressions.where(cond, values, other)\n\n if self._can_hold_na or self.ndim == 1:\n\n if transpose:\n result = result.T\n\n return [self.make_block(result)]\n\n # might need to separate out blocks\n axis = cond.ndim - 1\n cond = cond.swapaxes(axis, 0)\n mask = np.array([cond[i].all() for i in range(cond.shape[0])], dtype=bool)\n\n result_blocks: List[\"Block\"] = []\n for m in [mask, ~mask]:\n if m.any():\n result = cast(np.ndarray, result) # EABlock overrides where\n taken = result.take(m.nonzero()[0], axis=axis)\n r = maybe_downcast_numeric(taken, self.dtype)\n nb = self.make_block(r.T, placement=self.mgr_locs[m])\n result_blocks.append(nb)\n\n return result_blocks\n\n def _unstack(self, unstacker, fill_value, new_placement):\n \"\"\"\n Return a list of unstacked blocks of self\n\n Parameters\n ----------\n unstacker : reshape._Unstacker\n fill_value : int\n Only used in ExtensionBlock._unstack\n\n Returns\n -------\n blocks : list of Block\n New blocks of unstacked values.\n mask : array_like of bool\n The mask of columns of `blocks` we should keep.\n \"\"\"\n new_values, mask = unstacker.get_new_values(\n self.values.T, fill_value=fill_value\n )\n\n mask = mask.any(0)\n # TODO: in all tests we have mask.all(); can we rely on that?\n\n new_values = new_values.T[mask]\n new_placement = new_placement[mask]\n\n blocks = [self.make_block_same_class(new_values, placement=new_placement)]\n return blocks, mask\n\n def quantile(self, qs, interpolation=\"linear\", axis: int = 0):\n \"\"\"\n compute the quantiles of the\n\n Parameters\n 
----------\n qs: a scalar or list of the quantiles to be computed\n interpolation: type of interpolation, default 'linear'\n axis: axis to compute, default 0\n\n Returns\n -------\n Block\n \"\"\"\n # We should always have ndim == 2 because Series dispatches to DataFrame\n assert self.ndim == 2\n\n values = self.get_values()\n\n is_empty = values.shape[axis] == 0\n orig_scalar = not is_list_like(qs)\n if orig_scalar:\n # make list-like, unpack later\n qs = [qs]\n\n if is_empty:\n # create the array of na_values\n # 2d len(values) * len(qs)\n result = np.repeat(\n np.array([self.fill_value] * len(qs)), len(values)\n ).reshape(len(values), len(qs))\n else:\n # asarray needed for Sparse, see GH#24600\n mask = np.asarray(isna(values))\n result = nanpercentile(\n values,\n np.array(qs) * 100,\n axis=axis,\n na_value=self.fill_value,\n mask=mask,\n ndim=values.ndim,\n interpolation=interpolation,\n )\n\n result = np.array(result, copy=False)\n result = result.T\n\n if orig_scalar and not lib.is_scalar(result):\n # result could be scalar in case with is_empty and self.ndim == 1\n assert result.shape[-1] == 1, result.shape\n result = result[..., 0]\n result = lib.item_from_zerodim(result)\n\n ndim = np.ndim(result)\n return make_block(result, placement=np.arange(len(result)), ndim=ndim)\n\n def _replace_coerce(\n self,\n to_replace,\n value,\n mask: np.ndarray,\n inplace: bool = True,\n regex: bool = False,\n ) -> List[\"Block\"]:\n \"\"\"\n Replace value corresponding to the given boolean array with another\n value.\n\n Parameters\n ----------\n to_replace : object or pattern\n Scalar to replace or regular expression to match.\n value : object\n Replacement object.\n mask : np.ndarray[bool]\n True indicate corresponding element is ignored.\n inplace : bool, default True\n Perform inplace modification.\n regex : bool, default False\n If true, perform regular expression substitution.\n\n Returns\n -------\n List[Block]\n \"\"\"\n if mask.any():\n if not regex:\n nb = self.coerce_to_target_dtype(value)\n if nb is self and not inplace:\n nb = nb.copy()\n nb._putmask_simple(mask, value)\n return [nb]\n else:\n regex = _should_use_regex(regex, to_replace)\n if regex:\n return self._replace_regex(\n to_replace,\n value,\n inplace=inplace,\n convert=False,\n mask=mask,\n )\n return self.replace(to_replace, value, inplace=inplace, regex=False)\n return [self]\n\n\nclass ExtensionBlock(Block):\n \"\"\"\n Block for holding extension types.\n\n Notes\n -----\n This holds all 3rd-party extension array types. 
It's also the immediate\n parent class for our internal extension types' blocks, CategoricalBlock.\n\n ExtensionArrays are limited to 1-D.\n \"\"\"\n\n _can_consolidate = False\n _validate_ndim = False\n is_extension = True\n\n values: ExtensionArray\n\n def __init__(self, values, placement, ndim: int):\n \"\"\"\n Initialize a non-consolidatable block.\n\n 'ndim' may be inferred from 'placement'.\n\n This will call continue to call __init__ for the other base\n classes mixed in with this Mixin.\n \"\"\"\n\n # Placement must be converted to BlockPlacement so that we can check\n # its length\n if not isinstance(placement, libinternals.BlockPlacement):\n placement = libinternals.BlockPlacement(placement)\n\n # Maybe infer ndim from placement\n if ndim is None:\n if len(placement) != 1:\n ndim = 1\n else:\n ndim = 2\n super().__init__(values, placement, ndim=ndim)\n\n if self.ndim == 2 and len(self.mgr_locs) != 1:\n # TODO(EA2D): check unnecessary with 2D EAs\n raise AssertionError(\"block.size != values.size\")\n\n @property\n def shape(self):\n # TODO(EA2D): override unnecessary with 2D EAs\n if self.ndim == 1:\n return (len(self.values),)\n return len(self.mgr_locs), len(self.values)\n\n def iget(self, col):\n\n if self.ndim == 2 and isinstance(col, tuple):\n # TODO(EA2D): unnecessary with 2D EAs\n col, loc = col\n if not com.is_null_slice(col) and col != 0:\n raise IndexError(f\"{self} only contains one item\")\n elif isinstance(col, slice):\n if col != slice(None):\n raise NotImplementedError(col)\n return self.values[[loc]]\n return self.values[loc]\n else:\n if col != 0:\n raise IndexError(f\"{self} only contains one item\")\n return self.values\n\n def set_inplace(self, locs, values):\n # NB: This is a misnomer, is supposed to be inplace but is not,\n # see GH#33457\n assert locs.tolist() == [0]\n self.values = values\n\n def putmask(\n self, mask, new, inplace: bool = False, axis: int = 0, transpose: bool = False\n ) -> List[\"Block\"]:\n \"\"\"\n See Block.putmask.__doc__\n \"\"\"\n inplace = validate_bool_kwarg(inplace, \"inplace\")\n\n mask = _extract_bool_array(mask)\n\n new_values = self.values if inplace else self.values.copy()\n\n if isinstance(new, (np.ndarray, ExtensionArray)) and len(new) == len(mask):\n new = new[mask]\n\n mask = safe_reshape(mask, new_values.shape)\n\n new_values[mask] = new\n return [self.make_block(values=new_values)]\n\n def _maybe_coerce_values(self, values):\n \"\"\"\n Unbox to an extension array.\n\n This will unbox an ExtensionArray stored in an Index or Series.\n ExtensionArrays pass through. 
No dtype coercion is done.\n\n Parameters\n ----------\n values : Index, Series, ExtensionArray\n\n Returns\n -------\n ExtensionArray\n \"\"\"\n return extract_array(values)\n\n @property\n def _holder(self):\n # For extension blocks, the holder is values-dependent.\n return type(self.values)\n\n @property\n def fill_value(self):\n # Used in reindex_indexer\n return self.values.dtype.na_value\n\n @property\n def _can_hold_na(self):\n # The default ExtensionArray._can_hold_na is True\n return self._holder._can_hold_na\n\n @property\n def is_view(self) -> bool:\n \"\"\"Extension arrays are never treated as views.\"\"\"\n return False\n\n @property\n def is_numeric(self):\n return self.values.dtype._is_numeric\n\n def setitem(self, indexer, value):\n \"\"\"\n Attempt self.values[indexer] = value, possibly creating a new array.\n\n This differs from Block.setitem by not allowing setitem to change\n the dtype of the Block.\n\n Parameters\n ----------\n indexer : tuple, list-like, array-like, slice\n The subset of self.values to set\n value : object\n The value being set\n\n Returns\n -------\n Block\n\n Notes\n -----\n `indexer` is a direct slice/positional indexer. `value` must\n be a compatible shape.\n \"\"\"\n if not self._can_hold_element(value):\n # This is only relevant for DatetimeTZBlock, which has a\n # non-trivial `_can_hold_element`.\n # https://github.com/pandas-dev/pandas/issues/24020\n # Need a dedicated setitem until GH#24020 (type promotion in setitem\n # for extension arrays) is designed and implemented.\n return self.astype(object).setitem(indexer, value)\n\n if isinstance(indexer, tuple):\n # TODO(EA2D): not needed with 2D EAs\n # we are always 1-D\n indexer = indexer[0]\n\n check_setitem_lengths(indexer, value, self.values)\n self.values[indexer] = value\n return self\n\n def get_values(self, dtype=None):\n # ExtensionArrays must be iterable, so this works.\n # TODO(EA2D): reshape not needed with 2D EAs\n return np.asarray(self.values).reshape(self.shape)\n\n def array_values(self) -> ExtensionArray:\n return self.values\n\n def to_native_types(self, na_rep=\"nan\", quoting=None, **kwargs):\n \"\"\"override to use ExtensionArray astype for the conversion\"\"\"\n values = self.values\n mask = isna(values)\n\n values = np.asarray(values.astype(object))\n values[mask] = na_rep\n\n # TODO(EA2D): reshape not needed with 2D EAs\n # we are expected to return a 2-d ndarray\n return self.make_block(values)\n\n def take_nd(\n self, indexer, axis: int = 0, new_mgr_locs=None, fill_value=lib.no_default\n ):\n \"\"\"\n Take values according to indexer and return them as a block.\n \"\"\"\n if fill_value is lib.no_default:\n fill_value = None\n\n # TODO(EA2D): special case not needed with 2D EAs\n # axis doesn't matter; we are really a single-dim object\n # but are passed the axis depending on the calling routing\n # if its REALLY axis 0, then this will be a reindex and not a take\n new_values = self.values.take(indexer, fill_value=fill_value, allow_fill=True)\n\n # Called from three places in managers, all of which satisfy\n # this assertion\n assert not (self.ndim == 1 and new_mgr_locs is None)\n if new_mgr_locs is None:\n new_mgr_locs = self.mgr_locs\n\n return self.make_block_same_class(new_values, new_mgr_locs)\n\n def _can_hold_element(self, element: Any) -> bool:\n # TODO: We may need to think about pushing this onto the array.\n # We're doing the same as CategoricalBlock here.\n return True\n\n def _slice(self, slicer):\n \"\"\"\n Return a slice of my values.\n\n Parameters\n 
----------\n slicer : slice, ndarray[int], or a tuple of these\n Valid (non-reducing) indexer for self.values.\n\n Returns\n -------\n np.ndarray or ExtensionArray\n \"\"\"\n # return same dims as we currently have\n if not isinstance(slicer, tuple) and self.ndim == 2:\n # reached via getitem_block via _slice_take_blocks_ax0\n # TODO(EA2D): wont be necessary with 2D EAs\n slicer = (slicer, slice(None))\n\n if isinstance(slicer, tuple) and len(slicer) == 2:\n first = slicer[0]\n if not isinstance(first, slice):\n raise AssertionError(\n \"invalid slicing for a 1-ndim ExtensionArray\", first\n )\n # GH#32959 only full-slicers along fake-dim0 are valid\n # TODO(EA2D): wont be necessary with 2D EAs\n new_locs = self.mgr_locs[first]\n if len(new_locs):\n # effectively slice(None)\n slicer = slicer[1]\n else:\n raise AssertionError(\n \"invalid slicing for a 1-ndim ExtensionArray\", slicer\n )\n\n return self.values[slicer]\n\n def fillna(self, value, limit=None, inplace=False, downcast=None):\n values = self.values if inplace else self.values.copy()\n values = values.fillna(value=value, limit=limit)\n return [\n self.make_block_same_class(\n values=values, placement=self.mgr_locs, ndim=self.ndim\n )\n ]\n\n def interpolate(\n self, method=\"pad\", axis=0, inplace=False, limit=None, fill_value=None, **kwargs\n ):\n\n values = self.values if inplace else self.values.copy()\n return self.make_block_same_class(\n values=values.fillna(value=fill_value, method=method, limit=limit),\n placement=self.mgr_locs,\n )\n\n def diff(self, n: int, axis: int = 1) -> List[\"Block\"]:\n if axis == 0 and n != 0:\n # n==0 case will be a no-op so let is fall through\n # Since we only have one column, the result will be all-NA.\n # Create this result by shifting along axis=0 past the length of\n # our values.\n return super().diff(len(self.values), axis=0)\n if axis == 1:\n # TODO(EA2D): unnecessary with 2D EAs\n # we are by definition 1D.\n axis = 0\n return super().diff(n, axis)\n\n def shift(\n self, periods: int, axis: int = 0, fill_value: Any = None\n ) -> List[\"ExtensionBlock\"]:\n \"\"\"\n Shift the block by `periods`.\n\n Dispatches to underlying ExtensionArray and re-boxes in an\n ExtensionBlock.\n \"\"\"\n return [\n self.make_block_same_class(\n self.values.shift(periods=periods, fill_value=fill_value),\n placement=self.mgr_locs,\n ndim=self.ndim,\n )\n ]\n\n def where(\n self, other, cond, errors=\"raise\", try_cast: bool = False, axis: int = 0\n ) -> List[\"Block\"]:\n\n cond = _extract_bool_array(cond)\n assert not isinstance(other, (ABCIndexClass, ABCSeries, ABCDataFrame))\n\n if isinstance(other, np.ndarray) and other.ndim == 2:\n # TODO(EA2D): unnecessary with 2D EAs\n assert other.shape[1] == 1\n other = other[:, 0]\n\n if isinstance(cond, np.ndarray) and cond.ndim == 2:\n # TODO(EA2D): unnecessary with 2D EAs\n assert cond.shape[1] == 1\n cond = cond[:, 0]\n\n if lib.is_scalar(other) and isna(other):\n # The default `other` for Series / Frame is np.nan\n # we want to replace that with the correct NA value\n # for the type\n other = self.dtype.na_value\n\n if is_sparse(self.values):\n # TODO(SparseArray.__setitem__): remove this if condition\n # We need to re-infer the type of the data after doing the\n # where, for cases where the subtypes don't match\n dtype = None\n else:\n dtype = self.dtype\n\n result = self.values.copy()\n icond = ~cond\n if lib.is_scalar(other):\n set_other = other\n else:\n set_other = other[icond]\n try:\n result[icond] = set_other\n except (NotImplementedError, 
TypeError):\n # NotImplementedError for class not implementing `__setitem__`\n # TypeError for SparseArray, which implements just to raise\n # a TypeError\n result = self._holder._from_sequence(\n np.where(cond, self.values, other), dtype=dtype\n )\n\n return [self.make_block_same_class(result, placement=self.mgr_locs)]\n\n def _unstack(self, unstacker, fill_value, new_placement):\n # ExtensionArray-safe unstack.\n # We override ObjectBlock._unstack, which unstacks directly on the\n # values of the array. For EA-backed blocks, this would require\n # converting to a 2-D ndarray of objects.\n # Instead, we unstack an ndarray of integer positions, followed by\n # a `take` on the actual values.\n n_rows = self.shape[-1]\n dummy_arr = np.arange(n_rows)\n\n new_values, mask = unstacker.get_new_values(dummy_arr, fill_value=-1)\n mask = mask.any(0)\n # TODO: in all tests we have mask.all(); can we rely on that?\n\n blocks = [\n self.make_block_same_class(\n self.values.take(indices, allow_fill=True, fill_value=fill_value),\n [place],\n )\n for indices, place in zip(new_values.T, new_placement)\n ]\n return blocks, mask\n\n\nclass ObjectValuesExtensionBlock(ExtensionBlock):\n \"\"\"\n Block providing backwards-compatibility for `.values`.\n\n Used by PeriodArray and IntervalArray to ensure that\n Series[T].values is an ndarray of objects.\n \"\"\"\n\n def external_values(self):\n return self.values.astype(object)\n\n def _can_hold_element(self, element: Any) -> bool:\n if is_valid_nat_for_dtype(element, self.dtype):\n return True\n if isinstance(element, list) and len(element) == 0:\n return True\n tipo = maybe_infer_dtype_type(element)\n if tipo is not None:\n return issubclass(tipo.type, self.dtype.type)\n return isinstance(element, self.dtype.type)\n\n\nclass NumericBlock(Block):\n __slots__ = ()\n is_numeric = True\n _can_hold_na = True\n\n\nclass FloatBlock(NumericBlock):\n __slots__ = ()\n is_float = True\n\n def _can_hold_element(self, element: Any) -> bool:\n tipo = maybe_infer_dtype_type(element)\n if tipo is not None:\n return issubclass(tipo.type, (np.floating, np.integer)) and not issubclass(\n tipo.type, np.timedelta64\n )\n return isinstance(\n element, (float, int, np.floating, np.int_)\n ) and not isinstance(\n element,\n (bool, np.bool_, np.timedelta64),\n )\n\n def to_native_types(\n self, na_rep=\"\", float_format=None, decimal=\".\", quoting=None, **kwargs\n ):\n \"\"\" convert to our native types format \"\"\"\n values = self.values\n\n # see gh-13418: no special formatting is desired at the\n # output (important for appropriate 'quoting' behaviour),\n # so do not pass it through the FloatArrayFormatter\n if float_format is None and decimal == \".\":\n mask = isna(values)\n\n if not quoting:\n values = values.astype(str)\n else:\n values = np.array(values, dtype=\"object\")\n\n values[mask] = na_rep\n return self.make_block(values)\n\n from pandas.io.formats.format import FloatArrayFormatter\n\n formatter = FloatArrayFormatter(\n values,\n na_rep=na_rep,\n float_format=float_format,\n decimal=decimal,\n quoting=quoting,\n fixed_width=False,\n )\n res = formatter.get_result_as_array()\n return self.make_block(res)\n\n\nclass ComplexBlock(NumericBlock):\n __slots__ = ()\n is_complex = True\n\n def _can_hold_element(self, element: Any) -> bool:\n tipo = maybe_infer_dtype_type(element)\n if tipo is not None:\n return issubclass(tipo.type, (np.floating, np.integer, np.complexfloating))\n return isinstance(\n element, (float, int, complex, np.float_, np.int_)\n ) and not 
isinstance(element, (bool, np.bool_))\n\n\nclass IntBlock(NumericBlock):\n __slots__ = ()\n is_integer = True\n _can_hold_na = False\n\n def _can_hold_element(self, element: Any) -> bool:\n tipo = maybe_infer_dtype_type(element)\n if tipo is not None:\n return (\n issubclass(tipo.type, np.integer)\n and not issubclass(tipo.type, np.timedelta64)\n and self.dtype.itemsize >= tipo.itemsize\n )\n # We have not inferred an integer from the dtype\n # check if we have a builtin int or a float equal to an int\n return is_integer(element) or (is_float(element) and element.is_integer())\n\n\nclass DatetimeLikeBlockMixin(Block):\n \"\"\"Mixin class for DatetimeBlock, DatetimeTZBlock, and TimedeltaBlock.\"\"\"\n\n _can_hold_na = True\n\n def get_values(self, dtype=None):\n \"\"\"\n return object dtype as boxed values, such as Timestamps/Timedelta\n \"\"\"\n if is_object_dtype(dtype):\n # DTA/TDA constructor and astype can handle 2D\n return self._holder(self.values).astype(object)\n return self.values\n\n def internal_values(self):\n # Override to return DatetimeArray and TimedeltaArray\n return self.array_values()\n\n def array_values(self):\n return self._holder._simple_new(self.values)\n\n def iget(self, key):\n # GH#31649 we need to wrap scalars in Timestamp/Timedelta\n # TODO(EA2D): this can be removed if we ever have 2D EA\n return self.array_values().reshape(self.shape)[key]\n\n def diff(self, n: int, axis: int = 0) -> List[\"Block\"]:\n \"\"\"\n 1st discrete difference.\n\n Parameters\n ----------\n n : int\n Number of periods to diff.\n axis : int, default 0\n Axis to diff upon.\n\n Returns\n -------\n A list with a new TimeDeltaBlock.\n\n Notes\n -----\n The arguments here are mimicking shift so they are called correctly\n by apply.\n \"\"\"\n # TODO(EA2D): reshape not necessary with 2D EAs\n values = self.array_values().reshape(self.shape)\n\n new_values = values - values.shift(n, axis=axis)\n return [\n TimeDeltaBlock(new_values, placement=self.mgr_locs.indexer, ndim=self.ndim)\n ]\n\n def shift(self, periods, axis=0, fill_value=None):\n # TODO(EA2D) this is unnecessary if these blocks are backed by 2D EAs\n values = self.array_values()\n new_values = values.shift(periods, fill_value=fill_value, axis=axis)\n return self.make_block_same_class(new_values)\n\n def to_native_types(self, na_rep=\"NaT\", **kwargs):\n \"\"\" convert to our native types format \"\"\"\n arr = self.array_values()\n\n result = arr._format_native_types(na_rep=na_rep, **kwargs)\n return self.make_block(result)\n\n\nclass DatetimeBlock(DatetimeLikeBlockMixin):\n __slots__ = ()\n is_datetime = True\n _holder = DatetimeArray\n fill_value = np.datetime64(\"NaT\", \"ns\")\n\n def _maybe_coerce_values(self, values):\n \"\"\"\n Input validation for values passed to __init__. 
Ensure that\n we have datetime64ns, coercing if necessary.\n\n Parameters\n ----------\n values : array-like\n Must be convertible to datetime64\n\n Returns\n -------\n values : ndarray[datetime64ns]\n\n Overridden by DatetimeTZBlock.\n \"\"\"\n if values.dtype != DT64NS_DTYPE:\n values = conversion.ensure_datetime64ns(values)\n\n if isinstance(values, DatetimeArray):\n values = values._data\n\n assert isinstance(values, np.ndarray), type(values)\n return values\n\n def astype(self, dtype, copy: bool = False, errors: str = \"raise\"):\n \"\"\"\n these automatically copy, so copy=True has no effect\n raise on an except if raise == True\n \"\"\"\n dtype = pandas_dtype(dtype)\n\n # if we are passed a datetime64[ns, tz]\n if is_datetime64tz_dtype(dtype):\n values = self.values\n if copy:\n # this should be the only copy\n values = values.copy()\n values = DatetimeArray._simple_new(values.view(\"i8\"), dtype=dtype)\n return self.make_block(values)\n\n # delegate\n return super().astype(dtype=dtype, copy=copy, errors=errors)\n\n def _can_hold_element(self, element: Any) -> bool:\n tipo = maybe_infer_dtype_type(element)\n if tipo is not None:\n if isinstance(element, list) and len(element) == 0:\n # Following DatetimeArray._validate_setitem_value\n # convention, we treat this as object-dtype\n # (even though tipo is float64)\n return True\n\n elif self.is_datetimetz:\n # require exact match, since non-nano does not exist\n return is_dtype_equal(tipo, self.dtype) or is_valid_nat_for_dtype(\n element, self.dtype\n )\n\n # GH#27419 if we get a non-nano datetime64 object\n return is_datetime64_dtype(tipo)\n elif element is NaT:\n return True\n elif isinstance(element, datetime):\n if self.is_datetimetz:\n return tz_compare(element.tzinfo, self.dtype.tz)\n return element.tzinfo is None\n\n return is_valid_nat_for_dtype(element, self.dtype)\n\n def set_inplace(self, locs, values):\n \"\"\"\n See Block.set.__doc__\n \"\"\"\n values = conversion.ensure_datetime64ns(values, copy=False)\n\n self.values[locs] = values\n\n\nclass DatetimeTZBlock(ExtensionBlock, DatetimeBlock):\n \"\"\" implement a datetime64 block with a tz attribute \"\"\"\n\n values: DatetimeArray\n\n __slots__ = ()\n is_datetimetz = True\n is_extension = True\n\n internal_values = Block.internal_values\n\n _holder = DatetimeBlock._holder\n _can_hold_element = DatetimeBlock._can_hold_element\n to_native_types = DatetimeBlock.to_native_types\n diff = DatetimeBlock.diff\n fillna = DatetimeBlock.fillna # i.e. Block.fillna\n fill_value = DatetimeBlock.fill_value\n _can_hold_na = DatetimeBlock._can_hold_na\n\n array_values = ExtensionBlock.array_values\n\n def _maybe_coerce_values(self, values):\n \"\"\"\n Input validation for values passed to __init__. 
Ensure that\n we have datetime64TZ, coercing if necessary.\n\n Parameters\n ----------\n values : array-like\n Must be convertible to datetime64\n\n Returns\n -------\n values : DatetimeArray\n \"\"\"\n if not isinstance(values, self._holder):\n values = self._holder(values)\n\n if values.tz is None:\n raise ValueError(\"cannot create a DatetimeTZBlock without a tz\")\n\n return values\n\n @property\n def is_view(self) -> bool:\n \"\"\" return a boolean if I am possibly a view \"\"\"\n # check the ndarray values of the DatetimeIndex values\n return self.values._data.base is not None\n\n def get_values(self, dtype=None):\n \"\"\"\n Returns an ndarray of values.\n\n Parameters\n ----------\n dtype : np.dtype\n Only `object`-like dtypes are respected here (not sure\n why).\n\n Returns\n -------\n values : ndarray\n When ``dtype=object``, then and object-dtype ndarray of\n boxed values is returned. Otherwise, an M8[ns] ndarray\n is returned.\n\n DatetimeArray is always 1-d. ``get_values`` will reshape\n the return value to be the same dimensionality as the\n block.\n \"\"\"\n values = self.values\n if is_object_dtype(dtype):\n values = values.astype(object)\n\n # TODO(EA2D): reshape unnecessary with 2D EAs\n # Ensure that our shape is correct for DataFrame.\n # ExtensionArrays are always 1-D, even in a DataFrame when\n # the analogous NumPy-backed column would be a 2-D ndarray.\n return np.asarray(values).reshape(self.shape)\n\n def external_values(self):\n # NB: this is different from np.asarray(self.values), since that\n # return an object-dtype ndarray of Timestamps.\n return np.asarray(self.values.astype(\"datetime64[ns]\", copy=False))\n\n def quantile(self, qs, interpolation=\"linear\", axis=0):\n naive = self.values.view(\"M8[ns]\")\n\n # TODO(EA2D): kludge for 2D block with 1D values\n naive = naive.reshape(self.shape)\n\n blk = self.make_block(naive)\n res_blk = blk.quantile(qs, interpolation=interpolation, axis=axis)\n\n # TODO(EA2D): ravel is kludge for 2D block with 1D values, assumes column-like\n aware = self._holder(res_blk.values.ravel(), dtype=self.dtype)\n return self.make_block_same_class(aware, ndim=res_blk.ndim)\n\n def _check_ndim(self, values, ndim):\n \"\"\"\n ndim inference and validation.\n\n This is overriden by the DatetimeTZBlock to check the case of 2D\n data (values.ndim == 2), which should only be allowed if ndim is\n also 2.\n The case of 1D array is still allowed with both ndim of 1 or 2, as\n if the case for other EAs. Therefore, we are only checking\n `values.ndim > ndim` instead of `values.ndim != ndim` as for\n consolidated blocks.\n \"\"\"\n if ndim is None:\n ndim = values.ndim\n\n if values.ndim > ndim:\n raise ValueError(\n \"Wrong number of dimensions. 
\"\n f\"values.ndim != ndim [{values.ndim} != {ndim}]\"\n )\n return ndim\n\n\nclass TimeDeltaBlock(DatetimeLikeBlockMixin):\n __slots__ = ()\n is_timedelta = True\n fill_value = np.timedelta64(\"NaT\", \"ns\")\n\n def _maybe_coerce_values(self, values):\n if values.dtype != TD64NS_DTYPE:\n # non-nano we will convert to nano\n if values.dtype.kind != \"m\":\n # caller is responsible for ensuring timedelta64 dtype\n raise TypeError(values.dtype) # pragma: no cover\n\n values = TimedeltaArray._from_sequence(values)._data\n if isinstance(values, TimedeltaArray):\n values = values._data\n assert isinstance(values, np.ndarray), type(values)\n return values\n\n @property\n def _holder(self):\n return TimedeltaArray\n\n def _can_hold_element(self, element: Any) -> bool:\n tipo = maybe_infer_dtype_type(element)\n if tipo is not None:\n return issubclass(tipo.type, np.timedelta64)\n elif element is NaT:\n return True\n elif isinstance(element, (timedelta, np.timedelta64)):\n return True\n return is_valid_nat_for_dtype(element, self.dtype)\n\n def fillna(self, value, **kwargs):\n # TODO(EA2D): if we operated on array_values, TDA.fillna would handle\n # raising here.\n if is_integer(value):\n # Deprecation GH#24694, GH#19233\n raise TypeError(\n \"Passing integers to fillna for timedelta64[ns] dtype is no \"\n \"longer supported. To obtain the old behavior, pass \"\n \"`pd.Timedelta(seconds=n)` instead.\"\n )\n return super().fillna(value, **kwargs)\n\n\nclass BoolBlock(NumericBlock):\n __slots__ = ()\n is_bool = True\n _can_hold_na = False\n\n def _can_hold_element(self, element: Any) -> bool:\n tipo = maybe_infer_dtype_type(element)\n if tipo is not None:\n return issubclass(tipo.type, np.bool_)\n return isinstance(element, (bool, np.bool_))\n\n\nclass ObjectBlock(Block):\n __slots__ = ()\n is_object = True\n _can_hold_na = True\n\n def _maybe_coerce_values(self, values):\n if issubclass(values.dtype.type, str):\n values = np.array(values, dtype=object)\n return values\n\n @property\n def is_bool(self):\n \"\"\"\n we can be a bool if we have only bool values but are of type\n object\n \"\"\"\n return lib.is_bool_array(self.values.ravel(\"K\"))\n\n def reduce(self, func, ignore_failures: bool = False) -> List[Block]:\n \"\"\"\n For object-dtype, we operate column-wise.\n \"\"\"\n assert self.ndim == 2\n\n values = self.values\n if len(values) > 1:\n # split_and_operate expects func with signature (mask, values, inplace)\n def mask_func(mask, values, inplace):\n if values.ndim == 1:\n values = values.reshape(1, -1)\n return func(values)\n\n return self.split_and_operate(\n None, mask_func, False, ignore_failures=ignore_failures\n )\n\n try:\n res = func(values)\n except TypeError:\n if not ignore_failures:\n raise\n return []\n\n assert isinstance(res, np.ndarray)\n assert res.ndim == 1\n res = res.reshape(1, -1)\n return [self.make_block_same_class(res)]\n\n def convert(\n self,\n copy: bool = True,\n datetime: bool = True,\n numeric: bool = True,\n timedelta: bool = True,\n ) -> List[\"Block\"]:\n \"\"\"\n attempt to cast any object types to better types return a copy of\n the block (if copy = True) by definition we ARE an ObjectBlock!!!!!\n \"\"\"\n\n # operate column-by-column\n def f(mask, val, idx):\n shape = val.shape\n values = soft_convert_objects(\n val.ravel(),\n datetime=datetime,\n numeric=numeric,\n timedelta=timedelta,\n copy=copy,\n )\n if isinstance(values, np.ndarray):\n # TODO(EA2D): allow EA once reshape is supported\n values = values.reshape(shape)\n\n return values\n\n if 
self.ndim == 2:\n blocks = self.split_and_operate(None, f, False)\n else:\n values = f(None, self.values.ravel(), None)\n blocks = [self.make_block(values)]\n\n return blocks\n\n def _maybe_downcast(self, blocks: List[\"Block\"], downcast=None) -> List[\"Block\"]:\n\n if downcast is not None:\n return blocks\n\n # split and convert the blocks\n return extend_blocks([b.convert(datetime=True, numeric=False) for b in blocks])\n\n def _can_hold_element(self, element: Any) -> bool:\n return True\n\n def replace(\n self,\n to_replace,\n value,\n inplace: bool = False,\n regex: bool = False,\n ) -> List[\"Block\"]:\n # Note: the checks we do in NDFrame.replace ensure we never get\n # here with listlike to_replace or value, as those cases\n # go through _replace_list\n\n regex = _should_use_regex(regex, to_replace)\n\n if regex:\n return self._replace_regex(to_replace, value, inplace=inplace)\n else:\n return super().replace(to_replace, value, inplace=inplace, regex=False)\n\n\ndef _should_use_regex(regex: bool, to_replace: Any) -> bool:\n \"\"\"\n Decide whether to treat `to_replace` as a regular expression.\n \"\"\"\n if is_re(to_replace):\n regex = True\n\n regex = regex and is_re_compilable(to_replace)\n\n # Don't use regex if the pattern is empty.\n regex = regex and re.compile(to_replace).pattern != \"\"\n return regex\n\n\nclass CategoricalBlock(ExtensionBlock):\n __slots__ = ()\n\n def _replace_list(\n self,\n src_list: List[Any],\n dest_list: List[Any],\n inplace: bool = False,\n regex: bool = False,\n ) -> List[\"Block\"]:\n if len(algos.unique(dest_list)) == 1:\n # We likely got here by tiling value inside NDFrame.replace,\n # so un-tile here\n return self.replace(src_list, dest_list[0], inplace, regex)\n return super()._replace_list(src_list, dest_list, inplace, regex)\n\n def replace(\n self,\n to_replace,\n value,\n inplace: bool = False,\n regex: bool = False,\n ) -> List[\"Block\"]:\n inplace = validate_bool_kwarg(inplace, \"inplace\")\n result = self if inplace else self.copy()\n\n result.values.replace(to_replace, value, inplace=True)\n return [result]\n\n\n# -----------------------------------------------------------------\n# Constructor Helpers\n\n\ndef get_block_type(values, dtype=None):\n \"\"\"\n Find the appropriate Block subclass to use for the given values and dtype.\n\n Parameters\n ----------\n values : ndarray-like\n dtype : numpy or pandas dtype\n\n Returns\n -------\n cls : class, subclass of Block\n \"\"\"\n dtype = dtype or values.dtype\n vtype = dtype.type\n\n cls: Type[Block]\n\n if is_sparse(dtype):\n # Need this first(ish) so that Sparse[datetime] is sparse\n cls = ExtensionBlock\n elif is_categorical_dtype(values.dtype):\n cls = CategoricalBlock\n elif issubclass(vtype, np.datetime64):\n assert not is_datetime64tz_dtype(values.dtype)\n cls = DatetimeBlock\n elif is_datetime64tz_dtype(values.dtype):\n cls = DatetimeTZBlock\n elif is_interval_dtype(dtype) or is_period_dtype(dtype):\n cls = ObjectValuesExtensionBlock\n elif is_extension_array_dtype(values.dtype):\n # Note: need to be sure PandasArray is unwrapped before we get here\n cls = ExtensionBlock\n elif issubclass(vtype, np.floating):\n cls = FloatBlock\n elif issubclass(vtype, np.timedelta64):\n assert issubclass(vtype, np.integer)\n cls = TimeDeltaBlock\n elif issubclass(vtype, np.complexfloating):\n cls = ComplexBlock\n elif issubclass(vtype, np.integer):\n cls = IntBlock\n elif dtype == np.bool_:\n cls = BoolBlock\n else:\n cls = ObjectBlock\n return cls\n\n\ndef make_block(values, placement, 
klass=None, ndim=None, dtype=None):\n # Ensure that we don't allow PandasArray / PandasDtype in internals.\n # For now, blocks should be backed by ndarrays when possible.\n if isinstance(values, ABCPandasArray):\n values = values.to_numpy()\n if ndim and ndim > 1:\n # TODO(EA2D): special case not needed with 2D EAs\n values = np.atleast_2d(values)\n\n if isinstance(dtype, PandasDtype):\n dtype = dtype.numpy_dtype\n\n if klass is None:\n dtype = dtype or values.dtype\n klass = get_block_type(values, dtype)\n\n elif klass is DatetimeTZBlock and not is_datetime64tz_dtype(values.dtype):\n # TODO: This is no longer hit internally; does it need to be retained\n # for e.g. pyarrow?\n values = DatetimeArray._simple_new(values, dtype=dtype)\n\n return klass(values, ndim=ndim, placement=placement)\n\n\n# -----------------------------------------------------------------\n\n\ndef extend_blocks(result, blocks=None):\n \"\"\" return a new extended blocks, given the result \"\"\"\n if blocks is None:\n blocks = []\n if isinstance(result, list):\n for r in result:\n if isinstance(r, list):\n blocks.extend(r)\n else:\n blocks.append(r)\n else:\n assert isinstance(result, Block), type(result)\n blocks.append(result)\n return blocks\n\n\ndef _block_shape(values: ArrayLike, ndim: int = 1) -> ArrayLike:\n \"\"\" guarantee the shape of the values to be at least 1 d \"\"\"\n if values.ndim < ndim:\n shape = values.shape\n if not is_extension_array_dtype(values.dtype):\n # TODO(EA2D): https://github.com/pandas-dev/pandas/issues/23023\n # block.shape is incorrect for \"2D\" ExtensionArrays\n # We can't, and don't need to, reshape.\n # error: \"ExtensionArray\" has no attribute \"reshape\"\n values = values.reshape(tuple((1,) + shape)) # type: ignore[attr-defined]\n return values\n\n\ndef safe_reshape(arr, new_shape: Shape):\n \"\"\"\n If possible, reshape `arr` to have shape `new_shape`,\n with a couple of exceptions (see gh-13012):\n\n 1) If `arr` is a ExtensionArray or Index, `arr` will be\n returned as is.\n 2) If `arr` is a Series, the `_values` attribute will\n be reshaped and returned.\n\n Parameters\n ----------\n arr : array-like, object to be reshaped\n new_shape : int or tuple of ints, the new shape\n \"\"\"\n if isinstance(arr, ABCSeries):\n arr = arr._values\n if not is_extension_array_dtype(arr.dtype):\n # Note: this will include TimedeltaArray and tz-naive DatetimeArray\n # TODO(EA2D): special case will be unnecessary with 2D EAs\n arr = np.asarray(arr).reshape(new_shape)\n return arr\n\n\ndef _putmask_smart(v: np.ndarray, mask: np.ndarray, n) -> np.ndarray:\n \"\"\"\n Return a new ndarray, try to preserve dtype if possible.\n\n Parameters\n ----------\n v : np.ndarray\n `values`, updated in-place.\n mask : np.ndarray[bool]\n Applies to both sides (array like).\n n : `new values` either scalar or an array like aligned with `values`\n\n Returns\n -------\n values : ndarray with updated values\n this *may* be a copy of the original\n\n See Also\n --------\n ndarray.putmask\n \"\"\"\n # we cannot use np.asarray() here as we cannot have conversions\n # that numpy does when numeric are mixed with strings\n\n # n should be the length of the mask or a scalar here\n if not is_list_like(n):\n n = np.repeat(n, len(mask))\n\n # see if we are only masking values that if putted\n # will work in the current dtype\n try:\n nn = n[mask]\n except TypeError:\n # TypeError: only integer scalar arrays can be converted to a scalar index\n pass\n else:\n # make sure that we have a nullable type\n # if we have nulls\n if 
not isna_compat(v, nn[0]):\n pass\n elif not (is_float_dtype(nn.dtype) or is_integer_dtype(nn.dtype)):\n # only compare integers/floats\n pass\n elif not (is_float_dtype(v.dtype) or is_integer_dtype(v.dtype)):\n # only compare integers/floats\n pass\n else:\n\n # we ignore ComplexWarning here\n with warnings.catch_warnings(record=True):\n warnings.simplefilter(\"ignore\", np.ComplexWarning)\n nn_at = nn.astype(v.dtype)\n\n comp = nn == nn_at\n if is_list_like(comp) and comp.all():\n nv = v.copy()\n nv[mask] = nn_at\n return nv\n\n n = np.asarray(n)\n\n def _putmask_preserve(nv, n):\n try:\n nv[mask] = n[mask]\n except (IndexError, ValueError):\n nv[mask] = n\n return nv\n\n # preserves dtype if possible\n if v.dtype.kind == n.dtype.kind:\n return _putmask_preserve(v, n)\n\n # change the dtype if needed\n dtype, _ = maybe_promote(n.dtype)\n\n v = v.astype(dtype)\n\n return _putmask_preserve(v, n)\n\n\ndef _extract_bool_array(mask: ArrayLike) -> np.ndarray:\n \"\"\"\n If we have a SparseArray or BooleanArray, convert it to ndarray[bool].\n \"\"\"\n if isinstance(mask, ExtensionArray):\n # We could have BooleanArray, Sparse[bool], ...\n # Except for BooleanArray, this is equivalent to just\n # np.asarray(mask, dtype=bool)\n mask = mask.to_numpy(dtype=bool, na_value=False)\n\n assert isinstance(mask, np.ndarray), type(mask)\n assert mask.dtype == bool, mask.dtype\n return mask\n"
] |
[
[
"pandas.util._validators.validate_bool_kwarg",
"pandas.core.dtypes.cast.maybe_box_datetimelike",
"pandas.core.arrays.DatetimeArray._simple_new",
"pandas.core.missing.clean_interp_method",
"pandas.core.dtypes.common.is_datetime64_dtype",
"numpy.place",
"numpy.where",
"pandas.core.dtypes.common.is_interval_dtype",
"pandas.core.indexers.is_empty_indexer",
"pandas.core.dtypes.common.is_float_dtype",
"pandas.core.dtypes.common.is_categorical_dtype",
"pandas.core.dtypes.common.is_re_compilable",
"pandas.core.dtypes.common.is_list_like",
"pandas._libs.algos.validate_limit",
"numpy.delete",
"pandas._libs.internals.BlockPlacement",
"pandas.core.dtypes.cast.convert_scalar_for_putitemlike",
"numpy.array",
"pandas.core.dtypes.cast.maybe_downcast_to_dtype",
"pandas.core.dtypes.common.is_bool_dtype",
"pandas.core.common.is_null_slice",
"pandas.core.dtypes.cast.find_common_type",
"numpy.datetime64",
"pandas.core.dtypes.missing.isna",
"pandas.core.missing.interpolate_2d",
"pandas.core.array_algos.replace.replace_regex",
"numpy.asarray",
"pandas.core.dtypes.cast.maybe_downcast_numeric",
"pandas.core.dtypes.common.is_datetime64tz_dtype",
"pandas._libs.tslibs.conversion.ensure_datetime64ns",
"pandas.core.algorithms.take_nd",
"numpy.putmask",
"pandas.core.dtypes.cast.infer_dtype_from_scalar",
"numpy.timedelta64",
"numpy.atleast_2d",
"pandas.core.arrays.PandasArray",
"numpy.ndim",
"pandas.core.array_algos.transforms.shift",
"numpy.errstate",
"pandas.core.dtypes.cast.infer_dtype_from",
"pandas.core.arrays.TimedeltaArray._from_sequence",
"pandas.core.dtypes.common.is_integer",
"pandas.core.dtypes.cast.maybe_upcast",
"pandas._libs.lib.item_from_zerodim",
"pandas.core.dtypes.common.is_extension_array_dtype",
"pandas.core.dtypes.common.is_dtype_equal",
"pandas._libs.lib.is_scalar",
"pandas.core.indexers.check_setitem_lengths",
"numpy.repeat",
"pandas._libs.tslibs.timezones.tz_compare",
"pandas.core.dtypes.common.is_integer_dtype",
"pandas.core.dtypes.common.pandas_dtype",
"numpy.isnan",
"pandas.core.dtypes.common.is_timedelta64_dtype",
"pandas.core.dtypes.common.is_period_dtype",
"pandas._libs.writers.word_len",
"pandas.core.missing.mask_missing",
"pandas.core.missing.clean_fill_method",
"pandas._libs.lib.is_float",
"pandas.core.algorithms.diff",
"numpy.broadcast_to",
"numpy.squeeze",
"numpy.dtype",
"pandas.core.computation.expressions.where",
"pandas.core.missing.interpolate_1d",
"numpy.any",
"numpy.arange",
"pandas.core.algorithms.unique",
"numpy.apply_along_axis",
"pandas.io.formats.format.FloatArrayFormatter",
"pandas.core.arrays.Categorical",
"pandas.core.dtypes.common.is_float",
"pandas.core.dtypes.common.is_datetime64_any_dtype",
"pandas.core.dtypes.cast.maybe_promote",
"pandas.core.indexers.is_scalar_indexer",
"pandas.core.dtypes.common.is_sparse",
"pandas.core.dtypes.missing.is_valid_nat_for_dtype",
"pandas.core.dtypes.missing.isna_compat",
"pandas.core.dtypes.cast.astype_nansafe",
"pandas.core.dtypes.cast.maybe_infer_dtype_type",
"pandas.core.dtypes.common.is_re",
"pandas.core.dtypes.common.is_object_dtype",
"pandas.core.construction.extract_array",
"pandas.core.array_algos.replace.compare_or_regex_search"
]
] |
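The API list above belongs to the pandas-internal `blocks.py` source stored in this row. As a point of reference only, the sketch below mirrors the numpy-level dtype dispatch of the `get_block_type` helper shown in that code; it uses plain strings in place of the pandas Block classes and skips every pandas-specific predicate (`is_sparse`, `is_categorical_dtype`, extension dtypes, and so on), so it is an illustrative approximation rather than the library's implementation.

```python
# Minimal sketch (not part of the dataset row): the numpy-level branches of the
# get_block_type dispatch from the blocks.py source above. Block names are
# plain strings standing in for the pandas classes; pandas-only checks omitted.
import numpy as np

def sketch_block_type(dtype: np.dtype) -> str:
    vtype = dtype.type
    if issubclass(vtype, np.datetime64):
        return "DatetimeBlock"
    if issubclass(vtype, np.timedelta64):
        # must come before the integer check: timedelta64 subclasses np.integer
        return "TimeDeltaBlock"
    if issubclass(vtype, np.floating):
        return "FloatBlock"
    if issubclass(vtype, np.complexfloating):
        return "ComplexBlock"
    if issubclass(vtype, np.integer):
        return "IntBlock"
    if dtype == np.bool_:
        return "BoolBlock"
    return "ObjectBlock"

print(sketch_block_type(np.dtype("float64")))  # FloatBlock
print(sketch_block_type(np.dtype("M8[ns]")))   # DatetimeBlock
```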
Penlect/rectangle-packer
|
[
"52639f09ef0ce84bb1dbd81332554ba33ef2d835"
] |
[
"misc/recstat.py"
] |
[
"#!/usr/bin/env python3\n\"\"\"Visualize rpack packing & benchmark results\n\nSync files from Bucket:\n\n$ gsutil -m rsync -r gs://bucket.penlect.com/rpack artifacts/\n\n\"\"\"\n\n# Built-in\nimport argparse\nimport os\nimport re\nimport pickle\nimport math\nfrom pathlib import Path\n\n# PyPI\nimport matplotlib\nimport matplotlib.pyplot as plt\nimport matplotlib.patches as patches\nimport matplotlib.gridspec as gridspec\nimport matplotlib.ticker as mtick\nimport matplotlib.animation as mani\nfrom matplotlib.backends.backend_pdf import PdfPages\nfrom mpl_toolkits.axes_grid1 import make_axes_locatable\nimport numpy as np\n\n# Project\nimport rpack\n\n# Select a non-interactive backend so we can run on a server\nmatplotlib.use(\"Agg\")\n\n# Filename pattern of measurement files\nREGEX_FILE = re.compile(r'unif_side(\\d+)n(\\d+)m.pickle')\nIMG_EXT = frozenset({'png', 'svg', 'pdf'})\n\n\ndef load(directory):\n \"\"\"Load measurements from directory\"\"\"\n data = dict()\n for f in Path(directory).iterdir():\n match = REGEX_FILE.match(f.name)\n if match:\n n, m = match.groups()\n n, m = int(n), int(m)\n with open(f, 'rb') as h:\n data[n, m] = list()\n while True:\n try:\n data[n, m].append(pickle.load(h))\n except EOFError:\n break\n return data\n\n\ndef load_simple(directory, prefix):\n data = list()\n for f in Path(directory).iterdir():\n if f.name.startswith(prefix):\n with open(f, 'rb') as h:\n while True:\n try:\n rec, pos = pickle.load(h)\n except EOFError:\n break\n else:\n data.append((rec, pos))\n break\n return data\n\n\ndef plot_time(ax, mean_t):\n # Computation time\n\n mean_t = list()\n mean_f = list()\n for key in sorted(data):\n t, a, p = data[key]\n mean_t.append(sum(t)/len(t)/1e9)\n f = [aa/pp for aa, pp in zip(a, p)]\n mean_f.append(sum(f)/len(f))\n mean_t = np.array(mean_t).reshape((10, 10))\n mean_f = np.array(mean_f).reshape((10, 10))\n\n im = ax.imshow(\n mean_t, origin='lower', interpolation='none')\n ct = ax.contour(\n mean_t, levels=[1], origin='lower', colors=['red'])\n ax.clabel(ct, ct.levels, inline=True, fmt='%r sec')\n ax.set_xticklabels([100*i for i in range(1, 11)])\n ax.set_yticklabels([10*i for i in range(1, 11)])\n ax.set_xticks([i for i in range(0, 10)])\n ax.set_yticks([i for i in range(0, 10)])\n divider = make_axes_locatable(ax)\n cax = divider.append_axes(\"right\", size=\"5%\", pad=0.05)\n cb = plt.colorbar(im, cax=cax)\n ax.set_title('Computation time (sec)')\n ax.set_xlabel('size')\n ax.set_ylabel('rectangles')\n\n\ndef asdf_plot_packing_density(ax, mean_f):\n # Computation time\n im = ax.imshow(\n mean_f, origin='lower', interpolation='none')\n ct = ax.contour(\n mean_f, levels=[np.mean(mean_f)], origin='lower', colors=['red'])\n ax.clabel(ct, ct.levels, inline=True, fmt='%.1f %%')\n ax.set_xticklabels([100*i for i in range(1, 11)])\n ax.set_yticklabels([10*i for i in range(1, 11)])\n ax.set_xticks([i for i in range(0, 10)])\n ax.set_yticks([i for i in range(0, 10)])\n divider = make_axes_locatable(ax)\n cax = divider.append_axes(\"right\", size=\"5%\", pad=0.05)\n cb = plt.colorbar(im, cax=cax)\n ax.set_title('Packing density (%)')\n ax.set_xlabel('size')\n\n\ndef plot_packing_density_by_n(data):\n m = 1_000\n x = list()\n for n in range(10, 101, 10):\n f = list()\n for rec, pos, _ in data[n, m]:\n w, h = rpack.enclosing_size(rec, pos)\n a = sum(x*y for x, y in rec)\n f.append(a/(w*h))\n x.append(f)\n\n fig, ax = plt.subplots(tight_layout=True)\n bplot = ax.boxplot(\n x,\n sym='.',\n vert=True,\n patch_artist=True,\n showfliers=True,\n 
labels=list(range(10, 101, 10)),\n showmeans=True,\n medianprops=dict(color='black')\n )\n # fill with colors\n for patch in bplot['boxes']:\n patch.set_facecolor('lightblue')\n # ax.set_ylim([0.7, None])\n ax.yaxis.grid(True)\n ax.yaxis.set_major_formatter(mtick.PercentFormatter(xmax=1, decimals=0))\n ax.yaxis.set_major_locator(mtick.MultipleLocator(0.05))\n ax.yaxis.set_minor_locator(mtick.MultipleLocator(0.01))\n ax.set_title(rf'Packing density, rectangle side lengths ~ $Unif\\{{1, {m}\\}}$')\n ax.set_xlabel('Number of rectangles')\n\n for ext in IMG_EXT:\n plt.savefig(os.path.join(args.output_dir, f'packing_density_by_n.{ext}'))\n fig.clf()\n plt.close()\n\n\ndef plot_packing_density_by_m(data):\n n = 100\n m = 1_000\n x = list()\n total = list()\n for m in range(100, 1001, 100):\n f = list()\n for rec, pos, _ in data[100, m]:\n w, h = rpack.enclosing_size(rec, pos)\n a = sum(x*y for x, y in rec)\n f.append(a/(w*h))\n x.append(f)\n total.extend(f)\n x.append(total)\n\n fig, ax = plt.subplots(tight_layout=True)\n ax.axhline(\n np.array(total).mean(),\n color=matplotlib.rcParams['boxplot.meanprops.markerfacecolor'],\n linewidth=1,\n linestyle=':'\n )\n bplot = ax.boxplot(\n x,\n sym='.',\n vert=True,\n patch_artist=True,\n labels=list(range(100, 1001, 100)) + ['total'],\n # positions=list(range(1, 11)) + [11.25],\n # widths=[0.5]*10 + [1],\n showmeans=True,\n medianprops=dict(color='black')\n )\n # fill with colors\n colors = ['lightblue']*10 + ['lightgreen']\n for patch, color in zip(bplot['boxes'], colors):\n patch.set_facecolor(color)\n # ax.set_xlim([None, 13])\n ax.yaxis.grid(True)\n ax.yaxis.set_major_formatter(mtick.PercentFormatter(xmax=1, decimals=0))\n ax.yaxis.set_major_locator(mtick.MultipleLocator(0.01))\n ax.yaxis.set_minor_locator(mtick.MultipleLocator(0.001))\n ax.set_title('Packing density')\n ax.set_xlabel(rf'rectangle side lengths ~ $Unif\\{{1, m\\}}$, ${n} \\leq m \\leq {m}$')\n\n for ext in IMG_EXT:\n plt.savefig(os.path.join(args.output_dir, f'packing_density_by_m.{ext}'))\n fig.clf()\n plt.close()\n\n\ndef plot_enclosing(data):\n w = list()\n h = list()\n\n fig, ax = plt.subplots(tight_layout=True)\n ax.grid(True)\n m = 1_000\n # the scatter plot:\n for n in [10, 20, 30, 50, 100]:\n ew = list()\n eh = list()\n for rec, pos, t in data[n, m]:\n w, h = rpack.enclosing_size(rec, pos)\n ew.append(w)\n eh.append(h)\n ew = np.array(ew)/math.sqrt(n)\n eh = np.array(eh)/math.sqrt(n)\n # f = [aa/(w*h) for aa, (w, h) in zip(a, zip(ew, eh))]\n color = 'black' if n == 100 else None\n ax.scatter(ew, eh, c=color, s=0.2, label=f'$n = {n}$')\n\n ax.legend(markerscale=5)\n ax.set_aspect('equal')\n ax.set_xlim([0, None])\n ax.set_ylim([0, None])\n ax.set_title(\n 'Resulting enclosings,\\n'\n rf'$n$ = No. 
rectangles, side lengths ~ $Unif\\{{1, {m}\\}}$')\n ax.set_ylabel(r'Enclosing height / $\\sqrt{n}$')\n ax.set_xlabel(r'Enclosing width / $\\sqrt{n}$')\n for ext in IMG_EXT:\n plt.savefig(os.path.join(args.output_dir, f'enclosing.{ext}'))\n fig.clf()\n plt.close()\n\n\ndef plot_computation_time_by_n(data):\n x = list(range(10, 101, 10))\n y = list()\n s = list()\n m = 1_000\n for n in x:\n t = np.array([(ns/1e9) for rec, pos, ns in data[n, m]])\n t_mean = t.mean()\n t_std = t.std()\n y.append(t_mean)\n s.append(t_std)\n y = np.array(y)\n s = np.array(s)\n\n pol = np.polyfit(x, y, 3)\n p = np.poly1d(pol)\n yy = [p(xx) for xx in x]\n\n fig, ax = plt.subplots(tight_layout=True)\n ax.errorbar(x, y, s, fmt='--o', label='Measurement')\n ax.plot(x, yy, label='Polyfit $t = αn^3 + βn^2 + γn + δ$'\n '\\nα={:.2g}, β={:.2g} γ={:.2g} δ={:.2g}'.format(*pol))\n ax.grid(True)\n ax.xaxis.set_major_locator(mtick.MultipleLocator(10))\n ax.set_title(rf'Computation time, rectangle side lengths ~ $Unif\\{{1, {m}\\}}$')\n ax.set_xlabel('Number of rectangles')\n ax.set_ylabel('Seconds')\n ax.legend(loc='upper left')\n\n for ext in IMG_EXT:\n plt.savefig(os.path.join(args.output_dir, f'computation_time_by_n.{ext}'))\n fig.clf()\n plt.close()\n\n\ndef plot_computation_time_by_m(data):\n x = list(range(100, 1001, 100))\n n = 100\n y = list()\n s = list()\n for m in x:\n t = np.array([(ns/1e9) for rec, pos, ns in data[n, m]])\n t_mean = t.mean()\n t_std = t.std()\n y.append(t_mean)\n s.append(t_std)\n y = np.array(y)\n s = np.array(s)\n\n pol = np.polyfit(x, y, 1)\n p = np.poly1d(pol)\n yy = [p(xx) for xx in x]\n\n fig, ax = plt.subplots(tight_layout=True)\n ax.errorbar(x, y, s, fmt='--o', label='Measurement')\n ax.plot(x, yy, label='Polyfit $t = αn + β$'\n '\\nα={:.2g}, β={:.2g}'.format(*pol))\n ax.grid(True)\n ax.xaxis.set_major_locator(mtick.MultipleLocator(n))\n ax.set_title(f'Computation time, {n} rectangles')\n ax.set_xlabel(rf'rectangle side lengths ~ $Unif\\{{1, m\\}}$, ${n} \\leq m \\leq {m}$')\n ax.set_ylabel('Seconds')\n ax.legend(loc='upper left')\n\n for ext in IMG_EXT:\n plt.savefig(os.path.join(args.output_dir, f'computation_time_by_m.{ext}'))\n fig.clf()\n plt.close()\n\n\ndef rectangle_color(w, h):\n r = min(w, h)/max(w, h)/2\n if h < w:\n r = 1 - r\n r = r/2 + 0.3\n return plt.get_cmap('viridis')(r)\n\n\nclass PlotPacking:\n\n def __init__(self, rec, pos, gridlines=False, title='', trim=False):\n \"\"\"Initialization of PlotPacking\"\"\"\n self.rec = rec\n self.pos = pos\n self.gridlines = gridlines\n self.index = None\n self.encl_w, self.encl_h = rpack.enclosing_size(rec, pos)\n self.density = sum(w*h for w, h in rec)/(self.encl_w*self.encl_h)\n if trim:\n self.fig = plt.figure(figsize=(6, 6*self.encl_h/self.encl_w))\n self.ax = self.fig.add_axes([0.01, 0.01, 0.98, 0.98])\n else:\n self.fig, self.ax = plt.subplots(tight_layout=True)\n self.ax.set_aspect('equal')\n self.ax.invert_yaxis()\n self.ax.set_xlim([0, self.encl_w])\n self.ax.set_ylim([self.encl_h, 0])\n self.ax.xaxis.set_visible(False)\n self.ax.yaxis.set_visible(False)\n if title and not trim:\n self.ax.set_title(\n f'Packing density {100*self.density:.2f}% '\n f'({self.encl_w} x {self.encl_h})' + title)\n\n def feed(self, *args):\n artists = list()\n if self.index is None:\n self.index = 0\n return [self.ax]\n try:\n w, h = self.rec[self.index]\n x, y = self.pos[self.index]\n except IndexError:\n return []\n else:\n p = patches.Rectangle(\n (x, y), w, h,\n edgecolor='k',\n facecolor=rectangle_color(w, h)\n )\n self.ax.add_patch(p)\n 
artists.append(p)\n if self.gridlines:\n hline = self.ax.axhline(y+h, color='k', linestyle='-', linewidth=0.5)\n vline = self.ax.axvline(x+w, color='k', linestyle='-', linewidth=0.5)\n artists.append(hline)\n artists.append(vline)\n self.index += 1\n return artists\n\n def save(self, output_file):\n for ext in IMG_EXT:\n plt.savefig(f'{output_file}.{ext}', bbox_inches='tight')\n self.fig.clf()\n plt.close()\n\n def animation(self, output_file, pack_duration=3, duration=10, **kwargs):\n frames = list(range(len(self.rec)))\n interval = int(1_000*pack_duration/len(frames))\n frames.extend([None] * int(len(frames)*(duration/pack_duration)))\n anim = mani.FuncAnimation(self.fig, self.feed, frames=frames, interval=interval)\n if 'dpi' not in kwargs:\n kwargs['dpi'] = 80\n anim.save(f'{output_file}.gif', writer='imagemagick', **kwargs)\n self.fig.clf()\n plt.close()\n\n\ndef packing_density(rec, pos):\n w, h = rpack.enclosing_size(rec, pos)\n return sum(x*y for x, y in rec)/(w*h)\n\ndef best_first(samples):\n x = list()\n for rec, pos, _ in samples:\n f = packing_density(rec, pos)\n x.append((f, rec, pos))\n x.sort(reverse=True)\n return x\n\n\ndef plot_packing_extremes(data):\n for n in [10, 100]:\n m = 1000\n toplist = best_first(data[n, m])\n for case in [('best', toplist[0]), ('worst', toplist[-1])]:\n name, (_, rec, pos) = case\n p = PlotPacking(\n rec, pos, title=f', {n} rectangles.')\n while p.feed():\n pass\n p.save(os.path.join(args.output_dir, f'packing_{name}_{n}'))\n\n\ndef plot_packing_golden_ratio(data):\n candidates = list()\n for n in range(40, 101, 10):\n for m in range(100, 1001, 100):\n for rec, pos, _ in data[n, m]:\n w, h = rpack.bbox_size(rec, pos)\n rho = rpack.packing_density(rec, pos)\n candidates.append((round(abs(w/h - 1.61803398875), 2), -rho, rec, pos))\n candidates.sort()\n _, _, rec, pos = candidates[0]\n p = PlotPacking(\n rec, pos, title=f', {len(rec)} rectangles.')\n while p.feed():\n pass\n p.save(os.path.join(args.output_dir, f'packing_phi'))\n\n\ndef plot_animations(data):\n candidates = list()\n for n in range(40, 101, 10):\n for m in range(100, 1001, 100):\n for rec, pos, _ in data[n, m]:\n w, h = rpack.bbox_size(rec, pos)\n rho = rpack.packing_density(rec, pos)\n candidates.append((round(abs(w/h - 0.61803398875), 2), -rho, rec, pos))\n candidates.sort()\n _, _, sizes, pos = candidates[0]\n # Sort the rectangles so the animation will plot them from left to\n # right.\n sizes = [s for s, _ in sorted(zip(sizes, pos), key=lambda x: x[1])]\n pos.sort()\n p = PlotPacking(sizes, pos, gridlines=True, trim=True)\n p.animation(os.path.join(args.output_dir, f'example_grid'), 60, 20)\n p = PlotPacking(sizes, pos, gridlines=True, trim=True)\n while p.feed():\n pass\n p.save(os.path.join(args.output_dir, f'example_grid'))\n\n\ndef plot_squares(directory):\n\n data = load_simple(directory, 'square')\n\n f = list()\n for (rec, pos) in data:\n w, h = rpack.enclosing_size(rec, pos)\n a = sum(x*y for x, y in rec)\n f.append(a/(w*h))\n\n with PdfPages(os.path.join(args.output_dir, 'squares.pdf')) as pdf:\n\n fig, ax = plt.subplots(tight_layout=True)\n ax.plot(list(range(1, len(f) + 1)), f)\n ax.grid(True)\n ax.yaxis.set_major_formatter(mtick.PercentFormatter(xmax=1))\n ax.xaxis.set_major_locator(mtick.MultipleLocator(10))\n ax.set_title('Packing density, squares')\n ax.set_xlabel('Rectangle max side length (n)')\n pdf.savefig(figure=fig)\n for ext in IMG_EXT:\n plt.savefig(os.path.join(args.output_dir, f'squares_summary.{ext}'))\n fig.clf()\n plt.close()\n\n for n, (rec, pos) 
in enumerate(data, start=1):\n if n == 1:\n title = ', square 1x1'\n elif n == 2:\n title = ', squares 1x1 + 2x2'\n else:\n title = f', squares 1x1 ... {n}x{n}'\n p = PlotPacking(rec, pos, title=title)\n while p.feed():\n pass\n pdf.savefig(figure=p.fig)\n p.fig.clf()\n plt.close()\n\n\ndef plot_circums(directory):\n\n data = load_simple(directory, 'circum')\n\n f = list()\n for (rec, pos) in data:\n w, h = rpack.enclosing_size(rec, pos)\n a = sum(x*y for x, y in rec)\n f.append(a/(w*h))\n\n with PdfPages(os.path.join(args.output_dir, 'circum.pdf')) as pdf:\n\n fig, ax = plt.subplots(tight_layout=True)\n ax.plot(list(range(1, len(f) + 1)), f)\n ax.grid(True)\n ax.yaxis.set_major_formatter(mtick.PercentFormatter(xmax=1))\n ax.xaxis.set_major_locator(mtick.MultipleLocator(10))\n ax.set_title('Packing density, fixed circumference rectangles')\n ax.set_xlabel('Rectangle width + height (n)')\n pdf.savefig(figure=fig)\n for ext in IMG_EXT:\n plt.savefig(os.path.join(args.output_dir, f'circum_summary.{ext}'))\n fig.clf()\n plt.close()\n\n for n, (rec, pos) in enumerate(data, start=1):\n p = PlotPacking(\n rec, pos, title=f', nx1 ... 1xn, n={n}.')\n while p.feed():\n pass\n pdf.savefig(figure=p.fig)\n p.fig.clf()\n plt.close()\n\n\ndef main(args):\n os.makedirs(args.output_dir, exist_ok=True)\n data = load(args.input_dir)\n\n plot_computation_time_by_m(data)\n plot_packing_density_by_n(data)\n plot_packing_density_by_m(data)\n plot_computation_time_by_n(data)\n plot_enclosing(data)\n plot_packing_extremes(data)\n plot_animations(data)\n plot_squares(args.input_dir)\n plot_circums(args.input_dir)\n plot_packing_golden_ratio(data)\n\n\nPARSER = argparse.ArgumentParser()\nPARSER.add_argument(\n '--input-dir', '-i',\n # Example output_dir: /tmp/rpack/1.1.0-13-g18920b5-dirty/data\n type=str,\n default='/tmp/rpack/data',\n help='Data input directory.')\nPARSER.add_argument(\n '--output-dir', '-o',\n # Example output_dir: /tmp/rpack/1.1.0-13-g18920b5-dirty/img\n type=str,\n default='/tmp/rpack/img',\n help='Images output directory.')\n\nif __name__ == '__main__':\n args = PARSER.parse_args()\n main(args)\n"
] |
[
[
"numpy.polyfit",
"numpy.poly1d",
"matplotlib.ticker.MultipleLocator",
"matplotlib.use",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.get_cmap",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.colorbar",
"numpy.mean",
"matplotlib.animation.FuncAnimation",
"matplotlib.pyplot.close",
"numpy.array",
"matplotlib.ticker.PercentFormatter"
]
] |
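The matplotlib/numpy calls listed above come from `misc/recstat.py` in this row. The sketch below isolates the `np.polyfit`/`np.poly1d` pattern that `plot_computation_time_by_n` uses to fit a cubic to benchmark timings; the timing numbers here are made up purely for illustration, and nothing is read from the rpack measurement files.

```python
# Minimal sketch (not part of the dataset row): the polyfit/poly1d step from
# recstat.py, applied to synthetic timing data purely for illustration.
import numpy as np

n_rects = np.arange(10, 101, 10)               # number of rectangles per run
seconds = 1e-5 * n_rects**3 + 0.01 * n_rects   # stand-in timings, not measured

coeffs = np.polyfit(n_rects, seconds, 3)       # fit t = a*n^3 + b*n^2 + c*n + d
poly = np.poly1d(coeffs)                       # callable polynomial
fitted = poly(n_rects)                         # evaluate the fit on the grid

print(coeffs)
print(np.max(np.abs(fitted - seconds)))        # residual of the fit
```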
doktorsleepelss/openpilot
|
[
"d4d21a954245bf534f344ba9de7a4cb055708564",
"d4d21a954245bf534f344ba9de7a4cb055708564"
] |
[
"selfdrive/controls/tests/test_lateral_mpc.py",
"selfdrive/controls/lib/pathplanner.py"
] |
[
"import unittest\nimport numpy as np\nfrom selfdrive.car.honda.interface import CarInterface\nfrom selfdrive.controls.lib.lateral_mpc import libmpc_py\nfrom selfdrive.controls.lib.vehicle_model import VehicleModel\nfrom selfdrive.controls.lib.drive_helpers import MPC_N, CAR_ROTATION_RADIUS\n\n\ndef run_mpc(v_ref=30., x_init=0., y_init=0., psi_init=0., tire_angle_init=0.,\n lane_width=3.6, poly_shift=0.):\n\n libmpc = libmpc_py.libmpc\n libmpc.init(1.0, 1.0, 1.0)\n\n mpc_solution = libmpc_py.ffi.new(\"log_t *\")\n\n y_pts = poly_shift * np.ones(MPC_N + 1)\n heading_pts = np.zeros(MPC_N + 1)\n\n CP = CarInterface.get_params(\"HONDA CIVIC 2016 TOURING\")\n VM = VehicleModel(CP)\n\n curvature_factor = VM.curvature_factor(v_ref)\n\n cur_state = libmpc_py.ffi.new(\"state_t *\")\n cur_state.x = x_init\n cur_state.y = y_init\n cur_state.psi = psi_init\n cur_state.tire_angle = tire_angle_init\n\n # converge in no more than 20 iterations\n for _ in range(20):\n libmpc.run_mpc(cur_state, mpc_solution, v_ref,\n curvature_factor, CAR_ROTATION_RADIUS,\n list(y_pts), list(heading_pts))\n\n return mpc_solution\n\n\nclass TestLateralMpc(unittest.TestCase):\n\n def _assert_null(self, sol, tire_angle=1e-6):\n for i in range(len(sol[0].y)):\n self.assertAlmostEqual(sol[0].y[i], 0., delta=tire_angle)\n self.assertAlmostEqual(sol[0].psi[i], 0., delta=tire_angle)\n self.assertAlmostEqual(sol[0].tire_angle[i], 0., delta=tire_angle)\n\n def _assert_simmetry(self, sol, tire_angle=1e-6):\n for i in range(len(sol[0][0].y)):\n self.assertAlmostEqual(sol[0][0].y[i], -sol[1][0].y[i], delta=tire_angle)\n self.assertAlmostEqual(sol[0][0].psi[i], -sol[1][0].psi[i], delta=tire_angle)\n self.assertAlmostEqual(sol[0][0].tire_angle[i], -sol[1][0].tire_angle[i], delta=tire_angle)\n self.assertAlmostEqual(sol[0][0].x[i], sol[1][0].x[i], delta=tire_angle)\n\n def _assert_identity(self, sol, ignore_y=False, tire_angle=1e-6):\n for i in range(len(sol[0][0].y)):\n self.assertAlmostEqual(sol[0][0].psi[i], sol[1][0].psi[i], delta=tire_angle)\n self.assertAlmostEqual(sol[0][0].tire_angle[i], sol[1][0].tire_angle[i], delta=tire_angle)\n self.assertAlmostEqual(sol[0][0].x[i], sol[1][0].x[i], delta=tire_angle)\n if not ignore_y:\n self.assertAlmostEqual(sol[0][0].y[i], sol[1][0].y[i], delta=tire_angle)\n\n def test_straight(self):\n sol = run_mpc()\n self._assert_null(sol)\n\n def test_y_symmetry(self):\n sol = []\n for y_init in [-0.5, 0.5]:\n sol.append(run_mpc(y_init=y_init))\n self._assert_simmetry(sol)\n\n def test_poly_symmetry(self):\n sol = []\n for poly_shift in [-1., 1.]:\n sol.append(run_mpc(poly_shift=poly_shift))\n self._assert_simmetry(sol)\n\n def test_tire_angle_symmetry(self):\n sol = []\n for tire_angle_init in [-0.1, 0.1]:\n sol.append(run_mpc(tire_angle_init=tire_angle_init))\n self._assert_simmetry(sol)\n\n def test_psi_symmetry(self):\n sol = []\n for psi_init in [-0.1, 0.1]:\n sol.append(run_mpc(psi_init=psi_init))\n self._assert_simmetry(sol)\n\n def test_y_shift_vs_poly_shift(self):\n shift = 1.\n sol = []\n sol.append(run_mpc(y_init=shift))\n sol.append(run_mpc(poly_shift=-shift))\n # need larger tire_angle than standard, otherwise it false triggers.\n # this is acceptable because the 2 cases are very different from the optimizer standpoint\n self._assert_identity(sol, ignore_y=True, tire_angle=1e-5)\n\n def test_no_overshoot(self):\n y_init = 1.\n sol = run_mpc(y_init=y_init)\n for y in list(sol[0].y):\n self.assertGreaterEqual(y_init, abs(y))\n\n\nif __name__ == \"__main__\":\n unittest.main()\n",
"import os\nimport math\nimport numpy as np\nfrom common.realtime import sec_since_boot, DT_MDL\nfrom common.numpy_fast import interp\nfrom selfdrive.swaglog import cloudlog\nfrom selfdrive.controls.lib.lateral_mpc import libmpc_py\nfrom selfdrive.controls.lib.drive_helpers import MPC_COST_LAT, MPC_N, CAR_ROTATION_RADIUS\nfrom selfdrive.controls.lib.lane_planner import LanePlanner, TRAJECTORY_SIZE\nfrom selfdrive.config import Conversions as CV\nfrom common.params import Params\nimport cereal.messaging as messaging\nfrom cereal import log\n\nLaneChangeState = log.PathPlan.LaneChangeState\nLaneChangeDirection = log.PathPlan.LaneChangeDirection\n\nLOG_MPC = os.environ.get('LOG_MPC', False)\n\nLANE_CHANGE_SPEED_MIN = 45 * CV.MPH_TO_MS\nLANE_CHANGE_TIME_MAX = 10.\n\nDESIRES = {\n LaneChangeDirection.none: {\n LaneChangeState.off: log.PathPlan.Desire.none,\n LaneChangeState.preLaneChange: log.PathPlan.Desire.none,\n LaneChangeState.laneChangeStarting: log.PathPlan.Desire.none,\n LaneChangeState.laneChangeFinishing: log.PathPlan.Desire.none,\n },\n LaneChangeDirection.left: {\n LaneChangeState.off: log.PathPlan.Desire.none,\n LaneChangeState.preLaneChange: log.PathPlan.Desire.none,\n LaneChangeState.laneChangeStarting: log.PathPlan.Desire.laneChangeLeft,\n LaneChangeState.laneChangeFinishing: log.PathPlan.Desire.laneChangeLeft,\n },\n LaneChangeDirection.right: {\n LaneChangeState.off: log.PathPlan.Desire.none,\n LaneChangeState.preLaneChange: log.PathPlan.Desire.none,\n LaneChangeState.laneChangeStarting: log.PathPlan.Desire.laneChangeRight,\n LaneChangeState.laneChangeFinishing: log.PathPlan.Desire.laneChangeRight,\n },\n}\n\n\nclass PathPlanner():\n def __init__(self, CP):\n self.LP = LanePlanner()\n\n self.last_cloudlog_t = 0\n self.steer_rate_cost = CP.steerRateCost\n\n self.setup_mpc()\n self.solution_invalid_cnt = 0\n self.lane_change_enabled = Params().get('LaneChangeEnabled') == b'1'\n self.lane_change_state = LaneChangeState.off\n self.lane_change_direction = LaneChangeDirection.none\n self.lane_change_timer = 0.0\n self.lane_change_ll_prob = 1.0\n self.prev_one_blinker = False\n self.desire = log.PathPlan.Desire.none\n\n self.path_xyz = np.zeros((TRAJECTORY_SIZE,3))\n self.plan_yaw = np.zeros((TRAJECTORY_SIZE,))\n self.t_idxs = np.arange(TRAJECTORY_SIZE)\n self.y_pts = np.zeros(TRAJECTORY_SIZE)\n\n def setup_mpc(self):\n self.libmpc = libmpc_py.libmpc\n self.libmpc.init(MPC_COST_LAT.PATH, MPC_COST_LAT.HEADING, self.steer_rate_cost)\n\n self.mpc_solution = libmpc_py.ffi.new(\"log_t *\")\n self.cur_state = libmpc_py.ffi.new(\"state_t *\")\n self.cur_state[0].x = 0.0\n self.cur_state[0].y = 0.0\n self.cur_state[0].psi = 0.0\n self.cur_state[0].tire_angle = 0.0\n\n self.angle_steers_des = 0.0\n self.angle_steers_des_mpc = 0.0\n self.angle_steers_des_time = 0.0\n\n def update(self, sm, CP, VM):\n v_ego = sm['carState'].vEgo\n active = sm['controlsState'].active\n steering_wheel_angle_offset_deg = sm['liveParameters'].angleOffset\n steering_wheel_angle_deg = sm['carState'].steeringAngle\n measured_tire_angle = -math.radians(steering_wheel_angle_deg - steering_wheel_angle_offset_deg) / VM.sR\n\n\n # Update vehicle model\n x = max(sm['liveParameters'].stiffnessFactor, 0.1)\n sr = max(sm['liveParameters'].steerRatio, 0.1)\n VM.update_params(x, sr)\n curvature_factor = VM.curvature_factor(v_ego)\n\n\n md = sm['modelV2']\n self.LP.parse_model(sm['modelV2'])\n if len(md.position.x) == TRAJECTORY_SIZE and len(md.orientation.x) == TRAJECTORY_SIZE:\n self.path_xyz = np.column_stack([md.position.x, 
md.position.y, md.position.z])\n self.t_idxs = np.array(md.position.t)\n self.plan_yaw = list(md.orientation.z)\n\n # Lane change logic\n one_blinker = sm['carState'].leftBlinker != sm['carState'].rightBlinker\n below_lane_change_speed = v_ego < LANE_CHANGE_SPEED_MIN\n\n if sm['carState'].leftBlinker:\n self.lane_change_direction = LaneChangeDirection.left\n elif sm['carState'].rightBlinker:\n self.lane_change_direction = LaneChangeDirection.right\n\n if (not active) or (self.lane_change_timer > LANE_CHANGE_TIME_MAX) or (not self.lane_change_enabled):\n self.lane_change_state = LaneChangeState.off\n self.lane_change_direction = LaneChangeDirection.none\n else:\n torque_applied = sm['carState'].steeringPressed and \\\n ((sm['carState'].steeringTorque > 0 and self.lane_change_direction == LaneChangeDirection.left) or\n (sm['carState'].steeringTorque < 0 and self.lane_change_direction == LaneChangeDirection.right))\n\n blindspot_detected = ((sm['carState'].leftBlindspot and self.lane_change_direction == LaneChangeDirection.left) or\n (sm['carState'].rightBlindspot and self.lane_change_direction == LaneChangeDirection.right))\n\n lane_change_prob = self.LP.l_lane_change_prob + self.LP.r_lane_change_prob\n\n # State transitions\n # off\n if self.lane_change_state == LaneChangeState.off and one_blinker and not self.prev_one_blinker and not below_lane_change_speed:\n self.lane_change_state = LaneChangeState.preLaneChange\n self.lane_change_ll_prob = 1.0\n\n # pre\n elif self.lane_change_state == LaneChangeState.preLaneChange:\n if not one_blinker or below_lane_change_speed:\n self.lane_change_state = LaneChangeState.off\n elif torque_applied and not blindspot_detected:\n self.lane_change_state = LaneChangeState.laneChangeStarting\n\n # starting\n elif self.lane_change_state == LaneChangeState.laneChangeStarting:\n # fade out over .5s\n self.lane_change_ll_prob = max(self.lane_change_ll_prob - 2*DT_MDL, 0.0)\n # 98% certainty\n if lane_change_prob < 0.02 and self.lane_change_ll_prob < 0.01:\n self.lane_change_state = LaneChangeState.laneChangeFinishing\n\n # finishing\n elif self.lane_change_state == LaneChangeState.laneChangeFinishing:\n # fade in laneline over 1s\n self.lane_change_ll_prob = min(self.lane_change_ll_prob + DT_MDL, 1.0)\n if one_blinker and self.lane_change_ll_prob > 0.99:\n self.lane_change_state = LaneChangeState.preLaneChange\n elif self.lane_change_ll_prob > 0.99:\n self.lane_change_state = LaneChangeState.off\n\n if self.lane_change_state in [LaneChangeState.off, LaneChangeState.preLaneChange]:\n self.lane_change_timer = 0.0\n else:\n self.lane_change_timer += DT_MDL\n\n self.prev_one_blinker = one_blinker\n\n self.desire = DESIRES[self.lane_change_direction][self.lane_change_state]\n\n # Turn off lanes during lane change\n if self.desire == log.PathPlan.Desire.laneChangeRight or self.desire == log.PathPlan.Desire.laneChangeLeft:\n self.LP.lll_prob *= self.lane_change_ll_prob\n self.LP.rll_prob *= self.lane_change_ll_prob\n d_path_xyz = self.LP.get_d_path(v_ego, self.t_idxs, self.path_xyz)\n y_pts = np.interp(v_ego * self.t_idxs[:MPC_N+1], np.linalg.norm(d_path_xyz, axis=1), d_path_xyz[:,1])\n heading_pts = np.interp(v_ego * self.t_idxs[:MPC_N+1], np.linalg.norm(self.path_xyz, axis=1), self.plan_yaw)\n self.y_pts = y_pts\n\n v_ego_mpc = max(v_ego, 5.0) # avoid mpc roughness due to low speed\n assert len(y_pts) == MPC_N + 1\n assert len(heading_pts) == MPC_N + 1\n self.libmpc.run_mpc(self.cur_state, self.mpc_solution,\n float(v_ego_mpc),\n curvature_factor,\n 
CAR_ROTATION_RADIUS,\n list(y_pts),\n list(heading_pts))\n # init state for next\n self.cur_state.x = 0.0\n self.cur_state.y = 0.0\n self.cur_state.psi = 0.0\n self.cur_state.tire_angle = interp(DT_MDL, self.t_idxs[:MPC_N+1], self.mpc_solution.tire_angle)\n\n # TODO this needs more thought, use .2s extra for now to estimate other delays\n delay = CP.steerActuatorDelay + .2\n next_tire_angle = interp(DT_MDL + delay, self.t_idxs[:MPC_N+1], self.mpc_solution.tire_angle)\n next_tire_angle_rate = self.mpc_solution.tire_angle_rate[0]\n\n # reset to current steer angle if not active or overriding\n if active:\n tire_angle_desired = next_tire_angle\n desired_tire_angle_rate = next_tire_angle_rate\n else:\n tire_angle_desired = measured_tire_angle\n desired_tire_angle_rate = 0.0\n\n # negative sign, controls uses different convention\n self.desired_steering_wheel_angle_deg = -float(math.degrees(tire_angle_desired * VM.sR)) + steering_wheel_angle_offset_deg\n self.desired_steering_wheel_angle_rate_deg = -float(math.degrees(desired_tire_angle_rate * VM.sR))\n\n # Check for infeasable MPC solution\n mpc_nans = any(math.isnan(x) for x in self.mpc_solution.tire_angle)\n t = sec_since_boot()\n if mpc_nans:\n self.libmpc.init(MPC_COST_LAT.PATH, MPC_COST_LAT.HEADING, CP.steerRateCost)\n self.cur_state.tire_angle = measured_tire_angle\n\n if t > self.last_cloudlog_t + 5.0:\n self.last_cloudlog_t = t\n cloudlog.warning(\"Lateral mpc - nan: True\")\n\n if self.mpc_solution[0].cost > 20000. or mpc_nans: # TODO: find a better way to detect when MPC did not converge\n self.solution_invalid_cnt += 1\n else:\n self.solution_invalid_cnt = 0\n\n def publish(self, sm, pm):\n plan_solution_valid = self.solution_invalid_cnt < 2\n plan_send = messaging.new_message('pathPlan')\n plan_send.valid = sm.all_alive_and_valid(service_list=['carState', 'controlsState', 'liveParameters', 'modelV2'])\n plan_send.pathPlan.laneWidth = float(self.LP.lane_width)\n plan_send.pathPlan.dPathPoints = [float(x) for x in self.y_pts]\n plan_send.pathPlan.lProb = float(self.LP.lll_prob)\n plan_send.pathPlan.rProb = float(self.LP.rll_prob)\n plan_send.pathPlan.dProb = float(self.LP.d_prob)\n\n plan_send.pathPlan.angleSteers = float(self.desired_steering_wheel_angle_deg)\n plan_send.pathPlan.rateSteers = float(self.desired_steering_wheel_angle_rate_deg)\n plan_send.pathPlan.angleOffset = float(sm['liveParameters'].angleOffsetAverage)\n plan_send.pathPlan.mpcSolutionValid = bool(plan_solution_valid)\n plan_send.pathPlan.paramsValid = bool(sm['liveParameters'].valid)\n\n plan_send.pathPlan.desire = self.desire\n plan_send.pathPlan.laneChangeState = self.lane_change_state\n plan_send.pathPlan.laneChangeDirection = self.lane_change_direction\n\n pm.send('pathPlan', plan_send)\n\n if LOG_MPC:\n dat = messaging.new_message('liveMpc')\n dat.liveMpc.x = list(self.mpc_solution[0].x)\n dat.liveMpc.y = list(self.mpc_solution[0].y)\n dat.liveMpc.psi = list(self.mpc_solution[0].psi)\n dat.liveMpc.tire_angle = list(self.mpc_solution[0].tire_angle)\n dat.liveMpc.cost = self.mpc_solution[0].cost\n pm.send('liveMpc', dat)\n"
] |
[
[
"numpy.zeros",
"numpy.ones"
],
[
"numpy.arange",
"numpy.linalg.norm",
"numpy.column_stack",
"numpy.array",
"numpy.zeros"
]
] |
sethmerkel/qiskit-terra
|
[
"aa2c3e6049adff7a846be5b865d85ac77886b590"
] |
[
"qiskit/quantum_info/states/statevector.py"
] |
[
"# This code is part of Qiskit.\n#\n# (C) Copyright IBM 2017, 2019.\n#\n# This code is licensed under the Apache License, Version 2.0. You may\n# obtain a copy of this license in the LICENSE.txt file in the root directory\n# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.\n#\n# Any modifications or derivative works of this code must retain this\n# copyright notice, and modified files need to carry a notice indicating\n# that they have been altered from the originals.\n\n\"\"\"\nStatevector quantum state class.\n\"\"\"\n\nimport copy\nimport re\nfrom numbers import Number\n\nimport numpy as np\n\nfrom qiskit.circuit.quantumcircuit import QuantumCircuit\nfrom qiskit.circuit.instruction import Instruction\nfrom qiskit.exceptions import QiskitError\nfrom qiskit.quantum_info.states.quantum_state import QuantumState\nfrom qiskit.quantum_info.operators.mixins.tolerances import TolerancesMixin\nfrom qiskit.quantum_info.operators.operator import Operator\nfrom qiskit.quantum_info.operators.symplectic import Pauli, SparsePauliOp\nfrom qiskit.quantum_info.operators.op_shape import OpShape\nfrom qiskit.quantum_info.operators.predicates import matrix_equal\n\n# pylint: disable=no-name-in-module\nfrom .cython.exp_value import expval_pauli_no_x, expval_pauli_with_x\n\n\nclass Statevector(QuantumState, TolerancesMixin):\n \"\"\"Statevector class\"\"\"\n\n def __init__(self, data, dims=None):\n \"\"\"Initialize a statevector object.\n\n Args:\n data (np.array or list or Statevector or Operator or QuantumCircuit or\n qiskit.circuit.Instruction):\n Data from which the statevector can be constructed. This can be either a complex\n vector, another statevector, a ``Operator` with only one column or a\n ``QuantumCircuit`` or ``Instruction``. If the data is a circuit or instruction,\n the statevector is constructed by assuming that all qubits are initialized to the\n zero state.\n dims (int or tuple or list): Optional. The subsystem dimension of\n the state (See additional information).\n\n Raises:\n QiskitError: if input data is not valid.\n\n Additional Information:\n The ``dims`` kwarg can be None, an integer, or an iterable of\n integers.\n\n * ``Iterable`` -- the subsystem dimensions are the values in the list\n with the total number of subsystems given by the length of the list.\n\n * ``Int`` or ``None`` -- the length of the input vector\n specifies the total dimension of the density matrix. If it is a\n power of two the state will be initialized as an N-qubit state.\n If it is not a power of two the state will have a single\n d-dimensional subsystem.\n \"\"\"\n if isinstance(data, (list, np.ndarray)):\n # Finally we check if the input is a raw vector in either a\n # python list or numpy array format.\n self._data = np.asarray(data, dtype=complex)\n elif isinstance(data, Statevector):\n self._data = data._data\n if dims is None:\n dims = data._op_shape._dims_l\n elif isinstance(data, Operator):\n # We allow conversion of column-vector operators to Statevectors\n input_dim, _ = data.dim\n if input_dim != 1:\n raise QiskitError(\"Input Operator is not a column-vector.\")\n self._data = np.ravel(data.data)\n elif isinstance(data, (QuantumCircuit, Instruction)):\n self._data = Statevector.from_instruction(data).data\n else:\n raise QiskitError(\"Invalid input data format for Statevector\")\n # Check that the input is a numpy vector or column-vector numpy\n # matrix. 
If it is a column-vector matrix reshape to a vector.\n ndim = self._data.ndim\n shape = self._data.shape\n if ndim != 1:\n if ndim == 2 and shape[1] == 1:\n self._data = np.reshape(self._data, shape[0])\n shape = self._data.shape\n elif ndim != 2 or shape[1] != 1:\n raise QiskitError(\"Invalid input: not a vector or column-vector.\")\n super().__init__(op_shape=OpShape.auto(\n shape=shape, dims_l=dims, num_qubits_r=0))\n\n def __array__(self, dtype=None):\n if dtype:\n return np.asarray(self.data, dtype=dtype)\n return self.data\n\n def __eq__(self, other):\n return super().__eq__(other) and np.allclose(\n self._data, other._data, rtol=self.rtol, atol=self.atol)\n\n def __repr__(self):\n prefix = 'Statevector('\n pad = len(prefix) * ' '\n return '{}{},\\n{}dims={})'.format(\n prefix, np.array2string(\n self._data, separator=', ', prefix=prefix),\n pad, self._op_shape.dims_l())\n\n def draw(self, output=None, **drawer_args):\n \"\"\"Return a visualization of the Statevector.\n\n **repr**: ASCII TextMatrix of the state's ``__repr__``.\n\n **text**: ASCII TextMatrix that can be printed in the console.\n\n **latex**: An IPython Latex object for displaying in Jupyter Notebooks.\n\n **latex_source**: Raw, uncompiled ASCII source to generate array using LaTeX.\n\n **qsphere**: Matplotlib figure, rendering of statevector using `plot_state_qsphere()`.\n\n **hinton**: Matplotlib figure, rendering of statevector using `plot_state_hinton()`.\n\n **bloch**: Matplotlib figure, rendering of statevector using `plot_bloch_multivector()`.\n\n Args:\n output (str): Select the output method to use for drawing the\n state. Valid choices are `repr`, `text`, `latex`, `latex_source`,\n `qsphere`, `hinton`, or `bloch`. Default is `repr`. Default can\n be changed by adding the line ``state_drawer = <default>`` to\n ``~/.qiskit/settings.conf`` under ``[default]``.\n drawer_args: Arguments to be passed directly to the relevant drawing\n function or constructor (`TextMatrix()`, `array_to_latex()`,\n `plot_state_qsphere()`, `plot_state_hinton()` or `plot_bloch_multivector()`).\n See the relevant function under `qiskit.visualization` for that function's\n documentation.\n\n Returns:\n :class:`matplotlib.Figure` or :class:`str` or\n :class:`TextMatrix` or :class:`IPython.display.Latex`:\n Drawing of the Statevector.\n\n Raises:\n ValueError: when an invalid output method is selected.\n \"\"\"\n # pylint: disable=cyclic-import\n from qiskit.visualization.state_visualization import state_drawer\n return state_drawer(self, output=output, **drawer_args)\n\n def _ipython_display_(self):\n out = self.draw()\n if isinstance(out, str):\n print(out)\n else:\n from IPython.display import display\n display(out)\n\n @property\n def data(self):\n \"\"\"Return data.\"\"\"\n return self._data\n\n def is_valid(self, atol=None, rtol=None):\n \"\"\"Return True if a Statevector has norm 1.\"\"\"\n if atol is None:\n atol = self.atol\n if rtol is None:\n rtol = self.rtol\n norm = np.linalg.norm(self.data)\n return np.allclose(norm, 1, rtol=rtol, atol=atol)\n\n def to_operator(self):\n \"\"\"Convert state to a rank-1 projector operator\"\"\"\n mat = np.outer(self.data, np.conj(self.data))\n return Operator(mat, input_dims=self.dims(), output_dims=self.dims())\n\n def conjugate(self):\n \"\"\"Return the conjugate of the operator.\"\"\"\n return Statevector(np.conj(self.data), dims=self.dims())\n\n def trace(self):\n \"\"\"Return the trace of the quantum state as a density matrix.\"\"\"\n return np.sum(np.abs(self.data) ** 2)\n\n def 
purity(self):\n \"\"\"Return the purity of the quantum state.\"\"\"\n # For a valid statevector the purity is always 1, however if we simply\n # have an arbitrary vector (not correctly normalized) then the\n # purity is equivalent to the trace squared:\n # P(|psi>) = Tr[|psi><psi|psi><psi|] = |<psi|psi>|^2\n return self.trace() ** 2\n\n def tensor(self, other):\n \"\"\"Return the tensor product state self ⊗ other.\n\n Args:\n other (Statevector): a quantum state object.\n\n Returns:\n Statevector: the tensor product operator self ⊗ other.\n\n Raises:\n QiskitError: if other is not a quantum state.\n \"\"\"\n if not isinstance(other, Statevector):\n other = Statevector(other)\n ret = copy.copy(self)\n ret._op_shape = self._op_shape.tensor(other._op_shape)\n ret._data = np.kron(self._data, other._data)\n return ret\n\n def expand(self, other):\n \"\"\"Return the tensor product state other ⊗ self.\n\n Args:\n other (Statevector): a quantum state object.\n\n Returns:\n Statevector: the tensor product state other ⊗ self.\n\n Raises:\n QiskitError: if other is not a quantum state.\n \"\"\"\n if not isinstance(other, Statevector):\n other = Statevector(other)\n ret = copy.copy(self)\n ret._op_shape = self._op_shape.expand(other._op_shape)\n ret._data = np.kron(other._data, self._data)\n return ret\n\n def _add(self, other):\n \"\"\"Return the linear combination self + other.\n\n Args:\n other (Statevector): a quantum state object.\n\n Returns:\n Statevector: the linear combination self + other.\n\n Raises:\n QiskitError: if other is not a quantum state, or has\n incompatible dimensions.\n \"\"\"\n if not isinstance(other, Statevector):\n other = Statevector(other)\n self._op_shape._validate_add(other._op_shape)\n ret = copy.copy(self)\n ret._data = self.data + other.data\n return ret\n\n def _multiply(self, other):\n \"\"\"Return the scalar multiplied state self * other.\n\n Args:\n other (complex): a complex number.\n\n Returns:\n Statevector: the scalar multiplied state other * self.\n\n Raises:\n QiskitError: if other is not a valid complex number.\n \"\"\"\n if not isinstance(other, Number):\n raise QiskitError(\"other is not a number\")\n ret = copy.copy(self)\n ret._data = other * self.data\n return ret\n\n def evolve(self, other, qargs=None):\n \"\"\"Evolve a quantum state by the operator.\n\n Args:\n other (Operator): The operator to evolve by.\n qargs (list): a list of Statevector subsystem positions to apply\n the operator on.\n\n Returns:\n Statevector: the output quantum state.\n\n Raises:\n QiskitError: if the operator dimension does not match the\n specified Statevector subsystem dimensions.\n \"\"\"\n if qargs is None:\n qargs = getattr(other, 'qargs', None)\n\n # Get return vector\n ret = copy.copy(self)\n\n # Evolution by a circuit or instruction\n if isinstance(other, QuantumCircuit):\n other = other.to_instruction()\n if isinstance(other, Instruction):\n if self.num_qubits is None:\n raise QiskitError(\"Cannot apply QuantumCircuit to non-qubit Statevector.\")\n return self._evolve_instruction(ret, other, qargs=qargs)\n\n # Evolution by an Operator\n if not isinstance(other, Operator):\n other = Operator(other)\n\n # check dimension\n if self.dims(qargs) != other.input_dims():\n raise QiskitError(\n \"Operator input dimensions are not equal to statevector subsystem dimensions.\"\n )\n return Statevector._evolve_operator(ret, other, qargs=qargs)\n\n def equiv(self, other, rtol=None, atol=None):\n \"\"\"Return True if other is equivalent as a statevector up to global phase.\n\n .. 
note::\n\n If other is not a Statevector, but can be used to initialize a statevector object,\n this will check that Statevector(other) is equivalent to the current statevector up\n to global phase.\n\n Args:\n other (Statevector): an object from which a ``Statevector`` can be constructed.\n rtol (float): relative tolerance value for comparison.\n atol (float): absolute tolerance value for comparison.\n\n Returns:\n bool: True if statevectors are equivalent up to global phase.\n \"\"\"\n if not isinstance(other, Statevector):\n try:\n other = Statevector(other)\n except QiskitError:\n return False\n if self.dim != other.dim:\n return False\n if atol is None:\n atol = self.atol\n if rtol is None:\n rtol = self.rtol\n return matrix_equal(self.data, other.data, ignore_phase=True,\n rtol=rtol, atol=atol)\n\n def reverse_qargs(self):\n r\"\"\"Return a Statevector with reversed subsystem ordering.\n\n For a tensor product state this is equivalent to reversing the order\n of tensor product subsystems. For a statevector\n :math:`|\\psi \\rangle = |\\psi_{n-1} \\rangle \\otimes ... \\otimes |\\psi_0 \\rangle`\n the returned statevector will be\n :math:`|\\psi_{0} \\rangle \\otimes ... \\otimes |\\psi_{n-1} \\rangle`.\n\n Returns:\n Statevector: the Statevector with reversed subsystem order.\n \"\"\"\n ret = copy.copy(self)\n axes = tuple(range(self._op_shape._num_qargs_l - 1, -1, -1))\n ret._data = np.reshape(np.transpose(\n np.reshape(self.data, self._op_shape.tensor_shape), axes),\n self._op_shape.shape)\n ret._op_shape = self._op_shape.reverse()\n return ret\n\n def _expectation_value_pauli(self, pauli, qargs=None):\n \"\"\"Compute the expectation value of a Pauli.\n\n Args:\n pauli (Pauli): a Pauli operator to evaluate expval of.\n qargs (None or list): subsystems to apply operator on.\n\n Returns:\n complex: the expectation value.\n \"\"\"\n n_pauli = len(pauli)\n if qargs is None:\n qubits = np.arange(n_pauli)\n else:\n qubits = np.array(qargs)\n\n x_mask = np.dot(1 << qubits, pauli.x)\n z_mask = np.dot(1 << qubits, pauli.z)\n pauli_phase = (-1j) ** pauli.phase if pauli.phase else 1\n\n if x_mask + z_mask == 0:\n return pauli_phase * np.linalg.norm(self.data)\n\n if x_mask == 0:\n return pauli_phase * expval_pauli_no_x(self.data, self.num_qubits, z_mask)\n\n x_max = qubits[pauli.x][-1]\n y_phase = (-1j) ** np.sum(pauli.x & pauli.z)\n\n return pauli_phase * expval_pauli_with_x(\n self.data, self.num_qubits, z_mask, x_mask, y_phase, x_max)\n\n def expectation_value(self, oper, qargs=None):\n \"\"\"Compute the expectation value of an operator.\n\n Args:\n oper (Operator): an operator to evaluate expval of.\n qargs (None or list): subsystems to apply operator on.\n\n Returns:\n complex: the expectation value.\n \"\"\"\n if isinstance(oper, Pauli):\n return self._expectation_value_pauli(oper, qargs)\n\n if isinstance(oper, SparsePauliOp):\n return sum([coeff * self._expectation_value_pauli(Pauli((z, x)), qargs)\n for z, x, coeff in zip(oper.table.Z, oper.table.X, oper.coeffs)])\n\n val = self.evolve(oper, qargs=qargs)\n conj = self.conjugate()\n return np.dot(conj.data, val.data)\n\n def probabilities(self, qargs=None, decimals=None):\n \"\"\"Return the subsystem measurement probability vector.\n\n Measurement probabilities are with respect to measurement in the\n computation (diagonal) basis.\n\n Args:\n qargs (None or list): subsystems to return probabilities for,\n if None return for all subsystems (Default: None).\n decimals (None or int): the number of decimal places to round\n values. 
If None no rounding is done (Default: None).\n\n Returns:\n np.array: The Numpy vector array of probabilities.\n\n Examples:\n\n Consider a 2-qubit product state\n :math:`|\\\\psi\\\\rangle=|+\\\\rangle\\\\otimes|0\\\\rangle`.\n\n .. jupyter-execute::\n\n from qiskit.quantum_info import Statevector\n\n psi = Statevector.from_label('+0')\n\n # Probabilities for measuring both qubits\n probs = psi.probabilities()\n print('probs: {}'.format(probs))\n\n # Probabilities for measuring only qubit-0\n probs_qubit_0 = psi.probabilities([0])\n print('Qubit-0 probs: {}'.format(probs_qubit_0))\n\n # Probabilities for measuring only qubit-1\n probs_qubit_1 = psi.probabilities([1])\n print('Qubit-1 probs: {}'.format(probs_qubit_1))\n\n We can also permute the order of qubits in the ``qargs`` list\n to change the qubit position in the probabilities output\n\n .. jupyter-execute::\n\n from qiskit.quantum_info import Statevector\n\n psi = Statevector.from_label('+0')\n\n # Probabilities for measuring both qubits\n probs = psi.probabilities([0, 1])\n print('probs: {}'.format(probs))\n\n # Probabilities for measuring both qubits\n # but swapping qubits 0 and 1 in output\n probs_swapped = psi.probabilities([1, 0])\n print('Swapped probs: {}'.format(probs_swapped))\n \"\"\"\n probs = self._subsystem_probabilities(\n np.abs(self.data) ** 2, self._op_shape.dims_l(), qargs=qargs)\n if decimals is not None:\n probs = probs.round(decimals=decimals)\n return probs\n\n def reset(self, qargs=None):\n \"\"\"Reset state or subsystems to the 0-state.\n\n Args:\n qargs (list or None): subsystems to reset, if None all\n subsystems will be reset to their 0-state\n (Default: None).\n\n Returns:\n Statevector: the reset state.\n\n Additional Information:\n If all subsystems are reset this will return the ground state\n on all subsystems. If only a some subsystems are reset this\n function will perform a measurement on those subsystems and\n evolve the subsystems so that the collapsed post-measurement\n states are rotated to the 0-state. The RNG seed for this\n sampling can be set using the :meth:`seed` method.\n \"\"\"\n if qargs is None:\n # Resetting all qubits does not require sampling or RNG\n ret = copy.copy(self)\n state = np.zeros(self._op_shape.shape, dtype=complex)\n state[0] = 1\n ret._data = state\n return ret\n\n # Sample a single measurement outcome\n dims = self.dims(qargs)\n probs = self.probabilities(qargs)\n sample = self._rng.choice(len(probs), p=probs, size=1)\n\n # Convert to projector for state update\n proj = np.zeros(len(probs), dtype=complex)\n proj[sample] = 1 / np.sqrt(probs[sample])\n\n # Rotate outcome to 0\n reset = np.eye(len(probs))\n reset[0, 0] = 0\n reset[sample, sample] = 0\n reset[0, sample] = 1\n\n # compose with reset projection\n reset = np.dot(reset, np.diag(proj))\n return self.evolve(\n Operator(reset, input_dims=dims, output_dims=dims),\n qargs=qargs)\n\n @classmethod\n def from_label(cls, label):\n \"\"\"Return a tensor product of Pauli X,Y,Z eigenstates.\n\n .. 
list-table:: Single-qubit state labels\n :header-rows: 1\n\n * - Label\n - Statevector\n * - ``\"0\"``\n - :math:`[1, 0]`\n * - ``\"1\"``\n - :math:`[0, 1]`\n * - ``\"+\"``\n - :math:`[1 / \\\\sqrt{2}, 1 / \\\\sqrt{2}]`\n * - ``\"-\"``\n - :math:`[1 / \\\\sqrt{2}, -1 / \\\\sqrt{2}]`\n * - ``\"r\"``\n - :math:`[1 / \\\\sqrt{2}, i / \\\\sqrt{2}]`\n * - ``\"l\"``\n - :math:`[1 / \\\\sqrt{2}, -i / \\\\sqrt{2}]`\n\n Args:\n label (string): a eigenstate string ket label (see table for\n allowed values).\n\n Returns:\n Statevector: The N-qubit basis state density matrix.\n\n Raises:\n QiskitError: if the label contains invalid characters, or the\n length of the label is larger than an explicitly\n specified num_qubits.\n \"\"\"\n # Check label is valid\n if re.match(r'^[01rl\\-+]+$', label) is None:\n raise QiskitError('Label contains invalid characters.')\n # We can prepare Z-eigenstates by converting the computational\n # basis bit-string to an integer and preparing that unit vector\n # However, for X-basis states, we will prepare a Z-eigenstate first\n # then apply Hadamard gates to rotate 0 and 1s to + and -.\n z_label = label\n xy_states = False\n if re.match('^[01]+$', label) is None:\n # We have X or Y eigenstates so replace +,r with 0 and\n # -,l with 1 and prepare the corresponding Z state\n xy_states = True\n z_label = z_label.replace('+', '0')\n z_label = z_label.replace('r', '0')\n z_label = z_label.replace('-', '1')\n z_label = z_label.replace('l', '1')\n # Initialize Z eigenstate vector\n num_qubits = len(label)\n data = np.zeros(1 << num_qubits, dtype=complex)\n pos = int(z_label, 2)\n data[pos] = 1\n state = Statevector(data)\n if xy_states:\n # Apply hadamards to all qubits in X eigenstates\n x_mat = np.array([[1, 1], [1, -1]], dtype=complex) / np.sqrt(2)\n # Apply S.H to qubits in Y eigenstates\n y_mat = np.dot(np.diag([1, 1j]), x_mat)\n for qubit, char in enumerate(reversed(label)):\n if char in ['+', '-']:\n state = state.evolve(x_mat, qargs=[qubit])\n elif char in ['r', 'l']:\n state = state.evolve(y_mat, qargs=[qubit])\n return state\n\n @staticmethod\n def from_int(i, dims):\n \"\"\"Return a computational basis statevector.\n\n Args:\n i (int): the basis state element.\n dims (int or tuple or list): The subsystem dimensions of the statevector\n (See additional information).\n\n Returns:\n Statevector: The computational basis state :math:`|i\\\\rangle`.\n\n Additional Information:\n The ``dims`` kwarg can be an integer or an iterable of integers.\n\n * ``Iterable`` -- the subsystem dimensions are the values in the list\n with the total number of subsystems given by the length of the list.\n\n * ``Int`` -- the integer specifies the total dimension of the\n state. If it is a power of two the state will be initialized\n as an N-qubit state. 
If it is not a power of two the state\n will have a single d-dimensional subsystem.\n \"\"\"\n size = np.product(dims)\n state = np.zeros(size, dtype=complex)\n state[i] = 1.0\n return Statevector(state, dims=dims)\n\n @classmethod\n def from_instruction(cls, instruction):\n \"\"\"Return the output statevector of an instruction.\n\n The statevector is initialized in the state :math:`|{0,\\\\ldots,0}\\\\rangle` of the\n same number of qubits as the input instruction or circuit, evolved\n by the input instruction, and the output statevector returned.\n\n Args:\n instruction (qiskit.circuit.Instruction or QuantumCircuit): instruction or circuit\n\n Returns:\n Statevector: The final statevector.\n\n Raises:\n QiskitError: if the instruction contains invalid instructions for\n the statevector simulation.\n \"\"\"\n # Convert circuit to an instruction\n if isinstance(instruction, QuantumCircuit):\n instruction = instruction.to_instruction()\n # Initialize an the statevector in the all |0> state\n init = np.zeros(2 ** instruction.num_qubits, dtype=complex)\n init[0] = 1.0\n vec = Statevector(init, dims=instruction.num_qubits * (2,))\n return Statevector._evolve_instruction(vec, instruction)\n\n def to_dict(self, decimals=None):\n r\"\"\"Convert the statevector to dictionary form.\n\n This dictionary representation uses a Ket-like notation where the\n dictionary keys are qudit strings for the subsystem basis vectors.\n If any subsystem has a dimension greater than 10 comma delimiters are\n inserted between integers so that subsystems can be distinguished.\n\n Args:\n decimals (None or int): the number of decimal places to round\n values. If None no rounding is done\n (Default: None).\n\n Returns:\n dict: the dictionary form of the Statevector.\n\n Example:\n\n The ket-form of a 2-qubit statevector\n :math:`|\\psi\\rangle = |-\\rangle\\otimes |0\\rangle`\n\n .. jupyter-execute::\n\n from qiskit.quantum_info import Statevector\n\n psi = Statevector.from_label('-0')\n print(psi.to_dict())\n\n For non-qubit subsystems the integer range can go from 0 to 9. For\n example in a qutrit system\n\n .. jupyter-execute::\n\n import numpy as np\n from qiskit.quantum_info import Statevector\n\n vec = np.zeros(9)\n vec[0] = 1 / np.sqrt(2)\n vec[-1] = 1 / np.sqrt(2)\n psi = Statevector(vec, dims=(3, 3))\n print(psi.to_dict())\n\n For large subsystem dimensions delimiters are required. The\n following example is for a 20-dimensional system consisting of\n a qubit and 10-dimensional qudit.\n\n .. 
jupyter-execute::\n\n import numpy as np\n from qiskit.quantum_info import Statevector\n\n vec = np.zeros(2 * 10)\n vec[0] = 1 / np.sqrt(2)\n vec[-1] = 1 / np.sqrt(2)\n psi = Statevector(vec, dims=(2, 10))\n print(psi.to_dict())\n \"\"\"\n return self._vector_to_dict(self.data,\n self._op_shape.dims_l(),\n decimals=decimals,\n string_labels=True)\n\n @staticmethod\n def _evolve_operator(statevec, oper, qargs=None):\n \"\"\"Evolve a qudit statevector\"\"\"\n new_shape = statevec._op_shape.compose(oper._op_shape, qargs=qargs)\n if qargs is None:\n # Full system evolution\n statevec._data = np.dot(oper._data, statevec._data)\n statevec._op_shape = new_shape\n return statevec\n\n # Get transpose axes\n num_qargs = statevec._op_shape.num_qargs[0]\n indices = [num_qargs - 1 - i for i in reversed(qargs)]\n axes = indices + [i for i in range(num_qargs) if i not in indices]\n axes_inv = np.argsort(axes).tolist()\n\n # Calculate contraction dimensions\n contract_dim = oper._op_shape.shape[1]\n contract_shape = (contract_dim, statevec._op_shape.shape[0] // contract_dim)\n\n # Reshape input for contraction\n statevec._data = np.reshape(\n np.transpose(np.reshape(statevec.data, statevec._op_shape.tensor_shape),\n axes), contract_shape)\n # Contract and reshape output\n statevec._data = np.reshape(np.dot(oper.data, statevec._data),\n new_shape.tensor_shape)\n statevec._data = np.reshape(np.transpose(statevec._data, axes_inv),\n new_shape.shape[0])\n\n # Update dimension\n statevec._op_shape = new_shape\n return statevec\n\n @staticmethod\n def _evolve_instruction(statevec, obj, qargs=None):\n \"\"\"Update the current Statevector by applying an instruction.\"\"\"\n from qiskit.circuit.reset import Reset\n from qiskit.circuit.barrier import Barrier\n\n mat = Operator._instruction_to_matrix(obj)\n if mat is not None:\n # Perform the composition and inplace update the current state\n # of the operator\n return Statevector._evolve_operator(statevec, Operator(mat), qargs=qargs)\n\n # Special instruction types\n if isinstance(obj, Reset):\n statevec._data = statevec.reset(qargs)._data\n return statevec\n if isinstance(obj, Barrier):\n return statevec\n\n # If the instruction doesn't have a matrix defined we use its\n # circuit decomposition definition if it exists, otherwise we\n # cannot compose this gate and raise an error.\n if obj.definition is None:\n raise QiskitError('Cannot apply Instruction: {}'.format(obj.name))\n if not isinstance(obj.definition, QuantumCircuit):\n raise QiskitError('{} instruction definition is {}; expected QuantumCircuit'.format(\n obj.name, type(obj.definition)))\n if obj.definition.global_phase:\n statevec._data *= np.exp(1j * float(obj.definition.global_phase))\n qubits = {qubit: i for i, qubit in enumerate(obj.definition.qubits)}\n for instr, qregs, cregs in obj.definition:\n if cregs:\n raise QiskitError(\n 'Cannot apply instruction with classical registers: {}'.format(\n instr.name))\n # Get the integer position of the flat register\n if qargs is None:\n new_qargs = [qubits[tup] for tup in qregs]\n else:\n new_qargs = [qargs[qubits[tup]] for tup in qregs]\n Statevector._evolve_instruction(statevec, instr, qargs=new_qargs)\n return statevec\n"
] |
[
[
"numpy.diag",
"numpy.dot",
"numpy.product",
"numpy.sqrt",
"numpy.asarray",
"numpy.kron",
"numpy.allclose",
"numpy.reshape",
"numpy.arange",
"numpy.ravel",
"numpy.array2string",
"numpy.zeros",
"numpy.transpose",
"numpy.argsort",
"numpy.array",
"numpy.sum",
"numpy.conj",
"numpy.abs",
"numpy.linalg.norm"
]
] |
anudeepsekhar/Lane-Detection-Pytorch
|
[
"cfddda8a0768cf83afd87e29d605fd58aa89df59"
] |
[
"scripts/helper.py"
] |
[
"import matplotlib.pyplot as plt\nimport torchvision\nimport torch\nimport numpy as np\nimport torch.nn.functional as F\n\ndef images_to_probs(net, images):\n '''\n Generates predictions and corresponding probabilities from a trained\n network and a list of images\n '''\n output = net(images)\n # convert output probabilities to predicted class\n _, preds_tensor = torch.max(output, 1)\n preds = np.squeeze(preds_tensor.numpy())\n return preds, [F.softmax(el, dim=0)[i].item() for i, el in zip(preds, output)]\n\n\ndef plot_classes_preds(net, images, labels):\n '''\n Generates matplotlib Figure using a trained network, along with images\n and labels from a batch, that shows the network's top prediction along\n with its probability, alongside the actual label, coloring this\n information based on whether the prediction was correct or not.\n Uses the \"images_to_probs\" function.\n '''\n preds, probs = images_to_probs(net, images)\n # plot the images in the batch, along with predicted and true labels\n fig = plt.figure(figsize=(12, 48))\n for idx in np.arange(4):\n ax = fig.add_subplot(1, 4, idx+1, xticks=[], yticks=[])\n matplotlib_imshow(images[idx], one_channel=True)\n ax.set_title(\"{0}, {1:.1f}%\\n(label: {2})\".format(\n classes[preds[idx]],\n probs[idx] * 100.0,\n classes[labels[idx]]),\n color=(\"green\" if preds[idx]==labels[idx].item() else \"red\"))\n return fig\n\ndef plot_label_mask(model, images, labels, grayscale):\n '''\n Generates matplotlib Figure using a trained network, along with images\n and labels from a batch, that shows the network's top prediction.\n '''\n ins_pred_= model(images)\n ins_pred = ins_pred_.cpu().data.numpy()\n # ins_pred = ins_pred[0]\n ins_pred = np.concatenate(ins_pred)\n images_ = images.cpu()\n # images_ = images_[0]\n grid_img = torchvision.utils.make_grid(images_, nrow=1)\n images_ = torch.squeeze(images_)\n if not grayscale:\n images_ = images_.permute(1, 2, 0)\n labels_ = labels.cpu().data.numpy()\n labels_ = np.squeeze(labels_)\n \n # plot the images in the batch, along with predicted and true labels\n fig, ax = plt.subplots(1, 5, figsize=(48,12))\n\n ax[0].imshow(images_.numpy()/255)\n ax[1].imshow(labels_[0])\n ax[2].imshow(ins_pred[0])\n ax[3].imshow(labels_[1])\n ax[4].imshow(ins_pred[1])\n\n return fig\n\n def plot_label_mask2(model, images, labels, grayscale):\n '''\n Generates matplotlib Figure using a trained network, along with images\n and labels from a batch, that shows the network's top prediction.\n '''\n ins_pred_= model(images)\n ins_pred = ins_pred_.cpu().data.numpy()\n ins_pred = np.concatenate(ins_pred)\n images_ = images.cpu()\n images_ = images_[0]\n grid_img = torchvision.utils.make_grid(images_, nrow=1)\n images_ = torch.squeeze(images_)\n if not grayscale:\n images_ = images_.permute(1, 2, 0)\n labels_ = labels.cpu().data.numpy()\n labels_ = np.squeeze(labels_)\n \n # plot the images in the batch, along with predicted and true labels\n fig, ax = plt.subplots(1, 5, figsize=(48,12))\n\n ax[0].imshow(images_.numpy()/255)\n ax[1].imshow(labels_[0])\n ax[2].imshow(ins_pred[0])\n ax[3].imshow(labels_[1])\n ax[4].imshow(ins_pred[1])\n\n return fig\n\ndef plot_tu_data(images, labels, predicts):\n images = images.cpu()\n labels = labels.cpu()\n predicts = predicts.cpu().detach()\n predicts = F.sigmoid(predicts)\n image = torch.squeeze(images[0])\n image = image.permute(1, 2, 0)\n label = torch.squeeze(labels[0])\n predict = torch.squeeze(predicts[0])\n fig = plt.figure(figsize=(30,10))\n plt.subplot(1,5,1)\n plt.imshow(image)\n 
plt.subplot(1,5,2)\n plt.imshow(image)\n plt.imshow(label, cmap='jet', alpha=0.5)\n plt.subplot(1,5,3)\n plt.imshow(image)\n plt.imshow(predict, cmap='jet', alpha=0.5)\n plt.subplot(1,5,4)\n plt.imshow(label)\n plt.subplot(1,5,5)\n plt.imshow(predict)\n return fig\n\n\ndef plot_tu_data_2(images, labels, predicts):\n images = images.cpu()\n labels = labels.cpu()\n predicts = predicts.cpu().detach()\n predicts = F.sigmoid(predicts)\n image = torch.squeeze(images[0])\n image = image.permute(1, 2, 0)\n label1 = labels[0][0]\n label2 = labels[0][1]\n predict1 = predicts[0][0]\n predict2 = predicts[0][1]\n fig = plt.figure(figsize=(30,10))\n plt.subplot(1,5,1)\n plt.imshow(image)\n # plt.subplot(1,7,2)\n # plt.imshow(image)\n # plt.imshow(label, cmap='jet', alpha=0.5)\n # plt.subplot(1,7,3)\n # plt.imshow(image)\n # plt.imshow(predict, cmap='jet', alpha=0.5)\n plt.subplot(1,5,2)\n plt.imshow(label1)\n plt.subplot(1,5,3)\n plt.imshow(predict1)\n plt.subplot(1,5,4)\n plt.imshow(label2)\n plt.subplot(1,5,5)\n plt.imshow(predict2)\n return fig\n\n\ndef plot_tu_data_3(images, labels, predicts, points, pred_points):\n points = points.cpu()\n pred_points = pred_points.cpu().detach()\n images = images.cpu()\n labels = labels.cpu()\n predicts = predicts.cpu().detach()\n # predicts = F.sigmoid(predicts)\n image = torch.squeeze(images[0])\n image = image.permute(1, 2, 0)\n label = torch.squeeze(labels[0])\n predict = torch.squeeze(predicts[0])\n fig = plt.figure(figsize=(30,10))\n\n plt.subplot(1,5,1)\n plt.imshow(image)\n plt.subplot(1,5,2)\n plt.imshow(image)\n points = points.reshape(30,2)\n plt.scatter(points[:15,1], points[:15,0])\n plt.scatter(points[15:,1], points[15:,0])\n # plt.imshow(label, cmap='jet', alpha=0.5)\n plt.subplot(1,5,3)\n plt.imshow(image)\n pred_points = pred_points.reshape(30,2)\n plt.scatter(pred_points[:15,1], pred_points[:15,0])\n plt.scatter(pred_points[15:,1], pred_points[15:,0])\n # plt.imshow(predict, cmap='jet', alpha=0.5)\n plt.subplot(1,5,4)\n plt.imshow(label)\n plt.subplot(1,5,5)\n plt.imshow(predict)\n return fig"
] |
[
[
"matplotlib.pyplot.imshow",
"torch.nn.functional.softmax",
"torch.max",
"matplotlib.pyplot.scatter",
"numpy.arange",
"numpy.squeeze",
"matplotlib.pyplot.subplots",
"numpy.concatenate",
"torch.nn.functional.sigmoid",
"matplotlib.pyplot.subplot",
"torch.squeeze",
"matplotlib.pyplot.figure"
]
] |
iod-ine/philoseismos
|
[
"79240d11cf82c3552c4a49d4e19a79b003fa9929"
] |
[
"tests/test_grids/test_surfer6text.py"
] |
[
"\"\"\" philoseismos: engineering seismologist's toolbox.\n\nauthor: Ivan Dubrovin\ne-mail: [email protected] \"\"\"\n\nimport numpy as np\nimport pytest\n\nfrom philoseismos.grids import Surfer6TextGrid\n\n\ndef test_loading_surfer6text(text_grd_file, bad_text_grd_file):\n \"\"\" Test loading process of the grid files. \"\"\"\n\n # only proceeds to read true .grd files\n with pytest.raises(ValueError):\n grd = Surfer6TextGrid.load(bad_text_grd_file)\n\n grd = Surfer6TextGrid.load(text_grd_file)\n\n assert grd.nx == 10\n assert grd.ny == 15\n assert grd.xlo == 0\n assert grd.xhi == 9\n assert grd.ylo == 10\n assert grd.yhi == 38\n assert grd.zlo == 0\n assert grd.zhi == 150\n\n assert grd.dm.shape == (15, 10)\n assert np.alltrue(grd.dm == np.arange(150).reshape(15, 10))\n\n\ndef test_surfer6binary_properties(text_grd_file):\n \"\"\" Test the properties of the grid. \"\"\"\n\n grd = Surfer6TextGrid.load(text_grd_file)\n\n # to help construct the plt.imshow, grid returns it's extent\n assert grd.extent == [0, 9, 38, 10]\n\n\ndef test_surfer6binary_invert_axis_methods(text_grd_file):\n \"\"\" Test the .invert_yaxis() and .invert_xaxis() method of the gird. \"\"\"\n\n grd = Surfer6TextGrid.load(text_grd_file)\n\n dm = np.arange(150).reshape(15, 10)\n\n assert np.alltrue(grd.dm == dm)\n assert grd.ylo == 10\n assert grd.yhi == 38\n assert grd.xhi == 9\n assert grd.xlo == 0\n grd.invert_yaxis()\n assert np.alltrue(grd.dm == dm[::-1, :])\n assert grd.ylo == 38\n assert grd.yhi == 10\n grd.invert_xaxis()\n assert np.alltrue(grd.dm == dm[::-1, ::-1])\n assert grd.xlo == 9\n assert grd.xhi == 0\n grd.invert_yaxis()\n assert np.alltrue(grd.dm == dm[:, ::-1])\n grd.invert_xaxis()\n assert np.alltrue(grd.dm == dm)\n assert grd.ylo == 10\n assert grd.yhi == 38\n assert grd.xhi == 9\n assert grd.xlo == 0\n"
] |
[
[
"numpy.arange",
"numpy.alltrue"
]
] |
xiaobaishu0097/Pointcloud-InstanceSegmentation
|
[
"072fe748e31d466cb58b202d9bddb37303d07858"
] |
[
"train_instance_classifier.py"
] |
[
"'''\nPointGroup train.py\nWritten by Li Jiang\n'''\n\nimport torch\nimport torch.optim as optim\nfrom torch.utils.data import DataLoader, DistributedSampler\nimport time, sys, os, random, glob\nfrom tensorboardX import SummaryWriter\nimport numpy as np\n\nimport util.utils as utils\n\nfrom util.class_finder import model_class\nfrom model.model_functions import model_fn_decorator\n\ndef init():\n # config\n global cfg\n from util.config import get_parser\n cfg = get_parser()\n cfg.dist = False\n\n # copy important files to backup\n backup_dir = os.path.join(cfg.exp_path, 'backup_files')\n os.makedirs(backup_dir, exist_ok=True)\n os.system('cp train.py {}'.format(backup_dir))\n os.system('cp {} {}'.format(cfg.model_dir, backup_dir))\n os.system('cp {} {}'.format(cfg.dataset_dir, backup_dir))\n os.system('cp {} {}'.format(cfg.config, backup_dir))\n\n # logger\n global logger\n from util.log import get_logger\n logger = get_logger(cfg)\n\n # log the config\n logger.info(cfg)\n\n # summary writer\n global writer\n writer = SummaryWriter(cfg.exp_path)\n\n # random seed\n random.seed(cfg.manual_seed)\n np.random.seed(cfg.manual_seed)\n torch.manual_seed(cfg.manual_seed)\n torch.cuda.manual_seed_all(cfg.manual_seed)\n\ndef train_epoch(train_loader, model, model_fn, optimizer, epoch, scene_id):\n iter_time = utils.AverageMeter()\n data_time = utils.AverageMeter()\n am_dict = {}\n\n model.train()\n start_epoch = time.time()\n end = time.time()\n for i, batch in enumerate(train_loader):\n data_time.update(time.time() - end)\n torch.cuda.empty_cache()\n\n ##### adjust learning rate\n utils.step_learning_rate(optimizer, cfg.lr, epoch - 1, cfg.step_epoch, cfg.multiplier)\n\n ##### prepare input and forward\n loss, _, visual_dict, meter_dict = model_fn(batch, model, epoch)\n\n ##### meter_dict\n for k, v in meter_dict.items():\n if k not in am_dict.keys():\n am_dict[k] = utils.AverageMeter()\n am_dict[k].update(v[0], v[1])\n\n ##### backward\n optimizer.zero_grad()\n loss.backward()\n if cfg.clip:\n torch.nn.utils.clip_grad_norm_(model.parameters(), cfg.clip)\n optimizer.step()\n\n ##### time and print\n current_iter = (epoch - 1) * len(train_loader) + i + 1\n max_iter = cfg.epochs * len(train_loader)\n remain_iter = max_iter - current_iter\n\n iter_time.update(time.time() - end)\n end = time.time()\n\n remain_time = remain_iter * iter_time.avg\n t_m, t_s = divmod(remain_time, 60)\n t_h, t_m = divmod(t_m, 60)\n remain_time = '{:02d}:{:02d}:{:02d}'.format(int(t_h), int(t_m), int(t_s))\n\n sys.stdout.write(\n \"epoch: {}/{} iter: {}/{} loss: {:.6f}({:.6f}) data_time: {:.2f}({:.2f}) iter_time: {:.2f}({:.2f}) remain_time: {remain_time}\\n\".format\n (epoch, cfg.instance_classifier['epochs_per_instance'], i + 1, len(train_loader), am_dict['loss'].val, am_dict['loss'].avg,\n data_time.val, data_time.avg, iter_time.val, iter_time.avg, remain_time=remain_time))\n if (i == len(train_loader) - 1): print()\n\n\n logger.info(\"epoch: {}/{}, train loss: {:.4f}, time: {}s\".format(epoch, cfg.instance_classifier['epochs_per_instance'], am_dict['loss'].avg, time.time() - start_epoch))\n\n f = utils.checkpoint_scene_save(model, cfg.exp_path, cfg.config.split('/')[-1][:-5], epoch, scene_id, cfg.save_freq)\n logger.info('Saving {}'.format(f))\n\n for k in am_dict.keys():\n if k in visual_dict.keys():\n writer.add_scalar(k+'_train', am_dict[k].avg, epoch)\n\n\ndef eval_epoch(val_loader, model, model_fn, epoch):\n logger.info('>>>>>>>>>>>>>>>> Start Evaluation >>>>>>>>>>>>>>>>')\n am_dict = {}\n\n with torch.no_grad():\n 
model.eval()\n start_epoch = time.time()\n for i, batch in enumerate(val_loader):\n\n ##### prepare input and forward\n loss, preds, visual_dict, meter_dict = model_fn(batch, model, epoch)\n\n ##### meter_dict\n for k, v in meter_dict.items():\n if k not in am_dict.keys():\n am_dict[k] = utils.AverageMeter()\n am_dict[k].update(v[0], v[1])\n\n ##### print\n sys.stdout.write(\"\\riter: {}/{} loss: {:.4f}({:.4f})\".format(i + 1, len(val_loader), am_dict['loss'].val, am_dict['loss'].avg))\n if (i == len(val_loader) - 1): print()\n\n logger.info(\"epoch: {}/{}, val loss: {:.4f}, time: {}s\".format(epoch, cfg.epochs, am_dict['loss'].avg, time.time() - start_epoch))\n\n for k in am_dict.keys():\n if k in visual_dict.keys():\n writer.add_scalar(k + '_eval', am_dict[k].avg, epoch)\n\n\nif __name__ == '__main__':\n ##### init\n init()\n\n ##### SA\n if cfg.cache:\n if cfg.dataset == 'scannetv2':\n train_file_names = sorted(\n glob.glob(os.path.join(cfg.data_root, cfg.dataset, 'train', '*' + cfg.filename_suffix)))\n val_file_names = sorted(\n glob.glob(os.path.join(cfg.data_root, cfg.dataset, 'val', '*' + cfg.filename_suffix)))\n utils.create_shared_memory(train_file_names, wlabel=True)\n utils.create_shared_memory(val_file_names, wlabel=True)\n\n ##### get model version and data version\n exp_name = cfg.config.split('/')[-1][:-5]\n model_name = exp_name.split('_')[0]\n data_name = exp_name.split('_')[-1]\n\n ##### dataset\n if cfg.dataset == 'scannetv2':\n if data_name == 'scannet':\n from data.scannetv2_inst import ScannetDatast\n dataset = ScannetDatast(cfg)\n else:\n print(\"Error: no data loader - \" + data_name)\n exit(0)\n\n dataset.trainLoader()\n logger.info('Training samples: {}'.format(len(dataset.train_file_names)))\n dataset.valLoader()\n logger.info('Validation samples: {}'.format(len(dataset.val_file_names)))\n\n scene_id_restored = 0\n for scene_id in range(0, len(dataset.train_file_names)):\n if scene_id_restored > scene_id:\n continue\n # modify the instance number in the config file\n _, _, _, instance_label = dataset.train_files[scene_id]\n cfg.instance_classifier['instance_num'] = int(instance_label.max() + 1)\n\n dataset.trainInstanceLoader(scene_id)\n\n ##### model\n logger.info('=> creating model ...')\n\n Network = model_class(cfg.model_name)\n model = Network(cfg)\n\n use_cuda = torch.cuda.is_available()\n logger.info('cuda available: {}'.format(use_cuda))\n assert use_cuda\n model = model.cuda()\n\n # logger.info(model)\n logger.info('#classifier parameters: {}'.format(sum([x.nelement() for x in model.parameters()])))\n\n ##### optimizer\n if cfg.optim == 'Adam':\n optimizer = optim.Adam(filter(lambda p: p.requires_grad, model.parameters()), lr=cfg.lr)\n elif cfg.optim == 'SGD':\n optimizer = optim.SGD(filter(lambda p: p.requires_grad, model.parameters()), lr=cfg.lr, momentum=cfg.momentum,\n weight_decay=cfg.weight_decay)\n\n ##### model_fn (criterion)\n model_fn = model_fn_decorator(cfg)\n\n ##### resume\n start_epoch, scene_id_restored, f = utils.checkpoint_scene_restore(model, cfg.exp_path, cfg.config.split('/')[-1][:-5]) # resume from the latest epoch, or specify the epoch to restore\n if start_epoch > 10:\n start_epoch = 1\n if scene_id_restored > scene_id:\n continue\n logger.info('Restore from {}'.format(f) if len(f) > 0 else 'Start from epoch {}'.format(start_epoch))\n\n ##### train and val\n for epoch in range(start_epoch, cfg.instance_classifier['epochs_per_instance'] + 1):\n train_epoch(dataset.train_instance_data_loader, model, model_fn, optimizer, epoch, 
scene_id)\n\n # if utils.is_multiple(epoch, cfg.save_freq) or utils.is_power2(epoch):\n # eval_epoch(dataset.val_data_loader, model, model_fn, epoch)\n\n ##### delete SA\n # if cfg.cache:\n # if cfg.dataset == 'scannetv2':\n # utils.delete_shared_memory(train_file_names, wlabel=True)\n # utils.delete_shared_memory(val_file_names, wlabel=True)"
] |
[
[
"numpy.random.seed",
"torch.manual_seed",
"torch.cuda.empty_cache",
"torch.no_grad",
"torch.cuda.manual_seed_all",
"torch.cuda.is_available"
]
] |
manjotms10/google-trends-analytics
|
[
"718311ba92d880623eb85cf5c76423116faaade2"
] |
[
"src/analytics/video_games/scraper/vgchartz_weekly_sales_data.py"
] |
[
"import requests\r\nimport pandas\r\nimport numpy as np\r\nfrom bs4 import BeautifulSoup\r\n\r\ndef weekly_sales_scrape(weeks):\r\n '''\r\n this function is to get the weekly sales data from http://www.vgchartz.com/weekly/(week code)/Global/, where\r\n (weekcode) need to be specified by the week you are looking for. This function is intentionally designed for \r\n this website and doesn't apply to other websites.\r\n :param weekly: how many weeks back you want to scrape from 2018/12/22\r\n :return: a csv file that contains all the data, the week with no data represented with a blank row\r\n '''\r\n\r\n page_list = []\r\n data = {}\r\n rank_of_week =[]\r\n name = []\r\n weekly_sales = []\r\n total_sales = []\r\n week = []\r\n \r\n for ith_page in range(weeks):\r\n page_num = 43464 - 7*ith_page #go over each week by page number\r\n \r\n \r\n page = requests.get(\"http://www.vgchartz.com/weekly/%d/Global/\"% page_num) #request from chart website\r\n \r\n soup = BeautifulSoup(page.content, 'html.parser')\r\n \r\n selector = soup.select(\"tr\") #search all 'tr' where contains all the data of the selected week\r\n \r\n num = []\r\n a3 = []\r\n for i in range(75): #the location of sales data of each week\r\n i = 4+2*i\r\n num.append(i)\r\n if len(selector) < 300: #filter out the weeks with no data\r\n a3 = [' ']\r\n else: \r\n for i in num:\r\n info = selector[i].text #get text from the all the games of the selected week\r\n \r\n a1 = info.split('\\n') #process the data\r\n a2 = [] \r\n for element in a1:\r\n if element != '':\r\n a2.append(element)\r\n a3.append(a2)\r\n \r\n a4 = enumerate(a3) \r\n \r\n #put all the data to the corresponding category \r\n for item in a4: \r\n rank_of_week.append(item[1][0])\r\n name.append(item[1][1])\r\n weekly_sales.append(item[1][2])\r\n total_sales.append(item[1][3])\r\n week.append(item[1][4])\r\n \r\n #update the categorized data to each column\r\n data.update({\"rank of the week\":rank_of_week})\r\n data.update({\"name\":name})\r\n data.update({\"weekly sales\":weekly_sales})\r\n data.update({\"total sales\":total_sales})\r\n data.update({\"week after release\":week})\r\n \r\n df1 = pandas.DataFrame(data)#put everything in one dataframe and output to csv file\r\n return df1\r\n\r\nif __name__ == \"__main__\":\r\n weeks = 10 #for example if you want to look for the past year data\r\n sales_data = weekly_sales_scrape(weeks)\r\n sales_data.to_csv(\"../../../../conf/video_games/scraped/vgsales-game-sale-%dweeks.csv\"% (weeks), sep=',',index=False)\r\n "
] |
[
[
"pandas.DataFrame"
]
] |
xinyuewang1/chainerrl
|
[
"49425d09cb0749968f4e364e281670e752a46791",
"49425d09cb0749968f4e364e281670e752a46791"
] |
[
"tests/explorers_tests/test_additive_gaussian.py",
"examples/gym/train_pcl_gym.py"
] |
[
"from __future__ import unicode_literals\nfrom __future__ import print_function\nfrom __future__ import division\nfrom __future__ import absolute_import\nfrom future import standard_library\nstandard_library.install_aliases() # NOQA\nimport unittest\n\nfrom chainer import testing\nfrom chainer.testing import condition\nimport numpy as np\n\nfrom chainerrl.explorers.additive_gaussian import AdditiveGaussian\n\n\[email protected](*(\n testing.product({\n 'action_size': [1, 3],\n 'scale': [0, .1],\n 'low': [None, -.4],\n 'high': [None, .4],\n })\n))\nclass TestAdditiveGaussian(unittest.TestCase):\n\n @condition.retry(3)\n def test(self):\n\n def greedy_action_func():\n return np.full(self.action_size, .3)\n\n explorer = AdditiveGaussian(self.scale, low=self.low, high=self.high)\n\n actions = []\n for t in range(100):\n a = explorer.select_action(t, greedy_action_func)\n\n if self.low is not None:\n # Clipped at lower edge\n self.assertTrue((a >= self.low).all())\n\n if self.high is not None:\n # Clipped at upper edge\n self.assertTrue((a <= self.high).all())\n\n if self.scale == 0:\n # Without noise\n self.assertTrue((a == .3).all())\n else:\n # With noise\n self.assertFalse((a == .3).all())\n actions.append(a)\n\n if self.low is None and self.high is None:\n np.testing.assert_allclose(\n np.mean(np.asarray(actions), axis=0), .3, atol=.1)\n",
"\"\"\"An example of training PCL against OpenAI Gym Envs.\n\nThis script is an example of training a PCL agent against OpenAI Gym envs.\nBoth discrete and continuous action spaces are supported.\n\nTo solve CartPole-v0, run:\n python train_pcl_gym.py\n\nTo solve InvertedPendulum-v1, run:\n python train_pcl_gym.py --env InvertedPendulum-v1\n\"\"\"\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\nfrom __future__ import absolute_import\nfrom builtins import * # NOQA\nfrom future import standard_library\nstandard_library.install_aliases() # NOQA\nimport argparse\nimport os\n\n# This prevents numpy from using multiple threads\nos.environ['OMP_NUM_THREADS'] = '1' # NOQA\n\nimport chainer\nimport gym\nimport gym.spaces\nimport numpy as np\n\nimport chainerrl\nfrom chainerrl import experiments\nfrom chainerrl import misc\nfrom chainerrl.optimizers import rmsprop_async\n\n\ndef exp_return_of_episode(episode):\n return np.exp(sum(x['reward'] for x in episode))\n\n\ndef main():\n import logging\n\n parser = argparse.ArgumentParser()\n parser.add_argument('--processes', type=int, default=8)\n parser.add_argument('--gpu', type=int, default=0)\n parser.add_argument('--env', type=str, default='CartPole-v0')\n parser.add_argument('--seed', type=int, default=0,\n help='Random seed [0, 2 ** 32)')\n parser.add_argument('--outdir', type=str, default='results',\n help='Directory path to save output files.'\n ' If it does not exist, it will be created.')\n parser.add_argument('--batchsize', type=int, default=10)\n parser.add_argument('--rollout-len', type=int, default=10)\n parser.add_argument('--n-hidden-channels', type=int, default=100)\n parser.add_argument('--n-hidden-layers', type=int, default=2)\n parser.add_argument('--n-times-replay', type=int, default=1)\n parser.add_argument('--replay-start-size', type=int, default=10000)\n parser.add_argument('--t-max', type=int, default=None)\n parser.add_argument('--tau', type=float, default=1e-2)\n parser.add_argument('--profile', action='store_true')\n parser.add_argument('--steps', type=int, default=8 * 10 ** 7)\n parser.add_argument('--eval-interval', type=int, default=10 ** 5)\n parser.add_argument('--eval-n-runs', type=int, default=10)\n parser.add_argument('--reward-scale-factor', type=float, default=1e-2)\n parser.add_argument('--render', action='store_true', default=False)\n parser.add_argument('--lr', type=float, default=7e-4)\n parser.add_argument('--demo', action='store_true', default=False)\n parser.add_argument('--load', type=str, default='')\n parser.add_argument('--logger-level', type=int, default=logging.DEBUG)\n parser.add_argument('--monitor', action='store_true')\n parser.add_argument('--train-async', action='store_true', default=False)\n parser.add_argument('--prioritized-replay', action='store_true',\n default=False)\n parser.add_argument('--disable-online-update', action='store_true',\n default=False)\n parser.add_argument('--backprop-future-values', action='store_true',\n default=True)\n parser.add_argument('--no-backprop-future-values', action='store_false',\n dest='backprop_future_values')\n args = parser.parse_args()\n\n logging.basicConfig(level=args.logger_level)\n\n # Set a random seed used in ChainerRL.\n # If you use async training (--train-async), the results will be no longer\n # deterministic even with the same random seed.\n misc.set_random_seed(args.seed, gpus=(args.gpu,))\n\n if args.train_async:\n # Set different random seeds for different subprocesses.\n # If 
seed=0 and processes=4, subprocess seeds are [0, 1, 2, 3].\n # If seed=1 and processes=4, subprocess seeds are [4, 5, 6, 7].\n process_seeds = np.arange(args.processes) + args.seed * args.processes\n assert process_seeds.max() < 2 ** 32\n\n args.outdir = experiments.prepare_output_dir(args, args.outdir)\n\n def make_env(process_idx, test):\n env = gym.make(args.env)\n # Use different random seeds for train and test envs\n if args.train_async:\n process_seed = int(process_seeds[process_idx])\n env_seed = 2 ** 32 - 1 - process_seed if test else process_seed\n else:\n env_seed = 2 ** 32 - 1 - args.seed if test else args.seed\n env.seed(env_seed)\n # Cast observations to float32 because our model uses float32\n env = chainerrl.wrappers.CastObservationToFloat32(env)\n if args.monitor and process_idx == 0:\n env = chainerrl.wrappers.Monitor(env, args.outdir)\n if not test:\n # Scale rewards (and thus returns) to a reasonable range so that\n # training is easier\n env = chainerrl.wrappers.ScaleReward(env, args.reward_scale_factor)\n if args.render and process_idx == 0 and not test:\n env = chainerrl.wrappers.Render(env)\n return env\n\n sample_env = gym.make(args.env)\n timestep_limit = sample_env.spec.tags.get(\n 'wrapper_config.TimeLimit.max_episode_steps')\n obs_space = sample_env.observation_space\n action_space = sample_env.action_space\n\n # Switch policy types accordingly to action space types\n if isinstance(action_space, gym.spaces.Box):\n model = chainerrl.agents.pcl.PCLSeparateModel(\n pi=chainerrl.policies.FCGaussianPolicy(\n obs_space.low.size, action_space.low.size,\n n_hidden_channels=args.n_hidden_channels,\n n_hidden_layers=args.n_hidden_layers,\n bound_mean=True,\n min_action=action_space.low,\n max_action=action_space.high,\n var_wscale=1e-3,\n var_bias=1,\n var_type='diagonal',\n ),\n v=chainerrl.v_functions.FCVFunction(\n obs_space.low.size,\n n_hidden_channels=args.n_hidden_channels,\n n_hidden_layers=args.n_hidden_layers,\n )\n )\n else:\n model = chainerrl.agents.pcl.PCLSeparateModel(\n pi=chainerrl.policies.FCSoftmaxPolicy(\n obs_space.low.size, action_space.n,\n n_hidden_channels=args.n_hidden_channels,\n n_hidden_layers=args.n_hidden_layers\n ),\n v=chainerrl.v_functions.FCVFunction(\n obs_space.low.size,\n n_hidden_channels=args.n_hidden_channels,\n n_hidden_layers=args.n_hidden_layers,\n ),\n )\n\n if not args.train_async and args.gpu >= 0:\n chainer.cuda.get_device_from_id(args.gpu).use()\n model.to_gpu(args.gpu)\n\n if args.train_async:\n opt = rmsprop_async.RMSpropAsync(lr=args.lr, alpha=0.99)\n else:\n opt = chainer.optimizers.Adam(alpha=args.lr)\n opt.setup(model)\n\n if args.prioritized_replay:\n replay_buffer = \\\n chainerrl.replay_buffer.PrioritizedEpisodicReplayBuffer(\n capacity=5 * 10 ** 3,\n uniform_ratio=0.1,\n default_priority_func=exp_return_of_episode,\n wait_priority_after_sampling=False,\n return_sample_weights=False)\n else:\n replay_buffer = chainerrl.replay_buffer.EpisodicReplayBuffer(\n capacity=5 * 10 ** 3)\n\n agent = chainerrl.agents.PCL(\n model, opt, replay_buffer=replay_buffer,\n t_max=args.t_max, gamma=0.99,\n tau=args.tau,\n rollout_len=args.rollout_len,\n n_times_replay=args.n_times_replay,\n replay_start_size=args.replay_start_size,\n batchsize=args.batchsize,\n train_async=args.train_async,\n disable_online_update=args.disable_online_update,\n backprop_future_values=args.backprop_future_values,\n )\n if args.load:\n agent.load(args.load)\n\n if args.demo:\n env = make_env(0, True)\n eval_stats = experiments.eval_performance(\n 
env=env,\n agent=agent,\n n_steps=None,\n n_episodes=args.eval_n_runs,\n max_episode_len=timestep_limit)\n print('n_runs: {} mean: {} median: {} stdev {}'.format(\n args.eval_n_runs, eval_stats['mean'], eval_stats['median'],\n eval_stats['stdev']))\n else:\n if args.train_async:\n experiments.train_agent_async(\n agent=agent,\n outdir=args.outdir,\n processes=args.processes,\n make_env=make_env,\n profile=args.profile,\n steps=args.steps,\n eval_n_steps=None,\n eval_n_episodes=args.eval_n_runs,\n eval_interval=args.eval_interval,\n max_episode_len=timestep_limit)\n else:\n experiments.train_agent_with_evaluation(\n agent=agent,\n env=make_env(0, test=False),\n eval_env=make_env(0, test=True),\n outdir=args.outdir,\n steps=args.steps,\n eval_n_steps=None,\n eval_n_episodes=args.eval_n_runs,\n eval_interval=args.eval_interval,\n train_max_episode_len=timestep_limit)\n\n\nif __name__ == '__main__':\n main()\n"
] |
[
[
"numpy.asarray",
"numpy.full"
],
[
"numpy.arange"
]
] |
rainwaterone/stat656
|
[
"c582fc8c6a55c377e2b57d1f7b10471d625d79db"
] |
[
"final/sol/Final2020_analysis.py"
] |
[
"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Aug 4 12:17:34 2020\n\n@author: EJones\n\"\"\"\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\n# Classes provided from AdvancedAnalytics ver 1.25\nfrom AdvancedAnalytics.Text import text_plot\n\nAnalysis = \"Covid\"\nfilename = \"PICKLE/\"+Analysis+\"_08-04_df_all_pickle.pkl\"\nif Analysis == \"Election\":\n # Read Nov2020 Election ARTICLES\n print(\"{:-<40s}{:->39s}\".format('*', '*'))\n print(\"{:-<22s} SENTIMENT ANALYSIS OF 2020 {:->22s}\".format('*','*'))\n print(\"{:-<22s} PRESIDENTIAL ELECTION NEWS {:->22s}\".format('*','*'))\n print(\"{:-<40s}{:->39s}\".format('*', '*'))\n df = pd.read_pickle(filename)\nelse:\n # Read COVID ARTICLES\n print(\"{:-<40s}{:->39s}\".format('*', '*'))\n print(\"{:-<22s} SENTIMENT ANALYSIS OF {:->22s}\".format('*','*'))\n print(\"{:-<22s} COVID-19 NEWS ARTICLES {:->22s}\".format('*','*'))\n print(\"{:-<40s}{:->39s}\".format('*', '*'))\n df = pd.read_pickle(filename)\n \ntp, bp, fp = 0, 0, 0\ntn, bn, fn = 0, 0, 0\ntu, bu, fu = 0, 0, 0\nfor i in range(df.shape[0]):\n if df['sentiment'].iloc[i]>0:\n if df['trump_1k'].iloc[i]>0:\n tp += 1\n if df['biden_1k'].iloc[i]>0:\n bp += 1\n if df['fauci_1k'].iloc[i]>0:\n fp += 1\n elif df['sentiment'].iloc[i]<0:\n if df['trump_1k'].iloc[i]>0:\n tn += 1\n if df['biden_1k'].iloc[i]>0:\n bn += 1\n if df['trump_1k'].iloc[i]>0:\n fn += 1\n else:\n if df['trump_1k'].iloc[i]>0:\n tu += 1\n if df['biden_1k'].iloc[i]>0:\n bu += 1\n if df['trump_1k'].iloc[i]>0:\n fu += 1\npt = round(100*tp/(tp+tn+tu), 1)\npb = round(100*bp/(bp+bn+bu), 1)\npf = round(100*fp/(fp+fn+fu), 1)\nnt = round(100*tn/(tp+tn+tu), 1)\nnb = round(100*bn/(bp+bn+bu), 1)\nnf = round(100*fn/(fp+fn+fu), 1)\nprint(\" Sentiment by Person\")\nprint(\" Name (+/-) Articles Percent\")\nprint(\"{:-<20s}{:->19s}\".format(\"*\", \"*\"))\nprint(\" {:5s}{:>5s}{:>11d}{:>13.1f}%\".format(\"Trump\", \"+\", tp, pt))\nprint(\" {:5s}{:>5s}{:>11d}{:>13.1f}%\".format(\"Trump\", \"-\", tn, nt))\nprint(\"\\n {:5s}{:>5s}{:>11d}{:>13.1f}%\".format(\"Biden\", \"+\", bp, pb))\nprint(\" {:5s}{:>5s}{:>11d}{:>13.1f}%\".format(\"Biden\", \"-\", bn, nb))\nprint(\"\\n {:5s}{:>5s}{:>11d}{:>13.1f}%\".format(\"Fauci\", \"+\", fp, pf))\nprint(\" {:5s}{:>5s}{:>11d}{:>13.1f}%\".format(\"Fauci\", \"-\", fn, nf))\nprint(\"{:-<20s}{:->19s}\".format(\"*\", \"*\"))\n\nlabels = ['BIDEN', 'TRUMP', 'FAUCI'] \np_means = [pb, pt, pf]\nn_means = [nb, nt, nf]\nx = np.arange(len(labels))\nwidth = 0.35 # Bar width\nfig, ax = plt.subplots()\nrects1 = ax.bar(x - width/2, p_means, width, label='Positive', color='mediumblue')\nrects2 = ax.bar(x + width/2, n_means, width, label='Negative', color='orangered')\n\n# Add some text for labels, title and custom x-axis tick labels, etc.\nax.set_ylabel('Percent Sentiment')\nax.set_xticks(x)\nax.set_xticklabels(labels)\nax.legend()\nplt.ylim(0,100)\n\ndef autolabel(rects):\n \"\"\"Attach a text label above each bar in *rects*, displaying its height.\"\"\"\n for rect in rects:\n height = rect.get_height()\n ax.annotate('{}'.format(height),\n xy=(rect.get_x() + rect.get_width() / 2, height),\n xytext=(0, 2), # 3 points vertical offset\n textcoords=\"offset points\",\n ha='center', va='bottom')\n\nautolabel(rects1)\nautolabel(rects2)\n#fig.tight_layout()\nplt.show()\n\nn_pos = []\nn_neg = []\np_pos = []\np_neg = []\nprint(\" Sentiment by Topic\")\nprint(\" Topic (+/-) Articles Percent\")\nprint(\"{:-<20s}{:->19s}\".format(\"*\", \"*\"))\nfor t in df['topic'].unique():\n df1 = 
df.loc[df['topic'] == t]\n npos = sum(df1['sentiment']>0)\n nneg = sum(df1['sentiment']<0)\n nneu = sum(df1['sentiment']==0)\n pp = round(100*npos/(npos+nneg+nneu), 1)\n pn = round(100*nneg/(npos+nneg+nneu), 1)\n print(\" {:<5d}{:>5s}{:>11d}{:>13.1f}%\".format(t, \"+\", npos, pp))\n print(\" {:<5d}{:>5s}{:>11d}{:>13.1f}%\\n\".format(t, \"-\", nneg, pn))\n n_pos.append(npos)\n p_pos.append(pp)\n n_neg.append(nneg)\n p_neg.append(pn) \nprint(\"{:-<20s}{:->19s}\".format(\"*\", \"*\"))\n\nlabels = [1, 2, 3, 4, 5, 6, 7, 8, 9] \np_means = p_pos\nn_means = p_neg\nx = np.arange(len(labels))\nwidth = 0.35 # Bar width\nfig, ax = plt.subplots()\nrects1 = ax.bar(x - width/2, p_means, width, label='Positive', color='mediumblue')\nrects2 = ax.bar(x + width/2, n_means, width, label='Negative', color='orangered')\n\n# Add some text for labels, title and custom x-axis tick labels, etc.\nax.set_ylabel('Percent Sentiment')\nax.set_xlabel('Topics')\nax.set_xticks(x)\nax.set_xticklabels(labels)\nax.legend()\nplt.ylim(0,70)\n#autolabel(rects1)\n#autolabel(rects2)\n#fig.tight_layout()\nplt.show()\n\ny = np.ravel(df['sentiment'])\nfor i in range(9):\n j = i+1\n var = 'prob'+str(j)\n x = np.ravel(df[var])\n fig, ax = plt.subplots()\n plt.hlines(0, 0, 1.0)\n plt.xlabel(\"Probability for Topic \"+str(j))\n plt.ylabel(\"Sentiment\")\n plt.plot(x, y, 'o', color='tab:brown', markersize=1)\n\n# Fit a line to x & y\ndf1 = df[['trump_1k', 'sentiment']].copy()\ndf1.sort_values('trump_1k', inplace=True)\nx = np.ravel(df1['trump_1k'])\ny = np.ravel(df1['sentiment'])\nb, a = np.polyfit(x, y, deg=1)\ny_est = a + b*x\n\nfig, ax = plt.subplots()\nax.plot(x, y_est, '-')\nplt.xlabel(\"'Trump'/1,000 Words\")\nplt.ylabel(\"Sentiment\")\nplt.hlines(0, 0, 10, colors='red', linestyles='dashed', label='Neutral')\nplt.plot(x, y, 'o', color='tab:brown', markersize=1)\n\n# Fit a line to x & y\ndf1 = df[['biden_1k', 'sentiment']].copy()\nx = np.ravel(df1['biden_1k'])\ny = np.ravel(df1['sentiment'])\n\nb, a = np.polyfit(x, y, deg=1)\ny_est = a + b*x\n\nfig, ax = plt.subplots()\nax.plot(x, y_est, '-')\nplt.xlabel(\"'Biden'/1,000 Words\")\nplt.ylabel(\"Sentiment\")\nplt.hlines(0, 0, 8, colors='red', linestyles='dashed', label='Neutral')\nplt.plot(x, y, 'o', color='tab:brown', markersize=1)\n\n# Fit a line to x & y\ndf1 = df[['fauci_1k', 'sentiment']].copy()\nx = np.ravel(df1['fauci_1k'])\ny = np.ravel(df1['sentiment'])\n\nb, a = np.polyfit(x, y, deg=1)\ny_est = a + b*x\n\nfig, ax = plt.subplots()\nax.plot(x, y_est, '-')\nplt.xlabel(\"'Fauci'/1,000 Words\")\nplt.ylabel(\"Sentiment\")\nplt.hlines(0, 0, 20, colors='red', linestyles='dashed', label='Neutral')\nplt.plot(x, y, 'o', color='tab:brown', markersize=1)\n"
] |
[
[
"numpy.polyfit",
"pandas.read_pickle",
"matplotlib.pyplot.ylim",
"matplotlib.pyplot.hlines",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.xlabel",
"numpy.ravel",
"matplotlib.pyplot.show",
"matplotlib.pyplot.ylabel"
]
] |
dhimmel/kh-tools
|
[
"860aa846c6e4458f1270dc62f3317807f17469b7"
] |
[
"khtools/knn.py"
] |
[
"import os\nimport warnings\n\nfrom matplotlib.lines import Line2D\nimport matplotlib.pyplot as plt\nimport networkx as nx\nimport pandas as pd\nfrom sklearn.neighbors import NearestNeighbors\n\nfrom . import sourmash_utils\nfrom .s3_utils import savefig\n\ndef _compute_neighbor_adjacencies(data, n_neighbors=5):\n # Convert to distances by subtracting from 1\n X = 1 - data\n nbrs = NearestNeighbors(n_neighbors=n_neighbors, metric='precomputed').fit(X)\n distances, indices = nbrs.kneighbors(X)\n\n # Replace integers with cell ids\n neighbor_indices = pd.DataFrame(indices, index=X.columns)\n neighbor_indices = neighbor_indices.applymap(lambda x: X.columns[x])\n\n # Make (cell_1, cell_2) adjacency list\n neighbor_indices_tidy = neighbor_indices.unstack()\n neighbor_indices_tidy = neighbor_indices_tidy.reset_index()\n neighbor_indices_tidy = neighbor_indices_tidy.drop(columns='level_0')\n return neighbor_indices_tidy.values\n\n\ndef add_color_cols(metadata, color_cols=['cell_ontology_class'],\n palettes=dict(cell_ontology_class='tab10')):\n \"\"\"Add a hexadecimal color for the categorical values in color_cols\"\"\"\n for col in color_cols:\n palette = palettes[col]\n colors = sourmash_utils.category_colors(metadata[col],\n palette=palette)\n new_col = f'{col}_color'\n metadata.loc[:, new_col] = colors\n return metadata\n\n\ndef nearest_neighbor_graph(data, metadata, n_neighbors=5,\n color_cols=['cell_ontology_class'],\n palettes=dict(cell_ontology_class='tab10')):\n metadata = add_color_cols(metadata, color_cols=color_cols, palettes=palettes)\n\n G = nx.Graph()\n nodes = [(cell_id, attr.to_dict()) for cell_id, attr in metadata.iterrows()]\n G.add_nodes_from(nodes)\n\n neighbor_adjacencies = _compute_neighbor_adjacencies(data, n_neighbors=n_neighbors)\n G.add_edges_from(neighbor_adjacencies)\n return G\n\n\ndef _add_legend(colors, labels, title):\n label_color_df = pd.DataFrame(dict(colors=colors, labels=labels))\n label_color_df = label_color_df.drop_duplicates()\n\n # Sort by lowercase version of the labels\n label_color_df.loc[:, 'labels_lower'] = label_color_df['labels'].astype(str).str.lower()\n label_color_df = label_color_df.sort_values('labels_lower')\n # Remove the sorting column\n label_color_df.drop('labels_lower', inplace=True, axis=1)\n\n legend_elements = [Line2D([0], [0], color='w', marker='o', markersize=10,\n markerfacecolor=color, label=label, alpha=0.5)\n for i, (color, label) in label_color_df.iterrows()]\n\n ax = plt.gca()\n ax.legend(handles=legend_elements, title=title, frameon=False)\n return ax\n\n\ndef draw_graph(G, label_col='cell_ontology_class', edge_color='black', legend=True,\n **kwargs):\n label_color_col = f\"{label_col}_color\"\n\n colors = [d[label_color_col] for v, d in G.nodes(data=True)]\n labels = [d[label_col] for v, d in G.nodes(data=True)]\n\n if 'pos' not in kwargs:\n kwargs['pos'] = nx.spring_layout(G)\n nx.draw(G, node_color=colors, alpha=0.5, edge_color=edge_color, linewidths=0.5, **kwargs)\n\n if legend:\n _add_legend(colors, labels, label_col)\n\n\ndef build_graph_and_plot(data, metadata, n_neighbors, color_cols, palettes,\n figure_folder, figure_prefix, title):\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\")\n graph = nearest_neighbor_graph(data, metadata,\n n_neighbors=n_neighbors,\n color_cols=color_cols,\n palettes=palettes)\n\n pos = nx.spring_layout(graph, seed=0)\n\n for label in color_cols:\n fig, ax = plt.subplots()\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\")\n draw_graph(graph, 
edge_color='black', label_col=label, pos=pos)\n ax.set_title(title)\n figure_suffix = f'graph_nneighbors-{n_neighbors}_colorby-{label}'\n png = os.path.join(figure_folder,\n f'{figure_prefix}_{figure_suffix}.png')\n savefig(fig, png, dpi=150)\n return graph, pos\n"
] |
[
[
"matplotlib.pyplot.gca",
"matplotlib.lines.Line2D",
"matplotlib.pyplot.subplots",
"pandas.DataFrame",
"sklearn.neighbors.NearestNeighbors"
]
] |
gbriones1/stylegan2-ada-pytorch
|
[
"8a3722211f83b39172f04d3a4d0706c935c35a12"
] |
[
"projector.py"
] |
[
"# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.\n#\n# NVIDIA CORPORATION and its licensors retain all intellectual property\n# and proprietary rights in and to this software, related documentation\n# and any modifications thereto. Any use, reproduction, disclosure or\n# distribution of this software and related documentation without an express\n# license agreement from NVIDIA CORPORATION is strictly prohibited.\n\n\"\"\"Project given image to the latent space of pretrained network pickle.\"\"\"\n\nimport copy\nimport os\nfrom time import perf_counter\n\nimport click\nimport imageio\nimport numpy as np\nimport PIL.Image\nimport torch\nimport torch.nn.functional as F\n\nimport dnnlib\nimport legacy\nimport lpips\n\nfrom mdf.mdfloss import MDFLoss\nfrom pytorch_msssim.pytorch_msssim import SSIM\n\nloss_fn_alex = lpips.LPIPS(net='alex').cuda()\nloss_fn_vgg = lpips.LPIPS(net='vgg').cuda()\nloss_fn_mdf = MDFLoss(\"./mdf/weights/Ds_SISR.pth\", cuda_available=True)\nloss_fn_ssim = SSIM(win_size=11, win_sigma=1.5, data_range=1, size_average=True, channel=3)\n\ndef project(\n G,\n target: torch.Tensor, # [C,H,W] and dynamic range [0,255], W & H must match G output resolution\n *,\n num_steps = 1000,\n w_avg_samples = 10000,\n initial_learning_rate = 0.1,\n initial_noise_factor = 0.05,\n lr_rampdown_length = 0.25,\n lr_rampup_length = 0.05,\n noise_ramp_length = 0.75,\n regularize_noise_weight = 1e5,\n verbose = False,\n train_noise = False,\n loss_fn = \"vgg\",\n return_losses = False,\n resize = 256,\n device: torch.device\n):\n assert target.shape == (G.img_channels, G.img_resolution, G.img_resolution)\n\n def logprint(*args):\n if verbose:\n print(*args)\n\n G = copy.deepcopy(G).eval().requires_grad_(False).to(device) # type: ignore\n\n # Compute w stats.\n logprint(f'Computing W midpoint and stddev using {w_avg_samples} samples...')\n z_samples = np.random.RandomState(123).randn(w_avg_samples, G.z_dim)\n w_samples = G.mapping(torch.from_numpy(z_samples).to(device), None) # [N, L, C]\n w_samples = w_samples[:, :1, :].cpu().numpy().astype(np.float32) # [N, 1, C]\n w_avg = np.mean(w_samples, axis=0, keepdims=True) # [1, 1, C]\n w_std = (np.sum((w_samples - w_avg) ** 2) / w_avg_samples) ** 0.5\n\n # Setup noise inputs.\n if train_noise:\n noise_bufs = { name: buf for (name, buf) in G.synthesis.named_buffers() if 'noise_const' in name }\n\n # Load VGG16 feature detector.\n url = 'https://nvlabs-fi-cdn.nvidia.com/stylegan2-ada-pytorch/pretrained/metrics/vgg16.pt'\n with dnnlib.util.open_url(url) as f:\n vgg16 = torch.jit.load(f).eval().to(device)\n\n # Features for target image.\n target_images = target.unsqueeze(0).to(device).to(torch.float32)\n if target_images.shape[2] > resize:\n target_images = F.interpolate(target_images, size=(resize, resize), mode='area')\n target_features = vgg16(target_images, resize_images=False, return_lpips=True)\n\n w_opt = torch.tensor(w_avg, dtype=torch.float32, device=device, requires_grad=True) # pylint: disable=not-callable\n w_out = torch.zeros([num_steps] + list(w_opt.shape[1:]), dtype=torch.float32, device=device)\n if train_noise:\n optimizer = torch.optim.Adam([w_opt] + list(noise_bufs.values()), betas=(0.9, 0.999), lr=initial_learning_rate)\n\n # Init noise.\n for buf in noise_bufs.values():\n buf[:] = torch.randn_like(buf)\n buf.requires_grad = True\n else:\n optimizer = torch.optim.Adam([w_opt], betas=(0.9, 0.999), lr=initial_learning_rate)\n\n losses = []\n for step in range(num_steps):\n # Learning rate schedule.\n t = step / num_steps\n 
w_noise_scale = w_std * initial_noise_factor * max(0.0, 1.0 - t / noise_ramp_length) ** 2\n lr_ramp = min(1.0, (1.0 - t) / lr_rampdown_length)\n lr_ramp = 0.5 - 0.5 * np.cos(lr_ramp * np.pi)\n lr_ramp = lr_ramp * min(1.0, t / lr_rampup_length)\n lr = initial_learning_rate * lr_ramp\n for param_group in optimizer.param_groups:\n param_group['lr'] = lr\n\n # Synth images from opt_w.\n w_noise = torch.randn_like(w_opt) * w_noise_scale\n ws = (w_opt + w_noise).repeat([1, G.mapping.num_ws, 1])\n synth_images = G.synthesis(ws, noise_mode='const')\n\n # Downsample image to 256x256 if it's larger than that. VGG was built for 224x224 images.\n synth_images = (synth_images + 1) * (255/2)\n if synth_images.shape[2] > resize:\n synth_images = F.interpolate(synth_images, size=(resize, resize), mode='area')\n\n # Features for synth images.\n synth_features = vgg16(synth_images, resize_images=False, return_lpips=True)\n if loss_fn == \"lpips_vgg\":\n dist = loss_fn_vgg(target_images, synth_images.to(torch.float32))\n elif loss_fn == \"lpips_alex\":\n dist = loss_fn_alex(target_images, synth_images.to(torch.float32))\n elif loss_fn == \"mdf\":\n dist = loss_fn_mdf(target_images, synth_images.to(torch.float32))\n elif loss_fn == \"ssim\":\n dist = 1 - loss_fn_ssim(target_images/255.0, synth_images.to(torch.float32)/255.0)\n else:\n dist = (target_features - synth_features).square().sum()\n\n # Noise regularization.\n if train_noise:\n reg_loss = 0.0\n for v in noise_bufs.values():\n noise = v[None,None,:,:] # must be [1,1,H,W] for F.avg_pool2d()\n while True:\n reg_loss += (noise*torch.roll(noise, shifts=1, dims=3)).mean()**2\n reg_loss += (noise*torch.roll(noise, shifts=1, dims=2)).mean()**2\n if noise.shape[2] <= 8:\n break\n noise = F.avg_pool2d(noise, kernel_size=2)\n loss = dist + reg_loss * regularize_noise_weight\n else:\n loss = dist\n\n # Step\n optimizer.zero_grad(set_to_none=True)\n loss.backward()\n optimizer.step()\n logprint(f'step {step+1:>4d}/{num_steps}: loss {float(loss):<5.2f}')\n losses.append(float(loss))\n\n # Save projected W for each optimization step.\n w_out[step] = w_opt.detach()[0]\n\n # Normalize noise.\n if train_noise:\n with torch.no_grad():\n for buf in noise_bufs.values():\n buf -= buf.mean()\n buf *= buf.square().mean().rsqrt()\n \n if return_losses:\n return w_out.repeat([1, G.mapping.num_ws, 1]), losses\n\n return w_out.repeat([1, G.mapping.num_ws, 1])\n\n#----------------------------------------------------------------------------\n\[email protected]()\[email protected]('--network', 'network_pkl', help='Network pickle filename', required=True)\[email protected]('--target', 'target_fname', help='Target image file to project to', required=True, metavar='FILE')\[email protected]('--num-steps', help='Number of optimization steps', type=int, default=1000, show_default=True)\[email protected]('--seed', help='Random seed', type=int, default=303, show_default=True)\[email protected]('--save-video', help='Save an mp4 video of optimization progress', type=bool, default=True, show_default=True)\[email protected]('--outdir', help='Where to save the output images', required=True, metavar='DIR')\ndef run_projection(\n network_pkl: str,\n target_fname: str,\n outdir: str,\n save_video: bool,\n seed: int,\n num_steps: int\n):\n \"\"\"Project given image to the latent space of pretrained network pickle.\n\n Examples:\n\n \\b\n python projector.py --outdir=out --target=~/mytargetimg.png \\\\\n --network=https://nvlabs-fi-cdn.nvidia.com/stylegan2-ada-pytorch/pretrained/ffhq.pkl\n 
\"\"\"\n np.random.seed(seed)\n torch.manual_seed(seed)\n\n # Load networks.\n print('Loading networks from \"%s\"...' % network_pkl)\n device = torch.device('cuda')\n with dnnlib.util.open_url(network_pkl) as fp:\n G = legacy.load_network_pkl(fp)['G_ema'].requires_grad_(False).to(device) # type: ignore\n\n # Load target image.\n target_pil = PIL.Image.open(target_fname).convert('RGB')\n w, h = target_pil.size\n s = min(w, h)\n target_pil = target_pil.crop(((w - s) // 2, (h - s) // 2, (w + s) // 2, (h + s) // 2))\n target_pil = target_pil.resize((G.img_resolution, G.img_resolution), PIL.Image.LANCZOS)\n target_uint8 = np.array(target_pil, dtype=np.uint8)\n\n # Optimize projection.\n start_time = perf_counter()\n projected_w_steps = project(\n G,\n target=torch.tensor(target_uint8.transpose([2, 0, 1]), device=device), # pylint: disable=not-callable\n num_steps=num_steps,\n device=device,\n verbose=True\n )\n print (f'Elapsed: {(perf_counter()-start_time):.1f} s')\n\n # Render debug output: optional video and projected image and W vector.\n os.makedirs(outdir, exist_ok=True)\n if save_video:\n video = imageio.get_writer(f'{outdir}/proj.mp4', mode='I', fps=10, codec='libx264', bitrate='16M')\n print (f'Saving optimization progress video \"{outdir}/proj.mp4\"')\n for projected_w in projected_w_steps:\n synth_image = G.synthesis(projected_w.unsqueeze(0), noise_mode='const')\n synth_image = (synth_image + 1) * (255/2)\n synth_image = synth_image.permute(0, 2, 3, 1).clamp(0, 255).to(torch.uint8)[0].cpu().numpy()\n video.append_data(np.concatenate([target_uint8, synth_image], axis=1))\n video.close()\n\n # Save final projected frame and W vector.\n target_pil.save(f'{outdir}/target.png')\n projected_w = projected_w_steps[-1]\n synth_image = G.synthesis(projected_w.unsqueeze(0), noise_mode='const')\n synth_image = (synth_image + 1) * (255/2)\n synth_image = synth_image.permute(0, 2, 3, 1).clamp(0, 255).to(torch.uint8)[0].cpu().numpy()\n PIL.Image.fromarray(synth_image, 'RGB').save(f'{outdir}/proj.png')\n np.savez(f'{outdir}/projected_w.npz', w=projected_w.unsqueeze(0).cpu().numpy())\n\n#----------------------------------------------------------------------------\n\nif __name__ == \"__main__\":\n run_projection() # pylint: disable=no-value-for-parameter\n\n#----------------------------------------------------------------------------\n"
] |
[
[
"torch.optim.Adam",
"torch.randn_like",
"torch.jit.load",
"torch.roll",
"numpy.random.seed",
"torch.manual_seed",
"torch.nn.functional.avg_pool2d",
"torch.from_numpy",
"numpy.cos",
"torch.tensor",
"numpy.concatenate",
"numpy.mean",
"torch.no_grad",
"torch.nn.functional.interpolate",
"torch.device",
"numpy.array",
"numpy.sum",
"numpy.random.RandomState"
]
] |
seed4600/jaseci
|
[
"d738cb5e9e90d5e3e78f516af15cdd5ad920276a"
] |
[
"jskit/use_enc/use_enc.py"
] |
[
"import numpy as np\nimport tensorflow_hub as hub\nimport tensorflow as tf\nimport tensorflow_text # noqa\nfrom jaseci.actions.live_actions import jaseci_action\nfrom typing import Union\n\n\nmodule = hub.load(\"https://tfhub.dev/google/universal-sentence-encoder/4\")\n\n\n@jaseci_action(act_group=['use'], aliases=['get_embedding'], allow_remote=True)\ndef encode(text: Union[str, list]):\n if(isinstance(text, str)):\n text = [text]\n return module(text).numpy().tolist()\n\n\n@jaseci_action(act_group=['use'], allow_remote=True)\ndef cos_sim_score(q_emb: list, a_emb: list):\n norm = np.linalg.norm\n return np.dot(q_emb, a_emb)/(norm(q_emb)*norm(a_emb))\n\n\nif __name__ == \"__main__\":\n from jaseci.actions.remote_actions import launch_server\n launch_server(port=8000)\n"
] |
[
[
"numpy.dot"
]
] |
danachang/End2EndDecoder
|
[
"575d6077c5932a2e0a63d5af9691d12e5cb468ef"
] |
[
"model.py"
] |
[
"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport numpy as np\nimport tensorflow as tf\nfrom util import log\nfrom decoder import Decoder\nfrom decoder_mdl import Decoder_Mdl\n\n\nclass Model(object):\n\n def __init__(self, config,\n debug_information=False,\n is_train=True):\n self.debug = debug_information\n\n self.config = config\n self.batch_size = config.batch_size\n self.h = config.h\n self.w = config.w\n self.c = config.c\n self.output_dim = config.output_dim\n self.output_act_fn = config.output_act_fn\n self.num_d_conv = config.num_d_conv\n self.num_d_fc = config.num_d_fc\n self.d_norm_type = config.d_norm_type\n self.loss_type = config.loss_type\n\n # added for Decoder_mdl\n self.load_pretrained = config.load_pretrained\n self.arch = config.arch\n\n # create placeholders for the input\n self.image = tf.placeholder(\n name='image', dtype=tf.float32,\n shape=[self.batch_size, self.h, self.w, self.c],\n )\n\n self.label = tf.placeholder(\n name='label', dtype=tf.float32,\n shape=[self.batch_size, self.output_dim],\n )\n\n self.build(is_train=is_train)\n\n def get_feed_dict(self, batch_chunk):\n fd = {\n self.image: batch_chunk['image'], # [bs, h, w, c]\n self.label: batch_chunk['label'], # [bs, v] (v should be 3)\n }\n return fd\n\n def build(self, is_train=True):\n\n # Decoder {{{\n # =========\n # Input: an image [bs, h, w, c]\n # Output: [bs, [x, y, v]]\n\n if self.arch == 'ConvNet':\n D = Decoder('Decoder', self.output_dim, self.output_act_fn,\n self.num_d_conv, self.num_d_fc,\n self.d_norm_type, is_train)\n else:\n D = Decoder_Mdl('Decoder_Mdl', self.output_dim, self.output_act_fn,\n self.num_d_conv, self.num_d_fc,\n self.d_norm_type, is_train,\n self.load_pretrained, self.arch)\n\n pred_label, conv_list, actv_list, fc_list = D(self.image)\n self.pred_label = pred_label\n self.conv_list = conv_list\n self.actv_list = actv_list\n self.fc_list = fc_list\n # }}}\n\n # Build losses {{{\n # =========\n # compute loss\n if self.loss_type == 'l1':\n self.ori_loss = tf.abs(self.label - pred_label)\n self.loss = tf.reduce_mean(self.ori_loss)\n elif self.loss_type == 'l2':\n self.ori_loss = (self.label - pred_label) **2\n self.loss = tf.reduce_mean(self.ori_loss)\n else:\n raise NotImplementedError\n # }}}\n\n # TensorBoard summaries {{{\n # =========\n tf.summary.scalar(\"loss/loss\", self.loss)\n tf.summary.image(\"image\", self.image)\n # }}}\n\n # Output {{{\n # =========\n self.output = {\n 'pred_label': pred_label\n }\n # }}}\n\n log.warn('\\033[93mSuccessfully loaded the model.\\033[0m')\n"
] |
[
[
"tensorflow.reduce_mean",
"tensorflow.summary.image",
"tensorflow.placeholder",
"tensorflow.summary.scalar",
"tensorflow.abs"
]
] |
Mathiasn21/Traffic_Sign_ML_final_project
|
[
"17dae41f139d044f73d45493ddb82fb3d4425fa5"
] |
[
"main/source/main/mlp_diff_solvers.py"
] |
[
"import joblib\nimport matplotlib.pyplot as plt\nfrom sklearn.neural_network import MLPClassifier\n\n# Load previously saved models, with different MLP solvers\nmlp_adam_best: MLPClassifier = joblib.load('D:\\\\group_projects\\\\Sign-machine-learning\\\\main\\\\source\\\\models\\\\mlp_adam_best.joblib')\nmlp_sgd: MLPClassifier = joblib.load('D:\\\\group_projects\\\\Sign-machine-learning\\\\main\\\\source\\\\models\\\\mlp_sgd.joblib')\nmlp_adam_worst: MLPClassifier = joblib.load('D:\\\\group_projects\\\\Sign-machine-learning\\\\main\\\\source\\\\models\\\\mlp_adam_worst.joblib')\n\n# Plot various loss curves from models with different solvers and learning rate.\nplt.plot(mlp_adam_best.loss_curve_, label='Adam - Learning rate 0.0001')\nplt.plot(mlp_sgd.loss_curve_, label='SGD - Learning rate 0.01')\nplt.plot(mlp_adam_worst.loss_curve_, label='Adam - Learning rate 0.01')\nplt.title('Loss')\nplt.xlabel('Iterations')\nplt.ylabel('Loss')\nplt.legend()\nplt.show()\n"
] |
[
[
"matplotlib.pyplot.legend",
"matplotlib.pyplot.title",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.show",
"matplotlib.pyplot.ylabel"
]
] |
Noezor/msticpy
|
[
"f0d6d0d0bbaeba1ca060787b9929350804fa6dc5",
"f0d6d0d0bbaeba1ca060787b9929350804fa6dc5"
] |
[
"msticpy/data/drivers/local_data_driver.py",
"tests/data/drivers/test_kql_driver.py"
] |
[
"# -------------------------------------------------------------------------\n# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License. See License.txt in the project root for\n# license information.\n# --------------------------------------------------------------------------\n\"\"\"Local Data Driver class - for testing and demos.\"\"\"\nfrom pathlib import Path\nfrom typing import Union, Any, Dict, Optional, List\n\nimport pandas as pd\n\nfrom .driver_base import DriverBase, QuerySource\nfrom ...common.utility import export\nfrom ..._version import VERSION\n\n__version__ = VERSION\n__author__ = \"Ian Hellen\"\n\n\n@export\nclass LocalDataDriver(DriverBase):\n \"\"\"LocalDataDriver class to execute kql queries.\"\"\"\n\n def __init__(self, connection_str: str = None, **kwargs):\n \"\"\"\n Instantiaite LocalDataDriver and optionally connect.\n\n Parameters\n ----------\n connection_str : str, optional\n Connection string (not used)\n data_paths : List[str], optional\n Paths from which to load data files\n\n \"\"\"\n del connection_str\n self._debug = kwargs.get(\"debug\", False)\n super().__init__()\n\n # If data paths specified, use these\n data_paths = kwargs.get(\"data_paths\")\n if data_paths:\n self._paths: List[str] = [path.strip() for path in data_paths]\n else:\n self._paths = [\".\"]\n\n self.data_files: Dict[str, str] = self._get_data_paths()\n self._schema: Dict[str, Any] = {}\n self._loaded = True\n self._connected = True\n\n def _get_data_paths(self) -> Dict[str, str]:\n \"\"\"Read files in data paths.\"\"\"\n data_files = {}\n for path in self._paths:\n for pattern in [\"**/*.pkl\", \"**/*.csv\"]:\n data_files.update(\n {\n str(file_path.name).casefold(): str(file_path)\n for file_path in Path(path).resolve().glob(pattern)\n }\n )\n return data_files\n\n def connect(self, connection_str: Optional[str] = None, **kwargs):\n \"\"\"\n Connect to data source.\n\n Parameters\n ----------\n connection_str : str\n Connect to a data source\n\n \"\"\"\n del connection_str\n self._connected = True\n print(\"Connected.\")\n\n @property\n def schema(self) -> Dict[str, Dict]:\n \"\"\"\n Return current data schema of connection.\n\n Returns\n -------\n Dict[str, Dict]\n Data schema of current connection.\n\n \"\"\"\n if self._schema:\n return self._schema\n for df_fname in self.data_files:\n test_df = self.query(df_fname)\n if not isinstance(test_df, pd.DataFrame):\n continue\n df_schema = test_df.dtypes\n self._schema[df_fname] = {\n key: dtype.name for key, dtype in df_schema.to_dict().items()\n }\n\n return self._schema\n\n def query(\n self, query: str, query_source: QuerySource = None, **kwargs\n ) -> Union[pd.DataFrame, Any]:\n \"\"\"\n Execute query string and return DataFrame of results.\n\n Parameters\n ----------\n query : str\n The query to execute\n query_source : QuerySource\n The query definition object\n\n Returns\n -------\n Union[pd.DataFrame, results.ResultSet]\n A DataFrame (if successfull) or\n the underlying provider result if an error.\n\n \"\"\"\n del kwargs\n query_name = query_source.name if query_source else query\n file_path = self.data_files.get(query.casefold())\n if not file_path:\n raise FileNotFoundError(\n f\"Data file ({query}) for query {query_name} not found.\"\n )\n if file_path.endswith(\"csv\"):\n return pd.read_csv(\n file_path, infer_datetime_format=True, parse_dates=[\"TimeGenerated\"]\n )\n data_df = pd.read_pickle(file_path)\n if isinstance(data_df, pd.DataFrame):\n return data_df\n return f\"{query} is not a 
DataFrame ({file_path}).\"\n\n def query_with_results(self, query, **kwargs):\n \"\"\"Return query with fake results.\"\"\"\n return self.query(query, **kwargs), \"OK\"\n",
"# -------------------------------------------------------------------------\n# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License. See License.txt in the project root for\n# license information.\n# --------------------------------------------------------------------------\n\"\"\"datq query test class.\"\"\"\nfrom contextlib import redirect_stdout\nimport io\nfrom unittest.mock import patch\nimport pytest\nimport pytest_check as check\n\nimport pandas as pd\n\nfrom adal.adal_error import AdalError\nfrom Kqlmagic.kql_response import KqlError\nfrom Kqlmagic.kql_engine import KqlEngineError\nfrom Kqlmagic.my_aad_helper import AuthenticationError\n\nfrom msticpy.common.exceptions import (\n MsticpyKqlConnectionError,\n MsticpyNotConnectedError,\n MsticpyNoDataSourceError,\n MsticpyDataQueryError,\n)\nfrom msticpy.data.data_providers import KqlDriver\n\nfrom ...unit_test_lib import get_test_data_path\n\n\n_TEST_DATA = get_test_data_path()\n\nGET_IPYTHON_PATCH = KqlDriver.__module__ + \".get_ipython\"\n\n\n# pylint: disable=too-many-branches, too-many-return-statements\n# pylint: disable=no-self-use\n\n\nclass KqlResultTest:\n \"\"\"Test Kql result class.\"\"\"\n\n def __init__(self, code=0, partial=False, status=\"success\"):\n \"\"\"Create instance.\"\"\"\n self.completion_query_info = {\"StatusCode\": code, \"StatusDescription\": status}\n self.is_partial_table = partial\n\n def to_dataframe(self):\n \"\"\"Convert dataframe.\"\"\"\n return pd.DataFrame()\n\n\nclass _MockIPython:\n \"\"\"IPython get_ipython mock.\"\"\"\n\n def find_magic(self, magic):\n \"\"\"Return None if magic isn't == kql.\"\"\"\n if magic == \"kql\":\n return \"Kqlmagic\"\n return None\n\n def run_line_magic(self, magic, line):\n \"\"\"Mock run line magic.\"\"\"\n return self._run_magic(magic, line)\n\n def run_cell_magic(self, magic, line, cell):\n \"\"\"Mock run cell magic.\"\"\"\n content = cell or line\n return self._run_magic(magic, content)\n\n @staticmethod # noqa: MC0001\n def _run_magic(magic, content):\n if magic == \"reload_ext\":\n return None\n if magic == \"config\":\n if \"=\" in content:\n return \"dummy_setting\"\n return True\n\n check.equal(magic, \"kql\")\n if \"KqlErrorUnk\" in content:\n resp = '{\"error\": {\"code\": \"UnknownError\"}}'\n raise KqlError(http_response=resp, message=resp)\n if \"KqlErrorWS\" in content:\n resp = '{\"error\": {\"code\": \"WorkspaceNotFoundError\"}}'\n raise KqlError(http_response=resp, message=resp)\n if \"KqlEngineError\" in content:\n raise KqlEngineError(\"Test Error\")\n if \"AdalErrorUnk\" in content:\n resp = {\"error_description\": \"unknown error\"}\n raise AdalError(\"Test Error\", error_response=resp)\n if \"AdalErrorNR\" in content:\n raise AdalError(\"Test Error\")\n if \"AdalErrorPoll\" in content:\n raise AdalError(\"Unexpected polling state code_expired\")\n if \"AuthenticationError\" in content:\n raise AuthenticationError(\"Test Error\")\n\n if content == \"--schema\":\n return {\n \"table1\": {\"field1\": int, \"field2\": str},\n \"table2\": {\"field1\": int, \"field2\": str},\n }\n\n if \"query_partial\" in content:\n return KqlResultTest(code=0, partial=True, status=\"partial\")\n if \"query_failed\" in content:\n return KqlResultTest(code=1, partial=False, status=\"failed\")\n\n return KqlResultTest(code=0, partial=False, status=\"success\")\n\n\n@patch(GET_IPYTHON_PATCH)\ndef test_kql_load(get_ipython):\n \"\"\"Check loaded true.\"\"\"\n get_ipython.return_value = _MockIPython()\n kql_driver = KqlDriver()\n 
check.is_true(kql_driver.loaded)\n\n kql_driver = KqlDriver(connection_str=\"la://connection\")\n check.is_true(kql_driver.loaded)\n check.is_true(kql_driver.connected)\n\n\n@patch(GET_IPYTHON_PATCH)\ndef test_kql_connect(get_ipython):\n \"\"\"Check loaded true.\"\"\"\n get_ipython.return_value = _MockIPython()\n kql_driver = KqlDriver()\n check.is_true(kql_driver.loaded)\n\n kql_driver.connect(connection_str=\"la://connection\")\n check.is_true(kql_driver.connected)\n\n\n@patch(GET_IPYTHON_PATCH)\ndef test_kql_connect_no_cs(get_ipython):\n \"\"\"Check loaded true.\"\"\"\n get_ipython.return_value = _MockIPython()\n kql_driver = KqlDriver()\n check.is_true(kql_driver.loaded)\n with pytest.raises(MsticpyKqlConnectionError) as mp_ex:\n kql_driver.connect()\n check.is_in(\"no connection string\", mp_ex.value.args)\n\n\n@patch(GET_IPYTHON_PATCH)\ndef test_kql_connect_kql_exceptions(get_ipython):\n \"\"\"Check loaded true.\"\"\"\n get_ipython.return_value = _MockIPython()\n kql_driver = KqlDriver()\n\n with pytest.raises(MsticpyKqlConnectionError) as mp_ex:\n kql_driver.connect(connection_str=\"la://connection+KqlErrorUnk\")\n check.is_in(\"Kql response error\", mp_ex.value.args)\n check.is_false(kql_driver.connected)\n\n with pytest.raises(MsticpyKqlConnectionError) as mp_ex:\n kql_driver.connect(\n connection_str=\"la://connection.workspace('1234').tenant(KqlErrorWS)\"\n )\n check.is_in(\"unknown workspace\", mp_ex.value.args)\n check.is_false(kql_driver.connected)\n\n with pytest.raises(MsticpyKqlConnectionError) as mp_ex:\n kql_driver.connect(\n connection_str=\"la://connection.workspace('1234').tenant(KqlEngineError)\"\n )\n check.is_in(\"kql connection error\", mp_ex.value.args)\n check.is_false(kql_driver.connected)\n\n\n@patch(GET_IPYTHON_PATCH)\ndef test_kql_connect_adal_exceptions(get_ipython):\n \"\"\"Check loaded true.\"\"\"\n get_ipython.return_value = _MockIPython()\n kql_driver = KqlDriver()\n\n with pytest.raises(MsticpyKqlConnectionError) as mp_ex:\n kql_driver.connect(connection_str=\"la://connection+AdalErrorUnk\")\n check.is_in(\"could not authenticate to tenant\", mp_ex.value.args)\n check.is_false(kql_driver.connected)\n\n with pytest.raises(MsticpyKqlConnectionError) as mp_ex:\n kql_driver.connect(connection_str=\"la://connection+AdalErrorNR\")\n check.is_in(\"could not authenticate to tenant\", mp_ex.value.args)\n check.is_in(\"Full error\", str(mp_ex.value.args))\n check.is_false(kql_driver.connected)\n\n with pytest.raises(MsticpyKqlConnectionError) as mp_ex:\n kql_driver.connect(connection_str=\"la://connection+AdalErrorPoll\")\n check.is_in(\"authentication timed out\", mp_ex.value.args)\n check.is_false(kql_driver.connected)\n\n\n@patch(GET_IPYTHON_PATCH)\ndef test_kql_connect_authn_exceptions(get_ipython):\n \"\"\"Check loaded true.\"\"\"\n get_ipython.return_value = _MockIPython()\n kql_driver = KqlDriver()\n\n with pytest.raises(MsticpyKqlConnectionError) as mp_ex:\n kql_driver.connect(connection_str=\"la://connection+AuthenticationError\")\n check.is_in(\"authentication failed\", mp_ex.value.args)\n check.is_false(kql_driver.connected)\n\n\n@patch(GET_IPYTHON_PATCH)\ndef test_kql_schema(get_ipython):\n \"\"\"Check loaded true.\"\"\"\n get_ipython.return_value = _MockIPython()\n kql_driver = KqlDriver()\n kql_driver.connect(connection_str=\"la://connection\")\n\n check.is_in(\"table1\", kql_driver.schema)\n check.is_in(\"table2\", kql_driver.schema)\n check.is_in(\"field1\", kql_driver.schema[\"table1\"])\n\n\n@patch(GET_IPYTHON_PATCH)\ndef 
test_kql_query_not_connected(get_ipython):\n \"\"\"Check loaded true.\"\"\"\n get_ipython.return_value = _MockIPython()\n kql_driver = KqlDriver()\n\n with pytest.raises(MsticpyNotConnectedError) as mp_ex:\n kql_driver.query(\"test\")\n check.is_in(\"not connected to a workspace.\", mp_ex.value.args)\n check.is_false(kql_driver.connected)\n\n\n@patch(GET_IPYTHON_PATCH)\ndef test_kql_query_failed(get_ipython):\n \"\"\"Check loaded true.\"\"\"\n get_ipython.return_value = _MockIPython()\n kql_driver = KqlDriver()\n kql_driver.connect(connection_str=\"la://connection\")\n\n with pytest.raises(MsticpyDataQueryError) as mp_ex:\n kql_driver.query(\"test query_failed\")\n arg_str = \"\\n\".join([str(arg) for arg in mp_ex.value.args])\n check.is_in(\"Query:\", arg_str)\n check.is_in(\"test query_failed\", arg_str)\n check.is_in(\"Query failed\", arg_str)\n check.is_in(\n \"https://msticpy.readthedocs.io/en/latest/DataAcquisition.html\", arg_str\n )\n\n\n@patch(GET_IPYTHON_PATCH)\ndef test_kql_query_success(get_ipython):\n \"\"\"Check loaded true.\"\"\"\n get_ipython.return_value = _MockIPython()\n kql_driver = KqlDriver()\n kql_driver.connect(connection_str=\"la://connection\")\n\n result_df = kql_driver.query(\"test query\")\n check.is_instance(result_df, pd.DataFrame)\n\n\n@patch(GET_IPYTHON_PATCH)\ndef test_kql_query_partial(get_ipython):\n \"\"\"Check loaded true.\"\"\"\n get_ipython.return_value = _MockIPython()\n kql_driver = KqlDriver()\n kql_driver.connect(connection_str=\"la://connection\")\n\n output = io.StringIO()\n with redirect_stdout(output):\n result_df = kql_driver.query(\"test query_partial\")\n check.is_instance(result_df, pd.DataFrame)\n check.is_in(\"Warning - query returned partial\", output.getvalue())\n\n\n@patch(GET_IPYTHON_PATCH)\ndef test_kql_query_no_table(get_ipython):\n \"\"\"Check loaded true.\"\"\"\n get_ipython.return_value = _MockIPython()\n kql_driver = KqlDriver()\n kql_driver.connect(connection_str=\"la://connection\")\n\n with pytest.raises(MsticpyNoDataSourceError) as mp_ex:\n query_source = {\"args.table\": \"table3\"}\n kql_driver.query(\"test query\", query_source=query_source)\n\n check.is_in(\"table3 not found.\", mp_ex.value.args)\n"
] |
[
[
"pandas.read_pickle",
"pandas.read_csv"
],
[
"pandas.DataFrame"
]
] |
uberduck-ai/VQGAN-CLIP
|
[
"2e103f4076e084ec9774476ede00d68fdd35b35f"
] |
[
"generate.py"
] |
[
"# Originally made by Katherine Crowson (https://github.com/crowsonkb, https://twitter.com/RiversHaveWings)\n# The original BigGAN+CLIP method was by https://twitter.com/advadnoun\n\nimport argparse\nimport math\nimport random\n# from email.policy import default\nfrom urllib.request import urlopen\nfrom tqdm import tqdm\nimport sys\nimport os\n\n# pip install taming-transformers works with Gumbel, but does not yet work with coco etc\n# appending the path works with Gumbel, but gives ModuleNotFoundError: No module named 'transformers' for coco etc\nsys.path.append('taming-transformers')\n\nfrom omegaconf import OmegaConf\nfrom taming.models import cond_transformer, vqgan\n#import taming.modules \n\nimport torch\nfrom torch import nn, optim\nfrom torch.nn import functional as F\nfrom torchvision import transforms\nfrom torchvision.transforms import functional as TF\nfrom torch.cuda import get_device_properties\ntorch.backends.cudnn.benchmark = False\t\t# NR: True is a bit faster, but can lead to OOM. False is more deterministic.\n#torch.use_deterministic_algorithms(True)\t# NR: grid_sampler_2d_backward_cuda does not have a deterministic implementation\n\nfrom torch_optimizer import DiffGrad, AdamP, RAdam\n\nfrom CLIP import clip\nimport kornia.augmentation as K\nimport numpy as np\nimport imageio\n\nfrom PIL import ImageFile, Image, PngImagePlugin\nImageFile.LOAD_TRUNCATED_IMAGES = True\n\nfrom subprocess import Popen, PIPE\nimport re\n\n# Supress warnings\nimport warnings\nwarnings.filterwarnings('ignore')\n\n\n# Check for GPU and reduce the default image size if low VRAM\ndefault_image_size = 512 # >8GB VRAM\nif not torch.cuda.is_available():\n default_image_size = 256 # no GPU found\nelif get_device_properties(0).total_memory <= 2 ** 33: # 2 ** 33 = 8,589,934,592 bytes = 8 GB\n default_image_size = 318 # <8GB VRAM\n\n# Create the parser\nvq_parser = argparse.ArgumentParser(description='Image generation using VQGAN+CLIP')\n\n# Add the arguments\nvq_parser.add_argument(\"-p\", \"--prompts\", type=str, help=\"Text prompts\", default=None, dest='prompts')\nvq_parser.add_argument(\"-ip\", \"--image_prompts\", type=str, help=\"Image prompts / target image\", default=[], dest='image_prompts')\nvq_parser.add_argument(\"-i\", \"--iterations\", type=int, help=\"Number of iterations\", default=500, dest='max_iterations')\nvq_parser.add_argument(\"-se\", \"--save_every\", type=int, help=\"Save image iterations\", default=50, dest='display_freq')\nvq_parser.add_argument(\"-s\", \"--size\", nargs=2, type=int, help=\"Image size (width height) (default: %(default)s)\", default=[default_image_size,default_image_size], dest='size')\nvq_parser.add_argument(\"-ii\", \"--init_image\", type=str, help=\"Initial image\", default=None, dest='init_image')\nvq_parser.add_argument(\"-in\", \"--init_noise\", type=str, help=\"Initial noise image (pixels or gradient)\", default=None, dest='init_noise')\nvq_parser.add_argument(\"-iw\", \"--init_weight\", type=float, help=\"Initial weight\", default=0., dest='init_weight')\nvq_parser.add_argument(\"-m\", \"--clip_model\", type=str, help=\"CLIP model (e.g. 
ViT-B/32, ViT-B/16)\", default='ViT-B/32', dest='clip_model')\nvq_parser.add_argument(\"-conf\", \"--vqgan_config\", type=str, help=\"VQGAN config\", default=f'checkpoints/vqgan_imagenet_f16_16384.yaml', dest='vqgan_config')\nvq_parser.add_argument(\"-ckpt\", \"--vqgan_checkpoint\", type=str, help=\"VQGAN checkpoint\", default=f'checkpoints/vqgan_imagenet_f16_16384.ckpt', dest='vqgan_checkpoint')\nvq_parser.add_argument(\"-nps\", \"--noise_prompt_seeds\", nargs=\"*\", type=int, help=\"Noise prompt seeds\", default=[], dest='noise_prompt_seeds')\nvq_parser.add_argument(\"-npw\", \"--noise_prompt_weights\", nargs=\"*\", type=float, help=\"Noise prompt weights\", default=[], dest='noise_prompt_weights')\nvq_parser.add_argument(\"-lr\", \"--learning_rate\", type=float, help=\"Learning rate\", default=0.1, dest='step_size')\nvq_parser.add_argument(\"-cutm\", \"--cut_method\", type=str, help=\"Cut method\", choices=['original','updated','nrupdated','updatedpooling','latest'], default='latest', dest='cut_method')\nvq_parser.add_argument(\"-cuts\", \"--num_cuts\", type=int, help=\"Number of cuts\", default=32, dest='cutn')\nvq_parser.add_argument(\"-cutp\", \"--cut_power\", type=float, help=\"Cut power\", default=1., dest='cut_pow')\nvq_parser.add_argument(\"-sd\", \"--seed\", type=int, help=\"Seed\", default=None, dest='seed')\nvq_parser.add_argument(\"-opt\", \"--optimiser\", type=str, help=\"Optimiser\", choices=['Adam','AdamW','Adagrad','Adamax','DiffGrad','AdamP','RAdam','RMSprop'], default='Adam', dest='optimiser')\nvq_parser.add_argument(\"-o\", \"--output\", type=str, help=\"Output file\", default=\"output.png\", dest='output')\nvq_parser.add_argument(\"-vid\", \"--video\", action='store_true', help=\"Create video frames?\", dest='make_video')\nvq_parser.add_argument(\"-zvid\", \"--zoom_video\", action='store_true', help=\"Create zoom video?\", dest='make_zoom_video')\nvq_parser.add_argument(\"-zs\", \"--zoom_start\", type=int, help=\"Zoom start iteration\", default=0, dest='zoom_start')\nvq_parser.add_argument(\"-zse\", \"--zoom_save_every\", type=int, help=\"Save zoom image iterations\", default=10, dest='zoom_frequency')\nvq_parser.add_argument(\"-zsc\", \"--zoom_scale\", type=float, help=\"Zoom scale\", default=0.99, dest='zoom_scale')\nvq_parser.add_argument(\"-cpe\", \"--change_prompt_every\", type=int, help=\"Prompt change frequency\", default=0, dest='prompt_frequency')\nvq_parser.add_argument(\"-vl\", \"--video_length\", type=float, help=\"Video length in seconds (not interpolated)\", default=10, dest='video_length')\nvq_parser.add_argument(\"-ofps\", \"--output_video_fps\", type=float, help=\"Create an interpolated video (Nvidia GPU only) with this fps (min 10. 
best set to 30 or 60)\", default=0, dest='output_video_fps')\nvq_parser.add_argument(\"-ifps\", \"--input_video_fps\", type=float, help=\"When creating an interpolated video, use this as the input fps to interpolate from (>0 & <ofps)\", default=15, dest='input_video_fps')\nvq_parser.add_argument(\"-d\", \"--deterministic\", action='store_true', help=\"Enable cudnn.deterministic?\", dest='cudnn_determinism')\nvq_parser.add_argument(\"-aug\", \"--augments\", nargs='+', action='append', type=str, choices=['Ji','Sh','Gn','Pe','Ro','Af','Et','Ts','Cr','Er','Re'], help=\"Enabled augments (latest vut method only)\", default=[], dest='augments')\nvq_parser.add_argument(\"-vsd\", \"--video_style_dir\", type=str, help=\"Directory with video frames to style\", default=None, dest='video_style_dir')\nvq_parser.add_argument(\"-cd\", \"--cuda_device\", type=str, help=\"Cuda device to use\", default=\"cuda:0\", dest='cuda_device')\n\n\n# Execute the parse_args() method\nargs = vq_parser.parse_args()\n\nif not args.prompts and not args.image_prompts:\n args. prompts = \"A cute, smiling, Nerdy Rodent\"\n\nif args.cudnn_determinism:\n torch.backends.cudnn.deterministic = True\n\nif not args.augments:\n args.augments = [['Af', 'Pe', 'Ji', 'Er']]\n\n# Split text prompts using the pipe character (weights are split later)\nif args.prompts:\n # For stories, there will be many phrases\n story_phrases = [phrase.strip() for phrase in args.prompts.split(\"^\")]\n \n # Make a list of all phrases\n all_phrases = []\n for phrase in story_phrases:\n all_phrases.append(phrase.split(\"|\"))\n \n # First phrase\n args.prompts = all_phrases[0]\n \n# Split target images using the pipe character (weights are split later)\nif args.image_prompts:\n args.image_prompts = args.image_prompts.split(\"|\")\n args.image_prompts = [image.strip() for image in args.image_prompts]\n\nif args.make_video and args.make_zoom_video:\n print(\"Warning: Make video and make zoom video are mutually exclusive.\")\n args.make_video = False\n \n# Make video steps directory\nif args.make_video or args.make_zoom_video:\n if not os.path.exists('steps'):\n os.mkdir('steps')\n\n# Fallback to CPU if CUDA is not found and make sure GPU video rendering is also disabled\n# NB. May not work for AMD cards?\nif not args.cuda_device == 'cpu' and not torch.cuda.is_available():\n args.cuda_device = 'cpu'\n args.video_fps = 0\n print(\"Warning: No GPU found! Using the CPU instead. 
The iterations will be slow.\")\n print(\"Perhaps CUDA/ROCm or the right pytorch version is not properly installed?\")\n\n# If a video_style_dir has been, then create a list of all the images\nif args.video_style_dir:\n print(\"Locating video frames...\")\n video_frame_list = []\n for entry in os.scandir(args.video_style_dir):\n if (entry.path.endswith(\".jpg\")\n or entry.path.endswith(\".png\")) and entry.is_file():\n video_frame_list.append(entry.path)\n\n # Reset a few options - same filename, different directory\n if not os.path.exists('steps'):\n os.mkdir('steps')\n\n args.init_image = video_frame_list[0]\n filename = os.path.basename(args.init_image)\n cwd = os.getcwd()\n args.output = os.path.join(cwd, \"steps\", filename)\n num_video_frames = len(video_frame_list) # for video styling\n\n\n# Various functions and classes\ndef sinc(x):\n return torch.where(x != 0, torch.sin(math.pi * x) / (math.pi * x), x.new_ones([]))\n\n\ndef lanczos(x, a):\n cond = torch.logical_and(-a < x, x < a)\n out = torch.where(cond, sinc(x) * sinc(x/a), x.new_zeros([]))\n return out / out.sum()\n\n\ndef ramp(ratio, width):\n n = math.ceil(width / ratio + 1)\n out = torch.empty([n])\n cur = 0\n for i in range(out.shape[0]):\n out[i] = cur\n cur += ratio\n return torch.cat([-out[1:].flip([0]), out])[1:-1]\n\n\n# For zoom video\ndef zoom_at(img, x, y, zoom):\n w, h = img.size\n zoom2 = zoom * 2\n img = img.crop((x - w / zoom2, y - h / zoom2, \n x + w / zoom2, y + h / zoom2))\n return img.resize((w, h), Image.LANCZOS)\n\n\n# NR: Testing with different intital images\ndef random_noise_image(w,h):\n random_image = Image.fromarray(np.random.randint(0,255,(w,h,3),dtype=np.dtype('uint8')))\n return random_image\n\n\n# create initial gradient image\ndef gradient_2d(start, stop, width, height, is_horizontal):\n if is_horizontal:\n return np.tile(np.linspace(start, stop, width), (height, 1))\n else:\n return np.tile(np.linspace(start, stop, height), (width, 1)).T\n\n\ndef gradient_3d(width, height, start_list, stop_list, is_horizontal_list):\n result = np.zeros((height, width, len(start_list)), dtype=float)\n\n for i, (start, stop, is_horizontal) in enumerate(zip(start_list, stop_list, is_horizontal_list)):\n result[:, :, i] = gradient_2d(start, stop, width, height, is_horizontal)\n\n return result\n\n \ndef random_gradient_image(w,h):\n array = gradient_3d(w, h, (0, 0, np.random.randint(0,255)), (np.random.randint(1,255), np.random.randint(2,255), np.random.randint(3,128)), (True, False, False))\n random_image = Image.fromarray(np.uint8(array))\n return random_image\n\n\n# Used in older MakeCutouts\ndef resample(input, size, align_corners=True):\n n, c, h, w = input.shape\n dh, dw = size\n\n input = input.view([n * c, 1, h, w])\n\n if dh < h:\n kernel_h = lanczos(ramp(dh / h, 2), 2).to(input.device, input.dtype)\n pad_h = (kernel_h.shape[0] - 1) // 2\n input = F.pad(input, (0, 0, pad_h, pad_h), 'reflect')\n input = F.conv2d(input, kernel_h[None, None, :, None])\n\n if dw < w:\n kernel_w = lanczos(ramp(dw / w, 2), 2).to(input.device, input.dtype)\n pad_w = (kernel_w.shape[0] - 1) // 2\n input = F.pad(input, (pad_w, pad_w, 0, 0), 'reflect')\n input = F.conv2d(input, kernel_w[None, None, None, :])\n\n input = input.view([n, c, h, w])\n return F.interpolate(input, size, mode='bicubic', align_corners=align_corners)\n\n\nclass ReplaceGrad(torch.autograd.Function):\n @staticmethod\n def forward(ctx, x_forward, x_backward):\n ctx.shape = x_backward.shape\n return x_forward\n\n @staticmethod\n def backward(ctx, grad_in):\n 
return None, grad_in.sum_to_size(ctx.shape)\n\nreplace_grad = ReplaceGrad.apply\n\n\nclass ClampWithGrad(torch.autograd.Function):\n @staticmethod\n def forward(ctx, input, min, max):\n ctx.min = min\n ctx.max = max\n ctx.save_for_backward(input)\n return input.clamp(min, max)\n\n @staticmethod\n def backward(ctx, grad_in):\n input, = ctx.saved_tensors\n return grad_in * (grad_in * (input - input.clamp(ctx.min, ctx.max)) >= 0), None, None\n\nclamp_with_grad = ClampWithGrad.apply\n\n\ndef vector_quantize(x, codebook):\n d = x.pow(2).sum(dim=-1, keepdim=True) + codebook.pow(2).sum(dim=1) - 2 * x @ codebook.T\n indices = d.argmin(-1)\n x_q = F.one_hot(indices, codebook.shape[0]).to(d.dtype) @ codebook\n return replace_grad(x_q, x)\n\n\nclass Prompt(nn.Module):\n def __init__(self, embed, weight=1., stop=float('-inf')):\n super().__init__()\n self.register_buffer('embed', embed)\n self.register_buffer('weight', torch.as_tensor(weight))\n self.register_buffer('stop', torch.as_tensor(stop))\n\n def forward(self, input):\n input_normed = F.normalize(input.unsqueeze(1), dim=2)\n embed_normed = F.normalize(self.embed.unsqueeze(0), dim=2)\n dists = input_normed.sub(embed_normed).norm(dim=2).div(2).arcsin().pow(2).mul(2)\n dists = dists * self.weight.sign()\n return self.weight.abs() * replace_grad(dists, torch.maximum(dists, self.stop)).mean()\n\n\n#NR: Split prompts and weights\ndef split_prompt(prompt):\n vals = prompt.rsplit(':', 2)\n vals = vals + ['', '1', '-inf'][len(vals):]\n return vals[0], float(vals[1]), float(vals[2])\n\n\nclass MakeCutouts(nn.Module):\n def __init__(self, cut_size, cutn, cut_pow=1.):\n super().__init__()\n self.cut_size = cut_size\n self.cutn = cutn\n self.cut_pow = cut_pow # not used with pooling\n \n # Pick your own augments & their order\n augment_list = []\n for item in args.augments[0]:\n if item == 'Ji':\n augment_list.append(K.ColorJitter(brightness=0.1, contrast=0.1, saturation=0.1, hue=0.1, p=0.7))\n elif item == 'Sh':\n augment_list.append(K.RandomSharpness(sharpness=0.3, p=0.5))\n elif item == 'Gn':\n augment_list.append(K.RandomGaussianNoise(mean=0.0, std=1., p=0.5))\n elif item == 'Pe':\n augment_list.append(K.RandomPerspective(distortion_scale=0.7, p=0.7))\n elif item == 'Ro':\n augment_list.append(K.RandomRotation(degrees=15, p=0.7))\n elif item == 'Af':\n augment_list.append(K.RandomAffine(degrees=15, translate=0.1, shear=5, p=0.7, padding_mode='zeros', keepdim=True)) # border, reflection, zeros\n elif item == 'Et':\n augment_list.append(K.RandomElasticTransform(p=0.7))\n elif item == 'Ts':\n augment_list.append(K.RandomThinPlateSpline(scale=0.8, same_on_batch=True, p=0.7))\n elif item == 'Cr':\n augment_list.append(K.RandomCrop(size=(self.cut_size,self.cut_size), pad_if_needed=True, padding_mode='reflect', p=0.5))\n elif item == 'Er':\n augment_list.append(K.RandomErasing(scale=(.1, .4), ratio=(.3, 1/.3), same_on_batch=True, p=0.7))\n elif item == 'Re':\n augment_list.append(K.RandomResizedCrop(size=(self.cut_size,self.cut_size), scale=(0.1,1), ratio=(0.75,1.333), cropping_mode='resample', p=0.5))\n \n self.augs = nn.Sequential(*augment_list)\n self.noise_fac = 0.1\n # self.noise_fac = False\n\n # Uncomment if you like seeing the list ;)\n # print(augment_list)\n \n # Pooling\n self.av_pool = nn.AdaptiveAvgPool2d((self.cut_size, self.cut_size))\n self.max_pool = nn.AdaptiveMaxPool2d((self.cut_size, self.cut_size))\n\n def forward(self, input):\n cutouts = []\n \n for _ in range(self.cutn): \n # Use Pooling\n cutout = (self.av_pool(input) + 
self.max_pool(input))/2\n cutouts.append(cutout)\n \n batch = self.augs(torch.cat(cutouts, dim=0))\n \n if self.noise_fac:\n facs = batch.new_empty([self.cutn, 1, 1, 1]).uniform_(0, self.noise_fac)\n batch = batch + facs * torch.randn_like(batch)\n return batch\n\n\n# An updated version with Kornia augments and pooling (where my version started):\nclass MakeCutoutsPoolingUpdate(nn.Module):\n def __init__(self, cut_size, cutn, cut_pow=1.):\n super().__init__()\n self.cut_size = cut_size\n self.cutn = cutn\n self.cut_pow = cut_pow # Not used with pooling\n\n self.augs = nn.Sequential(\n K.RandomAffine(degrees=15, translate=0.1, p=0.7, padding_mode='border'),\n K.RandomPerspective(0.7,p=0.7),\n K.ColorJitter(hue=0.1, saturation=0.1, p=0.7),\n K.RandomErasing((.1, .4), (.3, 1/.3), same_on_batch=True, p=0.7), \n )\n \n self.noise_fac = 0.1\n self.av_pool = nn.AdaptiveAvgPool2d((self.cut_size, self.cut_size))\n self.max_pool = nn.AdaptiveMaxPool2d((self.cut_size, self.cut_size))\n\n def forward(self, input):\n sideY, sideX = input.shape[2:4]\n max_size = min(sideX, sideY)\n min_size = min(sideX, sideY, self.cut_size)\n cutouts = []\n \n for _ in range(self.cutn):\n cutout = (self.av_pool(input) + self.max_pool(input))/2\n cutouts.append(cutout)\n \n batch = self.augs(torch.cat(cutouts, dim=0))\n \n if self.noise_fac:\n facs = batch.new_empty([self.cutn, 1, 1, 1]).uniform_(0, self.noise_fac)\n batch = batch + facs * torch.randn_like(batch)\n return batch\n\n\n# An Nerdy updated version with selectable Kornia augments, but no pooling:\nclass MakeCutoutsNRUpdate(nn.Module):\n def __init__(self, cut_size, cutn, cut_pow=1.):\n super().__init__()\n self.cut_size = cut_size\n self.cutn = cutn\n self.cut_pow = cut_pow\n self.noise_fac = 0.1\n \n # Pick your own augments & their order\n augment_list = []\n for item in args.augments[0]:\n if item == 'Ji':\n augment_list.append(K.ColorJitter(brightness=0.1, contrast=0.1, saturation=0.1, hue=0.1, p=0.7))\n elif item == 'Sh':\n augment_list.append(K.RandomSharpness(sharpness=0.3, p=0.5))\n elif item == 'Gn':\n augment_list.append(K.RandomGaussianNoise(mean=0.0, std=1., p=0.5))\n elif item == 'Pe':\n augment_list.append(K.RandomPerspective(distortion_scale=0.5, p=0.7))\n elif item == 'Ro':\n augment_list.append(K.RandomRotation(degrees=15, p=0.7))\n elif item == 'Af':\n augment_list.append(K.RandomAffine(degrees=30, translate=0.1, shear=5, p=0.7, padding_mode='zeros', keepdim=True)) # border, reflection, zeros\n elif item == 'Et':\n augment_list.append(K.RandomElasticTransform(p=0.7))\n elif item == 'Ts':\n augment_list.append(K.RandomThinPlateSpline(scale=0.8, same_on_batch=True, p=0.7))\n elif item == 'Cr':\n augment_list.append(K.RandomCrop(size=(self.cut_size,self.cut_size), pad_if_needed=True, padding_mode='reflect', p=0.5))\n elif item == 'Er':\n augment_list.append(K.RandomErasing(scale=(.1, .4), ratio=(.3, 1/.3), same_on_batch=True, p=0.7))\n elif item == 'Re':\n augment_list.append(K.RandomResizedCrop(size=(self.cut_size,self.cut_size), scale=(0.1,1), ratio=(0.75,1.333), cropping_mode='resample', p=0.5))\n \n self.augs = nn.Sequential(*augment_list)\n\n\n def forward(self, input):\n sideY, sideX = input.shape[2:4]\n max_size = min(sideX, sideY)\n min_size = min(sideX, sideY, self.cut_size)\n cutouts = []\n for _ in range(self.cutn):\n size = int(torch.rand([])**self.cut_pow * (max_size - min_size) + min_size)\n offsetx = torch.randint(0, sideX - size + 1, ())\n offsety = torch.randint(0, sideY - size + 1, ())\n cutout = input[:, :, offsety:offsety + 
size, offsetx:offsetx + size]\n cutouts.append(resample(cutout, (self.cut_size, self.cut_size)))\n batch = self.augs(torch.cat(cutouts, dim=0))\n if self.noise_fac:\n facs = batch.new_empty([self.cutn, 1, 1, 1]).uniform_(0, self.noise_fac)\n batch = batch + facs * torch.randn_like(batch)\n return batch\n\n\n# An updated version with Kornia augments, but no pooling:\nclass MakeCutoutsUpdate(nn.Module):\n def __init__(self, cut_size, cutn, cut_pow=1.):\n super().__init__()\n self.cut_size = cut_size\n self.cutn = cutn\n self.cut_pow = cut_pow\n self.augs = nn.Sequential(\n K.RandomHorizontalFlip(p=0.5),\n K.ColorJitter(hue=0.01, saturation=0.01, p=0.7),\n # K.RandomSolarize(0.01, 0.01, p=0.7),\n K.RandomSharpness(0.3,p=0.4),\n K.RandomAffine(degrees=30, translate=0.1, p=0.8, padding_mode='border'),\n K.RandomPerspective(0.2,p=0.4),)\n self.noise_fac = 0.1\n\n\n def forward(self, input):\n sideY, sideX = input.shape[2:4]\n max_size = min(sideX, sideY)\n min_size = min(sideX, sideY, self.cut_size)\n cutouts = []\n for _ in range(self.cutn):\n size = int(torch.rand([])**self.cut_pow * (max_size - min_size) + min_size)\n offsetx = torch.randint(0, sideX - size + 1, ())\n offsety = torch.randint(0, sideY - size + 1, ())\n cutout = input[:, :, offsety:offsety + size, offsetx:offsetx + size]\n cutouts.append(resample(cutout, (self.cut_size, self.cut_size)))\n batch = self.augs(torch.cat(cutouts, dim=0))\n if self.noise_fac:\n facs = batch.new_empty([self.cutn, 1, 1, 1]).uniform_(0, self.noise_fac)\n batch = batch + facs * torch.randn_like(batch)\n return batch\n\n\n# This is the original version (No pooling)\nclass MakeCutoutsOrig(nn.Module):\n def __init__(self, cut_size, cutn, cut_pow=1.):\n super().__init__()\n self.cut_size = cut_size\n self.cutn = cutn\n self.cut_pow = cut_pow\n\n def forward(self, input):\n sideY, sideX = input.shape[2:4]\n max_size = min(sideX, sideY)\n min_size = min(sideX, sideY, self.cut_size)\n cutouts = []\n for _ in range(self.cutn):\n size = int(torch.rand([])**self.cut_pow * (max_size - min_size) + min_size)\n offsetx = torch.randint(0, sideX - size + 1, ())\n offsety = torch.randint(0, sideY - size + 1, ())\n cutout = input[:, :, offsety:offsety + size, offsetx:offsetx + size]\n cutouts.append(resample(cutout, (self.cut_size, self.cut_size)))\n return clamp_with_grad(torch.cat(cutouts, dim=0), 0, 1)\n\n\ndef load_vqgan_model(config_path, checkpoint_path):\n global gumbel\n gumbel = False\n config = OmegaConf.load(config_path)\n if config.model.target == 'taming.models.vqgan.VQModel':\n model = vqgan.VQModel(**config.model.params)\n model.eval().requires_grad_(False)\n model.init_from_ckpt(checkpoint_path)\n elif config.model.target == 'taming.models.vqgan.GumbelVQ':\n model = vqgan.GumbelVQ(**config.model.params)\n model.eval().requires_grad_(False)\n model.init_from_ckpt(checkpoint_path)\n gumbel = True\n elif config.model.target == 'taming.models.cond_transformer.Net2NetTransformer':\n parent_model = cond_transformer.Net2NetTransformer(**config.model.params)\n parent_model.eval().requires_grad_(False)\n parent_model.init_from_ckpt(checkpoint_path)\n model = parent_model.first_stage_model\n else:\n raise ValueError(f'unknown model type: {config.model.target}')\n del model.loss\n return model\n\n\ndef resize_image(image, out_size):\n ratio = image.size[0] / image.size[1]\n area = min(image.size[0] * image.size[1], out_size[0] * out_size[1])\n size = round((area * ratio)**0.5), round((area / ratio)**0.5)\n return image.resize(size, Image.LANCZOS)\n\n\n# Do it\ndevice 
= torch.device(args.cuda_device)\nmodel = load_vqgan_model(args.vqgan_config, args.vqgan_checkpoint).to(device)\njit = True if float(torch.__version__[:3]) < 1.8 else False\nperceptor = clip.load(args.clip_model, jit=jit)[0].eval().requires_grad_(False).to(device)\n\n# clock=deepcopy(perceptor.visual.positional_embedding.data)\n# perceptor.visual.positional_embedding.data = clock/clock.max()\n# perceptor.visual.positional_embedding.data=clamp_with_grad(clock,0,1)\n\ncut_size = perceptor.visual.input_resolution\nf = 2**(model.decoder.num_resolutions - 1)\n\n# Cutout class options:\n# 'latest','original','updated' or 'updatedpooling'\nif args.cut_method == 'latest':\n make_cutouts = MakeCutouts(cut_size, args.cutn, cut_pow=args.cut_pow)\nelif args.cut_method == 'original':\n make_cutouts = MakeCutoutsOrig(cut_size, args.cutn, cut_pow=args.cut_pow)\nelif args.cut_method == 'updated':\n make_cutouts = MakeCutoutsUpdate(cut_size, args.cutn, cut_pow=args.cut_pow)\nelif args.cut_method == 'nrupdated':\n make_cutouts = MakeCutoutsNRUpdate(cut_size, args.cutn, cut_pow=args.cut_pow)\nelse:\n make_cutouts = MakeCutoutsPoolingUpdate(cut_size, args.cutn, cut_pow=args.cut_pow) \n\ntoksX, toksY = args.size[0] // f, args.size[1] // f\nsideX, sideY = toksX * f, toksY * f\n\n# Gumbel or not?\nif gumbel:\n e_dim = 256\n n_toks = model.quantize.n_embed\n z_min = model.quantize.embed.weight.min(dim=0).values[None, :, None, None]\n z_max = model.quantize.embed.weight.max(dim=0).values[None, :, None, None]\nelse:\n e_dim = model.quantize.e_dim\n n_toks = model.quantize.n_e\n z_min = model.quantize.embedding.weight.min(dim=0).values[None, :, None, None]\n z_max = model.quantize.embedding.weight.max(dim=0).values[None, :, None, None]\n\n\nif args.init_image:\n if 'http' in args.init_image:\n img = Image.open(urlopen(args.init_image))\n else:\n img = Image.open(args.init_image)\n pil_image = img.convert('RGB')\n pil_image = pil_image.resize((sideX, sideY), Image.LANCZOS)\n pil_tensor = TF.to_tensor(pil_image)\n z, *_ = model.encode(pil_tensor.to(device).unsqueeze(0) * 2 - 1)\nelif args.init_noise == 'pixels':\n img = random_noise_image(args.size[0], args.size[1]) \n pil_image = img.convert('RGB')\n pil_image = pil_image.resize((sideX, sideY), Image.LANCZOS)\n pil_tensor = TF.to_tensor(pil_image)\n z, *_ = model.encode(pil_tensor.to(device).unsqueeze(0) * 2 - 1)\nelif args.init_noise == 'gradient':\n img = random_gradient_image(args.size[0], args.size[1])\n pil_image = img.convert('RGB')\n pil_image = pil_image.resize((sideX, sideY), Image.LANCZOS)\n pil_tensor = TF.to_tensor(pil_image)\n z, *_ = model.encode(pil_tensor.to(device).unsqueeze(0) * 2 - 1)\nelse:\n one_hot = F.one_hot(torch.randint(n_toks, [toksY * toksX], device=device), n_toks).float()\n # z = one_hot @ model.quantize.embedding.weight\n if gumbel:\n z = one_hot @ model.quantize.embed.weight\n else:\n z = one_hot @ model.quantize.embedding.weight\n\n z = z.view([-1, toksY, toksX, e_dim]).permute(0, 3, 1, 2) \n #z = torch.rand_like(z)*2\t\t\t\t\t\t# NR: check\n\nz_orig = z.clone()\nz.requires_grad_(True)\n\npMs = []\nnormalize = transforms.Normalize(mean=[0.48145466, 0.4578275, 0.40821073],\n std=[0.26862954, 0.26130258, 0.27577711])\n\n# From imagenet - Which is better?\n#normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],\n# std=[0.229, 0.224, 0.225])\n\n# CLIP tokenize/encode \nfor prompt in args.prompts:\n txt, weight, stop = split_prompt(prompt)\n embed = perceptor.encode_text(clip.tokenize(txt).to(device)).float()\n 
pMs.append(Prompt(embed, weight, stop).to(device))\n\nfor prompt in args.image_prompts:\n path, weight, stop = split_prompt(prompt)\n img = Image.open(path)\n pil_image = img.convert('RGB')\n img = resize_image(pil_image, (sideX, sideY))\n batch = make_cutouts(TF.to_tensor(img).unsqueeze(0).to(device))\n embed = perceptor.encode_image(normalize(batch)).float()\n pMs.append(Prompt(embed, weight, stop).to(device))\n\nfor seed, weight in zip(args.noise_prompt_seeds, args.noise_prompt_weights):\n gen = torch.Generator().manual_seed(seed)\n embed = torch.empty([1, perceptor.visual.output_dim]).normal_(generator=gen)\n pMs.append(Prompt(embed, weight).to(device))\n\n\n# Set the optimiser\ndef get_opt(opt_name, opt_lr):\n if opt_name == \"Adam\":\n opt = optim.Adam([z], lr=opt_lr)\t# LR=0.1 (Default)\n elif opt_name == \"AdamW\":\n opt = optim.AdamW([z], lr=opt_lr)\t\n elif opt_name == \"Adagrad\":\n opt = optim.Adagrad([z], lr=opt_lr)\t# LR=0.5+\n elif opt_name == \"Adamax\":\n opt = optim.Adamax([z], lr=opt_lr)\t# LR=0.5+\n elif opt_name == \"DiffGrad\":\n opt = DiffGrad([z], lr=opt_lr)\t \n elif opt_name == \"AdamP\":\n opt = AdamP([z], lr=opt_lr)\t\t \n elif opt_name == \"RAdam\":\n opt = RAdam([z], lr=opt_lr)\t\t \n elif opt_name == \"RMSprop\":\n opt = optim.RMSprop([z], lr=opt_lr)\n else:\n print(\"Unknown optimiser. Are choices broken?\")\n opt = optim.Adam([z], lr=opt_lr)\n return opt\n\nopt = get_opt(args.optimiser, args.step_size)\n\n\n# Output for the user\nprint('Using device:', device)\nprint('Optimising using:', args.optimiser)\n\nif args.prompts:\n print('Using text prompts:', args.prompts) \nif args.image_prompts:\n print('Using image prompts:', args.image_prompts)\nif args.init_image:\n print('Using initial image:', args.init_image)\nif args.noise_prompt_weights:\n print('Noise prompt weights:', args.noise_prompt_weights) \n\n\nif args.seed is None:\n seed = torch.seed()\nelse:\n seed = args.seed \ntorch.manual_seed(seed)\nprint('Using seed:', seed)\n\n\n# Vector quantize\ndef synth(z):\n if gumbel:\n z_q = vector_quantize(z.movedim(1, 3), model.quantize.embed.weight).movedim(3, 1)\n else:\n z_q = vector_quantize(z.movedim(1, 3), model.quantize.embedding.weight).movedim(3, 1)\n return clamp_with_grad(model.decode(z_q).add(1).div(2), 0, 1)\n\n\[email protected]_grad()\ndef checkin(i, losses):\n losses_str = ', '.join(f'{loss.item():g}' for loss in losses)\n tqdm.write(f'i: {i}, loss: {sum(losses).item():g}, losses: {losses_str}')\n out = synth(z)\n info = PngImagePlugin.PngInfo()\n info.add_text('comment', f'{args.prompts}')\n TF.to_pil_image(out[0].cpu()).save(args.output, pnginfo=info) \t\n\n\ndef ascend_txt():\n global i\n out = synth(z)\n iii = perceptor.encode_image(normalize(make_cutouts(out))).float()\n \n result = []\n\n if args.init_weight:\n # result.append(F.mse_loss(z, z_orig) * args.init_weight / 2)\n result.append(F.mse_loss(z, torch.zeros_like(z_orig)) * ((1/torch.tensor(i*2 + 1))*args.init_weight) / 2)\n\n for prompt in pMs:\n result.append(prompt(iii))\n \n if args.make_video: \n img = np.array(out.mul(255).clamp(0, 255)[0].cpu().detach().numpy().astype(np.uint8))[:,:,:]\n img = np.transpose(img, (1, 2, 0))\n imageio.imwrite('./steps/' + str(i) + '.png', np.array(img))\n\n return result # return loss\n\n\ndef train(i):\n opt.zero_grad(set_to_none=True)\n lossAll = ascend_txt()\n \n if i % args.display_freq == 0:\n checkin(i, lossAll)\n \n loss = sum(lossAll)\n loss.backward()\n opt.step()\n \n with torch.no_grad():\n 
z.copy_(z.maximum(z_min).minimum(z_max))\n\n\n\ni = 0 # Iteration counter\nj = 0 # Zoom video frame counter\np = 1 # Phrase counter\nsmoother = 0 # Smoother counter\nthis_video_frame = 0 # for video styling\n\n# Messing with learning rate / optimisers\n#variable_lr = args.step_size\n#optimiser_list = [['Adam',0.085],['AdamW',0.125],['Adagrad',0.225],['Adamax',0.125],['DiffGrad',0.08],['RAdam',0.125],['RMSprop',0.04]]\n\n# Do it\ntry:\n with tqdm() as pbar:\n while True: \n # Change generated image\n if args.make_zoom_video:\n if i % args.zoom_frequency == 0:\n out = synth(z)\n \n # Save image\n img = np.array(out.mul(255).clamp(0, 255)[0].cpu().detach().numpy().astype(np.uint8))[:,:,:]\n img = np.transpose(img, (1, 2, 0))\n imageio.imwrite('./steps/' + str(j) + '.png', np.array(img))\n\n # Time to start zooming? \n if args.zoom_start <= i:\n # Convert z back into a Pil image \n #pil_image = TF.to_pil_image(out[0].cpu())\n \n # Convert NP to Pil image\n pil_image = Image.fromarray(np.array(img).astype('uint8'), 'RGB')\n \n # Zoom\n pil_image_zoom = zoom_at(pil_image, sideX/2, sideY/2, args.zoom_scale)\n \n # Convert image back to a tensor again\n pil_tensor = TF.to_tensor(pil_image_zoom)\n \n # Re-encode\n z, *_ = model.encode(pil_tensor.to(device).unsqueeze(0) * 2 - 1)\n z_orig = z.clone()\n z.requires_grad_(True)\n\n # Re-create optimiser\n opt = get_opt(args.optimiser, args.step_size)\n \n # Next\n j += 1\n \n # Change text prompt\n if args.prompt_frequency > 0:\n if i % args.prompt_frequency == 0 and i > 0:\n # In case there aren't enough phrases, just loop\n if p >= len(all_phrases):\n p = 0\n \n pMs = []\n args.prompts = all_phrases[p]\n\n # Show user we're changing prompt \n print(args.prompts)\n \n for prompt in args.prompts:\n txt, weight, stop = split_prompt(prompt)\n embed = perceptor.encode_text(clip.tokenize(txt).to(device)).float()\n pMs.append(Prompt(embed, weight, stop).to(device))\n \n '''\n # Smooth test\n smoother = args.zoom_frequency * 15 # smoothing over x frames\n variable_lr = args.step_size * 0.25\n opt = get_opt(args.optimiser, variable_lr)\n '''\n \n p += 1\n \n '''\n if smoother > 0:\n if smoother == 1:\n opt = get_opt(args.optimiser, args.step_size)\n smoother -= 1\n '''\n \n ''' \n # Messing with learning rate / optimisers\n if i % 225 == 0 and i > 0:\n variable_optimiser_item = random.choice(optimiser_list)\n variable_optimiser = variable_optimiser_item[0]\n variable_lr = variable_optimiser_item[1]\n \n opt = get_opt(variable_optimiser, variable_lr)\n print(\"New opt: %s, lr= %f\" %(variable_optimiser,variable_lr)) \n ''' \n\n # Training time\n train(i)\n \n # Ready to stop yet?\n if i == args.max_iterations:\n if not args.video_style_dir:\n # we're done\n break\n else: \n if this_video_frame == (num_video_frames - 1):\n # we're done\n make_styled_video = True\n break\n else:\n # Next video frame\n this_video_frame += 1\n\n # Reset the iteration count\n i = -1\n pbar.reset()\n \n # Load the next frame, reset a few options - same filename, different directory\n args.init_image = video_frame_list[this_video_frame]\n print(\"Next frame: \", args.init_image)\n\n if args.seed is None:\n seed = torch.seed()\n else:\n seed = args.seed \n torch.manual_seed(seed)\n print(\"Seed: \", seed)\n\n filename = os.path.basename(args.init_image)\n args.output = os.path.join(cwd, \"steps\", filename)\n\n # Load and resize image\n img = Image.open(args.init_image)\n pil_image = img.convert('RGB')\n pil_image = pil_image.resize((sideX, sideY), Image.LANCZOS)\n pil_tensor = 
TF.to_tensor(pil_image)\n \n # Re-encode\n z, *_ = model.encode(pil_tensor.to(device).unsqueeze(0) * 2 - 1)\n z_orig = z.clone()\n z.requires_grad_(True)\n\n # Re-create optimiser\n opt = get_opt(args.optimiser, args.step_size)\n\n i += 1\n pbar.update()\nexcept KeyboardInterrupt:\n pass\n\n# All done :)\n\n# Video generation\nif args.make_video or args.make_zoom_video:\n init_frame = 1 # Initial video frame\n if args.make_zoom_video:\n last_frame = j\n else:\n last_frame = i # This will raise an error if that number of frames does not exist.\n\n length = args.video_length # Desired time of the video in seconds\n\n min_fps = 10\n max_fps = 60\n\n total_frames = last_frame-init_frame\n\n frames = []\n tqdm.write('Generating video...')\n for i in range(init_frame,last_frame):\n temp = Image.open(\"./steps/\"+ str(i) +'.png')\n keep = temp.copy()\n frames.append(keep)\n temp.close()\n \n if args.output_video_fps > 9:\n # Hardware encoding and video frame interpolation\n print(\"Creating interpolated frames...\")\n ffmpeg_filter = f\"minterpolate='mi_mode=mci:me=hexbs:me_mode=bidir:mc_mode=aobmc:vsbmc=1:mb_size=8:search_param=32:fps={args.output_video_fps}'\"\n output_file = re.compile('\\.png$').sub('.mp4', args.output)\n try:\n p = Popen(['ffmpeg',\n '-y',\n '-f', 'image2pipe',\n '-vcodec', 'png',\n '-r', str(args.input_video_fps), \n '-i',\n '-',\n '-b:v', '10M',\n '-vcodec', 'h264_nvenc',\n '-pix_fmt', 'yuv420p',\n '-strict', '-2',\n '-filter:v', f'{ffmpeg_filter}',\n '-metadata', f'comment={args.prompts}',\n output_file], stdin=PIPE)\n except FileNotFoundError:\n print(\"ffmpeg command failed - check your installation\")\n for im in tqdm(frames):\n im.save(p.stdin, 'PNG')\n p.stdin.close()\n p.wait()\n else:\n # CPU\n fps = np.clip(total_frames/length,min_fps,max_fps)\n output_file = re.compile('\\.png$').sub('.mp4', args.output)\n try:\n p = Popen(['ffmpeg',\n '-y',\n '-f', 'image2pipe',\n '-vcodec', 'png',\n '-r', str(fps),\n '-i',\n '-',\n '-vcodec', 'libx264',\n '-r', str(fps),\n '-pix_fmt', 'yuv420p',\n '-crf', '17',\n '-preset', 'veryslow',\n '-metadata', f'comment={args.prompts}',\n output_file], stdin=PIPE)\n except FileNotFoundError:\n print(\"ffmpeg command failed - check your installation\") \n for im in tqdm(frames):\n im.save(p.stdin, 'PNG')\n p.stdin.close()\n p.wait() \n"
] |
[
[
"torch.cuda.get_device_properties",
"torch.randn_like",
"torch.randint",
"numpy.linspace",
"torch.sin",
"torch.cat",
"numpy.dtype",
"torch.no_grad",
"torch.cuda.is_available",
"torch.nn.functional.interpolate",
"torch.device",
"torch.logical_and",
"numpy.random.randint",
"torch.Generator",
"torch.nn.AdaptiveMaxPool2d",
"numpy.clip",
"numpy.uint8",
"torch.tensor",
"torch.rand",
"torch.nn.functional.pad",
"torch.optim.Adam",
"torch.nn.Sequential",
"torch.empty",
"torch.nn.functional.conv2d",
"torch.zeros_like",
"torch.optim.AdamW",
"numpy.transpose",
"numpy.array",
"torch.seed",
"torch.as_tensor",
"torch.optim.Adamax",
"torch.optim.Adagrad",
"torch.manual_seed",
"torch.optim.RMSprop",
"torch.nn.AdaptiveAvgPool2d",
"torch.nn.functional.one_hot",
"torch.maximum"
]
] |
mlhaycck/examples
|
[
"f27903a2116cb9158b48e36ef5531a836434c2f1"
] |
[
"tensorflow_examples/lite/model_maker/core/task/text_classifier.py"
] |
[
"# Copyright 2019 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"TextClassier class.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\nimport tempfile\n\nimport tensorflow as tf\n\nfrom tensorflow_examples.lite.model_maker.core import compat\nfrom tensorflow_examples.lite.model_maker.core.export_format import ExportFormat\nfrom tensorflow_examples.lite.model_maker.core.task import classification_model\nfrom tensorflow_examples.lite.model_maker.core.task import model_spec as ms\nfrom tensorflow_examples.lite.model_maker.core.task import model_util\nfrom tensorflow_examples.lite.model_maker.core.task.metadata_writers.bert.text_classifier import metadata_writer_for_bert_text_classifier as bert_metadata_writer\nfrom tensorflow_examples.lite.model_maker.core.task.metadata_writers.text_classifier import metadata_writer_for_text_classifier as metadata_writer\n\n\ndef create(train_data,\n model_spec='average_word_vec',\n validation_data=None,\n batch_size=None,\n epochs=3,\n shuffle=False,\n do_train=True):\n \"\"\"Loads data and train the model for test classification.\n\n Args:\n train_data: Training data.\n model_spec: Specification for the model.\n validation_data: Validation data. If None, skips validation process.\n batch_size: Batch size for training.\n epochs: Number of epochs for training.\n shuffle: Whether the data should be shuffled.\n do_train: Whether to run training.\n\n Returns:\n TextClassifier\n \"\"\"\n model_spec = ms.get(model_spec)\n if compat.get_tf_behavior() not in model_spec.compat_tf_versions:\n raise ValueError('Incompatible versions. 
Expect {}, but got {}.'.format(\n model_spec.compat_tf_versions, compat.get_tf_behavior()))\n\n text_classifier = TextClassifier(\n model_spec,\n train_data.index_to_label,\n train_data.num_classes,\n shuffle=shuffle)\n\n if do_train:\n tf.compat.v1.logging.info('Retraining the models...')\n text_classifier.train(train_data, validation_data, epochs, batch_size)\n else:\n text_classifier.create_model()\n\n return text_classifier\n\n\ndef _get_bert_model_info(model_spec, vocab_file, label_file):\n return bert_metadata_writer.ClassifierSpecificInfo(\n name=model_spec.name + ' text classifier',\n version='v1',\n description=bert_metadata_writer.DEFAULT_DESCRIPTION,\n input_names=bert_metadata_writer.bert_qa_inputs(\n ids_name=model_spec.tflite_input_name['ids'],\n mask_name=model_spec.tflite_input_name['mask'],\n segment_ids_name=model_spec.tflite_input_name['segment_ids']),\n tokenizer_type=bert_metadata_writer.Tokenizer.BERT_TOKENIZER,\n vocab_file=vocab_file,\n label_file=label_file)\n\n\ndef _get_model_info(model_name):\n return metadata_writer.ModelSpecificInfo(\n name=model_name + ' text classifier',\n description='Classify text into predefined categories.',\n version='v1')\n\n\nclass TextClassifier(classification_model.ClassificationModel):\n \"\"\"TextClassifier class for inference and exporting to tflite.\"\"\"\n\n DEFAULT_EXPORT_FORMAT = (ExportFormat.TFLITE, ExportFormat.LABEL,\n ExportFormat.VOCAB)\n ALLOWED_EXPORT_FORMAT = (ExportFormat.TFLITE, ExportFormat.LABEL,\n ExportFormat.VOCAB, ExportFormat.SAVED_MODEL)\n\n def __init__(self,\n model_spec,\n index_to_label,\n num_classes,\n shuffle=True):\n \"\"\"Init function for TextClassifier class.\n\n Args:\n model_spec: Specification for the model.\n index_to_label: A list that map from index to label class name.\n num_classes: Number of label classes.\n shuffle: Whether the data should be shuffled.\n \"\"\"\n super(TextClassifier, self).__init__(\n model_spec,\n index_to_label,\n num_classes,\n shuffle,\n train_whole_model=True)\n\n def create_model(self):\n self.model = self.model_spec.create_model(self.num_classes)\n\n def train(self,\n train_data,\n validation_data=None,\n epochs=None,\n batch_size=None):\n \"\"\"Feeds the training data for training.\"\"\"\n if batch_size is None:\n batch_size = self.model_spec.default_batch_size\n\n if len(train_data) < batch_size:\n raise ValueError('The size of the train_data (%d) couldn\\'t be smaller '\n 'than batch_size (%d). To solve this problem, set '\n 'the batch_size smaller or increase the size of the '\n 'train_data.' % (len(train_data), batch_size))\n\n train_input_fn, steps_per_epoch = self._get_input_fn_and_steps(\n train_data, batch_size, is_training=True)\n validation_input_fn, validation_steps = self._get_input_fn_and_steps(\n validation_data, batch_size, is_training=False)\n\n self.model = self.model_spec.run_classifier(\n train_input_fn,\n validation_input_fn,\n epochs,\n steps_per_epoch,\n validation_steps,\n self.num_classes,\n callbacks=self._keras_callbacks(model_dir=self.model_spec.model_dir))\n\n return self.model\n\n def _export_tflite(self,\n tflite_filepath,\n quantization_config=None,\n with_metadata=True):\n \"\"\"Converts the retrained model to tflite format and saves it.\n\n Args:\n tflite_filepath: File path to save tflite model.\n quantization_config: Configuration for post-training quantization.\n with_metadata: Whether the output tflite model contains metadata. 
If True,\n Exports metadata in json file as well.\n \"\"\"\n # Sets batch size from None to 1 when converting to tflite.\n model_util.set_batch_size(self.model, batch_size=1)\n model_util.export_tflite(self.model, tflite_filepath, quantization_config,\n self.model_spec.convert_from_saved_model_tf2)\n # Sets batch size back to None to support retraining later.\n model_util.set_batch_size(self.model, batch_size=None)\n\n if with_metadata:\n with tempfile.TemporaryDirectory() as temp_dir:\n tf.compat.v1.logging.info('Vocab file and label file are inside the '\n 'TFLite model with metadata.')\n vocab_filepath = os.path.join(temp_dir, 'vocab.txt')\n self.model_spec.save_vocab(vocab_filepath)\n label_filepath = os.path.join(temp_dir, 'labels.txt')\n self._export_labels(label_filepath)\n\n export_dir = os.path.dirname(tflite_filepath)\n if isinstance(self.model_spec, ms.BertClassifierModelSpec):\n model_info = _get_bert_model_info(self.model_spec, vocab_filepath,\n label_filepath)\n populator = bert_metadata_writer.MetadataPopulatorForBertTextClassifier(\n tflite_filepath, export_dir, model_info)\n elif isinstance(self.model_spec, ms.AverageWordVecModelSpec):\n model_info = _get_model_info(self.model_spec.name)\n populator = metadata_writer.MetadataPopulatorForTextClassifier(\n tflite_filepath, export_dir, model_info, label_filepath,\n vocab_filepath)\n else:\n raise ValueError('Model Specification is not supported to writing '\n 'metadata into TFLite. Please set '\n '`with_metadata=False` or write metadata by '\n 'yourself.')\n populator.populate()\n"
] |
[
[
"tensorflow.compat.v1.logging.info"
]
] |
mmorrison1670/yellowbrick
|
[
"c2028de2b7703e563503f7e85fdd65ad08de1ef6"
] |
[
"tests/test_classifier/test_class_prediction_error.py"
] |
[
"# tests.test_classifier.test_class_prediction_error\n# Testing for the ClassPredictionError visualizer\n#\n# Author: Benjamin Bengfort\n# Author: Rebecca Bilbro\n# Author: Larry Gray\n# Created: Tue May 23 13:41:55 2017 -0700\n#\n# Copyright (C) 2017 The scikit-yb developers\n# For license information, see LICENSE.txt\n#\n# ID: test_rocauc.py [] [email protected] $\n\n\"\"\"\nTesting for the ClassPredictionError visualizer\n\"\"\"\n\n##########################################################################\n## Imports\n##########################################################################\n\nimport pytest\nimport matplotlib.pyplot as plt\n\nfrom yellowbrick.exceptions import ModelError\nfrom yellowbrick.datasets import load_occupancy\nfrom yellowbrick.classifier.class_prediction_error import *\n\nfrom sklearn.svm import LinearSVC\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.datasets import make_multilabel_classification\n\nfrom unittest.mock import patch\nfrom tests.base import VisualTestCase\n\n\ntry:\n import pandas as pd\nexcept ImportError:\n pd = None\n\n\n##########################################################################\n## Tests\n##########################################################################\n\n\nclass TestClassPredictionError(VisualTestCase):\n \"\"\"\n Test ClassPredictionError visualizer\n \"\"\"\n\n @pytest.mark.filterwarnings(\"ignore:could not determine class_counts_\")\n def test_numpy_integration(self):\n \"\"\"\n Assert no errors during class prediction error integration with NumPy arrays\n \"\"\"\n X, y = load_occupancy(return_dataset=True).to_numpy()\n\n classes = [\"unoccupied\", \"occupied\"]\n\n model = LinearSVC(random_state=42)\n model.fit(X, y)\n visualizer = ClassPredictionError(model, classes=classes)\n visualizer.score(X, y)\n visualizer.finalize()\n\n # AppVeyor and Linux conda fail due to non-text-based differences\n # AppVeyor fails with RMS 13.161 - 13.289 (python - miniconda)\n self.assert_images_similar(visualizer, tol=12.5, windows_tol=13.3)\n\n @pytest.mark.filterwarnings(\"ignore:could not determine class_counts_\")\n @pytest.mark.skipif(pd is None, reason=\"test requires pandas\")\n def test_pandas_integration(self):\n \"\"\"\n Assert no errors during class prediction error integration with Pandas\n \"\"\"\n X, y = load_occupancy(return_dataset=True).to_pandas()\n classes = [\"unoccupied\", \"occupied\"]\n\n model = LinearSVC(random_state=42)\n model.fit(X, y)\n visualizer = ClassPredictionError(model, classes=classes)\n visualizer.score(X, y)\n visualizer.finalize()\n\n # AppVeyor and Linux conda fail due to non-text-based differences\n # AppVeyor fails with RMS 13.161 - 13.289 (python - miniconda)\n self.assert_images_similar(visualizer, tol=12.5, windows_tol=13.3)\n\n def test_class_prediction_error_quickmethod(self):\n \"\"\"\n Test the ClassPreditionError quickmethod\n \"\"\"\n X, y = load_occupancy(return_dataset=True).to_numpy()\n\n fig = plt.figure()\n ax = fig.add_subplot()\n\n clf = LinearSVC(random_state=42)\n viz = class_prediction_error(clf, X, y, ax=ax, random_state=42)\n\n # Not sure why the tolerance must be so high for this\n # Failing on travis with RMS 9.544\n # AppVeyor and Linux conda fail due to non-text-based differences: RMS 12.961\n self.assert_images_similar(viz, tol=13, windows_tol=13)\n\n @pytest.mark.filterwarnings(\"ignore:could not determine class_counts_\")\n def test_classes_greater_than_indices(self):\n \"\"\"\n A model error should be raised when there are more classes 
in fit than score\n \"\"\"\n X, y = load_occupancy(return_dataset=True).to_numpy()\n classes = [\"unoccupied\", \"occupied\", \"partytime\"]\n\n model = LinearSVC(random_state=42)\n model.fit(X, y)\n with pytest.raises(ModelError):\n visualizer = ClassPredictionError(model, classes=classes)\n visualizer.score(X, y)\n\n def test_classes_less_than_indices(self):\n \"\"\"\n Assert error when there is an attempt to filter classes\n \"\"\"\n X, y = load_occupancy(return_dataset=True).to_numpy()\n classes = [\"unoccupied\"]\n\n model = LinearSVC(random_state=42)\n model.fit(X, y)\n with pytest.raises(NotImplementedError):\n visualizer = ClassPredictionError(model, classes=classes)\n visualizer.score(X, y)\n\n @pytest.mark.skip(reason=\"not implemented yet\")\n def test_no_classes_provided(self):\n \"\"\"\n Assert no errors when no classes are provided\n \"\"\"\n pass\n\n def test_class_type(self):\n \"\"\"\n Test class must be either binary or multiclass type\n \"\"\"\n X, y = make_multilabel_classification()\n model = RandomForestClassifier()\n model.fit(X, y)\n with pytest.raises(YellowbrickValueError):\n visualizer = ClassPredictionError(model)\n visualizer.score(X, y)\n\n def test_score_returns_score(self):\n \"\"\"\n Test that ClassPredictionError score() returns a score between 0 and 1\n \"\"\"\n X, y = load_occupancy(return_dataset=True).to_numpy()\n\n # Create and fit the visualizer\n visualizer = ClassPredictionError(LinearSVC(random_state=42))\n visualizer.fit(X, y)\n\n # Score the visualizer\n s = visualizer.score(X, y)\n assert 0 <= s <= 1\n\n def test_with_fitted(self):\n \"\"\"\n Test that visualizer properly handles an already-fitted model\n \"\"\"\n X, y = load_occupancy(return_dataset=True).to_numpy()\n\n model = RandomForestClassifier().fit(X, y)\n classes = [\"unoccupied\", \"occupied\"]\n\n with patch.object(model, \"fit\") as mockfit:\n oz = ClassPredictionError(model, classes=classes)\n oz.fit(X, y)\n mockfit.assert_not_called()\n\n with patch.object(model, \"fit\") as mockfit:\n oz = ClassPredictionError(model, classes=classes, is_fitted=True)\n oz.fit(X, y)\n mockfit.assert_not_called()\n\n with patch.object(model, \"fit\") as mockfit:\n oz = ClassPredictionError(model, classes=classes, is_fitted=False)\n oz.fit(X, y)\n mockfit.assert_called_once_with(X, y)\n"
] |
[
[
"sklearn.ensemble.RandomForestClassifier",
"sklearn.datasets.make_multilabel_classification",
"sklearn.svm.LinearSVC",
"matplotlib.pyplot.figure"
]
] |
MMidwinter/sqlalchemy-challenge
|
[
"df00c4f50f21865181654a6498f821c27c3f238d"
] |
[
"app.py"
] |
[
"import numpy as np\n\nimport sqlalchemy\nfrom sqlalchemy.ext.automap import automap_base\nfrom sqlalchemy.orm import Session\nfrom sqlalchemy import create_engine, func\n\nfrom flask import Flask, jsonify\n\n\n#################################################\n# Database Setup\n#################################################\nengine = create_engine(\"sqlite:///hawaii.sqlite\")\n\n# reflect an existing database into a new model\nBase = automap_base()\n# reflect the tables\nBase.prepare(engine, reflect=True)\n\n# Save reference to the table\nMeasurement = Base.classes.measurement\nStation = Base.classes.station\n\n\n#################################################\n# Flask Setup\n#################################################\napp = Flask(__name__)\n\n\n#################################################\n# Flask Routes\n#################################################\[email protected](\"/\")\ndef welcome():\n \"\"\"List all available api routes.\"\"\"\n return (\n f\"Available Routes:<br/>\"\n f\"/api/v1.0/precipitation<br/>\"\n f\"/api/v1.0/stations<br/>\"\n f\"/api/v1.0/tobs<br/>\"\n f\"/api/v1.0/<start><br/>\"\n f\"/api/v1.0/<start>/<end>\"\n )\n\[email protected](\"/api/v1.0/precipitation\")\ndef precipitation():\n #Start the session\n session = Session(engine)\n\n #Record the results\n results = session.query(Measurement.date, Measurement.prcp).all()\n\n session.close() \n \n #Record entries into database\n precipitation_list = []\n \n for date, prcp in results:\n precipitation_dict = {}\n precipitation_dict['Date'] = date\n precipitation_dict['Precipitation'] = prcp\n precipitation_list.append(precipitation_dict) \n\n return jsonify(precipitation_list)\n\[email protected](\"/api/v1.0/stations\")\ndef stations():\n #Start the session\n session = Session(engine)\n\n #Record the results\n results = session.query(Measurement.station).all()\n\n session.close() \n \n #Record entries into database\n station_list = []\n \n for station in results:\n station_dict = {}\n station_dict['Station'] = station\n station_list.append(station_dict) \n\n return jsonify(station_list)\n\[email protected](\"/api/v1.0/tobs\")\ndef tobs():\n #Start the session\n session = Session(engine)\n\n #Record the results\n results = session.query(Measurement.date, Measurement.tobs).filter(Measurement.date >= \"2016-08-23\").filter(Measurement.station == 'USC00519281').all()\n\n session.close() \n \n #Record entries into database\n most_active_list = []\n \n for date, tobs in results:\n most_active_dict = {}\n most_active_dict['Date'] = date\n most_active_dict['Temp.'] = tobs\n most_active_list.append(most_active_dict) \n\n return jsonify(most_active_list)\n\[email protected](\"/api/v1.0/<start>\")\[email protected](\"/api/v1.0/<start>/<end>\")\ndef start_date(start, end = None):\n\n canonicalized = start.replace(\" \", \"\").lower()\n \n session = Session(engine)\n if not end:\n \n result = session.query(func.min(Measurement.tobs), func.max(Measurement.tobs),func.avg(Measurement.tobs)).filter(Measurement.date >= start).all()\n \n else: \n result = session.query(func.min(Measurement.tobs), func.max(Measurement.tobs),func.avg(Measurement.tobs)).filter(Measurement.date >= start).filter(Measurement.date <= end).all()\n \n temp_average = list(np.ravel(result))\n\n session.close() \n\n\n return jsonify(temp_average)\n\nif __name__ == '__main__':\n app.run(debug=True)"
] |
[
[
"numpy.ravel"
]
] |
kstrauch94/SMAC3
|
[
"8e9b33689f8c44270f60c22826e38f0a6bca47d4",
"8e9b33689f8c44270f60c22826e38f0a6bca47d4"
] |
[
"smac/epm/base_epm.py",
"smac/utils/validate.py"
] |
[
"import copy\nimport typing\n\nimport numpy as np\n\nfrom sklearn.decomposition import PCA\nfrom sklearn.preprocessing import MinMaxScaler\nfrom sklearn.exceptions import NotFittedError\n\nfrom smac.configspace import ConfigurationSpace\nfrom smac.utils.constants import VERY_SMALL_NUMBER\nfrom smac.utils.logging import PickableLoggerAdapter\n\n__author__ = \"Marius Lindauer\"\n__copyright__ = \"Copyright 2016, ML4AAD\"\n__license__ = \"3-clause BSD\"\n__maintainer__ = \"Marius Lindauer\"\n__email__ = \"[email protected]\"\n__version__ = \"0.0.1\"\n\n\nclass AbstractEPM(object):\n \"\"\"Abstract implementation of the EPM API.\n\n **Note:** The input dimensionality of Y for training and the output dimensions\n of all predictions (also called ``n_objectives``) depends on the concrete\n implementation of this abstract class.\n\n Attributes\n ----------\n instance_features : np.ndarray(I, K)\n Contains the K dimensional instance features\n of the I different instances\n pca : sklearn.decomposition.PCA\n Object to perform PCA\n pca_components : float\n Number of components to keep or None\n n_feats : int\n Number of instance features\n n_params : int\n Number of parameters in a configuration (only available after train has\n been called)\n scaler : sklearn.preprocessing.MinMaxScaler\n Object to scale data to be withing [0, 1]\n var_threshold : float\n Lower bound vor variance. If estimated variance < var_threshold, the set\n to var_threshold\n types : list\n If set, contains a list with feature types (cat,const) of input vector\n \"\"\"\n\n def __init__(self,\n configspace: ConfigurationSpace,\n types: typing.List[int],\n bounds: typing.List[typing.Tuple[float, float]],\n seed: int,\n instance_features: typing.Optional[np.ndarray] = None,\n pca_components: typing.Optional[int] = 7,\n ) -> None:\n \"\"\"Constructor\n\n Parameters\n ----------\n configspace : ConfigurationSpace\n Configuration space to tune for.\n types : List[int]\n Specifies the number of categorical values of an input dimension where\n the i-th entry corresponds to the i-th input dimension. Let's say we\n have 2 dimension where the first dimension consists of 3 different\n categorical choices and the second dimension is continuous than we\n have to pass [3, 0]. Note that we count starting from 0.\n bounds : List[Tuple[float, float]]\n bounds of input dimensions: (lower, uppper) for continuous dims; (n_cat, np.nan) for categorical dims\n seed : int\n The seed that is passed to the model library.\n instance_features : np.ndarray (I, K)\n Contains the K dimensional instance features\n of the I different instances\n pca_components : float\n Number of components to keep when using PCA to reduce\n dimensionality of instance features. 
Requires to\n set n_feats (> pca_dims).\n \"\"\"\n self.configspace = configspace\n self.seed = seed\n self.instance_features = instance_features\n self.pca_components = pca_components\n\n if instance_features is not None:\n self.n_feats = instance_features.shape[1]\n else:\n self.n_feats = 0\n\n self.n_params = None # will be updated on train()\n\n self.pca = PCA(n_components=self.pca_components)\n self.scaler = MinMaxScaler()\n\n # Never use a lower variance than this\n self.var_threshold = VERY_SMALL_NUMBER\n\n self.bounds = bounds\n self.types = types\n # Initial types array which is used to reset the type array at every call to train()\n self._initial_types = copy.deepcopy(types)\n\n self.logger = PickableLoggerAdapter(self.__module__ + \".\" + self.__class__.__name__)\n\n def train(self, X: np.ndarray, Y: np.ndarray) -> 'AbstractEPM':\n \"\"\"Trains the EPM on X and Y.\n\n Parameters\n ----------\n X : np.ndarray [n_samples, n_features (config + instance features)]\n Input data points.\n Y : np.ndarray [n_samples, n_objectives]\n The corresponding target values. n_objectives must match the\n number of target names specified in the constructor.\n\n Returns\n -------\n self : AbstractEPM\n \"\"\"\n self.types = copy.deepcopy(self._initial_types)\n\n if len(X.shape) != 2:\n raise ValueError('Expected 2d array, got %dd array!' % len(X.shape))\n if X.shape[1] != len(self.types):\n raise ValueError('Feature mismatch: X should have %d features, but has %d' % (len(self.types), X.shape[1]))\n if X.shape[0] != Y.shape[0]:\n raise ValueError('X.shape[0] (%s) != y.shape[0] (%s)' % (X.shape[0], Y.shape[0]))\n\n self.n_params = X.shape[1] - self.n_feats\n\n # reduce dimensionality of features of larger than PCA_DIM\n if self.pca_components and X.shape[0] > self.pca.n_components and self.n_feats >= self.pca_components:\n X_feats = X[:, -self.n_feats:]\n # scale features\n X_feats = self.scaler.fit_transform(X_feats)\n X_feats = np.nan_to_num(X_feats) # if features with max == min\n # PCA\n X_feats = self.pca.fit_transform(X_feats)\n X = np.hstack((X[:, :self.n_params], X_feats))\n if hasattr(self, \"types\"):\n # for RF, adapt types list\n # if X_feats.shape[0] < self.pca, X_feats.shape[1] ==\n # X_feats.shape[0]\n self.types = np.array(\n np.hstack((self.types[:self.n_params], np.zeros((X_feats.shape[1])))),\n dtype=np.uint,\n )\n\n return self._train(X, Y)\n\n def _train(self, X: np.ndarray, Y: np.ndarray) -> 'AbstractEPM':\n \"\"\"Trains the random forest on X and y.\n\n Parameters\n ----------\n X : np.ndarray [n_samples, n_features (config + instance features)]\n Input data points.\n Y : np.ndarray [n_samples, n_objectives]\n The corresponding target values. n_objectives must match the\n number of target names specified in the constructor.\n\n Returns\n -------\n self\n \"\"\"\n raise NotImplementedError\n\n def predict(self, X: np.ndarray) -> typing.Tuple[np.ndarray, np.ndarray]:\n \"\"\"\n Predict means and variances for given X.\n\n Parameters\n ----------\n X : np.ndarray of shape = [n_samples, n_features (config + instance features)]\n Training samples\n\n Returns\n -------\n means : np.ndarray of shape = [n_samples, n_objectives]\n Predictive mean\n vars : np.ndarray of shape = [n_samples, n_objectives]\n Predictive variance\n \"\"\"\n if len(X.shape) != 2:\n raise ValueError('Expected 2d array, got %dd array!' % len(X.shape))\n if X.shape[1] != len(self._initial_types):\n raise ValueError('Rows in X should have %d entries but have %d!' 
% (len(self._initial_types), X.shape[1]))\n\n if self.pca_components:\n try:\n X_feats = X[:, -self.n_feats:]\n X_feats = self.scaler.transform(X_feats)\n X_feats = self.pca.transform(X_feats)\n X = np.hstack((X[:, :self.n_params], X_feats))\n except NotFittedError:\n pass # PCA not fitted if only one training sample\n\n if X.shape[1] != len(self.types):\n raise ValueError('Rows in X should have %d entries but have %d!' % (len(self.types), X.shape[1]))\n\n mean, var = self._predict(X)\n\n if len(mean.shape) == 1:\n mean = mean.reshape((-1, 1))\n if len(var.shape) == 1:\n var = var.reshape((-1, 1))\n\n return mean, var\n\n def _predict(self, X: np.ndarray) -> typing.Tuple[np.ndarray, np.ndarray]:\n \"\"\"\n Predict means and variances for given X.\n\n Parameters\n ----------\n X : np.ndarray\n [n_samples, n_features (config + instance features)]\n\n Returns\n -------\n means : np.ndarray of shape = [n_samples, n_objectives]\n Predictive mean\n vars : np.ndarray of shape = [n_samples, n_objectives]\n Predictive variance\n \"\"\"\n raise NotImplementedError()\n\n def predict_marginalized_over_instances(self, X: np.ndarray) -> typing.Tuple[np.ndarray, np.ndarray]:\n \"\"\"Predict mean and variance marginalized over all instances.\n\n Returns the predictive mean and variance marginalised over all\n instances for a set of configurations.\n\n Parameters\n ----------\n X : np.ndarray\n [n_samples, n_features (config)]\n\n Returns\n -------\n means : np.ndarray of shape = [n_samples, 1]\n Predictive mean\n vars : np.ndarray of shape = [n_samples, 1]\n Predictive variance\n \"\"\"\n\n if len(X.shape) != 2:\n raise ValueError('Expected 2d array, got %dd array!' % len(X.shape))\n if X.shape[1] != len(self.bounds):\n raise ValueError('Rows in X should have %d entries but have %d!' %\n (len(self.bounds), X.shape[1]))\n\n if self.instance_features is None or \\\n len(self.instance_features) == 0:\n mean, var = self.predict(X)\n var[var < self.var_threshold] = self.var_threshold\n var[np.isnan(var)] = self.var_threshold\n return mean, var\n else:\n n_instances = len(self.instance_features)\n\n mean = np.zeros(X.shape[0])\n var = np.zeros(X.shape[0])\n for i, x in enumerate(X):\n X_ = np.hstack(\n (np.tile(x, (n_instances, 1)), self.instance_features))\n means, vars = self.predict(X_)\n # VAR[1/n (X_1 + ... + X_n)] =\n # 1/n^2 * ( VAR(X_1) + ... + VAR(X_n))\n # for independent X_1 ... X_n\n var_x = np.sum(vars) / (len(vars) ** 2)\n if var_x < self.var_threshold:\n var_x = self.var_threshold\n\n var[i] = var_x\n mean[i] = np.mean(means)\n\n if len(mean.shape) == 1:\n mean = mean.reshape((-1, 1))\n if len(var.shape) == 1:\n var = var.reshape((-1, 1))\n\n return mean, var\n",
"import os\n\nfrom collections import namedtuple\nfrom joblib import Parallel, delayed\nfrom typing import Union\nimport typing\nimport logging\nimport numpy as np\n\nfrom smac.configspace import Configuration, convert_configurations_to_array\nfrom smac.epm.rf_with_instances import RandomForestWithInstances\nfrom smac.epm.rfr_imputator import RFRImputator\nfrom smac.epm.util_funcs import get_types\nfrom smac.runhistory.runhistory import RunHistory, RunKey, StatusType\nfrom smac.runhistory.runhistory2epm import RunHistory2EPM4Cost\nfrom smac.scenario.scenario import Scenario\nfrom smac.stats.stats import Stats\nfrom smac.tae.execute_ta_run import ExecuteTARun\nfrom smac.tae.execute_ta_run_old import ExecuteTARunOld\nfrom smac.utils.constants import MAXINT\n\n__author__ = \"Joshua Marben\"\n__copyright__ = \"Copyright 2017, ML4AAD\"\n__license__ = \"3-clause BSD\"\n__maintainer__ = \"Joshua Marben\"\n__email__ = \"[email protected]\"\n\n\ndef _unbound_tae_starter(\n tae: ExecuteTARun, *args: typing.Any, **kwargs: typing.Any\n) -> typing.Tuple[StatusType, float, float, typing.Dict]:\n \"\"\"\n Unbound function to be used by joblibs Parallel, since directly passing the\n TAE results in pickling-problems.\n\n Parameters\n ----------\n tae: ExecuteTARun\n tae to be used\n *args, **kwargs: various\n arguments to the tae\n\n Returns\n -------\n tae_results: tuple\n return from tae.start\n \"\"\"\n return tae.start(*args, **kwargs)\n\n\n_Run = namedtuple('Run', 'config inst seed inst_specs')\n\n\nclass Validator(object):\n \"\"\"\n Validator for the output of SMAC-scenarios.\n Evaluates specified configurations on specified instances.\n \"\"\"\n\n def __init__(self,\n scenario: Scenario,\n trajectory: typing.Optional[typing.List],\n rng: Union[np.random.RandomState, int, None] = None) -> None:\n \"\"\"\n Construct Validator for given scenario and trajectory.\n\n Parameters\n ----------\n scenario: Scenario\n scenario object for cutoff, instances, features and specifics\n trajectory: trajectory-list\n trajectory to take incumbent(s) from\n rng: np.random.RandomState or int\n Random number generator or seed\n \"\"\"\n self.logger = logging.getLogger(\n self.__module__ + \".\" + self.__class__.__name__)\n\n self.traj = trajectory\n self.scen = scenario\n self.epm = None # type: typing.Optional[RandomForestWithInstances]\n\n if isinstance(rng, np.random.RandomState):\n self.rng = rng\n elif isinstance(rng, int):\n self.rng = np.random.RandomState(seed=rng)\n else:\n self.logger.debug('no seed given, using default seed of 1')\n num_run = 1\n self.rng = np.random.RandomState(seed=num_run)\n\n def _save_results(\n self,\n rh: RunHistory,\n output_fn: typing.Optional[str],\n backup_fn: typing.Optional[str] = None,\n ) -> None:\n \"\"\" Helper to save results to file\n\n Parameters\n ----------\n rh: RunHistory\n runhistory to save\n output_fn: str\n if ends on '.json': filename to save history to\n else: directory to save runhistory to (filename is backup_fn)\n backup_fn: str\n if output_fn does not end on '.json', treat output_fn as dir and\n append backup_fn as filename (if output_fn ends on '.json', this\n argument is ignored)\n \"\"\"\n if not output_fn:\n self.logger.info(\"No output specified, validated runhistory not saved.\")\n return\n # Check if a folder or a file is specified as output\n if not output_fn.endswith('.json'):\n if backup_fn is None:\n raise ValueError('If output_fn does not end with .json the argument backup_fn needs to be given.')\n output_dir = output_fn\n output_fn = 
os.path.join(output_dir, backup_fn)\n self.logger.debug(\"Output is \\\"%s\\\", changing to \\\"%s\\\"!\", output_dir, output_fn)\n base = os.path.split(output_fn)[0]\n if not base == \"\" and not os.path.exists(base):\n self.logger.debug(\"Folder (\\\"%s\\\") doesn't exist, creating.\", base)\n os.makedirs(base)\n rh.save_json(output_fn)\n self.logger.info(\"Saving validation-results in %s\", output_fn)\n\n def validate(self,\n config_mode: Union[str, typing.List[Configuration]] = 'def',\n instance_mode: Union[str, typing.List[str]] = 'test',\n repetitions: int = 1,\n n_jobs: int = 1,\n backend: str = 'threading',\n runhistory: RunHistory = None,\n tae: ExecuteTARun = None,\n output_fn: typing.Optional[str] = None,\n ) -> RunHistory:\n \"\"\"\n Validate configs on instances and save result in runhistory.\n If a runhistory is provided as input it is important that you run it on the same/comparable hardware.\n\n side effect: if output is specified, saves runhistory to specified\n output directory.\n\n Parameters\n ----------\n config_mode: str or list<Configuration>\n string or directly a list of Configuration.\n string from [def, inc, def+inc, wallclock_time, cpu_time, all].\n time evaluates at cpu- or wallclock-timesteps of:\n [max_time/2^0, max_time/2^1, max_time/2^3, ..., default]\n with max_time being the highest recorded time\n instance_mode: str or list<str>\n what instances to use for validation, either from\n [train, test, train+test] or directly a list of instances\n repetitions: int\n number of repetitions in nondeterministic algorithms\n n_jobs: int\n number of parallel processes used by joblib\n backend: str\n what backend joblib should use for parallel runs\n runhistory: RunHistory\n optional, RunHistory-object to reuse runs\n tae: ExecuteTARun\n tae to be used. if None, will initialize ExecuteTARunOld\n output_fn: str\n path to runhistory to be saved. 
if the suffix is not '.json', will\n be interpreted as directory and filename will be\n 'validated_runhistory.json'\n\n Returns\n -------\n runhistory: RunHistory\n runhistory with validated runs\n \"\"\"\n self.logger.debug(\"Validating configs '%s' on instances '%s', repeating %d times\"\n \" with %d parallel runs on backend '%s'.\",\n config_mode, instance_mode, repetitions, n_jobs, backend)\n\n # Get all runs to be evaluated as list\n runs, validated_rh = self._get_runs(config_mode, instance_mode, repetitions, runhistory)\n\n # Create new Stats without limits\n inf_scen = Scenario({\n 'run_obj': self.scen.run_obj,\n 'cutoff_time': self.scen.cutoff, # type: ignore[attr-defined] # noqa F821\n 'output_dir': \"\"})\n inf_stats = Stats(inf_scen)\n inf_stats.start_timing()\n\n # Create TAE\n if not tae:\n tae = ExecuteTARunOld(ta=self.scen.ta, # type: ignore[attr-defined] # noqa F821\n runhistory=runhistory,\n stats=inf_stats,\n run_obj=self.scen.run_obj,\n par_factor=self.scen.par_factor, # type: ignore[attr-defined] # noqa F821\n cost_for_crash=self.scen.cost_for_crash) # type: ignore[attr-defined] # noqa F821\n else:\n # Inject endless-stats\n tae.stats = inf_stats\n\n # Validate!\n run_results = self._validate_parallel(tae, runs, n_jobs, backend)\n assert len(run_results) == len(runs), (run_results, runs)\n\n # tae returns (status, cost, runtime, additional_info)\n # Add runs to RunHistory\n for run, result in zip(runs, run_results):\n validated_rh.add(config=run.config,\n cost=result[1],\n time=result[2],\n status=result[0],\n instance_id=run.inst,\n seed=run.seed,\n additional_info=result[3])\n\n self._save_results(validated_rh, output_fn, backup_fn=\"validated_runhistory.json\")\n return validated_rh\n\n def _validate_parallel(\n self,\n tae: ExecuteTARun,\n runs: typing.List[_Run],\n n_jobs: int,\n backend: str,\n ) -> typing.List[typing.Tuple[StatusType, float, float, typing.Dict]]:\n \"\"\"\n Validate runs with joblibs Parallel-interface\n\n Parameters\n ----------\n tae: ExecuteTARun\n tae to be used for validation\n runs: list<_Run>\n list with _Run-objects\n [_Run(config=CONFIG1,inst=INSTANCE1,seed=SEED1,inst_specs=INST_SPECIFICS1), ...]\n n_jobs: int\n number of cpus to use for validation (-1 to use all)\n backend: str\n what backend to use for parallelization\n\n Returns\n -------\n run_results: list<tuple(tae-returns)>\n results as returned by tae\n \"\"\"\n # Runs with parallel\n run_results = Parallel(n_jobs=n_jobs, backend=backend)(\n delayed(_unbound_tae_starter)(tae, run.config,\n run.inst,\n self.scen.cutoff, # type: ignore[attr-defined] # noqa F821\n run.seed,\n run.inst_specs,\n capped=False) for run in runs)\n return run_results\n\n def validate_epm(self,\n config_mode: Union[str, typing.List[Configuration]] = 'def',\n instance_mode: Union[str, typing.List[str]] = 'test',\n repetitions: int = 1,\n runhistory: typing.Optional[RunHistory] = None,\n output_fn: typing.Optional[str] = None,\n reuse_epm: bool = True,\n ) -> RunHistory:\n \"\"\"\n Use EPM to predict costs/runtimes for unknown config/inst-pairs.\n\n side effect: if output is specified, saves runhistory to specified\n output directory.\n\n Parameters\n ----------\n output_fn: str\n path to runhistory to be saved. 
if the suffix is not '.json', will\n be interpreted as directory and filename will be\n 'validated_runhistory_EPM.json'\n config_mode: str or list<Configuration>\n string or directly a list of Configuration, string from [def, inc, def+inc, wallclock_time, cpu_time, all].\n time evaluates at cpu- or wallclock-timesteps of:\n [max_time/2^0, max_time/2^1, max_time/2^3, ..., default] with max_time being the highest recorded time\n instance_mode: str or list<str>\n what instances to use for validation, either from\n [train, test, train+test] or directly a list of instances\n repetitions: int\n number of repetitions in nondeterministic algorithms\n runhistory: RunHistory\n optional, RunHistory-object to reuse runs\n reuse_epm: bool\n if true (and if `self.epm`), reuse epm to validate runs\n\n Returns\n -------\n runhistory: RunHistory\n runhistory with predicted runs\n \"\"\"\n if not isinstance(runhistory, RunHistory) and (self.epm is None or not reuse_epm):\n raise ValueError(\"No runhistory specified for validating with EPM!\")\n elif not reuse_epm or self.epm is None:\n # Create RandomForest\n types, bounds = get_types(self.scen.cs, self.scen.feature_array) # type: ignore[attr-defined] # noqa F821\n epm = RandomForestWithInstances(\n configspace=self.scen.cs, # type: ignore[attr-defined] # noqa F821\n types=types,\n bounds=bounds,\n instance_features=self.scen.feature_array,\n seed=self.rng.randint(MAXINT),\n ratio_features=1.0,\n )\n # Use imputor if objective is runtime\n imputor = None\n impute_state = None\n impute_censored_data = False\n if self.scen.run_obj == 'runtime':\n threshold = self.scen.cutoff * self.scen.par_factor # type: ignore[attr-defined] # noqa F821\n imputor = RFRImputator(rng=self.rng,\n cutoff=self.scen.cutoff, # type: ignore[attr-defined] # noqa F821\n threshold=threshold,\n model=epm)\n impute_censored_data = True\n impute_state = [StatusType.CAPPED]\n # Transform training data (from given rh)\n rh2epm = RunHistory2EPM4Cost(num_params=len(self.scen.cs.get_hyperparameters()), # type: ignore[attr-defined] # noqa F821\n scenario=self.scen, rng=self.rng,\n impute_censored_data=impute_censored_data,\n imputor=imputor,\n impute_state=impute_state)\n assert runhistory is not None # please mypy\n X, y = rh2epm.transform(runhistory)\n self.logger.debug(\"Training model with data of shape X: %s, y:%s\",\n str(X.shape), str(y.shape))\n # Train random forest\n epm.train(X, y)\n else:\n epm = typing.cast(RandomForestWithInstances, self.epm)\n\n # Predict desired runs\n runs, rh_epm = self._get_runs(config_mode, instance_mode, repetitions, runhistory)\n\n feature_array_size = len(self.scen.cs.get_hyperparameters()) # type: ignore[attr-defined] # noqa F821\n if self.scen.feature_array is not None:\n feature_array_size += self.scen.feature_array.shape[1]\n\n X_pred = np.empty((len(runs), feature_array_size))\n for idx, run in enumerate(runs):\n if self.scen.feature_array is not None and run.inst is not None:\n X_pred[idx] = np.hstack([convert_configurations_to_array([run.config])[0],\n self.scen.feature_dict[run.inst]])\n else:\n X_pred[idx] = convert_configurations_to_array([run.config])[0]\n self.logger.debug(\"Predicting desired %d runs, data has shape %s\",\n len(runs), str(X_pred.shape))\n\n y_pred = epm.predict(X_pred)\n self.epm = epm\n\n # Add runs to runhistory\n for run, pred in zip(runs, y_pred[0]):\n rh_epm.add(config=run.config,\n cost=float(pred),\n time=float(pred),\n status=StatusType.SUCCESS,\n instance_id=run.inst,\n seed=-1,\n 
additional_info={\"additional_info\":\n \"ESTIMATED USING EPM!\"})\n\n if output_fn:\n self._save_results(rh_epm, output_fn, backup_fn=\"validated_runhistory_EPM.json\")\n return rh_epm\n\n def _get_runs(self,\n configs: Union[str, typing.List[Configuration]],\n insts: Union[str, typing.List[str]],\n repetitions: int = 1,\n runhistory: RunHistory = None,\n ) -> typing.Tuple[typing.List[_Run], RunHistory]:\n \"\"\"\n Generate list of SMAC-TAE runs to be executed. This means\n combinations of configs with all instances on a certain number of seeds.\n\n side effect: Adds runs that don't need to be reevaluated to self.rh!\n\n Parameters\n ----------\n configs: str or list<Configuration>\n string or directly a list of Configuration\n str from [def, inc, def+inc, wallclock_time, cpu_time, all]\n time evaluates at cpu- or wallclock-timesteps of:\n [max_time/2^0, max_time/2^1, max_time/2^3, ..., default]\n with max_time being the highest recorded time\n insts: str or list<str>\n what instances to use for validation, either from\n [train, test, train+test] or directly a list of instances\n repetitions: int\n number of seeds per instance/config-pair to be evaluated\n runhistory: RunHistory\n optional, try to reuse this runhistory and save some runs\n\n Returns\n -------\n runs: list<_Run>\n list with _Runs\n [_Run(config=CONFIG1,inst=INSTANCE1,seed=SEED1,inst_specs=INST_SPECIFICS1),\n _Run(config=CONFIG2,inst=INSTANCE2,seed=SEED2,inst_specs=INST_SPECIFICS2),\n ...]\n \"\"\"\n # Get relevant configurations and instances\n if isinstance(configs, str):\n configs = self._get_configs(configs)\n if isinstance(insts, str):\n instances = self._get_instances(insts) # type: typing.Sequence[typing.Union[str, None]]\n elif insts is not None:\n instances = insts\n else:\n instances = [None]\n # If no instances are given, fix the instances to one \"None\" instance\n if not instances:\n instances = [None]\n\n # If algorithm is deterministic, fix repetitions to 1\n if self.scen.deterministic and repetitions != 1: # type: ignore[attr-defined] # noqa F821\n self.logger.warning(\"Specified %d repetitions, but fixing to 1, \"\n \"because algorithm is deterministic.\", repetitions)\n repetitions = 1\n\n # Extract relevant information from given runhistory\n inst_seed_config = self._process_runhistory(configs, instances, runhistory)\n\n # Now create the actual run-list\n runs = []\n # Counter for runs without the need of recalculation\n runs_from_rh = 0\n # If we reuse runs, we want to return them as well\n new_rh = RunHistory()\n\n for i in sorted(instances):\n for rep in range(repetitions):\n # First, find a seed and add all the data we can take from the\n # given runhistory to \"our\" validation runhistory.\n configs_evaluated = [] # type: Configuration\n if runhistory and i in inst_seed_config:\n # Choose seed based on most often evaluated inst-seed-pair\n seed, configs_evaluated = inst_seed_config[i].pop(0)\n # Delete inst if all seeds are used\n if not inst_seed_config[i]:\n inst_seed_config.pop(i)\n # Add runs to runhistory\n for c in configs_evaluated[:]:\n runkey = RunKey(runhistory.config_ids[c], i, seed)\n cost, time, status, additional_info = runhistory.data[runkey]\n if status in [StatusType.CRASHED, StatusType.ABORT, StatusType.CAPPED]:\n # Not properly executed target algorithm runs should be repeated\n configs_evaluated.remove(c)\n continue\n new_rh.add(c, cost, time, status, instance_id=i,\n seed=seed, additional_info=additional_info)\n runs_from_rh += 1\n else:\n # If no runhistory or no entries for 
instance, get new seed\n seed = self.rng.randint(MAXINT)\n\n # We now have a seed and add all configs that are not already\n # evaluated on that seed to the runs-list. This way, we\n # guarantee the same inst-seed-pairs for all configs.\n for config in [c for c in configs if c not in configs_evaluated]:\n # Only use specifics if specific exists, else use string \"0\"\n specs = self.scen.instance_specific[i] if i and i in self.scen.instance_specific else \"0\"\n runs.append(_Run(config=config,\n inst=i,\n seed=seed,\n inst_specs=specs))\n\n self.logger.info(\"Collected %d runs from %d configurations on %d \"\n \"instances with %d repetitions. Reusing %d runs from \"\n \"given runhistory.\", len(runs), len(configs),\n len(instances), repetitions, runs_from_rh)\n\n return runs, new_rh\n\n def _process_runhistory(\n self,\n configs: typing.List[Configuration],\n insts: typing.Sequence[typing.Optional[str]],\n runhistory: typing.Optional[RunHistory],\n ) -> typing.Dict[str, typing.List[typing.Tuple[int, typing.List[Configuration]]]]:\n \"\"\"\n Processes runhistory from self._get_runs by extracting already evaluated\n (relevant) config-inst-seed tuples.\n\n Parameters\n ----------\n configs: list(Configuration)\n list of configs of interest\n insts: list(str)\n list of instances of interest\n runhistory: RunHistory\n runhistory to extract runs from\n\n Returns\n -------\n inst_seed_config: dict<str : list(tuple(int, tuple(configs)))>\n dictionary mapping instances to a list of tuples of already used\n seeds and the configs that this inst-seed-pair has been evaluated\n on, sorted by the number of configs\n \"\"\"\n # We want to reuse seeds that have been used on most configurations\n # To this end, we create a dictionary as {instances:{seed:[configs]}}\n # Like this we can easily retrieve the most used instance-seed pairs to\n # minimize the number of runs to be evaluated\n if runhistory:\n inst_seed_config = {} # type: typing.Dict[str, typing.Dict[int, typing.List[Configuration]]]\n relevant = dict()\n for key in runhistory.data:\n if (runhistory.ids_config[key.config_id] in configs and key.instance_id in insts):\n relevant[key] = runhistory.data[key]\n\n # Change data-structure to {instances:[(seed1, (configs)), (seed2, (configs), ... 
]}\n # to make most used seed easily accessible, we sort after length of configs\n for key in relevant:\n inst, seed = key.instance_id, key.seed\n config = runhistory.ids_config[key.config_id]\n if inst in inst_seed_config:\n if seed in inst_seed_config[inst]:\n inst_seed_config[inst][seed].append(config)\n else:\n inst_seed_config[inst][seed] = [config]\n else:\n inst_seed_config[inst] = {seed: [config]}\n\n return {\n i: sorted(\n [(seed, list(inst_seed_config[i][seed])) for seed in inst_seed_config[i]],\n key=lambda x: len(x[1])\n ) for i in inst_seed_config\n }\n else:\n rval = {} # type: typing.Dict[str, typing.List[typing.Tuple[int, typing.List[Configuration]]]]\n return rval\n\n def _get_configs(self, mode: str) -> typing.List[str]:\n \"\"\"\n Return desired configs\n\n Parameters\n ----------\n mode: str\n str from [def, inc, def+inc, wallclock_time, cpu_time, all]\n time evaluates at cpu- or wallclock-timesteps of:\n [max_time/2^0, max_time/2^1, max_time/2^3, ..., default]\n with max_time being the highest recorded time\n\n Returns\n -------\n configs: list<Configuration>\n list with desired configurations\n \"\"\"\n\n # Get trajectory and make sure it's not None to please mypy\n traj = self.traj\n assert traj is not None # please mypy\n\n # Add desired configs\n configs = []\n mode = mode.lower()\n if mode not in ['def', 'inc', 'def+inc', 'wallclock_time', 'cpu_time',\n 'all']:\n raise ValueError(\"%s not a valid option for config_mode in validation.\"\n % mode)\n if mode == \"def\" or mode == \"def+inc\":\n configs.append(self.scen.cs.get_default_configuration()) # type: ignore[attr-defined] # noqa F821\n if mode == \"inc\" or mode == \"def+inc\":\n configs.append(traj[-1][\"incumbent\"])\n if mode in [\"wallclock_time\", \"cpu_time\"]:\n # get highest time-entry and add entries from there\n # not using wallclock_limit in case it's inf\n if (mode == \"wallclock_time\" and np.isfinite(self.scen.wallclock_limit)):\n max_time = self.scen.wallclock_limit\n elif (mode == \"cpu_time\" and np.isfinite(self.scen.algo_runs_timelimit)):\n max_time = self.scen.algo_runs_timelimit\n else:\n max_time = traj[-1][mode]\n counter = 2 ** 0\n for entry in traj[::-1]:\n if (entry[mode] <= max_time / counter and entry[\"incumbent\"] not in configs):\n configs.append(entry[\"incumbent\"])\n counter *= 2\n if not traj[0][\"incumbent\"] in configs:\n configs.append(traj[0][\"incumbent\"]) # add first\n if mode == \"all\":\n for entry in traj:\n if not entry[\"incumbent\"] in configs:\n configs.append(entry[\"incumbent\"])\n self.logger.debug(\"Gathered %d configurations for mode %s.\",\n len(configs), mode)\n return configs\n\n def _get_instances(self, mode: str) -> typing.List[str]:\n \"\"\"\n Get desired instances\n\n Parameters\n ----------\n mode: str\n what instances to use for validation, from [train, test, train+test]\n\n Returns\n -------\n instances: list<str>\n instances to be used\n \"\"\"\n instance_mode = mode.lower()\n if mode not in ['train', 'test', 'train+test']:\n raise ValueError(\"%s not a valid option for instance_mode in validation.\"\n % mode)\n\n # Make sure if instances matter, than instances should be passed\n if ((instance_mode == 'train' and self.scen.train_insts == [None]) or (\n instance_mode == 'test' and self.scen.test_insts == [None])):\n self.logger.warning(\"Instance mode is set to %s, but there are no \"\n \"%s-instances specified in the scenario. 
Setting instance mode to\"\n \"\\\"train+test\\\"!\", instance_mode, instance_mode)\n instance_mode = 'train+test'\n\n instances = [] # type: typing.List[str]\n if (instance_mode == 'train' or instance_mode == 'train+test') and not self.scen.train_insts == [None]:\n instances.extend(self.scen.train_insts)\n if (instance_mode == 'test' or instance_mode == 'train+test') and not self.scen.test_insts == [None]:\n instances.extend(self.scen.test_insts)\n return instances\n"
] |
[
[
"numpy.hstack",
"numpy.sum",
"numpy.isnan",
"numpy.nan_to_num",
"numpy.tile",
"numpy.mean",
"numpy.zeros",
"sklearn.decomposition.PCA",
"sklearn.preprocessing.MinMaxScaler"
],
[
"numpy.random.RandomState",
"numpy.isfinite"
]
] |
JoshuaJoost/GNN_SS20
|
[
"6b905319f2e51b71569354c347805abce9df3cb1"
] |
[
"Aufgaben/abgabe2/view.py"
] |
[
"__authors__ = \"Rosario Allegro (1813064), Sedat Cakici (1713179), Joshua Joost (1626034)\"\n# maintainer = who fixes buggs?\n__maintainer = __authors__\n__date__ = \"2020-04-23\"\n__version__ = \"0.0\"\n__status__ = \"Development\"\n##--- TODO \n# - Graph showing the learning progress of the neural network\n# - Let the neural network draw the unit circle\n# - testen\n\n# kernel imports\nimport numpy as np\nimport matplotlib\nfrom matplotlib import pyplot as plt\n\nWINDOW_X_INCH = 9.0\nWINDOW_Y_INCH = 6.5\n\ndef printPerformance(data, summary=False):\n\n #TODO + errorPerformance: Zusätzliche, eindeutige Markierung für die erreichte Quote auf der linken Skala oder als zusätzliches label\n if(summary is True):\n ax = plt.gca()\n else:\n fig, ax = plt.subplots()\n fig.suptitle(\"Statistik über Neuronales Netzwerk\", fontsize=18, fontweight='bold')\n fig.subplots_adjust(top=0.83)\n\n ax.plot(data, 's-', markersize=6, color='blue')\n ax.set(xlabel='Epoche(n)', ylabel='Performance (%)', title='Trefferquote je Epoche')\n ax.grid( axis='y', linestyle='--')\n ax.set_xlim(0,)\n\n return ax if summary is True else plt.show()\n pass\n\n\ndef printErrorPerformance(dataError, summary=False):\n\n if(summary is True):\n ax = plt.gca()\n else:\n fig, ax = plt.subplots()\n fig.suptitle(\"Statistik über Neuronales Netzwerk\", fontsize=18, fontweight='bold')\n fig.subplots_adjust(top=0.83)\n\n ax.plot(dataError, 's-', markersize=6, color='darkred')\n ax.set(xlabel='Epoche(n)', ylabel='Fehlerrate (%)', title='Fehlerquote je Epoche')\n ax.grid( axis='y', linestyle='--')\n ax.set_xlim(0,)\n\n return ax if summary is True else plt.show()\n pass\n\n\ndef printSummary(dataError, dataPerformance, value_range, query):\n\n fig = plt.figure()\n fig.set_size_inches(WINDOW_X_INCH, WINDOW_Y_INCH)\n \n # TODO kürzerer Code?\n #for add_print in [printErrorPerformance(dataError, summary=True), \n # printCircle(value_range, query, summary=True), \n # printPerformance(dataPerformance, summary=True)]:\n\n\n #ax1 error quote\n ax1 = plt.subplot2grid((2,2),(0,0))\n ax1 = printErrorPerformance(dataError, summary=True)\n \n #ax2 circle\n ax2 = plt.subplot2grid((2,2),(0,1))\n ax2 = printCircle(value_range, query, summary=True)\n \n #ax3 performance\n ax3 = plt.subplot2grid((2,2),(1,0),colspan=2)\n ax3 = printPerformance(dataPerformance, summary=True)\n \n fig.suptitle('Aktuelle Statistik zum Neuronalen Netzwerk', fontsize=26)\n fig.subplots_adjust(hspace=0.3, wspace=0.2, top=0.9)\n fig.subplots_adjust(top=0.85)\n\n plt.show()\n pass\n\ndef printCircle(value_range, query, summary=False):\n\n x_range = np.arange(-value_range * 2, value_range * 2, 0.05)\n y_range = np.arange(-value_range * 2, value_range * 2, 0.05)\n x, y = np.meshgrid(x_range, y_range)\n z = []\n for x_coordinate in x_range:\n z_row = []\n for y_coordinate in y_range:\n z_row.append(query(np.array([x_coordinate,y_coordinate])))\n z.append(z_row)\n\n # prepare plot\n if(summary is True):\n ax = plt.gca()\n else:\n fig, ax = plt.subplots()\n fig.suptitle(\"Performance-Circle\", fontsize=18, fontweight='bold')\n fig.subplots_adjust(top=0.83)\n fig.suptitle(\"Aktuelle Statistik zum Neuronalen Netzwerk\", fontsize=18, fontweight='bold')\n fig.subplots_adjust(top=0.83)\n\n ax.set(xlabel='x', ylabel='y', title='Erzielte Verteilung der Daten')\n ax.set_aspect('equal', 'box')\n z = np.array(z)\n z = z.reshape(z.shape[0], z.shape[1])\n p = ax.pcolor(x, y, z)\n plt.colorbar(p)\n\n return ax if summary is True else plt.show()\n \n pass\n\n\n\n\n\n\n"
] |
[
[
"matplotlib.pyplot.gca",
"numpy.meshgrid",
"numpy.arange",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.colorbar",
"numpy.array",
"matplotlib.pyplot.show",
"matplotlib.pyplot.subplot2grid",
"matplotlib.pyplot.figure"
]
] |
valterlej/dvcusi
|
[
"c886b6f3af783e59994eb934c838fe566ab35dc1"
] |
[
"code/clustering.py"
] |
[
"import os\nimport numpy as np\nimport time\nimport pickle\nfrom sklearn.cluster import MiniBatchKMeans\nfrom tqdm import tqdm\n\n\ndef mini_batch_k_means_clustering(inp_data, output_file=\"./data/cluster.pkl\", epochs=5, n_clusters=1000, random_state=0, batch_size=20000, save_file=True):\n \n cluster = MiniBatchKMeans(n_clusters=n_clusters, random_state=random_state, batch_size=batch_size)\n for e in tqdm(range(epochs)):\n start = time.time()\n print(f\"Training epoch: {e}\")\n np.random.shuffle(inp_data)\n cluster.fit_predict(inp_data)\n print(f\"Time taken for training once {time.time()-start} sec\")\n if save_file:\n print(f\"Saving model\")\n pickle.dump(cluster, open(output_file, 'wb'))\n return cluster\n\n\ndef predict(vid_stack, cluster, output_file=None, save_file=True, return_prediction=False): \n pred = cluster.predict(vid_stack)\n pred_list = list(pred)\n if save_file:\n with open(output_file, 'wb') as f:\n np.save(f, pred)\n if return_prediction:\n return pred_list\n\ndef predict_files_from_directory(vid_stacks, files, cluster, output_dir, file_extension):\n \n if not os.path.isdir(output_dir):\n os.makedirs(output_dir, exist_ok=True)\n\n for i in tqdm(range(len(vid_stacks))):\n stack = vid_stacks[i]\n file = files[i]\n out_file = os.path.join(output_dir, file.replace(\"_rgb\",\"\").replace(\"_flow\",\"\").replace(\".npy\",\"\")+file_extension)\n predict(stack, cluster, out_file, save_file=True)"
] |
[
[
"sklearn.cluster.MiniBatchKMeans",
"numpy.save",
"numpy.random.shuffle"
]
] |
grozail/faraday
|
[
"39f6c1442c52ecb7d4a962044c38591ef09eca17"
] |
[
"src/data/gym/environment.py"
] |
[
"import numpy as np\nimport gym\nfrom .trajectory import RandomTrajectoryGenerationProcess, trajectory_with_current_to_csv\nfrom .config import ControlConfiguration\nfrom src.visualization.tbx import board\n\nclass SingleServoEnv(gym.GoalEnv):\n def __init__(self,\n process: RandomTrajectoryGenerationProcess,\n control_config: ControlConfiguration):\n self.process = process\n self.servo = process.servo\n self.action_space = gym.spaces.Box(-self.servo.max_current,\n self.servo.max_current,\n shape=(control_config.control_horizon,),\n dtype='float32')\n self.observation_space = gym.spaces.Box(-np.inf,\n np.inf,\n shape=((control_config.prediction_horizon + 1) * self.servo.state().size,),\n dtype='float32')\n self.control_config = control_config\n\n self.trajectory = process.run_uniform()\n trajectory_with_current_to_csv(self.servo, self.trajectory)\n self.current = self.trajectory[:, 4]\n self.trajectory = self.trajectory[:, 0:4]\n\n self.dynamic_error = 0\n self.max_dynamic_error_deg = 0.1\n self.index = 1\n self.reset()\n\n def is_done_by_dynamic_error(self):\n return self.dynamic_error > self.max_dynamic_error_deg\n\n def compute_dynamic_error(self, achieved_goal, desired_goal, info):\n self.dynamic_error += np.rad2deg(np.abs(achieved_goal[3] - desired_goal[3])) / 100\n return self.dynamic_error\n\n def compute_reward(self, achieved_goal, desired_goal, info):\n return np.exp(-np.sum(np.square(achieved_goal - desired_goal)))\n\n def step(self, action):\n ac = float(action)\n achieved_state = self.servo.step(ac)\n desired_state = self.trajectory[self.index]\n info = {}\n reward = self.compute_reward(achieved_state, desired_state, info)\n self.compute_dynamic_error(achieved_state, desired_state, info)\n done = self.is_done_by_dynamic_error()\n self.index += 1\n info['dynamic_error'] = self.dynamic_error\n info['achieved_state'] = achieved_state\n info['desired_state'] = desired_state\n try:\n observation = np.concatenate([self.servo.state(), self.get_prediction_horizon()])\n except IndexError:\n info['expected_current'] = self.current[1]\n observation = np.concatenate([self.servo.state() for _ in range(self.control_config.prediction_horizon + 1)])\n done = True\n reward = 5\n return observation, reward, done, info\n\n def get_prediction_horizon(self):\n if self.index + self.control_config.prediction_horizon == self.trajectory.shape[0]:\n raise IndexError\n return np.concatenate(self.trajectory[self.index:self.index + self.control_config.prediction_horizon])\n\n def render(self, mode='human'):\n pass\n\n def reset(self):\n self.index = 1\n self.servo.set_state(*self.trajectory[0])\n self.dynamic_error = 0\n prediction_horizon = self.get_prediction_horizon()\n return np.concatenate([self.servo.state(), prediction_horizon])\n\n"
] |
[
[
"numpy.concatenate",
"numpy.square",
"numpy.abs"
]
] |
qapture/Dis-PU
|
[
"539abcfb507af38fa6e7e881db9ab8193a1a8ab7"
] |
[
"Common/pc_util.py"
] |
[
"\"\"\" Utility functions for processing point clouds.\n\nAuthor: Charles R. Qi, Hao Su\nDate: November 2016\n\"\"\"\n\nimport os\nimport sys\nfrom matplotlib import pyplot as plt\nfrom matplotlib import cm\nfrom mpl_toolkits.mplot3d import Axes3D\nfrom open3d import *\nimport open3d\nfrom sklearn.neighbors import NearestNeighbors\n\nBASE_DIR = os.path.dirname(os.path.abspath(__file__))\nsys.path.append(BASE_DIR)\n\n# Draw point cloud\nfrom Common.eulerangles import euler2mat\n\n# Point cloud IO\nimport numpy as np\nimport plyfile\n\nfrom sklearn.neighbors import NearestNeighbors\n\n\n# ----------------------------------------\n# Point Cloud/Volume Conversions\n# ----------------------------------------\n\ndef point_cloud_to_volume_batch(point_clouds, vsize=12, radius=1.0, flatten=True):\n \"\"\" Input is BxNx3 batch of point cloud\n Output is Bx(vsize^3)\n \"\"\"\n vol_list = []\n for b in range(point_clouds.shape[0]):\n vol = point_cloud_to_volume(np.squeeze(point_clouds[b,:,:]), vsize, radius)\n if flatten:\n vol_list.append(vol.flatten())\n else:\n vol_list.append(np.expand_dims(np.expand_dims(vol, -1), 0))\n if flatten:\n return np.vstack(vol_list)\n else:\n return np.concatenate(vol_list, 0)\n\n\ndef point_cloud_to_volume(points, vsize, radius=1.0):\n \"\"\" input is Nx3 points.\n output is vsize*vsize*vsize\n assumes points are in range [-radius, radius]\n \"\"\"\n vol = np.zeros((vsize,vsize,vsize))\n voxel = 2*radius/float(vsize)\n locations = (points + radius)/voxel\n locations = locations.astype(int)\n vol[locations[:,0],locations[:,1],locations[:,2]] = 1.0\n return vol\n\n#a = np.zeros((16,1024,3))\n#print point_cloud_to_volume_batch(a, 12, 1.0, False).shape\n\ndef volume_to_point_cloud(vol):\n \"\"\" vol is occupancy grid (value = 0 or 1) of size vsize*vsize*vsize\n return Nx3 numpy array.\n \"\"\"\n vsize = vol.shape[0]\n assert(vol.shape[1] == vsize and vol.shape[1] == vsize)\n points = []\n for a in range(vsize):\n for b in range(vsize):\n for c in range(vsize):\n if vol[a,b,c] == 1:\n points.append(np.array([a,b,c]))\n if len(points) == 0:\n return np.zeros((0,3))\n points = np.vstack(points)\n return points\n\n\ndef extract_knn_patch(queries, pc, k):\n \"\"\"\n queries [M, C]\n pc [P, C]\n \"\"\"\n knn_search = NearestNeighbors(n_neighbors=k, algorithm='auto')\n knn_search.fit(pc)\n knn_idx = knn_search.kneighbors(queries, return_distance=False)\n k_patches = np.take(pc, knn_idx, axis=0) # M, K, C\n return k_patches\n\ndef get_knn_idx(queries, pc, k):\n \"\"\"\n queries [M, C]\n pc [P, C]\n \"\"\"\n knn_search = NearestNeighbors(n_neighbors=k, algorithm='auto')\n knn_search.fit(pc)\n knn_idx = knn_search.kneighbors(queries, return_distance=False)\n return knn_idx\n\ndef get_pairwise_distance(batch_features):\n \"\"\"Compute pairwise distance of a point cloud.\n\n Args:\n batch_features: numpy (batch_size, num_points, num_dims)\n\n Returns:\n pairwise distance: (batch_size, num_points, num_points)\n \"\"\"\n\n og_batch_size = len(batch_features.shape)\n\n if og_batch_size == 2: #just two dimension\n batch_features = np.expand_dims(batch_features, axis=0)\n\n\n batch_features_transpose = np.transpose(batch_features, (0, 2, 1))\n\n #batch_features_inner = batch_features@batch_features_transpose\n batch_features_inner = np.matmul(batch_features,batch_features_transpose)\n\n #print(np.max(batch_features_inner), np.min(batch_features_inner))\n\n\n batch_features_inner = -2 * batch_features_inner\n batch_features_square = np.sum(np.square(batch_features), axis=-1, 
keepdims=True)\n\n\n batch_features_square_tranpose = np.transpose(batch_features_square, (0, 2, 1))\n\n return batch_features_square + batch_features_inner + batch_features_square_tranpose\n\ndef get_knn_dis(queries, pc, k):\n \"\"\"\n queries [M, C]\n pc [P, C]\n \"\"\"\n knn_search = NearestNeighbors(n_neighbors=k, algorithm='auto')\n knn_search.fit(pc)\n dis,knn_idx = knn_search.kneighbors(queries, return_distance=True)\n #k_patches = np.take(pc, knn_idx, axis=0) # M, K, C\n return dis\n\ndef normalize_point_cloud(input):\n \"\"\"\n input: pc [N, P, 3]\n output: pc, centroid, furthest_distance\n \"\"\"\n if len(input.shape) == 2:\n axis = 0\n elif len(input.shape) == 3:\n axis = 1\n centroid = np.mean(input, axis=axis, keepdims=True)\n input = input - centroid\n furthest_distance = np.amax(\n np.sqrt(np.sum(input ** 2, axis=-1, keepdims=True)), axis=axis, keepdims=True)\n input = input / furthest_distance\n return input, centroid, furthest_distance\n\ndef jitter_perturbation_point_cloud(batch_data, sigma=0.005, clip=0.02, is_2D=False):\n \"\"\" Randomly jitter points. jittering is per point.\n Input:\n BxNx3 array, original batch of point clouds\n Return:\n BxNx3 array, jittered batch of point clouds\n \"\"\"\n B, N, C = batch_data.shape\n assert(clip > 0)\n if is_2D:\n chn = 2\n else:\n chn = 3\n jittered_data = np.clip(sigma * np.random.randn(B, N, C), -1 * clip, clip)\n jittered_data[:, :, chn:] = 0\n jittered_data += batch_data\n return jittered_data\n\ndef downsample_points(pts,K=2048):\n # if num_pts > 8K use farthest sampling\n # else use random sampling\n if pts.shape[0] >= 2*K:\n sampler = FarthestSampler()\n return sampler(pts, K)\n else:\n return pts[np.random.choice(pts.shape[0], K,\n replace=(K<pts.shape[0])), :]\n\ndef random_sampling(pointcloud,K=2048):\n\tN = pointcloud.shape[0]\n\tif N >= K:\n\t\tidx = np.random.choice(N, K)\n\t\treturn pointcloud[idx]\n\telse:\n\t\tidx = np.random.choice(np.arange(N), K-N)\n\t\texpand_data = pointcloud[idx]\n\t\tpointcloud = np.concatenate((pointcloud, expand_data), 0)\n\t\treturn pointcloud\n\nclass FarthestSampler:\n def __init__(self):\n pass\n\n def _calc_distances(self, p0, points):\n return ((p0 - points) ** 2).sum(axis=1)\n\n def __call__(self, pts, k):\n farthest_pts = np.zeros((k, 3), dtype=np.float32)\n farthest_pts[0] = pts[np.random.randint(len(pts))]\n distances = self._calc_distances(farthest_pts[0], pts)\n for i in range(1, k):\n farthest_pts[i] = pts[np.argmax(distances)]\n distances = np.minimum(\n distances, self._calc_distances(farthest_pts[i], pts))\n return farthest_pts\n\n# ----------------------------------------\n# Point cloud IO\n# ----------------------------------------\n\ndef plot_pcd_three_views(filename, pcds, titles, suptitle='', sizes=None, cmap='Reds', zdir='y',\n xlim=(-0.5, 0.5), ylim=(-0.5, 0.5), zlim=(-0.5, 0.5)):\n if sizes is None:\n sizes = [0.5 for i in range(len(pcds))]\n fig = plt.figure(figsize=(len(pcds) * 3, 9))\n for i in range(3):\n elev = 30\n azim = -45 + 90 * i\n for j, (pcd, size) in enumerate(zip(pcds, sizes)):\n color = pcd[:, 0]\n ax = fig.add_subplot(\n 3, len(pcds), i * len(pcds) + j + 1, projection='3d')\n ax.view_init(elev, azim)\n ax.scatter(pcd[:, 0], pcd[:, 1], pcd[:, 2], zdir=zdir,\n c=color, s=size, cmap=cmap, vmin=-1, vmax=0.5)\n ax.set_title(titles[j])\n ax.set_axis_off()\n ax.set_xlim3d(xlim)\n ax.set_ylim3d(ylim)\n ax.set_zlim3d(zlim)\n plt.subplots_adjust(left=0.05, right=0.95, bottom=0.05,\n top=0.9, wspace=0.1, hspace=0.1)\n plt.suptitle(suptitle)\n 
fig.savefig(filename, dpi=300)\n plt.close(fig)\n\n\ndef plot_pcd_one_view(filename, pcds, titles, suptitle='', sizes=None, cmap='Reds', zdir='y',\n xlim=(-0.55, 0.55), ylim=(-0.55, 0.55), zlim=(-0.55, 0.55)):\n if sizes is None:\n sizes = [0.5 for i in range(len(pcds))]\n fig = plt.figure(figsize=(len(pcds) * 3, 3))\n\n for j, (pcd, size) in enumerate(zip(pcds, sizes)):\n # color = pcd[:, 0]\n color = 'k'\n ax = fig.add_subplot(\n 1, len(pcds), j + 1, projection='3d')\n ax.view_init(0, 270)\n ax.scatter(pcd[:, 0], pcd[:, 1], pcd[:, 2], zdir=zdir,\n c=color, s=size, vmin=-1, vmax=0.5)\n ax.set_title(titles[j])\n ax.set_axis_off()\n ax.set_xlim3d(xlim)\n ax.set_ylim3d(ylim)\n ax.set_zlim3d(zlim)\n\n plt.subplots_adjust(left=0.05, right=0.95, bottom=0.05,\n top=0.95, wspace=0.1, hspace=0.1)\n plt.suptitle(suptitle)\n fig.savefig(filename, dpi=300)\n plt.close(fig)\n\ndef read_pcd(filename, count=None):\n points = read_point_cloud(filename)\n points = np.array(points.points).astype(np.float32)\n if count is not None:\n if count > points.shape[0]:\n # fill the point clouds with the random point\n tmp = np.zeros((count, points.shape[1]), dtype=points.dtype)\n tmp[:points.shape[0], ...] = points\n tmp[points.shape[0]:, ...] = points[np.random.choice(\n points.shape[0], count - points.shape[0]), :]\n points = tmp\n elif count < points.shape[0]:\n # different to pointnet2, take random x point instead of the first\n # idx = np.random.permutation(count)\n # points = points[idx, :]\n points = downsample_points(points, count)\n return points\n\n\ndef save_pcd(filename, points):\n pcd = PointCloud()\n pcd.points = Vector3dVector(points)\n write_point_cloud(filename, pcd)\n\n\ndef read_ply_with_color(file, count=None):\n loaded = plyfile.PlyData.read(file)\n points = np.vstack([loaded['vertex'].data['x'], loaded['vertex'].data['y'], loaded['vertex'].data['z']])\n if 'nx' in loaded['vertex'].data.dtype.names:\n normals = np.vstack([loaded['vertex'].data['nx'], loaded['vertex'].data['ny'], loaded['vertex'].data['nz']])\n points = np.concatenate([points, normals], axis=0)\n colors = None\n if 'red' in loaded['vertex'].data.dtype.names:\n colors = np.vstack([loaded['vertex'].data['red'], loaded['vertex'].data['green'], loaded['vertex'].data['blue']])\n if 'alpha' in loaded['vertex'].data.dtype.names:\n colors = np.concatenate([colors, np.expand_dims(loaded['vertex'].data['alpha'], axis=0)], axis=0)\n colors = colors.transpose(1, 0)\n colors = colors.astype(np.float32) / 255.0\n\n points = points.transpose(1, 0)\n if count is not None:\n if count > points.shape[0]:\n # fill the point clouds with the random point\n tmp = np.zeros((count, points.shape[1]), dtype=points.dtype)\n tmp[:points.shape[0], ...] = points\n tmp[points.shape[0]:, ...] 
= points[np.random.choice(\n points.shape[0], count - points.shape[0]), :]\n points = tmp\n elif count < points.shape[0]:\n # different to pointnet2, take random x point instead of the first\n # idx = np.random.permutation(count)\n # points = points[idx, :]\n points = downsample_points(points, count)\n return points, colors\n\n\ndef read_ply(file, count=None):\n loaded = plyfile.PlyData.read(file)\n points = np.vstack([loaded['vertex'].data['x'], loaded['vertex'].data['y'], loaded['vertex'].data['z']])\n if 'nx' in loaded['vertex'].data.dtype.names:\n normals = np.vstack([loaded['vertex'].data['nx'], loaded['vertex'].data['ny'], loaded['vertex'].data['nz']])\n points = np.concatenate([points, normals], axis=0)\n\n points = points.transpose(1, 0)\n if count is not None:\n if count > points.shape[0]:\n # fill the point clouds with the random point\n tmp = np.zeros((count, points.shape[1]), dtype=points.dtype)\n tmp[:points.shape[0], ...] = points\n tmp[points.shape[0]:, ...] = points[np.random.choice(\n points.shape[0], count - points.shape[0]), :]\n points = tmp\n elif count < points.shape[0]:\n # different to pointnet2, take random x point instead of the first\n # idx = np.random.permutation(count)\n # points = points[idx, :]\n points = downsample_points(points, count)\n return points\n\n\ndef save_ply_with_face_property(points, faces, property, property_max, filename, cmap_name=\"Set1\"):\n face_num = faces.shape[0]\n colors = np.full(faces.shape, 0.5)\n cmap = cm.get_cmap(cmap_name)\n for point_idx in range(face_num):\n colors[point_idx] = cmap(property[point_idx] / property_max)[:3]\n save_ply_with_face(points, faces, filename, colors)\n\n\ndef save_ply_with_face(points, faces, filename, colors=None):\n vertex = np.array([tuple(p) for p in points], dtype=[\n ('x', 'f4'), ('y', 'f4'), ('z', 'f4')])\n faces = np.array([(tuple(p),) for p in faces], dtype=[\n ('vertex_indices', 'i4', (3, ))])\n descr = faces.dtype.descr\n if colors is not None:\n assert len(colors) == len(faces)\n face_colors = np.array([tuple(c * 255) for c in colors],\n dtype=[('red', 'u1'), ('green', 'u1'), ('blue', 'u1')])\n descr = faces.dtype.descr + face_colors.dtype.descr\n\n faces_all = np.empty(len(faces), dtype=descr)\n for prop in faces.dtype.names:\n faces_all[prop] = faces[prop]\n if colors is not None:\n for prop in face_colors.dtype.names:\n faces_all[prop] = face_colors[prop]\n\n ply = plyfile.PlyData([plyfile.PlyElement.describe(\n vertex, 'vertex'), plyfile.PlyElement.describe(faces_all, 'face')], text=False)\n ply.write(filename)\n\n\ndef load(filename, count=None):\n if filename[-4:] == \".ply\":\n points = read_ply(filename, count)[:, :3]\n elif filename[-4:] == \".pcd\":\n points = read_pcd(filename, count)[:, :3]\n else:\n points = np.loadtxt(filename).astype(np.float32)\n if count is not None:\n if count > points.shape[0]:\n # fill the point clouds with the random point\n tmp = np.zeros((count, points.shape[1]), dtype=points.dtype)\n tmp[:points.shape[0], ...] = points\n tmp[points.shape[0]:, ...] 
= points[np.random.choice(\n points.shape[0], count - points.shape[0]), :]\n points = tmp\n elif count < points.shape[0]:\n # different to pointnet2, take random x point instead of the first\n # idx = np.random.permutation(count)\n # points = points[idx, :]\n points = downsample_points(points, count)\n return points\n\ndef save_ply(points, filename, colors=None, normals=None):\n vertex = np.core.records.fromarrays(points.transpose(1,0),names='x, y, z',formats='f4, f4, f4')\n num_vertex = len(vertex)\n desc = vertex.dtype.descr\n\n if normals is not None:\n vertex_normal = np.core.records.fromarrays(normals.transpose(1,0),names='nx, ny, nz',formats='f4, f4, f4')\n assert len(vertex_normal) == num_vertex\n desc = desc + vertex_normal.dtype.descr\n\n if colors is not None:\n assert len(colors) == num_vertex\n if colors.max() <= 1:\n colors = colors*255\n if colors.shape[1] == 4:\n vertex_color = np.core.records.fromarrays(colors.transpose(1,0),names='red, green, blue, alpha',formats='u1, u1, u1, u1')\n else:\n vertex_color = np.core.records.fromarrays(colors.transpose(1,0),names='red, green, blue',formats='u1, u1, u1')\n desc = desc + vertex_color.dtype.descr\n\n vertex_all = np.empty(num_vertex, dtype=desc)\n\n for prop in vertex.dtype.names:\n vertex_all[prop] = vertex[prop]\n\n if normals is not None:\n for prop in vertex_normal.dtype.names:\n vertex_all[prop] = vertex_normal[prop]\n\n if colors is not None:\n for prop in vertex_color.dtype.names:\n vertex_all[prop] = vertex_color[prop]\n\n ply = plyfile.PlyData(\n [plyfile.PlyElement.describe(vertex_all, 'vertex')], text=False)\n if not os.path.exists(os.path.dirname(filename)):\n os.makedirs(os.path.dirname(filename))\n ply.write(filename)\n\n\ndef save_ply_property(points, property, filename, property_max=None, normals=None, cmap_name='Set1'):\n point_num = points.shape[0]\n colors = np.full([point_num, 3], 0.5)\n cmap = cm.get_cmap(cmap_name)\n if property_max is None:\n property_max = np.amax(property)\n for point_idx in range(point_num):\n colors[point_idx] = cmap(property[point_idx] / property_max)[:3]\n save_ply(points, filename, colors, normals)\n\n\n# ----------------------------------------\n# Simple Point cloud and Volume Renderers\n# ----------------------------------------\n\ndef draw_point_cloud(input_points, canvasSize=500, space=240, diameter=10,\n xrot=0, yrot=0, zrot=0, switch_xyz=[0,1,2], normalize=True):\n \"\"\" Render point cloud to image with alpha channel.\n Input:\n points: Nx3 numpy array (+y is up direction)\n Output:\n gray image as numpy array of size canvasSizexcanvasSize\n \"\"\"\n canvasSizeX = canvasSize\n canvasSizeY = canvasSize\n\n image = np.zeros((canvasSizeX, canvasSizeY))\n if input_points is None or input_points.shape[0] == 0:\n return image\n\n points = input_points[:, switch_xyz]\n M = euler2mat(zrot, yrot, xrot)\n points = (np.dot(M, points.transpose())).transpose()\n\n # Normalize the point cloud\n # We normalize scale to fit points in a unit sphere\n if normalize:\n centroid = np.mean(points, axis=0)\n points -= centroid\n furthest_distance = np.max(np.sqrt(np.sum(abs(points)**2,axis=-1)))\n points /= furthest_distance\n\n # Pre-compute the Gaussian disk\n radius = (diameter-1)/2.0\n disk = np.zeros((diameter, diameter))\n for i in range(diameter):\n for j in range(diameter):\n if (i - radius) * (i-radius) + (j-radius) * (j-radius) <= radius * radius:\n disk[i, j] = np.exp((-(i-radius)**2 - (j-radius)**2)/(radius**2))\n mask = np.argwhere(disk > 0)\n dx = mask[:, 0]\n dy = mask[:, 1]\n dv = 
disk[disk > 0]\n\n # Order points by z-buffer\n zorder = np.argsort(points[:, 2])\n points = points[zorder, :]\n points[:, 2] = (points[:, 2] - np.min(points[:, 2])) / (np.max(points[:, 2] - np.min(points[:, 2])))\n max_depth = np.max(points[:, 2])\n\n for i in range(points.shape[0]):\n j = points.shape[0] - i - 1\n x = points[j, 0]\n y = points[j, 1]\n xc = canvasSizeX/2 + (x*space)\n yc = canvasSizeY/2 + (y*space)\n xc = int(np.round(xc))\n yc = int(np.round(yc))\n\n px = dx + xc\n py = dy + yc\n #image[px, py] = image[px, py] * 0.7 + dv * (max_depth - points[j, 2]) * 0.3\n image[px, py] = image[px, py] * 0.7 + dv * 0.3\n\n val = np.max(image)+1e-8\n val = np.percentile(image,99.9)\n image = image / val\n mask = image==0\n\n image[image>1.0]=1.0\n image = 1.0-image\n #image = np.expand_dims(image, axis=-1)\n #image = np.concatenate((image*0.3+0.7,np.ones_like(image), np.ones_like(image)), axis=2)\n #image = colors.hsv_to_rgb(image)\n image[mask]=1.0\n\n\n return image\n\ndef point_cloud_three_views(points,diameter=5):\n \"\"\" input points Nx3 numpy array (+y is up direction).\n return an numpy array gray image of size 500x1500. \"\"\"\n # +y is up direction\n # xrot is azimuth\n # yrot is in-plane\n # zrot is elevation\n # img1 = draw_point_cloud(points, xrot=90/180.0*np.pi, yrot=0/180.0*np.pi, zrot=0/180.0*np.pi,diameter=diameter)\n # img2 = draw_point_cloud(points, xrot=180/180.0*np.pi, yrot=0/180.0*np.pi, zrot=0/180.0*np.pi,diameter=diameter)\n # img3 = draw_point_cloud(points, xrot=0/180.0*np.pi, yrot=-90/180.0*np.pi, zrot=0/180.0*np.pi,diameter=diameter)\n # image_large = np.concatenate([img1, img2, img3], 1)\n try:\n img1 = draw_point_cloud(points, zrot=110 / 180.0 * np.pi, xrot=135 / 180.0 * np.pi, yrot=0 / 180.0 * np.pi,diameter=diameter)\n img2 = draw_point_cloud(points, zrot=70 / 180.0 * np.pi, xrot=135 / 180.0 * np.pi, yrot=0 / 180.0 * np.pi,diameter=diameter)\n img3 = draw_point_cloud(points, zrot=180.0 / 180.0 * np.pi, xrot=90 / 180.0 * np.pi, yrot=0 / 180.0 * np.pi,diameter=diameter)\n image_large = np.concatenate([img1, img2, img3], 1)\n except Exception as e:\n image_large = np.zeros((500, 1500), dtype=np.float32)\n\n return image_large\n\n\nfrom PIL import Image\ndef point_cloud_three_views_demo():\n \"\"\" Demo for draw_point_cloud function \"\"\"\n points = read_ply('../third_party/mesh_sampling/piano.ply')\n im_array = point_cloud_three_views(points)\n img = Image.fromarray(np.uint8(im_array*255.0))\n img.save('piano.jpg')\n\n\ndef pyplot_draw_point_cloud(points, output_filename=None):\n \"\"\" points is a Nx3 numpy array \"\"\"\n fig = plt.figure()\n ax = fig.add_subplot(111, projection='3d')\n ax.scatter(points[:,0], points[:,1], points[:,2])\n ax.set_xlabel('x')\n ax.set_ylabel('y')\n ax.set_zlabel('z')\n if output_filename:\n savefig(output_filename)\n\ndef pyplot_draw_volume(vol, output_filename):\n \"\"\" vol is of size vsize*vsize*vsize\n output an image to output_filename\n \"\"\"\n points = volume_to_point_cloud(vol)\n pyplot_draw_point_cloud(points, output_filename)\n\n\nif __name__==\"__main__\":\n point_cloud_three_views_demo()\n"
] |
[
[
"numpy.amax",
"numpy.expand_dims",
"numpy.take",
"numpy.vstack",
"numpy.squeeze",
"numpy.concatenate",
"numpy.max",
"numpy.round",
"numpy.mean",
"numpy.random.randn",
"numpy.exp",
"numpy.square",
"numpy.uint8",
"numpy.arange",
"numpy.matmul",
"numpy.full",
"numpy.argmax",
"matplotlib.pyplot.close",
"matplotlib.pyplot.subplots_adjust",
"sklearn.neighbors.NearestNeighbors",
"numpy.zeros",
"matplotlib.pyplot.figure",
"numpy.random.choice",
"numpy.min",
"numpy.loadtxt",
"numpy.transpose",
"numpy.argsort",
"matplotlib.pyplot.suptitle",
"numpy.array",
"numpy.sum",
"numpy.percentile",
"numpy.argwhere",
"matplotlib.cm.get_cmap",
"numpy.empty"
]
] |
zebivy/mars
|
[
"edefba4e9217d614056d7f575e3c2bf63cc8cbda"
] |
[
"mars/deploy/oscar/tests/test_fault_injection.py"
] |
[
"# Copyright 1999-2021 Alibaba Group Holding Ltd.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport os\nimport pytest\nimport numpy as np\n\nimport mars.tensor as mt\nfrom mars.remote import spawn\nfrom mars.deploy.oscar.local import new_cluster\nfrom mars.deploy.oscar.session import get_default_async_session\nfrom mars.oscar.errors import ServerClosed\nfrom mars.services.tests.fault_injection_manager import (\n FaultType,\n AbstractFaultInjectionManager,\n ExtraConfigKey,\n FaultInjectionError,\n FaultInjectionUnhandledError\n)\n\nCONFIG_FILE = os.path.join(\n os.path.dirname(__file__), 'fault_injection_config.yml')\nRERUN_SUBTASK_CONFIG_FILE = os.path.join(\n os.path.dirname(__file__), 'fault_injection_config_with_rerun.yml')\n\n\[email protected]\nasync def fault_cluster(request):\n param = getattr(request, \"param\", {})\n start_method = os.environ.get('POOL_START_METHOD', None)\n client = await new_cluster(subprocess_start_method=start_method,\n config=param.get('config', CONFIG_FILE),\n n_worker=2,\n n_cpu=2)\n async with client:\n yield client\n\n\nasync def create_fault_injection_manager(session_id, address, fault_count, fault_type):\n class FaultInjectionManager(AbstractFaultInjectionManager):\n def __init__(self):\n self._fault_count = fault_count\n\n def set_fault_count(self, count):\n self._fault_count = count\n\n def on_execute_operand(self) -> FaultType:\n if self._fault_count > 0:\n self._fault_count -= 1\n return fault_type\n return FaultType.NoFault\n\n await FaultInjectionManager.create(session_id, address)\n return FaultInjectionManager.name\n\n\[email protected]('fault_and_exception',\n [[FaultType.Exception,\n pytest.raises(FaultInjectionError, match='Fault Injection')],\n [FaultType.UnhandledException,\n pytest.raises(FaultInjectionUnhandledError, match='Fault Injection Unhandled')],\n [FaultType.ProcessExit,\n pytest.raises(ServerClosed)]])\[email protected]\nasync def test_fault_inject_subtask_processor(fault_cluster, fault_and_exception):\n fault_type, first_run_raises = fault_and_exception\n name = await create_fault_injection_manager(\n session_id=fault_cluster.session.session_id,\n address=fault_cluster.session.address,\n fault_count=1,\n fault_type=fault_type)\n extra_config = {ExtraConfigKey.FAULT_INJECTION_MANAGER_NAME: name}\n\n raw = np.random.RandomState(0).rand(10, 10)\n a = mt.tensor(raw, chunk_size=5)\n b = a + 1\n\n with first_run_raises:\n b.execute(extra_config=extra_config)\n\n # execute again may raise an ConnectionRefusedError if the\n # ProcessExit occurred.\n\n\[email protected]('fault_cluster',\n [{'config': RERUN_SUBTASK_CONFIG_FILE}],\n indirect=True)\[email protected]('fault_config',\n [[FaultType.Exception, 1,\n pytest.raises(FaultInjectionError, match='Fault Injection')],\n [FaultType.ProcessExit, 1,\n pytest.raises(ServerClosed)]])\[email protected]\nasync def test_rerun_subtask(fault_cluster, fault_config):\n fault_type, fault_count, expect_raises = fault_config\n name = await create_fault_injection_manager(\n 
session_id=fault_cluster.session.session_id,\n address=fault_cluster.session.address,\n fault_count=fault_count,\n fault_type=fault_type)\n extra_config = {ExtraConfigKey.FAULT_INJECTION_MANAGER_NAME: name}\n session = get_default_async_session()\n\n raw = np.random.RandomState(0).rand(10, 10)\n a = mt.tensor(raw, chunk_size=5)\n b = a + 1\n\n info = await session.execute(b, extra_config=extra_config)\n await info\n assert info.result() is None\n assert info.exception() is None\n\n r = await session.fetch(b)\n np.testing.assert_array_equal(r, raw + 1)\n\n fault_injection_manager = await session.get_remote_object(\n fault_cluster.session.session_id, name)\n await fault_injection_manager.set_fault_count(1)\n\n # the extra config overwrites the default config.\n extra_config['subtask_max_retries'] = 0\n info = await session.execute(b, extra_config=extra_config)\n with expect_raises:\n await info\n\n\[email protected]('fault_cluster',\n [{'config': RERUN_SUBTASK_CONFIG_FILE}],\n indirect=True)\[email protected]\nasync def test_rerun_subtask_unhandled(fault_cluster):\n name = await create_fault_injection_manager(\n session_id=fault_cluster.session.session_id,\n address=fault_cluster.session.address,\n fault_count=1,\n fault_type=FaultType.UnhandledException)\n extra_config = {ExtraConfigKey.FAULT_INJECTION_MANAGER_NAME: name}\n\n raw = np.random.RandomState(0).rand(10, 10)\n a = mt.tensor(raw, chunk_size=5)\n b = a + 1\n\n with pytest.raises(FaultInjectionUnhandledError):\n b.execute(extra_config=extra_config)\n\n\[email protected]('fault_cluster',\n [{'config': RERUN_SUBTASK_CONFIG_FILE}],\n indirect=True)\[email protected]('fault_config',\n [[FaultType.Exception, 1,\n pytest.raises(FaultInjectionError, match='Fault Injection')],\n [FaultType.ProcessExit, 1,\n pytest.raises(ServerClosed)]])\[email protected]\nasync def test_retryable(fault_cluster, fault_config):\n fault_type, fault_count, expect_raises = fault_config\n name = await create_fault_injection_manager(\n session_id=fault_cluster.session.session_id,\n address=fault_cluster.session.address,\n fault_count=fault_count,\n fault_type=fault_type)\n extra_config = {ExtraConfigKey.FAULT_INJECTION_MANAGER_NAME: name}\n\n def f(x):\n return x + 1\n\n r = spawn(f, args=(1,), retry_when_fail=False)\n with expect_raises:\n r.execute(extra_config=extra_config)\n"
] |
[
[
"numpy.testing.assert_array_equal",
"numpy.random.RandomState"
]
] |
igrekun/layer-to-layer-pytorch
|
[
"6ec1b23f370c7f8345859d90fdb3a6fb6ff117a2"
] |
[
"layer_to_layer_pytorch/loss.py"
] |
[
"from typing import List\n\nimport copy\n\nimport numpy as np\nimport torch\nfrom torch import nn\n\nfrom layer_to_layer_pytorch.helpers import zipper\nfrom layer_to_layer_pytorch.types import LossFn\n\n\nclass L2LLoss:\n def __init__(\n self,\n model,\n loss_fn: LossFn,\n # store_grad_on_calc: bool = True,\n **forward_kwargs,\n ):\n self.model = model\n self.loss_fn = loss_fn\n self.store_grad_on_calc = False # store_grad_on_calc\n self.forward_kwargs = forward_kwargs or {}\n\n self._batch = None\n self._target = None\n\n @torch.no_grad()\n def __call__(\n self, batch: torch.Tensor, target: torch.Tensor\n ) -> torch.Tensor:\n self._batch = batch\n self._target = target\n\n microbatch_size = self.model._get_microbatch_size(batch)\n num_steps_in_loss = batch.shape[0] // microbatch_size\n losses: List[torch.Tensor] = []\n\n last_layer: nn.Module = copy.deepcopy(self.model._get_layers()[-1]).to(\n self.model.gpu_device\n )\n\n for microbatch, microtarget in zipper(\n batch.split(microbatch_size),\n target.split(microbatch_size),\n verbose=False,\n desc=\"Microbatching\",\n total=num_steps_in_loss,\n leave=False,\n ):\n microbatch = microbatch.to(self.model.gpu_device)\n # microbatch.requires_grad = True\n\n microtarget = microtarget.to(self.model.gpu_device)\n\n activation: torch.Tensor = last_layer(\n microbatch, **self.forward_kwargs\n )\n\n loss = self.loss_fn(activation, microtarget)\n losses.append(loss.item())\n\n # if self.store_grad_on_calc:\n # loss.backward()\n # self.model._grads[-1].append(microbatch.grad.cpu())\n\n with torch.no_grad():\n loss_value = torch.tensor(np.sum(losses) / num_steps_in_loss)\n\n return loss_value\n\n def backward(self) -> None:\n self.model.backward(\n self._batch,\n self._target,\n loss_fn=self.loss_fn,\n # skip_last_layer=self.store_grad_on_calc,\n )\n\n def __enter__(self):\n return self\n\n def __exit__(self, type, value, traceback):\n self._batch = None\n self._target = None\n\n\n__all__ = [\"L2LLoss\"]\n"
] |
[
[
"torch.no_grad",
"numpy.sum"
]
] |
lewisc/spark-tk
|
[
"5548fc925b5c278263cbdebbd9e8c7593320c2f4"
] |
[
"regression-tests/sparktkregtests/testcases/models/gmm_test.py"
] |
[
"# vim: set encoding=utf-8\n\n# Copyright (c) 2016 Intel Corporation \n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\n\"\"\"Test guassian mixture models against known values\"\"\"\nimport unittest\nfrom collections import Counter\nfrom numpy.testing import assert_almost_equal\nfrom sparktkregtests.lib import sparktk_test\n\n\nclass GMMModelTest(sparktk_test.SparkTKTestCase):\n\n def setUp(self):\n data_file = self.get_file(\"gmm_data.csv\")\n self.frame = self.context.frame.import_csv(\n data_file, schema=[(\"x1\", float), (\"x2\", float)])\n\n def test_train(self):\n \"\"\" Verify that model operates as expected in straightforward case\"\"\"\n model = self.context.models.clustering.gmm.train(\n self.frame, [\"x1\", \"x2\"],\n column_scalings=[1.0, 1.0],\n k=5,\n max_iterations=500,\n seed=20,\n convergence_tol=0.0001)\n\n actual_mu = [g.mu for g in model.gaussians]\n actual_sigma = [g.sigma for g in model.gaussians]\n expected_mu = \\\n [[7.0206, -10.1706],\n [7.8322, -10.2383],\n [-1.3816, 6.7215],\n [-0.04184, 5.8039],\n [-4.1743, 8.5564]]\n expected_sigma = \\\n [[[0.2471, -0.3325],\n [-0.3325, 0.5828]],\n [[2.3005, 0.6906],\n [0.6906, 2.1103]],\n [[1.5941, -3.5325],\n [-3.5325, 7.8424]],\n [[0.9849, 0.04328],\n [0.04328, 0.3736]],\n [[0.1168, 0.1489],\n [0.1489, 0.9757]]]\n assert_almost_equal(actual_mu, expected_mu, decimal=3)\n assert_almost_equal(actual_sigma, expected_sigma, decimal=3)\n\n def test_predict(self):\n \"\"\" Tests output of predict \"\"\"\n model = self.context.models.clustering.gmm.train(\n self.frame, [\"x1\", \"x2\"],\n column_scalings=[1.0, 1.0],\n k=3,\n max_iterations=100,\n seed=15)\n predicted_frame = model.predict(self.frame)\n results_df = predicted_frame.to_pandas(self.frame.count())\n\n actual_cluster_sizes = Counter(results_df[\"predicted_cluster\"].tolist())\n expected_cluster_sizes = {2: 27, 0: 17, 1: 6}\n self.assertItemsEqual(actual_cluster_sizes, expected_cluster_sizes)\n\n def test_gmm_1_cluster(self):\n \"\"\"Test gmm doesn't error on k=1\"\"\"\n model = self.context.models.clustering.gmm.train(\n self.frame, [\"x1\", \"x2\"], [1.0, 1.0], k=1)\n\n def test_gmm_1_iteration(self):\n \"\"\"Train on 1 iteration only, shouldn't throw exception\"\"\"\n model = self.context.models.clustering.gmm.train(\n self.frame, [\"x1\"], column_scalings=[1.0],\n max_iterations=1)\n\n def test_gmm_high_convergence(self):\n \"\"\"Train on high convergence, should not throw exception\"\"\"\n model = self.context.models.clustering.gmm.train(\n self.frame, [\"x1\", \"x2\"], column_scalings=[1.0, 1.0],\n convergence_tol=1e6)\n\n def test_gmm_negative_seed(self):\n \"\"\"Train on negative seed, shouldn't throw exception\"\"\"\n model = self.context.models.clustering.gmm.train(\n self.frame, [\"x1\", \"x2\"], column_scalings=[1.0, 1.0],\n seed=-20)\n\n def test_gmm_0_scalings(self):\n \"\"\"all-zero column scalings, shouldn't throw exception\"\"\"\n model = self.context.models.clustering.gmm.train(\n self.frame, [\"x1\", \"x2\"], column_scalings=[0.0, 0.0])\n\n def 
test_gmm_negative_scalings(self):\n \"\"\"negative column scalings, shouldn't throw exception\"\"\"\n model = self.context.models.clustering.gmm.train(\n self.frame, [\"x1\", \"x2\"], column_scalings=[-1.0, -1.0])\n\n def test_gmm_empty_frame(self):\n \"\"\" Verify that model operates as expected in straightforward case\"\"\"\n # Train on an empty frame\n block_data = []\n frame = self.context.frame.create(\n block_data,\n [(\"x1\", float)])\n\n with self.assertRaisesRegexp(\n Exception, \"empty collection\"):\n self.context.models.clustering.gmm.train(\n frame, [\"x1\"], column_scalings=[1.0])\n\n def test_0_classes_errors(self):\n \"\"\"Train on 0 classes, should error\"\"\"\n with self.assertRaisesRegexp(\n Exception, \"k must be at least 1\"):\n self.context.models.clustering.gmm.train(\n self.frame, [\"x1\", \"x2\"], column_scalings=[1.0, 1.0], k=0)\n\n def test_negative_classes(self):\n \"\"\"Train on negative classes, should error\"\"\"\n with self.assertRaisesRegexp(\n Exception, \"k must be at least 1\"):\n self.context.models.clustering.gmm.train(\n self.frame, [\"x1\"], column_scalings=[1.0], k=-5)\n\n def test_0_iterations(self):\n \"\"\"Train on 0 iterations, should error\"\"\"\n with self.assertRaisesRegexp(\n Exception, \"maxIterations must be a positive value\"):\n self.context.models.clustering.gmm.train(\n self.frame, [\"x1\"], column_scalings=[1.0],\n max_iterations=0)\n\n def test_negative_iterations(self):\n \"\"\"Train on negative iterations, should error\"\"\"\n with self.assertRaisesRegexp(\n Exception, \"maxIterations must be a positive value\"):\n self.context.models.clustering.gmm.train(\n self.frame, [\"x1\"], column_scalings=[1.0],\n max_iterations=-20)\n\n def test_wrong_column_scalings(self):\n \"\"\"Insufficient column scalings, should error\"\"\"\n with self.assertRaisesRegexp(\n Exception, \"columnWeights must not be null or empty\"):\n self.context.models.clustering.gmm.train(\n self.frame, [\"x1\"], column_scalings=[])\n\n def test_too_many_column_scalings(self):\n \"\"\"Extra column scalings, should error\"\"\"\n with self.assertRaisesRegexp(\n Exception,\n \"Length of columnWeights and observationColumns.*\"):\n self.context.models.clustering.gmm.train(\n self.frame, [\"x1\", \"x2\"], column_scalings=[1.0, 1.0, 1.0])\n\n def test_missing_column_scalings(self):\n \"\"\"Missing column scalings, should error\"\"\"\n with self.assertRaisesRegexp(\n TypeError, \"train\\(\\) takes at least 3 arguments.*\"):\n self.context.models.clustering.gmm.train(\n self.frame, [\"x1\", \"x2\"], k=2)\n\nif __name__ == \"__main__\":\n unittest.main()\n"
] |
[
[
"numpy.testing.assert_almost_equal"
]
] |
COVIDAnalytics/DELPHI
|
[
"1f75ad8047fb22f386105a7d621025d744967916"
] |
[
"DELPHI_model_V4.py"
] |
[
"# Authors: Hamza Tazi Bouardi ([email protected]), Michael L. Li ([email protected]), Omar Skali Lami ([email protected])\nimport os\nimport yaml\nimport logging\nimport time\nimport psutil\nimport argparse\nimport pandas as pd\nimport numpy as np\nimport multiprocessing as mp\nfrom scipy.integrate import solve_ivp\nfrom scipy.optimize import minimize\nfrom datetime import datetime, timedelta\nfrom functools import partial\nfrom tqdm import tqdm\nfrom scipy.optimize import dual_annealing\nfrom DELPHI_utils_V4_static import (\n DELPHIAggregations, DELPHIDataSaver, DELPHIDataCreator, get_initial_conditions,\n get_mape_data_fitting, create_fitting_data_from_validcases, get_residuals_value\n)\nfrom DELPHI_utils_V4_dynamic import get_bounds_params_from_pastparams\nfrom DELPHI_params_V4 import (\n fitting_start_date,\n default_parameter_list,\n dict_default_reinit_parameters,\n dict_default_reinit_lower_bounds,\n dict_default_reinit_upper_bounds,\n default_upper_bound,\n default_lower_bound,\n percentage_drift_upper_bound,\n percentage_drift_lower_bound,\n percentage_drift_upper_bound_annealing,\n percentage_drift_lower_bound_annealing,\n default_upper_bound_annealing,\n default_lower_bound_annealing,\n default_lower_bound_t_jump,\n default_parameter_t_jump,\n default_upper_bound_t_jump,\n default_lower_bound_std_normal,\n default_parameter_std_normal,\n default_upper_bound_std_normal,\n default_bounds_params,\n validcases_threshold,\n IncubeD,\n RecoverID,\n RecoverHD,\n DetectD,\n VentilatedD,\n default_maxT,\n p_v,\n p_d,\n p_h,\n max_iter,\n)\n\n## Initializing Global Variables ##########################################################################\nwith open(\"config.yml\", \"r\") as ymlfile:\n CONFIG = yaml.load(ymlfile, Loader=yaml.BaseLoader)\nCONFIG_FILEPATHS = CONFIG[\"filepaths\"]\ntime_beginning = time.time()\nyesterday = \"\".join(str(datetime.now().date() - timedelta(days=1)).split(\"-\"))\nyesterday_logs_filename = \"\".join(\n (str(datetime.now().date() - timedelta(days=1)) + f\"_{datetime.now().hour}H{datetime.now().minute}M\").split(\"-\")\n)\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\n '--run_config', '-rc', type=str, required=True,\n help=\"specify relative path for the run config YAML file\"\n)\narguments = parser.parse_args()\nwith open(arguments.run_config, \"r\") as ymlfile:\n RUN_CONFIG = yaml.load(ymlfile, Loader=yaml.BaseLoader)\n\nUSER_RUNNING = RUN_CONFIG[\"arguments\"][\"user\"]\nOPTIMIZER = RUN_CONFIG[\"arguments\"][\"optimizer\"]\nGET_CONFIDENCE_INTERVALS = bool(int(RUN_CONFIG[\"arguments\"][\"confidence_intervals\"]))\nSAVE_TO_WEBSITE = bool(int(RUN_CONFIG[\"arguments\"][\"website\"]))\nSAVE_SINCE100_CASES = bool(int(RUN_CONFIG[\"arguments\"][\"since100case\"]))\nPATH_TO_FOLDER_DANGER_MAP = CONFIG_FILEPATHS[\"danger_map\"][USER_RUNNING]\nPATH_TO_DATA_SANDBOX = CONFIG_FILEPATHS[\"data_sandbox\"][USER_RUNNING]\nPATH_TO_WEBSITE_PREDICTED = CONFIG_FILEPATHS[\"website\"][USER_RUNNING]\npast_prediction_date = \"\".join(str(datetime.now().date() - timedelta(days=14)).split(\"-\"))\n#############################################################################################################\n\ndef solve_and_predict_area(\n tuple_area_state_: tuple,\n yesterday_: str,\n past_parameters_: pd.DataFrame,\n popcountries: pd.DataFrame,\n startT: str = None, # added to change optimmization start date\n):\n \"\"\"\n Parallelizable version of the fitting & solving process for DELPHI V4, this function is called with multiprocessing\n :param tuple_area_: tuple 
corresponding to (continent, country, province)\n :param yesterday_: string corresponding to the date from which the model will read the previous parameters. The\n format has to be 'YYYYMMDD'\n :param past_parameters_: Parameters from yesterday_ used as a starting point for the fitting process\n :param popcountries: DataFrame containing population information for all countries and provinces\n :startT: string for the date from when the pandemic will be modelled (format should be 'YYYY-MM-DD')\n :return: either None if can't optimize (either less than 100 cases or less than 7 days with 100 cases) or a tuple\n with 3 dataframes related to that tuple_area_ (parameters df, predictions since yesterday_+1, predictions since\n first day with 100 cases) and a scipy.optimize object (OptimizeResult) that contains the predictions for all\n 16 states of the model (and some other information that isn't used)\n \"\"\"\n time_entering = time.time()\n continent, country, province, initial_state = tuple_area_state_\n country_sub = country.replace(\" \", \"_\")\n province_sub = province.replace(\" \", \"_\")\n print(f\"starting to predict for {continent}, {country}, {province}\")\n if os.path.exists(PATH_TO_FOLDER_DANGER_MAP + f\"processed/Global/Cases_{country_sub}_{province_sub}.csv\"):\n totalcases = pd.read_csv(\n PATH_TO_FOLDER_DANGER_MAP + f\"processed/Global/Cases_{country_sub}_{province_sub}.csv\"\n )\n if totalcases.day_since100.max() < 0:\n logging.warning(\n f\"Not enough cases (less than 100) for Continent={continent}, Country={country} and Province={province}\"\n )\n return None\n\n if past_parameters_ is not None:\n parameter_list_total = past_parameters_[\n (past_parameters_.Country == country)\n & (past_parameters_.Province == province)\n ].reset_index(drop=True)\n if len(parameter_list_total) > 0:\n parameter_list_line = parameter_list_total.iloc[-1, :].values.tolist()\n parameter_list = parameter_list_line[5:]\n parameter_list, bounds_params = get_bounds_params_from_pastparams(\n optimizer=OPTIMIZER,\n parameter_list=parameter_list,\n dict_default_reinit_parameters=dict_default_reinit_parameters,\n percentage_drift_lower_bound=percentage_drift_lower_bound,\n default_lower_bound=default_lower_bound,\n dict_default_reinit_lower_bounds=dict_default_reinit_lower_bounds,\n percentage_drift_upper_bound=percentage_drift_upper_bound,\n default_upper_bound=default_upper_bound,\n dict_default_reinit_upper_bounds=dict_default_reinit_upper_bounds,\n percentage_drift_lower_bound_annealing=percentage_drift_lower_bound_annealing,\n default_lower_bound_annealing=default_lower_bound_annealing,\n percentage_drift_upper_bound_annealing=percentage_drift_upper_bound_annealing,\n default_upper_bound_annealing=default_upper_bound_annealing,\n default_lower_bound_t_jump=default_lower_bound_t_jump,\n default_upper_bound_t_jump=default_upper_bound_t_jump,\n default_parameter_t_jump=default_parameter_t_jump,\n default_lower_bound_std_normal=default_lower_bound_std_normal,\n default_upper_bound_std_normal=default_upper_bound_std_normal,\n default_parameter_std_normal=default_parameter_std_normal\n )\n start_date = pd.to_datetime(parameter_list_line[3])\n bounds_params = tuple(bounds_params)\n else:\n # Otherwise use established lower/upper bounds\n parameter_list = default_parameter_list\n bounds_params = default_bounds_params\n start_date = pd.to_datetime(totalcases.loc[totalcases.day_since100 == 0, \"date\"].iloc[-1])\n else:\n # Otherwise use established lower/upper bounds\n parameter_list = 
default_parameter_list\n bounds_params = default_bounds_params\n start_date = pd.to_datetime(totalcases.loc[totalcases.day_since100 == 0, \"date\"].iloc[-1])\n\n if startT is not None:\n input_start_date = pd.to_datetime(startT)\n if input_start_date > start_date:\n delta_days = (input_start_date - start_date).days\n parameter_list[9] = parameter_list[9] - delta_days\n bounds_params_list = list(bounds_params)\n bounds_params_list[9] = (bounds_params_list[9][0]-delta_days, bounds_params_list[9][1]-delta_days)\n bounds_params = tuple(bounds_params_list)\n start_date = input_start_date\n validcases = totalcases[\n (totalcases.date >= str(start_date.date()))\n & (totalcases.date <= str((pd.to_datetime(yesterday_) + timedelta(days=1)).date()))\n ][[\"day_since100\", \"case_cnt\", \"death_cnt\"]].reset_index(drop=True)\n else:\n validcases = totalcases[\n (totalcases.day_since100 >= 0)\n & (totalcases.date <= str((pd.to_datetime(yesterday_) + timedelta(days=1)).date()))\n ][[\"day_since100\", \"case_cnt\", \"death_cnt\"]].reset_index(drop=True)\n # Now we start the modeling part:\n if len(validcases) <= validcases_threshold:\n logging.warning(\n f\"Not enough historical data (less than a week)\"\n + f\"for Continent={continent}, Country={country} and Province={province}\"\n )\n return None\n else:\n PopulationT = popcountries[\n (popcountries.Country == country) & (popcountries.Province == province)\n ].pop2016.iloc[-1]\n N = PopulationT\n PopulationI = validcases.loc[0, \"case_cnt\"]\n PopulationD = validcases.loc[0, \"death_cnt\"]\n if initial_state is not None:\n R_0 = initial_state[9]\n else:\n R_0 = validcases.loc[0, \"death_cnt\"] * 5 if validcases.loc[0, \"case_cnt\"] - validcases.loc[0, \"death_cnt\"]> validcases.loc[0, \"death_cnt\"] * 5 else 0\n bounds_params_list = list(bounds_params)\n bounds_params_list[-1] = (0.999,1)\n bounds_params = tuple(bounds_params_list)\n cases_t_14days = totalcases[totalcases.date >= str(start_date- pd.Timedelta(14, 'D'))]['case_cnt'].values[0]\n deaths_t_9days = totalcases[totalcases.date >= str(start_date - pd.Timedelta(9, 'D'))]['death_cnt'].values[0]\n R_upperbound = validcases.loc[0, \"case_cnt\"] - validcases.loc[0, \"death_cnt\"]\n R_heuristic = cases_t_14days - deaths_t_9days\n if int(R_0*p_d) >= R_upperbound and R_heuristic >= R_upperbound:\n logging.error(f\"Initial conditions for PopulationR too high for {country}-{province}, on {startT}\")\n \"\"\"\n Fixed Parameters based on meta-analysis:\n p_h: Hospitalization Percentage\n RecoverHD: Average Days until Recovery\n VentilationD: Number of Days on Ventilation for Ventilated Patients\n maxT: Maximum # of Days Modeled\n p_d: Percentage of True Cases Detected\n p_v: Percentage of Hospitalized Patients Ventilated,\n balance: Regularization coefficient between cases and deaths\n \"\"\"\n maxT = (default_maxT - start_date).days + 1\n t_cases = validcases[\"day_since100\"].tolist() - validcases.loc[0, \"day_since100\"]\n balance, balance_total_difference, cases_data_fit, deaths_data_fit, weights = create_fitting_data_from_validcases(validcases)\n GLOBAL_PARAMS_FIXED = (N, R_upperbound, R_heuristic, R_0, PopulationD, PopulationI, p_d, p_h, p_v)\n\n def model_covid(\n t, x, alpha, days, r_s, r_dth, p_dth, r_dthdecay, k1, k2, jump, t_jump, std_normal, k3\n ) -> list:\n \"\"\"\n SEIR based model with 16 distinct states, taking into account undetected, deaths, hospitalized and\n recovered, and using an ArcTan government response curve, corrected with a Gaussian jump in case of\n a resurgence in cases\n 
:param t: time step\n :param x: set of all the states in the model (here, 16 of them)\n :param alpha: Infection rate\n :param days: Median day of action (used in the arctan governmental response)\n :param r_s: Median rate of action (used in the arctan governmental response)\n :param r_dth: Rate of death\n :param p_dth: Initial mortality percentage\n :param r_dthdecay: Rate of decay of mortality percentage\n :param k1: Internal parameter 1 (used for initial conditions)\n :param k2: Internal parameter 2 (used for initial conditions)\n :param jump: Amplitude of the Gaussian jump modeling the resurgence in cases\n :param t_jump: Time where the Gaussian jump will reach its maximum value\n :param std_normal: Standard Deviation of the Gaussian jump (~ time span of the resurgence in cases)\n :param k3: Internal parameter 2 (used for initial conditions)\n :return: predictions for all 16 states, which are the following\n [0 S, 1 E, 2 I, 3 UR, 4 DHR, 5 DQR, 6 UD, 7 DHD, 8 DQD, 9 R, 10 D, 11 TH, 12 DVR,13 DVD, 14 DD, 15 DT]\n \"\"\"\n r_i = np.log(2) / IncubeD # Rate of infection leaving incubation phase\n r_d = np.log(2) / DetectD # Rate of detection\n r_ri = np.log(2) / RecoverID # Rate of recovery not under infection\n r_rh = np.log(2) / RecoverHD # Rate of recovery under hospitalization\n r_rv = np.log(2) / VentilatedD # Rate of recovery under ventilation\n gamma_t = (\n (2 / np.pi) * np.arctan(-(t - days) / 20 * r_s) + 1\n + jump * np.exp(-(t - t_jump) ** 2 / (2 * std_normal ** 2))\n )\n p_dth_mod = (2 / np.pi) * (p_dth - 0.001) * (np.arctan(-t / 20 * r_dthdecay) + np.pi / 2) + 0.001\n assert (\n len(x) == 16\n ), f\"Too many input variables, got {len(x)}, expected 16\"\n S, E, I, AR, DHR, DQR, AD, DHD, DQD, R, D, TH, DVR, DVD, DD, DT = x\n # Equations on main variables\n dSdt = -alpha * gamma_t * S * I / N\n dEdt = alpha * gamma_t * S * I / N - r_i * E\n dIdt = r_i * E - r_d * I\n dARdt = r_d * (1 - p_dth_mod) * (1 - p_d) * I - r_ri * AR\n dDHRdt = r_d * (1 - p_dth_mod) * p_d * p_h * I - r_rh * DHR\n dDQRdt = r_d * (1 - p_dth_mod) * p_d * (1 - p_h) * I - r_ri * DQR\n dADdt = r_d * p_dth_mod * (1 - p_d) * I - r_dth * AD\n dDHDdt = r_d * p_dth_mod * p_d * p_h * I - r_dth * DHD\n dDQDdt = r_d * p_dth_mod * p_d * (1 - p_h) * I - r_dth * DQD\n dRdt = r_ri * (AR + DQR) + r_rh * DHR\n dDdt = r_dth * (AD + DQD + DHD)\n # Helper states (usually important for some kind of output)\n dTHdt = r_d * p_d * p_h * I\n dDVRdt = r_d * (1 - p_dth_mod) * p_d * p_h * p_v * I - r_rv * DVR\n dDVDdt = r_d * p_dth_mod * p_d * p_h * p_v * I - r_dth * DVD\n dDDdt = r_dth * (DHD + DQD)\n dDTdt = r_d * p_d * I\n return [\n dSdt, dEdt, dIdt, dARdt, dDHRdt, dDQRdt, dADdt, dDHDdt,\n dDQDdt, dRdt, dDdt, dTHdt, dDVRdt, dDVDdt, dDDdt, dDTdt,\n ]\n\n def residuals_totalcases(params) -> float:\n \"\"\"\n Function that makes sure the parameters are in the right range during the fitting process and computes\n the loss function depending on the optimizer that has been chosen for this run as a global variable\n :param params: currently fitted values of the parameters during the fitting process\n :return: the value of the loss function as a float that is optimized against (in our case, minimized)\n \"\"\"\n # Variables Initialization for the ODE system\n alpha, days, r_s, r_dth, p_dth, r_dthdecay, k1, k2, jump, t_jump, std_normal, k3 = params\n # Force params values to stay in a certain range during the optimization process with re-initializations\n params = (\n max(alpha, dict_default_reinit_parameters[\"alpha\"]),\n days,\n max(r_s, 
dict_default_reinit_parameters[\"r_s\"]),\n max(min(r_dth, 1), dict_default_reinit_parameters[\"r_dth\"]),\n max(min(p_dth, 1), dict_default_reinit_parameters[\"p_dth\"]),\n max(r_dthdecay, dict_default_reinit_parameters[\"r_dthdecay\"]),\n max(k1, dict_default_reinit_parameters[\"k1\"]),\n max(k2, dict_default_reinit_parameters[\"k2\"]),\n max(jump, dict_default_reinit_parameters[\"jump\"]),\n max(t_jump, dict_default_reinit_parameters[\"t_jump\"]),\n max(std_normal, dict_default_reinit_parameters[\"std_normal\"]),\n max(k3, dict_default_reinit_lower_bounds[\"k3\"]),\n )\n\n x_0_cases = get_initial_conditions(\n params_fitted=params, global_params_fixed=GLOBAL_PARAMS_FIXED\n )\n x_sol_total = solve_ivp(\n fun=model_covid,\n y0=x_0_cases,\n t_span=[t_cases[0], t_cases[-1]],\n t_eval=t_cases,\n args=tuple(params),\n )\n x_sol = x_sol_total.y\n # weights = list(range(1, len(cases_data_fit) + 1))\n # weights = [(x/len(cases_data_fit))**2 for x in weights]\n if x_sol_total.status == 0:\n residuals_value = get_residuals_value(\n optimizer=OPTIMIZER,\n balance=balance,\n x_sol=x_sol,\n cases_data_fit=cases_data_fit,\n deaths_data_fit=deaths_data_fit,\n weights=weights,\n balance_total_difference=balance_total_difference \n )\n else:\n residuals_value = 1e16\n return residuals_value\n\n if OPTIMIZER in [\"tnc\", \"trust-constr\"]:\n output = minimize(\n residuals_totalcases,\n parameter_list,\n method=OPTIMIZER,\n bounds=bounds_params,\n options={\"maxiter\": max_iter},\n )\n elif OPTIMIZER == \"annealing\":\n output = dual_annealing(\n residuals_totalcases, x0=parameter_list, bounds=bounds_params\n )\n print(f\"Parameter bounds are {bounds_params}\")\n print(f\"Parameter list is {parameter_list}\")\n else:\n raise ValueError(\"Optimizer not in 'tnc', 'trust-constr' or 'annealing' so not supported\")\n\n if (OPTIMIZER in [\"tnc\", \"trust-constr\"]) or (OPTIMIZER == \"annealing\" and output.success):\n best_params = output.x\n t_predictions = [i for i in range(maxT)]\n \n def solve_best_params_and_predict(optimal_params):\n # Variables Initialization for the ODE system\n alpha, days, r_s, r_dth, p_dth, r_dthdecay, k1, k2, jump, t_jump, std_normal, k3 = optimal_params\n optimal_params = [\n max(alpha, dict_default_reinit_parameters[\"alpha\"]),\n days,\n max(r_s, dict_default_reinit_parameters[\"r_s\"]),\n max(min(r_dth, 1), dict_default_reinit_parameters[\"r_dth\"]),\n max(min(p_dth, 1), dict_default_reinit_parameters[\"p_dth\"]),\n max(r_dthdecay, dict_default_reinit_parameters[\"r_dthdecay\"]),\n max(k1, dict_default_reinit_parameters[\"k1\"]),\n max(k2, dict_default_reinit_parameters[\"k2\"]),\n max(jump, dict_default_reinit_parameters[\"jump\"]),\n max(t_jump, dict_default_reinit_parameters[\"t_jump\"]),\n max(std_normal, dict_default_reinit_parameters[\"std_normal\"]),\n max(k3, dict_default_reinit_lower_bounds[\"k3\"]),\n ]\n x_0_cases = get_initial_conditions(\n params_fitted=optimal_params,\n global_params_fixed=GLOBAL_PARAMS_FIXED,\n )\n x_sol_best = solve_ivp(\n fun=model_covid,\n y0=x_0_cases,\n t_span=[t_predictions[0], t_predictions[-1]],\n t_eval=t_predictions,\n args=tuple(optimal_params),\n ).y\n return x_sol_best\n\n x_sol_final = solve_best_params_and_predict(best_params)\n data_creator = DELPHIDataCreator(\n x_sol_final=x_sol_final,\n date_day_since100=start_date,\n best_params=best_params,\n continent=continent,\n country=country,\n province=province,\n testing_data_included=False,\n )\n mape_data = get_mape_data_fitting(\n cases_data_fit=cases_data_fit, 
deaths_data_fit=deaths_data_fit, x_sol_final=x_sol_final\n )\n \n logging.info(f\"In-Sample MAPE Last 15 Days {country, province}: {round(mape_data, 3)} %\")\n logging.debug(f\"Best fitted parameters for {country, province}: {best_params}\")\n df_parameters_area = data_creator.create_dataset_parameters(mape_data)\n # Creating the datasets for predictions of this area\n if GET_CONFIDENCE_INTERVALS:\n df_predictions_since_today_area, df_predictions_since_100_area = (\n data_creator.create_datasets_with_confidence_intervals(\n cases_data_fit, deaths_data_fit,\n past_prediction_file=PATH_TO_FOLDER_DANGER_MAP + f\"predicted/Global_V4_{past_prediction_date}.csv\",\n past_prediction_date=str(pd.to_datetime(past_prediction_date).date()))\n )\n else:\n df_predictions_since_today_area, df_predictions_since_100_area = data_creator.create_datasets_predictions()\n logging.info(\n f\"Finished predicting for Continent={continent}, Country={country} and Province={province} in \"\n + f\"{round(time.time() - time_entering, 2)} seconds\"\n )\n logging.info(\"--------------------------------------------------------------------------------------------\")\n return (\n df_parameters_area,\n df_predictions_since_today_area,\n df_predictions_since_100_area,\n output,\n )\n else:\n return None\n else: # file for that tuple (continent, country, province) doesn't exist in processed files\n logging.info(\n f\"Skipping Continent={continent}, Country={country} and Province={province} as no processed file available\"\n )\n return None\n\n\nif __name__ == \"__main__\":\n assert USER_RUNNING in CONFIG_FILEPATHS[\"delphi_repo\"].keys(), f\"User {USER_RUNNING} not referenced in config.yml\"\n if not os.path.exists(CONFIG_FILEPATHS[\"logs\"][USER_RUNNING] + \"model_fitting/\"):\n os.mkdir(CONFIG_FILEPATHS[\"logs\"][USER_RUNNING] + \"model_fitting/\")\n\n logger_filename = (\n CONFIG_FILEPATHS[\"logs\"][USER_RUNNING] +\n f\"model_fitting/delphi_model_V4_{yesterday_logs_filename}_{OPTIMIZER}.log\"\n )\n logging.basicConfig(\n filename=logger_filename,\n level=logging.DEBUG,\n format=\"%(asctime)s | %(levelname)s | %(message)s\",\n datefmt=\"%m-%d-%Y %I:%M:%S %p\",\n )\n logging.info(\n f\"The user is {USER_RUNNING}, the chosen optimizer for this run was {OPTIMIZER} and \" +\n f\"generation of Confidence Intervals' flag is {GET_CONFIDENCE_INTERVALS}\"\n )\n popcountries = pd.read_csv(\n PATH_TO_FOLDER_DANGER_MAP + f\"processed/Global/Population_Global.csv\"\n )\n popcountries[\"tuple_area\"] = list(zip(popcountries.Continent, popcountries.Country, popcountries.Province))\n\n if not os.path.exists(PATH_TO_DATA_SANDBOX + f\"predicted/raw_predictions/Predicted_model_state_V4_{fitting_start_date}.csv\"):\n logging.error(f\"Initial model state file not found, can not train from {fitting_start_date}. 
Use model_V3 to train on entire data.\")\n raise FileNotFoundError\n df_initial_states = pd.read_csv(\n PATH_TO_DATA_SANDBOX + f\"predicted/raw_predictions/Predicted_model_state_V4_{fitting_start_date}.csv\"\n )\n\n try:\n past_parameters = pd.read_csv(\n PATH_TO_FOLDER_DANGER_MAP\n + f\"predicted/Parameters_Global_V4_{yesterday}.csv\"\n )\n print(PATH_TO_FOLDER_DANGER_MAP+ f\"predicted/Parameters_Global_V4_{yesterday}.csv\")\n except:\n past_parameters = None\n\n ### Fitting the Model ###\n # Initalizing lists of the different dataframes that will be concatenated in the end\n list_df_global_predictions_since_today = []\n list_df_global_predictions_since_100_cases = []\n list_df_global_parameters = []\n obj_value = 0\n solve_and_predict_area_partial = partial(\n solve_and_predict_area,\n yesterday_=yesterday,\n past_parameters_=past_parameters,\n popcountries=popcountries,\n startT=fitting_start_date\n )\n n_cpu = psutil.cpu_count(logical = False) - 2\n logging.info(f\"Number of CPUs found and used in this run: {n_cpu}\")\n list_tuples = [(\n r.continent, \n r.country, \n r.province, \n r.values[:16] if not pd.isna(r.S) else None\n ) for _, r in df_initial_states.iterrows()]\n\n # list_tuples = [t for t in list_tuples if t[1] in [\"Germany\", \"Poland\"]]\n # , \"Poland\", \"Belgium\", \"France\", \"Greece\"]]\n\n logging.info(f\"Number of areas to be fitted in this run: {len(list_tuples)}\")\n with mp.Pool(n_cpu) as pool:\n for result_area in tqdm(\n pool.map_async(solve_and_predict_area_partial, list_tuples).get(),\n total=len(list_tuples),\n ):\n if result_area is not None:\n (\n df_parameters_area,\n df_predictions_since_today_area,\n df_predictions_since_100_area,\n output,\n ) = result_area\n obj_value = obj_value + output.fun\n # Then we add it to the list of df to be concatenated to update the tracking df\n list_df_global_parameters.append(df_parameters_area)\n list_df_global_predictions_since_today.append(df_predictions_since_today_area)\n list_df_global_predictions_since_100_cases.append(df_predictions_since_100_area)\n else:\n continue\n logging.info(\"Finished the Multiprocessing for all areas\")\n pool.close()\n pool.join()\n\n # Appending parameters, aggregations per country, per continent, and for the world\n # for predictions today & since 100\n today_date_str = \"\".join(str(datetime.now().date()).split(\"-\"))\n df_global_parameters = pd.concat(list_df_global_parameters).sort_values(\n [\"Country\", \"Province\"]\n ).reset_index(drop=True)\n df_global_predictions_since_today = pd.concat(list_df_global_predictions_since_today)\n df_global_predictions_since_today = DELPHIAggregations.append_all_aggregations(\n df_global_predictions_since_today\n )\n df_global_predictions_since_100_cases = pd.concat(list_df_global_predictions_since_100_cases)\n if GET_CONFIDENCE_INTERVALS:\n df_global_predictions_since_today, df_global_predictions_since_100_cases = DELPHIAggregations.append_all_aggregations_cf(\n df_global_predictions_since_100_cases,\n past_prediction_file=PATH_TO_FOLDER_DANGER_MAP + f\"predicted/Global_V4_{past_prediction_date}.csv\",\n past_prediction_date=str(pd.to_datetime(past_prediction_date).date())\n )\n else:\n df_global_predictions_since_100_cases = DELPHIAggregations.append_all_aggregations(\n df_global_predictions_since_100_cases\n )\n\n logger = logging.getLogger(\"V4Logger\")\n delphi_data_saver = DELPHIDataSaver(\n path_to_folder_danger_map=PATH_TO_FOLDER_DANGER_MAP,\n path_to_website_predicted=PATH_TO_WEBSITE_PREDICTED,\n 
df_global_parameters=df_global_parameters,\n df_global_predictions_since_today=df_global_predictions_since_today,\n df_global_predictions_since_100_cases=df_global_predictions_since_100_cases,\n logger=logger\n )\n delphi_data_saver.save_all_datasets(optimizer=OPTIMIZER, save_since_100_cases=SAVE_SINCE100_CASES, website=SAVE_TO_WEBSITE)\n logging.info(\n f\"Exported all 3 datasets to website & danger_map repositories, \"\n + f\"total runtime was {round((time.time() - time_beginning)/60, 2)} minutes\"\n )"
] |
[
[
"pandas.concat",
"pandas.read_csv",
"pandas.to_datetime",
"numpy.log",
"numpy.arctan",
"pandas.Timedelta",
"pandas.isna",
"scipy.optimize.minimize",
"scipy.optimize.dual_annealing",
"numpy.exp"
]
] |
cgarciae/spektral
|
[
"34d71510791091754b6b21a01b6b91fc3e92d84c"
] |
[
"spektral/datasets/tud.py"
] |
[
"import os\nimport shutil\nimport zipfile\n\nimport networkx as nx\nimport numpy as np\nimport pandas as pd\nimport requests\nfrom sklearn.preprocessing import OneHotEncoder, StandardScaler\n\nfrom spektral.utils import nx_to_numpy\n\nDATASET_URL = 'https://ls11-www.cs.tu-dortmund.de/people/morris/graphkerneldatasets'\nDATASET_CLEAN_URL = 'https://raw.githubusercontent.com/nd7141/graph_datasets/master/datasets'\nDATA_PATH = os.path.expanduser('~/.spektral/datasets/')\nAVAILABLE_DATASETS = [\n d[:-4]\n for d in pd.read_html(DATASET_URL)[0].Name[2:-1].values.tolist()\n]\n\n\ndef load_data(dataset_name, normalize_features=None, clean=False):\n \"\"\"\n Loads one of the Benchmark Data Sets for Graph Kernels from TU Dortmund\n ([link](https://ls11-www.cs.tu-dortmund.de/staff/morris/graphkerneldatasets)).\n The node features are computed by concatenating the following features for\n each node:\n\n - node attributes, if available, normalized as specified in `normalize_features`;\n - clustering coefficient, normalized with z-score;\n - node degrees, normalized as specified in `normalize_features`;\n - node labels, if available, one-hot encoded.\n :param dataset_name: name of the dataset to load (see `spektral.datasets.tud.AVAILABLE_DATASETS`).\n :param normalize_features: `None`, `'zscore'` or `'ohe'`, how to normalize\n the node features (only works for node attributes).\n :param clean: if True, return a version of the dataset with no isomorphic\n graphs.\n :return:\n - a list of adjacency matrices;\n - a list of node feature matrices;\n - a numpy array containing the one-hot encoded targets.\n \"\"\"\n if dataset_name not in AVAILABLE_DATASETS:\n raise ValueError('Available datasets: {}'.format(AVAILABLE_DATASETS))\n\n if clean:\n dataset_name += '_clean'\n if not os.path.exists(DATA_PATH + dataset_name):\n _download_data(dataset_name)\n\n # Read data\n nx_graphs, y = _read_graphs(dataset_name)\n\n # Preprocessing\n y = np.array(y)[..., None]\n y = OneHotEncoder(sparse=False, categories='auto').fit_transform(y)\n\n # Get node attributes\n try:\n A, X_attr, _ = nx_to_numpy(nx_graphs, nf_keys=['attributes'], auto_pad=False)\n X_attr = _normalize_node_features(X_attr, normalize_features)\n except KeyError:\n print('Featureless nodes')\n A, X_attr, _ = nx_to_numpy(nx_graphs, auto_pad=False)\n\n # Get clustering coefficients (always zscore norm)\n clustering_coefficients = [np.array(list(nx.clustering(g).values()))[..., None] for g in nx_graphs]\n clustering_coefficients = _normalize_node_features(clustering_coefficients, 'zscore')\n\n # Get node degrees\n node_degrees = np.array([np.sum(_, axis=-1, keepdims=True) for _ in A])\n node_degrees = _normalize_node_features(node_degrees, 'zscore')\n\n # Get node labels\n try:\n _, X_labs, _ = nx_to_numpy(nx_graphs, nf_keys=['label'], auto_pad=False)\n X_labs = _normalize_node_features(X_labs, 'ohe')\n except KeyError:\n print('Label-less nodes')\n X_labs = None\n\n # Concatenate features\n Xs = [node_degrees, clustering_coefficients]\n if X_attr is not None:\n Xs.append(X_attr)\n if X_labs is not None:\n Xs.append(X_labs)\n X = [np.concatenate(x_, axis=-1) for x_ in zip(*Xs)]\n X = np.array(X)\n\n return A, X, y\n\n\ndef _read_graphs(dataset_name):\n file_prefix = DATA_PATH + dataset_name + '/' + dataset_name\n with open(file_prefix + \"_graph_indicator.txt\", \"r\") as f:\n graph_indicator = [int(i) - 1 for i in list(f)]\n\n # Nodes\n num_graphs = max(graph_indicator)\n node_indices = []\n offset = []\n c = 0\n\n for i in range(num_graphs + 1):\n 
offset.append(c)\n c_i = graph_indicator.count(i)\n node_indices.append((c, c + c_i - 1))\n c += c_i\n\n graph_list = []\n vertex_list = []\n for i in node_indices:\n g = nx.Graph(directed=False)\n vertex_list_g = []\n for j in range(i[1] - i[0] + 1):\n vertex_list_g.append(g.add_node(j))\n\n graph_list.append(g)\n vertex_list.append(vertex_list_g)\n\n # Edges\n with open(file_prefix + \"_A.txt\", \"r\") as f:\n edges = [i.strip().split(',') for i in list(f)]\n\n edges = [(int(e[0].strip()) - 1, int(e[1].strip()) - 1) for e in edges]\n\n edge_indicator = []\n edge_list = []\n for e in edges:\n g_id = graph_indicator[e[0]]\n edge_indicator.append(g_id)\n g = graph_list[g_id]\n off = offset[g_id]\n\n # Avoid multigraph\n edge_list.append(g.add_edge(e[0] - off, e[1] - off))\n\n # Node labels\n if os.path.exists(file_prefix + \"_node_labels.txt\"):\n with open(file_prefix + \"_node_labels.txt\", \"r\") as f:\n node_labels = [int(i.strip()) for i in list(f)]\n\n i = 0\n for g in graph_list:\n for n in g.nodes():\n g.nodes[n]['label'] = node_labels[i]\n i += 1\n\n # Node Attributes\n if os.path.exists(file_prefix + \"_node_attributes.txt\"):\n with open(file_prefix + \"_node_attributes.txt\", \"r\") as f:\n node_attributes = [map(float, i.strip().split(',')) for i in list(f)]\n i = 0\n for g in graph_list:\n for n in g.nodes():\n g.nodes[n]['attributes'] = list(node_attributes[i])\n i += 1\n\n # Classes\n with open(file_prefix + \"_graph_labels.txt\", \"r\") as f:\n classes = [int(float(i.strip())) for i in list(f)]\n\n return graph_list, classes\n\n\ndef _download_data(dataset_name):\n print('Dowloading ' + dataset_name + ' dataset.')\n if dataset_name.endswith('_clean'):\n true_name = dataset_name[:-6]\n url = DATASET_CLEAN_URL\n else:\n true_name = dataset_name\n url = DATASET_URL\n\n data_url = '{}/{}.zip'.format(url, true_name)\n req = requests.get(data_url)\n\n os.makedirs(DATA_PATH, exist_ok=True)\n with open(DATA_PATH + dataset_name + '.zip', 'wb') as out_file:\n out_file.write(req.content)\n with zipfile.ZipFile(DATA_PATH + dataset_name + '.zip', 'r') as zip_ref:\n zip_ref.extractall(DATA_PATH + dataset_name + '/')\n os.remove(DATA_PATH + dataset_name + '.zip')\n\n subfolder = os.path.join(DATA_PATH, dataset_name, true_name)\n parentfolder = os.path.join(DATA_PATH, dataset_name)\n for filename in os.listdir(subfolder):\n try:\n suffix = filename.split(true_name)[1]\n except IndexError:\n # Probably the README\n continue\n shutil.move(\n os.path.join(subfolder, filename),\n os.path.join(parentfolder, dataset_name + suffix)\n )\n shutil.rmtree(subfolder)\n\n\ndef _normalize_node_features(feat_list, norm=None):\n \"\"\"\n Apply one-hot encoding or z-score to a list of node features\n \"\"\"\n if norm == 'ohe':\n fnorm = OneHotEncoder(sparse=False, categories='auto')\n elif norm == 'zscore':\n fnorm = StandardScaler()\n else:\n return feat_list\n fnorm.fit(np.vstack(feat_list))\n feat_list = [fnorm.transform(feat_.astype(np.float32)) for feat_ in feat_list]\n return feat_list\n"
] |
[
[
"sklearn.preprocessing.OneHotEncoder",
"pandas.read_html",
"numpy.concatenate",
"sklearn.preprocessing.StandardScaler",
"numpy.array",
"numpy.sum",
"numpy.vstack"
]
] |
f3ss1/WildHackPublic
|
[
"7b7d85b116a60ff8143eb1a6e53afb1dbd30ceb7"
] |
[
"popularity.py"
] |
[
"import pickle\n\nfrom natasha import (\n Doc,\n Segmenter,\n NewsEmbedding,\n NewsMorphTagger,\n MorphVocab\n)\nfrom pandas import read_csv\nfrom navec import Navec\nfrom tqdm import tqdm\n\nPATH = 'navec_hudlit_v1_12B_500K_300d_100q.tar' # Name of file for Navec\n\nNAME = 'popularity'\n\n# Natasha Setup.\n\nsegm = Segmenter()\n_emb = NewsEmbedding()\nmorph_tagger = NewsMorphTagger(_emb)\nmorph_vocab = MorphVocab()\n\n\ndef query_to_noun(query: str) -> list[str]:\n doc = Doc(query.lower())\n\n doc.segment(segmenter=segm)\n\n doc.tag_morph(morph_tagger)\n\n res_arr = []\n for token in doc.tokens:\n if token.pos == 'NOUN':\n token.lemmatize(morph_vocab)\n res_arr.append(token.lemma)\n\n return res_arr\n\n\n# Navec setup.\n\nnavec = Navec.load(PATH)\n\n# Data load.\n\ndata = read_csv('query_popularity.csv')\ndata.dropna(inplace=True)\ndata.reset_index(inplace=True)\n\npop_dict: dict[str, float] = {}\nnumber_dict: dict[str, int] = {}\n\nfor i in tqdm(range(data.shape[0])):\n text = data.loc[i, 'query']\n text_popular = data.loc[i, 'query_popularity']\n noun_list = query_to_noun(text)\n for noun in noun_list:\n if noun in pop_dict:\n pop_dict[noun] += text_popular\n number_dict[noun] += 1\n else:\n pop_dict[noun] = text_popular\n number_dict[noun] = 1\n\nfor key in tqdm(pop_dict.keys()):\n pop_dict[key] /= number_dict[key]\n\n# Dump.\n\nwith open(NAME + '.pkl', 'wb') as f:\n pickle.dump(pop_dict, f, pickle.HIGHEST_PROTOCOL)\n"
] |
[
[
"pandas.read_csv"
]
] |
geissdoerfer/polyprox
|
[
"209d75318368c4df967581910c079ed1092c96bf"
] |
[
"setup.py"
] |
[
"from setuptools import setup, Extension\nfrom setuptools.command.build_ext import build_ext as _build_ext\n\n\nclass build_ext(_build_ext):\n def finalize_options(self):\n _build_ext.finalize_options(self)\n __builtins__.__NUMPY_SETUP__ = False\n import numpy\n\n self.include_dirs.append(numpy.get_include())\n\n\nmod_algs = Extension(\"algorithms\", sources=[\"src/algorithms.c\"])\nsetup(\n name=\"polyprox\",\n version=\"0.4\",\n description=\"Polygonal curve approximation tools\",\n author=\"Kai Geissdoerfer\",\n author_email=\"[email protected]\",\n url=\"https://github.com/geissdoerfer/polyprox\",\n ext_modules=[mod_algs],\n packages=[\"polyprox\"],\n setup_requires=[\"numpy\", \"pytest-runner\"],\n cmdclass={\"build_ext\": build_ext},\n tests_require=[\"pytest>=3.9\", \"rdp\"],\n)\n"
] |
[
[
"numpy.get_include"
]
] |
AWNystrom/Kernels
|
[
"8f6297dad159d36081ee1d2067dc57b8bde55189"
] |
[
"gram_matrix.py"
] |
[
"from numpy import array, exp\nfrom numpy.random import choice\nfrom sklearn.base import TransformerMixin, BaseEstimator\nfrom code import interact\nfrom scipy.spatial.distance import euclidean\ndef linear_kernel(a, b):\n return a.dot(b.T)\n\ndef rbf(a, b, gamma=0.01):\n return exp(-gamma * euclidean(a, b))\n \ndef poly_kernel(a, b, d=6, c=1):\n return (a.T.dot(b)+c)**d\n \nclass GramMatrix(TransformerMixin, BaseEstimator):\n def __init__(self, kernel):\n self.kernel = kernel\n \n def fit(self, X, Y=None):\n if X.shape[0] > 1000:\n self.gram = X[choice(range(X.shape[0]), 1000, replace=False)]\n else:\n self.gram = X\n return self\n \n def transform(self, X):\n k = self.kernel\n gram = self.gram\n return array([[k(x, g) for g in gram] for x in X])\n\nif __name__ == '__main__':\n import numpy as np\n import matplotlib.pyplot as plt\n from matplotlib.colors import ListedColormap\n from sklearn.cross_validation import train_test_split\n from sklearn.preprocessing import StandardScaler\n from sklearn.datasets import make_moons, make_circles, make_classification\n from sklearn.pipeline import Pipeline\n from sklearn.linear_model import LogisticRegression\n\n h = .02 # step size in the mesh\n\n names = [\"No Gram\", \"Gram\"]\n classifiers = [LogisticRegression(), Pipeline([('gram', GramMatrix(poly_kernel)), ('clf', LogisticRegression())])]\n\n X, y = make_classification(n_features=2, n_redundant=0, n_informative=2,\n random_state=1, n_clusters_per_class=1)\n rng = np.random.RandomState(2)\n X += 2 * rng.uniform(size=X.shape)\n linearly_separable = (X, y)\n\n datasets = [make_moons(noise=0.3, random_state=0),\n make_circles(noise=0.2, factor=0.5, random_state=1),\n linearly_separable\n ]\n\n figure = plt.figure(figsize=(27, 9))\n i = 1\n # iterate over datasets\n for ds in datasets:\n # preprocess dataset, split into training and test part\n X, y = ds\n X = StandardScaler().fit_transform(X)\n X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=.4)\n\n x_min, x_max = X[:, 0].min() - .5, X[:, 0].max() + .5\n y_min, y_max = X[:, 1].min() - .5, X[:, 1].max() + .5\n xx, yy = np.meshgrid(np.arange(x_min, x_max, h),\n np.arange(y_min, y_max, h))\n\n # just plot the dataset first\n cm = plt.cm.RdBu\n cm_bright = ListedColormap(['#FF0000', '#0000FF'])\n ax = plt.subplot(len(datasets), len(classifiers) + 1, i)\n # Plot the training points\n ax.scatter(X_train[:, 0], X_train[:, 1], c=y_train, cmap=cm_bright)\n # and testing points\n ax.scatter(X_test[:, 0], X_test[:, 1], c=y_test, cmap=cm_bright, alpha=0.6)\n ax.set_xlim(xx.min(), xx.max())\n ax.set_ylim(yy.min(), yy.max())\n ax.set_xticks(())\n ax.set_yticks(())\n i += 1\n\n # iterate over classifiers\n for name, clf in zip(names, classifiers):\n ax = plt.subplot(len(datasets), len(classifiers) + 1, i)\n clf.fit(X_train, y_train)\n score = clf.score(X_test, y_test)\n\n # Plot the decision boundary. 
For that, we will assign a color to each\n # point in the mesh [x_min, m_max]x[y_min, y_max].\n if hasattr(clf, \"decision_function\"):\n Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()])\n else:\n Z = clf.predict_proba(np.c_[xx.ravel(), yy.ravel()])[:, 1]\n\n # Put the result into a color plot\n Z = Z.reshape(xx.shape)\n ax.contourf(xx, yy, Z, cmap=cm, alpha=.8)\n\n # Plot also the training points\n ax.scatter(X_train[:, 0], X_train[:, 1], c=y_train, cmap=cm_bright)\n # and testing points\n ax.scatter(X_test[:, 0], X_test[:, 1], c=y_test, cmap=cm_bright,\n alpha=0.6)\n\n ax.set_xlim(xx.min(), xx.max())\n ax.set_ylim(yy.min(), yy.max())\n ax.set_xticks(())\n ax.set_yticks(())\n ax.set_title(name)\n ax.text(xx.max() - .3, yy.min() + .3, ('%.2f' % score).lstrip('0'),\n size=15, horizontalalignment='right')\n i += 1\n\n figure.subplots_adjust(left=.02, right=.98)\n plt.show()"
] |
[
[
"sklearn.cross_validation.train_test_split",
"sklearn.datasets.make_classification",
"sklearn.linear_model.LogisticRegression",
"sklearn.datasets.make_moons",
"numpy.arange",
"scipy.spatial.distance.euclidean",
"sklearn.datasets.make_circles",
"matplotlib.colors.ListedColormap",
"sklearn.preprocessing.StandardScaler",
"numpy.random.RandomState",
"matplotlib.pyplot.show",
"matplotlib.pyplot.figure"
]
] |
smalbadger/TradeBot
|
[
"a6d4b443a6584af3e91b2d9bf0162db2b4c362e5"
] |
[
"src/Caroline/client_side/remote_mongo_test.py"
] |
[
"import pymongo\nfrom getpass import getpass\nfrom datetime import datetime, timedelta\nimport matplotlib.pyplot as plt\n\n#username = input(\"Username: \")\n#password = getpass()\ndb = 'cryptos'\n\nsuccessful_login = True\nclient = pymongo.MongoClient(\"mongodb://sam:[email protected]/cryptos\")\n#client = pymongo.MongoClient(\"mongodb://{}:{}@192.168.0.23/{}\".format(username, password,db))\n\ndb = client.cryptos\n\nfor collection_name in db.collection_names():\n print(collection_name)\n print('\\t{} documents'.format(db[collection_name].count()))\n\nprices = []\ntimes = []\n\ntime_delta = timedelta(minutes = 1)\ndocs = db.BCH_matches.find()\n\nstart_time = docs[0][\"time\"]\navg_price = 0\ncount = 0\nfor doc in docs:\n if (doc[\"time\"] - start_time) < time_delta:\n #add on to current average\n avg_price += float(doc[\"price\"])\n count += 1\n \n else:\n #tie up current average\n avg_price /= count\n prices.append(avg_price)\n times.append(start_time)\n #print(\"{} : {}\".format(start_time, avg_price))\n \n #start new average\n start_time = doc[\"time\"]\n avg_price = float(doc[\"price\"])\n count = 1\n\nplt.plot(times, prices)\nplt.gcf().autofmt_xdate()\nplt.show()\n"
] |
[
[
"matplotlib.pyplot.plot",
"matplotlib.pyplot.show",
"matplotlib.pyplot.gcf"
]
] |
tyler961/GravitySimulation
|
[
"9b0f218b4273451394cb345a478aa3c8495f5ce4"
] |
[
"src/particle.py"
] |
[
"import numpy\r\n\r\nclass Vector:\r\n\tdef __init__(self, magnitude, direction):\r\n\t\tself.magnitude = magnitude\r\n\t\tself.direction = direction\r\n\t\tself.xComp = 0\r\n\t\tself.yComp = 0\r\n\t\tself.calcVectorComponents()\r\n\r\n\tdef calcVectorComponents(self):\r\n\t\tself.xComp = self.magnitude * numpy.cos(self.direction)\r\n\t\tself.yComp = self.magnitude * numpy.sin(self.direction)\r\n\r\n\tdef calcVectorMagAndDir(self):\r\n\t\tself.magnitude = numpy.sqrt(self.xComp**2 + self.yComp**2)\r\n\t\tself.direction = numpy.arctan(self.yComp / self.xComp)\r\n\t\tself.updateAngle()\r\n\r\n\tdef updateAngle(self):\r\n\t\tself.direction = numpy.absolute(self.direction)\r\n\t\tif (self.xComp < 0 and self.yComp < 0):\r\n\t\t\tself.direction += 3.14159\r\n\t\telif (self.xComp < 0 and self.yComp >= 0):\r\n\t\t\tself.direction = 3.14159 - self.direction\r\n\t\telif (self.xComp >= 0 and self.yComp < 0):\r\n\t\t\tself.direction = 6.28319 - self.direction\r\n\r\n\r\nclass Particle:\r\n\tdef __init__(self, id, mass = 0, position = [0, 0, 0], velocityVector = Vector(0, 0), accelerationVector = Vector(0, 0)):\r\n\t\tself.mass = mass\r\n\t\tself.position = position\r\n\t\tself.velocityVector = velocityVector\r\n\t\tself.finalForceVector = Vector(0, 0)\r\n\t\tself.accelerationVector = accelerationVector\r\n\t\tself.GRAV_CONST = 6.67408 * 10**-11\r\n\t\tself.ID = id\r\n\r\n\t# Gets a list of the other particles.\r\n\t# Loops through each particle and calculates this particle's vector delta based on the attraction.\r\n\t# For now, each particle finds it's own attraction. Later change this so I don't have to recalc the same equations over and over.\r\n\t# Ex: p1 and p2 have 33 N of attraction. If this loop is taking place in p1, p1 sets that as it's attraction. \r\n\t# \t This will have to be recalced again when it's p2's turn to loop. 
Remove this overlap later.\r\n\tdef updateParticle(self, particleList, seconds, numDimensions):\r\n\t\tforceVectors = []\r\n\t\tdist = 0\r\n\t\tfor i in particleList:\r\n\t\t\tif(i.ID is not self.ID):\r\n\t\t\t\t# Find distance between both particles\r\n\t\t\t\tdist = numpy.sqrt((i.position[0] - self.position[0])**2 + (i.position[1] - self.position[1])**2)\r\n\r\n\t\t\t\t# Find force between the two particles\r\n\t\t\t\tforce = self.GRAV_CONST * ((self.mass * i.mass) / dist**2)\r\n\r\n\t\t\t\t# First find x diff and y diff\r\n\t\t\t\txDiff = i.position[0] - self.position[0]\r\n\t\t\t\tyDiff = i.position[1] - self.position[1]\r\n\r\n\t\t\t\t# alpha angle\r\n\t\t\t\t# a = y diff\r\n\t\t\t\t# b = x diff\r\n\t\t\t\tif xDiff == 0 and yDiff >= 0:\r\n\t\t\t\t\tangle = 0\r\n\t\t\t\telif xDiff == 0 and yDiff < 0:\r\n\t\t\t\t\tangle = 3.14159\r\n\t\t\t\telse:\r\n\t\t\t\t\tangle = numpy.absolute(numpy.arctan(yDiff / xDiff))\r\n\t\t\t\t\t# Correct the angle\r\n\t\t\t\t\tif (xDiff < 0 and yDiff < 0):\r\n\t\t\t\t\t\tangle += 3.14159\r\n\t\t\t\t\telif (xDiff < 0 and yDiff >= 0):\r\n\t\t\t\t\t\tangle = 3.14159 - angle\r\n\t\t\t\t\telif (xDiff >= 0 and yDiff < 0):\r\n\t\t\t\t\t\tangle = 6.28319 - angle\r\n\r\n\t\t\t\tforceVector = Vector(force, numpy.absolute(angle))\r\n\t\t\t\t\r\n\t\t\t\t# Need to get a list of all the vectors, then combine them and calcuate the final vector that will be applied.\r\n\t\t\t\tforceVectors.append(forceVector)\r\n\r\n\t\ttempForceVector = Vector(0, 0)\r\n\t\t# Calculate the final vector\r\n\t\tfor i in forceVectors:\r\n\t\t\t# Find vector's components and add them up for the final vector's components\r\n\t\t\ti.calcVectorComponents()\r\n\t\t\ttempForceVector.xComp += i.xComp\r\n\t\t\ttempForceVector.yComp += i.yComp\r\n\r\n\t\t# Now that I have the final vector's components I update the mag and direction of the final vector\r\n\t\tself.finalForceVector = tempForceVector\r\n\t\tself.finalForceVector.calcVectorMagAndDir()\r\n\r\n\t\t# Update current particle's acceleration\r\n\t\tself.accelerationVector.magnitude = (self.finalForceVector.magnitude / self.mass) \r\n\t\tself.accelerationVector.magnitude *= seconds\r\n\t\tself.accelerationVector.direction = self.finalForceVector.direction\r\n\t\tself.accelerationVector.calcVectorComponents()\r\n\r\n\t\t# update velocity vector by adding the acceleration vector\r\n\t\ttmpVelocityVector = Vector(0, 0)\r\n\t\ttmpVelocityVector.xComp = self.velocityVector.xComp + self.accelerationVector.xComp\r\n\t\ttmpVelocityVector.yComp = self.velocityVector.yComp + self.accelerationVector.yComp\r\n\t\ttmpVelocityVector.calcVectorMagAndDir()\r\n\t\tself.velocityVector = tmpVelocityVector\r\n\r\n\tdef move(self):\r\n\t\tself.position[0] += numpy.cos(self.velocityVector.direction) * self.velocityVector.magnitude\r\n\t\tself.position[1] += numpy.sin(self.velocityVector.direction) * self.velocityVector.magnitude\r\n\r\n\tdef printParticleStats(self):\r\n\t\tprint(\"\\n*******************************************************************************************\")\r\n\t\tprint(\"ID: \", self.ID)\r\n\t\tprint(\"Final Force Vector: \\n\\tMagnitude: \", self.finalForceVector.magnitude, \"\\n\\tDirection: \", self.finalForceVector.direction)\r\n\t\tprint(\"Acceleration Vector: \\n\\tMagnitude: \", self.accelerationVector.magnitude, \"\\n\\tDirection: \", self.accelerationVector.direction)\r\n\t\tprint(\"Velocity Vector: \\n\\tMagnitude: \", self.velocityVector.magnitude, \"\\n\\tDirection: \", self.velocityVector.direction)\r\n\t\tprint(\"Position:\\n\\tX: 
\", self.position[0], \"| Y: \", self.position[1])\r\n\t\tprint(\"Mass:\", self.mass, \" kg\")\r\n\t\tprint(\"*******************************************************************************************\\n\")\r\n\r\n\r\n# Needed to create final force vector to calculate what direction the acceleration is pointing\r\n# Force vector to every particle. This is the Force (magnitude of the vector) and the angle ()\r\n\r\n# Adding vectors:\r\n# First, find the components of each vector in relation to each other. \r\n# Vector A:\r\n#\t\tAx = A(mag) cos(thetaA)\r\n#\t\tAy = A(mag) sin(thetaA)\r\n# Vector B:\r\n#\t\tBx = B(mag) cos(thetaB)\r\n#\t\tBy = B(mag) sin(thetaB)\r\n#\r\n# Find the magnitude of the resulting vector T\r\n# \t\tRx = Ax + Bx\r\n#\t\tRy = Ay + By\r\n#\r\n# Now that I have the components of the new vector, turn it from components back into vector values\r\n#\t\tR(mag) = sqrt(Rx^2 + Ry^2)\r\n#\t\tR(theta) = arctan(Ry / Rx)\r\n#\r\n# This can be used to add any number of vectors.\r\n# Ex:\r\n#\t\tRx = Ax + Bx + Cx + Dx + ....\r\n#\t\tRy = Ay + By + Cy + Dy + ....\r\n# I can then use the equations above to find the new vector's magnitude and angle"
] |
[
[
"numpy.absolute",
"numpy.sqrt",
"numpy.arctan",
"numpy.cos",
"numpy.sin"
]
] |
amdecker/ir
|
[
"22ff03512c79239c1bd155deaeb6add3708a9805"
] |
[
"util.py"
] |
[
"__author__ = \"Amos Decker\"\n__date__ = \"January 2020\"\n\n\"\"\"\nvarious helpful tools used in StitcherEasy and rescale\n- system file chooser\n- remove black border from panoramas\n- & others\n\"\"\"\n\nfrom tkinter import Tk\nfrom tkinter.filedialog import askdirectory\nimport numpy as np\nfrom typing import List, Dict, Any, Tuple, TextIO\n\nPALETTES: List[str] = [\"arctic.pal\", \"coldest.pal\", \"contrast.pal\", \"gray.pal\", \"hottest.pal\", \"iron.pal\", \"lava.pal\", \"rainbow.pal\", \"wheel.pal\"]\nColor = Tuple[int, int, int]\n\n\ndef open_directory_chooser() -> str:\n \"\"\"opens system file chooser and returns path to directory the user selects\"\"\"\n root: Tk = Tk()\n root.withdraw()\n root.update()\n directory_name: str = askdirectory()\n root.update()\n root.destroy()\n return directory_name\n\n\ndef swap_dict(d: Dict) -> Dict:\n \"\"\"switches keys with values in a dictionary\"\"\"\n return dict((v, k) for k, v in d.items())\n\n\ndef make_double_digit_str(num: int) -> str:\n \"\"\"\n useful for file names, turns 9 into \"09\" but keeps 15 as \"15\"\n :param num: one or two digit number\n :return: two digit number string\n \"\"\"\n return str(num) if num > 9 else \"0\" + str(num)\n\n\ndef ycbcr_to_bgr(c: Color) -> Color:\n \"\"\"\n converts from color space YCbCr to BGR\n :param c: tuple of three numbers that give the YCbCr color\n :return: tuple of (b, g, r)\n \"\"\"\n r: int = int(c[0] + 1.40200 * (c[1] - 128))\n g: int = int(c[0] - 0.34414 * (c[2] - 128) - 0.71414 * (c[1] - 128))\n b: int = int(c[0] + 1.77200 * (c[2] - 128))\n r = max(0, min(255, r))\n g = max(0, min(255, g))\n b = max(0, min(255, b))\n return b, g, r\n\n\ndef palette_to_bgr(filename: str) -> List[Color]:\n \"\"\"\n creates list of tuple of (b, g, r) values for each color in palette\n :param filename: path to .pal palette file\n :return: list of tuple of (b, g, r)\n \"\"\"\n f: TextIO\n with open(filename) as f:\n palette: List[Color] = [tuple([int(y) for y in x.split(\",\")]) for x in f.read().split(\"\\n\")]\n for i in range(len(palette)):\n palette[i] = ycbcr_to_bgr(palette[i])\n return palette\n\n\ndef get_palette_color_match(pxl: np.ndarray, palette: List[Color]) -> Color:\n \"\"\"\n the bgr values of the images don't match up perfectly with the .pal file, so return the color in the palette\n closest to that of the pixel\n :param pxl: 1x3 array of b, g, r colors\n :param palette: output of palette_to_bgr()\n :return: tuple of bgr color in the palette\n \"\"\"\n differences = np.sum(abs(palette - pxl), axis=1) # how different the colors are\n idx: int = np.where(differences == np.amin(differences))[0][0] # get index of closest color\n return palette[idx]\n\n\ndef stretch_list(orig: List, new_length: int) -> List:\n \"\"\"\n stretches a list to be a certain length and tries to fill it in as evenly as possible\n\n so with orig as [0, 1, 2] and leng as 5 it would output [0, 0, 1, 1, 2]\n :param orig: list to be stretched\n :param new_length: length of final stretched list\n :return: list with length leng filled evenly with values from orig\n \"\"\"\n new: List = [None] * new_length\n num_each: int = round(len(new) / len(orig))\n\n prev: int = 0\n i: int = 0\n while num_each * (i + 1) <= new_length and i < len(orig):\n new[prev:num_each * (i + 1)] = [orig[i]] * num_each\n prev = num_each * (i + 1)\n i += 1\n\n numNone:int = new.count(None)\n if numNone > 0:\n if i >= len(orig):\n i = len(orig) - 1\n new[-numNone:] = stretch_list([orig[i]], numNone)\n return new\n\n\ndef replace(arr: np.ndarray, d: 
Dict) -> np.ndarray:\n \"\"\"\n replaces values in 2d array according to dictionary\n\n example: arr = [[1, 2, 3], [1, 2, 3], [4, 5, 6]] and dict = {(1, 2, 3): (0, 0, 0), (4, 5, 6):(1, 1, 1)}\n gives [[0, 0, 0], [0, 0, 0], [1, 1, 1]]\n\n thanks to https://stackoverflow.com/a/16992881 for this solution\n :param arr: a 2-D array\n :param d: keys must contain all values in arr\n :return: new np array with same shape as arr\n \"\"\"\n u, inv = np.unique(arr, return_inverse=True,\n axis=0) # inv gives back indices allowing reconstruction of original array from unique elements\n\n new_arr = np.array([d[tuple(x)] for x in u])[inv]\n\n if new_arr.shape[-1] == arr.shape[-1]:\n return new_arr.reshape(arr.shape)\n else:\n if len(new_arr.shape) == 1:\n return new_arr.reshape(arr.shape[0], 1)\n else:\n return new_arr\n\n\n"
] |
[
[
"numpy.amin",
"numpy.unique"
]
] |
kevinsung/qiskit-terra
|
[
"573198bdc55eaf4ad0691d441ae41c874f46313d"
] |
[
"qiskit/visualization/timeline/core.py"
] |
[
"# This code is part of Qiskit.\n#\n# (C) Copyright IBM 2020.\n#\n# This code is licensed under the Apache License, Version 2.0. You may\n# obtain a copy of this license in the LICENSE.txt file in the root directory\n# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.\n#\n# Any modifications or derivative works of this code must retain this\n# copyright notice, and modified files need to carry a notice indicating\n# that they have been altered from the originals.\n\n\"\"\"\nCore module of the timeline drawer.\n\nThis module provides the `DrawerCanvas` which is a collection of drawings.\nThe canvas instance is not just a container of drawing objects, as it also performs\ndata processing like binding abstract coordinates.\n\n\nInitialization\n~~~~~~~~~~~~~~\nThe `DataCanvas` is not exposed to users as they are implicitly initialized in the\ninterface function. It is noteworthy that the data canvas is agnostic to plotters.\nThis means once the canvas instance is initialized we can reuse this data\namong multiple plotters. The canvas is initialized with a stylesheet.\n\n ```python\n canvas = DrawerCanvas(stylesheet=stylesheet)\n canvas.load_program(sched)\n canvas.update()\n ```\n\nOnce all properties are set, `.update` method is called to apply changes to drawings.\n\nUpdate\n~~~~~~\nTo update the image, a user can set new values to canvas and then call the `.update` method.\n\n ```python\n canvas.set_time_range(2000, 3000)\n canvas.update()\n ```\n\nAll stored drawings are updated accordingly. The plotter API can access to\ndrawings with `.collections` property of the canvas instance. This returns\nan iterator of drawings with the unique data key.\nIf a plotter provides object handler for plotted shapes, the plotter API can manage\nthe lookup table of the handler and the drawings by using this data key.\n\"\"\"\nimport warnings\nfrom copy import deepcopy\nfrom functools import partial\nfrom typing import Tuple, Iterator, Dict\nfrom enum import Enum\n\nimport numpy as np\n\nfrom qiskit import circuit\nfrom qiskit.visualization.exceptions import VisualizationError\nfrom qiskit.visualization.timeline import drawings, types\nfrom qiskit.visualization.timeline.stylesheet import QiskitTimelineStyle\n\n\nclass DrawerCanvas:\n \"\"\"Data container for drawings.\"\"\"\n\n def __init__(self, stylesheet: QiskitTimelineStyle):\n \"\"\"Create new data container.\"\"\"\n # stylesheet\n self.formatter = stylesheet.formatter\n self.generator = stylesheet.generator\n self.layout = stylesheet.layout\n\n # drawings\n self._collections = {}\n self._output_dataset = {}\n\n # vertical offset of bits\n self.bits = []\n self.assigned_coordinates = {}\n\n # visible controls\n self.disable_bits = set()\n self.disable_types = set()\n\n # time\n self._time_range = (0, 0)\n\n # graph height\n self.vmax = 0\n self.vmin = 0\n\n @property\n def time_range(self) -> Tuple[int, int]:\n \"\"\"Return current time range to draw.\n\n Calculate net duration and add side margin to edge location.\n\n Returns:\n Time window considering side margin.\n \"\"\"\n t0, t1 = self._time_range\n\n duration = t1 - t0\n new_t0 = t0 - duration * self.formatter[\"margin.left_percent\"]\n new_t1 = t1 + duration * self.formatter[\"margin.right_percent\"]\n\n return new_t0, new_t1\n\n @property\n def collections(self) -> Iterator[Tuple[str, drawings.ElementaryData]]:\n \"\"\"Return currently active entries from drawing data collection.\n\n The object is returned with unique name as a key of an object handler.\n When the 
horizontal coordinate contains `AbstractCoordinate`,\n the value is substituted by current time range preference.\n \"\"\"\n yield from self._output_dataset.items()\n\n @time_range.setter\n def time_range(self, new_range: Tuple[int, int]):\n \"\"\"Update time range to draw.\"\"\"\n self._time_range = new_range\n\n def add_data(self, data: drawings.ElementaryData):\n \"\"\"Add drawing to collections.\n\n If the given object already exists in the collections,\n this interface replaces the old object instead of adding new entry.\n\n Args:\n data: New drawing to add.\n \"\"\"\n if not self.formatter[\"control.show_clbits\"]:\n data.bits = [b for b in data.bits if not isinstance(b, circuit.Clbit)]\n self._collections[data.data_key] = data\n\n # pylint: disable=cyclic-import\n def load_program(self, program: circuit.QuantumCircuit):\n \"\"\"Load quantum circuit and create drawing..\n\n Args:\n program: Scheduled circuit object to draw.\n\n Raises:\n VisualizationError: When circuit is not scheduled.\n \"\"\"\n not_gate_like = (circuit.Barrier,)\n\n if getattr(program, \"_op_start_times\") is None:\n # Run scheduling for backward compatibility\n from qiskit import transpile\n from qiskit.transpiler import InstructionDurations, TranspilerError\n\n warnings.warn(\n \"Visualizing un-scheduled circuit with timeline drawer has been deprecated. \"\n \"This circuit should be transpiled with scheduler though it consists of \"\n \"instructions with explicit durations.\",\n DeprecationWarning,\n )\n\n try:\n program = transpile(\n program, scheduling_method=\"alap\", instruction_durations=InstructionDurations()\n )\n except TranspilerError as ex:\n raise VisualizationError(\n f\"Input circuit {program.name} is not scheduled and it contains \"\n \"operations with unknown delays. 
This cannot be visualized.\"\n ) from ex\n\n for t0, (inst, qargs, cargs) in zip(program.op_start_times, program.data):\n bits = qargs + cargs\n for bit_pos, bit in enumerate(qargs + cargs):\n if not isinstance(inst, not_gate_like):\n # Generate draw object for gates\n gate_source = types.ScheduledGate(\n t0=t0,\n operand=inst,\n duration=inst.duration,\n bits=bits,\n bit_position=bit_pos,\n )\n for gen in self.generator[\"gates\"]:\n obj_generator = partial(gen, formatter=self.formatter)\n for datum in obj_generator(gate_source):\n self.add_data(datum)\n if len(bits) > 1 and bit_pos == 0:\n # Generate draw object for gate-gate link\n line_pos = t0 + 0.5 * inst.duration\n link_source = types.GateLink(t0=line_pos, opname=inst.name, bits=bits)\n for gen in self.generator[\"gate_links\"]:\n obj_generator = partial(gen, formatter=self.formatter)\n for datum in obj_generator(link_source):\n self.add_data(datum)\n if isinstance(inst, circuit.Barrier):\n # Generate draw object for barrier\n barrier_source = types.Barrier(t0=t0, bits=bits, bit_position=bit_pos)\n for gen in self.generator[\"barriers\"]:\n obj_generator = partial(gen, formatter=self.formatter)\n for datum in obj_generator(barrier_source):\n self.add_data(datum)\n\n self.bits = program.qubits + program.clbits\n for bit in self.bits:\n for gen in self.generator[\"bits\"]:\n # Generate draw objects for bit\n obj_generator = partial(gen, formatter=self.formatter)\n for datum in obj_generator(bit):\n self.add_data(datum)\n\n # update time range\n t_end = max(program.duration, self.formatter[\"margin.minimum_duration\"])\n self.set_time_range(t_start=0, t_end=t_end)\n\n def set_time_range(self, t_start: int, t_end: int):\n \"\"\"Set time range to draw.\n\n Args:\n t_start: Left boundary of drawing in units of cycle time.\n t_end: Right boundary of drawing in units of cycle time.\n \"\"\"\n self.time_range = (t_start, t_end)\n\n def set_disable_bits(self, bit: types.Bits, remove: bool = True):\n \"\"\"Interface method to control visibility of bits.\n\n Specified object in the blocked list will not be shown.\n\n Args:\n bit: A qubit or classical bit object to disable.\n remove: Set `True` to disable, set `False` to enable.\n \"\"\"\n if remove:\n self.disable_bits.add(bit)\n else:\n self.disable_bits.discard(bit)\n\n def set_disable_type(self, data_type: types.DataTypes, remove: bool = True):\n \"\"\"Interface method to control visibility of data types.\n\n Specified object in the blocked list will not be shown.\n\n Args:\n data_type: A drawing data type to disable.\n remove: Set `True` to disable, set `False` to enable.\n \"\"\"\n if isinstance(data_type, Enum):\n data_type_str = str(data_type.value)\n else:\n data_type_str = data_type\n\n if remove:\n self.disable_types.add(data_type_str)\n else:\n self.disable_types.discard(data_type_str)\n\n def update(self):\n \"\"\"Update all collections.\n\n This method should be called before the canvas is passed to the plotter.\n \"\"\"\n self._output_dataset.clear()\n self.assigned_coordinates.clear()\n\n # update coordinate\n y0 = -self.formatter[\"margin.top\"]\n for bit in self.layout[\"bit_arrange\"](self.bits):\n # remove classical bit\n if isinstance(bit, circuit.Clbit) and not self.formatter[\"control.show_clbits\"]:\n continue\n # remove idle bit\n if not self._check_bit_visible(bit):\n continue\n offset = y0 - 0.5\n self.assigned_coordinates[bit] = offset\n y0 = offset - 0.5\n self.vmax = 0\n self.vmin = y0 - self.formatter[\"margin.bottom\"]\n\n # add data\n temp_gate_links = {}\n 
temp_data = {}\n for data_key, data in self._collections.items():\n # deep copy to keep original data hash\n new_data = deepcopy(data)\n new_data.xvals = self._bind_coordinate(data.xvals)\n new_data.yvals = self._bind_coordinate(data.yvals)\n if data.data_type == str(types.LineType.GATE_LINK.value):\n temp_gate_links[data_key] = new_data\n else:\n temp_data[data_key] = new_data\n\n # update horizontal offset of gate links\n temp_data.update(self._check_link_overlap(temp_gate_links))\n\n # push valid data\n for data_key, data in temp_data.items():\n if self._check_data_visible(data):\n self._output_dataset[data_key] = data\n\n def _check_data_visible(self, data: drawings.ElementaryData) -> bool:\n \"\"\"A helper function to check if the data is visible.\n\n Args:\n data: Drawing object to test.\n\n Returns:\n Return `True` if the data is visible.\n \"\"\"\n _barriers = [str(types.LineType.BARRIER.value)]\n\n _delays = [str(types.BoxType.DELAY.value), str(types.LabelType.DELAY.value)]\n\n def _time_range_check(_data):\n \"\"\"If data is located outside the current time range.\"\"\"\n t0, t1 = self.time_range\n if np.max(_data.xvals) < t0 or np.min(_data.xvals) > t1:\n return False\n return True\n\n def _associated_bit_check(_data):\n \"\"\"If any associated bit is not shown.\"\"\"\n if all(bit not in self.assigned_coordinates for bit in _data.bits):\n return False\n return True\n\n def _data_check(_data):\n \"\"\"If data is valid.\"\"\"\n if _data.data_type == str(types.LineType.GATE_LINK.value):\n active_bits = [bit for bit in _data.bits if bit not in self.disable_bits]\n if len(active_bits) < 2:\n return False\n elif _data.data_type in _barriers and not self.formatter[\"control.show_barriers\"]:\n return False\n elif _data.data_type in _delays and not self.formatter[\"control.show_delays\"]:\n return False\n return True\n\n checks = [_time_range_check, _associated_bit_check, _data_check]\n if all(check(data) for check in checks):\n return True\n\n return False\n\n def _check_bit_visible(self, bit: types.Bits) -> bool:\n \"\"\"A helper function to check if the bit is visible.\n\n Args:\n bit: Bit object to test.\n\n Returns:\n Return `True` if the bit is visible.\n \"\"\"\n _gates = [str(types.BoxType.SCHED_GATE.value), str(types.SymbolType.FRAME.value)]\n\n if bit in self.disable_bits:\n return False\n\n if self.formatter[\"control.show_idle\"]:\n return True\n\n for data in self._collections.values():\n if bit in data.bits and data.data_type in _gates:\n return True\n return False\n\n def _bind_coordinate(self, vals: Iterator[types.Coordinate]) -> np.ndarray:\n \"\"\"A helper function to bind actual coordinates to an `AbstractCoordinate`.\n\n Args:\n vals: Sequence of coordinate objects associated with a drawing.\n\n Returns:\n Numpy data array with substituted values.\n \"\"\"\n\n def substitute(val: types.Coordinate):\n if val == types.AbstractCoordinate.LEFT:\n return self.time_range[0]\n if val == types.AbstractCoordinate.RIGHT:\n return self.time_range[1]\n if val == types.AbstractCoordinate.TOP:\n return self.vmax\n if val == types.AbstractCoordinate.BOTTOM:\n return self.vmin\n raise VisualizationError(f\"Coordinate {val} is not supported.\")\n\n try:\n return np.asarray(vals, dtype=float)\n except TypeError:\n return np.asarray(list(map(substitute, vals)), dtype=float)\n\n def _check_link_overlap(\n self, links: Dict[str, drawings.GateLinkData]\n ) -> Dict[str, drawings.GateLinkData]:\n \"\"\"Helper method to check overlap of bit links.\n\n This method dynamically shifts 
horizontal position of links if they are overlapped.\n \"\"\"\n duration = self.time_range[1] - self.time_range[0]\n allowed_overlap = self.formatter[\"margin.link_interval_percent\"] * duration\n\n # return y coordinates\n def y_coords(link: drawings.GateLinkData):\n return np.array([self.assigned_coordinates.get(bit, np.nan) for bit in link.bits])\n\n # group overlapped links\n overlapped_group = []\n data_keys = list(links.keys())\n while len(data_keys) > 0:\n ref_key = data_keys.pop()\n overlaps = set()\n overlaps.add(ref_key)\n for key in data_keys[::-1]:\n # check horizontal overlap\n if np.abs(links[ref_key].xvals[0] - links[key].xvals[0]) < allowed_overlap:\n # check vertical overlap\n y0s = y_coords(links[ref_key])\n y1s = y_coords(links[key])\n v1 = np.nanmin(y0s) - np.nanmin(y1s)\n v2 = np.nanmax(y0s) - np.nanmax(y1s)\n v3 = np.nanmin(y0s) - np.nanmax(y1s)\n v4 = np.nanmax(y0s) - np.nanmin(y1s)\n if not (v1 * v2 > 0 and v3 * v4 > 0):\n overlaps.add(data_keys.pop(data_keys.index(key)))\n overlapped_group.append(list(overlaps))\n\n # renew horizontal offset\n new_links = {}\n for overlaps in overlapped_group:\n if len(overlaps) > 1:\n xpos_mean = np.mean([links[key].xvals[0] for key in overlaps])\n # sort link key by y position\n sorted_keys = sorted(overlaps, key=lambda x: np.nanmax(y_coords(links[x])))\n x0 = xpos_mean - 0.5 * allowed_overlap * (len(overlaps) - 1)\n for ind, key in enumerate(sorted_keys):\n data = links[key]\n data.xvals = [x0 + ind * allowed_overlap]\n new_links[key] = data\n else:\n key = overlaps[0]\n new_links[key] = links[key]\n\n return {key: new_links[key] for key in links.keys()}\n"
] |
[
[
"numpy.nanmax",
"numpy.abs",
"numpy.min",
"numpy.asarray",
"numpy.nanmin",
"numpy.max",
"numpy.mean"
]
] |