Dataset columns:
  repo_name           string (length 6-130)
  hexsha              list
  file_path           list
  code                list
  apis                list
  possible_versions   list
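To make the record layout concrete, the sketch below shows how one might iterate over records with this schema, assuming they are stored as JSON Lines with one record per line; the file name "records.jsonl" and the loading approach are assumptions for illustration only, not part of the dataset description. Within a record, the hexsha, file_path, code, apis, and possible_versions lists are aligned per source file.

    import json

    # Minimal sketch, assuming the records above are stored as JSON Lines.
    # "records.jsonl" is a hypothetical file name, not part of the dataset.
    with open("records.jsonl", "r", encoding="utf-8") as fh:
        for line in fh:
            record = json.loads(line)
            repo = record["repo_name"]              # e.g. "kamata1729/visualize-pytorch"
            shas = record["hexsha"]                 # one commit sha per source file
            paths = record["file_path"]             # file paths, aligned with hexsha
            sources = record["code"]                # full source text of each file
            apis = record["apis"]                   # library calls detected per file
            versions = record["possible_versions"]  # per-file dict of candidate library versions

            # Example use: list the files that call scipy APIs.
            for path, file_apis in zip(paths, apis):
                if any(api.startswith("scipy.") for api in file_apis):
                    print(repo, path, file_apis)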
kamata1729/visualize-pytorch
[ "ec1b3fe0952c5db187a5d4875cd1539a1b7a1270" ]
[ "src/guidedBackProp.py" ]
[ "import cv2\nimport numpy as np\nimport torch\nimport torch.nn as nn\nfrom torch.nn import functional as F\n\nclass GuidedBackProp():\n def __init__(self, model, use_cuda):\n self.model = model.eval()\n self.use_cuda = use_cuda\n if self.use_cuda:\n self.model = self.model.cuda()\n \n for module in self.model.named_modules():\n module[1].register_backward_hook(self.bp_relu)\n \n def bp_relu(self, module, grad_in, grad_out):\n if isinstance(module, nn.ReLU):\n return (torch.clamp(grad_in[0], min=0.0), )\n \n def __call__(self, x, index=None):\n x = x.clone()\n if self.use_cuda:\n x = x.cuda()\n x.requires_grad_()\n output = self.model(x)\n\n if index == None:\n index = np.argmax(output.cpu().data.numpy())\n \n one_hot = np.zeros((1, output.size()[-1]), dtype = np.float32)\n one_hot[0][index] = 1\n one_hot = torch.from_numpy(one_hot)\n one_hot.requires_grad_()\n if self.use_cuda:\n one_hot = torch.sum(one_hot.cuda() * output)\n else:\n one_hot = torch.sum(one_hot * output)\n \n one_hot.backward()\n result = x.grad.cpu().numpy()[0]\n result = np.transpose(result, (1,2,0))\n return result, index\n\ndef arrange_img(img):\n img = np.maximum(img, 0)\n res = img - img.min()\n res /= res.max()\n res = np.uint8(res*255)\n return res" ]
[ [ "numpy.maximum", "numpy.uint8", "torch.sum", "torch.from_numpy", "numpy.transpose", "torch.clamp" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
pansiyuan123/im2recipe-Pytorch
[ "e0563cc909a3763b0548d1a8efca7e8175273ebc" ]
[ "scripts/proc.py" ]
[ "from scipy.misc import imread, imresize\r\nimport numpy as np\r\ndef detect_ingrs(recipe, vocab):\r\n #去重\r\n try:\r\n ingr_names = [ingr['text'] for ingr in recipe['ingredients'] if ingr['text']]\r\n except:\r\n ingr_names = []\r\n print (\"Could not load ingredients! Moving on...\")\r\n\r\n detected = set()\r\n for name in ingr_names:\r\n name = name.replace(' ','_')\r\n name_ind = vocab.get(name)\r\n if name_ind:\r\n detected.add(name_ind)\r\n '''\r\n name_words = name.lower().split(' ')\r\n for i in xrange(len(name_words)):\r\n name_ind = vocab.get('_'.join(name_words[i:]))\r\n if name_ind:\r\n detected.add(name_ind)\r\n break\r\n '''\r\n\r\n return list(detected) + [vocab['</i>']]\r\n\r\ndef process_image(impath,imsize):\r\n try:\r\n img = imread(impath)\r\n if img.ndim == 2: #grayscale\r\n img = img[:,:,None][:,:,[0,0,0]]\r\n H0, W0 = img.shape[0], img.shape[1]\r\n\r\n img = imresize(img, float(imsize) / min(H0, W0))\r\n fail = 0\r\n except:\r\n print (\"Could not load image...Using black one instead.\")\r\n img = np.zeros((imsize,imsize,3))\r\n fail =1\r\n\r\n return img,fail\r\n\r\ndef read_image(filename):\r\n img = imread(filename)\r\n if img.ndim == 2:\r\n img = img[:, :, None][:, :, [0, 0, 0]]\r\n\r\n img = imresize(img, (224,224))\r\n return img\r\n" ]
[ [ "scipy.misc.imresize", "numpy.zeros", "scipy.misc.imread" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "0.13", "0.14", "0.15", "0.10", "0.16", "0.19", "0.18", "0.12", "1.0", "0.17", "1.2" ], "tensorflow": [] } ]
tobiasjj/cellular-nanoscience
[ "a165dc7ee62964bb82b1fc736d2ab03a01894ebf", "a165dc7ee62964bb82b1fc736d2ab03a01894ebf", "a165dc7ee62964bb82b1fc736d2ab03a01894ebf" ]
[ "functions/functions/force_extension.py", "functions/functions/binning.py", "functions/functions/helpers.py" ]
[ "#!/usr/bin/python\n# -*- coding: utf-8 -*-\n#\n# force_extension, functions to work with and show force extension curves\n# Copyright 2019 Tobias Jachowski\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport collections\nimport itertools\nimport math\nimport numpy as np\nimport os\nimport unzipping_simulation as uzsi\n\nfrom IPython.display import display\nfrom ipywidgets import Label, interactive_output, Checkbox, IntText, BoundedIntText, FloatText, HBox, VBox\nfrom matplotlib import pyplot as plt\nfrom stepfinder import filter_fbnl\n\nfrom .binning import calculate_bin_means, concatenate_data_dict, separate_data_array\n\n\ndef cart2sph(x, y, z, offset_phi=0, positive_phi=False):\n \"\"\"\n offset_phi : float\n angle in Euclidian plane that should point in the direction of positive x\n \"\"\"\n # cart2sph -- Transform Cartesian to spherical coordinates\n # Spherical coordinates (r, θ, φ) as commonly used in physics (ISO convention):\n # radial distance r, inclination θ (theta), and azimuth φ (phi).\n hxy = math.hypot(x, y)\n r = math.hypot(hxy, z)\n theta = math.atan2(hxy, z)\n phi = math.atan2(y, x) - offset_phi\n if positive_phi and phi < 0:\n phi += 2 * math.pi\n return r, theta, phi\n\n\ndef sph2cart(r, theta, phi, offset_phi=0):\n \"\"\"\n offset_phi : float\n angle in Euclidian plane that points in the directon of positive x\n \"\"\"\n # sph2cart -- Transform spherical to Cartesian coordinates\n # Spherical coordinates (r, θ, φ) as commonly used in physics (ISO convention):\n # radial distance r, inclination θ (theta), and azimuth φ (phi).\n phi += offset_phi\n rsin_theta = r * math.sin(theta)\n x = rsin_theta * math.cos(phi)\n y = rsin_theta * math.sin(phi)\n z = r * math.cos(theta)\n return x, y, z\n\n\ndef angle(v1, v2):\n # angle between two vectors\n #return math.atan2(np.linalg.norm(np.cross(v1,v2)), np.dot(v1,v2))\n # does not work as well for small angles, but is faster:\n cos_theta = v1.dot(v2) / (np.linalg.norm(v1) * np.linalg.norm(v2))\n cos_theta = max(-1, cos_theta)\n cos_theta = min(1, cos_theta)\n return math.acos(cos_theta)\n\n\ndef _get_speed_approx(tether, i, cycle=None):\n # Determine the indices of the raw data of the the corresponding cycle\n cycle = 'stress' if cycle is None else cycle\n pairs = tether.stress_release_pairs(i=i)\n # Get the index and the label of the trace of the excited axis\n idx = pairs[cycle]['idx'][0]\n ax = pairs[cycle]['info'][0,0]\n trace = {'x': 'positionX', 'y': 'positionY'}\n # Get the raw data of the excited position (stage) movement\n # and calculate the approximate speed\n position = tether.get_data(traces=trace[ax], samples=idx)\n amplitude = position.max() - position.min()\n duration = (idx.stop - idx.start) / tether.resolution\n speed = amplitude / duration # m / s\n return speed\n\n\ndef binned_force_extension(tether, i, posmin=10e-9, bins=None, resolution=None,\n bin_width_e=None, sortcolumn=0, dXYZ_factors=None,\n fXYZ_factors=None, angles=False,\n angles_after_binning=False, phi_shift_twopi=False):\n \"\"\"\n 
Parameters\n ----------\n bins : int or str\n number of bins, takes precedence over resolution\n resolution : float\n number of bins per unit of sortcolumn.\n bin_width_e : float\n Width of bins of extension in m. Only evaluated if bins and resolution\n are None. A resolution (s) is calculated by dividing the bin_width_e\n with an approximate speed of the positionXY signal. Therefore, if\n bin_width_e is evaluated, the sortcolumn is automatically set to 0\n (i.e. time).\n sortcolumn : int\n 0: time, 1: extension, 2: force, n >= 3: angles and/or extra_columns\n angles : bool\n Calculate theta and phi for extension and force.\n 3: theta_extension, 4: phi_extension, 5: theta_force, 6: phi_force,\n 7,8,9: distanceXYZ, 10,11,12: forceXYZ\n angles_after_binning : bool\n 13,14,15,16: theta (13,15) and phi (14,16) for extension and force\n \"\"\"\n # Get the force, extension, etc. data from the tether\n data = tether.force_extension_pairs(i=i, posmin=posmin,\n dXYZ_factors=dXYZ_factors,\n fXYZ_factors=fXYZ_factors,\n reduce_list=True)\n\n # Add angles of force and extension\n if angles:\n data = _add_angles(data, phi_shift_twopi=phi_shift_twopi)\n\n edges = {}\n centers = {}\n width = {}\n bin_means = {}\n bin_stds = {}\n bin_Ns = {}\n for cycle in [ 'stress', 'release' ]:\n # Concatenate data dictionary into one array\n d, keys, columns = concatenate_data_dict(data[cycle])\n\n # Calculate bin width of time according to bin width of extension\n if bins is None and resolution is None and bin_width_e is not None:\n speed = _get_speed_approx(tether, i, cycle)\n resolution = speed / bin_width_e\n sortcolumn = 0\n\n # Bin the data\n result = calculate_bin_means(d, bins=bins,\n resolution=resolution,\n sortcolumn=sortcolumn)\n edges[cycle] = result['edges']\n centers[cycle] = result['centers']\n width[cycle] = result['width']\n bin_Ns[cycle] = result['bin_Ns']\n\n # Separate data arrays into data dictionaries\n bin_means[cycle] = separate_data_array(result['bin_means'], keys,\n columns)\n bin_stds[cycle] = separate_data_array(result['bin_stds'], keys,\n columns)\n\n # Calculate angles with already binned distance/force data\n if angles_after_binning:\n bin_means = _add_angles(bin_means, phi_shift_twopi=phi_shift_twopi,\n key_suffix='_after')\n\n settings = {\n 'posmin': posmin,\n 'bins': bins,\n 'resolution': resolution,\n 'bin_width_e': bin_width_e,\n 'sortcolumn': sortcolumn,\n 'dXYZ_factors': dXYZ_factors,\n 'fXYZ_factors': fXYZ_factors,\n 'angles': angles,\n 'angles_after_binning': angles_after_binning,\n 'phi_shift_twopi': phi_shift_twopi\n }\n\n return { 'settings': settings,\n 'data': data,\n 'edges': edges,\n 'centers': centers,\n 'width': width,\n 'bin_Ns': bin_Ns,\n 'bin_means': bin_means,\n 'bin_stds': bin_stds }\n\n\ndef fbnl_force_extension(tether, i, posmin=10e-9, filter_time=None,\n filter_length_e=None, edginess=1, dXYZ_factors=None,\n fXYZ_factors=None, angles=False,\n angles_after_filter=False, phi_shift_twopi=False):\n \"\"\"\n Parameters\n ----------\n filter_time : float\n time of running filter in s\n filter_length_e : float\n Length of running filter of extension in m. Only evaluated if\n filter_time is None. 
A filter_time (s) is calculated by dividing the\n filter_length_e with an approximate speed of the positionXY signal.\n angles : bool\n Calculate theta and phi for extension and force.\n 3: theta_extension, 4: phi_extension, 5: theta_force, 6: phi_force,\n 7,8,9: distanceXYZ, 10,11,12: forceXYZ\n angles_after_filter : bool\n 13,14,15,16: theta (13,15) and phi (14,16) for extension and force\n\n Returns\n -------\n filtered_data, fbnl_filters\n filtered_data is a dict of two np.ndarrays (stress, release)\n each array has the filtered data with the columns 0: time, 1: extension\n 2: force, and extra traces/angles\n fbnl_filters is a dict of two lists (stress, release) containing\n the individual FBNL_Filter_results of the filtered data\n \"\"\"\n # Get the force, extension, etc. data from the tether\n data = tether.force_extension_pairs(i=i, posmin=posmin,\n dXYZ_factors=dXYZ_factors,\n fXYZ_factors=fXYZ_factors)\n\n # Add angles of force and extension\n if angles:\n data = _add_angles(data, phi_shift_twopi=phi_shift_twopi)\n\n # Filter the data\n resolution = tether.resolution\n ft = 0.005 if filter_time is None else filter_time\n window = max(int(np.round(ft * resolution)), 1)\n pad_data = True\n\n fbnl_filters = {}\n data_filtered = {}\n for cycle in [ 'stress', 'release' ]:\n fbnl_filters[cycle] = {}\n data_filtered[cycle] = {}\n if filter_time is None and filter_length_e is not None:\n # filter_time has priority over filter_length_e\n speed = _get_speed_approx(tether, i, cycle)\n ft = filter_length_e / speed # s\n window = max(int(np.round(ft * resolution)), 1)\n for key in data[cycle]: # time, extension, force, ...\n d = data[cycle][key]\n if d.ndim == 1: d = [d]\n else: d = d.T\n rs = []\n rs_data = []\n for _d in d:\n r = filter_fbnl(_d, resolution, window=window,\n window_var=window, p=edginess,\n pad_data=pad_data)\n rs.append(r)\n rs_data.append(np.expand_dims(r.data_filtered, axis=1))\n fbnl_filters[cycle][key] = rs\n data_filtered[cycle][key] = np.concatenate(rs_data, axis=1).squeeze()\n\n # Calculate angles with already filtered distance/force data\n if angles_after_filter:\n data_filtered = _add_angles(data_filtered,\n phi_shift_twopi=phi_shift_twopi,\n key_suffix='_after')\n settings = {\n 'posmin': posmin,\n 'filter_time': filter_time,\n 'filter_length_e': filter_length_e,\n 'edginess': edginess,\n 'dXYZ_factors': dXYZ_factors,\n 'fXYZ_factors': fXYZ_factors,\n 'angles': angles,\n 'angles_after_filter': angles_after_filter,\n 'phi_shift_twopi': phi_shift_twopi\n }\n\n return { 'settings': settings,\n 'data': data,\n 'data_filtered': data_filtered,\n 'fbnl_filters': fbnl_filters }\n\n\ndef _add_angles(data, phi_shift_twopi=False, key_suffix=''):\n # Calculate angles theta and phi of distance (i.e. 
extension) and force\n # vectors for cycles stress and release\n for c in [ 'stress', 'release' ]:\n angle_extension = np.array([\n cart2sph(*point)[1:] for point in data[c]['distanceXYZ']\n ])*180/math.pi\n angle_force = np.array([\n cart2sph(*point)[1:] for point in data[c]['forceXYZ']\n ])*180/math.pi\n if phi_shift_twopi:\n angle_extension[angle_extension[:,1] < 0.0, 1] += 360\n angle_force[angle_force[:,1] < 0.0, 1] += 360\n data[c]['angle_extension' + key_suffix] = angle_extension\n data[c]['angle_force' + key_suffix] = angle_force\n\n return data\n\n\ndef plot_force_extension(x, y, ystd=None, yerr=None, label=None, ax=None,\n show=False):\n if ax is None:\n # Create new figure\n fig, ax = plt.subplots()\n ax.set_xlabel('Extension (nm)')\n ax.set_ylabel('Force (pN)')\n ax.set_title('Force Extension')\n\n # plot force extension lines and errorbars\n ax.plot(x * 1e9, y * 1e12, label=label)\n if ystd is not None:\n ax.errorbar(x * 1e9, y * 1e12, fmt='none', yerr=ystd * 1e12,\n color='grey', ecolor='grey', alpha=0.25)\n if yerr is not None:\n ax.errorbar(x * 1e9, y * 1e12, fmt='none', yerr=yerr * 1e12,\n color='grey', ecolor='black', alpha=0.25)\n\n if show:\n ax.get_figure().show()\n return ax\n\n\ndef _create_twin_ax(ax, subplot_pos=None):\n fig = ax.get_figure()\n subplot_pos = subplot_pos or (1, 1, 1)\n ax2 = fig.add_subplot(*subplot_pos, frame_on=False)\n ax2.xaxis.set_label_position('top')\n ax2.yaxis.set_label_position('right')\n ax2.xaxis.tick_top()\n ax2.yaxis.tick_right()\n ax2._get_lines.prop_cycler = ax._get_lines.prop_cycler\n return ax2\n\n\ndef plot_angle_extension(x, theta_phi, axes=None, show=False):\n if axes is None:\n # Create new figure\n fig, ax = plt.subplots()\n ax.set_xlabel('Apparent extension (nm)')\n ax.set_ylabel('Theta (°)')\n ax2 = _create_twin_ax(ax)\n ax2.set_ylabel('Phi (°)')\n else:\n ax, ax2 = axes\n\n # 0: theta_extension, 1: phi_exension, 2: theta_force, 3: phi_force\n lns1 = ax.plot(x * 1e9, theta_phi[:, 0], label=r'$\\theta$ E')\n lns2 = ax.plot(x * 1e9, theta_phi[:, 2], label=r'$\\theta$ F')\n lns3 = ax2.plot(x * 1e9, theta_phi[:, 1], label=r'$\\phi$ E')\n lns4 = ax2.plot(x * 1e9, theta_phi[:, 3], label=r'$\\phi$ F')\n\n lns = list(itertools.chain(lns1, lns2, lns3, lns4))\n labs = [l.get_label() for l in lns]\n ax.legend(lns, labs)\n\n if show:\n ax.get_figure().show()\n\n return ax, ax2\n\n\ndef update_force_extension(tether, i=0, posmin=10e-9, bins=None,\n resolution=None, sortcolumn=0, dXYZ_factors=None,\n fXYZ_factors=None, ax=None, autoscale=True,\n xlim=None, ylim=None, info=True):\n \"\"\"\n Update the figure with force extension data.\n\n Parameters\n ----------\n xlim : (float, float), optional\n Set xlim of the axis.\n ylim : (float, float), optional\n Set ylim of the axis.\n \"\"\"\n ax = ax or plt.gcf().gca()\n clear_force_extension(ax=ax)\n\n # Calculate binned force extension data\n r = binned_force_extension(tether=tether, i=i, posmin=posmin, bins=bins,\n resolution=resolution, sortcolumn=sortcolumn,\n dXYZ_factors=dXYZ_factors,\n fXYZ_factors=fXYZ_factors)\n if info:\n srp = tether.stress_release_pairs(i=i)\n\n for c in [ 'stress', 'release' ]:\n e = r['bin_means'][c]['extension']\n f = r['bin_means'][c]['force']\n fstd = r['bin_stds'][c]['force']\n ferr = r['bin_stds'][c]['force'] / np.sqrt(r['bin_Ns'][c])\n if info: label = ' '.join(srp[c]['info'][0])\n else: label = None\n plot_force_extension(e, f, fstd, ferr, label=label, ax=ax)\n\n '''\n # Calculate force extension of a dna with a known length and plot it\n if bps:\n x, F = 
dna.force_extension(bps=bps)\n ax.lines[2].set_data(x*1e9, F*1e12)\n else:\n ax.lines[2].set_data([0], [0])\n '''\n\n if info:\n ax.legend()\n if autoscale:\n ax.relim()\n # ax.autoscale_view()\n ax.autoscale()\n else:\n ax.set_xlim(xlim)\n ax.set_ylim(ylim)\n\n return ax\n\n\ndef clear_force_extension(ax=None):\n # clear old force extension lines and errorbars\n ax = ax or plt.gcf().gca()\n ax.set_prop_cycle(None)\n for l in ax.lines[::-1]:\n l.remove()\n ax.lines.clear()\n for c in ax.containers[::-1]:\n c.remove()\n ax.containers.clear()\n for t in ax.texts[::-1]:\n t.remove()\n ax.texts.clear()\n\n\ndef show_force_extension(tether, i=0, posmin=10e-9, bins=0, resolution=0,\n sortcolumn=0, dXYZ_factors=None, fXYZ_factors=None,\n autoscale=False, xlim=None, ylim=None, **kwargs):\n \"\"\"\n Plot the force extension data with index `i` (see method\n `tether.force_extension_pairs()`) on tether.fe_figure.\n\n Parameters\n ----------\n i : int\n Index of force extension pair. See method\n `tether.force_extension_pairs()`.\n xlim : (float, float), optional\n Xlimit of force extension axis.\n ylim : (float, float), optional\n Ylimit of force extension axis.\n \"\"\"\n # Initialize figure\n fig, ax = plt.subplots()\n ax.set_xlabel('Extension (nm)')\n ax.set_ylabel('Force (pN)')\n ax.set_title('Force Extension')\n\n def update_fe_pair(i, posmin, bins, resolution, sortcolumn, autoscale,\n xlim_l, xlim_h, ylim_l, ylim_h):\n if bins <= 0:\n bins = None\n if resolution <= 0:\n resolution = None\n update_force_extension(tether, i, posmin=posmin, bins=bins,\n resolution=resolution, sortcolumn=sortcolumn,\n dXYZ_factors=dXYZ_factors,\n fXYZ_factors=fXYZ_factors,\n ax=ax, autoscale=autoscale,\n xlim=(xlim_l, xlim_h),\n ylim=(ylim_l, ylim_h), **kwargs)\n fig.canvas.draw()\n\n # Set default xlim/ylim values\n if xlim is None or ylim is None:\n _xlim, _ylim = autolimits(tether, posmin=posmin)\n xlim = xlim or _xlim\n ylim = ylim or _ylim\n\n # Get number of all force extension pairs\n stop = len(tether.stress_release_pairs(**kwargs)['stress']['idx'])\n\n # Build user interface (ui)\n index = BoundedIntText(value=i, min=0, max=stop - 1, description='FE_pair:')\n posmin = FloatText(value=posmin, description='PosMin')\n bins = IntText(value=bins, description='Bins')\n resolution = FloatText(value=resolution, step=1,\n description='Resolution')\n sortcolumn = BoundedIntText(value=sortcolumn, min=0, max=2,\n description='Sortcolumn')\n autolim = Checkbox(value=autoscale, description='Autoscale')\n xlim_l = FloatText(value=xlim[0])\n xlim_h = FloatText(value=xlim[1])\n ylim_l = FloatText(value=ylim[0])\n ylim_h = FloatText(value=ylim[1])\n xlim_b = HBox((Label('xlim'), xlim_l, xlim_h))\n ylim_b = HBox((Label('ylim'), ylim_l, ylim_h))\n ui_fe = VBox((HBox((index, posmin)), HBox((bins, resolution, sortcolumn))))\n ui_plot = VBox((autolim, xlim_b, ylim_b))\n\n # initialize force extension plot\n update_fe_pair(i, posmin.value, bins.value, resolution.value,\n sortcolumn.value, autolim.value, xlim[0], xlim[1], ylim[0],\n ylim[1])\n\n # Make user input fields interactive\n out = interactive_output(update_fe_pair, {'i': index,\n 'posmin': posmin,\n 'bins': bins,\n 'resolution': resolution,\n 'sortcolumn': sortcolumn,\n 'autoscale': autolim,\n 'xlim_l': xlim_l,\n 'xlim_h': xlim_h,\n 'ylim_l': ylim_l,\n 'ylim_h': ylim_h})\n\n # Show user interface\n display(ui_fe)\n fig.show()\n display(ui_plot)\n\n return ui_fe, fig, ui_plot\n\n\ndef autolimits(tether, posmin=10e-9, samples=None, e=None, f=None, xlim=None,\n ylim=None):\n 
\"\"\"\n Determine xlim and ylim values for force extension plots.\n\n Parameters\n ----------\n samples : int, slice or index array, optional\n Samples to get extension and force from.\n e : 1D numpy.ndarray of floats, optional\n Extension in nm. Takes precedence over extension determined with\n `samples`.\n f : 1D numpy.ndarray of floats, optional\n Force in pN. Takes precedence over force determined with `samples`.\n xlim : (float, float), optional\n Xlimit of force extension axis. Takes precedence over xlim\n determined with `e`.\n ylim : (float, float), optional\n Ylimit of force extension axis. Takes precedence over ylim\n determined with `f`.\n\n Returns\n -------\n (float, float)\n The xlim\n (float, float)\n The ylim\n \"\"\"\n if samples is None \\\n and (xlim is None and e is None) \\\n or (ylim is None and f is None):\n # Get the start/stop indices of the data to be used to determine the\n # min max values\n pairs = tether.stress_release_pairs(slices=True)\n start = pairs['stress']['idx'][0].start\n stop = pairs['release']['idx'][-1].stop\n samples = slice(start, stop)\n\n if xlim is None and ylim is None and e is None and f is None:\n e_f = tether.force_extension(samples=samples, posmin=posmin) # m, N\n e = e_f['extension']\n f = e_f['force']\n if xlim is None and e is None:\n e = tether.extension(samples=samples, posmin=posmin) # m\n if ylim is None and f is None:\n f = tether.force(samples=samples, posmin=posmin) # N\n\n if xlim is None:\n e_min = e.min()\n e_max = e.max()\n e_diff = (e_max - e_min) * 0.02\n xlim = ((e_min - e_diff) * 1e9, (e_max + e_diff) * 1e9)\n\n if ylim is None:\n f_min = f.min()\n f_max = f.max()\n f_diff = (f_max - f_min) * 0.02\n ylim = ((f_min - f_diff) * 1e12, (f_max + f_diff) * 1e12)\n\n # Return the set limits\n return xlim, ylim\n\n\ndef save_figures(figures, directory=None, file_prefix=None, file_suffix=None,\n file_extension='.png', index_digits=3):\n \"\"\"\n Save matplotlib figures in a given directory.\n\n The filenames of the figures will be a concatenation of the `file_prefix`,\n an index with `index_digits` digits, the `file_suffix` and the\n `file_extension`.\n\n Parameters\n ----------\n figures : Iterable of matplotlib figures or one figure\n A list, array, generator or other Iterable type of matplotlib figures.\n If figures is only one matplotlib figure, no index will be included in\n the filename of the figure.\n directory : str\n The directory, the figures should be saved in. 
The directory will be\n created, if it does not exist.\n file_prefix : str, optional\n A prefix every filename of the saved figures should include.\n file_suffix : str, optional\n A suffix every filename of the saved figures should include.\n files_extension : str, optional\n The file extension (and type) to be used to save the figures (default\n '.png').\n index_digits : int, optional\n Digits to be used for the index in the filename of the figures.\n \"\"\"\n directory = directory or os.path.join(\".\", \"results\")\n file_prefix = file_prefix or \"\"\n file_suffix = file_suffix or \"\"\n\n # create results dir\n if not os.path.exists(directory):\n os.makedirs(directory)\n\n # If only one figure, save it without an index\n if not isinstance(figures, collections.Iterable):\n filename = \"%s%s%s\" % (file_prefix, file_suffix, file_extension)\n figures.savefig(os.path.join(directory, filename))\n\n for idx, fig in enumerate(figures):\n format_string = \"\".join((\"%s%.\", str(index_digits), \"i%s%s\"))\n filename = format_string % (file_prefix, idx, file_suffix,\n file_extension)\n fig.savefig(os.path.join(directory, filename))\n\n'''\n def save_force_extension_plots(self, directory=None, file_prefix=None,\n file_suffix=None, file_extension='.png',\n **kwargs):\n \"\"\"\n Save all plots created by `plot_force_extensions()`.\n\n directory : str\n The directory the images to be displayed are located in.\n file_prefix : str\n Display only the files beginning with `prefix`.\n file_suffix : str\n Display only the files ending with `suffix`.\n file_extension : str, optional\n The extension of the images that should be displayed. Default is\n '.png'.\n figure : matplotlib figure, optional\n A reference to a figure that should be used to plot the force\n extension pairs. 
If no figure is given, a new one is automatically\n created.\n **kwargs\n Parameters passed to the method `self.plot_force_extensions()`.\n \"\"\"\n kwargs.pop('draw', None)\n # Create generator for all force/extension stress/release pairs\n figures = self.plot_force_extensions(draw=False, **kwargs)\n\n # Save all figures\n evaluate.save_figures(figures, directory=directory,\n file_prefix=file_prefix, file_suffix=file_suffix,\n file_extension=file_extension)\n\n # Redraw the figure, after the last one has been saved\n self.fe_figure.canvas.draw()\n'''\n\n\ndef get_simulation(tether, i, settings_file, posZ=None, individual_posZ=False,\n kappa=None, kappa_z_factor=None, excited_axis=None,\n **kwargs):\n \"\"\"\n Get unzipping simulation for tether force extension segment number `i`.\n\n Determine `kappa` and `positionZ` from `tether` to get a proper simulation.\n\n Parameters\n ----------\n tether : Tether\n The tether object\n i : int\n The segment number to get the unzipping simulation for\n settings_file : str\n The filepath of the settings file for the simulation\n individual_posZ : bool\n Calculate the median of the distance of the microsphere to the surface\n from positionZ for each individual segment or the whole tether.region.\n posZ : float\n Set the positionZ manually (m).\n \"\"\"\n # Get radius from calibration\n radius = tether.calibration.radius\n\n # Determine distance between microsphere and surface\n if posZ is None:\n idx = None\n if individual_posZ:\n idx = tether.samples(i, cycle='stress')\n posZ = np.median(tether.get_data('positionZ', samples=idx))\n h0 = max(0.0, -posZ * tether.calibration.focalshift)\n\n # Get kappa for excited axis and axis Z\n kappa = tether.calibration.kappa(posZ) if kappa is None else kappa\n kappa_z_factor = 1 if kappa_z_factor is None else kappa_z_factor\n if excited_axis is None:\n axis = {'x': 0, 'y': 1}\n ax = tether.stress_release_pairs(i=i)['stress']['info'][0,0]\n excited_axis = axis[ax]\n axes_kappa = [excited_axis, 2]\n kappa = kappa[axes_kappa] * np.array([1, kappa_z_factor])\n\n # Get/do simulation with simulation_settings_file and radius, h0, and kappa\n simulation = uzsi.get_unzipping_simulation(settings_file, radius=radius,\n kappa=kappa, h0=h0, **kwargs)\n\n return simulation\n\n\ndef plot_unzip_data(tether, I, ax=None, fbnl=False, shift_x=0e-9, t_delta=15,\n plot_stress=True, plot_release=True, plot_raw=False,\n annotate_stress=True, annotate_release=True,\n simulation=None, **filter_kwargs):\n \"\"\"\n i : int or list of ints\n the force extension data to be plotted\n t_delta : float\n Time in seconds the microsphere was trapped before the start of the very\n first stress-release cycle\n \"\"\"\n if ax is None:\n fig, ax = plt.subplots()\n else:\n fig = ax.get_figure()\n\n ###############################\n ### Get and plot simulated data\n if simulation is not None:\n sim_values = uzsi.get_simulation_values(simulation)\n e_sim = sim_values['extension']\n f_sim = sim_values['force']\n fXYZ = sim_values['fXYZ']\n nuz = sim_values['nuz']\n # Apparent extension and average force acting on the microsphere\n ax.plot(e_sim[f_sim<=25e-12]*1e9, f_sim[f_sim<=25e-12]*1e12,\n color='#000000', ls=(0, (2.5, 2.5)), label='Simulation')\n # (offset, (on_off_seq)) dashes=(3, 2))\n #ax.annotate('Simulation', xy=(650, 7), xycoords='data', color=c)\n # # xytext=(21.35, -15), textcoords='offset points',\n # # arrowprops=dict(linewidth=1.25, arrowstyle=\"->\", color=c))\n\n if not isinstance(I, collections.Iterable):\n I = [I]\n for i in I:\n 
##################################\n ### Get and plot raw force extension\n if plot_raw:\n # Get raw extension and force for stress and release cycle\n pair = tether.force_extension_pairs(i=i, reduce_list=True)\n extension_raw = np.r_[pair['stress']['extension'],\n pair['release']['extension']]\n force_raw = np.r_[pair['stress']['force'],\n pair['release']['force']]\n ax.plot((extension_raw + shift_x) * 1e9, force_raw * 1e12,\n c='#CCCCCC')\n\n #######################################################################\n ### Get and plot force extension curve and simulation\n if fbnl:\n # Get fbnl_filter filtered force extension\n result = fbnl_force_extension(tether, i, **filter_kwargs)\n filtered_data = result['filtered_data'] # stress, release\n else:\n # Get binned force extension\n result = binned_force_extension(tether, i, **filter_kwargs)\n filtered_data = result['bin_means'] # stress, release\n\n # Get tmin and tmax from the first stress and the last release cycle datapoint\n tmin = filtered_data['stress']['time'][0] + t_delta\n tmax = filtered_data['release']['time'][-1] + t_delta\n time = '$t={:3.0f}-{:3.0f}\\,s$'.format(tmin, tmax)\n time = '$t={:3.0f}\\,s$'.format((tmin+tmax)/2)\n\n # Plot release cycle\n if plot_release:\n cycle = 'release'\n pre = 'rls '\n c = 'magenta'\n ax.plot((filtered_data[cycle]['extension'] + shift_x) * 1e9,\n filtered_data[cycle]['force'] * 1e12,\n label='{}{}'.format(pre, time))\n if annotate_release:\n ax.annotate('Release', xy=(700, 23), xycoords='data', color=c)\n # xytext=(5, -30), textcoords='offset points',\n # arrowprops=dict(linewidth=1.25, arrowstyle=\"->\", color=c))\n ax.annotate(\"\", xytext=(700, 22), xy=(750, 22), xycoords='data',\n arrowprops=dict(linewidth=1.25, arrowstyle=\"<-\", color=c))\n\n # Plot stress cycle\n if plot_stress:\n cycle = 'stress'\n pre = 'str '\n c = 'cyan'\n ax.plot((filtered_data[cycle]['extension'] + shift_x) * 1e9,\n filtered_data[cycle]['force'] * 1e12,\n label='{}{}'.format(pre, time))\n if annotate_stress:\n ax.annotate('Stretch', xy=(600, 23), xycoords='data', color=c)\n # xytext=(5, 20), textcoords='offset points',\n # arrowprops=dict(linewidth=1.25, arrowstyle=\"->\", color=c))\n ax.annotate(\"\", xytext=(600, 22), xy=(650, 22), xycoords='data',\n arrowprops=dict(linewidth=1.25, arrowstyle=\"->\", color=c))\n\n ax.set_xlabel('Extension (nm)')\n ax.set_ylabel('Force (pN)')\n\n return fig, ax\n", "#!/usr/bin/python\n# -*- coding: utf-8 -*-\n#\n# binning, functions to bin data\n# Copyright 2019 Tobias Jachowski\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport numpy as np\n\n\ndef calculate_bin_means(data, bins=None, resolution=None, sortcolumn=0):\n \"\"\"\n Calculate binned means.\n\n Parameters\n ----------\n data : 2D numpy.ndarray of type float\n bins : int or sequence of scalars or str, optional\n If bins is an int, it defines the number of equal-width bins. 
If bins\n is a sequence, it defines the bin edges, including the rightmost edge,\n allowing for non-uniform bin widths.\n\n If bins is a string from the list below, histogram_bin_edges will use\n the method chosen to calculate the optimal bin width and consequently\n the number of bins (see `numpy.histogram_bin_edges()` for more detail)\n from the data that falls within the requested range. While the\n bin width will be optimal for the actual data in the range, the number\n of bins will be computed to fill the entire range, including the empty\n portions. For visualisation, using the ‘auto’ option is suggested.\n Weighted data is not supported for automated bin size selection.\n\n ‘auto’\n Maximum of the ‘sturges’ and ‘fd’ estimators. Provides good all\n around performance.\n ‘fd’ (Freedman Diaconis Estimator)\n Robust (resilient to outliers) estimator that takes into account\n data variability and data size.\n ‘doane’\n An improved version of Sturges’ estimator that works better with\n non-normal datasets.\n ‘scott’\n Less robust estimator that that takes into account data variability\n and data size.\n ‘stone’\n Estimator based on leave-one-out cross-validation estimate of the\n integrated squared error. Can be regarded as a generalization of\n Scott’s rule.\n ‘rice’\n Estimator does not take variability into account, only data size.\n Commonly overestimates number of bins required.\n ‘sturges’\n R’s default method, only accounts for data size. Only optimal for\n gaussian data and underestimates number of bins for large\n non-gaussian datasets.\n ‘sqrt’\n Square root (of data size) estimator, used by Excel and other\n programs for its speed and simplicity.\n resolution: float\n Number of bins per unit of `sortcolumn`. Used to calculate the number\n of bins needed for the specified resolution. Only evaluated if `edges`\n and `bins` is None.\n\n Returns\n -------\n edges, centers, width, bin_means, bin_stds, bin_Ns\n \"\"\"\n if bins is None:\n bins = number_of_bins(data[:, sortcolumn], resolution)\n # Create the bins based on data[:, sortcolumn]\n edges, centers, width, nbins = get_edges(data[:, sortcolumn], bins)\n # get first dim, i.e. the sortcolumn\n edges, centers, width = edges[0], centers[0], width[0]\n\n # Get the indices of the bins to which each value in input array belongs.\n bin_idx = np.digitize(data[:, sortcolumn], edges)\n\n # Find which points are on the rightmost edge.\n on_edge = data[:, sortcolumn] == edges[-1]\n # Shift these points one bin to the left.\n bin_idx[on_edge] -= 1\n\n # Calculate the histogram, means, and std of the data in the bins\n bin_Ns = np.array([np.sum(bin_idx == i)\n for i in range(1, len(edges))])\n bin_means = np.array([data[bin_idx == i].mean(axis=0)\n for i in range(1, len(edges))])\n bin_stds = np.array([data[bin_idx == i].std(axis=0, ddof=1)\n for i in range(1, len(edges))])\n\n return { 'edges': edges,\n 'centers': centers,\n 'width': width,\n 'bin_Ns': bin_Ns,\n 'bin_means': bin_means,\n 'bin_stds': bin_stds }\n\n\ndef calculate_bin_means_ND(data, bins=None):\n \"\"\"\n Calculate D-dimensional histogram\n\n Parameters\n ----------\n data : 2D np.ndarray of shape N,D\n bins : int or sequence of scalars or str, optional\n If bins is an int, it defines the number of equal-width bins. 
If bins\n is a sequence, it defines the bin edges, including the rightmost edge,\n allowing for non-uniform bin widths.\n\n If bins is a string from the list below, histogram_bin_edges will use\n the method chosen to calculate the optimal bin width and consequently\n the number of bins (see `numpy.histogram_bin_edges()` for more detail)\n from the data that falls within the requested range. While the\n bin width will be optimal for the actual data in the range, the number\n of bins will be computed to fill the entire range, including the empty\n portions. For visualisation, using the ‘auto’ option is suggested.\n Weighted data is not supported for automated bin size selection.\n\n ‘auto’\n Maximum of the ‘sturges’ and ‘fd’ estimators. Provides good all\n around performance.\n ‘fd’ (Freedman Diaconis Estimator)\n Robust (resilient to outliers) estimator that takes into account\n data variability and data size.\n ‘doane’\n An improved version of Sturges’ estimator that works better with\n non-normal datasets.\n ‘scott’\n Less robust estimator that that takes into account data variability\n and data size.\n ‘stone’\n Estimator based on leave-one-out cross-validation estimate of the\n integrated squared error. Can be regarded as a generalization of\n Scott’s rule.\n ‘rice’\n Estimator does not take variability into account, only data size.\n Commonly overestimates number of bins required.\n ‘sturges’\n R’s default method, only accounts for data size. Only optimal for\n gaussian data and underestimates number of bins for large\n non-gaussian datasets.\n ‘sqrt’\n Square root (of data size) estimator, used by Excel and other\n programs for its speed and simplicity.\n\n Returns\n -------\n edges, centers, widths, bin_means, bin_stds, bin_Ns\n \"\"\"\n N, D = data.shape\n edges, centers, widths, nbins = get_edges(data, bins)\n nbin = nbins + 2 # include outliers on each end\n\n # indices of x in bins of x and\n # indices of y in bins of y\n Ncount = tuple(\n # avoid np.digitize to work around gh-11022\n np.searchsorted(edges[i], data[:, i], side='right')\n for i in range(D)\n )\n for i in range(D):\n # Find which points are on the rightmost edge.\n on_edge = (data[:, i] == edges[i][-1])\n # Shift these points one bin to the left.\n Ncount[i][on_edge] -= 1\n\n # Compute the sample indices in the flattened histogram matrix.\n # This raises an error if the array is too large.\n xy = np.ravel_multi_index(Ncount, nbin)\n\n # Compute the number of repetitions in xy and assign it to the\n # flattened histmat.\n hist = np.bincount(xy, minlength=nbin.prod())\n #bin_Ns = np.array([np.sum([xy == i])\n # for i in range(nbin.prod())])\n #data_xy = [data[xy == i]\n # for i in range(nbin.prod())]\n mean_xy = np.array([np.mean(data[xy == i], axis=0)\n for i in range(nbin.prod())])\n std_xy = np.array([np.std(data[xy == i], axis=0, ddof=1)\n for i in range(nbin.prod())])\n\n # Shape into a proper matrix\n bin_Ns = hist.reshape(nbin)\n bin_Ns = bin_Ns.reshape(nbin)\n bin_means = mean_xy.T.reshape(np.r_[D, nbin]).T\n bin_stds = std_xy.T.reshape(np.r_[D, nbin]).T\n\n # remove outliers\n #hist = hist[1:-1, 1:-1]\n #mean_xy = mean_xy[1:-1, 1:-1]\n #std_xy = std_xy[1:-1, 1:-1]\n\n return { 'edges': edges,\n 'centers': centers,\n 'widths': widths,\n 'bin_Ns': bin_Ns,\n 'bin_means': bin_means,\n 'bin_stds': bin_stds }\n\n\ndef concatenate_data_dict(data_dict, keys=None):\n data = []\n keys = list(data_dict.keys()) if keys is None else keys\n columns = []\n for key in keys:\n d = data_dict[key]\n if d.ndim == 1:\n d = 
np.expand_dims(d, axis=1)\n data.append(d)\n columns.append(d.shape[1])\n\n return np.concatenate(data, axis=1), keys, columns\n\n\ndef separate_data_array(data_array, keys, columns):\n data = {}\n start = 0\n stop = 0\n for key, column in zip(keys, columns):\n stop += column\n d = data_array[:,start:stop].squeeze()\n if column == 1:\n d = np.atleast_1d(d)\n else:\n d = np.atleast_2d(d)\n data[key] = d\n start = stop\n\n return data\n\n\ndef get_edges(data, bins=None):\n \"\"\"\n Get edges for bins in data\n\n Parameters\n ----------\n bins : int or sequence of scalars, str or sequence of floats, optional\n If bins is an int, it defines the number of equal-width bins in the\n given range ('auto', by default). If bins is a sequence of sequences,\n it defines the bin edges, including the rightmost edge, for every\n dimension of data, allowing for non-uniform bin widths.\n\n If bins is a string from the list below, histogram_bin_edges will use\n the method chosen to calculate the optimal bin width and consequently\n the number of bins (see `numpy.histogram_bin_edges()` for more detail)\n from the data that falls within the requested range. While the\n bin width will be optimal for the actual data in the range, the number\n of bins will be computed to fill the entire range, including the empty\n portions. For visualisation, using the ‘auto’ option is suggested.\n Weighted data is not supported for automated bin size selection.\n\n ‘auto’\n Maximum of the ‘sturges’ and ‘fd’ estimators. Provides good all\n around performance.\n ‘fd’ (Freedman Diaconis Estimator)\n Robust (resilient to outliers) estimator that takes into account\n data variability and data size.\n ‘doane’\n An improved version of Sturges’ estimator that works better with\n non-normal datasets.\n ‘scott’\n Less robust estimator that that takes into account data variability\n and data size.\n ‘stone’\n Estimator based on leave-one-out cross-validation estimate of the\n integrated squared error. Can be regarded as a generalization of\n Scott’s rule.\n ‘rice’\n Estimator does not take variability into account, only data size.\n Commonly overestimates number of bins required.\n ‘sturges’\n R’s default method, only accounts for data size. Only optimal for\n gaussian data and underestimates number of bins for large\n non-gaussian datasets.\n ‘sqrt’\n Square root (of data size) estimator, used by Excel and other\n programs for its speed and simplicity.\n\n Returns\n -------\n edges, centers, widths, nbins\n \"\"\"\n # data has one dimension, make it 2D and add one dimension to bins\n extend = False\n if np.ndim(data) == 1:\n data = np.atleast_2d(data).T\n extend = True\n N, D = data.shape\n\n # Default to 'auto' number of bins\n if bins is None:\n bins = 'auto'\n\n # bins is a str, an int, or data was extended by one dimension and bins\n # are defining the edges. 
Create number of bins for each dimension of data.\n if isinstance(bins, str) or isinstance(bins, int) or extend:\n bins = D*[bins]\n\n # Check if for every dimension in data there is one number of bins\n T = len(bins)\n if T != D:\n raise ValueError(\n 'The dimension of bins must be equal to the dimension of the '\n 'sample x.')\n\n # Create edges arrays\n nbins = np.empty(D, int)\n edges = D*[None]\n centers = D*[None]\n widths = D*[None]\n\n for i in range(D):\n if np.ndim(bins[i]) == 0:\n # bins[i] is a str or an int\n edges[i] = np.histogram_bin_edges(data[:, i], bins[i])\n elif np.ndim(bins[i]) == 1:\n # bins[i] is a sequence and defines the edges\n edges[i] = np.asarray(bins[i])\n if np.any(edges[i][:-1] > edges[i][1:]):\n raise ValueError(\n '`bins[{}]` must be monotonically increasing, when '\n 'defining edges.'.format(i))\n else:\n raise ValueError(\n '`bins[{}]` must be a scalar or an 1D array.'.format(i))\n nbins[i] = len(edges[i]) - 1\n widths[i] = edges[i][1] - edges[i][0]\n centers[i] = edges[i][0:-1] + widths[i] / 2\n\n return edges, centers, widths, nbins\n\n\ndef number_of_bins(data, resolution=None):\n \"\"\"\n Calculate the number of bins for requested resolution of the data\n\n Parameters\n ----------\n data : numpy.ndarray of type float\n resolution : float\n\n Returns\n -------\n bins : None or int\n Number of bins. If `resolution` <= 0 returns None.\n \"\"\"\n if resolution is None:\n return None\n d_max = data.max()\n d_min = data.min()\n bins = int(round((d_max - d_min) * resolution))\n return max(1, bins)\n", "#!/usr/bin/python\n# -*- coding: utf-8 -*-\n#\n# helpers, functions to ease miscellaneous work\n# Copyright 2016,2017,2018,2019 Tobias Jachowski\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport numpy as np\nimport operator\nimport os\nimport sys\n\nfrom contextlib import contextmanager\n\n# Suppress stdout\n@contextmanager\ndef suppress_stdout():\n with open(os.devnull, \"w\") as devnull:\n old_stdout = sys.stdout\n sys.stdout = devnull\n try:\n yield\n finally:\n sys.stdout = old_stdout\n\ndef min_max_idx(x, min_x=None, max_x=None, include_bounds=True,\n detailed=False):\n \"\"\"\n Parameters\n ----------\n x : 1D numpy.ndarray of type float\n The x values.\n min_x : float\n The minimum value of `x`.\n max_x : float\n The maximum value of `x`.\n include_bounds : bool\n Whether to include or exlude min/max values in the output array\n\n Returns\n -------\n dict with indices\n The indices of the values to not be cropped (i.e. 
value is True).\n \"\"\"\n # Select values with min < value < max:\n idx_min = compare_idx(x, min_x, 'greater', include_bounds=include_bounds)\n idx_max = compare_idx(x, max_x, 'less', include_bounds=include_bounds)\n idx_min_max = np.logical_and(idx_min, idx_max)\n\n if detailed:\n return_value = {\n 'min_max': idx_min_max,\n 'min': idx_min,\n 'max': idx_max,\n }\n else:\n return_value = idx_min_max\n\n return return_value\n\n\n_fu = {\n True: {\n 'less': operator.le,\n 'equal': operator.eq,\n 'greater': operator.ge\n },\n False: {\n 'less': operator.lt,\n 'equal': operator.ne,\n 'greater': operator.gt\n}}\n_va = {\n 'less': float('inf'),\n 'equal': 0.0,\n 'greater': float('-inf')\n}\ndef compare_idx(x, y=None, comparison='greater', include_bounds=True):\n f = _fu[include_bounds][comparison]\n y = _va[comparison] if y is None else y\n return f(x, y)\n\n\ndef step_idx(x, threshold, comparison='greater', include_bounds=True):\n idx = compare_idx(x, threshold, comparison, include_bounds=include_bounds)\n if np.any(idx):\n i_first = first_last_idx(idx)[0]\n idx[i_first:] = True\n return idx\n\n\ndef first_last_idx(idx_bool):\n # Get first and last index of values above min and values below max.\n length = len(idx_bool)\n i_first = np.argmax(idx_bool)\n i_last = length - 1 - np.argmax(idx_bool[::-1])\n return i_first, i_last\n\n\ndef make_contiguous_idx(idx_bool):\n # Make selection contiguous\n idx = idx_bool.copy()\n i_first, i_last = first_last_idx(idx)\n start = i_first\n stop = i_last + 1\n idx[start:stop] = True\n return idx\n\n\ndef crop_x_y_idx(x, y=None, min_x=None, max_x=None, min_y=None, max_y=None,\n include_bounds=True):\n \"\"\"\n Crop pairs of variates according to their minimum and maximum values.\n\n Parameters\n ----------\n x : 1D numpy.ndarray of type float\n The x values.\n y : 1D numpy.ndarray of type float\n The y values.\n min_x : float\n The minimum value of `x`.\n max_x : float\n The maximum value of `x`.\n min_y : float\n The minimum value of `y`.\n max_y : float\n The maximum value of `y`.\n include_bounds : bool\n Whether to include or exlude min/max values in the output arrays.\n\n Returns\n -------\n index array of type bool\n The index of the values to not be cropped (i.e. value is True).\n \"\"\"\n idx_x = min_max_idx(\n x, min_x=min_x, max_x=max_x, include_bounds=include_bounds)\n idx_y = min_max_idx(\n y, min_x=min_y, max_x=max_y, include_bounds=include_bounds)\n\n idx = np.logical_and(idx_x, idx_y)\n return idx\n\n\ndef crop_x_y(x, y=None, min_x=None, max_x=None, min_y=None, max_y=None,\n include_bounds=True):\n \"\"\"\n Crop pairs of variates according to their minimum and maximum values.\n\n Parameters\n ----------\n x : 1D numpy.ndarray of type float\n The x values.\n y : 1D numpy.ndarray of type float\n The y values.\n min_x : float\n The minimum value of `x`.\n max_x : float\n The maximum value of `x`.\n min_y : float\n The minimum value of `y`.\n max_y : float\n The maximum value of `y`.\n include_bounds : bool\n Whether to include or exlude min/max values in the output arrays.\n\n Returns\n -------\n tuple of 2 1D numpy.ndarray of type float\n The cropped values (x, y).\n \"\"\"\n idx = crop_x_y_idx(x, y=y, min_x=min_x, max_x=max_x, min_y=min_y,\n max_y=max_y, include_bounds=include_bounds)\n if y is None:\n return x[idx]\n else:\n return x[idx], y[idx]\n" ]
[ [ "numpy.expand_dims", "numpy.sqrt", "numpy.linalg.norm", "matplotlib.pyplot.subplots", "matplotlib.pyplot.gcf", "numpy.round", "numpy.concatenate", "numpy.array" ], [ "numpy.expand_dims", "numpy.asarray", "numpy.concatenate", "numpy.ndim", "numpy.std", "numpy.atleast_1d", "numpy.mean", "numpy.atleast_2d", "numpy.searchsorted", "numpy.ravel_multi_index", "numpy.histogram_bin_edges", "numpy.any", "numpy.digitize", "numpy.sum", "numpy.empty" ], [ "numpy.logical_and", "numpy.argmax", "numpy.any" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
TaoweiZhang/MegEngine
[ "bd3c4a05274f69dacca6097d8cbadbb34c7cc2e4", "bd3c4a05274f69dacca6097d8cbadbb34c7cc2e4" ]
[ "imperative/python/test/unit/utils/test_module_stats.py", "imperative/python/test/unit/traced_module/test_modification.py" ]
[ "import collections\nimport math\nfrom copy import deepcopy\n\nimport numpy as np\nimport pytest\n\nimport megengine as mge\nimport megengine.functional as F\nimport megengine.hub as hub\nimport megengine.module as M\nfrom megengine.core._trace_option import use_symbolic_shape\nfrom megengine.utils.module_stats import module_stats\n\n\[email protected](\n use_symbolic_shape(), reason=\"This test do not support symbolic shape.\",\n)\ndef test_module_stats():\n net = ResNet(BasicBlock, [2, 2, 2, 2])\n input_shape = (1, 3, 224, 224)\n total_stats, stats_details = module_stats(net, input_shapes=input_shape)\n x1 = np.random.random((1, 3, 224, 224)).astype(\"float32\")\n gt_flops, gt_acts = net.get_stats(mge.tensor(x1))\n assert (total_stats.flops, total_stats.act_dims) == (gt_flops, gt_acts,)\n\n total_stats, stats_details = module_stats(net, inputs=x1)\n assert (total_stats.flops, total_stats.act_dims) == (gt_flops, gt_acts,)\n\n\[email protected](\n use_symbolic_shape(), reason=\"This test do not support symbolic shape.\",\n)\ndef test_other_input_module_state():\n a = [1, 2]\n b = {\"1\": 1, \"2\": 2}\n nt = collections.namedtuple(\"nt\", [\"n\", \"t\"])\n _nt = nt(n=1, t=2)\n net = FakeNet()\n net(a)\n net(b)\n net(_nt)\n\n\nclass FakeNet(M.Module):\n def __init__(self):\n super().__init__()\n\n def forward(self, x):\n assert isinstance(\n x,\n (np.ndarray, collections.abc.Mapping, collections.abc.Sequence, mge.Tensor),\n ) or (isinstance(x, tuple) and hasattr(x, \"_fields\"))\n\n\nclass BasicBlock(M.Module):\n expansion = 1\n\n def __init__(\n self,\n in_channels,\n channels,\n stride=1,\n groups=1,\n base_width=64,\n dilation=1,\n norm=M.BatchNorm2d,\n ):\n super().__init__()\n\n self.tmp_in_channels = in_channels\n self.tmp_channels = channels\n self.stride = stride\n\n if groups != 1 or base_width != 64:\n raise ValueError(\"BasicBlock only supports groups=1 and base_width=64\")\n if dilation > 1:\n raise NotImplementedError(\"Dilation > 1 not supported in BasicBlock\")\n self.conv1 = M.Conv2d(\n in_channels, channels, 3, stride, padding=dilation, bias=False\n )\n self.bn1 = norm(channels)\n self.conv2 = M.Conv2d(channels, channels, 3, 1, padding=1, bias=False)\n self.bn2 = norm(channels)\n\n self.downsample_id = M.Identity()\n self.downsample_conv = M.Conv2d(in_channels, channels, 1, stride, bias=False)\n self.downsample_norm = norm(channels)\n\n def forward(self, x):\n identity = x\n x = self.conv1(x)\n x = self.bn1(x)\n x = F.relu(x)\n x = self.conv2(x)\n x = self.bn2(x)\n if self.tmp_in_channels == self.tmp_channels and self.stride == 1:\n identity = self.downsample_id(identity)\n else:\n identity = self.downsample_conv(identity)\n identity = self.downsample_norm(identity)\n x += identity\n x = F.relu(x)\n return x\n\n def get_stats(self, x):\n activations, flops = 0, 0\n\n identity = x\n\n in_x = deepcopy(x)\n x = self.conv1(x)\n tmp_flops, tmp_acts = cal_conv_stats(self.conv1, in_x, x)\n activations += tmp_acts\n flops += tmp_flops\n\n in_x = deepcopy(x)\n x = self.bn1(x)\n tmp_flops, tmp_acts = cal_norm_stats(self.bn1, in_x, x)\n activations += tmp_acts\n flops += tmp_flops\n\n x = F.relu(x)\n\n in_x = deepcopy(x)\n x = self.conv2(x)\n tmp_flops, tmp_acts = cal_conv_stats(self.conv2, in_x, x)\n activations += tmp_acts\n flops += tmp_flops\n\n in_x = deepcopy(x)\n x = self.bn2(x)\n tmp_flops, tmp_acts = cal_norm_stats(self.bn2, in_x, x)\n activations += tmp_acts\n flops += tmp_flops\n\n if self.tmp_in_channels == self.tmp_channels and self.stride == 1:\n identity = 
self.downsample_id(identity)\n else:\n in_x = deepcopy(identity)\n identity = self.downsample_conv(identity)\n tmp_flops, tmp_acts = cal_conv_stats(self.downsample_conv, in_x, identity)\n activations += tmp_acts\n flops += tmp_flops\n\n in_x = deepcopy(identity)\n identity = self.downsample_norm(identity)\n tmp_flops, tmp_acts = cal_norm_stats(self.downsample_norm, in_x, identity)\n activations += tmp_acts\n flops += tmp_flops\n\n x += identity\n x = F.relu(x)\n\n return x, flops, activations\n\n\nclass ResNet(M.Module):\n def __init__(\n self,\n block,\n layers=[2, 2, 2, 2],\n num_classes=1000,\n zero_init_residual=False,\n groups=1,\n width_per_group=64,\n replace_stride_with_dilation=None,\n norm=M.BatchNorm2d,\n ):\n super().__init__()\n self.in_channels = 64\n self.dilation = 1\n if replace_stride_with_dilation is None:\n # each element in the tuple indicates if we should replace\n # the 2x2 stride with a dilated convolution instead\n replace_stride_with_dilation = [False, False, False]\n if len(replace_stride_with_dilation) != 3:\n raise ValueError(\n \"replace_stride_with_dilation should be None \"\n \"or a 3-element tuple, got {}\".format(replace_stride_with_dilation)\n )\n self.groups = groups\n self.base_width = width_per_group\n self.conv1 = M.Conv2d(\n 3, self.in_channels, kernel_size=7, stride=2, padding=3, bias=False\n )\n self.bn1 = norm(self.in_channels)\n self.maxpool = M.MaxPool2d(kernel_size=3, stride=2, padding=1)\n\n self.layer1_0 = BasicBlock(\n self.in_channels,\n 64,\n stride=1,\n groups=self.groups,\n base_width=self.base_width,\n dilation=self.dilation,\n norm=M.BatchNorm2d,\n )\n self.layer1_1 = BasicBlock(\n self.in_channels,\n 64,\n stride=1,\n groups=self.groups,\n base_width=self.base_width,\n dilation=self.dilation,\n norm=M.BatchNorm2d,\n )\n self.layer2_0 = BasicBlock(64, 128, stride=2)\n self.layer2_1 = BasicBlock(128, 128)\n self.layer3_0 = BasicBlock(128, 256, stride=2)\n self.layer3_1 = BasicBlock(256, 256)\n self.layer4_0 = BasicBlock(256, 512, stride=2)\n self.layer4_1 = BasicBlock(512, 512)\n\n self.layer1 = self._make_layer(block, 64, layers[0], norm=norm)\n self.layer2 = self._make_layer(\n block, 128, 2, stride=2, dilate=replace_stride_with_dilation[0], norm=norm\n )\n self.layer3 = self._make_layer(\n block, 256, 2, stride=2, dilate=replace_stride_with_dilation[1], norm=norm\n )\n self.layer4 = self._make_layer(\n block, 512, 2, stride=2, dilate=replace_stride_with_dilation[2], norm=norm\n )\n self.fc = M.Linear(512, num_classes)\n\n for m in self.modules():\n if isinstance(m, M.Conv2d):\n M.init.msra_normal_(m.weight, mode=\"fan_out\", nonlinearity=\"relu\")\n if m.bias is not None:\n fan_in, _ = M.init.calculate_fan_in_and_fan_out(m.weight)\n bound = 1 / math.sqrt(fan_in)\n M.init.uniform_(m.bias, -bound, bound)\n elif isinstance(m, M.BatchNorm2d):\n M.init.ones_(m.weight)\n M.init.zeros_(m.bias)\n elif isinstance(m, M.Linear):\n M.init.msra_uniform_(m.weight, a=math.sqrt(5))\n if m.bias is not None:\n fan_in, _ = M.init.calculate_fan_in_and_fan_out(m.weight)\n bound = 1 / math.sqrt(fan_in)\n M.init.uniform_(m.bias, -bound, bound)\n if zero_init_residual:\n for m in self.modules():\n M.init.zeros_(m.bn2.weight)\n\n def _make_layer(\n self, block, channels, blocks, stride=1, dilate=False, norm=M.BatchNorm2d\n ):\n previous_dilation = self.dilation\n if dilate:\n self.dilation *= stride\n stride = 1\n\n layers = []\n layers.append(\n block(\n self.in_channels,\n channels,\n stride,\n groups=self.groups,\n base_width=self.base_width,\n 
dilation=previous_dilation,\n norm=norm,\n )\n )\n self.in_channels = channels * block.expansion\n for _ in range(1, blocks):\n layers.append(\n block(\n self.in_channels,\n channels,\n groups=self.groups,\n base_width=self.base_width,\n dilation=self.dilation,\n norm=norm,\n )\n )\n\n return M.Sequential(*layers)\n\n def extract_features(self, x):\n outputs = {}\n x = self.conv1(x)\n x = self.bn1(x)\n x = F.relu(x)\n x = self.maxpool(x)\n outputs[\"stem\"] = x\n\n x = self.layer1(x)\n outputs[\"res2\"] = x\n x = self.layer2(x)\n outputs[\"res3\"] = x\n x = self.layer3(x)\n outputs[\"res4\"] = x\n x = self.layer4(x)\n outputs[\"res5\"] = x\n return outputs\n\n def forward(self, x):\n x = self.extract_features(x)[\"res5\"]\n\n x = F.avg_pool2d(x, 7)\n x = F.flatten(x, 1)\n x = self.fc(x)\n\n return x\n\n def get_stats(self, x):\n flops, activations = 0, 0\n in_x = deepcopy(x)\n x = self.conv1(x)\n tmp_flops, tmp_acts = cal_conv_stats(self.conv1, in_x, x)\n activations += tmp_acts\n flops += tmp_flops\n\n in_x = deepcopy(x)\n x = self.bn1(x)\n tmp_flops, tmp_acts = cal_norm_stats(self.bn1, in_x, x)\n activations += tmp_acts\n flops += tmp_flops\n\n x = F.relu(x)\n\n in_x = deepcopy(x)\n x = self.maxpool(x)\n tmp_flops, tmp_acts = cal_pool_stats(self.maxpool, in_x, x)\n activations += tmp_acts\n flops += tmp_flops\n\n x, tmp_flops, tmp_acts = self.layer1_0.get_stats(x)\n activations += tmp_acts\n flops += tmp_flops\n\n x, tmp_flops, tmp_acts = self.layer1_1.get_stats(x)\n activations += tmp_acts\n flops += tmp_flops\n\n x, tmp_flops, tmp_acts = self.layer2_0.get_stats(x)\n activations += tmp_acts\n flops += tmp_flops\n\n x, tmp_flops, tmp_acts = self.layer2_1.get_stats(x)\n activations += tmp_acts\n flops += tmp_flops\n\n x, tmp_flops, tmp_acts = self.layer3_0.get_stats(x)\n activations += tmp_acts\n flops += tmp_flops\n\n x, tmp_flops, tmp_acts = self.layer3_1.get_stats(x)\n activations += tmp_acts\n flops += tmp_flops\n\n x, tmp_flops, tmp_acts = self.layer4_0.get_stats(x)\n activations += tmp_acts\n flops += tmp_flops\n\n x, tmp_flops, tmp_acts = self.layer4_1.get_stats(x)\n activations += tmp_acts\n flops += tmp_flops\n\n x = F.avg_pool2d(x, 7)\n\n x = F.flatten(x, 1)\n\n in_x = deepcopy(x)\n x = self.fc(x)\n tmp_flops, tmp_acts = cal_linear_stats(self.fc, in_x, x)\n activations += tmp_acts\n flops += tmp_flops\n\n return flops, activations\n\n\ndef cal_conv_stats(module, input, output):\n bias = 1 if module.bias is not None else 0\n flops = np.prod(output[0].shape) * (\n module.in_channels // module.groups * np.prod(module.kernel_size) + bias\n )\n acts = np.prod(output[0].shape)\n return flops, acts\n\n\ndef cal_norm_stats(module, input, output):\n return np.prod(input[0].shape) * 7, np.prod(output[0].shape)\n\n\ndef cal_linear_stats(module, inputs, outputs):\n bias = module.out_features if module.bias is not None else 0\n return (\n np.prod(outputs[0].shape) * module.in_features + bias,\n np.prod(outputs[0].shape),\n )\n\n\ndef cal_pool_stats(module, inputs, outputs):\n return (\n np.prod(outputs[0].shape) * (module.kernel_size ** 2),\n np.prod(outputs[0].shape),\n )\n", "# MegEngine is Licensed under the Apache License, Version 2.0 (the \"License\")\n#\n# Copyright (c) 2014-2021 Megvii Inc. 
All rights reserved.\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nimport pickle\nfrom collections import defaultdict\nfrom itertools import chain\n\nimport numpy as np\n\nimport megengine.functional as F\nimport megengine.module as M\nimport megengine.module.qat as qat\nfrom megengine.module.identity import Identity\nfrom megengine.traced_module import TracedModule, trace_module\nfrom megengine.traced_module.expr import CallFunction, CallMethod, Expr, GetAttr, Input\nfrom megengine.traced_module.node import ModuleNode, Node, TensorNode\n\n\nclass IdentityMod(M.Module):\n def forward(self, x):\n return x\n\n\nclass MyBlock(M.Module):\n def __init__(self, in_channels=3, channels=3):\n super(MyBlock, self).__init__()\n self.conv1 = M.Conv2d(in_channels, channels, 3, 1, padding=1, bias=False)\n self.bn1 = M.BatchNorm2d(channels)\n self.nothing = IdentityMod()\n\n def forward(self, x):\n x = self.conv1(x)\n x = self.bn1(x)\n x = F.relu(x) + 1\n x = self.nothing(x)\n return x\n\n\nclass MyModule(M.Module):\n def __init__(self):\n super(MyModule, self).__init__()\n self.block0 = MyBlock()\n self.block1 = MyBlock()\n self.nothing = IdentityMod()\n\n def forward(self, x):\n x = self.block0(x)\n x = self.block1(x)\n x = self.nothing(x)\n return x\n\n\nclass MyBlock1(M.Module):\n def forward(self, a):\n y = F.concat([a, a])\n return a, y\n\n\nclass MyModule1(M.Module):\n def __init__(self):\n super().__init__()\n self.block0 = MyBlock1()\n self.block1 = MyBlock1()\n\n def forward(self, a):\n a, y1 = self.block0(a)\n a = a + 1\n a, y2 = self.block1(a)\n return a, y1 + y2\n\n\nclass NewModule(M.Module):\n def __init__(self, traced_module):\n super(NewModule, self).__init__()\n self.module = traced_module\n\n def forward(self, x):\n x = x - 1\n x = self.module(x)\n x = x + 1\n return x\n\n\ndef _check_expr_users(flattened_module):\n node_user = defaultdict(list)\n for expr in flattened_module.graph._exprs:\n for node in expr.inputs:\n node_user[node].append(expr)\n for node in flattened_module.graph.nodes():\n node.users.sort(key=lambda m: m._id)\n node_user[node].sort(key=lambda m: m._id)\n assert node.users == node_user[node]\n\n\ndef _init_cls(cls):\n module = cls()\n x = F.ones((1, 3, 3, 3))\n y = module(x)\n traced_module = trace_module(module, x)\n return traced_module, x, y\n\n\ndef _init_block():\n return _init_cls(MyBlock)\n\n\ndef _init_module():\n return _init_cls(MyModule)\n\n\ndef test_search():\n traced_module, *_ = _init_block()\n graph = traced_module.graph\n relu_expr = graph.get_function_by_type(F.relu).as_unique()\n assert isinstance(relu_expr, CallFunction) and relu_expr.func == F.relu\n\n conv_node = graph.get_module_by_type(M.Conv2d).as_unique()\n assert isinstance(conv_node, ModuleNode) and conv_node.module_type == M.Conv2d\n\n add_expr = graph.get_method_by_type(\"__add__\").as_unique()\n assert isinstance(add_expr, CallMethod) and add_expr.method == \"__add__\"\n\n conv_node = graph.get_node_by_name(\"MyBlock_conv1\").as_unique()\n assert isinstance(conv_node, ModuleNode) and conv_node.module_type == M.Conv2d\n\n\ndef test_producer_and_users():\n traced_module, *_ = _init_module()\n\n def _check(exprs):\n for expr in exprs:\n for n in chain(expr.inputs, expr.outputs):\n if not isinstance(n.expr, Input):\n assert n.expr in exprs\n for e in n.users:\n assert e in exprs\n assert n in e.inputs\n\n for mod in 
traced_module.modules():\n if not hasattr(mod, \"argdef_graph_map\"):\n continue\n for g in mod.argdef_graph_map.values():\n _check(g._exprs)\n\n\ndef test_insert():\n traced_module, x, expect = _init_block()\n graph = traced_module.graph\n relu_out = graph.get_function_by_type(F.relu).as_unique().outputs[0]\n with graph.insert_exprs():\n neg_out = F.neg(relu_out)\n graph.replace_node({relu_out: neg_out})\n graph.compile()\n np.testing.assert_allclose(expect - 1, 1 - traced_module(x), atol=1e-6)\n\n\ndef test_insert_module():\n class Neg(M.Module):\n def __init__(self, name):\n super().__init__(name)\n self.identity = M.Identity()\n self.identity_list = [M.Identity(), M.Identity()]\n self.identity_dict = {\"0\": M.Identity(), \"1\": M.Identity()}\n self.param = F.zeros((1,))\n\n def forward(self, x):\n x = self.identity(x)\n for m in self.identity_dict:\n x = self.identity_dict[m](x)\n for m in self.identity_list:\n x = m(x)\n return F.neg(x) + self.param\n\n traced_module, x, expect = _init_block()\n graph = traced_module.graph\n relu_out = graph.get_function_by_type(F.relu).as_unique().outputs[0]\n self = graph.inputs[0]\n setattr(traced_module, \"neg\", Neg(name=\"neg\"))\n setattr(traced_module, \"neg2\", Neg(name=\"neg\"))\n setattr(traced_module, \"param\", F.zeros((1,)))\n with graph.insert_exprs():\n neg_out = self.neg(relu_out)\n neg_out = self.neg2(relu_out)\n neg_out = neg_out + self.param\n graph.replace_node({relu_out: neg_out})\n graph.compile()\n\n np.testing.assert_allclose(expect - 1, 1 - traced_module(x), atol=1e-6)\n assert traced_module.neg.graph is not None\n assert traced_module.neg2.graph is not None\n assert traced_module.neg2.param is not None\n assert len(traced_module.neg.graph._exprs) == 13\n for n in traced_module.graph.nodes():\n if isinstance(n, TensorNode):\n assert n.value is None\n\n traced_module, x, expect = _init_module()\n setattr(traced_module.block0, \"neg\", Neg(name=None))\n graph = traced_module.graph\n self = graph.inputs[0]\n out_node = graph.outputs[0]\n with graph.insert_exprs():\n neg_out = self.block0.neg(out_node)\n graph.replace_node({out_node: neg_out})\n graph.compile()\n np.testing.assert_allclose(expect, -traced_module(x), atol=1e-6)\n assert isinstance(traced_module.block0.neg, TracedModule)\n assert traced_module.block0.neg.graph is not None\n\n setattr(traced_module.block0.neg, \"neg\", Neg(name=None))\n setattr(traced_module.block0.neg.neg, \"relu\", M.ReLU())\n out_node = graph.outputs[0]\n with graph.insert_exprs():\n neg_out = self.block0.neg.neg(out_node)\n neg_out = self.block0.neg.neg(neg_out)\n relu_out = self.block0.neg.neg.relu(neg_out)\n graph.replace_node({out_node: relu_out})\n graph.compile()\n np.testing.assert_allclose(F.relu(-expect), traced_module(x), atol=1e-6)\n assert isinstance(traced_module.block0.neg.neg, TracedModule)\n assert traced_module.block0.neg.neg.graph is not None\n\n\ndef test_insert_qat_module():\n class concat(qat.Concat):\n pass\n\n traced_module, x, expect = _init_block()\n graph = traced_module.graph\n self = graph.inputs[0]\n out = graph.outputs[0]\n setattr(traced_module, \"cat_0\", qat.Concat())\n setattr(traced_module, \"cat_1\", concat())\n\n with graph.insert_exprs():\n x_0 = self.cat_0([out, out])\n x_1 = self.cat_1([out, x_0])\n graph.replace_node({out: x_1})\n graph.compile()\n\n x = F.copy(x)\n np.testing.assert_allclose(\n F.concat([expect, expect, expect]), traced_module(x), atol=1e-6\n )\n assert not hasattr(traced_module.cat_0, \"graph\")\n assert traced_module.cat_1.graph is not 
None\n\n\ndef test_add_input_and_output():\n traced_module, x, y = _init_module()\n\n data_node = traced_module.graph.add_input_node(shape=(1, 3, 224, 224), name=\"data\")\n traced_module.graph.add_output_node(data_node)\n\n assert data_node.name == \"data\"\n assert traced_module.graph.inputs[-1] == data_node\n assert len(traced_module.graph.inputs) == 3\n assert len(traced_module.graph.outputs) == 2\n\n y1, y2 = traced_module(x, x)\n np.testing.assert_equal(y1.numpy(), y.numpy())\n np.testing.assert_equal(y2.numpy(), x.numpy())\n\n y1, y2 = traced_module(x, y)\n np.testing.assert_equal(y2.numpy(), y.numpy())\n\n traced_module.graph.reset_outputs(\n ({\"orig_out\": traced_module.graph.outputs[0]}, traced_module.graph.outputs[1])\n )\n\n out = traced_module(x, x)\n assert isinstance(out, tuple)\n assert isinstance(out[0], dict)\n np.testing.assert_equal(out[0][\"orig_out\"].numpy(), y.numpy())\n np.testing.assert_equal(out[1].numpy(), x.numpy())\n\n\ndef test_delete():\n traced_module, x, expect = _init_block()\n graph = traced_module.graph\n relu_expr = graph.get_function_by_type(F.relu).as_unique()\n node = relu_expr.outputs\n repl_node = relu_expr.inputs\n graph.replace_node({node[0]: repl_node[0]})\n graph.compile()\n np.testing.assert_allclose(expect - 1, F.relu(traced_module(x) - 1), atol=1e-6)\n\n # clear graph\n graph.replace_node({graph.outputs[0]: graph.inputs[1]})\n graph.compile()\n np.testing.assert_equal(len(list(graph._exprs)), 0)\n np.testing.assert_equal(traced_module(x).numpy(), x.numpy())\n\n\ndef test_flatten():\n traced_module, x, expect = _init_module()\n traced_module = traced_module.flatten()\n assert len(traced_module.graph._exprs) == 12\n np.testing.assert_equal(expect.numpy(), traced_module(x).numpy())\n\n traced_module = traced_module.flatten()\n assert len(traced_module.graph._exprs) == 12\n np.testing.assert_equal(expect.numpy(), traced_module(x).numpy())\n\n traced_module, x, expect = _init_cls(MyModule1)\n traced_module = traced_module.flatten()\n _check_expr_users(traced_module)\n\n\ndef test_id_and_name():\n def _check_id(traced_module):\n _total_ids = traced_module.graph._total_ids\n node_ids = [n._id for n in traced_module.graph.nodes().as_list()]\n assert len(set(node_ids)) == len(node_ids)\n assert max(node_ids) + 1 == _total_ids[0]\n\n expr_ids = [n._id for n in traced_module.graph.exprs().as_list()]\n assert len(set(expr_ids)) == len(expr_ids)\n assert max(expr_ids) + 1 == _total_ids[1]\n\n def _check_name(flatened_module):\n node_names = [n._name for n in flatened_module.graph.nodes().as_list()]\n assert len(set(node_names)) == len(node_names)\n\n traced_module, x, expect = _init_module()\n _check_id(traced_module)\n\n flattened_module = traced_module.flatten()\n _check_id(flattened_module)\n _check_name(flattened_module)\n\n # pickle check\n obj = pickle.dumps(traced_module)\n traced_module = pickle.loads(obj)\n Node._set_next_id(159)\n Expr._set_next_id(1024)\n\n graph = traced_module.graph\n for expr in graph.get_function_by_type(F.relu).as_list():\n relu_out = expr.outputs[0]\n cur_graph = expr.top_graph\n with cur_graph.insert_exprs():\n neg_out = F.neg(relu_out)\n cur_graph.replace_node({relu_out: neg_out})\n cur_graph.compile()\n _check_id(traced_module)\n\n flattened_module = traced_module.flatten()\n _check_id(flattened_module)\n _check_name(flattened_module)\n\n # check trace TracedModule\n obj = pickle.dumps(traced_module)\n traced_module = pickle.loads(obj)\n module = NewModule(traced_module)\n traced_module = trace_module(module, x)\n 
_check_id(traced_module)\n\n flattened_module = traced_module.flatten()\n _check_id(flattened_module)\n _check_name(flattened_module)\n\n\ndef test_set_node_name():\n traced_module, x, expect = _init_module()\n graph = traced_module.graph\n output_node = graph.outputs[0]\n\n def rename(name):\n output_node.name = name\n\n np.testing.assert_raises(AssertionError, rename, \"block1_out\")\n rename(\"output\")\n np.testing.assert_equal(str(graph.outputs[0]), \"output\")\n\n def add_1(x):\n x = x + 1\n x.name = \"func_add_1\"\n return x\n\n class ModuleAdd_3(M.Module):\n def forward(self, x):\n x = x + 1\n x.name = \"module_add_1\"\n x = x + 2\n return x\n\n setattr(traced_module, \"add_3\", ModuleAdd_3())\n\n self = graph.inputs[0]\n with graph.insert_exprs():\n x = output_node + 1\n x.name = \"_add_1\"\n x = add_1(x)\n x = self.add_3(x)\n graph.replace_node({output_node: x})\n graph.compile()\n\n assert \"_add_1\" in graph._namespace.used_names\n assert \"func_add_1\" in graph._namespace.used_names\n assert \"module_add_1\" in traced_module.add_3.graph._namespace.used_names\n\n\ndef test_set_graph_name():\n traced_module, x, expect = _init_module()\n graph = traced_module.graph\n output_node = graph.outputs[0]\n\n node_name = output_node.name\n\n graph.name = \"Top\"\n node = graph.get_node_by_name(\"{}_{}\".format(\"Top\", node_name)).as_unique()\n assert node is output_node\n\n\ndef test_extra_block():\n class PostProcess(M.Module):\n def forward(self, x):\n return x * 2\n\n class Net(M.Module):\n def __init__(self, traced_module):\n super().__init__()\n self.post_process = PostProcess()\n self.traced_module = traced_module\n\n def forward(self, x):\n x = self.traced_module(x)\n x = self.post_process(x)\n return x\n\n traced_module, x, expect = _init_block()\n module = Net(traced_module)\n np.testing.assert_allclose(2 * expect, module(x), atol=1e-6)\n traced_module = trace_module(module, x)\n np.testing.assert_allclose(2 * expect, traced_module(x), atol=1e-6)\n" ]
[ [ "numpy.random.random", "numpy.prod" ], [ "numpy.testing.assert_raises" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
calebh94/ml-hv-grid-pub
[ "492fc267f22a3390211035bbd8ff41557139820b" ]
[ "train_xcept.py" ]
[ "\"\"\"\ntrain_xcept.py\n\nTrain the Xception network to classify HV pylons\n\"\"\"\nimport os\nfrom os import path as op\nfrom functools import partial\nfrom datetime import datetime as dt\nimport pickle\nimport pprint\n\nimport numpy as np\nfrom keras import backend as K\nfrom keras.models import Model\nfrom keras.layers import Dense, Dropout\nfrom keras.optimizers import Adam, rmsprop, SGD\nfrom keras.applications.xception import Xception, preprocess_input as xcept_preproc\nfrom keras.preprocessing.image import ImageDataGenerator\nfrom keras.callbacks import (ModelCheckpoint, EarlyStopping, TensorBoard,\n ReduceLROnPlateau)\nfrom hyperopt import fmin, Trials, STATUS_OK, tpe\nimport yaml\n\nfrom utils import (print_start_details, print_end_details)\nfrom utils_data import get_concatenated_data\nfrom config import (get_params, tboard_dir, ckpt_dir, dataset_fpaths,\n model_params as MP, train_params as TP)\n\n\ndef get_optimizer(opt_params, lr):\n \"\"\"Helper to get optimizer from text params\"\"\"\n if opt_params['opt_func'] == 'sgd':\n return SGD(lr=lr, momentum=opt_params['momentum'])\n elif opt_params['opt_func'] == 'adam':\n return Adam(lr=lr)\n elif opt_params['opt_func'] == 'rmsprop':\n return rmsprop(lr=lr)\n else:\n raise ValueError\n\n\ndef xcept_net(params):\n \"\"\"Train the Xception network\"\"\"\n K.clear_session() # Remove any existing graphs\n mst_str = dt.now().strftime(\"%m%d_%H%M%S\")\n\n print('\\n' + '=' * 40 + '\\nStarting model at {}'.format(mst_str))\n print('Model # %s' % len(trials))\n pp = pprint.PrettyPrinter(indent=4)\n pp.pprint(params)\n\n ######################\n # Paths and Callbacks\n ######################\n ckpt_fpath = op.join(ckpt_dir, mst_str + '_L{val_loss:.2f}_E{epoch:02d}_weights.h5')\n tboard_model_dir = op.join(tboard_dir, mst_str)\n\n callbacks_phase1 = [TensorBoard(log_dir=tboard_model_dir, histogram_freq=0,\n write_grads=False, embeddings_freq=0,\n embeddings_layer_names=['dense_preoutput', 'dense_output'])]\n callbacks_phase2 = [\n TensorBoard(log_dir=tboard_model_dir, histogram_freq=0,\n write_grads=False, embeddings_freq=0,\n embeddings_layer_names=['dense_preoutput', 'dense_output']),\n ModelCheckpoint(ckpt_fpath, monitor='val_acc', save_weights_only=True,\n save_best_only=True),\n EarlyStopping(min_delta=TP['early_stopping_min_delta'],\n patience=TP['early_stopping_patience'], verbose=1),\n ReduceLROnPlateau(epsilon=TP['reduce_lr_epsilon'],\n patience=TP['reduce_lr_patience'], verbose=1)]\n\n #########################\n # Construct model\n #########################\n # Get the original xception model pre-initialized weights\n base_model = Xception(weights='imagenet',\n include_top=False, # Peel off top layer\n input_shape=TP['img_size'],\n pooling='avg') # Global average pooling\n\n x = base_model.output # Get final layer of base XCeption model\n\n # Add a fully-connected layer\n x = Dense(params['dense_size'], activation=params['dense_activation'],\n kernel_initializer=params['weight_init'],\n name='dense_preoutput')(x)\n if params['dropout_rate'] > 0:\n x = Dropout(rate=params['dropout_rate'])(x)\n\n # Finally, add softmax output with 2 classes (since we have binary prediction)\n pred = Dense(2, activation='softmax', name='dense_output')(x)\n\n model = Model(inputs=base_model.input, outputs=pred)\n\n #####################\n # Save model details\n #####################\n model_yaml = model.to_yaml()\n save_template = op.join(ckpt_dir, mst_str + '_{}.{}')\n arch_fpath = save_template.format('arch', 'yaml')\n if not 
op.exists(arch_fpath):\n with open(arch_fpath.format('arch', 'yaml'), 'w') as yaml_file:\n yaml_file.write(model_yaml)\n\n # Save params to yaml file\n params_fpath = save_template.format('params', 'yaml')\n if not op.exists(params_fpath):\n with open(params_fpath, 'w') as yaml_file:\n yaml_file.write(yaml.dump(params))\n yaml_file.write(yaml.dump(TP))\n\n ####################\n # Train top layers\n ####################\n # Train the top layers which we just added by setting all orig layers untrainable\n for layer in base_model.layers:\n layer.trainable = False\n\n # Compile the model (do this after setting non-trainable layers)\n model.compile(optimizer=get_optimizer(params['optimizer'],\n lr=params['lr_phase1']),\n loss=params['loss'], metrics=MP['metrics'])\n\n # Train top layers for a few epocs\n steps_per_epo = (len(X_train) * TP['prop_total_img_set']) // TP['batch_size']\n steps_per_val = len(X_test) // TP['batch_size']\n\n print('Phase 1, training near-output layer(s)')\n hist = model.fit_generator(\n train_gen.flow(X_train, Y_train, batch_size=TP['batch_size']),\n steps_per_epoch=steps_per_epo,\n epochs=params['n_epo_phase1'],\n #validation_data=test_gen.flow(X_test, Y_test, batch_size=TP['batch_size']),\n #validation_steps=steps_per_val,\n callbacks=callbacks_phase1,\n class_weight=TP['class_weight'],\n verbose=1)\n\n ###############################################\n # Train entire network to fine-tune performance\n ###############################################\n # Visualize layer names/indices to see how many layers to freeze:\n #print('Layer freeze cutoff = {}'.format(params['freeze_cutoff']))\n #for li, layer in enumerate(base_model.layers):\n # print(li, layer.name)\n\n # Set all layers trainable\n for layer in model.layers:\n layer.trainable = True\n\n # Recompile model for second round of training\n model.compile(optimizer=get_optimizer(params['optimizer'], params['lr_phase2']),\n loss=params['loss'], metrics=MP['metrics'])\n\n print('/nPhase 2, training from layer {} on.'.format(params['freeze_cutoff']))\n hist = model.fit_generator(\n train_gen.flow(X_train, Y_train, batch_size=TP['batch_size']),\n steps_per_epoch=steps_per_epo,\n epochs=params['n_epo_phase2'],\n validation_data=test_gen.flow(X_test, Y_test, batch_size=TP['batch_size']),\n validation_steps=steps_per_val,\n callbacks=callbacks_phase2,\n class_weight=['class_weight'],\n verbose=1)\n\n # Return best of last validation accuracies\n check_ind = -1 * (TP['early_stopping_patience'] + 1)\n result_dict = dict(loss=np.min(hist.history['val_loss'][check_ind:]),\n status=STATUS_OK)\n\n return result_dict\n\n\nif __name__ == '__main__':\n start_time = dt.now()\n print_start_details(start_time)\n\n #########################\n # Load data\n #########################\n data_set = get_concatenated_data(dataset_fpaths, True, seed=TP['shuffle_seed'])\n X_train = data_set['x_train']\n X_test = data_set['x_test']\n Y_train = data_set['y_train']\n Y_test = data_set['y_test']\n total_counts = np.sum(Y_train, axis=0) + np.sum(Y_test, axis=0)\n\n train_gen = ImageDataGenerator(\n horizontal_flip=True,\n vertical_flip=True,\n rotation_range=180,\n zoom_range=(1, 1.2),\n preprocessing_function=xcept_preproc)\n test_gen = ImageDataGenerator(\n preprocessing_function=xcept_preproc)\n\n trials = Trials()\n algo = partial(tpe.suggest, n_startup_jobs=TP['n_rand_hp_iters'])\n argmin = fmin(xcept_net, space=get_params(MP, TP), algo=algo,\n max_evals=TP['n_total_hp_iters'], trials=trials)\n\n end_time = dt.now()\n 
print_end_details(start_time, end_time)\n    print(\"Evaluation of best performing model:\")\n    print(trials.best_trial['result']['loss'])\n\n    with open(op.join(ckpt_dir, 'trials_{}.pkl'.format(start_time)), \"wb\") as pkl_file:\n        pickle.dump(trials, pkl_file)\n" ]
[ [ "numpy.sum", "numpy.min" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
Gamrix/pytorch
[ "b5b158a6c6de94dfb983b447fa33fea062358844", "b5b158a6c6de94dfb983b447fa33fea062358844" ]
[ "test/onnx/test_pytorch_onnx_onnxruntime.py", "test/package/test_misc.py" ]
[ "import unittest\nimport onnxruntime\nimport torch\n\nimport numpy as np\nimport io\nimport itertools\nimport copy\nimport os\nimport random\n\nfrom torch.nn.utils import rnn as rnn_utils\nfrom model_defs.lstm_flattening_result import (LstmFlatteningResultWithSeqLength,\n LstmFlatteningResultWithoutSeqLength)\nfrom model_defs.rnn_model_with_packed_sequence import (RnnModelWithPackedSequence,\n RnnModelWithPackedSequenceWithState,\n RnnModelWithPackedSequenceWithoutState)\nfrom test_pytorch_common import (skipIfUnsupportedMinOpsetVersion, skipIfUnsupportedOpsetVersion,\n skipIfNoLapack, disableScriptTest, skipIfONNXShapeInference,\n skipIfUnsupportedMaxOpsetVersion)\nfrom test_pytorch_common import BATCH_SIZE\nfrom test_pytorch_common import RNN_BATCH_SIZE, RNN_SEQUENCE_LENGTH, RNN_INPUT_SIZE, RNN_HIDDEN_SIZE\nfrom typing import List, Tuple, Optional, Dict\nimport model_defs.word_language_model as word_language_model\n\nimport onnx\n\nimport torchvision\nfrom torchvision import ops\nfrom torchvision.models.detection.image_list import ImageList\nfrom torchvision.models.detection.transform import GeneralizedRCNNTransform\nfrom torchvision.models.detection.rpn import AnchorGenerator, RPNHead, RegionProposalNetwork\nfrom torchvision.models.detection.roi_heads import RoIHeads\nfrom torchvision.models.detection.faster_rcnn import FastRCNNPredictor, TwoMLPHead\nfrom collections import OrderedDict\n\nfrom torch.nn.utils.rnn import PackedSequence\n\ndef to_numpy(tensor):\n if tensor.requires_grad:\n return tensor.detach().cpu().numpy()\n else:\n return tensor.cpu().numpy()\n\ndef convert_to_onnx(model, input=None, opset_version=9, example_outputs=None,\n do_constant_folding=True, keep_initializers_as_inputs=True,\n dynamic_axes=None, input_names=None, output_names=None,\n fixed_batch_size=False, training=None,\n onnx_shape_inference=False):\n # export the model to ONNX\n f = io.BytesIO()\n input_copy = copy.deepcopy(input)\n torch.onnx._export(model, input_copy, f,\n opset_version=opset_version,\n example_outputs=example_outputs,\n do_constant_folding=do_constant_folding,\n keep_initializers_as_inputs=keep_initializers_as_inputs,\n dynamic_axes=dynamic_axes,\n input_names=input_names, output_names=output_names,\n fixed_batch_size=fixed_batch_size, training=training,\n onnx_shape_inference=onnx_shape_inference)\n\n # compute onnxruntime output prediction\n ort_sess = onnxruntime.InferenceSession(f.getvalue())\n return ort_sess\n\n\ndef inline_flatten_list(inputs, res_list):\n for i in inputs:\n res_list.append(i) if not isinstance(i, (list, tuple)) else inline_flatten_list(i, res_list)\n return res_list\n\n\ndef run_ort(ort_sess, input):\n input_copy = copy.deepcopy(input)\n input, _ = torch.jit._flatten(input_copy)\n inputs = [to_numpy(inp) for inp in input]\n\n ort_inputs = dict((ort_sess.get_inputs()[i].name, input) for i, input in enumerate(inputs))\n ort_outs = ort_sess.run(None, ort_inputs)\n return inline_flatten_list(ort_outs, [])\n\n\ndef ort_compare_with_pytorch(ort_outs, output, rtol, atol):\n output, _ = torch.jit._flatten(output)\n outputs = [to_numpy(outp) for outp in output]\n\n # compare onnxruntime and PyTorch results\n assert len(outputs) == len(ort_outs), \"number of outputs differ\"\n\n # compare onnxruntime and PyTorch results\n [np.testing.assert_allclose(out, ort_out, rtol=rtol, atol=atol) for out, ort_out in zip(outputs, ort_outs)]\n\n\ndef run_model_test(self, model, batch_size=2, state_dict=None,\n input=None, use_gpu=True, rtol=0.001, atol=1e-7,\n example_outputs=None, 
do_constant_folding=True,\n dynamic_axes=None, test_with_inputs=None,\n input_names=None, output_names=None,\n fixed_batch_size=False, dict_check=True,\n training=None):\n model.eval()\n if input is None:\n input = torch.randn(batch_size, 3, 224, 224, requires_grad=True)\n with torch.no_grad():\n if isinstance(input, torch.Tensor):\n input = (input,)\n # In-place operators will update input tensor data as well.\n # Thus inputs are replicated before every forward call.\n if isinstance(input, dict):\n input = (input,)\n input_args = copy.deepcopy(input)\n input_kwargs = {}\n if dict_check and isinstance(input_args[-1], dict):\n input_kwargs = input_args[-1]\n input_args = input_args[:-1]\n try:\n model_copy = copy.deepcopy(model)\n output = model_copy(*input_args, **input_kwargs)\n except Exception:\n output = model(*input_args, **input_kwargs)\n if isinstance(output, torch.Tensor):\n output = (output,)\n\n if not dict_check and isinstance(input[-1], dict):\n input = input + ({},)\n\n ort_sess = convert_to_onnx(model, input=input, opset_version=self.opset_version,\n example_outputs=output, do_constant_folding=do_constant_folding,\n keep_initializers_as_inputs=self.keep_initializers_as_inputs,\n dynamic_axes=dynamic_axes, input_names=input_names,\n output_names=output_names, fixed_batch_size=fixed_batch_size, training=training,\n onnx_shape_inference=self.onnx_shape_inference)\n # compute onnxruntime output prediction\n ort_outs = run_ort(ort_sess, input)\n ort_compare_with_pytorch(ort_outs, output, rtol, atol)\n\n\n # if additional test inputs are provided run the onnx\n # model with these inputs and check the outputs\n if test_with_inputs is not None:\n for test_input in test_with_inputs:\n if isinstance(test_input, torch.Tensor):\n test_input = (test_input,)\n test_input_copy = copy.deepcopy(test_input)\n output = model(*test_input_copy)\n if isinstance(output, torch.Tensor):\n output = (output,)\n ort_outs = run_ort(ort_sess, test_input)\n ort_compare_with_pytorch(ort_outs, output, rtol, atol)\n\ndef _init_test_generalized_rcnn_transform():\n min_size = 100\n max_size = 200\n image_mean = [0.485, 0.456, 0.406]\n image_std = [0.229, 0.224, 0.225]\n transform = GeneralizedRCNNTransform(min_size, max_size, image_mean, image_std)\n return transform\n\ndef _init_test_rpn():\n anchor_sizes = ((32,), (64,), (128,), (256,), (512,))\n aspect_ratios = ((0.5, 1.0, 2.0),) * len(anchor_sizes)\n rpn_anchor_generator = AnchorGenerator(anchor_sizes, aspect_ratios)\n out_channels = 256\n rpn_head = RPNHead(out_channels, rpn_anchor_generator.num_anchors_per_location()[0])\n rpn_fg_iou_thresh = 0.7\n rpn_bg_iou_thresh = 0.3\n rpn_batch_size_per_image = 256\n rpn_positive_fraction = 0.5\n rpn_pre_nms_top_n = dict(training=2000, testing=1000)\n rpn_post_nms_top_n = dict(training=2000, testing=1000)\n rpn_nms_thresh = 0.7\n rpn_score_thresh = 0.0\n\n rpn = RegionProposalNetwork(\n rpn_anchor_generator, rpn_head,\n rpn_fg_iou_thresh, rpn_bg_iou_thresh,\n rpn_batch_size_per_image, rpn_positive_fraction,\n rpn_pre_nms_top_n, rpn_post_nms_top_n, rpn_nms_thresh,\n score_thresh=rpn_score_thresh)\n return rpn\n\ndef _init_test_roi_heads_faster_rcnn():\n out_channels = 256\n num_classes = 91\n\n box_fg_iou_thresh = 0.5\n box_bg_iou_thresh = 0.5\n box_batch_size_per_image = 512\n box_positive_fraction = 0.25\n bbox_reg_weights = None\n box_score_thresh = 0.05\n box_nms_thresh = 0.5\n box_detections_per_img = 100\n\n box_roi_pool = ops.MultiScaleRoIAlign(\n featmap_names=['0', '1', '2', '3'],\n output_size=7,\n 
sampling_ratio=2)\n\n resolution = box_roi_pool.output_size[0]\n representation_size = 1024\n box_head = TwoMLPHead(\n out_channels * resolution ** 2,\n representation_size)\n\n representation_size = 1024\n box_predictor = FastRCNNPredictor(\n representation_size,\n num_classes)\n\n roi_heads = RoIHeads(\n box_roi_pool, box_head, box_predictor,\n box_fg_iou_thresh, box_bg_iou_thresh,\n box_batch_size_per_image, box_positive_fraction,\n bbox_reg_weights,\n box_score_thresh, box_nms_thresh, box_detections_per_img)\n return roi_heads\n\ndef set_rng_seed(seed):\n torch.manual_seed(seed)\n random.seed(seed)\n np.random.seed(seed)\n\nclass TestONNXRuntime(unittest.TestCase):\n from torch.onnx.symbolic_helper import _export_onnx_opset_version\n opset_version = _export_onnx_opset_version\n keep_initializers_as_inputs = True # For IR version 3 type export.\n onnx_shape_inference = True\n\n def setUp(self):\n torch.manual_seed(0)\n onnxruntime.set_seed(0)\n if torch.cuda.is_available():\n torch.cuda.manual_seed_all(0)\n np.random.seed(seed=0)\n os.environ['ALLOW_RELEASED_ONNX_OPSET_ONLY'] = '0'\n self.is_script_test_enabled = True\n\n def run_test(self, model, input, rtol=1e-3, atol=1e-7, do_constant_folding=True,\n batch_size=2, use_gpu=True, dynamic_axes=None, test_with_inputs=None,\n input_names=None, output_names=None, fixed_batch_size=False, dict_check=True,\n training=None):\n def _run_test(m):\n return run_model_test(self, m, batch_size=batch_size,\n input=input, use_gpu=use_gpu, rtol=rtol, atol=atol,\n do_constant_folding=do_constant_folding,\n dynamic_axes=dynamic_axes, test_with_inputs=test_with_inputs,\n input_names=input_names, output_names=output_names,\n fixed_batch_size=fixed_batch_size, dict_check=dict_check,\n training=training)\n if self.is_script_test_enabled:\n script_model = torch.jit.script(model)\n _run_test(script_model)\n\n _run_test(model)\n\n def run_model_test_with_external_data(self, model, input, rtol=0.001, atol=1e-7,\n example_outputs=None, do_constant_folding=True,\n dynamic_axes=None, input_names=None, output_names=None,\n ort_optim_on=True):\n import os\n import tempfile\n\n model.eval()\n with torch.no_grad():\n if isinstance(input, torch.Tensor):\n input = (input,)\n # In-place operators will update input tensor data as well.\n # Thus inputs are replicated before every forward call.\n input_copy = copy.deepcopy(input)\n output = model(*input_copy)\n if isinstance(output, torch.Tensor):\n output = (output,)\n\n # export the model to ONNX\n with tempfile.TemporaryDirectory() as tmpdirname:\n model_file_name = os.path.join(tmpdirname, 'model.onnx')\n input_copy = copy.deepcopy(input)\n torch.onnx.export(model, input_copy, model_file_name,\n opset_version=self.opset_version,\n example_outputs=output,\n verbose=False,\n do_constant_folding=do_constant_folding,\n keep_initializers_as_inputs=self.keep_initializers_as_inputs,\n dynamic_axes=dynamic_axes,\n input_names=input_names, output_names=output_names,\n use_external_data_format=True)\n # compute onnxruntime output prediction\n ort_sess_opt = onnxruntime.SessionOptions()\n ort_sess_opt.graph_optimization_level = \\\n onnxruntime.GraphOptimizationLevel.ORT_ENABLE_EXTENDED if ort_optim_on else \\\n onnxruntime.GraphOptimizationLevel.ORT_DISABLE_ALL\n ort_sess = onnxruntime.InferenceSession(model_file_name, sess_options=ort_sess_opt)\n input_copy = copy.deepcopy(input)\n ort_outs = run_ort(ort_sess, input_copy)\n ort_compare_with_pytorch(ort_outs, output, rtol, atol)\n\n\n @skipIfUnsupportedMinOpsetVersion(9) # Because 
external data format was released with Opset 9.\n def test_embedding_model_with_external_data(self):\n class LargeModel(torch.nn.Module):\n def __init__(self):\n super(LargeModel, self).__init__()\n dim = 15\n n = 4 * 100\n self.emb = torch.nn.Embedding(n, dim)\n self.lin1 = torch.nn.Linear(dim, 1)\n self.seq = torch.nn.Sequential(\n self.emb,\n self.lin1,\n )\n\n def forward(self, input):\n return self.seq(input)\n\n model = LargeModel()\n x = torch.tensor([2], dtype=torch.long)\n self.run_model_test_with_external_data(model, x)\n\n @skipIfUnsupportedMinOpsetVersion(9) # Because external data format was released with Opset 9.\n def test_mobilenet_v2_with_external_data(self):\n model = torchvision.models.mobilenet_v2(pretrained=True)\n x = torch.randn(2, 3, 224, 224, requires_grad=True)\n # We are turning off Onnx Runtime optimization off in this test,\n # because external data format is not supported to in ORT optimizer.\n # Once that support is added, we can set ort_optim_on=True (default).\n self.run_model_test_with_external_data(model, x, rtol=1e-3, atol=1e-5,\n ort_optim_on=False)\n\n @skipIfUnsupportedMinOpsetVersion(9) # Because external data format was released with Opset 9.\n def test_attribute_with_external_data(self):\n class LargeModel(torch.nn.Module):\n def forward(self, x):\n return x + torch.ones(2, 1024)\n\n x = torch.randn(2, 1)\n self.run_model_test_with_external_data(LargeModel(), x)\n\n @skipIfUnsupportedMinOpsetVersion(9) # Because external data format was released with Opset 9.\n @unittest.skip(\"Enable this once large model with subgraph is supported in ORT\")\n def test_subgraph_with_external_data(self):\n class LargeModel(torch.nn.Module):\n def forward(self, x):\n for i in range(x.size(0)):\n x = x + torch.ones(2, 1024)\n return x\n\n x = torch.randn(2, 1)\n self.run_model_test_with_external_data(torch.jit.script(LargeModel()), x)\n\n def test_fuse_conv_bn1d(self):\n class Fuse(torch.nn.Module):\n def __init__(self):\n super(Fuse, self).__init__()\n self.conv = torch.nn.Conv1d(16, 33, 3, stride=2)\n self.bn = torch.nn.BatchNorm1d(33)\n\n def forward(self, x):\n out = self.conv(x)\n return self.bn(out)\n\n model = Fuse()\n x = torch.randn(20, 16, 50, requires_grad=True)\n self.run_test(model, (x,))\n\n def test_fuse_conv_bn2d(self):\n class Fuse(torch.nn.Module):\n def __init__(self):\n super(Fuse, self).__init__()\n self.conv = torch.nn.Conv2d(3, 2, kernel_size=1, stride=2, padding=3, bias=False)\n self.bn = torch.nn.BatchNorm2d(2)\n\n def forward(self, x):\n out = self.conv(x)\n return self.bn(out)\n\n model = Fuse()\n x = torch.randn(2, 3, 2, 2, requires_grad=True)\n self.run_test(model, (x,))\n\n def test_fuse_conv_bn3d(self):\n class Fuse(torch.nn.Module):\n def __init__(self):\n super(Fuse, self).__init__()\n self.conv = torch.nn.Conv3d(3, 2, (3, 5, 2), stride=(2, 1, 1), padding=(3, 2, 0), bias=False)\n self.bn = torch.nn.BatchNorm3d(2)\n\n def forward(self, x):\n out = self.conv(x)\n return self.bn(out)\n\n model = Fuse()\n x = torch.randn(2, 3, 10, 50, 100, requires_grad=True)\n self.run_test(model, (x,), rtol=1e-3, atol=1e-6)\n\n def test_reshape_constant_fold(self):\n class Reshape(torch.nn.Module):\n def __init__(self, ):\n super(Reshape, self).__init__()\n self.register_buffer(\"weight\", torch.ones(5))\n\n def forward(self, x):\n scale_1 = self.weight.reshape(1, -1, 1, 1)\n return x * scale_1\n\n x = torch.randn(4, 5)\n self.run_test(Reshape(), (x,), rtol=1e-3, atol=1e-5)\n\n def run_word_language_model(self, model_name):\n ntokens = 50\n emsize = 5\n 
nhid = 5\n nlayers = 5\n dropout = 0.2\n tied = False\n batchsize = 5\n if model_name == \"GRU\":\n model = word_language_model.RNNModelWithTensorHidden(model_name, ntokens, emsize,\n nhid, nlayers, dropout, tied,\n batchsize)\n elif model_name == \"LSTM\":\n model = word_language_model.RNNModelWithTupleHidden(model_name, ntokens, emsize,\n nhid, nlayers, dropout, tied,\n batchsize)\n else:\n model = word_language_model.RNNModel(model_name, ntokens, emsize,\n nhid, nlayers, dropout, tied,\n batchsize)\n x = torch.arange(0, ntokens).long().view(-1, batchsize)\n # Only support CPU version, since tracer is not working in GPU RNN.\n self.run_test(model, (x, model.hidden))\n\n def get_image_from_url(self, url, size=(300, 200)):\n import os\n from urllib.parse import urlsplit\n from urllib import request\n from PIL import Image\n from torchvision import transforms\n from torch._utils_internal import get_writable_path\n\n filename = os.path.basename(urlsplit(url)[2])\n data_dir = get_writable_path(os.path.join(os.path.dirname(__file__)))\n path = os.path.join(data_dir, filename)\n data = request.urlopen(url, timeout=15).read()\n with open(path, 'wb') as f:\n f.write(data)\n image = Image.open(path).convert(\"RGB\")\n\n image = image.resize(size, Image.BILINEAR)\n\n to_tensor = transforms.ToTensor()\n return to_tensor(image)\n\n def get_test_images(self):\n image_url = \"http://farm3.staticflickr.com/2469/3915380994_2e611b1779_z.jpg\"\n image = self.get_image_from_url(url=image_url, size=(100, 320))\n\n image_url2 = \"https://pytorch.org/tutorials/_static/img/tv_tutorial/tv_image05.png\"\n image2 = self.get_image_from_url(url=image_url2, size=(250, 380))\n\n return [image], [image2]\n\n @skipIfUnsupportedMinOpsetVersion(11)\n @disableScriptTest() # Faster RCNN model is not scriptable\n def test_faster_rcnn(self):\n model = torchvision.models.detection.faster_rcnn.fasterrcnn_resnet50_fpn(pretrained=True, min_size=200,\n max_size=300)\n model.eval()\n x = torch.randn(2, 3, 200, 300, requires_grad=True)\n self.run_test(model, (x,), rtol=1e-3, atol=1e-5)\n self.run_test(model, (x,), input_names=[\"images_tensors\"], output_names=[\"outputs\"],\n dynamic_axes={\"images_tensors\": [0, 1, 2, 3], \"outputs\": [0, 1, 2, 3]}, rtol=1e-3, atol=1e-5)\n dummy_image = [torch.ones(3, 100, 100) * 0.3]\n images, test_images = self.get_test_images()\n self.run_test(model, (images,), test_with_inputs=[(images,), (test_images,), (dummy_image,)],\n input_names=[\"images_tensors\"], output_names=[\"outputs\"],\n dynamic_axes={\"images_tensors\": [0, 1, 2], \"outputs\": [0, 1, 2]}, rtol=1e-3, atol=1e-5)\n self.run_test(model, (dummy_image,), test_with_inputs=[(dummy_image,), (images,)],\n input_names=[\"images_tensors\"], output_names=[\"outputs\"],\n dynamic_axes={\"images_tensors\": [0, 1, 2], \"outputs\": [0, 1, 2]}, rtol=1e-3, atol=1e-5)\n\n def test_paste_mask_in_image(self):\n # disable profiling\n torch._C._jit_set_profiling_executor(False)\n torch._C._jit_set_profiling_mode(False)\n\n masks = torch.rand(10, 1, 26, 26)\n boxes = torch.rand(10, 4)\n boxes[:, 2:] += torch.rand(10, 2)\n boxes *= 50\n o_im_s = (100, 100)\n from torchvision.models.detection.roi_heads import paste_masks_in_image\n out = paste_masks_in_image(masks, boxes, o_im_s)\n jit_trace = torch.jit.trace(paste_masks_in_image,\n (masks, boxes,\n [torch.tensor(o_im_s[0]),\n torch.tensor(o_im_s[1])]))\n out_trace = jit_trace(masks, boxes, [torch.tensor(o_im_s[0]), torch.tensor(o_im_s[1])])\n\n assert torch.all(out.eq(out_trace))\n\n masks2 = 
torch.rand(20, 1, 26, 26)\n boxes2 = torch.rand(20, 4)\n boxes2[:, 2:] += torch.rand(20, 2)\n boxes2 *= 100\n o_im_s2 = (200, 200)\n from torchvision.models.detection.roi_heads import paste_masks_in_image\n out2 = paste_masks_in_image(masks2, boxes2, o_im_s2)\n out_trace2 = jit_trace(masks2, boxes2, [torch.tensor(o_im_s2[0]), torch.tensor(o_im_s2[1])])\n\n assert torch.all(out2.eq(out_trace2))\n\n @skipIfUnsupportedMinOpsetVersion(11)\n @disableScriptTest()\n def test_mask_rcnn(self):\n model = torchvision.models.detection.mask_rcnn.maskrcnn_resnet50_fpn(pretrained=True, min_size=200,\n max_size=300)\n images, test_images = self.get_test_images()\n self.run_test(model, (images,), rtol=1e-3, atol=1e-5)\n self.run_test(model, (images,), input_names=[\"images_tensors\"], output_names=[\"boxes\", \"labels\", \"scores\", \"masks\"],\n dynamic_axes={\"images_tensors\": [0, 1, 2], \"boxes\": [0, 1], \"labels\": [0],\n \"scores\": [0], \"masks\": [0, 1, 2]}, rtol=1e-3, atol=1e-5)\n dummy_image = [torch.ones(3, 100, 100) * 0.3]\n self.run_test(model, (images,), test_with_inputs=[(images,), (test_images,), (dummy_image,)],\n input_names=[\"images_tensors\"], output_names=[\"boxes\", \"labels\", \"scores\", \"masks\"],\n dynamic_axes={\"images_tensors\": [0, 1, 2], \"boxes\": [0, 1], \"labels\": [0],\n \"scores\": [0], \"masks\": [0, 1, 2]}, rtol=1e-3, atol=1e-5)\n self.run_test(model, (dummy_image,), test_with_inputs=[(dummy_image,), (images,)],\n input_names=[\"images_tensors\"], output_names=[\"boxes\", \"labels\", \"scores\", \"masks\"],\n dynamic_axes={\"images_tensors\": [0, 1, 2], \"boxes\": [0, 1], \"labels\": [0],\n \"scores\": [0], \"masks\": [0, 1, 2]}, rtol=1e-3, atol=1e-5)\n\n def test_heatmaps_to_keypoints(self):\n # disable profiling\n torch._C._jit_set_profiling_executor(False)\n torch._C._jit_set_profiling_mode(False)\n\n maps = torch.rand(10, 1, 26, 26)\n rois = torch.rand(10, 4)\n from torchvision.models.detection.roi_heads import heatmaps_to_keypoints\n out = heatmaps_to_keypoints(maps, rois)\n jit_trace = torch.jit.trace(heatmaps_to_keypoints, (maps, rois))\n out_trace = jit_trace(maps, rois)\n\n assert torch.all(out[0].eq(out_trace[0]))\n assert torch.all(out[1].eq(out_trace[1]))\n\n maps2 = torch.rand(20, 2, 21, 21)\n rois2 = torch.rand(20, 4)\n from torchvision.models.detection.roi_heads import heatmaps_to_keypoints\n out2 = heatmaps_to_keypoints(maps2, rois2)\n out_trace2 = jit_trace(maps2, rois2)\n\n assert torch.all(out2[0].eq(out_trace2[0]))\n assert torch.all(out2[1].eq(out_trace2[1]))\n\n @skipIfUnsupportedMinOpsetVersion(11)\n @disableScriptTest()\n def test_keypoint_rcnn(self):\n model = torchvision.models.detection.keypoint_rcnn.keypointrcnn_resnet50_fpn(pretrained=True, min_size=200,\n max_size=300)\n images, test_images = self.get_test_images()\n self.run_test(model, (images,), rtol=1e-3, atol=1e-5)\n self.run_test(model, (images,), input_names=[\"images_tensors\"],\n output_names=[\"outputs1\", \"outputs2\", \"outputs3\", \"outputs4\"],\n dynamic_axes={\"images_tensors\": [0, 1, 2]},\n rtol=1e-3, atol=1e-5)\n dummy_images = [torch.ones(3, 100, 100) * 0.3]\n self.run_test(model, (images,), test_with_inputs=[(images,), (test_images,), (dummy_images,)],\n input_names=[\"images_tensors\"], output_names=[\"outputs1\", \"outputs2\", \"outputs3\", \"outputs4\"],\n dynamic_axes={\"images_tensors\": [0, 1, 2]},\n rtol=5e-3, atol=1e-5)\n self.run_test(model, (dummy_images,), test_with_inputs=[(dummy_images,), (test_images,)],\n input_names=[\"images_tensors\"], 
output_names=[\"outputs1\", \"outputs2\", \"outputs3\", \"outputs4\"],\n dynamic_axes={\"images_tensors\": [0, 1, 2]},\n rtol=5e-3, atol=1e-5)\n\n @skipIfUnsupportedMinOpsetVersion(11)\n @disableScriptTest()\n def test_shufflenet_v2_dynamic_axes(self):\n model = torchvision.models.shufflenet_v2_x0_5(pretrained=True)\n dummy_input = torch.randn(1, 3, 224, 224, requires_grad=True)\n test_inputs = torch.randn(3, 3, 224, 224, requires_grad=True)\n self.run_test(model, (dummy_input,), test_with_inputs=[(dummy_input,), (test_inputs,)],\n input_names=[\"input_images\"], output_names=[\"outputs\"],\n dynamic_axes={\"input_images\": {0: 'batch_size'}, \"output\": {0: 'batch_size'}},\n rtol=1e-3, atol=1e-5)\n\n @disableScriptTest()\n def test_word_language_model_RNN_TANH(self):\n self.run_word_language_model(\"RNN_TANH\")\n\n @disableScriptTest()\n def test_word_language_model_RNN_RELU(self):\n self.run_word_language_model(\"RNN_RELU\")\n\n @disableScriptTest() # scripting prim::unchecked_cast prim::setattr\n def test_word_language_model_LSTM(self):\n self.run_word_language_model(\"LSTM\")\n\n def test_word_language_model_GRU(self):\n self.run_word_language_model(\"GRU\")\n\n def test_index_1d(self):\n class MyModel(torch.nn.Module):\n def forward(self, input):\n return input[0]\n\n m1 = torch.randn(3, 4, 5, 6, 7)\n self.run_test(MyModel(), m1)\n\n def test_index_2d_1dimslice(self):\n class MyModel(torch.nn.Module):\n def forward(self, input):\n return input[0:1, :]\n\n m1 = torch.randn(3, 4, 5, 6, 7)\n self.run_test(MyModel(), m1)\n\n def test_index_2d_sliceint(self):\n class MyModel(torch.nn.Module):\n def forward(self, input):\n return input[1, :]\n\n m1 = torch.randn(3, 4, 5, 6, 7)\n self.run_test(MyModel(), m1)\n\n def test_index_2d_neg_slice(self):\n class MyModel(torch.nn.Module):\n def forward(self, input):\n return input[0:-1, :]\n\n m1 = torch.randn(3, 4, 5, 6, 7)\n self.run_test(MyModel(), m1)\n\n @skipIfUnsupportedMinOpsetVersion(9)\n def test_index_mask(self):\n class MyModel(torch.nn.Module):\n def forward(self, input):\n return input[torch.tensor([0, 1, 0], dtype=torch.uint8)]\n\n m1 = torch.randn(3, 4, 5, 6, 7)\n self.run_test(MyModel(), m1)\n\n class MyModel(torch.nn.Module):\n def forward(self, input):\n return input[torch.tensor([0, 1, 0], dtype=torch.bool)]\n\n m1 = torch.randn(3, 4, 5, 6, 7)\n self.run_test(MyModel(), m1)\n\n @skipIfUnsupportedMinOpsetVersion(9)\n def test_data(self):\n class Data(torch.jit.ScriptModule):\n @torch.jit.script_method\n def forward(self, x):\n return x.new_zeros(x.data.size())\n\n x = torch.randn(3, 4)\n self.run_test(Data(), x)\n\n @skipIfUnsupportedMinOpsetVersion(11)\n @disableScriptTest() # Need type inference\n def test_index_mask_nd(self):\n class MyModel(torch.nn.Module):\n def forward(self, input):\n return input[input > 0]\n\n m1 = torch.randn(3, 4, 5, 6, 7)\n self.run_test(MyModel(), m1)\n\n @disableScriptTest()\n def test_dict(self):\n class MyModel(torch.nn.Module):\n def forward(self, x_in):\n x_out = {}\n x_out[\"test_key_out\"] = torch.add(x_in[list(x_in.keys())[0]], list(x_in.keys())[0])\n return x_out\n\n x = {torch.tensor(1.): torch.randn(1, 2, 3)}\n self.run_test(MyModel(), (x, {}))\n\n @disableScriptTest()\n def test_dict_str(self):\n class MyModel(torch.nn.Module):\n def forward(self, x_in):\n x_out = {}\n x_out[\"test_key_out\"] = torch.add(x_in[\"test_key_in\"], 2.)\n return x_out\n\n x = {\"test_key_in\": torch.randn(1, 2, 3)}\n self.run_test(MyModel(), (x, {}))\n\n @disableScriptTest()\n def test_dict_output(self):\n class 
DictModelOutput(OrderedDict):\n tensor_out: torch.Tensor\n tuple_out: Optional[Tuple[torch.Tensor]] = None\n list_out: Optional[List[torch.Tensor]] = None\n\n class MyModel(torch.nn.Module):\n def forward(self, a, b, c, d):\n return DictModelOutput(\n tensor_out=a,\n tuple_out=(b, c),\n list_out=[d],\n )\n\n a = torch.randn(2, 3)\n b = torch.randn(2, 3)\n c = torch.randn(2, 3)\n d = torch.randn(2, 3)\n self.run_test(MyModel(), (a, b, c, d))\n\n def test_tuple_output(self):\n class MyModel(torch.nn.Module):\n def forward(self, a, b, c, d):\n return a, (b, c), d\n\n a = torch.randn(2, 3)\n b = torch.randn(2, 3)\n c = torch.randn(2, 3)\n d = torch.randn(2, 3)\n self.run_test(MyModel(), (a, b, c, d))\n\n def test_nested_tuple_output(self):\n class MyModel(torch.nn.Module):\n def forward(self, a, b, c, d):\n return a, ((b,), (c, d))\n\n a = torch.randn(2, 3)\n b = torch.randn(2, 3)\n c = torch.randn(2, 3)\n d = torch.randn(2, 3)\n self.run_test(MyModel(), (a, b, c, d))\n\n def test_tuple_input(self):\n class TupleModel(torch.nn.Module):\n def forward(self, a: Tuple[torch.Tensor, torch.Tensor]):\n return a\n\n x = (torch.randn(3, 4), torch.randn(4, 3))\n self.run_test(TupleModel(), input=(x,))\n\n def test_tuple_primitive_input(self):\n class TupleModel(torch.nn.Module):\n def forward(self, a: Tuple[int, torch.Tensor], b):\n return a[0], a[1] + b\n\n x = (3, torch.randn(4, 3))\n y = torch.randn(4, 3)\n self.run_test(TupleModel(), input=(x, y))\n\n def test_nested_tuple_input(self):\n class NestedTupleModel(torch.nn.Module):\n def forward(self, a, b: Tuple[torch.Tensor, Tuple[torch.Tensor, torch.Tensor]]):\n return a + b[0] + b[1][0] + b[1][1]\n\n x = torch.randn(4, 5)\n y = (torch.randn(4, 5), (torch.randn(1, 5), torch.randn(4, 1)))\n self.run_test(NestedTupleModel(), input=(x, y))\n\n @disableScriptTest()\n def test_optional_inputs_with_no_optionals(self):\n class NoOptionalModel(torch.nn.Module):\n def forward(self, input):\n return input\n\n # Without empty optional arguments dictionary\n x = torch.randn(2, 3)\n self.run_test(NoOptionalModel(), (x,))\n # With empty optional arguments dictionary\n y = torch.randn(2, 3)\n self.run_test(NoOptionalModel(), (y, {}))\n\n @disableScriptTest()\n def test_optional_inputs_with_mixed_optionals(self):\n class MixedModel(torch.nn.Module):\n def forward(self, x, y=None, z=None):\n if y is not None:\n return x + y\n if z is not None:\n return x + z\n return x\n\n x = torch.randn(2, 3)\n y = torch.randn(2, 3)\n z = torch.randn(2, 3)\n # Without optional arguments dictionary\n self.run_test(MixedModel(), (x, y, None))\n self.run_test(MixedModel(), (x, None, z))\n # With optional arguments dictionary\n self.run_test(MixedModel(), (x, {'y': y, 'z': None}))\n self.run_test(MixedModel(), (x, {'y': None, 'z': z}))\n self.run_test(MixedModel(), (x, {'z': z}))\n self.run_test(MixedModel(), (x, {'y': y}))\n\n @disableScriptTest()\n def test_optional_inputs_with_all_optionals(self):\n class AllOptionalModel(torch.nn.Module):\n def forward(self, y=None, z=None):\n if y is not None:\n return y\n if z is not None:\n return z\n\n y = torch.randn(2, 3)\n # Without optional arguments dictionary\n self.run_test(AllOptionalModel(), (y, None))\n # With optional arguments dictionary\n self.run_test(AllOptionalModel(), {'y': y, 'z': None})\n\n @disableScriptTest()\n def test_input_names_with_optional_args(self):\n class NoOptionalModel(torch.nn.Module):\n def forward(self, input):\n return input\n\n # Without empty optional arguments dictionary\n x = torch.randn(2, 3)\n 
self.run_test(NoOptionalModel(), (x,), input_names=['input_x'])\n # With empty optional arguments dictionary\n y = torch.randn(2, 3)\n self.run_test(NoOptionalModel(), (y, {}))\n\n class MixedModel(torch.nn.Module):\n def forward(self, x, y=None, z=None):\n if y is not None:\n return x + y\n if z is not None:\n return x + z\n return x\n\n x = torch.randn(2, 3)\n y = torch.randn(2, 3)\n z = torch.randn(2, 3)\n # Without optional arguments dictionary\n self.run_test(MixedModel(), (x, y, None), input_names=['input_x', 'input_y'])\n self.run_test(MixedModel(), (x, None, z), input_names=['input_x', 'input_z'])\n\n # With optional arguments dictionary\n self.run_test(MixedModel(), (x, {'y': y, 'z': None}), input_names=['input_x', 'input_y'])\n self.run_test(MixedModel(), (x, {'y': None, 'z': z}), input_names=['input_x', 'input_z'])\n\n class AllOptionalModel(torch.nn.Module):\n def forward(self, y=None, z=None):\n if y is not None:\n return y\n if z is not None:\n return z\n\n y = torch.randn(2, 3)\n z = torch.randn(2, 3)\n # Without optional arguments dictionary\n self.run_test(AllOptionalModel(), (y, None), input_names=['input_y'])\n self.run_test(AllOptionalModel(), (None, z), input_names=['input_z'])\n # With optional arguments dictionary\n self.run_test(AllOptionalModel(), {'y': y, 'z': None}, input_names=['input_y'])\n self.run_test(AllOptionalModel(), {'y': None, 'z': z}, input_names=['input_z'])\n\n def test_input_as_output(self):\n class Model(torch.nn.Module):\n def forward(self, x, y):\n return x, y\n\n x = torch.randn(2, 3)\n y = torch.randn(3, 4)\n self.run_test(Model(), (x, y), input_names=['x', 'y'], output_names=['x_out', 'y_out'])\n\n @disableScriptTest()\n def test_none_as_input(self):\n class Model(torch.nn.Module):\n def forward(self, x, y):\n if y is not None:\n return x + y\n return x\n\n x = torch.randn(2, 3)\n self.run_test(Model(), (x, None))\n\n @disableScriptTest()\n def test_none_as_tuple_input(self):\n class Model(torch.nn.Module):\n def forward(self, x, y):\n if y[0] is not None:\n return x + y[0]\n if y[1] is not None:\n return x + y[1]\n return x\n\n x = torch.randn(2, 3)\n y = torch.randn(2, 3)\n self.run_test(Model(), (x, (None, y)))\n\n @disableScriptTest()\n def test_none_as_named_input(self):\n class Model(torch.nn.Module):\n def forward(self, x, y=None, z=None):\n if y is not None:\n return x + y\n if z is not None:\n return x + z\n return x\n\n x = torch.randn(2, 3)\n z = torch.randn(2, 3)\n self.run_test(Model(), (x, None, z))\n\n def test_primitive_input_integer(self):\n class Model(torch.nn.Module):\n def __init__(self):\n super().__init__()\n\n def forward(self, x: int, y):\n return x + y\n\n x = 3\n y = torch.randint(10, (2, 3, 4))\n self.run_test(Model(), (x, y))\n\n def test_primitive_input_floating(self):\n class Model(torch.nn.Module):\n def __init__(self):\n super().__init__()\n\n def forward(self, x: float, y):\n return x + y\n\n x = 3.0\n y = torch.randn(2, 3, 4)\n self.run_test(Model(), (x, y))\n\n def test_primitive_input_bool(self):\n class Model(torch.nn.Module):\n def __init__(self):\n super().__init__()\n\n def forward(self, flag: bool, x, y):\n if flag:\n return x\n else:\n return y\n\n flag = True\n x = torch.randn(2, 3, 4)\n y = torch.randn(2, 3, 4)\n self.run_test(torch.jit.script(Model()), (flag, x, y))\n\n @skipIfUnsupportedMinOpsetVersion(9)\n def test_cste_script(self):\n class MyModel(torch.jit.ScriptModule):\n @torch.jit.script_method\n def forward(self, x):\n return torch.zeros(x.size(0)), torch.ones((x.size(1), x.size(0)), 
dtype=torch.int64)\n\n x = torch.randn(3, 4)\n self.run_test(MyModel(), x)\n\n def test_scalar_tensor(self):\n class test(torch.nn.Module):\n def forward(self, input):\n return torch.scalar_tensor(input.size(0)), \\\n torch.scalar_tensor(input.size(1), dtype=torch.int64)\n\n x = torch.randn(2, 3, 4)\n y = torch.randn(7, 8, 9)\n model = test()\n self.run_test(model, x, test_with_inputs=[y],\n input_names=['input_1'],\n dynamic_axes={'input_1': [0, 1, 2]})\n\n def test_tensor(self):\n class ScalarInputModel(torch.jit.ScriptModule):\n @torch.jit.script_method\n def forward(self, input):\n return torch.tensor(input.shape[1])\n\n x = torch.randn(3, 4)\n self.run_test(ScalarInputModel(), x)\n\n class TensorInputModel(torch.jit.ScriptModule):\n @torch.jit.script_method\n def forward(self, input):\n return torch.tensor([input.shape[0], input.shape[1]])\n\n x = torch.randn(3, 4)\n self.run_test(TensorInputModel(), x)\n\n class FloatInputModel(torch.jit.ScriptModule):\n @torch.jit.script_method\n def forward(self, input):\n return torch.tensor([float(input)])\n\n x = torch.randn(1)\n self.run_test(FloatInputModel(), x)\n\n class InputWithDtypeModel(torch.jit.ScriptModule):\n @torch.jit.script_method\n def forward(self, input):\n return torch.tensor(input.shape[1], dtype=torch.long)\n\n x = torch.randn(3, 4)\n self.run_test(InputWithDtypeModel(), x)\n\n class MixedInputModel(torch.jit.ScriptModule):\n @torch.jit.script_method\n def forward(self, input):\n return torch.tensor([input.shape[0], int(input)])\n\n x = torch.randn(1)\n self.run_test(MixedInputModel(), x)\n\n def test_hardtanh(self):\n model = torch.nn.Hardtanh(-1.5, 2.5)\n x = torch.arange(-5, 5).to(dtype=torch.float32)\n self.run_test(model, x)\n\n def test_hardtanh_script_with_default_values(self):\n class MyModel(torch.jit.ScriptModule):\n @torch.jit.script_method\n def forward(self, x):\n return torch.nn.functional.hardtanh(x)\n\n x = torch.arange(-5, 5).to(dtype=torch.float32)\n self.run_test(MyModel(), x)\n\n def test_hardswish(self):\n model = torch.nn.Hardswish()\n\n x = torch.rand(3, 3).to(dtype=torch.float32)\n self.run_test(model, x)\n\n # Testing edge cases\n x = torch.tensor(3).to(dtype=torch.float32)\n self.run_test(model, x)\n x = torch.tensor(-3).to(dtype=torch.float32)\n self.run_test(model, x)\n\n def test_hardswish_script(self):\n class MyModel(torch.jit.ScriptModule):\n @torch.jit.script_method\n def forward(self, x):\n return torch.nn.functional.hardswish(x)\n\n x = torch.rand(3, 3).to(dtype=torch.float32)\n self.run_test(MyModel(), x)\n\n def test_hardsigmoid(self):\n model = torch.nn.Hardsigmoid()\n\n x = torch.rand(3, 3).to(dtype=torch.float32)\n self.run_test(model, x)\n\n # corner cases\n x = torch.tensor(3).to(dtype=torch.float32)\n self.run_test(model, x)\n x = torch.tensor(-3).to(dtype=torch.float32)\n self.run_test(model, x)\n\n def test_clamp(self):\n class ClampModel(torch.nn.Module):\n def forward(self, x):\n return x.clamp(-0.5, 0.5)\n\n x = torch.randn(3, 4)\n self.run_test(ClampModel(), x)\n\n class ClampMinModel(torch.nn.Module):\n def forward(self, x):\n return x.clamp(min=-0.5)\n\n x = torch.randn(3, 4)\n self.run_test(ClampMinModel(), x)\n\n class ClampMaxModel(torch.nn.Module):\n def forward(self, x):\n return x.clamp(max=0.5)\n\n x = torch.randn(3, 4)\n self.run_test(ClampMaxModel(), x)\n\n @skipIfUnsupportedMinOpsetVersion(8)\n def test_clamp_dyn(self):\n class ClampMaxModel(torch.jit.ScriptModule):\n @torch.jit.script_method\n def forward(self, x):\n return x.clamp(None, x.size(0))\n\n x = 
torch.arange(16).view(4, 4).float()\n self.run_test(ClampMaxModel(), x)\n\n\n class ClampMinModel(torch.jit.ScriptModule):\n @torch.jit.script_method\n def forward(self, x):\n return x.clamp(x.size(0), None)\n\n x = torch.arange(16).view(4, 4).float()\n self.run_test(ClampMinModel(), x)\n\n class ClampMinMaxModel(torch.jit.ScriptModule):\n @torch.jit.script_method\n def forward(self, x):\n return x.clamp(x.size(0), x.size(1))\n\n x = torch.arange(16).view(2, 8).float()\n self.run_test(ClampMinMaxModel(), x)\n\n class ClampTensorModel(torch.nn.Module):\n def forward(self, x, min, max):\n return x.clamp(min, max)\n\n x = torch.randn(3, 4)\n y = torch.randn(3, 4)\n z = torch.randn(3, 4)\n self.run_test(ClampTensorModel(), (x, y, z))\n\n class ClampTensorMinModel(torch.nn.Module):\n def forward(self, x, min):\n return x.clamp(min=min)\n\n self.run_test(ClampTensorMinModel(), (x, y))\n\n class ClampTensorMaxModel(torch.nn.Module):\n def forward(self, x, max):\n return x.clamp(max=max)\n\n self.run_test(ClampTensorMaxModel(), (x, z))\n\n @skipIfUnsupportedMinOpsetVersion(9)\n def test_full_trace(self):\n class FullModel(torch.nn.Module):\n def forward(self, x):\n return torch.full((3, 4), x, dtype=torch.long)\n\n x = torch.tensor(12)\n self.run_test(FullModel(), x)\n\n @skipIfUnsupportedMinOpsetVersion(9)\n def test_full_script(self):\n class FullModelScripting(torch.jit.ScriptModule):\n @torch.jit.script_method\n def forward(self, x):\n return torch.full((3, 4), x, dtype=torch.long)\n\n x = torch.tensor(12)\n self.run_test(FullModelScripting(), x)\n\n def test_fuse_addmm(self):\n class AddmmModel(torch.nn.Module):\n def forward(self, x):\n return torch.mm(x, x) + x\n\n x = torch.ones(3, 3)\n self.run_test(AddmmModel(), x)\n\n def test_maxpool(self):\n model = torch.nn.MaxPool1d(2, stride=1)\n x = torch.randn(20, 16, 50)\n self.run_test(model, x)\n\n def test_conv(self):\n class TraceModel(torch.nn.Module):\n def __init__(self):\n super(TraceModel, self).__init__()\n self.conv1 = torch.nn.Conv1d(16, 33, 3, stride=2)\n self.conv2 = torch.nn.Conv2d(16, 33, (3, 5), stride=(2, 1), padding=(4, 2), dilation=(3, 1))\n self.conv3 = torch.nn.Conv3d(16, 33, (3, 5, 2), stride=(2, 1, 1), padding=(4, 2, 0))\n\n def forward(self, input1, input2, input3):\n return self.conv1(input1), self.conv2(input2), self.conv3(input3)\n\n x1 = torch.randn(20, 16, 50)\n x2 = torch.randn(20, 16, 50, 100)\n x3 = torch.randn(20, 16, 10, 50, 100)\n\n self.run_test(TraceModel(), (x1, x2, x3), atol=10e-5)\n\n def test_conv_shape_inference(self):\n class Model(torch.nn.Module):\n def __init__(self):\n super(Model, self).__init__()\n self.conv2 = torch.nn.Conv2d(16, 33, (3, 5), stride=(2, 1), padding=(4, 2), dilation=(3, 1))\n\n def forward(self, input):\n return self.conv2(input) + 2\n\n x = torch.randn(20, 16, 50, 100)\n self.run_test(Model(), x, atol=10e-5,\n input_names=['x'],\n dynamic_axes={'x': [0]})\n\n def test_conv_transpose(self):\n class TraceModel(torch.nn.Module):\n def __init__(self):\n super(TraceModel, self).__init__()\n self.conv1 = torch.nn.ConvTranspose1d(16, 33, 3, stride=2)\n self.conv2 = torch.nn.ConvTranspose2d(16, 33, (3, 5), stride=(2, 1), padding=(4, 2), dilation=(3, 1))\n self.conv3 = torch.nn.ConvTranspose3d(16, 33, (3, 5, 2), stride=(2, 1, 1), padding=(4, 2, 0))\n\n def forward(self, input1, input2, input3):\n return self.conv1(input1), self.conv2(input2), self.conv3(input3)\n\n x1 = torch.randn(20, 16, 50)\n x2 = torch.randn(20, 16, 50, 100)\n x3 = torch.randn(20, 16, 10, 50, 100)\n\n 
self.run_test(TraceModel(), (x1, x2, x3), atol=10e-5)\n\n # Conversion of Transpose depends on input shape to be known.\n # The following test only works when onnx shape inference is enabled.\n @skipIfONNXShapeInference(False)\n def test_transpose_infer_shape(self):\n class TransposeModule(torch.jit.ScriptModule):\n def __init__(self):\n super(TransposeModule, self).__init__()\n self.conv = torch.nn.Conv2d(3, 1, 3, stride=2)\n\n @torch.jit.script_method\n def forward(self, x):\n x = self.conv(x)\n return x.transpose(0, 1)\n\n x = torch.randn(32, 3, 64, 64)\n y = torch.randn(16, 3, 8, 64)\n self.run_test(TransposeModule(), x, input_names=['x'],\n dynamic_axes={'x': [0, 2]},\n test_with_inputs=[y])\n\n def squeeze_model_tests(self, d, x1, x2):\n class Squeeze(torch.nn.Module):\n def __init__(self, d):\n super(Squeeze, self).__init__()\n self.d = d\n\n def forward(self, x):\n if self.d is not None:\n return torch.squeeze(x, dim=self.d)\n else:\n return torch.squeeze(x)\n\n x2 = [] if x2 is None else [x2]\n if len(x2) > 0:\n self.run_test(Squeeze(d), x1,\n input_names=['input'], dynamic_axes={'input': {0: '0', 1: '1', 2: '2'}},\n test_with_inputs=x2)\n else:\n self.run_test(Squeeze(d), x1)\n\n def test_squeeze_without_no_op(self):\n x = torch.randn(2, 1, 4)\n self.squeeze_model_tests(1, x, None)\n\n @skipIfUnsupportedMinOpsetVersion(11)\n def test_squeeze_dynamic(self):\n x_squeeze = torch.randn(2, 1, 4)\n x_noop = torch.randn(2, 2, 3)\n self.squeeze_model_tests(1, x_squeeze, x_noop)\n\n def test_squeeze_neg_without_no_op(self):\n x = torch.randn(2, 1, 4)\n self.squeeze_model_tests(-2, x, None)\n\n @skipIfUnsupportedMinOpsetVersion(11)\n def test_squeeze_neg(self):\n x_squeeze = torch.randn(2, 1, 4)\n x_noop = torch.randn(2, 2, 3)\n self.squeeze_model_tests(-2, x_squeeze, x_noop)\n\n def test_squeeze_all_dims(self):\n x_squeeze = torch.randn(2, 1, 4)\n x_noop = torch.randn(2, 2, 3)\n self.squeeze_model_tests(None, x_squeeze, x_noop)\n\n @skipIfUnsupportedMinOpsetVersion(11)\n def test_squeeze_no_op(self):\n x_noop = torch.randn(2, 1, 4)\n x_squeeze = torch.randn(2, 2, 1)\n self.squeeze_model_tests(2, x_noop, x_squeeze)\n\n @skipIfUnsupportedMinOpsetVersion(11)\n def test_squeeze_runtime_dim(self):\n class Squeeze(torch.nn.Module):\n def forward(self, d1, d2):\n t = torch.zeros(d1[0], d2[0])\n return t.squeeze(0)\n\n d1 = torch.tensor([1])\n d3 = torch.tensor([3])\n d4 = torch.tensor([4])\n self.run_test(Squeeze(), (d1, d4), test_with_inputs=[(d3, d4)])\n self.run_test(Squeeze(), (d3, d4), test_with_inputs=[(d1, d3)])\n\n def test_squeeze(self):\n class Squeeze(torch.nn.Module):\n def forward(self, x):\n return torch.squeeze(x, dim=-2)\n\n x = torch.randn(2, 1, 4)\n self.run_test(Squeeze(), x)\n\n def test_unsqueeze(self):\n class Unsqueeze(torch.nn.Module):\n def forward(self, x):\n return torch.unsqueeze(x, dim=-2)\n\n x = torch.randn(2, 3, 4)\n self.run_test(Unsqueeze(), x)\n\n def test_maxpool_default_stride(self):\n class MaxPoolModel(torch.nn.Module):\n def forward(self, x):\n return torch.nn.functional.max_pool2d(x, 2)\n\n model = MaxPoolModel()\n x = torch.randn(10, 20, 16, 50)\n self.run_test(model, x)\n\n @skipIfUnsupportedMinOpsetVersion(8)\n def test_maxpool_adaptive(self):\n model = torch.nn.AdaptiveMaxPool1d((5), return_indices=False)\n x = torch.randn(20, 16, 50, requires_grad=True)\n y = torch.randn(32, 16, 50, requires_grad=True)\n self.run_test(model, x, input_names=['x'],\n dynamic_axes={'x' : [0]},\n test_with_inputs=[y])\n\n def test_maxpool_2d(self):\n model = 
torch.nn.MaxPool2d(5, padding=(1, 2))\n x = torch.randn(1, 20, 16, 50, requires_grad=True)\n self.run_test(model, x)\n\n def test_maxpool_1d_ceil(self):\n model = torch.nn.MaxPool1d(3, 2, ceil_mode=True)\n x = torch.randn(20, 16, 50)\n self.run_test(model, x)\n\n def test_maxpool_2d_ceil(self):\n model = torch.nn.MaxPool2d(3, 2, ceil_mode=True)\n x = torch.randn(20, 16, 50, 32)\n self.run_test(model, x)\n\n def test_maxpool_3d_ceil(self):\n model = torch.nn.MaxPool3d(3, 2, ceil_mode=True)\n x = torch.randn(20, 16, 50, 44, 31)\n self.run_test(model, x)\n\n @skipIfUnsupportedMinOpsetVersion(8)\n @disableScriptTest() # Functional module not scriptable\n def test_maxpool_with_indices(self):\n model = torch.nn.MaxPool1d(2, stride=1, return_indices=True)\n x = torch.randn(20, 16, 50)\n self.run_test(model, x)\n\n @skipIfUnsupportedMinOpsetVersion(10)\n def test_maxpool_dilation(self):\n model = torch.nn.MaxPool1d(2, stride=1, dilation=2)\n x = torch.randn(20, 16, 50)\n self.run_test(model, x)\n\n def test_avgpool_default_stride(self):\n class AvgPoolModel(torch.nn.Module):\n def forward(self, x):\n return torch.nn.functional.avg_pool2d(x, 2)\n\n model = AvgPoolModel()\n x = torch.randn(10, 20, 16, 50)\n self.run_test(model, x)\n\n def test_avgpool(self):\n model = torch.nn.AvgPool1d(2, stride=1)\n x = torch.randn(20, 16, 50)\n self.run_test(model, x)\n\n def test_avgpool_1d_ceil(self):\n model = torch.nn.AvgPool1d(3, 2, ceil_mode=True)\n x = torch.randn(1, 1, 7)\n self.run_test(model, x)\n\n def test_avgpool_2d_ceil(self):\n model = torch.nn.AvgPool2d(3, 2, ceil_mode=True)\n x = torch.randn(20, 16, 50, 32)\n self.run_test(model, x)\n\n def test_avgpool_3d_ceil(self):\n model = torch.nn.AvgPool3d(3, 2, ceil_mode=True)\n x = torch.randn(20, 16, 50, 44, 31)\n y = torch.randn(32, 8, 50, 44, 31)\n self.run_test(model, x, input_names=['x'],\n dynamic_axes={'x' : [0, 1]},\n test_with_inputs=[y])\n\n @skipIfUnsupportedMinOpsetVersion(9)\n def test_floating_point(self):\n class FloatingPoint(torch.jit.ScriptModule):\n @torch.jit.script_method\n def forward(self, x):\n if x.is_floating_point():\n return x.new_zeros(x.shape)\n return x.new_zeros(x.shape)\n\n x = torch.randn(2, 3, 4)\n self.run_test(FloatingPoint(), x)\n\n class FloatingPoint(torch.jit.ScriptModule):\n @torch.jit.script_method\n def forward(self, x):\n if x.size(0) > 1:\n a = x + 2\n if a.is_floating_point():\n return x + 1\n return x + 1\n return x\n\n x = torch.randn(2, 3, 4)\n self.run_test(FloatingPoint(), x)\n\n # Operator rank mismatch between outputs of two branches for opsets below 11.\n @skipIfUnsupportedMinOpsetVersion(11)\n @skipIfONNXShapeInference(False)\n def test_floating_point_infer_dtype(self):\n class FloatingPoint(torch.jit.ScriptModule):\n @torch.jit.script_method\n def forward(self, x):\n if x.size(0) > 1:\n a = x + 2\n if a.is_floating_point():\n return x.new_zeros(x.shape[1:])\n return x.new_zeros(x.shape)\n return x\n\n x = torch.randn(2, 3, 4)\n self.run_test(FloatingPoint(), x)\n\n class FloatingPoint(torch.jit.ScriptModule):\n @torch.jit.script_method\n def forward(self, x):\n if x.size(0) > 1:\n a = x + 2\n if a.is_floating_point():\n return x + 1\n return x\n return x\n\n x = torch.randn(2, 3, 4).to(torch.int32)\n self.run_test(FloatingPoint(), x)\n\n @skipIfUnsupportedMinOpsetVersion(12)\n def test_prim_min(self):\n @torch.jit.script\n def list_append(boxes: List[torch.Tensor]):\n temp = []\n for i, b in enumerate(boxes): # enumerate is creating a prim::min op in torch graph\n temp.append(torch.full_like(b[:, 
1], i))\n return temp[0]\n\n class Min(torch.nn.Module):\n def forward(self, x):\n boxes = [x, x, x]\n return list_append(boxes)\n\n x = torch.rand(5, 5)\n self.run_test(Min(), (x,))\n\n class M(torch.jit.ScriptModule):\n @torch.jit.script_method\n def forward(self, x):\n i = 3\n return min(x[i], i)\n\n x = torch.arange(6, dtype=torch.int64)\n self.run_test(M(), (x,))\n\n def test_arithmetic(self):\n class ArithmeticModule(torch.nn.Module):\n def forward(self, x):\n x = x + 2\n x = x - 4\n x = x * 6\n x = x / 8\n return x\n\n x = torch.randn(2, 3, 4)\n self.run_test(ArithmeticModule(), x)\n\n def test_arithmetic_prim_long(self):\n class ArithmeticModule(torch.nn.Module):\n def forward(self, x, y: int):\n x = x + y\n x = x - y\n x = x * (y * 3)\n x = x / (y * 4)\n return x\n\n x = torch.randn(2, 3, 4)\n y = 2\n self.run_test(ArithmeticModule(), (x, y))\n\n class ArithmeticModule(torch.nn.Module):\n def forward(self, x):\n x = x + 2\n x = x - 3\n return x.shape[0]\n\n x = torch.randn(2, 3, 4)\n self.run_test(ArithmeticModule(), x)\n\n def test_arithmetic_prim_float(self):\n class ArithmeticModule(torch.nn.Module):\n def forward(self, x, y: float):\n x = x + y\n x = x - y\n x = x * (y * 3)\n x = x / (y * 4)\n return x\n\n x = torch.randn(2, 3, 4)\n y = 2.5\n self.run_test(ArithmeticModule(), (x, y))\n\n class ArithmeticModule(torch.nn.Module):\n def forward(self, x):\n x = x + 2\n x = x - 3\n return x.shape[1] / 2\n\n x = torch.randn(2, 3, 4)\n self.run_test(ArithmeticModule(), x)\n\n def test_arithmetic_prim_bool(self):\n class ArithmeticModule(torch.nn.Module):\n def forward(self, x, y: int, z: bool, t: float):\n x = x + y\n x = x - y\n if z:\n x = x * (y * 3)\n x = x / (y * 4)\n return x / t, z\n\n x = torch.randn(2, 3, 4)\n y = 2\n z = False\n t = 2.5\n self.run_test(ArithmeticModule(), (x, y, z, t))\n\n class ArithmeticModule(torch.nn.Module):\n def forward(self, x: int, y: int):\n return x == y\n\n x = 3\n y = 2\n self.run_test(ArithmeticModule(), (x, y))\n\n # In scripting the first transpose node do not carry shape and dtype info.\n # The following test only works when onnx shape inference is enabled.\n @skipIfONNXShapeInference(False)\n def test_arithmetic_infer_dtype(self):\n class ArithmeticModule(torch.jit.ScriptModule):\n @torch.jit.script_method\n def forward(self, x):\n x = x.t()\n x = x + 2\n x = x - 4\n x = x * 6\n x = x / 8\n return x\n\n x = torch.randn(2, 3)\n self.run_test(ArithmeticModule(), x)\n\n def test_floor_div(self):\n class FloorDivModule(torch.nn.Module):\n def forward(self, x, y):\n return x // 3, x // 2., \\\n x.to(dtype=torch.float64) // 3, x.to(dtype=torch.float64) // 2., \\\n x.to(dtype=torch.int64) // 3, x.to(dtype=torch.int64) // 2., \\\n x // (y + 1.).to(dtype=torch.int64), x // y, \\\n x.to(dtype=torch.float64) // y.to(dtype=torch.int64), x.to(dtype=torch.float64) // y.to(dtype=torch.float64), \\\n x.to(dtype=torch.int64) // y.to(dtype=torch.int64), x.to(dtype=torch.int64) // y\n\n x = torch.randn(2, 3, 4)\n y = torch.arange(1, 2 * 3 * 4 + 1).reshape(2, 3, 4)\n self.run_test(FloorDivModule(), (x, y))\n\n def test_floor_div_script(self):\n class FloorDivModule(torch.jit.ScriptModule):\n @torch.jit.script_method\n def forward(self, x, y):\n return x // 3, x // 2., x // y\n\n x = torch.randn(2, 3, 4)\n y = torch.randn(2, 3, 4)\n self.run_test(FloorDivModule(), (x, y))\n\n @skipIfUnsupportedMinOpsetVersion(9)\n def test_floordiv(self):\n class FloordivModule(torch.nn.Module):\n def forward(self, x):\n return x.new_zeros(x.size(2) // x.size(1))\n\n x = 
torch.randn(2, 3, 4)\n        self.run_test(FloordivModule(), (x,))\n\n    def test_div(self):\n        class DivModule(torch.nn.Module):\n            def forward(self, x, y):\n                return x / y, torch.true_divide(x, y)\n\n        x = torch.randn(2, 3, 4).to(torch.int)\n        y = torch.arange(1, 2 * 3 * 4 + 1).reshape(2, 3, 4).to(torch.int)\n        self.run_test(DivModule(), (x, y))\n        self.run_test(DivModule(), (x.float(), y.float()))\n\n    # Note: div cannot (generally) be exported via scripting\n    # since its type promotion logic is dependent on knowing the scalar types\n    # of the input tensors. That is, the ONNX graph is dependent on the\n    # data type of the inputs. This makes it appropriate for tracing only.\n    def test_div_promotion_trace(self):\n        class DivModule(torch.nn.Module):\n            def forward(self, x, y):\n                return x / y, torch.true_divide(x, y)\n\n        x = torch.randn(2, 3, 4).to(torch.int)\n        y = torch.arange(1, 2 * 3 * 4 + 1).reshape(2, 3, 4).to(torch.int)\n\n        prev_default = torch.get_default_dtype()\n\n        torch.set_default_dtype(torch.float)\n        self.run_test(torch.jit.trace(DivModule(), (x, y)), (x, y))\n\n        torch.set_default_dtype(torch.double)\n        self.run_test(torch.jit.trace(DivModule(), (x, y)), (x, y))\n\n        torch.set_default_dtype(prev_default)\n\n    # In scripting x, y do not carry shape and dtype info.\n    # The following test only works when onnx shape inference is enabled.\n    @skipIfONNXShapeInference(False)\n    def test_div_promotion_script(self):\n        class DivModule(torch.nn.Module):\n            def forward(self, x, y):\n                # Add transpose to hide shape/type information\n                # Otherwise shape and type are still available from input.\n                x = x.transpose(1, 2)\n                y = y.transpose(1, 2)\n                return x / y, torch.true_divide(x, y)\n\n        x = torch.randn(2, 3, 4).to(torch.int)\n        y = torch.arange(1, 2 * 3 * 4 + 1).reshape(2, 3, 4).to(torch.int)\n\n        prev_default = torch.get_default_dtype()\n\n        # 1. x,y are int, and output is float.\n        #    This can be handled by the default case, where both are cast to float.\n        #    It works even if type of x, y are unknown.\n        torch.set_default_dtype(torch.float)\n        self.run_test(torch.jit.script(DivModule()), (x, y))\n\n        # 2. x,y are int, and output is double.\n        #    This can be handled by the default case, where both are cast to double.\n        #    It works even if type of x, y are unknown.\n        torch.set_default_dtype(torch.double)\n        self.run_test(torch.jit.script(DivModule()), (x, y))\n\n        # 3. 
x is int, y is double, and output is double.\n # This can only be handled when both type of x and y are known.\n torch.set_default_dtype(prev_default)\n x = torch.randn(2, 3, 4).to(torch.int)\n y = torch.arange(1, 2 * 3 * 4 + 1).reshape(2, 3, 4).to(torch.double)\n self.run_test(torch.jit.script(DivModule()), (x, y))\n\n def test_div_rounding_mode(self):\n class TrueDivModule(torch.nn.Module):\n def forward(self, x, y):\n return (x.div(y, rounding_mode=None),\n torch.div(x, y, rounding_mode=None))\n\n class TruncDivModule(torch.nn.Module):\n def forward(self, x, y):\n return (x.div(y, rounding_mode='trunc'),\n torch.div(x, y, rounding_mode='trunc'))\n\n class FloorDivModule(torch.nn.Module):\n def forward(self, x, y):\n return (x.div(y, rounding_mode='floor'),\n torch.div(x, y, rounding_mode='floor'))\n\n modules = [TrueDivModule(), TruncDivModule()]\n if self.opset_version >= 9:\n modules.append(FloorDivModule())\n\n x = (torch.randn(2, 3, 4) * 100).to(torch.int)\n y = torch.arange(1, 2 * 3 * 4 + 1).reshape(2, 3, 4).to(torch.int)\n\n for module in modules:\n self.run_test(module, (x, y))\n self.run_test(torch.jit.trace(module, (x, y)), (x, y))\n self.run_test(torch.jit.script(module), (x, y))\n\n x = torch.randn(2, 3, 4)\n y = torch.rand(2, 3, 4) * 10.0 + 0.1\n\n for module in modules:\n self.run_test(module, (x, y))\n self.run_test(torch.jit.trace(module, (x, y)), (x, y))\n self.run_test(torch.jit.script(module), (x, y))\n\n def test_slice_trace(self):\n class MyModule(torch.nn.Module):\n def forward(self, x):\n return x[0:1]\n\n x = torch.randn(3)\n self.run_test(MyModule(), x)\n\n def test_slice_neg(self):\n class NegSlice(torch.nn.Module):\n def forward(self, x):\n return x[-1:]\n\n x = torch.randn(3, 4, 5)\n self.run_test(NegSlice(), x)\n\n def test_slice_neg_large(self):\n class NegSlice(torch.nn.Module):\n def forward(self, x):\n return x[:, :, -3:-1, :, -1]\n\n x = torch.randn(3, 4, 5, 6, 7)\n self.run_test(NegSlice(), x)\n\n def test_slice_neg_large_negone(self):\n class NegSlice(torch.nn.Module):\n def forward(self, x):\n return x[:, :, :, :, -1]\n\n x = torch.randn(3, 4, 5, 6, 7)\n self.run_test(NegSlice(), x)\n\n @skipIfUnsupportedMinOpsetVersion(11)\n def test_slice_with_input_index(self):\n class InputIndexSlice(torch.nn.Module):\n def forward(self, x, y):\n x[:y.size(0), 0, :] = y\n return x\n\n x = torch.zeros((56, 6, 256))\n y = torch.rand((22, 256))\n self.run_test(InputIndexSlice(), (x, y))\n\n @skipIfUnsupportedMinOpsetVersion(10)\n @disableScriptTest() # scripting tuple/list append\n def test_slice_dynamic(self):\n class DynamicSliceExportMod(torch.nn.Module):\n def forward(self, x):\n results = []\n for i in range(4):\n results.append(x[:x.size(0) - i, i:x.size(2), i:3])\n return tuple(results)\n\n x = torch.rand(5, 5, 5)\n y = torch.randn(6, 7, 8)\n self.run_test(DynamicSliceExportMod(), x, test_with_inputs=[y],\n input_names=['input_1'],\n output_names=['output_1'],\n dynamic_axes={'input_1': [0, 1, 2],\n 'output_1': [0, 1, 2]})\n\n @skipIfUnsupportedMinOpsetVersion(10)\n def test_slice_dynamic_script(self):\n class DynamicSliceModel(torch.jit.ScriptModule):\n @torch.jit.script_method\n def forward(self, x):\n return x[1:x.size(1)]\n\n x = torch.rand(1, 2)\n self.run_test(DynamicSliceModel(), x)\n\n @skipIfUnsupportedMinOpsetVersion(10)\n def test_slice_dynamic_shape_script(self):\n class DynamicSliceModel(torch.nn.Module):\n def forward(self, x):\n return x.new_zeros(x.shape[1:x.size(2)])\n\n x = torch.rand(1, 2, 3, 4)\n self.run_test(DynamicSliceModel(), x)\n\n 
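# Note (added sketch, not part of the original suite): run_test above presumably wraps\n    # torch.onnx.export plus an ONNX Runtime comparison. A minimal standalone version of that\n    # round-trip for a module like DynamicSliceModel could look roughly like this; the helper\n    # name _export_and_compare_sketch is hypothetical and only illustrative.\n    def _export_and_compare_sketch(self, model, x):\n        import io\n        import onnxruntime\n        f = io.BytesIO()\n        # Export the model at the opset version under test.\n        torch.onnx.export(model, (x,), f, opset_version=self.opset_version)\n        sess = onnxruntime.InferenceSession(f.getvalue())\n        ort_out = sess.run(None, {sess.get_inputs()[0].name: x.numpy()})[0]\n        # Compare the ONNX Runtime result against the eager PyTorch output.\n        np.testing.assert_allclose(model(x).detach().numpy(), ort_out, rtol=1e-3, atol=1e-7)\n\n    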
@skipIfUnsupportedMinOpsetVersion(10)\n @disableScriptTest() # scripting tuple/list append\n def test_slice_dynamic_to_end(self):\n class DynamicSliceExportMod(torch.nn.Module):\n def forward(self, x):\n results = []\n for i in range(4):\n results.append(x[:, i:, x.size(2) - 5])\n return tuple(results)\n\n x = torch.rand(5, 5, 5)\n self.run_test(DynamicSliceExportMod(), x,\n dynamic_axes={'input_1': [0, 1, 2],\n 'output_1': [0, 1, 2]})\n\n def test_square(self):\n class Square(torch.nn.Module):\n def forward(self, x):\n return torch.square(x)\n\n x = torch.randn(2, 3, 4)\n self.run_test(Square(), x)\n\n @skipIfUnsupportedMinOpsetVersion(9)\n def test_arange_dynamic(self):\n class ArangeModel(torch.nn.Module):\n def forward(self, input):\n return torch.arange(input.shape[0]), \\\n torch.arange(12), \\\n torch.arange(start=input.shape[0], end=input.shape[0] + 5)\n\n x = torch.randn(5, 3, 2)\n y = torch.randn(8, 3, 2)\n self.run_test(ArangeModel(), x, test_with_inputs=[y],\n input_names=['input_1'],\n output_names=['output_1', 'output_2', 'output_3'],\n dynamic_axes={'input_1': [0],\n 'output_1': [0]})\n self.run_test(torch.jit.script(ArangeModel()), x,\n test_with_inputs=[y], input_names=['input_1'],\n output_names=['output_1', 'output_2', 'output_3'],\n dynamic_axes={'input_1': [0],\n 'output_1': [0]})\n\n @skipIfUnsupportedMinOpsetVersion(9)\n def test_dynamic_arange_out(self):\n class ArangeOutModel(torch.nn.Module):\n def forward(self, end):\n out_t = torch.tensor([1], dtype=torch.int64)\n return torch.arange(end, out=out_t)\n\n x = torch.tensor(8)\n self.run_test(ArangeOutModel(), (x))\n\n @skipIfUnsupportedMinOpsetVersion(9)\n def test_dynamic_arange_start_out(self):\n class ArangeStartOutModel(torch.nn.Module):\n def forward(self, start, end):\n out_t = torch.tensor([1], dtype=torch.int64)\n return torch.arange(start.size(0), end, out=out_t)\n\n x = torch.randn(2, 3, 4)\n y = torch.tensor(8)\n self.run_test(ArangeStartOutModel(), (x, y))\n\n @skipIfUnsupportedMinOpsetVersion(11)\n def test_arange(self):\n class ArangeModel(torch.nn.Module):\n def forward(self, start, end):\n return torch.arange(start.size(0), end, 1.5, dtype=torch.int64)\n\n x = torch.randn(2, 3, 4)\n y = torch.tensor(8.5, dtype=torch.float)\n self.run_test(ArangeModel(), (x, y))\n\n @skipIfUnsupportedMinOpsetVersion(11)\n def test_arange_out(self):\n class ArangeOutModel(torch.nn.Module):\n def forward(self, end):\n out_t = torch.tensor([1], dtype=torch.float)\n return torch.arange(end, out=out_t)\n\n x = torch.tensor(8.5, dtype=torch.float)\n self.run_test(ArangeOutModel(), (x))\n\n @skipIfUnsupportedMinOpsetVersion(11)\n def test_arange_start_out(self):\n class ArangeStartOutModel(torch.nn.Module):\n def forward(self, start, end):\n out_t = torch.tensor([1], dtype=torch.float)\n return torch.arange(start.size(0), end, out=out_t)\n\n x = torch.randn(2, 3, 4)\n y = torch.tensor(8.5, dtype=torch.float)\n self.run_test(ArangeStartOutModel(), (x, y))\n\n @skipIfUnsupportedMinOpsetVersion(11)\n def test_arange_no_type(self):\n class ArangeModel(torch.nn.Module):\n def forward(self, end):\n return torch.arange(end), \\\n torch.arange(0, end)\n\n x = torch.tensor(6.2, dtype=torch.float)\n self.run_test(ArangeModel(), x)\n\n @skipIfUnsupportedMinOpsetVersion(9)\n def test_size(self):\n class SizeModel(torch.nn.Module):\n def forward(self, input):\n return torch.arange(input.size(0)), torch.arange(input.size(-1)), torch.ones(input.shape)\n\n x = torch.randn(5, 3, 2)\n self.run_test(SizeModel(), x)\n\n 
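# Added sketch (an assumption, not from the original tests): when outputs depend on\n    # input.size() as in SizeModel above, a standalone export would typically declare the\n    # varying dimension via dynamic_axes so the exported graph is not pinned to one shape.\n    # The helper name _export_dynamic_size_sketch is hypothetical.\n    def _export_dynamic_size_sketch(self, model, x):\n        import io\n        f = io.BytesIO()\n        torch.onnx.export(model, (x,), f,\n                          opset_version=self.opset_version,\n                          input_names=['input'],\n                          dynamic_axes={'input': {0: 'batch'}})\n        # Return the serialized ModelProto bytes so callers can reload or inspect them.\n        return f.getvalue()\n\n    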
@skipIfUnsupportedMinOpsetVersion(9)\n @disableScriptTest() # x.stride() not scriptable\n def test_as_strided(self):\n class Model(torch.nn.Module):\n def forward(self, x):\n chunk_size = list(x.size())\n chunk_size[1] = chunk_size[1] * 2 - 1\n chunk_stride = list(x.stride())\n chunk_stride[1] = chunk_stride[1] // 2\n return x.as_strided((3, 3, 3), (1, 4, 2), storage_offset=2), x.as_strided(chunk_size, chunk_stride)\n\n x = torch.randn(5, 8, 7)\n self.run_test(Model(), x)\n\n @disableScriptTest() # Ellipses followed by tensor indexing not scriptable\n def test_tensor_index_advanced_indexing_ellipsis(self):\n class MyModel(torch.nn.Module):\n def forward(self, input):\n return input[..., torch.tensor([2, 1]), torch.tensor([0, 3])]\n\n m1 = torch.randn(3, 4, 5, 6, 7)\n self.run_test(MyModel(), (m1,))\n\n def test_tensor_index_advanced_indexing(self):\n class MyModel(torch.nn.Module):\n def forward(self, input):\n return input[:, torch.tensor([[0, 2], [1, 1]]), :, torch.tensor([2, 1]), torch.tensor([0, 3])]\n\n m1 = torch.randn(3, 4, 5, 6, 7)\n self.run_test(MyModel(), (m1,))\n\n class MyModel(torch.nn.Module):\n def forward(self, input):\n return input[:, torch.tensor([0, 2]), None, 2:4, torch.tensor([[1, 3], [4, 0]])]\n\n self.run_test(MyModel(), (m1,))\n\n class MyModel(torch.nn.Module):\n def forward(self, input):\n return input[:, torch.tensor([0, 2]), torch.tensor([1]), 2:4, torch.tensor([[1], [4]])]\n\n self.run_test(MyModel(), (m1,))\n\n def test_tensor_index_advanced_indexing_consecutive(self):\n class MyModel(torch.nn.Module):\n def forward(self, input):\n return input[:, torch.tensor([0, 2]), torch.tensor([[1, 3], [4, 0]]), None]\n\n m1 = torch.randn(3, 4, 5, 6, 7)\n self.run_test(MyModel(), (m1,))\n\n @skipIfUnsupportedMinOpsetVersion(11)\n def test_index_put(self):\n class IndexPutModel(torch.nn.Module):\n def forward(self, x, ind, update):\n x[ind] = update\n return x\n\n x = torch.randn(3, 4)\n ind = torch.tensor([1], dtype=torch.long)\n update = torch.ones(4)\n self.run_test(IndexPutModel(), (x, ind, update))\n\n @skipIfUnsupportedMinOpsetVersion(11)\n def test_index_put_singular(self):\n class IndexPutBoolModel(torch.nn.Module):\n def forward(self, mask, indices):\n mask[indices] = True\n return mask\n\n mask = torch.zeros(100, dtype=torch.bool)\n indices = (torch.rand(25) * mask.shape[0]).to(torch.int64)\n self.run_test(IndexPutBoolModel(), (mask, indices))\n\n class IndexPutFloatModel(torch.nn.Module):\n def forward(self, mask, indices):\n mask[indices] = torch.tensor(5.5)\n return mask\n\n mask = torch.rand(100, dtype=torch.float)\n indices = (torch.rand(50) * mask.shape[0]).to(torch.int64)\n self.run_test(IndexPutFloatModel(), (mask, indices))\n\n @skipIfUnsupportedMinOpsetVersion(11)\n def test_index_put_accumulate(self):\n class IndexPutModel(torch.nn.Module):\n def forward(self, x, ind, update):\n return x.index_put((ind, ), update, accumulate=True)\n\n x = torch.randn(3, 4)\n ind = torch.tensor([2], dtype=torch.long)\n update = torch.ones(4)\n self.run_test(IndexPutModel(), (x, ind, update))\n\n @skipIfUnsupportedMinOpsetVersion(11)\n def test_index_put_slice_index(self):\n class IndexPutModel(torch.nn.Module):\n def forward(self, x, update):\n x[1:2, 1:3, torch.tensor([1])] += update\n return x\n\n x = torch.randn(3, 4, 5)\n update = torch.tensor([10, 15]).view(1, 2, 1)\n self.run_test(IndexPutModel(), (x, update))\n\n class IndexPutModel2(torch.nn.Module):\n def forward(self, x, update):\n x[torch.tensor([0, 2]), torch.tensor([1, 2])] += update\n return x\n\n x = 
torch.randn(3, 4, 5)\n update = torch.randn(2, 5)\n self.run_test(IndexPutModel2(), (x, update))\n\n class IndexPutModel3(torch.nn.Module):\n def forward(self, x, update):\n x[torch.tensor([0, 2]), 1:2] += update\n return x\n\n x = torch.randn(3, 4, 5)\n update = torch.tensor([10, 15]).view(2, 1, 1)\n self.run_test(IndexPutModel3(), (x, update))\n\n class IndexPutModel4(torch.nn.Module):\n def forward(self, x, update):\n x[torch.tensor([0, 2]), 2] += update\n return x\n\n x = torch.randn(3, 4, 5)\n update = torch.tensor([10, 15]).view(2, 1)\n self.run_test(IndexPutModel4(), (x, update))\n\n class IndexPutModel5(torch.nn.Module):\n def forward(self, x, update):\n x[1:3, torch.tensor([0, 2]), 2] += update\n return x\n\n x = torch.randn(3, 4, 5)\n update = torch.tensor([10, 15]).view(2, 1)\n self.run_test(IndexPutModel5(), (x, update))\n\n class IndexPutModel6(torch.nn.Module):\n def forward(self, x, update):\n x[1:3, 0] = update\n return x\n\n x = torch.randn(3, 4, 5)\n update = torch.arange(2 * 5).to(torch.float).view(2, 5)\n self.run_test(IndexPutModel6(), (x, update))\n\n class IndexPutModel7(torch.nn.Module):\n def forward(self, x, update):\n x[1:, 0] = update\n return x\n\n x = torch.randn(3, 4, 5)\n update = torch.arange(2 * 5).to(torch.float).view(2, 5)\n self.run_test(IndexPutModel7(), (x, update))\n\n class IndexPutModel8(torch.nn.Module):\n def forward(self, x, update):\n x[:3, 0] = update\n return x\n\n x = torch.randn(3, 4, 5)\n update = torch.arange(3 * 5).to(torch.float).view(3, 5)\n self.run_test(IndexPutModel8(), (x, update))\n\n class IndexPutModel9(torch.nn.Module):\n def forward(self, poses):\n w = 32\n x = poses[:, :, 0] - (w - 1) // 2\n boxes = torch.zeros([poses.shape[0], 17, 4])\n boxes[:, :, 0] = x\n return boxes\n\n x = torch.zeros([2, 17, 3], dtype=torch.int64)\n self.run_test(IndexPutModel9(), (x,))\n\n @skipIfUnsupportedMinOpsetVersion(11)\n @disableScriptTest() # Ellipses followed by tensor indexing not scriptable\n def test_index_put_ellipsis(self):\n class IndexPutModel(torch.nn.Module):\n def forward(self, x, update):\n x[..., torch.tensor([2, 1, 3]), 2:4] += update\n return x\n\n x = torch.randn(3, 4, 5, 6, 7)\n update = torch.randn(3, 1, 1, 3, 2)\n self.run_test(IndexPutModel(), (x, update))\n\n class IndexPutModel2(torch.nn.Module):\n def forward(self, x, update):\n x[2, ..., torch.tensor([2, 1, 3]), 2:4] += update\n return x\n\n x = torch.randn(3, 4, 5, 6, 7)\n update = torch.randn(4, 1, 3, 2)\n self.run_test(IndexPutModel2(), (x, update))\n\n @skipIfUnsupportedMinOpsetVersion(11)\n def test_index_put_loop(self):\n @torch.jit.script\n def ngram_attention_bias(sequence_length: int, ngram: int, device: torch.device, dtype: torch.dtype):\n bias = torch.ones((ngram, sequence_length), device=device, dtype=dtype) * float(\"-inf\")\n for stream_idx in range(ngram):\n for i in range(sequence_length):\n bias = bias * 2\n bias[stream_idx, i] = 5\n bias = bias * 5\n bias[0, 0] = 5\n\n for stream_idx in range(ngram):\n for i in range(sequence_length):\n bias[stream_idx, i] = 5\n bias[0, i] = 5\n return bias\n\n class ScriptModel(torch.nn.Module):\n def __init__(self):\n super(ScriptModel, self).__init__()\n self.ngram = 2\n self.max_target_positions = 512\n\n def forward(self, hidden_states):\n seq_length, batch_size = hidden_states.shape[:2]\n predict_causal_mask = ngram_attention_bias(\n self.max_target_positions, self.ngram, hidden_states.device, hidden_states.dtype\n )\n predict_causal_mask = predict_causal_mask[:, :seq_length]\n return predict_causal_mask\n\n x = 
torch.randn(6, 2)\n y = torch.randn(4, 1)\n self.run_test(ScriptModel(), x, input_names=['x'],\n dynamic_axes={'x': {0: 'seq_length', 1: 'batch_size'}}, test_with_inputs=[y])\n\n @skipIfUnsupportedMinOpsetVersion(11)\n def test_copy_(self):\n class CopyModel(torch.nn.Module):\n def forward(self, x, data):\n x[1:3] = data\n return x\n\n x = torch.randn(3, 4)\n update = torch.randn(2, 4)\n self.run_test(CopyModel(), (x, update))\n\n # mixed slice and select\n class CopyModel2(torch.nn.Module):\n def forward(self, x, data):\n x[1:3, 0] = data\n return x\n\n x = torch.randn(3, 4)\n update = torch.tensor([0], dtype=torch.float32)\n self.run_test(CopyModel2(), (x, update))\n\n update = torch.tensor([2, 3], dtype=torch.float32)\n self.run_test(CopyModel2(), (x, update))\n\n update = torch.randn(2)\n self.run_test(CopyModel2(), (x, update))\n\n class CopyModel3(torch.nn.Module):\n def forward(self, x, data):\n x[1, 1:3] = data\n return x\n\n x = torch.randn(3, 4)\n update = torch.tensor([0], dtype=torch.float32)\n self.run_test(CopyModel3(), (x, update))\n\n update = torch.tensor([2, 3], dtype=torch.float32)\n self.run_test(CopyModel3(), (x, update))\n\n update = torch.randn(2)\n self.run_test(CopyModel3(), (x, update))\n\n class CopyModel4(torch.nn.Module):\n def forward(self, x, ind, data):\n x[ind] = data\n return x\n\n x = torch.randn(3, 4)\n ind = torch.tensor(2)\n data = torch.randn(4)\n self.run_test(CopyModel4(), (x, ind, data))\n\n class CopyModel5(torch.nn.Module):\n def forward(self, x, mask):\n if mask is not None:\n x.copy_(mask)\n return x\n\n x = torch.randn(3, 4)\n mask = torch.randn(3, 1)\n self.run_test(CopyModel5(), (x, mask))\n\n @skipIfUnsupportedMinOpsetVersion(11)\n @disableScriptTest() # Model not scriptable (output with shape doesn't match the broadcast shape)\n def test_copy_tracing(self):\n class CopyModel(torch.nn.Module):\n def forward(self, x, data):\n x[1, 1:3] = data\n return x\n\n x = torch.randn(3, 4)\n update = torch.randn(1, 2)\n self.run_test(CopyModel(), (x, update))\n\n @skipIfUnsupportedMinOpsetVersion(11)\n def test_copy_ellipsis(self):\n class CopyModel(torch.nn.Module):\n def forward(self, x, update):\n x[..., 1] = update\n return x\n\n x = torch.randn(2, 3, 4)\n update = torch.ones(1)\n self.run_test(CopyModel(), (x, update))\n\n x = torch.randn(2, 3, 4, 5, 6)\n update = torch.ones(1)\n self.run_test(CopyModel(), (x, update))\n\n @skipIfUnsupportedMinOpsetVersion(11)\n def test_copy_ellipsis_script(self):\n class CopyModel(torch.nn.Module):\n def forward(self, x, update):\n # Insert reshape node to ensure no shape/type info for\n # x in scripting, without onnx shape inference.\n x = x.reshape(4, 3, 5, 6)\n x[2, ..., 1:3] = update\n return x\n\n x = torch.randn(3, 4, 5, 6)\n\n update = torch.ones(1)\n self.run_test(CopyModel(), (x, update))\n\n @skipIfUnsupportedMinOpsetVersion(10)\n def test_flip(self):\n class MyModule(torch.nn.Module):\n def forward(self, x):\n return torch.flip(x, dims=[0])\n\n x = torch.tensor(np.arange(6.0).reshape(2, 3))\n self.run_test(MyModule(), x)\n\n def test_random(self):\n class RandN(torch.nn.Module):\n def forward(self, x):\n return torch.mul(x, (torch.randn(2, 3, 4) + x).size(0))\n\n x = torch.randn(2, 3, 4)\n self.run_test(RandN(), x)\n\n class Rand(torch.nn.Module):\n def forward(self, x):\n return torch.mul(x, (torch.rand(2, 3, 4) + x).size(0))\n\n x = torch.randn(2, 3, 4)\n self.run_test(Rand(), x)\n\n @skipIfUnsupportedMinOpsetVersion(9)\n def test_random_dynamic_size(self):\n class RandN(torch.nn.Module):\n def 
forward(self, x):\n return torch.mul(x, torch.randn(x.size()).size(1))\n\n x = torch.randn(2, 3, 4)\n self.run_test(RandN(), x)\n\n class Rand(torch.nn.Module):\n def forward(self, x):\n return torch.mul(x, torch.rand(x.size()).size(1))\n\n x = torch.randn(2, 3, 4)\n self.run_test(Rand(), x)\n\n def test_random_like(self):\n class RandNLike(torch.nn.Module):\n def forward(self, x):\n return torch.mul(x, torch.randn_like(x).size(0))\n\n x = torch.randn(2, 3, 4)\n self.run_test(RandNLike(), x)\n self.run_test(torch.jit.script(RandNLike()), x)\n\n class RandLike(torch.nn.Module):\n def forward(self, x):\n return torch.mul(x, torch.rand_like(x).size(0))\n\n x = torch.randn(2, 3, 4)\n self.run_test(RandLike(), x)\n self.run_test(torch.jit.script(RandLike()), x)\n\n def test_random_like_dtype(self):\n class RandNLike(torch.nn.Module):\n def forward(self, x):\n return torch.mul(x.to(torch.double), torch.randn_like(x, dtype=torch.double).size(0))\n\n x = torch.randn(2, 3, 4)\n self.run_test(RandNLike(), x)\n\n class RandLike(torch.nn.Module):\n def forward(self, x):\n return torch.mul(x.to(torch.double), torch.rand_like(x, dtype=torch.double).size(0))\n\n x = torch.randn(2, 3, 4)\n self.run_test(RandLike(), x)\n\n def test_reshape_different_rank(self):\n class ReshapeModel(torch.nn.Module):\n def forward(self, x):\n x = x.reshape(-1, 2, 4, 4, 5, 5)\n return x\n\n x = torch.randn(1, 32, 5, 5)\n self.run_test(ReshapeModel(), x)\n\n def _interpolate(self, x, mode, use_size, is_upsample, align_corners=False):\n class MyModel(torch.nn.Module):\n __constants__ = ['mode', 'use_size', 'is_upsample', 'size', 'scale', 'size_array', 'scale_array', 'align_corners']\n\n def __init__(self, mode, use_size, is_upsample, align_corners):\n super(MyModel, self).__init__()\n self.mode = mode\n self.use_size = use_size\n self.is_upsample = is_upsample\n self.align_corners = align_corners\n self.scale = 2.0 if self.is_upsample else 0.5\n self.size = 24 if self.is_upsample else 2\n if x.dim() == 3:\n self.scale_array = [2.3]\n self.size_array = [16]\n elif x.dim() == 4:\n self.scale_array = [2.3, 3.1]\n self.size_array = [16, 32]\n else:\n self.scale_array = [2.3, 3.1, 4.6]\n self.size_array = [16, 32, 64]\n\n def forward(self, x):\n if self.use_size:\n if self.align_corners:\n return torch.nn.functional.interpolate(x, mode=self.mode, size=self.size, align_corners=True), \\\n torch.nn.functional.interpolate(x, mode=self.mode, size=self.size_array, align_corners=True)\n return torch.nn.functional.interpolate(x, mode=self.mode, size=self.size), \\\n torch.nn.functional.interpolate(x, mode=self.mode, size=self.size_array)\n if self.align_corners:\n return torch.nn.functional.interpolate(x, mode=self.mode,\n scale_factor=self.scale, recompute_scale_factor=False), \\\n torch.nn.functional.interpolate(x, mode=self.mode,\n scale_factor=self.scale_array, recompute_scale_factor=False)\n return torch.nn.functional.interpolate(x, mode=self.mode,\n scale_factor=self.scale, recompute_scale_factor=False), \\\n torch.nn.functional.interpolate(x, mode=self.mode,\n scale_factor=self.scale_array, recompute_scale_factor=False)\n\n model = MyModel(mode, use_size, is_upsample, align_corners)\n self.run_test(model, x, atol=1e-6)\n\n def _interpolate_tests(self, is_upsample):\n # - cubic mode is not supported for opsets below 11;\n # - linear mode does not match for opsets below 11;\n modes = [\"nearest\", \"linear\", \"bicubic\"]\n if self.opset_version < 11:\n modes = [\"nearest\"]\n x = [torch.randn(1, 2, 6, requires_grad=True),\n 
torch.randn(1, 2, 4, 6, requires_grad=True),\n             torch.randn(1, 2, 4, 4, 6, requires_grad=True)]\n\n        for mode in modes:\n            for xi in x:\n                mode_i = mode\n                # TODO: enable bicubic downsample when ORT precision loss fixed\n                if mode == \"bicubic\" and xi.dim() != 4:\n                    continue\n                elif mode == \"linear\":\n                    if xi.dim() == 3:\n                        # TODO : enable when linear mode is implemented for 1d inputs in ORT\n                        continue\n                    elif xi.dim() == 4:\n                        mode_i = \"bilinear\"\n                    elif xi.dim() == 5:\n                        # TODO : enable when linear mode is implemented for 3d inputs in ORT\n                        mode_i = \"trilinear\"\n                        continue\n                self._interpolate(xi, mode_i, True, is_upsample)\n                # test with align_corners if supported\n                if mode != 'nearest':\n                    self._interpolate(xi, mode_i, True, is_upsample, True)\n                # the following cases require dynamic sizes/scales,\n                # which is not supported for opset_version < 9\n                if self.opset_version >= 9:\n                    self._interpolate(xi, mode_i, True, is_upsample)\n                    # test with align_corners if supported\n                    if mode != 'nearest':\n                        self._interpolate(xi, mode_i, False, is_upsample, True)\n                    self._interpolate(xi, mode_i, False, is_upsample)\n\n    # ONNX export failed on interpolate scripting because dynamic size not supported for opsets below 9.\n    @skipIfUnsupportedMinOpsetVersion(9)\n    def test_interpolate_upsample(self):\n        self._interpolate_tests(True)\n\n    @skipIfUnsupportedMaxOpsetVersion(8)\n    @disableScriptTest()\n    def test_interpolate_upsample_trace(self):\n        self._interpolate_tests(True)\n\n    @skipIfUnsupportedMinOpsetVersion(9)\n    def test_interpolate_function_substitution(self):\n        class ScriptModel(torch.jit.ScriptModule):\n            @torch.jit.script_method\n            def forward(self, x):\n                return torch.nn.functional.interpolate(x, mode=\"nearest\", scale_factor=2.)\n\n        class ScriptModule(torch.jit.ScriptModule):\n            def __init__(self):\n                super(ScriptModule, self).__init__()\n                self.submodule = ScriptModel()\n\n            @torch.jit.script_method\n            def forward(self, input):\n                return self.submodule(input)\n\n        x = torch.randn(1, 2, 4, 4, 6)\n        self.run_test(ScriptModule(), (x,))\n\n        @torch.jit.script\n        def script_method(x):\n            return torch.nn.functional.interpolate(x, mode=\"nearest\", scale_factor=2.)\n\n        class TracingModule(torch.nn.Module):\n            def forward(self, x):\n                return script_method(x)\n\n        self.run_test(TracingModule(), (x,))\n\n    @skipIfUnsupportedMinOpsetVersion(10)\n    def test_interpolate_downsample(self):\n        self._interpolate_tests(False)\n\n    @skipIfUnsupportedMinOpsetVersion(11)\n    def test_interpolate_no_shape(self):\n        class MyModel(torch.jit.ScriptModule):\n            @torch.jit.script_method\n            def forward(self, x, y):\n                x = torch.add(x, x)\n                out1 = torch.nn.functional.interpolate(x, mode=\"bilinear\", size=(16, 16), align_corners=False)\n                out2 = torch.nn.functional.interpolate(x, mode=\"nearest\", size=(int(y.size(0)), int(y.size(1))))\n                return out1, out2\n\n        x = torch.randn(1, 2, 4, 4, requires_grad=True)\n        y = torch.randn(16, 16, requires_grad=True)\n        self.run_test(MyModel(), (x, y))\n\n    # scripting will throw the OnnxRuntimeError\n    @disableScriptTest()\n    def test_interpolate_adaptive_pooling_error(self):\n        x = torch.randn(1, 2, 6, requires_grad=True)\n        with self.assertRaises(RuntimeError) as cm:\n            self._interpolate(x, \"area\", True, True)\n\n        with self.assertRaises(RuntimeError) as cm:\n            self._interpolate(x, \"area\", False, True)\n\n    def test_groupnorm(self):\n        model = torch.nn.GroupNorm(3, 6, 0.002)\n        x = torch.randn(4, 6, 180, 180, 180)\n        self.run_test(model, x)\n\n        model = torch.nn.GroupNorm(1, 6, 0.002)\n        x = torch.randn(4, 6, 180, 180)\n        
self.run_test(model, x)\n\n        model = torch.nn.GroupNorm(6, 6, 0.002)\n        x = torch.randn(4, 6, 180, 180)\n        self.run_test(model, x)\n\n    @disableScriptTest()\n    def test_groupnorm_noaffine(self):\n        model = torch.nn.GroupNorm(4, 8, 0.002, affine=False)\n        x = torch.randn(3, 8, 224, 224)\n        self.run_test(model, x)\n\n        model = torch.nn.GroupNorm(1, 6, 0.002, affine=False)\n        x = torch.randn(4, 6, 180, 180)\n        self.run_test(model, x)\n\n        model = torch.nn.GroupNorm(6, 6, 0.002, affine=False)\n        x = torch.randn(4, 6, 180, 180)\n        self.run_test(model, x)\n\n    @skipIfUnsupportedMinOpsetVersion(9)\n    def test_listunpack(self):\n        class ListUnpack(torch.jit.ScriptModule):\n            @torch.jit.script_method\n            def forward(self, x):\n                a, b = x.shape\n                return x.new_zeros((a, b))\n\n        x = torch.randn(2, 3)\n        self.run_test(ListUnpack(), x)\n\n        class ListUnpackSlice(torch.jit.ScriptModule):\n            @torch.jit.script_method\n            def forward(self, x):\n                a, b = x.shape[2:]\n                return x.new_zeros((a, b))\n\n        x = torch.randn(2, 3, 4, 5)\n        self.run_test(ListUnpackSlice(), x)\n\n    def test_pow(self):\n        class PowModule(torch.nn.Module):\n            def forward(self, x, y):\n                return x.pow(y)\n\n        x = torch.randn(2, 3, 4)\n        y = torch.randn(2, 3, 4)\n        self.run_test(PowModule(), (x, y))\n\n        x = torch.randint(10, (2, 3, 4))\n        y = torch.randint(10, (2, 3, 4)).to(dtype=torch.int32)\n        self.run_test(PowModule(), (x, y))\n\n        x = torch.randint(10, (2, 3, 4))\n        y = torch.randint(10, (2, 3, 4))\n        self.run_test(PowModule(), (x, y))\n\n        x = torch.randn(2, 3, 4).to(dtype=torch.float64)\n        y = torch.randint(10, (2, 3, 4))\n        self.run_test(PowModule(), (x, y))\n\n        class PowModule2(torch.nn.Module):\n            def forward(self, x):\n                return torch.pow(2, x)\n\n        x = torch.randn(1, 10)\n        self.run_test(PowModule2(), (x,))\n\n        x = torch.randint(10, (2, 3, 4))\n        self.run_test(PowModule2(), (x,))\n\n        x = torch.randn(1, 10).to(dtype=torch.float64)\n        self.run_test(PowModule2(), (x,))\n\n        class PowModule3(torch.nn.Module):\n            def forward(self, x, y):\n                return y[torch.pow(2, x)]\n\n        x = torch.randint(5, (2, 3, 4))\n        y = torch.rand(100)\n        self.run_test(PowModule3(), (x, y))\n\n    # the arithmeticOps (Add\\Sub\\Mul\\Div\\Gemm\\Pow\\Mod) with low precision, including uint8, will fail in ORT\n    # add to(dtype=torch.long) to avoid the ORT output type not matching the expected type.\n    # will be fixed in ONNX version 14.\n    @skipIfUnsupportedMaxOpsetVersion(13)\n    def test_arithmeticOps_with_low_precision(self):\n        class AddModule(torch.nn.Module):\n            def forward(self, x, y):\n                return x + y\n\n        class SubModule(torch.nn.Module):\n            def forward(self, x, y):\n                return x - y\n\n        class MulModule(torch.nn.Module):\n            def forward(self, x, y):\n                return x * y\n\n        class DivModule(torch.nn.Module):\n            def forward(self, x, y):\n                return x / y\n\n        class PowModule(torch.nn.Module):\n            def forward(self, x, y):\n                return x.pow(y)\n\n        x = torch.tensor([2, 3, 5], dtype=torch.uint8)\n        y = torch.tensor([2, 3, 5], dtype=torch.uint8)\n        z = torch.tensor([1], dtype=torch.uint8)\n        self.run_test(AddModule(), (x, y))\n        self.run_test(SubModule(), (x, y))\n        self.run_test(MulModule(), (x, y))\n        self.run_test(DivModule(), (x, y))\n        self.run_test(PowModule(), (x, z))\n\n        x = torch.tensor([2, 3, 5], dtype=torch.int8)\n        y = torch.tensor([2, 3, 5], dtype=torch.int8)\n        z = torch.tensor([1], dtype=torch.int8)\n        self.run_test(AddModule(), (x, y))\n        self.run_test(SubModule(), (x, y))\n        self.run_test(MulModule(), (x, y))\n        self.run_test(DivModule(), (x, y))\n        self.run_test(PowModule(), (x, z))\n\n        x = torch.tensor([2, 3, 5], dtype=torch.int16)\n        y = torch.tensor([2, 
3, 5], dtype=torch.int16)\n z = torch.tensor([1], dtype=torch.int16)\n self.run_test(AddModule(), (x, y))\n self.run_test(SubModule(), (x, y))\n self.run_test(MulModule(), (x, y))\n self.run_test(DivModule(), (x, y))\n self.run_test(PowModule(), (x, z))\n\n x = torch.tensor([2, 3, 5], dtype=torch.uint8)\n y = torch.tensor([2, 3, 5], dtype=torch.float32)\n z = torch.tensor([1], dtype=torch.float64)\n self.run_test(AddModule(), (x, y))\n self.run_test(SubModule(), (x, y))\n self.run_test(MulModule(), (x, y))\n self.run_test(DivModule(), (x, y))\n self.run_test(PowModule(), (x, z))\n\n x = torch.tensor([2, 3, 5], dtype=torch.uint8)\n y = torch.tensor([2, 3, 5], dtype=torch.int64)\n z = torch.tensor([1], dtype=torch.int32)\n self.run_test(AddModule(), (x, y))\n self.run_test(SubModule(), (x, y))\n self.run_test(MulModule(), (x, y))\n self.run_test(DivModule(), (x, y))\n self.run_test(PowModule(), (x, z))\n\n # fmod was added in version 10\n @skipIfUnsupportedMinOpsetVersion(10)\n @skipIfUnsupportedMaxOpsetVersion(13)\n def test_mod_with_low_precision(self):\n class ModModule(torch.nn.Module):\n def forward(self, x, y):\n return torch.fmod(x, y).to(dtype=torch.long)\n\n x = torch.tensor([2, 3, 5], dtype=torch.uint8)\n y = torch.tensor([2, 3, 5], dtype=torch.uint8)\n self.run_test(ModModule(), (x, y))\n\n x = torch.tensor([2, 3, 5], dtype=torch.int8)\n y = torch.tensor([2, 3, 5], dtype=torch.int8)\n self.run_test(ModModule(), (x, y))\n\n x = torch.tensor([2, 3, 5], dtype=torch.int16)\n y = torch.tensor([2, 3, 5], dtype=torch.int16)\n self.run_test(ModModule(), (x, y))\n\n x = torch.tensor([2, 3, 5], dtype=torch.uint8)\n y = torch.tensor([2, 3, 5], dtype=torch.int32)\n self.run_test(ModModule(), (x, y))\n\n x = torch.tensor([2, 3, 5], dtype=torch.uint8)\n y = torch.tensor([2, 3, 5], dtype=torch.float64)\n self.run_test(ModModule(), (x, y))\n\n @unittest.skip(\"Gemm operator only support float/double in ONNX\")\n @skipIfUnsupportedMaxOpsetVersion(13)\n def test_gemm_with_low_precision(self):\n class GemmModule(torch.nn.Module):\n def forward(self, x, y):\n return torch.mm(x, y).to(dtype=torch.long)\n\n mat1 = torch.randn(2, 3).to(dtype=torch.uint8)\n mat2 = torch.randn(3, 2).to(dtype=torch.uint8)\n self.run_test(GemmModule(), (mat1, mat2))\n\n mat1 = torch.randn(2, 3).to(dtype=torch.int8)\n mat2 = torch.randn(3, 2).to(dtype=torch.int8)\n self.run_test(GemmModule(), (mat1, mat2))\n\n mat1 = torch.randn(2, 3).to(dtype=torch.int16)\n mat2 = torch.randn(3, 2).to(dtype=torch.int16)\n self.run_test(GemmModule(), (mat1, mat2))\n\n mat1 = torch.randn(2, 3).to(dtype=torch.uint8)\n mat2 = torch.randn(3, 2).to(dtype=torch.int32)\n self.run_test(GemmModule(), (mat1, mat2))\n\n mat1 = torch.randn(2, 3).to(dtype=torch.uint8)\n mat2 = torch.randn(3, 2).to(dtype=torch.float64)\n self.run_test(GemmModule(), (mat1, mat2))\n\n def test_std(self):\n class StandardDeviation(torch.nn.Module):\n def forward(self, input):\n return torch.std(input, unbiased=False)\n\n x = torch.randn(2, 3, 4)\n model = StandardDeviation()\n self.run_test(model, x)\n\n class StandardDeviationUnbiased(torch.nn.Module):\n def forward(self, input):\n return torch.std(input, unbiased=True)\n\n model = StandardDeviationUnbiased()\n self.run_test(model, x)\n\n def test_std_along_dims(self):\n class StandardDeviation(torch.nn.Module):\n def forward(self, input):\n return torch.std(input, dim=(0, 1), unbiased=False)\n\n x = torch.randn(2, 3, 4)\n model = StandardDeviation()\n self.run_test(model, x)\n\n class 
StandardDeviationUnbiased(torch.nn.Module):\n def forward(self, input):\n return torch.std(input, dim=(0, 1), unbiased=True)\n\n x = torch.randn(2, 3, 4)\n model = StandardDeviationUnbiased()\n self.run_test(model, x)\n\n def test_std_keepdim(self):\n class StandardDeviation(torch.nn.Module):\n def forward(self, input):\n return torch.std(input, dim=(0, 1), unbiased=False, keepdim=True)\n\n x = torch.randn(2, 3, 4)\n model = StandardDeviation()\n self.run_test(model, x)\n\n class StandardDeviationUnbiased(torch.nn.Module):\n def forward(self, input):\n return torch.std(input, dim=(0, 1), unbiased=True, keepdim=True)\n\n x = torch.randn(2, 3, 4)\n model = StandardDeviationUnbiased()\n self.run_test(model, x)\n\n def test_var(self):\n class Variance(torch.nn.Module):\n def forward(self, input):\n return torch.var(input, unbiased=False)\n\n x = torch.randn(2, 3, 4)\n model = Variance()\n self.run_test(model, x)\n\n class VarianceUnbiased(torch.nn.Module):\n def forward(self, input):\n return torch.var(input, unbiased=True)\n\n model = VarianceUnbiased()\n self.run_test(model, x)\n\n class VarianceSqrt(torch.nn.Module):\n def forward(self, input):\n y = torch.var(input, 1)\n return torch.sqrt(y + 1e-8)\n\n x = torch.randn(1, 2, 3, 300, 300)\n model = VarianceSqrt()\n self.run_test(model, x)\n\n def test_var_along_dims(self):\n class Variance(torch.nn.Module):\n def forward(self, input):\n return torch.var(input, dim=(0, 1), unbiased=False)\n\n x = torch.randn(2, 3, 4)\n model = Variance()\n self.run_test(model, x)\n\n class VarianceUnbiased(torch.nn.Module):\n def forward(self, input):\n return torch.var(input, dim=(0, 1), unbiased=True)\n\n x = torch.randn(2, 3, 4)\n model = VarianceUnbiased()\n self.run_test(model, x)\n\n def test_var_keepdim(self):\n class Variance(torch.nn.Module):\n def forward(self, input):\n return torch.var(input, dim=(0, 1), unbiased=False, keepdim=True)\n\n x = torch.randn(2, 3, 4)\n model = Variance()\n self.run_test(model, x)\n\n class VarianceUnbiased(torch.nn.Module):\n def forward(self, input):\n return torch.var(input, dim=(0, 1), unbiased=True, keepdim=True)\n\n x = torch.randn(2, 3, 4)\n model = VarianceUnbiased()\n self.run_test(model, x)\n\n def test_var_mean(self):\n class Variance(torch.nn.Module):\n def forward(self, input):\n return torch.var_mean(input, unbiased=False)\n\n x = torch.randn(2, 3, 4)\n model = Variance()\n self.run_test(model, x)\n\n class VarianceUnbiased(torch.nn.Module):\n def forward(self, input):\n return torch.var_mean(input, unbiased=True)\n\n model = VarianceUnbiased()\n self.run_test(model, x)\n\n def test_var_mean_along_dims(self):\n class Variance(torch.nn.Module):\n def forward(self, input):\n return torch.var_mean(input, dim=(0, 1), unbiased=False)\n\n x = torch.randn(2, 3, 4)\n model = Variance()\n self.run_test(model, x)\n\n class VarianceUnbiased(torch.nn.Module):\n def forward(self, input):\n return torch.var_mean(input, dim=(0, 1), unbiased=True)\n\n x = torch.randn(2, 3, 4)\n model = VarianceUnbiased()\n self.run_test(model, x)\n\n def test_var_mean_mixed_dims(self):\n class ReverseDims(torch.nn.Module):\n def forward(self, input):\n return torch.var_mean(input, dim=(2, 1), unbiased=False)\n\n x = torch.randn(2, 3, 4)\n model = ReverseDims()\n self.run_test(model, x)\n\n class SkipDims(torch.nn.Module):\n def forward(self, input):\n return torch.var_mean(input, dim=(0, 2), unbiased=False)\n\n x = torch.randn(2, 3, 4)\n model = SkipDims()\n self.run_test(model, x)\n\n class NonZeroDims(torch.nn.Module):\n def 
forward(self, input):\n return torch.var_mean(input, dim=(1, 2), unbiased=False)\n\n x = torch.randn(2, 3, 4)\n model = NonZeroDims()\n self.run_test(model, x)\n\n def test_var_mean_keepdim(self):\n class Variance(torch.nn.Module):\n def forward(self, input):\n return torch.var_mean(input, dim=(0, 1), unbiased=False, keepdim=True)\n\n x = torch.randn(2, 3, 4)\n model = Variance()\n self.run_test(model, x)\n\n class VarianceUnbiased(torch.nn.Module):\n def forward(self, input):\n return torch.var_mean(input, dim=(0, 1), unbiased=True, keepdim=True)\n\n x = torch.randn(2, 3, 4)\n model = VarianceUnbiased()\n self.run_test(model, x)\n\n def test_std_mean(self):\n class StandardDeviation(torch.nn.Module):\n def forward(self, input):\n return torch.std_mean(input, unbiased=False)\n\n x = torch.randn(2, 3, 4)\n model = StandardDeviation()\n self.run_test(model, x)\n\n class StandardDeviationUnbiased(torch.nn.Module):\n def forward(self, input):\n return torch.std_mean(input, unbiased=True)\n\n model = StandardDeviationUnbiased()\n self.run_test(model, x)\n\n def test_std_mean_along_dims(self):\n class StandardDeviation(torch.nn.Module):\n def forward(self, input):\n return torch.std_mean(input, dim=(0, 1), unbiased=False)\n\n x = torch.randn(2, 3, 4)\n model = StandardDeviation()\n self.run_test(model, x)\n\n class VarianceUnbiased(torch.nn.Module):\n def forward(self, input):\n return torch.std_mean(input, dim=(0, 1), unbiased=True)\n\n x = torch.randn(2, 3, 4)\n model = VarianceUnbiased()\n self.run_test(model, x)\n\n def test_std_mean_keepdim(self):\n class StandardDeviation(torch.nn.Module):\n def forward(self, input):\n return torch.std_mean(input, dim=(0, 1), unbiased=False, keepdim=True)\n\n x = torch.randn(2, 3, 4)\n model = StandardDeviation()\n self.run_test(model, x)\n\n class StandardDeviationUnbiased(torch.nn.Module):\n def forward(self, input):\n return torch.std_mean(input, dim=(0, 1), unbiased=True, keepdim=True)\n\n x = torch.randn(2, 3, 4)\n model = StandardDeviationUnbiased()\n self.run_test(model, x)\n\n def test_bitshift(self):\n class BitshiftModel(torch.nn.Module):\n def forward(self, input, input2):\n return input >> 1, input << 3.1, \\\n input2 >> torch.tensor([1, 2]), input2 << 4.2\n input = torch.arange(24, dtype=torch.float32).reshape(3, 4, 2)\n input2 = torch.arange(24, dtype=torch.int64).reshape(3, 4, 2)\n self.run_test(BitshiftModel(), (input, input2))\n\n def test_bitshift_other_fp(self):\n class BitshiftModel(torch.nn.Module):\n def forward(self, input):\n return input << 2.4\n input = torch.arange(24, dtype=torch.int64).reshape(3, 4, 2)\n self.run_test(BitshiftModel(), input)\n\n # uint8 not implemented in ORT for Mul used in\n # exporting bitshift for opset_version < 10\n @skipIfUnsupportedMinOpsetVersion(11)\n def test_bitshift_uint8(self):\n class BitshiftModel(torch.nn.Module):\n def forward(self, input, input2):\n return input >> 1, input << 3., \\\n input2 >> torch.tensor([1, 2], dtype=torch.uint8), input2 << 4.\n input = torch.arange(24, dtype=torch.uint8).reshape(3, 4, 2)\n input2 = torch.arange(24, dtype=torch.uint8).reshape(3, 4, 2)\n self.run_test(BitshiftModel(), (input, input2))\n\n def test_narrow(self):\n class NarrowModel(torch.nn.Module):\n def forward(self, input):\n return torch.narrow(input, 0, 0, 2)\n\n x = torch.randn(3, 3, requires_grad=True)\n self.run_test(NarrowModel(), x)\n\n @skipIfUnsupportedMinOpsetVersion(11)\n def test_narrow_dynamic(self):\n class NarrowModel(torch.nn.Module):\n def forward(self, input):\n return 
torch.narrow(input, 0, 0, input.shape[0] - 1)\n\n        x = torch.randn(3, 3, requires_grad=True)\n        self.run_test(NarrowModel(), x)\n\n    @skipIfUnsupportedMinOpsetVersion(9)\n    def test_index_fill(self):\n        class IndexFillModel(torch.nn.Module):\n            def forward(self, input):\n                index = torch.tensor([2, 0])\n                return input.index_fill(2, index, -1)\n\n        x = torch.randn(3, 4, 5, requires_grad=True)\n        self.run_test(IndexFillModel(), x)\n\n    @skipIfUnsupportedMinOpsetVersion(9)\n    def test_index_copy(self):\n        class IndexCopyModel(torch.nn.Module):\n            def forward(self, input):\n                index = torch.tensor([2, 0])\n                source = torch.ones(3, 2, 5)\n                return input.index_copy(1, index, source)\n\n        x = torch.randn(3, 4, 5, requires_grad=True)\n        self.run_test(IndexCopyModel(), x)\n\n    def test_select(self):\n        class Select(torch.nn.Module):\n            def forward(self, x):\n                return x[:, 1]\n\n        x = torch.randn(3, 4)\n        self.run_test(Select(), x)\n\n    def test_select_negative_index(self):\n        class Select(torch.nn.Module):\n            def forward(self, x):\n                return x[:, -1]\n\n        x = torch.randn(3, 4)\n        self.run_test(Select(), x)\n\n    def test_index_select_constant_scaler_index(self):\n        class IndexSelectScalerIndexModel(torch.nn.Module):\n            def forward(self, x):\n                index = 2\n                return torch.index_select(x, 1, torch.tensor(index))\n        x = torch.randn(3, 4)\n        self.run_test(IndexSelectScalerIndexModel(), x)\n\n    def test_index_select_scaler_index(self):\n        class IndexSelectScalerIndexModel(torch.nn.Module):\n            def __init__(self, index_base):\n                super(IndexSelectScalerIndexModel, self).__init__()\n                self.index_base = torch.tensor(index_base)\n\n            def forward(self, x, index_offset):\n                index = self.index_base + index_offset\n                return torch.index_select(x, 1, index)\n        x = torch.randn(3, 4)\n        offset = 2\n        index_offset = torch.tensor(offset)\n        base = 1\n        self.run_test(IndexSelectScalerIndexModel(base), (x, index_offset))\n\n    def test_take(self):\n        class TakeModel(torch.nn.Module):\n            def forward(self, x, y):\n                return torch.take(x, y)\n\n        x = torch.randn(6, 4, 3, 3)\n        y = torch.tensor([4, 1, 7, 15, 63])\n        self.run_test(TakeModel(), (x, y))\n\n    def test_topk(self):\n        class MyModule(torch.nn.Module):\n            def forward(self, x):\n                return torch.topk(x, 3)\n\n        x = torch.arange(1., 6., requires_grad=True)\n        self.run_test(MyModule(), x)\n\n    @skipIfUnsupportedMinOpsetVersion(11)\n    def test_topk_smallest_unsorted(self):\n        class MyModule(torch.nn.Module):\n            def forward(self, x, k):\n                # When sorted=False, the order of elements in the output tensors\n                # is not expected to match between PyTorch and ORT\n                topk_unsorted = torch.topk(x, k, largest=False, sorted=False)\n                topk_sorted = torch.topk(x, k, largest=False, sorted=True)\n                return topk_sorted, torch.sort(topk_unsorted.values).values\n\n        x = torch.arange(1., 6., requires_grad=True)\n        k = torch.tensor(3)\n        self.run_test(MyModule(), (x, k))\n\n    @skipIfUnsupportedMinOpsetVersion(10)\n    def test_topk_script(self):\n        class MyModuleDynamic(torch.jit.ScriptModule):\n            @torch.jit.script_method\n            def forward(self, x, k):\n                return torch.topk(x, k)\n\n        x = torch.arange(1., 6., requires_grad=True)\n        k = torch.tensor(3)\n        self.run_test(MyModuleDynamic(), [x, k])\n\n    @skipIfUnsupportedOpsetVersion([7])\n    def test_normalize(self):\n        class Model(torch.nn.Module):\n            def forward(self, x):\n                return torch.nn.functional.normalize(x)\n\n        x = torch.randn(3, 3)\n        self.run_test(Model(), x)\n\n    def test_layer_norm(self):\n        model = torch.nn.LayerNorm([10, 10])\n        x = torch.randn(20, 5, 10, 10)\n        self.run_test(model, x)\n\n    def test_batchnorm1d(self):\n        x = torch.randn(10, 
10)\n model = torch.nn.BatchNorm1d(10, affine=True)\n self.run_test(model, x)\n\n x = torch.randn(10, 10, 128)\n self.run_test(model, x)\n\n def test_batchnorm1d_noaffine(self):\n x = torch.randn(10, 10)\n model = torch.nn.BatchNorm1d(10, affine=False)\n self.run_test(model, x)\n\n x = torch.randn(10, 10, 128)\n self.run_test(model, x)\n\n def test_batchnorm1d_norunningstats(self):\n x = torch.randn(10, 10)\n model = torch.nn.BatchNorm1d(10, track_running_stats=False)\n self.run_test(model, x)\n\n x = torch.randn(10, 10, 128)\n self.run_test(model, x)\n\n def test_batchnorm2d(self):\n x = torch.randn(10, 3, 128, 128)\n model = torch.nn.BatchNorm2d(3, affine=True)\n self.run_test(model, x)\n\n def test_batchnorm2d_noaffine(self):\n x = torch.randn(10, 3, 128, 128)\n model = torch.nn.BatchNorm2d(3, affine=False)\n self.run_test(model, x)\n\n def test_batchnorm2d_norunningstats(self):\n x = torch.randn(10, 3, 128, 128)\n model = torch.nn.BatchNorm2d(3, track_running_stats=False)\n self.run_test(model, x)\n\n def test_batchnorm3d(self):\n x = torch.randn(10, 3, 128, 128, 128)\n model = torch.nn.BatchNorm3d(3, affine=True)\n self.run_test(model, x)\n\n def test_batchnorm3d_noaffine(self):\n x = torch.randn(10, 3, 128, 128, 128)\n model = torch.nn.BatchNorm3d(3, affine=False)\n self.run_test(model, x)\n\n @skipIfUnsupportedMinOpsetVersion(9)\n def test_scatter_with_scalar(self):\n class ScatterModel(torch.nn.Module):\n def forward(self, input, indices):\n values = 1.0\n return input.scatter(1, indices, values)\n\n input = torch.tensor([[0., 0., 0.], [0., 0., 0.], [0., 0., 0.]], dtype=torch.float64)\n indices = torch.tensor([[1, 0], [0, 1], [0, 1]], dtype=torch.int64)\n self.run_test(ScatterModel(), input=(input, indices))\n\n @skipIfUnsupportedMinOpsetVersion(9)\n def test_scatter_with_scalar_different_types(self):\n # Tests the case when scalar src (updates values) type is different\n # from self type. 
Happens only with scalar src - PyTorch does not\n # allow this when src is a tensor.\n class ScatterModel(torch.nn.Module):\n def forward(self, input, indices):\n values = 1.0\n return input.scatter(1, indices, values)\n\n input = torch.tensor([[0., 0., 0.], [0., 0., 0.], [0., 0., 0.]], dtype=torch.float32)\n indices = torch.tensor([[1, 0], [0, 1], [0, 1]], dtype=torch.int64)\n self.run_test(ScatterModel(), input=(input, indices))\n\n @skipIfUnsupportedMinOpsetVersion(9)\n def test_scatter(self):\n class ScatterModel(torch.nn.Module):\n def forward(self, input, indices, values):\n return input.scatter(1, indices, values)\n\n input = torch.tensor([[0., 0., 0.], [0., 0., 0.], [0., 0., 0.]])\n indices = torch.tensor([[1, 0], [0, 1], [0, 1]], dtype=torch.int64)\n values = torch.tensor([[1.0, 1.1], [2.0, 2.1], [3.0, 3.1]])\n self.run_test(ScatterModel(), input=(input, indices, values))\n\n input = torch.tensor([[0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0]])\n indices = torch.tensor([[1, 0], [0, 2], [0, 1]], dtype=torch.int64)\n values = torch.tensor([[1.0, 1.1], [2.0, 2.1], [3.0, 3.1]])\n self.run_test(ScatterModel(), (input, indices, values))\n\n input = torch.zeros(3, 4, 5, 6)\n indices = torch.tensor([[1, 0], [0, 2], [0, 1]], dtype=torch.int64)\n indices = indices.view(3, 2, 1, 1).expand(3, 2, 5, 6)\n values = torch.arange(3 * 2 * 5 * 6, dtype=torch.float32).view(3, 2, 5, 6)\n self.run_test(ScatterModel(), (input, indices, values))\n\n input = torch.zeros(3, 4, 2)\n indices = torch.tensor([[[1, 0], [0, 2]], [[1, 1], [0, 1]], [[2, 1], [2, 2]]])\n values = torch.arange(3 * 2 * 2, dtype=torch.float32).view(3, 2, 2)\n self.run_test(ScatterModel(), (input, indices, values))\n\n @skipIfUnsupportedMinOpsetVersion(9)\n def test_scatter_add(self):\n class ScatterModel(torch.nn.Module):\n def forward(self, input, indices, values):\n return input.scatter_add(1, indices, values)\n\n input = torch.tensor([[0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0]])\n indices = torch.tensor([[1, 0], [0, 1], [0, 1]], dtype=torch.int64)\n values = torch.tensor([[1.0, 1.1], [2.0, 2.1], [3.0, 3.1]])\n self.run_test(ScatterModel(), input=(input, indices, values))\n\n @torch.jit.script\n def scatter_sum(src: torch.Tensor, index: torch.Tensor):\n size = src.size()\n out = torch.zeros(size, dtype=src.dtype)\n return out.scatter_add_(1, index, src)\n\n class ScatterModel(torch.nn.Module):\n def forward(self, src, index):\n return scatter_sum(src, index)\n\n src = torch.rand(3, 2)\n index = torch.tensor([[0, 1], [0, 1], [0, 1]], dtype=torch.int64)\n self.run_test(ScatterModel(), (src, index))\n\n @skipIfUnsupportedMinOpsetVersion(9)\n def test_one_hot(self):\n class OneHot(torch.nn.Module):\n def __init__(self, num_classes):\n super().__init__()\n self.num_classes = num_classes\n\n def forward(self, x):\n return torch.nn.functional.one_hot(x, self.num_classes)\n\n x = torch.arange(10)\n self.run_test(OneHot(15), (x))\n\n @skipIfUnsupportedMinOpsetVersion(9)\n def test_gather(self):\n class GatherModel(torch.nn.Module):\n def forward(self, input, indices):\n return input.gather(1, indices)\n\n input = torch.tensor([[1., 2., 3.], [4., 5., 6.], [7., 8., 9.]])\n indices = torch.tensor([[1, 0], [0, 1], [0, 1]], dtype=torch.int64)\n self.run_test(GatherModel(), input=(input, indices))\n\n @disableScriptTest() # RuntimeError: Python type cannot be used as a value\n @skipIfUnsupportedMinOpsetVersion(11)\n def test_gather_constant_fold(self):\n class GatherModule(torch.nn.Module):\n def __init__(self):\n super(GatherModule, 
self).__init__()\n self.register_buffer(\"weight\", torch.ones(5))\n\n def forward(self, x):\n # shape is of rank 0\n shape = self.weight.shape[0]\n m = 5 - shape\n return x.clamp(min=m)\n\n x = torch.randn(1)\n self.run_test(GatherModule(), (x,))\n\n class GatherModule(torch.nn.Module):\n def __init__(self):\n super(GatherModule, self).__init__()\n self.register_buffer(\"weight\", torch.ones(2))\n\n def forward(self, x):\n # shape is of rank 0\n shape = self.weight.shape[0]\n pad = [1, shape, shape, shape]\n zero_pad = torch.nn.ZeroPad2d(pad)\n return zero_pad(x)\n\n x = torch.randn(1, 3, 2)\n self.run_test(GatherModule(), (x,))\n\n @skipIfUnsupportedOpsetVersion([13])\n @skipIfUnsupportedMinOpsetVersion(9)\n def test_expand(self):\n class ExpandModel(torch.nn.Module):\n def forward(self, input):\n return input.expand(2, 3, -1)\n\n input = torch.randn(2, 1, 4)\n self.run_test(ExpandModel(), input=(input))\n\n class ExpandInferDimModel(torch.nn.Module):\n def forward(self, input):\n return input.expand(-1, input.size(0))\n\n input = torch.randn(3, 1)\n self.run_test(ExpandInferDimModel(), input=(input))\n\n class ExpandTensorSizeModel(torch.nn.Module):\n def forward(self, input, size):\n return input.expand(size)\n\n input = torch.randn(3,)\n size = torch.tensor(-1)\n self.run_test(ExpandTensorSizeModel(), input=(input, size))\n\n def test_multinomial(self):\n class Multinomial(torch.nn.Module):\n def forward(self, weight):\n return torch.multinomial(weight, 3, replacement=True)\n\n class MultinomialNoReplacement(torch.nn.Module):\n def forward(self, weight):\n return torch.multinomial(weight, 1)\n\n weight = torch.tensor([[0, 10, 0, 0], [0, 0, 100, 0]], dtype=torch.float)\n self.run_test(Multinomial(), (weight,))\n self.run_test(MultinomialNoReplacement(), (weight,))\n\n def _test_reduced_ops(self, op):\n class ReducedOpModule(torch.nn.Module):\n def forward(self, input):\n return op(input, dim=-1)\n\n if op != torch.mean: # torch.mean only supports float types\n x = torch.randint(10, (4, 4), dtype=torch.uint8)\n self.run_test(ReducedOpModule(), x)\n\n x = torch.randint(10, (4, 4), dtype=torch.int8)\n self.run_test(ReducedOpModule(), x)\n\n x = torch.randint(10, (4, 4), dtype=torch.int16)\n self.run_test(ReducedOpModule(), x)\n\n x = torch.randint(10, (4, 4), dtype=torch.int32)\n self.run_test(ReducedOpModule(), x)\n\n x = torch.randint(10, (4, 4), dtype=torch.int64)\n self.run_test(ReducedOpModule(), x)\n\n # torch.mean only supports float types\n # ORT does not support double ReduceProd for double\n if op != torch.prod and op != torch.mean:\n x = torch.randn(4, 5, dtype=torch.double)\n self.run_test(ReducedOpModule(), x)\n\n if op != torch.prod: # torch.prod not implemented for Half\n x = torch.randn(4, 4, dtype=torch.half)\n self.run_test(ReducedOpModule(), x)\n\n x = torch.randn(4, 5, dtype=torch.float)\n self.run_test(ReducedOpModule(), x)\n\n def test_reduced_sum(self):\n return self._test_reduced_ops(op=torch.sum)\n\n def test_reduced_mean(self):\n return self._test_reduced_ops(op=torch.mean)\n\n def test_reduced_prod(self):\n return self._test_reduced_ops(op=torch.prod)\n\n def test_reduced_min_max(self):\n class ReducedMinMaxModule(torch.nn.Module):\n def forward(self, input):\n return torch.min(input, dim=-1)[0], torch.max(input, dim=0)[0]\n x = torch.randint(10, (4, 4), dtype=torch.int32)\n self.run_test(ReducedMinMaxModule(), x)\n\n x = torch.randint(10, (4, 4), dtype=torch.int64)\n self.run_test(ReducedMinMaxModule(), x)\n\n x = torch.randn(4, 5, dtype=torch.float)\n 
self.run_test(ReducedMinMaxModule(), x)\n\n def test_reduce_log_sum_exp(self):\n class ReduceLogSumExpModel(torch.nn.Module):\n def forward(self, input):\n a = torch.logsumexp(input, dim=0)\n b = torch.logsumexp(input, dim=(0, 1))\n return a + b\n\n x = torch.randn(4, 4, requires_grad=True)\n self.run_test(ReduceLogSumExpModel(), x)\n\n def test_softmax(self):\n for i in range(-4, 3):\n model = torch.nn.Softmax(dim=i)\n input = torch.randn(3, 4, 5, 6)\n self.run_test(model, input)\n\n class SoftmaxUnknownRank(torch.nn.Module):\n def __init__(self, i):\n super().__init__()\n self.softmax = torch.nn.Softmax(dim=i)\n\n def forward(self, x):\n return self.softmax(x.reshape(3, 4, 5, 6))\n\n model = torch.jit.script(SoftmaxUnknownRank(i))\n self.run_test(model, input)\n\n def test_softmax_large_values(self):\n input = torch.tensor([[-1e12, -1e12, -1e12], [1e12, 0.0, -5.0], [3.0, 4.0, 5.0]])\n for i in range(-2, 1):\n model = torch.nn.Softmax(dim=i)\n self.run_test(model, input)\n\n class SoftmaxUnknownRank(torch.nn.Module):\n def __init__(self, i):\n super().__init__()\n self.softmax = torch.nn.Softmax(dim=i)\n\n def forward(self, x):\n return self.softmax(x.reshape(3, 3))\n\n model = torch.jit.script(SoftmaxUnknownRank(i))\n self.run_test(model, input)\n\n def test_logsoftmax(self):\n for i in range(7)[2:]:\n model = torch.nn.LogSoftmax(dim=i - 1)\n dims = [2] * (i - 2) + [3, 4]\n input = torch.ones(*dims, requires_grad=True)\n self.run_test(model, input)\n\n def test_logsoftmax_dim(self):\n for i in range(-4, 3):\n model = torch.nn.LogSoftmax(dim=i)\n input = torch.randn(3, 4, 5, 6)\n self.run_test(model, input)\n\n def test_logsoftmax_dtype(self):\n class Model(torch.nn.Module):\n def forward(self, x):\n return torch.nn.functional.log_softmax(x, dim=1, dtype=torch.float64)\n\n x = torch.randn(3, 4, 5, requires_grad=True)\n self.run_test(Model(), x)\n\n @skipIfUnsupportedMinOpsetVersion(9)\n def test_lstm_no_hidden(self):\n class LSTMModel(torch.nn.Module):\n def __init__(self):\n super().__init__()\n self.rnn = torch.nn.LSTM(input_size=16, hidden_size=16)\n\n def forward(self, x):\n return self.rnn(x)\n\n input = torch.randn((10, 16, 16))\n self.run_test(LSTMModel(), (input,))\n\n @skipIfUnsupportedMinOpsetVersion(9)\n def test_lstm_proj_no_hidden(self):\n class LSTMModel(torch.nn.Module):\n def __init__(self):\n super().__init__()\n self.rnn = torch.nn.LSTM(input_size=16, hidden_size=16, proj_size=8)\n\n def forward(self, x):\n return self.rnn(x)\n\n input = torch.randn((10, 16, 16))\n with self.assertRaises(RuntimeError):\n self.run_test(LSTMModel(), (input,))\n\n @skipIfUnsupportedMinOpsetVersion(9)\n def test_lstm(self):\n class LSTMModel(torch.nn.Module):\n def __init__(self):\n super().__init__()\n self.rnn = torch.nn.LSTM(RNN_INPUT_SIZE, RNN_HIDDEN_SIZE, 1, bidirectional=False)\n\n def forward(self, x, h0, c0):\n return self.rnn(x, (h0, c0))\n\n input = torch.randn(RNN_SEQUENCE_LENGTH, BATCH_SIZE, RNN_INPUT_SIZE)\n h0 = torch.randn(1, BATCH_SIZE, RNN_HIDDEN_SIZE)\n c0 = torch.randn(1, BATCH_SIZE, RNN_HIDDEN_SIZE)\n self.run_test(LSTMModel(), (input, h0, c0))\n\n @skipIfUnsupportedMinOpsetVersion(9)\n def test_lstm_default_init_state(self):\n class LSTMModel(torch.nn.Module):\n def __init__(self):\n super().__init__()\n self.rnn = torch.nn.LSTM(RNN_INPUT_SIZE, RNN_HIDDEN_SIZE, 1, bidirectional=False)\n\n def forward(self, x):\n return self.rnn(x)\n\n input = torch.randn(RNN_SEQUENCE_LENGTH, BATCH_SIZE, RNN_INPUT_SIZE)\n self.run_test(LSTMModel(), input)\n\n 
@skipIfUnsupportedMinOpsetVersion(9)\n def test_lstm_fixed_batch_size(self):\n class LSTMModel(torch.nn.Module):\n def __init__(self):\n super(LSTMModel, self).__init__()\n self.lstm = torch.nn.LSTM(RNN_INPUT_SIZE, RNN_HIDDEN_SIZE, 1, bidirectional=False)\n self.RNN_HIDDEN_SIZE = RNN_HIDDEN_SIZE\n\n def forward(self, input):\n batch_size = input.size()[1]\n h0 = torch.ones([1, batch_size, self.RNN_HIDDEN_SIZE])\n c0 = torch.ones([1, batch_size, self.RNN_HIDDEN_SIZE])\n return self.lstm(input, (h0, c0))\n\n input = torch.randn(RNN_SEQUENCE_LENGTH, BATCH_SIZE, RNN_INPUT_SIZE)\n # verify with different input of same batch size\n input2 = torch.randn(RNN_SEQUENCE_LENGTH, BATCH_SIZE, RNN_INPUT_SIZE)\n self.run_test(LSTMModel(), input, fixed_batch_size=True, test_with_inputs=[input2])\n\n @skipIfUnsupportedMinOpsetVersion(9)\n def test_lstm_post_fix_init_state(self):\n class LSTMModel(torch.nn.Module):\n def __init__(self):\n super(LSTMModel, self).__init__()\n self.lstm = torch.nn.LSTM(RNN_INPUT_SIZE, RNN_HIDDEN_SIZE, 1, bidirectional=False)\n self.RNN_HIDDEN_SIZE = RNN_HIDDEN_SIZE\n\n def forward(self, input):\n batch_size = input.size()[1]\n h0 = torch.ones([1, batch_size, self.RNN_HIDDEN_SIZE])\n c0 = torch.ones([1, batch_size, self.RNN_HIDDEN_SIZE])\n return self.lstm(input, (h0, c0))\n\n model = LSTMModel()\n input = torch.randn(RNN_SEQUENCE_LENGTH, 1, RNN_INPUT_SIZE)\n # verify with different input of different batch size\n input2 = torch.randn(RNN_SEQUENCE_LENGTH, BATCH_SIZE, RNN_INPUT_SIZE)\n self.run_test(model, input, input_names=[\"input.1\"], dynamic_axes={'input.1' : {0 : 'seq', 1 : 'batch'}},\n test_with_inputs=[input2])\n\n def test_lstm_constant_folding(self):\n class LstmNet(torch.nn.Module):\n def __init__(self, input_size, hidden_size, num_layers, bidirectional):\n super(LstmNet, self).__init__()\n self.lstm = torch.nn.LSTM(input_size, hidden_size, num_layers, bidirectional=bidirectional)\n\n def forward(self, input, initial_state: Tuple[torch.Tensor, torch.Tensor]):\n return self.lstm(input, initial_state)\n\n def get_LstmNet_model_and_inputs(input_size, hidden_size, num_layers, batch_size,\n seq_len, bidirectional):\n num_directions = 2 if bidirectional else 1\n model = LstmNet(input_size, hidden_size, num_layers, bidirectional)\n input = torch.randn(seq_len, batch_size, input_size)\n h0 = torch.randn(num_layers * num_directions, batch_size, hidden_size)\n c0 = torch.randn(num_layers * num_directions, batch_size, hidden_size)\n return model, (input, (h0, c0))\n\n batch_size1 = 3\n model1, input1 = get_LstmNet_model_and_inputs(7, 3, 2, batch_size1, 5, True)\n self.run_test(model1, input1, do_constant_folding=True)\n\n batch_size2 = 4\n model2, input2 = get_LstmNet_model_and_inputs(5, 4, 3, batch_size2, 7, False)\n self.run_test(model2, input2, do_constant_folding=True)\n\n @skipIfUnsupportedMinOpsetVersion(9)\n def test_lstm_no_bias(self):\n class LstmNet(torch.nn.Module):\n def __init__(self, num_layers, bidirectional):\n super(LstmNet, self).__init__()\n self.lstm = torch.nn.LSTM(RNN_INPUT_SIZE, RNN_HIDDEN_SIZE, num_layers, bias=False, bidirectional=bidirectional)\n\n def forward(self, input, initial_state: Tuple[torch.Tensor, torch.Tensor]):\n return self.lstm(input, initial_state)\n\n def get_LstmNet_model_and_inputs(num_layers, bidirectional):\n input = torch.randn(RNN_SEQUENCE_LENGTH, BATCH_SIZE, RNN_INPUT_SIZE)\n num_directions = 2 if bidirectional else 1\n model = LstmNet(num_layers, bidirectional)\n h0 = torch.randn(num_layers * num_directions, BATCH_SIZE, 
RNN_HIDDEN_SIZE)\n c0 = torch.randn(num_layers * num_directions, BATCH_SIZE, RNN_HIDDEN_SIZE)\n return model, (input, (h0, c0))\n\n num_layers = [1, 1, 2, 3]\n bidirectional = [True, False, True, False]\n models_and_inputs = [get_LstmNet_model_and_inputs(n, b) for n, b in zip(num_layers, bidirectional)]\n for model, input in models_and_inputs:\n self.run_test(model, input)\n\n @disableScriptTest()\n def test_rnn_no_bias(self):\n def make_model(layers, packed_sequence):\n batch_first = True if packed_sequence == 2 else False\n model = torch.nn.RNN(RNN_INPUT_SIZE, RNN_HIDDEN_SIZE, layers, bidirectional=False,\n batch_first=batch_first, bias=False)\n\n if packed_sequence == 1:\n model = RnnModelWithPackedSequence(model, False)\n if packed_sequence == 2:\n model = RnnModelWithPackedSequence(model, True)\n return model\n\n def make_input(batch_size, layers, packed_sequence):\n batch_first = True if packed_sequence == 2 else False\n seq_lengths = np.random.randint(1, RNN_SEQUENCE_LENGTH + 1, size=batch_size)\n seq_lengths = list(reversed(sorted(map(int, seq_lengths))))\n inputs = [torch.randn(l, RNN_INPUT_SIZE) for l in seq_lengths]\n inputs = rnn_utils.pad_sequence(inputs, batch_first=batch_first)\n inputs = [inputs]\n\n h0 = torch.randn(layers, batch_size, RNN_HIDDEN_SIZE)\n inputs.append(h0)\n if packed_sequence != 0:\n inputs.append(torch.IntTensor(seq_lengths))\n if len(inputs) == 1:\n input = inputs[0]\n else:\n input = tuple(inputs)\n return input\n\n layers = [1, 3, 1, 3, 1, 3]\n packed_sequence = [0, 0, 1, 1, 2, 2]\n models = [make_model(l, p) for l, p in zip(layers, packed_sequence)]\n inputs = [make_input(RNN_BATCH_SIZE, l, p) for l, p in zip(layers, packed_sequence)]\n\n for model, input in zip(models, inputs):\n self.run_test(model, input, batch_size=RNN_BATCH_SIZE)\n\n def test_gru_no_bias(self):\n class GruNet(torch.nn.Module):\n def __init__(self, input_size, hidden_size, num_layers, bidirectional):\n super(GruNet, self).__init__()\n self.mygru = torch.nn.GRU(input_size, hidden_size, num_layers, bidirectional=bidirectional, bias=False)\n\n def forward(self, input, initial_state):\n out = self.mygru(input, initial_state)\n return out\n\n def get_GruNet_model_and_inputs(input_size, hidden_size, num_layers, batch_size,\n seq_len, bidirectional):\n num_directions = 2 if bidirectional else 1\n model = GruNet(input_size, hidden_size, num_layers, bidirectional)\n input = torch.randn(seq_len, batch_size, input_size)\n h0 = torch.randn(num_layers * num_directions, batch_size, hidden_size)\n return model, (input, h0)\n\n input_size = [7, 5]\n hidden_size = [3, 4]\n num_layers = [2, 3]\n batch_size = [3, 4]\n seq_len = [5, 7]\n bidirectional = [True, False]\n models_and_inputs = [get_GruNet_model_and_inputs(i, h, n, b, s, bi)\n for i, h, n, b, s, bi in zip(input_size, hidden_size, num_layers, batch_size, seq_len, bidirectional)]\n for model, input in models_and_inputs:\n self.run_test(model, input, do_constant_folding=True)\n\n def test_gru_constant_folding(self):\n class GruNet(torch.nn.Module):\n def __init__(self, input_size, hidden_size, num_layers, bidirectional):\n super(GruNet, self).__init__()\n self.mygru = torch.nn.GRU(input_size, hidden_size, num_layers, bidirectional=bidirectional)\n\n def forward(self, input, initial_state):\n out = self.mygru(input, initial_state)\n return out\n\n def get_GruNet_model_and_inputs(input_size, hidden_size, num_layers, batch_size,\n seq_len, bidirectional):\n num_directions = 2 if bidirectional else 1\n model = GruNet(input_size, hidden_size, 
num_layers, bidirectional)\n input = torch.randn(seq_len, batch_size, input_size)\n h0 = torch.randn(num_layers * num_directions, batch_size, hidden_size)\n return model, (input, h0)\n\n batch_size1 = 3\n model1, input1 = get_GruNet_model_and_inputs(7, 3, 2, batch_size1, 5, True)\n self.run_test(model1, input1, do_constant_folding=True)\n\n batch_size2 = 4\n model2, input2 = get_GruNet_model_and_inputs(5, 4, 3, batch_size2, 7, False)\n self.run_test(model2, input2, do_constant_folding=True)\n\n @skipIfUnsupportedMinOpsetVersion(8)\n def test_max_tensors(self):\n class MaxModel(torch.nn.Module):\n def forward(self, input, other):\n return torch.max(input, other)\n\n model = MaxModel()\n x = torch.randn(4, 4, requires_grad=True)\n y = torch.randn(4, 1, requires_grad=True)\n self.run_test(model, (x, y))\n\n @skipIfUnsupportedMinOpsetVersion(9)\n def test_arange_end(self):\n class ArangeScript(torch.jit.ScriptModule):\n @torch.jit.script_method\n def forward(self, a):\n return torch.arange(a.size(0), dtype=torch.float).view(-1, 1) + a\n\n x = torch.randn(3, 4, requires_grad=True)\n outputs = ArangeScript()(x)\n self.run_test(ArangeScript(), x)\n\n class ArangeModel(torch.nn.Module):\n def forward(self, a):\n return torch.arange(a.size(0), dtype=torch.float).view(-1, 1) + a\n\n self.run_test(ArangeModel(), x)\n\n @skipIfUnsupportedMinOpsetVersion(11)\n def test_arange_end_notype(self):\n class ArangeScript(torch.jit.ScriptModule):\n @torch.jit.script_method\n def forward(self, a):\n return torch.arange(a.size(0))\n\n x = torch.randn(3, 4, requires_grad=True)\n outputs = ArangeScript()(x)\n self.run_test(ArangeScript(), x)\n\n class ArangeModel(torch.nn.Module):\n def forward(self, a):\n return torch.arange(a.size(0))\n\n self.run_test(ArangeModel(), x)\n\n @skipIfUnsupportedMinOpsetVersion(9)\n def test_arange_start_end(self):\n class ArangeScript(torch.jit.ScriptModule):\n @torch.jit.script_method\n def forward(self, a):\n return torch.arange(2, a.size(0) + 2, dtype=torch.float).view(-1, 1) + a\n\n x = torch.randn(3, 4, requires_grad=True)\n self.run_test(ArangeScript(), x)\n\n class ArangeModel(torch.nn.Module):\n def forward(self, a):\n return torch.arange(2, a.size(0) + 2, dtype=torch.float).view(-1, 1) + a\n\n self.run_test(ArangeModel(), x)\n\n @skipIfUnsupportedMinOpsetVersion(11)\n def test_arange_start_end_notype(self):\n class ArangeScript(torch.jit.ScriptModule):\n @torch.jit.script_method\n def forward(self, a):\n return torch.arange(2.7, a.size(0) + 2).view(-1, 1) + a\n\n x = torch.randn(3, 4, requires_grad=True)\n self.run_test(ArangeScript(), x)\n\n class ArangeModel(torch.nn.Module):\n def forward(self, a):\n return torch.arange(2.7, a.size(0) + 2).view(-1, 1) + a\n\n self.run_test(ArangeModel(), x)\n\n @skipIfUnsupportedMinOpsetVersion(9)\n def test_arange_start_end_step(self):\n class ArangeScript(torch.jit.ScriptModule):\n @torch.jit.script_method\n def forward(self, a):\n return torch.arange(2, a.size(0) * a.size(1) + 2, a.size(1), dtype=torch.float).view(-1, 1) + a\n\n x = torch.randn(3, 4, requires_grad=True)\n self.run_test(ArangeScript(), x)\n\n class ArangeModel(torch.nn.Module):\n def forward(self, a):\n return torch.arange(2, a.size(0) * a.size(1) + 2, a.size(1), dtype=torch.float).view(-1, 1) + a\n\n self.run_test(ArangeModel(), x)\n\n @skipIfUnsupportedMinOpsetVersion(11)\n def test_arange_start_end_step_notype(self):\n class ArangeScript(torch.jit.ScriptModule):\n @torch.jit.script_method\n def forward(self, a):\n return torch.arange(2.7, a.size(0) * a.size(1) + 2, 
a.size(1)).view(-1, 1) + a\n\n x = torch.randn(3, 4, requires_grad=True)\n self.run_test(ArangeScript(), x)\n\n class ArangeModel(torch.nn.Module):\n def forward(self, a):\n return torch.arange(2.7, a.size(0) * a.size(1) + 2, a.size(1)).view(-1, 1) + a\n\n self.run_test(ArangeModel(), x)\n\n @skipIfUnsupportedMinOpsetVersion(9)\n def test__dim_arange(self):\n class DimArange(torch.nn.Module):\n def forward(self, input):\n return torch._dim_arange(input, 1)\n\n x = torch.ones(5, 6)\n self.run_test(DimArange(), x)\n\n def _test_compare_ops(self, model, num_inputs):\n x_float = torch.randn(1, 2, 3, 4, requires_grad=True)\n x_int = torch.randint(10, (3, 4), dtype=torch.int32)\n if num_inputs > 1:\n y_float = torch.randn(1, 2, 3, 4, requires_grad=True)\n y_int = torch.randint(10, (3, 4), dtype=torch.int32)\n self.run_test(model, (x_float, y_float))\n self.run_test(model, (x_float, y_int))\n self.run_test(model, (x_int, y_float))\n self.run_test(model, (x_int, y_int))\n else:\n self.run_test(model, x_float)\n self.run_test(model, x_int)\n\n @skipIfUnsupportedMinOpsetVersion(9)\n def test_logical_and(self):\n class AndModel(torch.nn.Module):\n def forward(self, x, y):\n return torch.logical_and(x, y)\n\n x = torch.randint(0, 2, (5, 5), dtype=torch.bool)\n y = torch.randint(0, 2, (5, 5), dtype=torch.bool)\n self.run_test(AndModel(), input=(x, y))\n\n x = torch.randint(10, (5, 5), dtype=torch.int32)\n y = torch.randint(10, (5, 5), dtype=torch.int32)\n self.run_test(AndModel(), input=(x, y))\n\n x = torch.randint(10, (5, 5), dtype=torch.double)\n y = torch.randint(10, (5, 5), dtype=torch.double)\n self.run_test(AndModel(), input=(x, y))\n\n x = torch.randint(10, (2, 3, 5), dtype=torch.float32)\n y = torch.randint(10, (2, 3, 5), dtype=torch.long)\n self.run_test(AndModel(), input=(x, y))\n\n @skipIfUnsupportedMinOpsetVersion(9)\n def test_logical_or(self):\n class OrModel(torch.nn.Module):\n def forward(self, x, y):\n return torch.logical_or(x, y)\n\n x = torch.randint(0, 2, (5, 5), dtype=torch.bool)\n y = torch.randint(0, 2, (5, 5), dtype=torch.bool)\n self.run_test(OrModel(), input=(x, y))\n\n x = torch.randint(10, (5, 5), dtype=torch.int32)\n y = torch.randint(10, (5, 5), dtype=torch.int32)\n self.run_test(OrModel(), input=(x, y))\n\n x = torch.randint(10, (5, 5), dtype=torch.double)\n y = torch.randint(10, (5, 5), dtype=torch.double)\n self.run_test(OrModel(), input=(x, y))\n\n x = torch.randint(10, (2, 3, 5), dtype=torch.float32)\n y = torch.randint(10, (2, 3, 5), dtype=torch.long)\n self.run_test(OrModel(), input=(x, y))\n\n @skipIfUnsupportedMinOpsetVersion(9)\n def test_logical_xor(self):\n class XorModel(torch.nn.Module):\n def forward(self, x, y):\n return torch.logical_xor(x, y)\n\n x = torch.randint(0, 2, (5, 5), dtype=torch.bool)\n y = torch.randint(0, 2, (5, 5), dtype=torch.bool)\n self.run_test(XorModel(), input=(x, y))\n\n x = torch.randint(10, (5, 5), dtype=torch.int32)\n y = torch.randint(10, (5, 5), dtype=torch.int32)\n self.run_test(XorModel(), input=(x, y))\n\n x = torch.randint(10, (5, 5), dtype=torch.double)\n y = torch.randint(10, (5, 5), dtype=torch.double)\n self.run_test(XorModel(), input=(x, y))\n\n x = torch.randint(10, (2, 3, 5), dtype=torch.float32)\n y = torch.randint(10, (2, 3, 5), dtype=torch.long)\n self.run_test(XorModel(), input=(x, y))\n\n def test_gt(self):\n class GreaterModel(torch.nn.Module):\n def forward(self, input, other):\n return input > other\n self._test_compare_ops(GreaterModel(), 2)\n\n @skipIfUnsupportedMinOpsetVersion(9)\n def test_ge(self):\n 
class GreaterOrEqualModel(torch.nn.Module):\n            def forward(self, input, other):\n                return input >= other\n        self._test_compare_ops(GreaterOrEqualModel(), 2)\n\n    def test_gt_scalar(self):\n        class GreaterModel(torch.nn.Module):\n            def forward(self, input):\n                return input > 1\n        self._test_compare_ops(GreaterModel(), 1)\n\n    def test_gt_primitive(self):\n        class GreaterModel(torch.nn.Module):\n            def __init__(self):\n                super().__init__()\n                self.y : int = 2\n\n            def forward(self, x: int):\n                return self.y > x\n\n        x = 3\n        self.run_test(GreaterModel(), (x, ))\n\n    @skipIfUnsupportedMinOpsetVersion(9)\n    def test_ge_scalar(self):\n        class GreaterOrEqualModel(torch.nn.Module):\n            def forward(self, input):\n                return input >= 1\n        self._test_compare_ops(GreaterOrEqualModel(), 1)\n\n    def test_lt(self):\n        class LessModel(torch.nn.Module):\n            def forward(self, input, other):\n                return input < other\n        self._test_compare_ops(LessModel(), 2)\n\n    @skipIfUnsupportedMinOpsetVersion(9)\n    def test_le(self):\n        class LessOrEqualModel(torch.nn.Module):\n            def forward(self, input, other):\n                return input <= other\n        self._test_compare_ops(LessOrEqualModel(), 2)\n\n    def test_lt_scalar(self):\n        class LessModel(torch.nn.Module):\n            def forward(self, input):\n                return input < 1\n        self._test_compare_ops(LessModel(), 1)\n\n    @skipIfUnsupportedMinOpsetVersion(9)\n    def test_le_scalar(self):\n        class LessOrEqualModel(torch.nn.Module):\n            def forward(self, input):\n                return input <= 1\n        self._test_compare_ops(LessOrEqualModel(), 1)\n\n    def test_matmul(self):\n        class MatmulModel(torch.nn.Module):\n            def forward(self, input, other):\n                return torch.matmul(input, other)\n\n        x = torch.randn(3, 4, requires_grad=True)\n        y = torch.randn(4, 5, requires_grad=True)\n        self.run_test(MatmulModel(), (x, y))\n\n        x = torch.randint(10, (3, 4))\n        y = torch.randint(10, (4, 5))\n        self.run_test(MatmulModel(), (x, y))\n\n    def test_matmul_batch(self):\n        class MatmulModel(torch.nn.Module):\n            def forward(self, input, other):\n                return torch.matmul(input, other)\n\n        x = torch.randn(2, 3, 4, requires_grad=True)\n        y = torch.randn(2, 4, 5, requires_grad=True)\n        self.run_test(MatmulModel(), (x, y))\n\n        x = torch.randint(10, (2, 3, 4))\n        y = torch.randint(10, (2, 4, 5))\n        self.run_test(MatmulModel(), (x, y))\n\n    def _argmin_argmax_model(self, input):\n        class ArgminArgmaxModel(torch.nn.Module):\n            def forward(self, input):\n                return torch.argmin(input), \\\n                    torch.argmax(input), \\\n                    torch.argmin(input, keepdim=True), \\\n                    torch.argmax(input, keepdim=True)\n\n        self.run_test(ArgminArgmaxModel(), input)\n\n    def test_argmin_argmax(self):\n        input = torch.randn(7, 3, 5)\n        self._argmin_argmax_model(input)\n\n    # Argmin and Argmax with \"select_last_index\" is not supported before opset 12\n    # \"select_last_index\" was added in opset 12 to deal with corner case where the\n    # same value appears multiple times in the tensor\n    @skipIfUnsupportedMinOpsetVersion(12)\n    def test_argmin_argmax_select_last_index(self):\n        input = torch.tensor([[1., 2., 3.],\n                              [1., 1., 2.]])\n        self._argmin_argmax_model(input)\n\n        input = torch.ones(7, 3, 5)\n        self._argmin_argmax_model(input)\n\n    def test_repeat(self):\n        class RepeatModel(torch.nn.Module):\n            def forward(self, x, y):\n                x2 = x.repeat(y.shape[0], 1)\n                y1 = y.view(-1, 1)\n                return x2 + y1\n\n        x = torch.tensor([1, 2, 3])\n        y = torch.tensor([4, 5, 8, 9])\n        self.run_test(RepeatModel(), (x, y))\n\n    @skipIfUnsupportedMinOpsetVersion(9)\n    def test_repeat_interleave(self):\n        class FlattenModel(torch.nn.Module):\n            def forward(self, x):\n                return x.repeat_interleave(2)\n\n        
x = torch.tensor([1, 2, 3])\n self.run_test(FlattenModel(), (x,))\n\n class DimsModel(torch.nn.Module):\n def forward(self, x):\n return x.repeat_interleave(4, dim=1)\n\n x = torch.tensor([[1, 2], [3, 4]])\n self.run_test(DimsModel(), (x,))\n\n class DimsModel2(torch.nn.Module):\n def forward(self, x):\n repeats = torch.tensor([4])\n return torch.repeat_interleave(x, repeats, dim=1)\n\n x = torch.tensor([[1, 2], [3, 4]])\n self.run_test(DimsModel2(), (x,))\n\n class RepeatsDimsModel(torch.nn.Module):\n def forward(self, x):\n repeats = torch.tensor([1, 2])\n return torch.repeat_interleave(x, repeats, dim=0)\n\n x = torch.tensor([[1, 2], [3, 4]])\n self.run_test(RepeatsDimsModel(), (x,))\n\n class RepeatsDimsModel2(torch.nn.Module):\n def forward(self, x):\n repeats = torch.tensor([1, 2])\n return torch.repeat_interleave(x, repeats, dim=1)\n\n x = torch.tensor([[1, 2], [3, 4]])\n self.run_test(RepeatsDimsModel2(), (x,))\n\n @skipIfUnsupportedMinOpsetVersion(11)\n def test_dynamic_repeat_interleave(self):\n class SingleDynamicModel(torch.nn.Module):\n def forward(self, x):\n repeats = torch.tensor(4)\n return torch.repeat_interleave(x, repeats, dim=1)\n\n x = torch.tensor([[1, 2, 4], [3, 4, 7]])\n another_x = torch.tensor([[7, 8], [5, 6]])\n self.run_test(SingleDynamicModel(), x, test_with_inputs=[another_x],\n input_names=['input_1'], dynamic_axes={'input_1' : {1 : 'w'}})\n\n class NegDynamicModel(torch.nn.Module):\n def forward(self, x):\n repeats = torch.tensor(4)\n return torch.repeat_interleave(x, repeats, dim=-1)\n\n x = torch.tensor([[1, 2, 4], [3, 4, 7]])\n another_x = torch.tensor([[7, 8], [5, 6]])\n self.run_test(NegDynamicModel(), x, test_with_inputs=[another_x],\n input_names=['input_1'], dynamic_axes={'input_1' : {1 : 'w'}})\n\n class SingleDynamicModel2(torch.nn.Module):\n def forward(self, x):\n repeats = torch.tensor([4])\n return torch.repeat_interleave(x, repeats, dim=0)\n\n x = torch.tensor([[1, 2], [3, 4]])\n another_x = torch.tensor([[7, 8], [5, 6]])\n self.run_test(SingleDynamicModel2(), x, test_with_inputs=[another_x],\n input_names=['input_1'], dynamic_axes={'input_1' : {0 : 'h'}})\n\n class AllDynamicModel(torch.nn.Module):\n def forward(self, x):\n repeats = torch.tensor([4])\n return torch.repeat_interleave(x, repeats, dim=0)\n\n x = torch.tensor([[1, 2, 4, 16], [3, 9, 27, 81], [2, 3, 5, 7]])\n another_x = torch.tensor([[7, 8], [5, 6]])\n self.run_test(AllDynamicModel(), x, test_with_inputs=[another_x],\n input_names=['input_1'], dynamic_axes={'input_1' : {0 : 'h', 1 : 'w'}})\n\n def test_view(self):\n class ViewModel(torch.nn.Module):\n def forward(self, input):\n return input.view(4, 24)\n\n x = torch.randint(10, (4, 2, 3, 4), dtype=torch.int32)\n self.run_test(ViewModel(), x)\n\n def test_view_dynamic(self):\n class ViewModel(torch.nn.Module):\n def forward(self, input, other):\n return input.view(other.shape)\n\n x = torch.randn(2, 3, 4)\n shape = torch.randn(6, 4)\n self.run_test(ViewModel(), (x, shape))\n\n def test_view_dynamic_zero_dim(self):\n class ViewModel(torch.nn.Module):\n def forward(self, input):\n input = input.view(-1, 2)\n return input.view(1, -1)\n\n x = torch.ones(2)\n another_x = torch.empty((0,))\n self.run_test(ViewModel(), x, test_with_inputs=[another_x],\n input_names=['input_1'], dynamic_axes={'input_1': [0, ]})\n\n def test_view_as(self):\n class ViewModel(torch.nn.Module):\n def forward(self, input, other):\n return input.view_as(other)\n\n x = torch.randn(2, 3, 4)\n y = torch.randn(6, 4)\n self.run_test(ViewModel(), (x, y))\n\n def 
test_linear(self):\n class LinearModel(torch.nn.Module):\n def __init__(self):\n super(LinearModel, self).__init__()\n self.fc = torch.nn.Linear(16, 16)\n\n def forward(self, x):\n out = self.fc(x)\n out = self.fc(out)\n return out\n\n x = torch.randn(3, 16)\n self.run_test(LinearModel(), (x,))\n\n class LinearModel(torch.nn.Module):\n def forward(self, input, weight, bias):\n return torch.nn.functional.linear(input, weight, bias)\n\n # input of rank 2\n x = torch.randn(2, 2)\n y = torch.randn(2, 2)\n z = torch.randn(1)\n self.run_test(LinearModel(), (x, y, z))\n\n # input of rank 3\n x = torch.randn(3, 3, 3)\n y = torch.randn(3, 3)\n z = torch.randn(1)\n self.run_test(LinearModel(), (x, y, z))\n\n @disableScriptTest()\n def test_weight_norm(self):\n # addmm for 3-d inputs converts to onnx::MatMul\n model = torch.nn.utils.weight_norm(torch.nn.Linear(5, 10), dim=1)\n x = torch.randn(3, 4, 5, requires_grad=True)\n self.run_test(model, x)\n\n # addmm for 2-d inputs converts to onnx::Gemm\n model = torch.nn.utils.weight_norm(torch.nn.Linear(5, 10), dim=1)\n x = torch.randn(4, 5, requires_grad=True)\n self.run_test(model, x)\n\n model = torch.nn.utils.weight_norm(torch.nn.Conv1d(1, 1, 3))\n x = torch.randn(1, 1, 5, requires_grad=True)\n self.run_test(model, x)\n\n model = torch.nn.utils.weight_norm(torch.nn.Conv1d(1, 1, 3), dim=-2)\n x = torch.randn(1, 1, 5, requires_grad=True)\n self.run_test(model, x)\n\n model = torch.nn.utils.weight_norm(torch.nn.Conv1d(3, 6, 3), name='weight')\n x = torch.randn(3, 3, 5, requires_grad=True)\n self.run_test(model, x)\n\n @disableScriptTest()\n def test_weight_norm_nodim(self):\n # addmm for 3-d inputs converts to onnx::MatMul\n model = torch.nn.utils.weight_norm(torch.nn.Linear(5, 10), dim=None)\n x = torch.randn(3, 4, 5, requires_grad=True)\n self.run_test(model, x)\n\n # addmm for 2-d inputs converts to onnx::Gemm\n model = torch.nn.utils.weight_norm(torch.nn.Linear(5, 10), dim=None)\n x = torch.randn(4, 5, requires_grad=True)\n self.run_test(model, x)\n\n def test_flatten(self):\n class FlattenModel(torch.nn.Module):\n def forward(self, input):\n return torch.flatten(input)\n\n x = torch.randint(10, (1, 2, 3, 4))\n self.run_test(FlattenModel(), x)\n\n def test_flatten2d(self):\n class FlattenModel(torch.nn.Module):\n def forward(self, input):\n return torch.flatten(input, 1)\n\n x = torch.randint(10, (1, 2, 3, 4))\n self.run_test(FlattenModel(), x)\n\n def test_flatten2d_neg(self):\n class FlattenModel(torch.nn.Module):\n def forward(self, x):\n return torch.flatten(x, 1, -1), torch.flatten(x, 0, -2), torch.flatten(x, 1, -2)\n\n x = torch.randint(10, (1, 2, 3, 4))\n self.run_test(FlattenModel(), x)\n\n @skipIfUnsupportedMinOpsetVersion(9)\n def test_flatten_dynamic_axes(self):\n class MyModule(torch.nn.Module):\n def forward(self, x):\n return torch.flatten(x, start_dim=2, end_dim=3)\n\n batch_size = 3\n x = torch.randn(batch_size, 5, 4, 5)\n y = torch.randn(5, 5, 4, 5)\n model = MyModule()\n self.run_test(model, x, test_with_inputs=[y],\n input_names=['input'],\n output_names=['output'],\n dynamic_axes={'input' : {0 : 'batch_size'},\n 'output' : {0 : 'batch_size'}})\n\n @skipIfUnsupportedMinOpsetVersion(11)\n def test_getitem(self):\n class GetItemModel(torch.jit.ScriptModule):\n @torch.jit.script_method\n def forward(self, x, y, z, ind):\n # this will create prim::ListConstruct(x, y, z) + aten::__getitem__\n arr = [x, y, z]\n return arr[ind]\n\n x = torch.randn(3, 4, 5)\n y = torch.randn(1, 4, 5)\n z = torch.randn(2, 4, 5)\n ind = torch.tensor(1, 
dtype=torch.long)\n self.run_test(GetItemModel(), (x, y, z, ind))\n\n ind = torch.tensor(-2, dtype=torch.long)\n self.run_test(GetItemModel(), (x, y, z, ind))\n\n @disableScriptTest() # torch.nonzero(x, as_tuple=True) is not scriptable.\n @skipIfUnsupportedMinOpsetVersion(9)\n def test_nonzero(self):\n class NonzeroModel(torch.nn.Module):\n def forward(self, x):\n return x.nonzero(), x.nonzero(as_tuple=True)\n\n x = torch.randn(60).index_fill_(0, torch.randint(0, 60, (20,)), 0).view(3, 4, 5)\n self.run_test(NonzeroModel(), (x,))\n\n def test_unbind(self):\n class UnbindModel(torch.nn.Module):\n def forward(self, input):\n _, out, _ = input.unbind()\n return out\n\n x = torch.randn(3, 4, 5)\n self.run_test(UnbindModel(), x)\n\n class UnbindModel2(torch.nn.Module):\n def forward(self, input):\n _, out, _, _ = input.unbind(1)\n return out\n\n x = torch.randn(3, 4, 5)\n self.run_test(UnbindModel2(), x)\n\n class UnbindModel3(torch.nn.Module):\n def forward(self, input):\n _, out, _, _ = input.unbind(-2)\n return out\n\n x = torch.randn(3, 4, 5)\n self.run_test(UnbindModel3(), x)\n\n @skipIfUnsupportedMinOpsetVersion(11)\n def test_len(self):\n class LenModel(torch.jit.ScriptModule):\n @torch.jit.script_method\n def forward(self, input):\n return len(input.unbind()) + input\n\n x = torch.randn(4, 5)\n self.run_test(LenModel(), x, input_names=['input'], dynamic_axes={'input': {0: 'seq'}},\n test_with_inputs=(torch.randn(5, 5),))\n\n @skipIfUnsupportedMinOpsetVersion(9)\n def test_len_list(self):\n class LenListModel(torch.jit.ScriptModule):\n @torch.jit.script_method\n def forward(self, input):\n return torch.ones(len(input.shape))\n\n x = torch.randn(4, 5)\n self.run_test(LenListModel(), x)\n\n @skipIfUnsupportedMinOpsetVersion(11)\n def test_unbind_dynamic(self):\n class UnbindModel(torch.jit.ScriptModule):\n @torch.jit.script_method\n def forward(self, input):\n return input.unbind()[1]\n\n x = torch.randn(3, 4, 5)\n self.run_test(UnbindModel(), x)\n\n class UnbindModel2(torch.jit.ScriptModule):\n @torch.jit.script_method\n def forward(self, input):\n return input.unbind(-1)[1]\n\n x = torch.randn(3, 4, 5)\n self.run_test(UnbindModel2(), x)\n\n @disableScriptTest() # scripting tests run for opsets > 11. 
See: test_split_script\n def test_split(self):\n class SplitModel(torch.nn.Module):\n def forward(self, input):\n return input.split([2, 1, 2]), input.split([3, 2])[0]\n\n x = torch.randn(5, 4, 3)\n self.run_test(SplitModel(), x)\n\n class SplitModel2(torch.nn.Module):\n def forward(self, input):\n return input.split([2, 1, 1], -2), input.split([2, 2], -2)[-1]\n\n x = torch.randn(5, 4, 3)\n self.run_test(SplitModel2(), x)\n\n class SplitModel3(torch.nn.Module):\n def forward(self, input):\n return input.split([2, 1, 2])\n\n x = torch.randn(5, 4, 3)\n self.run_test(SplitModel3(), x)\n\n @skipIfUnsupportedMinOpsetVersion(11)\n def test_split_script(self):\n class SplitModel(torch.nn.Module):\n def forward(self, input):\n return input.split([2, 1, 2]), input.split([3, 2])[0]\n\n x = torch.randn(5, 4, 3)\n self.run_test(SplitModel(), x)\n\n class SplitModel2(torch.nn.Module):\n def forward(self, input):\n return input.split([2, 1, 1], -2), input.split([2, 2], -2)[-1]\n\n x = torch.randn(5, 4, 3)\n self.run_test(SplitModel2(), x)\n\n class SplitModel3(torch.nn.Module):\n def forward(self, input):\n return input.split([2, 1, 2])\n\n x = torch.randn(5, 4, 3)\n self.run_test(SplitModel3(), x)\n\n @skipIfUnsupportedMinOpsetVersion(11)\n @disableScriptTest()\n def test_split_size_as_list(self):\n class SplitModel(torch.nn.Module):\n def forward(self, input, split_sizes: List[int]):\n out = []\n split_list: List[torch.Tensor] = input.split(split_sizes)\n\n for ob in split_list:\n out.append(ob)\n return torch.cat(out, dim=0)\n\n x = torch.randn(6, 4, 3)\n split_sizes = [torch.tensor(2), torch.tensor(4)]\n self.run_test(SplitModel(), (x, split_sizes))\n\n @skipIfUnsupportedMinOpsetVersion(11)\n def test_split_size_with_slice(self):\n class SplitModule(torch.nn.Module):\n def forward(self, x, y, t):\n splits = (x.size(1), y.size(1))\n out, out2 = torch.split(t, splits, dim=1)\n return out, out2\n\n x = torch.randn(2, 3)\n y = torch.randn(2, 4)\n t = torch.randn(2, 7)\n self.run_test(SplitModule(), (x, y, t))\n\n @skipIfUnsupportedMinOpsetVersion(11)\n def test_split_dynamic(self):\n class SplitModel(torch.jit.ScriptModule):\n @torch.jit.script_method\n def forward(self, input):\n return input.split(2)[1]\n\n x = torch.randn(5, 4, 3)\n self.run_test(SplitModel(), x)\n\n class SplitModel2(torch.jit.ScriptModule):\n @torch.jit.script_method\n def forward(self, input):\n return input.split(2, -3)[1]\n\n x = torch.randn(5, 4, 3)\n self.run_test(SplitModel2(), x)\n\n @skipIfUnsupportedMinOpsetVersion(11)\n @disableScriptTest()\n def test_chunk(self):\n class ChunkModel(torch.nn.Module):\n def __init__(self, dim=1):\n super(ChunkModel, self).__init__()\n self.dim = dim\n\n def forward(self, x):\n return torch.chunk(x, 3, dim=self.dim)\n\n model = ChunkModel()\n model.eval()\n model_neg_dim = ChunkModel(-1)\n model_neg_dim.eval()\n x = torch.randn(1, 18)\n\n for dim_size_ in range(13, 16):\n y = torch.randn(1, dim_size_)\n self.run_test(model, x, test_with_inputs=[y],\n input_names=['x'],\n dynamic_axes={'x': {0: 'batch_size', 1: 'dims'}})\n\n self.run_test(model_neg_dim, x, test_with_inputs=[y],\n input_names=['x'],\n dynamic_axes={'x': {0: 'batch_size', 1: 'dims'}})\n\n def test_concat(self):\n class ConcatModel(torch.nn.Module):\n def forward(self, x, y, z):\n return torch.cat((x, y, z))\n\n x = torch.randn(3, 4, 5)\n y = torch.randn(1, 4, 5)\n z = torch.randn(2, 4, 5)\n self.run_test(ConcatModel(), (x, y, z))\n\n @skipIfUnsupportedMinOpsetVersion(11)\n def test_concat_dynamic(self):\n class 
ConcatDynamicModel(torch.jit.ScriptModule):\n @torch.jit.script_method\n def forward(self, x):\n return torch.cat(x.unbind())\n\n x = torch.randn(4, 5, 6)\n self.run_test(ConcatDynamicModel(), x)\n\n def test_stack(self):\n class StackModel(torch.nn.Module):\n def forward(self, x, y, z):\n return torch.stack((x, y, z), 1)\n\n x = torch.randn(3, 4, 5)\n y = torch.randn(3, 4, 5)\n z = torch.randn(3, 4, 5)\n self.run_test(StackModel(), (x, y, z))\n\n @skipIfUnsupportedMinOpsetVersion(11)\n def test_stack_dynamic(self):\n class StackDynamicModel(torch.jit.ScriptModule):\n @torch.jit.script_method\n def forward(self, x):\n return torch.stack(x.unbind(), 1)\n\n x = torch.randn(4, 5, 6)\n self.run_test(StackDynamicModel(), x)\n\n def test_loop_dynamic(self):\n class LoopModel(torch.jit.ScriptModule):\n @torch.jit.script_method\n def forward(self, x):\n for i in range(x.size(2)):\n x = x + i\n return x\n\n model = LoopModel()\n inputs = torch.zeros(1, 2, 3, dtype=torch.long)\n self.run_test(model, inputs)\n\n @skipIfUnsupportedMinOpsetVersion(9)\n def test_loop_nested(self):\n class NestedLoopsModel(torch.jit.ScriptModule):\n @torch.jit.script_method\n def forward(self, x):\n for i in range(5):\n a = 0\n while a < 4:\n a += 1\n x = x + a\n return x\n\n model = NestedLoopsModel()\n inputs = torch.zeros(1, 2, 3, dtype=torch.long)\n self.run_test(model, inputs)\n\n @skipIfUnsupportedMinOpsetVersion(11)\n def test_loop_with_list(self):\n class ListLoopModel(torch.jit.ScriptModule):\n @torch.jit.script_method\n def forward(self, x):\n res = []\n res1 = []\n arr = x.split([3, 4, 1, 1, 2, 3, 2], 0)\n res2 = torch.zeros(3, 4, dtype=torch.long)\n res3 = []\n res4 = []\n for i in range(len(arr)):\n res.append(arr[i].sum(0, False))\n res1.append(arr[-1 - i].sum(0, False))\n res2 += 1\n res3 = res3 + [arr[i].sum(0, False)]\n res4 += [arr[-1 - i].sum(0, False)]\n return res, res1, res2, torch.stack(res3), torch.stack(res4)\n\n model = ListLoopModel()\n inputs = torch.randn(16)\n self.run_test(model, inputs)\n\n @skipIfONNXShapeInference(False)\n @skipIfUnsupportedMinOpsetVersion(11)\n def test_loop_transpose(self):\n class LoopModel(torch.nn.Module):\n def forward(self, x):\n res = torch.zeros_like(x[0])\n for i in range(x.size(0)):\n res += x[0].transpose(0, 1)\n return res\n\n model = torch.jit.script(LoopModel())\n x = torch.randn(5, 3, 3)\n self.run_test(model, x)\n\n @skipIfUnsupportedMinOpsetVersion(11)\n def test_loop_multi_dim(self):\n class LoopMultiDimModel(torch.jit.ScriptModule):\n @torch.jit.script_method\n def forward(self, x, y):\n for x_ in torch.flip(x.narrow(0, 0, 7), [0]):\n y = x_[0][y]\n return y\n\n model = LoopMultiDimModel()\n x = torch.randint(0, 5, (8, 1, 17), dtype=torch.long)\n y = torch.ones(1, dtype=torch.long)\n self.run_test(model, (x, y))\n\n @skipIfUnsupportedMinOpsetVersion(11)\n def test_list(self):\n class ListModel(torch.jit.ScriptModule):\n @torch.jit.script_method\n def forward(self, x):\n tensors = x.unbind()\n res = []\n res.append(tensors[0])\n res.append(tensors[1])\n res.pop(1)\n\n res.insert(0, tensors[1])\n res.append(tensors[2])\n res += [tensors[3], tensors[4]]\n res = res + [tensors[5]]\n return torch.ones(len(res))\n\n model = ListModel()\n inputs = torch.randn(16, 1)\n self.run_test(model, inputs)\n\n @skipIfUnsupportedMinOpsetVersion(11)\n def test_list_append(self):\n class ListModel(torch.nn.Module):\n def forward(self, x, y):\n res = []\n for i in range(x.size(0)):\n res += [torch.matmul(x[i], y)]\n return res\n\n model = torch.jit.script(ListModel())\n x 
= torch.randn(16, 3, 4)\n y = torch.randn(4, 5)\n self.run_test(model, (x, y))\n\n @skipIfUnsupportedMinOpsetVersion(13)\n def test_list_append_nested(self):\n class ListModel(torch.nn.Module):\n def forward(self, x, y):\n res = []\n for i in range(x.size(0)):\n for j in range(x.size(1)):\n res += [torch.matmul(x[i][j], y)]\n return res\n\n model = torch.jit.script(ListModel())\n x = torch.randn(4, 4, 3, 4)\n y = torch.randn(4, 5)\n self.run_test(model, (x, y))\n\n @skipIfUnsupportedMinOpsetVersion(14) # Need onnx::identity of sequence in opset 14\n def test_list_append_nested_2(self):\n class ListModel(torch.nn.Module):\n def forward(self, x):\n res = []\n res_replicate = []\n for i in range(x.size(0)):\n if len(res) > 2:\n for j in range(x.size(1)):\n res.append(x[i][j])\n res_replicate.append(res[-1])\n res.append(res_replicate[-1])\n return res, res_replicate\n\n model = torch.jit.script(ListModel())\n x = torch.randn(4, 4, 3, 4)\n self.run_test(model, (x, ))\n\n @skipIfUnsupportedMinOpsetVersion(11)\n def test_list_pop(self):\n class ListModel(torch.nn.Module):\n def forward(self, x, y):\n res = []\n for i in range(x.size(0)):\n res += [torch.matmul(x[i], y)]\n res.pop()\n return res\n\n model = torch.jit.script(ListModel())\n x = torch.randn(16, 3, 4)\n y = torch.randn(4, 5)\n self.run_test(model, (x, y))\n\n @skipIfUnsupportedMinOpsetVersion(13)\n def test_list_pop_nested(self):\n class ListModel(torch.nn.Module):\n def forward(self, x, y):\n res = []\n for i in range(x.size(0)):\n for j in range(x.size(1)):\n res += [torch.matmul(x[i][j], y)]\n res.pop()\n res += [torch.matmul(x[i][0], y)]\n return res\n\n model = torch.jit.script(ListModel())\n x = torch.randn(4, 4, 3, 4)\n y = torch.randn(4, 5)\n self.run_test(model, (x, y))\n\n @skipIfUnsupportedMinOpsetVersion(11)\n def test_list_del(self):\n class ListModel(torch.nn.Module):\n def forward(self, x, y):\n res = []\n for i in range(x.size(0)):\n res += [torch.matmul(x[i], y)]\n del res[2]\n return res\n\n model = torch.jit.script(ListModel())\n x = torch.randn(16, 3, 4)\n y = torch.randn(4, 5)\n self.run_test(model, (x, y))\n\n @skipIfUnsupportedMinOpsetVersion(13)\n def test_list_del_nested(self):\n class ListModel(torch.nn.Module):\n def forward(self, x, y):\n res = []\n for i in range(x.size(0)):\n for j in range(x.size(1)):\n res += [torch.matmul(x[i][j], y)]\n del res[i]\n res += [torch.matmul(x[i][0], y)]\n return res\n\n model = torch.jit.script(ListModel())\n x = torch.randn(4, 4, 3, 4)\n y = torch.randn(4, 5)\n self.run_test(model, (x, y))\n\n @unittest.skip(\"Enable this once remove is supported by pytorch\")\n @skipIfUnsupportedMinOpsetVersion(11)\n def test_list_remove(self):\n class ListModel(torch.nn.Module):\n def forward(self, x, y):\n res = []\n for i in range(x.size(0)):\n res += [torch.matmul(x[i], y)]\n # The following fails with pytorch\n # RuntimeError: Boolean value of Tensor with more than one value is ambiguous\n res.remove(res[2])\n return res\n\n model = torch.jit.script(ListModel())\n x = torch.randn(16, 3, 4)\n y = torch.randn(4, 5)\n self.run_test(model, (x, y))\n\n @skipIfUnsupportedMinOpsetVersion(11)\n def test_list_set(self):\n class ListModel(torch.nn.Module):\n def forward(self, x, y):\n res = []\n for i in range(x.size(0)):\n res.append(x[i])\n res[y] = x[y]\n return res\n\n model = torch.jit.script(ListModel())\n x = torch.randn(12, 4)\n y = torch.tensor(2, dtype=torch.long)\n self.run_test(model, (x, y))\n\n @skipIfUnsupportedMinOpsetVersion(13)\n def test_list_idx_sum(self):\n class 
ListModel(torch.nn.Module):\n def forward(self, x, y):\n indices = torch.arange(x.size(0))\n res = []\n for i in range(x.size(0)):\n res.append(x[i])\n return res[torch.sum(indices[:y])]\n\n model = torch.jit.script(ListModel())\n x = torch.randn(12, 4)\n y = torch.tensor(2, dtype=torch.long)\n self.run_test(model, (x, y))\n\n @skipIfUnsupportedMinOpsetVersion(9)\n def test_tensor_factories(self):\n class TensorFactory(torch.nn.Module):\n def forward(self, x):\n return torch.zeros(x.size()) + torch.ones(x.size())\n\n x = torch.randn(2, 3, 4)\n self.run_test(TensorFactory(), x)\n\n @skipIfUnsupportedMinOpsetVersion(9)\n def test_tensor_factories_script(self):\n class TensorFactory(torch.jit.ScriptModule):\n @torch.jit.script_method\n def forward(self, x):\n return torch.zeros(x.shape, dtype=torch.float) + torch.ones(x.shape, dtype=torch.float)\n\n x = torch.randn(2, 3, 4)\n self.run_test(TensorFactory(), x)\n\n @skipIfUnsupportedMinOpsetVersion(9)\n def test_tensor_like_factories_script(self):\n class TensorFactory(torch.jit.ScriptModule):\n @torch.jit.script_method\n def forward(self, x):\n zeros = torch.zeros_like(x, dtype=torch.float, layout=torch.strided, device=torch.device('cpu'))\n ones = torch.ones_like(x, dtype=torch.float, layout=torch.strided, device=torch.device('cpu'))\n return zeros + ones\n\n x = torch.randn(2, 3, 4)\n self.run_test(TensorFactory(), x)\n\n @skipIfUnsupportedMinOpsetVersion(9)\n def test_eye(self):\n class TensorFactory(torch.nn.Module):\n def forward(self, x):\n return torch.eye(x.size()[1], 3), torch.eye(4, 4, dtype=torch.long), \\\n torch.eye(x.size()[1], 2, dtype=torch.long), torch.eye(x.shape[0]), \\\n torch.eye(x.shape[0], dtype=torch.float64)\n\n x = torch.randn(2, 3, 4)\n another_x = torch.randn(5, 6, 7)\n self.run_test(TensorFactory(), x, test_with_inputs=[another_x],\n input_names=['input_1'], dynamic_axes={'input_1': [0, 1, 2]})\n\n @skipIfUnsupportedMinOpsetVersion(9)\n def test_inplace_zero(self):\n class Zero_(torch.nn.Module):\n def forward(self, x):\n return x.zero_(), x\n\n x = torch.randn(2, 3, 4)\n self.run_test(Zero_(), x)\n\n @skipIfUnsupportedMinOpsetVersion(9)\n def test_new_zeros(self):\n class Zero_(torch.nn.Module):\n def forward(self, x):\n return x.new_zeros(x.shape[1:2]), x.new_zeros(x.shape[2:], dtype=torch.long)\n\n x = torch.randn(2, 3, 4)\n self.run_test(Zero_(), x)\n\n @skipIfONNXShapeInference(True)\n @skipIfUnsupportedMinOpsetVersion(9)\n def test_tolist(self):\n class List(torch.jit.ScriptModule):\n @torch.jit.script_method\n def forward(self, input):\n cur_shape = torch._shape_as_tensor(input)\n final_shape: List[int] = cur_shape.tolist()\n pad_tensor = torch.zeros([1, 2] + final_shape)\n return pad_tensor\n\n x = torch.randn(2, 3)\n self.run_test(List(), (x,))\n\n @skipIfUnsupportedMinOpsetVersion(9)\n @disableScriptTest()\n def test_list_pass(self):\n class Slice(torch.nn.Module):\n def forward(self, x, y):\n return x.new_zeros(x.shape[2:] + y.shape[1:])\n\n x = torch.randn(2, 3, 4, 5)\n y = torch.randn(1, 2, 3, 4)\n self.run_test(Slice(), (x, y))\n\n class Size(torch.nn.Module):\n def forward(self, x, y):\n return x.new_zeros(x.shape + y.shape)\n\n x = torch.randn(2, 3, 4)\n y = torch.randn(1, 2, 3)\n self.run_test(Size(), (x, y))\n\n class Array(torch.nn.Module):\n def forward(self, x, y):\n arr1 = [x.shape[0], x.shape[1], 2]\n arr2 = [y.shape[0], y.shape[1]]\n return x.new_zeros(arr1 + arr2)\n\n x = torch.randn(2, 3, 4)\n y = torch.randn(1, 2, 3)\n self.run_test(Array(), (x, y))\n\n class List(torch.nn.Module):\n def 
forward(self, x, y):\n l1 = list(x.shape)\n l2 = list(y.shape)\n return x.new_zeros(l1 + l2)\n\n x = torch.randn(2, 3, 4)\n y = torch.randn(1, 2, 3)\n self.run_test(List(), (x, y))\n\n @skipIfUnsupportedMinOpsetVersion(9)\n def test_new_empty(self):\n class Emtpy(torch.nn.Module):\n def forward(self, x):\n return x.new_empty(x.shape[0]).fill_(0), x.new_empty(x.shape[0], dtype=torch.long) * 0\n\n x = torch.randn(2, 3, 4)\n self.run_test(Emtpy(), x)\n\n @skipIfUnsupportedMinOpsetVersion(9)\n def test_new_full(self):\n class Full(torch.nn.Module):\n def forward(self, x):\n return x.new_full(x.shape[1:2], 5), x.new_full(x.shape[0:1], 1.3, dtype=torch.long)\n\n x = torch.randn(2, 3, 4)\n self.run_test(Full(), x)\n\n @skipIfUnsupportedMinOpsetVersion(9)\n def test_inplace_list(self):\n class Arithmetic(torch.jit.ScriptModule):\n @torch.jit.script_method\n def forward(self, x, y):\n return torch.cat([x.add_(3), y.fill_(0)])\n\n x = torch.randn(2, 3)\n y = torch.randn(2, 3)\n self.run_test(Arithmetic(), (x, y))\n\n @skipIfUnsupportedMinOpsetVersion(9)\n def test_inplace_fill(self):\n class Fill_(torch.nn.Module):\n def forward(self, x):\n return x.fill_(3), x\n\n x = torch.randn(2, 3, 4)\n self.run_test(Fill_(), x)\n\n def test_inplace_arithmetic(self):\n class Arithmetic(torch.jit.ScriptModule):\n @torch.jit.script_method\n def forward(self, x, y):\n x.add_(3)\n y.mul_(x)\n return x, y\n\n x = torch.randn(2, 3, 4)\n y = torch.randn(2, 3, 4)\n self.run_test(Arithmetic(), (x, y))\n\n def test_inplace_arithmetic_half(self):\n class InplaceAddModel(torch.nn.Module):\n def forward(self, x, y):\n return x.add_(y)\n\n class InplaceMulModel(torch.nn.Module):\n def forward(self, x, y):\n return x.mul_(y)\n\n x = torch.randn(2, 2, dtype=torch.half)\n y = torch.randn(2, 2, dtype=torch.float)\n self.run_test(InplaceAddModel(), (x, y), rtol=1e-2, atol=1e-2)\n self.run_test(InplaceMulModel(), (x, y), rtol=1e-2, atol=1e-2)\n\n @skipIfUnsupportedMinOpsetVersion(9)\n def test_inplace_with_loop(self):\n class M(torch.nn.Module):\n def forward(self, x):\n a = torch.ones(12,)\n for i in range(10):\n a.add_(torch.ones(12,))\n return a + x\n\n m = M()\n x = torch.randn(12,)\n self.run_test(torch.jit.script(M()), (x))\n\n @skipIfUnsupportedMinOpsetVersion(9)\n def test_inplace_with_loop_2(self):\n class M(torch.nn.Module):\n def forward(self, x):\n _bias = torch.ones(12,)\n a = torch.ones(12,) # used in loop, altered.\n a_ref = a # not used in loop, should be altered.\n b = x.clone() # used in loop, not be altered.\n b_ref = b # not used in loop, should not be altered.\n for i in range(10):\n if i == 3:\n for j in range(5):\n a += _bias\n _bias.add_(torch.ones(12,))\n b = b + torch.ones(12,)\n\n _bias.add_(torch.ones(12,))\n a += _bias\n # TODO: value for a_ref is incorrect.\n # a_ref += torch.ones(12,)\n b_ref += torch.ones(12,)\n return _bias + x, a, b, b_ref\n\n m = M()\n x = torch.zeros(12,)\n self.run_test(torch.jit.script(M()), (x))\n\n @skipIfUnsupportedMinOpsetVersion(11)\n def test_inplace_attr_with_loop(self):\n class M(torch.nn.Module):\n def __init__(self):\n super().__init__()\n self._bias = torch.arange(12,)\n\n def forward(self, x):\n self._bias = torch.arange(12,)\n for i in range(10):\n if i == 3:\n for j in range(5):\n self._bias += torch.arange(12,)\n return self._bias + x\n\n m = M()\n x = torch.zeros(12,)\n self.run_test(torch.jit.script(M()), (x))\n\n @skipIfUnsupportedMinOpsetVersion(11)\n def test_inplace_attr_copy_with_loop(self):\n class M(torch.nn.Module):\n def __init__(self):\n 
super().__init__()\n self._bias = torch.arange(12,)\n\n def forward(self, x):\n self._bias = torch.arange(12,)\n for i in range(10):\n if i == 3:\n for j in range(5):\n self._bias.copy_(torch.arange(12,))\n self._bias.copy_(self._bias + torch.arange(12,))\n\n self._bias.copy_(self._bias + torch.arange(12,))\n return self._bias + x\n\n m = M()\n x = torch.zeros(12,)\n self.run_test(torch.jit.script(M()), (x))\n\n @skipIfUnsupportedMinOpsetVersion(14) # Need onnx::identity of sequence in opset 14\n def test_inplace_sequence_with_loop(self):\n class M(torch.nn.Module):\n def process(self, beam_hyps: List[torch.Tensor], done: torch.Tensor, x):\n batch_size = x.shape[0]\n for i in range(batch_size):\n if done[i]:\n continue\n\n beam_idx = 0\n for _, token in enumerate(x[i]):\n beam_hyps.append(token)\n beam_idx += 1\n\n if beam_idx == 6:\n break\n\n done[i] = len(beam_hyps) > 4\n\n return beam_hyps, done\n\n def forward(self, x):\n beam_hyps: List[torch.Tensor] = []\n batch_size = x.shape[0]\n cur_len = 0\n max_len = x.shape[1]\n done = torch.zeros(batch_size, dtype=torch.bool)\n while cur_len < max_len:\n beam_hyps, done = self.process(beam_hyps, done, x[:, 0, :])\n cur_len = cur_len + 1\n\n return beam_hyps\n\n m = torch.jit.script(M())\n x = torch.randn(8, 4, 3)\n self.run_test(torch.jit.script(M()), (x))\n\n\n @disableScriptTest() # Sort with dynamic dim not supported in ONNX\n def test_sort(self):\n class SortModel(torch.nn.Module):\n def forward(self, x):\n out = []\n for i in range(-2, 2):\n out.append(torch.sort(x, dim=i, descending=True))\n return out\n\n x = torch.randn(3, 4)\n self.run_test(SortModel(), x)\n\n @skipIfUnsupportedMinOpsetVersion(11)\n @disableScriptTest() # Sort with dynamic dim not supported in ONNX\n def test_sort_ascending(self):\n class SortModel(torch.nn.Module):\n def forward(self, x):\n out = []\n for i in range(-2, 2):\n out.append(torch.sort(x, dim=i, descending=False))\n return out\n\n x = torch.randn(3, 4)\n self.run_test(SortModel(), x)\n\n @skipIfUnsupportedMinOpsetVersion(9)\n def test_masked_fill(self):\n class MaskedFillModel(torch.nn.Module):\n def forward(self, x):\n mask = torch.tensor([[0, 0, 1], [1, 1, 0]], dtype=torch.uint8)\n return x.masked_fill(mask, 2)\n\n x = torch.zeros(4, 2, 3, requires_grad=True)\n self.run_test(MaskedFillModel(), x)\n\n class MaskedFillModel2(torch.nn.Module):\n def forward(self, x):\n return x.masked_fill(x > 3, -1)\n\n x = torch.arange(16).view(2, 2, 4).to(torch.float32)\n self.run_test(MaskedFillModel2(), x)\n\n @skipIfUnsupportedMinOpsetVersion(9)\n def test_masked_fill_inplace(self):\n\n class MaskedFillModel(torch.jit.ScriptModule):\n @torch.jit.script_method\n def forward(self, x):\n mask = torch.tensor([[0, 0, 1], [1, 1, 0]], dtype=torch.uint8)\n x.masked_fill_(mask, 2)\n return x\n\n x = torch.zeros(4, 2, 3, requires_grad=True)\n self.run_test(MaskedFillModel(), x)\n\n class MaskedFillModel2(torch.jit.ScriptModule):\n @torch.jit.script_method\n def forward(self, x):\n x.masked_fill_(x > 3, -1)\n return x\n\n x = torch.arange(16).view(2, 2, 4).to(torch.float32)\n self.run_test(MaskedFillModel2(), x)\n\n @skipIfUnsupportedMinOpsetVersion(11)\n def test_masked_scatter(self):\n class MaskedScatterModel(torch.nn.Module):\n def forward(self, x):\n return torch.masked_scatter(x, x.ge(0.5), torch.ones(100, 100) * 5)\n\n x = torch.randn(3, 4, 5, requires_grad=True)\n self.run_test(MaskedScatterModel(), x)\n\n @skipIfUnsupportedMinOpsetVersion(11)\n def test_masked_select(self):\n class 
MaskedSelectModel(torch.nn.Module):\n def forward(self, x):\n return torch.masked_select(x, x.ge(0.5))\n\n x = torch.randn(3, 4, 5, requires_grad=True)\n self.run_test(MaskedSelectModel(), x)\n\n @skipIfUnsupportedMinOpsetVersion(11)\n def test_index_put_to_masked_fill(self):\n class MaskedFillModel(torch.nn.Module):\n def forward(self, input_mask, some_const):\n mask = input_mask.clone()\n mask[mask != some_const] = 1\n mask[mask == some_const] = 0\n return mask\n\n mask = torch.randn(2, 2, 2, requires_grad=True)\n constant = torch.tensor(5, dtype=torch.float)\n self.run_test(MaskedFillModel(), (mask, constant))\n\n @skipIfUnsupportedMinOpsetVersion(11)\n def test_index_put_to_masked_scatter(self):\n class MaskedScatterModel(torch.nn.Module):\n def forward(self, input_mask, some_const):\n mask = input_mask.clone()\n mask[mask != some_const] = torch.ones(8)\n return mask\n\n mask = torch.randn(2, 2, 2, requires_grad=True)\n constant = torch.tensor(5, dtype=torch.float)\n self.run_test(MaskedScatterModel(), (mask, constant))\n\n @skipIfUnsupportedMinOpsetVersion(9)\n def test_pixel_shuffle(self):\n class PixelShuffle(torch.nn.Module):\n def forward(self, x):\n return torch.pixel_shuffle(x, upscale_factor=2)\n\n x = torch.randn(2, 16, 4, 3, requires_grad=True)\n self.run_test(PixelShuffle(), x)\n\n @skipIfUnsupportedMinOpsetVersion(9)\n def test_scalar_type(self):\n class ArithmeticModel(torch.nn.Module):\n def forward(self, x):\n return x.size(0) * 2 * x, 2 - x\n\n x = torch.ones(2, 3, dtype=torch.float32)\n self.run_test(ArithmeticModel(), x)\n\n class ReciprocalModel(torch.nn.Module):\n def forward(self, x):\n return torch.reciprocal(x)\n\n x = torch.tensor([2.0, 4.0], dtype=torch.double)\n self.run_test(ReciprocalModel(), x)\n\n class ComparisonModel(torch.nn.Module):\n def forward(self, x, y):\n a = torch.tensor([12.0])\n return x.lt(1.5) & y.le(2) & x.le(1), x.gt(y), x.lt(y), a.ge(x.size(0))\n\n x = torch.ones(2, 3, dtype=torch.int32)\n y = torch.ones(2, 3, dtype=torch.float32)\n self.run_test(ComparisonModel(), (x, y))\n\n class MatMulModel(torch.nn.Module):\n def forward(self, x):\n return (torch.mm(x, x) + x + torch.mm(x, x) + x)\n\n x = torch.ones(3, 3)\n self.run_test(MatMulModel(), x)\n\n class AddMMModel(torch.nn.Module):\n def forward(self, x):\n return torch.mm(x, x) + x\n\n x = torch.ones(3, 3)\n self.run_test(AddMMModel(), x)\n\n class FullModel(torch.nn.Module):\n # add is used for exporting full\n def forward(self, x):\n return torch.full((3, 4), x)\n x = torch.tensor(12.)\n self.run_test(FullModel(), x)\n\n @skipIfUnsupportedMinOpsetVersion(9)\n def test_full_like(self):\n class FullLikeModel(torch.nn.Module):\n def forward(self, x):\n return torch.full_like(x, 4)\n\n x = torch.tensor(12)\n self.run_test(FullLikeModel(), x)\n\n @skipIfUnsupportedMinOpsetVersion(9)\n def test_full_like_value(self):\n class FullLikeModel(torch.nn.Module):\n def forward(self, x, y):\n out = y + 2\n return torch.full_like(x, out)\n\n x = torch.tensor(12)\n y = torch.tensor(2)\n self.run_test(FullLikeModel(), (x, y))\n\n def test_l1_norm(self):\n class NormModel(torch.nn.Module):\n def forward(self, x):\n return torch.norm(x, p=1, dim=-1, keepdim=False)\n\n x = torch.randn(4, 2, 3, requires_grad=True)\n self.run_test(NormModel(), x)\n\n def test_l2_norm(self):\n class NormModel(torch.nn.Module):\n def forward(self, x):\n return torch.norm(x, p=2, dim=-2, keepdim=False)\n\n x = torch.randn(4, 2, 3, requires_grad=True)\n self.run_test(NormModel(), x)\n\n def test_frobenius_norm(self):\n class 
NormModel(torch.nn.Module):\n def forward(self, x):\n return torch.norm(x, p=\"fro\", dim=0, keepdim=False)\n\n x = torch.randn(4, 2, 3, requires_grad=True)\n self.run_test(NormModel(), x)\n\n def test_frobenius_norm_keepdim(self):\n class NormModel(torch.nn.Module):\n def forward(self, x):\n return torch.norm(x, p=\"fro\", dim=(0, 1), keepdim=True)\n\n x = torch.randn(4, 2, 3, requires_grad=True)\n self.run_test(NormModel(), x)\n\n def test_unfold(self):\n class UnfoldModel(torch.nn.Module):\n def forward(self, x):\n return x.unfold(dimension=2, size=2, step=2)\n\n x = torch.randn(4, 2, 3, requires_grad=True)\n y = torch.randn(2, 1, 3, requires_grad=True)\n self.run_test(UnfoldModel(), x,\n dynamic_axes={'x': [0, 1]},\n input_names=['x'],\n test_with_inputs=[y])\n\n @skipIfONNXShapeInference(False)\n def test_unfold_infer_shape(self):\n class UnfoldModule(torch.jit.ScriptModule):\n def __init__(self):\n super(UnfoldModule, self).__init__()\n self.conv = torch.nn.Conv1d(3, 1, 3, stride=2)\n\n @torch.jit.script_method\n def forward(self, x):\n x = self.conv(x)\n return x.unfold(dimension=2, size=2, step=2)\n\n x = torch.randn(32, 3, 64)\n self.run_test(UnfoldModule(), x)\n\n @skipIfUnsupportedMinOpsetVersion(12)\n def test_unfold_dynamic_inputs(self):\n class UnfoldModel(torch.nn.Module):\n def forward(self, x):\n return x.unfold(dimension=2, size=x.shape[1], step=x.shape[1] - 1)\n\n x = torch.randn(4, 2, 4, requires_grad=True)\n self.run_test(UnfoldModel(), x)\n\n class UnfoldModel(torch.nn.Module):\n def forward(self, x):\n return x.unfold(dimension=2, size=x.shape[1], step=1)\n\n x = torch.randn(4, 2, 4, requires_grad=True)\n self.run_test(UnfoldModel(), x)\n\n @skipIfUnsupportedMinOpsetVersion(9) # MatMul long inputs is added in ONNX opset 9.\n def test_mv(self):\n class MatmulModel(torch.nn.Module):\n def forward(self, input, other):\n return torch.mv(input, other)\n\n x = torch.randn(4, 5, requires_grad=True)\n y = torch.randn(5, requires_grad=True)\n self.run_test(MatmulModel(), (x, y))\n\n x = torch.randint(10, (4, 5))\n y = torch.randint(10, (5, ))\n self.run_test(MatmulModel(), (x, y))\n\n def test_prelu(self):\n class PReluModel(torch.nn.Module):\n def __init__(self):\n super(PReluModel, self).__init__()\n self.prelu = torch.nn.PReLU()\n\n def forward(self, x):\n return self.prelu(x)\n\n x = torch.randn(2, 3, 4)\n y = torch.randn(2, 4, 5)\n self.run_test(PReluModel(), x, input_names=['x'],\n dynamic_axes={'x': [1, 2]},\n test_with_inputs=[y])\n\n def test_silu(self):\n class SiLUModel(torch.nn.Module):\n def __init__(self):\n super(SiLUModel, self).__init__()\n self.silu = torch.nn.SiLU()\n\n def forward(self, x):\n return self.silu(x)\n\n x = torch.randn(2, 3, 4)\n self.run_test(SiLUModel(), (x))\n\n def test_remainder(self):\n class RemainderModel(torch.nn.Module):\n def forward(self, input, other):\n return torch.remainder(input, other)\n\n x = torch.randn(4, 2, 3)\n y = torch.randn(1, 2, 1)\n self.run_test(RemainderModel(), (x, y))\n\n def test_remainder_scalar(self):\n class RemainderModel(torch.nn.Module):\n def forward(self, input):\n return torch.remainder(input, 2.55)\n\n x = torch.randint(10, (2, 3))\n self.run_test(RemainderModel(), x)\n\n @skipIfUnsupportedMinOpsetVersion(10)\n def test_fmod(self):\n class FModModel(torch.nn.Module):\n def forward(self, input, other):\n return torch.fmod(input, other)\n\n x = torch.randn(4, 2, 3)\n y = torch.randn(1, 2, 1)\n self.run_test(FModModel(), (x, y))\n\n @skipIfUnsupportedMinOpsetVersion(10)\n def test_fmod_scalar(self):\n 
class FModModel(torch.nn.Module):\n def forward(self, input):\n return torch.fmod(input, 2.55)\n\n x = torch.randint(10, (2, 3))\n self.run_test(FModModel(), x)\n\n @skipIfUnsupportedMinOpsetVersion(9)\n def test_glu(self):\n class GluModel(torch.nn.Module):\n def forward(self, x):\n return torch.nn.functional.glu(x)\n\n x = torch.randn(2, 4, 5, 6, requires_grad=True)\n self.run_test(GluModel(), x)\n\n @skipIfUnsupportedMinOpsetVersion(9)\n def test_gelu(self):\n class GeluModel(torch.nn.Module):\n def forward(self, x):\n return torch.nn.functional.gelu(x)\n\n x = torch.randn(2, 4, 5, 6, requires_grad=True)\n self.run_test(GeluModel(), x)\n\n def test_add_inplace(self):\n class InplaceAddModel(torch.nn.Module):\n def forward(self, x):\n x += 12\n return x\n\n x = torch.randn(4, 2, 3, requires_grad=True)\n self.run_test(InplaceAddModel(), x)\n\n def test_rsqrt(self):\n class RsqrtModel(torch.nn.Module):\n def forward(self, x):\n return x.rsqrt()\n\n x = torch.randn(4, 2, 3, requires_grad=True, dtype=torch.float64)\n self.run_test(RsqrtModel(), x)\n\n def test_rsqrt_zeros(self):\n class RsqrtModel(torch.nn.Module):\n def forward(self, x):\n return x.rsqrt()\n x = torch.zeros(4, 2, 3, requires_grad=True, dtype=torch.float64)\n self.run_test(RsqrtModel(), x)\n\n @skipIfUnsupportedMinOpsetVersion(11)\n def test_unique(self):\n class UniqueModel(torch.nn.Module):\n def forward(self, x):\n return torch.unique(x, sorted=True, return_inverse=False, return_counts=True)\n\n x = torch.tensor([1, 3, 2, 3], dtype=torch.long)\n self.run_test(UniqueModel(), x)\n\n @skipIfUnsupportedMinOpsetVersion(11)\n def test_unique_along_dim(self):\n class UniqueModel(torch.nn.Module):\n def forward(self, x):\n return torch.unique(x, dim=0, sorted=True, return_inverse=True, return_counts=False)\n\n x = torch.tensor([1, 3, 2, 3], dtype=torch.long)\n self.run_test(UniqueModel(), x)\n\n @skipIfUnsupportedMinOpsetVersion(11)\n def test_cumsum(self):\n class CumSum(torch.nn.Module):\n def forward(self, input):\n return torch.cumsum(input, dim=0)\n x = torch.randn(2, 3, 4)\n model = CumSum()\n self.run_test(model, x)\n\n @skipIfUnsupportedMinOpsetVersion(11)\n def test_cumsum_with_cast(self):\n class CumSum(torch.nn.Module):\n def forward(self, input):\n return torch.cumsum(input, dim=0, dtype=torch.float32)\n\n model = CumSum()\n x = torch.tensor([2, 3, 4], dtype=torch.int32)\n self.run_test(model, x)\n x = torch.tensor([False, True, True])\n self.run_test(model, x)\n\n @disableScriptTest() # error in propagate as assign input shape\n @skipIfUnsupportedMinOpsetVersion(10)\n def test_embedding_bag(self):\n model = torch.nn.EmbeddingBag(10, 5, mode='sum', scale_grad_by_freq=True)\n input = torch.randint(10, (7,))\n offset = torch.tensor([0, 2, 5, 6])\n self.run_test(model, (input, offset))\n\n model = torch.nn.EmbeddingBag(10, 5, mode='sum', include_last_offset=True)\n input = torch.randint(10, (7,))\n offset = torch.tensor([0, 2, 5, 6])\n self.run_test(model, (input, offset))\n\n model = torch.nn.EmbeddingBag(10, 5, mode='max')\n input = torch.randint(10, (7, 5))\n self.run_test(model, (input))\n\n @skipIfUnsupportedMinOpsetVersion(11)\n def test_embedding_bag_1d_per_sample_weights(self):\n class EmbeddingModel(torch.nn.Module):\n def forward(self, embedding_matrix, input, offset, weights):\n return torch.nn.functional.embedding_bag(input, embedding_matrix, offsets=offset,\n mode='sum', per_sample_weights=weights)\n\n model = EmbeddingModel()\n x = torch.randint(7, (6,))\n w = torch.randn(6, )\n offset = torch.tensor([0, 2, 
5])\n embedding_matrix = torch.rand(10, 15)\n self.run_test(model, (embedding_matrix, x, offset, w))\n\n @skipIfUnsupportedMinOpsetVersion(11)\n def test_embedding_bag_2d_per_sample_weights(self):\n class EmbeddingModel(torch.nn.Module):\n def forward(self, embedding_matrix, input, weights):\n return torch.nn.functional.embedding_bag(input, embedding_matrix,\n mode='sum', per_sample_weights=weights)\n\n embedding_matrix = torch.rand(10, 15)\n model = EmbeddingModel()\n x = torch.randint(7, (2, 3))\n w = torch.randn(2, 3)\n self.run_test(model, (embedding_matrix, x, w))\n\n @disableScriptTest() # scripting prim::Uninitialized, prim::dtype, prim::unchecked_cast\n @skipIfUnsupportedMinOpsetVersion(11)\n @unittest.skip(\"Due to ONNX Loop shape inference issue.\")\n def test_embedding_bag_dynamic_input(self):\n class EmbeddingModel1D(torch.nn.Module):\n def forward(self, embedding_matrix, input, weights, offsets):\n return torch.nn.functional.embedding_bag(input, embedding_matrix, offsets=offsets,\n mode='sum', per_sample_weights=weights)\n\n model = EmbeddingModel1D()\n x = torch.randint(7, (6,))\n w = torch.randn(6, )\n offsets = torch.tensor([0, 2, 5], dtype=torch.long)\n embedding_matrix = torch.rand(10, 15)\n x2 = torch.randint(7, (2,))\n w2 = torch.randn(2, )\n embedding_matrix2 = torch.rand(12, 25)\n offsets2 = torch.tensor([0, ], dtype=torch.long)\n self.run_test(model, (embedding_matrix, x, w, offsets),\n test_with_inputs=[(embedding_matrix2, x2, w2, offsets2)],\n input_names=['embedding_matrix', 'x', 'offsets', 'w'],\n dynamic_axes={'embedding_matrix': [0, 1], 'x': [0], 'offsets': [0], 'w': [0]})\n\n class EmbeddingModel2D(torch.nn.Module):\n def forward(self, embedding_matrix, input, weights):\n return torch.nn.functional.embedding_bag(input, embedding_matrix,\n mode='sum', per_sample_weights=weights)\n\n model = EmbeddingModel2D()\n x = torch.randint(7, (2, 3))\n w = torch.randn(2, 3)\n embedding_matrix = torch.rand(10, 15)\n x2 = torch.randint(7, (3, 5))\n w2 = torch.randn(3, 5)\n embedding_matrix2 = torch.rand(12, 25)\n self.run_test(model, (embedding_matrix, x, w),\n test_with_inputs=[(embedding_matrix2, x2, w2)],\n input_names=['embedding_matrix', 'x', 'w'],\n dynamic_axes={'embedding_matrix': [0, 1], 'x': [0, 1], 'w': [0, 1]})\n\n @skipIfUnsupportedMinOpsetVersion(8)\n def test_meshgrid(self):\n class Meshgrid(torch.nn.Module):\n def forward(self, x, y, z):\n output1, output2, output3 = torch.meshgrid(x, y, z)\n return output1, output2, output3\n\n x = torch.randn(3, requires_grad=True)\n y = torch.zeros(4, requires_grad=True)\n z = torch.randn(5, requires_grad=True)\n self.run_test(Meshgrid(), (x, y, z))\n\n @skipIfUnsupportedMinOpsetVersion(8)\n def test_meshgrid_scalar(self):\n class Meshgrid(torch.nn.Module):\n def forward(self, x, y, z):\n output1, output2, output3 = torch.meshgrid(x, y, z)\n return output1, output2, output3\n\n x = torch.ones(3, requires_grad=True)\n y = torch.zeros(4, requires_grad=True)\n z = torch.tensor(2.0)\n self.run_test(Meshgrid(), (x, y, z))\n\n def test_baddbmm(self):\n class MyModule(torch.nn.Module):\n def forward(self, input, batch1, batch2):\n return torch.baddbmm(input, batch1, batch2, alpha=torch.tensor(5), beta=3.5)\n x = torch.randn(10, 3, 5)\n batch1 = torch.randn(10, 3, 4)\n batch2 = torch.randn(10, 4, 5)\n model = MyModule()\n self.run_test(model, (x, batch1, batch2))\n\n def test_baddbmm_dynamic(self):\n class MyModule(torch.nn.Module):\n def forward(self, input, batch1, batch2, alpha, beta):\n return torch.baddbmm(input, batch1, 
batch2, alpha=alpha, beta=beta)\n x = torch.randn(10, 3, 5)\n batch1 = torch.randn(10, 3, 4)\n batch2 = torch.randn(10, 4, 5)\n alpha = torch.tensor(5)\n beta = torch.tensor(3.5)\n model = MyModule()\n self.run_test(model, (x, batch1, batch2, alpha, beta))\n\n def test_numel(self):\n class MyModule(torch.jit.ScriptModule):\n @torch.jit.script_method\n def forward(self, input):\n return input.numel() * input\n\n x = torch.randn(2, 3, 5)\n model = MyModule()\n self.run_test(model, (x,))\n\n def test_numel_empty(self):\n class MyModule(torch.jit.ScriptModule):\n @torch.jit.script_method\n def forward(self, input):\n return input.numel() * input\n\n x = torch.randn(0)\n model = MyModule()\n self.run_test(model, (x,))\n\n def test_dtype(self):\n class MyModel(torch.jit.ScriptModule):\n @torch.jit.script_method\n def forward(self, input, other):\n return input.to(dtype=other.dtype) + other\n\n x = torch.randn(2, 3)\n y = torch.randn(2, 3)\n self.run_test(MyModel(), (x, y))\n\n def test_dtype_eq(self):\n class MyModel(torch.jit.ScriptModule):\n @torch.jit.script_method\n def forward(self, input, other):\n if input.dtype == other.dtype:\n return input + other\n return input\n\n x = torch.randn(2, 3)\n y = torch.randn(2, 3)\n self.run_test(MyModel(), (x, y))\n\n def test_cast_to(self):\n class MyModule(torch.jit.ScriptModule):\n @torch.jit.script_method\n def forward(self, input, other):\n return input.to(other) + other\n\n x = torch.randn(2, 3, 4)\n y = torch.tensor([1], dtype=torch.int64)\n model = MyModule()\n self.run_test(model, (x, y))\n\n def test_cast_to_bool(self):\n class MyModule(torch.nn.Module):\n def forward(self, input, other):\n return torch.cat((input.to(other), other), 0)\n\n x = torch.randn(2, 3, 4)\n y = torch.zeros([2, 3, 4], dtype=torch.bool)\n model = MyModule()\n self.run_test(model, (x, y))\n\n @skipIfUnsupportedMinOpsetVersion(9)\n def test_ones_bool(self):\n class MyModule(torch.nn.Module):\n def forward(self, input):\n true = torch.ones(input.shape, dtype=torch.bool)\n return input.to(true) & true\n\n x = torch.randn(2, 3, 4)\n model = MyModule()\n self.run_test(model, x)\n\n def test_log(self):\n class Log(torch.nn.Module):\n def forward(self, input):\n return torch.log(input)\n x = torch.rand(2, 3, 4)\n model = Log()\n self.run_test(model, x)\n\n def test_log1p(self):\n class Log1p(torch.nn.Module):\n def forward(self, input):\n return torch.log1p(input)\n x = torch.rand(2, 3, 4)\n model = Log1p()\n self.run_test(model, x)\n\n @skipIfUnsupportedMinOpsetVersion(11)\n def test_round(self):\n class Round(torch.nn.Module):\n def forward(self, x):\n return torch.round(x)\n\n x = torch.tensor([0.9920, -1.0362, -1.5000, 3.5000], requires_grad=True)\n self.run_test(Round(), x)\n\n def test_constant_pad(self):\n model = torch.nn.ConstantPad1d(2, 3.5)\n x = torch.randn(2, 4, 4)\n self.run_test(model, x)\n\n model = torch.nn.ConstantPad2d((3, 0, 2, 1), 3.5)\n x = torch.randn(2, 2, 4, 4)\n self.run_test(model, x)\n\n # Dynamic padding is added in opset 11\n @skipIfUnsupportedMinOpsetVersion(11)\n @disableScriptTest() # Functional module not scriptable\n def test_pad_types(self):\n # Test for different pad integer types\n class Pad(torch.nn.Module):\n def forward(self, x, pad: List[int]):\n return torch.nn.functional.pad(x, pad)\n\n x = torch.randn(2, 2, 4, 4)\n y = pad = (torch.tensor(2, dtype=torch.int32), torch.tensor(4, dtype=torch.int32))\n self.run_test(Pad(), (x, y))\n\n y = pad = (torch.tensor(2, dtype=torch.int64), torch.tensor(4, dtype=torch.int64))\n self.run_test(Pad(), 
(x, y))\n\n @skipIfUnsupportedMaxOpsetVersion(10)\n def test_unsupported_pad(self):\n class Pad(torch.nn.Module):\n def forward(self, x, pad):\n return torch.nn.functional.pad(x, pad)\n\n def run():\n x = torch.randn(2, 2, 4, 4)\n y = pad = (torch.tensor(2, dtype=torch.int32), torch.tensor(4, dtype=torch.int32))\n p = Pad()\n f = io.BytesIO()\n torch.onnx._export(p, (x, y), f)\n\n with self.assertRaises(RuntimeError) as cm:\n run()\n\n the_exception = cm.exception\n self.assertEqual('Unsupported: ONNX export of Pad in opset 9. The sizes of the padding must be constant. ' +\n 'Please try opset version 11.', the_exception.args[0])\n\n @skipIfUnsupportedMinOpsetVersion(9)\n def test_if_fold(self):\n class IfFoldModel(torch.nn.Module):\n def forward(self, y):\n if y.dim() == 2:\n y = y + 4\n y = y + 2\n else:\n y = y - 1\n return y\n x = torch.ones((3, 4), dtype=torch.int)\n self.run_test(IfFoldModel(), x)\n\n class IfFoldModel(torch.nn.Module):\n def forward(self, y):\n if y.numel() > 1:\n y = y + 4\n else:\n y = y + 2\n return y\n\n x = torch.ones((3, 4), dtype=torch.int)\n self.run_test(IfFoldModel(), x)\n\n class IfFoldModel(torch.nn.Module):\n def forward(self, y):\n if y.dim() != 3:\n y = y + 4\n y = y + 2\n else:\n return y\n return y\n\n x = torch.ones((3, 4), dtype=torch.int)\n self.run_test(IfFoldModel(), x)\n\n class IfFoldModel(torch.nn.Module):\n def forward(self, y):\n if y.dim() >= 1:\n y = y + 4\n else:\n y = y - 1\n return y\n\n x = torch.ones((3, 4), dtype=torch.int)\n self.run_test(IfFoldModel(), x)\n\n class IfFoldModel(torch.nn.Module):\n def forward(self, y):\n if y.dim() <= 1:\n y = y + 4\n else:\n y = y + 2\n return y\n\n x = torch.ones((3, 4), dtype=torch.int)\n self.run_test(IfFoldModel(), x)\n\n class IfFoldModel(torch.nn.Module):\n def forward(self, y):\n if y.dim() < 3 and y.dtype == torch.int:\n y = y + 4\n y = y + 2\n else:\n return y\n return y\n\n x = torch.ones((3, 4), dtype=torch.int)\n self.run_test(IfFoldModel(), x)\n\n class IfFoldModel(torch.nn.Module):\n def forward(self, y):\n if y.dim() == 3 and y.dtype == torch.int:\n y = y + 4\n y = y + 2\n else:\n y = y + 1\n return y\n\n x = torch.ones((3, 4), dtype=torch.int)\n self.run_test(IfFoldModel(), x)\n\n class IfFoldModel(torch.nn.Module):\n def forward(self, y):\n if y.numel() != 0 and y.dim() == 2:\n y = y + 4\n y = y + 2\n else:\n return y\n return y\n\n x = torch.ones((3, 4), dtype=torch.int)\n self.run_test(IfFoldModel(), x)\n\n class IfFoldModel(torch.nn.Module):\n def forward(self, x, y):\n if x.numel() == y.numel():\n y = x + y\n else:\n y = y - x\n return y\n\n x = torch.ones((3, 4), dtype=torch.int)\n y = torch.ones((3, 4), dtype=torch.int)\n self.run_test(IfFoldModel(), (x, y))\n\n class IfFoldModel(torch.nn.Module):\n def forward(self, x, y):\n if x.numel() != y.numel():\n y = x + y\n else:\n y = y - x\n return y\n\n x = torch.ones((3, 4), dtype=torch.int)\n y = torch.ones((3, 4), dtype=torch.int)\n self.run_test(IfFoldModel(), (x, y))\n\n @skipIfUnsupportedMinOpsetVersion(11)\n @skipIfONNXShapeInference(False)\n def test_uninitialized(self):\n class UninitializedModel(torch.nn.Module):\n def forward(self, y):\n if y.shape[1] < 5:\n if y.size(0) == 1:\n y = y + 4\n else:\n return y\n return y\n\n x = torch.ones((3, 4), dtype=torch.int)\n self.run_test(UninitializedModel(), x)\n\n @skipIfUnsupportedMinOpsetVersion(11)\n @skipIfONNXShapeInference(False)\n def test_uninitialized_dynamic(self):\n class UninitializedModel(torch.nn.Module):\n def forward(self, y):\n if y.shape[1] < 5:\n if y.size(0) 
== 1:\n y = y + 4\n else:\n return y\n return y\n\n x = torch.ones((3, 4), dtype=torch.int)\n y = torch.ones((6, 7), dtype=torch.int)\n self.run_test(UninitializedModel(), x, test_with_inputs=[y],\n input_names=['input_1'],\n dynamic_axes={'input_1': [0, 1]})\n\n def test_reflection_pad(self):\n model = torch.nn.ReflectionPad1d(2)\n x = torch.randn(2, 4, 4)\n self.run_test(model, x)\n\n model = torch.nn.ReflectionPad2d((3, 0, 2, 1))\n x = torch.randn(2, 2, 4, 4)\n self.run_test(model, x)\n\n def test_replication_pad(self):\n model = torch.nn.ReplicationPad1d(2)\n x = torch.randn(2, 4, 4)\n self.run_test(model, x)\n\n model = torch.nn.ReplicationPad2d((3, 0, 2, 1))\n x = torch.randn(2, 2, 4, 4)\n self.run_test(model, x)\n\n @skipIfUnsupportedMinOpsetVersion(11)\n def test_im2col(self):\n class Unfold(torch.nn.Module):\n def forward(self, input):\n return torch.nn.functional.unfold(input, kernel_size=(10, 15), dilation=2, padding=5, stride=3), \\\n torch.nn.functional.unfold(input, kernel_size=(2, 2), dilation=1, padding=0, stride=3), \\\n torch.nn.functional.unfold(input, kernel_size=(1, 1), dilation=5, padding=2, stride=3)\n\n x = torch.rand(1, 1, 200, 100)\n self.run_test(Unfold(), x)\n\n @skipIfNoLapack\n @skipIfUnsupportedMinOpsetVersion(11)\n def test_det(self):\n class Det(torch.nn.Module):\n def forward(self, x):\n return torch.linalg.det(x)\n\n x = torch.randn(2, 3, 5, 5)\n self.run_test(Det(), x)\n\n # This test checks output scalar type in the ONNX graph should not be null\n # https://github.com/pytorch/pytorch/issues/28607\n @skipIfUnsupportedMinOpsetVersion(10)\n def test_trace_script(self):\n @torch.jit.script\n def center_slice_helper(input, h_offset):\n return input[:, h_offset:]\n\n class CenterCrop(torch.nn.Module):\n def forward(self, input):\n return center_slice_helper(input, torch.tensor(input.shape[1] - 1))\n\n x = torch.randn(3, 4)\n self.run_test(CenterCrop(), x)\n\n @skipIfNoLapack\n @skipIfUnsupportedMinOpsetVersion(11)\n def test_logdet(self):\n class LogDet(torch.nn.Module):\n def forward(self, x):\n return torch.logdet(x)\n\n x = torch.randn(2, 3, 5, 5)\n self.run_test(LogDet(), x)\n\n def test_dim(self):\n class DimModel(torch.jit.ScriptModule):\n @torch.jit.script_method\n def forward(self, input):\n out = input * 2\n out *= out.dim()\n return out\n\n empty_input = torch.randn(0, requires_grad=True)\n multi_dim_input = torch.randn(1, 2, 3, requires_grad=True)\n self.run_test(DimModel(), empty_input)\n self.run_test(DimModel(), multi_dim_input)\n\n @skipIfUnsupportedMinOpsetVersion(12)\n def test_outer(self):\n class Outer(torch.nn.Module):\n def forward(self, x, y):\n return torch.outer(x, y)\n\n x = torch.arange(1, 5)\n y = torch.arange(1, 4)\n self.run_test(Outer(), input=(x, y))\n\n x = torch.arange(1, 6).to(dtype=torch.float32)\n y = torch.arange(1, 4).to(dtype=torch.long)\n self.run_test(Outer(), input=(x, y))\n\n x = torch.arange(2, 5).to(dtype=torch.float32)\n y = torch.arange(2, 4).to(dtype=torch.float64)\n self.run_test(Outer(), input=(x, y))\n\n x = torch.arange(3, 6).to(dtype=torch.int32)\n y = torch.arange(4, 7).to(dtype=torch.long)\n self.run_test(Outer(), input=(x, y))\n\n @skipIfUnsupportedMinOpsetVersion(12)\n def test_einsum(self):\n class EinsumModelBatchDiagonal(torch.nn.Module):\n def forward(self, x):\n eqn = '...ii ->...i'\n return torch.einsum(eqn, x)\n\n x = torch.randn(3, 5, 5)\n self.run_test(EinsumModelBatchDiagonal(), input=(x,))\n\n class EinsumModelBatchMatmul(torch.nn.Module):\n def forward(self, x, y):\n eqn = 'bij, bjk -> 
bik'\n return torch.einsum(eqn, x, y)\n\n x = torch.randn(5, 2, 3)\n y = torch.randn(5, 3, 4)\n self.run_test(EinsumModelBatchMatmul(), input=(x, y))\n\n class EinsumModelInnerProd(torch.nn.Module):\n def forward(self, x, y):\n eqn = 'i,i'\n return torch.einsum(eqn, x, y)\n\n x = torch.randn(5)\n y = torch.randn(5)\n self.run_test(EinsumModelInnerProd(), input=(x, y))\n\n class EinsumModelTranspose(torch.nn.Module):\n def forward(self, x):\n eqn = 'ij->ji'\n return torch.einsum(eqn, x)\n\n x = torch.randn(3, 4)\n self.run_test(EinsumModelTranspose(), input=(x,))\n\n @skipIfUnsupportedMinOpsetVersion(12)\n def test_crossentropyloss(self):\n for ignore_index in [-100, 1]:\n x = torch.randn(3, 5)\n y = torch.empty(3, dtype=torch.long).random_(5)\n y[y == 1] = ignore_index\n\n self._crossentropyloss(x, y, ignore_index)\n\n x = torch.randn(3, 5, 2)\n y = torch.empty(3, 2, dtype=torch.long).random_(5)\n y[y == 1] = ignore_index\n self._crossentropyloss(x, y, ignore_index)\n\n x = torch.randn(3, 5, 2, 7)\n y = torch.empty(3, 2, 7, dtype=torch.long).random_(5)\n y[y == 1] = ignore_index\n self._crossentropyloss(x, y, ignore_index)\n\n def _crossentropyloss(self, x, y, ignore_index):\n class CrossEntropyLossNone(torch.nn.Module):\n def __init__(self, ignore_index):\n super(CrossEntropyLossNone, self).__init__()\n if ignore_index == -100:\n self.loss = torch.nn.CrossEntropyLoss(reduction='none')\n else:\n self.loss = torch.nn.CrossEntropyLoss(reduction='none', ignore_index=ignore_index)\n\n def forward(self, input, target):\n return self.loss(input, target)\n\n self.run_test(CrossEntropyLossNone(ignore_index), input=(x, y))\n\n class CrossEntropyLossNoneWeight(torch.nn.Module):\n def __init__(self, ignore_index):\n super(CrossEntropyLossNoneWeight, self).__init__()\n if ignore_index == -100:\n self.loss = torch.nn.CrossEntropyLoss(reduction='none', weight=torch.randn(5))\n else:\n self.loss = torch.nn.CrossEntropyLoss(reduction='none', weight=torch.randn(5), ignore_index=ignore_index)\n\n def forward(self, input, target):\n return self.loss(input, target)\n\n self.run_test(CrossEntropyLossNoneWeight(ignore_index), input=(x, y))\n\n class CrossEntropyLossSum(torch.nn.Module):\n def __init__(self, ignore_index):\n super(CrossEntropyLossSum, self).__init__()\n if ignore_index == -100:\n self.loss = torch.nn.CrossEntropyLoss(reduction='sum')\n else:\n self.loss = torch.nn.CrossEntropyLoss(reduction='sum', ignore_index=ignore_index)\n\n def forward(self, input, target):\n return self.loss(input, target)\n\n self.run_test(CrossEntropyLossSum(ignore_index), input=(x, y))\n\n class CrossEntropyLossSumWeight(torch.nn.Module):\n def __init__(self, ignore_index):\n super(CrossEntropyLossSumWeight, self).__init__()\n if ignore_index == -100:\n self.loss = torch.nn.CrossEntropyLoss(reduction='sum', weight=torch.randn(5))\n else:\n self.loss = torch.nn.CrossEntropyLoss(reduction='sum', weight=torch.randn(5), ignore_index=ignore_index)\n\n def forward(self, input, target):\n return self.loss(input, target)\n\n self.run_test(CrossEntropyLossSumWeight(ignore_index), input=(x, y))\n\n class CrossEntropyLossMean(torch.nn.Module):\n def __init__(self, ignore_index):\n super(CrossEntropyLossMean, self).__init__()\n if ignore_index == -100:\n self.loss = torch.nn.CrossEntropyLoss()\n else:\n self.loss = torch.nn.CrossEntropyLoss(ignore_index=ignore_index)\n\n def forward(self, input, target):\n return self.loss(input, target)\n\n self.run_test(CrossEntropyLossMean(ignore_index), input=(x, y))\n\n class 
CrossEntropyLossMeanWeight(torch.nn.Module):\n def __init__(self, ignore_index):\n super(CrossEntropyLossMeanWeight, self).__init__()\n if ignore_index == -100:\n self.loss = torch.nn.CrossEntropyLoss(weight=torch.randn(5))\n else:\n self.loss = torch.nn.CrossEntropyLoss(weight=torch.randn(5), ignore_index=ignore_index)\n\n def forward(self, input, target):\n return self.loss(input, target)\n\n self.run_test(CrossEntropyLossMeanWeight(ignore_index), input=(x, y))\n\n @skipIfUnsupportedMinOpsetVersion(9)\n def test_kldiv_loss(self):\n\n x = torch.randn(5)\n y = torch.randn(5)\n self._kldiv_loss(x, y)\n\n x = torch.randn(2, 3, 5)\n y = torch.randn(2, 3, 5)\n self._kldiv_loss(x, y)\n\n x = torch.randn(2, 3, 5, 7)\n y = torch.randn(2, 3, 5, 7)\n self._kldiv_loss(x, y)\n\n def _kldiv_loss(self, x, y):\n class KLDivLossNone(torch.nn.Module):\n def __init__(self):\n super(KLDivLossNone, self).__init__()\n self.loss = torch.nn.KLDivLoss(reduction='none', log_target=True)\n\n def forward(self, input, target):\n return self.loss(input, target)\n\n self.run_test(KLDivLossNone(), input=(x, y))\n\n class KLDivLossMean(torch.nn.Module):\n def __init__(self):\n super(KLDivLossMean, self).__init__()\n self.loss = torch.nn.KLDivLoss(reduction='mean', log_target=False)\n\n def forward(self, input, target):\n return self.loss(input, target)\n\n self.run_test(KLDivLossMean(), input=(x, y))\n\n class KLDivLossSum(torch.nn.Module):\n def __init__(self):\n super(KLDivLossSum, self).__init__()\n self.loss = torch.nn.KLDivLoss(reduction='sum', log_target=True)\n\n def forward(self, input, target):\n return self.loss(input, target)\n\n self.run_test(KLDivLossSum(), input=(x, y))\n\n class KLDivLossBatchMean(torch.nn.Module):\n def __init__(self):\n super(KLDivLossBatchMean, self).__init__()\n self.loss = torch.nn.KLDivLoss(reduction='batchmean', log_target=False)\n\n def forward(self, input, target):\n return self.loss(input, target)\n\n self.run_test(KLDivLossBatchMean(), input=(x, y))\n\n class KLDivLossMiniBatchMean(torch.nn.Module):\n def __init__(self):\n super(KLDivLossMiniBatchMean, self).__init__()\n self.loss = torch.nn.KLDivLoss(reduction='batchmean', size_average=False, log_target=True)\n\n def forward(self, input, target):\n return self.loss(input, target)\n\n self.run_test(KLDivLossMiniBatchMean(), input=(x, y))\n\n @skipIfUnsupportedMinOpsetVersion(12)\n def test_nllloss(self):\n class NLLModel(torch.nn.Module):\n def __init__(self):\n super(NLLModel, self).__init__()\n self.loss = torch.nn.NLLLoss(reduction='none')\n self.m = torch.nn.LogSoftmax(dim=1)\n\n def forward(self, input, target):\n output = self.loss(self.m(2 * input), target)\n return output\n\n N, C = 5, 4\n input = torch.randn(N, 16)\n target = torch.empty(N, dtype=torch.long).random_(0, C)\n\n # using test data containing default ignore_index=-100\n target[target == 1] = -100\n self.run_test(NLLModel(), (input, target))\n\n @skipIfUnsupportedMinOpsetVersion(12)\n def test_nllloss_2d_none(self):\n class NLLModel(torch.nn.Module):\n def __init__(self):\n super(NLLModel, self).__init__()\n self.loss = torch.nn.NLLLoss(reduction='none')\n self.conv = torch.nn.Conv2d(16, C, (3, 3))\n self.m = torch.nn.LogSoftmax(dim=1)\n\n def forward(self, input, target):\n output = self.loss(self.m(self.conv(input)), target)\n return output\n\n N, C = 5, 4\n input = torch.randn(N, 16, 10, 10)\n target = torch.empty(N, 8, 8, dtype=torch.long).random_(0, C)\n\n # using test data containing default ignore_index=-100\n target[target == 1] = -100\n 
self.run_test(NLLModel(), (input, target))\n\n @skipIfUnsupportedMinOpsetVersion(12)\n def test_nllloss_2d_mean(self):\n class NLLModel(torch.nn.Module):\n def __init__(self):\n super(NLLModel, self).__init__()\n self.loss = torch.nn.NLLLoss(reduction='mean')\n self.conv = torch.nn.Conv2d(16, C, (3, 3))\n self.m = torch.nn.LogSoftmax(dim=1)\n\n def forward(self, input, target):\n output = self.loss(self.m(self.conv(input)), target)\n return output\n\n N, C = 5, 4\n input = torch.randn(N, 16, 10, 10)\n target = torch.empty(N, 8, 8, dtype=torch.long).random_(0, C)\n\n # using test data containing default ignore_index=-100\n target[target == 1] = -100\n self.run_test(NLLModel(), (input, target))\n\n @skipIfUnsupportedMinOpsetVersion(12)\n def test_nllloss_2d_sum(self):\n class NLLModel(torch.nn.Module):\n def __init__(self):\n super(NLLModel, self).__init__()\n self.loss = torch.nn.NLLLoss(reduction='sum')\n self.conv = torch.nn.Conv2d(16, C, (3, 3))\n self.m = torch.nn.LogSoftmax(dim=1)\n\n def forward(self, input, target):\n output = self.loss(self.m(self.conv(input)), target)\n return output\n\n N, C = 5, 4\n input = torch.randn(N, 16, 10, 10)\n target = torch.empty(N, 8, 8, dtype=torch.long).random_(0, C)\n\n # using test data containing default ignore_index=-100\n target[target == 1] = -100\n self.run_test(NLLModel(), (input, target))\n\n @skipIfUnsupportedMinOpsetVersion(12)\n def test_nllloss_2d_mean_weights(self):\n class NLLModel(torch.nn.Module):\n def __init__(self):\n super(NLLModel, self).__init__()\n self.loss = torch.nn.NLLLoss(reduction='mean', weight=torch.randn(C))\n self.conv = torch.nn.Conv2d(16, C, (3, 3))\n self.m = torch.nn.LogSoftmax(dim=1)\n\n def forward(self, input, target):\n output = self.loss(self.m(self.conv(input)), target)\n return output\n\n N, C = 5, 4\n input = torch.randn(N, 16, 10, 10)\n target = torch.empty(N, 8, 8, dtype=torch.long).random_(0, C)\n\n # using test data containing default ignore_index=-100\n target[target == 1] = -100\n self.run_test(NLLModel(), (input, target))\n\n @skipIfUnsupportedMinOpsetVersion(12)\n def test_nllloss_2d_mean_ignore_index(self):\n class NLLModel(torch.nn.Module):\n def __init__(self):\n super(NLLModel, self).__init__()\n self.loss = torch.nn.NLLLoss(reduction='mean', ignore_index=1)\n self.conv = torch.nn.Conv2d(16, C, (3, 3))\n self.m = torch.nn.LogSoftmax(dim=1)\n\n def forward(self, input, target):\n output = self.loss(self.m(self.conv(input)), target)\n return output\n\n N, C = 5, 4\n input = torch.randn(N, 16, 10, 10)\n target = torch.empty(N, 8, 8, dtype=torch.long).random_(0, C)\n self.run_test(NLLModel(), (input, target))\n\n @skipIfUnsupportedMinOpsetVersion(12)\n def test_nllloss_2d_mean_ignore_index_weights(self):\n class NLLModel(torch.nn.Module):\n def __init__(self):\n super(NLLModel, self).__init__()\n self.loss = torch.nn.NLLLoss(reduction='mean', weight=torch.randn(C), ignore_index=1)\n self.conv = torch.nn.Conv2d(16, C, (3, 3))\n self.m = torch.nn.LogSoftmax(dim=1)\n\n def forward(self, input, target):\n output = self.loss(self.m(self.conv(input)), target)\n return output\n\n N, C = 5, 4\n input = torch.randn(N, 16, 10, 10)\n target = torch.empty(N, 8, 8, dtype=torch.long).random_(0, C)\n self.run_test(NLLModel(), (input, target))\n\n @skipIfUnsupportedMinOpsetVersion(12)\n def test_binary_cross_entropy_with_logits(self):\n x = torch.randn(5)\n y = torch.empty(5).random_(2)\n self._bce_logits(x, y)\n\n x = torch.randn(3, 4)\n y = torch.empty(3, 4).random_(2)\n weight = torch.tensor([3])\n 
self._bce_logits_wegiht(x, y, weight)\n\n x = torch.randn(3, 2, 4)\n y = torch.empty(3, 2, 4).random_(2)\n pos_weight = torch.empty([2, 4]).random_(2)\n self._bce_logits_posweight(x, y, pos_weight)\n\n x = torch.randn(3, 3, 4)\n y = torch.empty(3, 3, 4).random_(2)\n weight = torch.tensor([3])\n pos_weight = torch.empty([3, 4]).random_(2)\n self._bce_logits_loss_weight_posweight(x, y, weight, pos_weight)\n\n def _bce_logits(self, x, y):\n class BCEWithLogitsLossNone(torch.nn.Module):\n def forward(self, input, target):\n return torch.nn.functional.binary_cross_entropy_with_logits(input, target, reduction='none')\n\n self.run_test(BCEWithLogitsLossNone(), input=(x, y))\n\n class BCEWithLogitsLossMean(torch.nn.Module):\n def forward(self, input, target):\n return torch.nn.functional.binary_cross_entropy_with_logits(input, target, reduction='mean')\n\n self.run_test(BCEWithLogitsLossMean(), input=(x, y))\n\n class BCEWithLogitsLossSum(torch.nn.Module):\n def forward(self, input, target):\n return torch.nn.functional.binary_cross_entropy_with_logits(input, target, reduction='sum')\n\n self.run_test(BCEWithLogitsLossSum(), input=(x, y))\n\n def _bce_logits_wegiht(self, x, y, weight):\n class BCEWithLogitsLossWegihtNone(torch.nn.Module):\n def forward(self, input, target, weight):\n return torch.nn.functional.binary_cross_entropy_with_logits(input, target, weight=weight, reduction='none')\n self.run_test(BCEWithLogitsLossWegihtNone(), input=(x, y, weight))\n\n class BCEWithLogitsLossWegihtMean(torch.nn.Module):\n def forward(self, input, target, weight):\n return torch.nn.functional.binary_cross_entropy_with_logits(input, target, weight=weight, reduction='mean')\n\n self.run_test(BCEWithLogitsLossWegihtMean(), input=(x, y, weight))\n\n class BCEWithLogitsLossWegihtSum(torch.nn.Module):\n def forward(self, input, target, weight):\n return torch.nn.functional.binary_cross_entropy_with_logits(input, target, weight=weight, reduction='sum')\n\n self.run_test(BCEWithLogitsLossWegihtSum(), input=(x, y, weight))\n\n def _bce_logits_posweight(self, x, y, pos_weight):\n class BCEWithLogitsLossPosWegihtNone(torch.nn.Module):\n def forward(self, input, target, pos_weight):\n return torch.nn.functional.binary_cross_entropy_with_logits(input, target, pos_weight=pos_weight, reduction='none')\n self.run_test(BCEWithLogitsLossPosWegihtNone(), input=(x, y, pos_weight))\n\n class BCEWithLogitsLossPosWegihtMean(torch.nn.Module):\n def forward(self, input, target, pos_weight):\n return torch.nn.functional.binary_cross_entropy_with_logits(input, target, pos_weight=pos_weight, reduction='mean')\n\n self.run_test(BCEWithLogitsLossPosWegihtMean(), input=(x, y, pos_weight))\n\n class BCEWithLogitsLossPosWegihtSum(torch.nn.Module):\n def forward(self, input, target, pos_weight):\n return torch.nn.functional.binary_cross_entropy_with_logits(input, target, pos_weight=pos_weight, reduction='sum')\n\n self.run_test(BCEWithLogitsLossPosWegihtSum(), input=(x, y, pos_weight))\n\n def _bce_logits_loss_weight_posweight(self, x, y, weight, pos_weight):\n class BCEWithLogitsLossWeightPosweightNone(torch.nn.Module):\n def forward(self, input, target, weight, pos_weight):\n return torch.nn.functional.binary_cross_entropy_with_logits(input, target, weight=weight,\n pos_weight=pos_weight, reduction='none')\n\n self.run_test(BCEWithLogitsLossWeightPosweightNone(), input=(x, y, weight, pos_weight))\n\n class BCEWithLogitsLossWeightPosweightMean(torch.nn.Module):\n def forward(self, input, target, weight, pos_weight):\n return 
torch.nn.functional.binary_cross_entropy_with_logits(input, target, weight=weight,\n pos_weight=pos_weight, reduction='mean')\n\n self.run_test(BCEWithLogitsLossWeightPosweightMean(), input=(x, y, weight, pos_weight))\n\n class BCEWithLogitsLossWeightPosweightSum(torch.nn.Module):\n def forward(self, input, target, weight, pos_weight):\n return torch.nn.functional.binary_cross_entropy_with_logits(input, target, weight=weight,\n pos_weight=pos_weight, reduction='sum')\n\n self.run_test(BCEWithLogitsLossWeightPosweightSum(), input=(x, y, weight, pos_weight))\n\n\n def test_torch_mm(self):\n class M(torch.nn.Module):\n def forward(self, mat1, mat2):\n mm = torch.mm(mat1, mat2)\n return mm\n\n mat1 = torch.randn(2, 3)\n mat2 = torch.randn(3, 3)\n self.run_test(M(), input=(mat1, mat2))\n\n @skipIfUnsupportedMinOpsetVersion(9) # Because where op is not supported for opset < 9.\n def test_where_with_bool_tensor(self):\n class M(torch.nn.Module):\n def forward(self, mat1, mat2):\n out = torch.where(mat1 > 0, mat1, mat2)\n return out\n\n mat1 = torch.randn(2, 3)\n mat2 = torch.ones(2, 3)\n self.run_test(M(), input=(mat1, mat2))\n\n @skipIfUnsupportedMinOpsetVersion(9) # Because where op is not supported for opset < 9.\n def test_where_with_byte_tensor(self):\n class M(torch.nn.Module):\n def forward(self, cond, mat1, mat2):\n out = torch.where(cond, mat1, mat2)\n return out\n\n cond = torch.ones(2, 3, dtype=torch.uint8)\n cond[1, 2] = 0\n mat1 = torch.randn(2, 3)\n mat2 = torch.ones(2, 3)\n self.run_test(M(), input=(cond, mat1, mat2))\n\n @skipIfUnsupportedMinOpsetVersion(10) # ONNX IsInf op is added in opset 10.\n def test_isinf(self):\n class M(torch.nn.Module):\n def forward(self, x):\n return x.isinf()\n\n x = torch.tensor([[1, 2, float('inf')], [2, float('nan'), float('inf')]])\n self.run_test(M(), (x, ))\n\n @skipIfUnsupportedMinOpsetVersion(9) # ONNX IsNaN op is added in opset 9.\n def test_isnan(self):\n class M(torch.nn.Module):\n def forward(self, x):\n return x.isnan()\n\n x = torch.tensor([[1, 2, float('inf')], [2, float('nan'), float('inf')]])\n self.run_test(M(), (x, ))\n\n @skipIfUnsupportedMinOpsetVersion(9)\n def test_any(self):\n class M(torch.nn.Module):\n def forward(self, x):\n return x.any()\n\n x = torch.tensor([[True, False], [False, False]])\n self.run_test(M(), (x, ))\n\n @skipIfUnsupportedMinOpsetVersion(9)\n def test_all(self):\n class M(torch.nn.Module):\n def forward(self, x):\n return x.all()\n\n x = torch.tensor([[True, False], [False, False]])\n self.run_test(M(), (x, ))\n\n def test_dropout(self):\n class M(torch.nn.Module):\n def __init__(self):\n super(M, self).__init__()\n self.dropout = torch.nn.Dropout(0.3)\n\n def forward(self, x):\n dropout = self.dropout(x)\n return dropout\n\n x = torch.randn(10, 3, 53)\n self.run_test(M(), (x))\n\n def test_shape_constant_fold(self):\n class ShapeModule(torch.nn.Module):\n def __init__(self):\n super(ShapeModule, self).__init__()\n self.register_buffer(\"weight\", torch.ones(5))\n\n def forward(self, x):\n shape = self.weight.shape[0]\n return x + shape\n\n x = torch.randn(2, 5)\n self.run_test(ShapeModule(), (x,), rtol=1e-3, atol=1e-5)\n\n @skipIfUnsupportedMinOpsetVersion(12)\n def test_celu(self):\n class Celu(torch.nn.Module):\n def __init__(self):\n super(Celu, self).__init__()\n self.celu = torch.nn.CELU(alpha=1.0)\n\n def forward(self, input):\n return self.celu(input)\n\n input = torch.randn(2)\n self.run_test(Celu(), (input,))\n\n @skipIfUnsupportedMinOpsetVersion(12)\n def test_celu_default(self):\n class 
Celu(torch.nn.Module):\n def __init__(self):\n super(Celu, self).__init__()\n self.celu = torch.nn.CELU()\n\n def forward(self, input):\n return self.celu(input)\n\n input = torch.randn(2)\n self.run_test(Celu(), (input,))\n\n @skipIfUnsupportedMinOpsetVersion(12)\n def test_celu_alpha(self):\n class Celu(torch.nn.Module):\n def __init__(self):\n super(Celu, self).__init__()\n self.celu = torch.nn.CELU(alpha=2.)\n\n def forward(self, input):\n return self.celu(input)\n\n input = torch.randn(2)\n self.run_test(Celu(), (input,))\n\n @skipIfUnsupportedMinOpsetVersion(12)\n def test_celu_cast(self):\n class Celu(torch.nn.Module):\n def __init__(self):\n super(Celu, self).__init__()\n self.celu = torch.nn.CELU()\n\n def forward(self, input):\n return self.celu(input)\n\n input = torch.randn(2, 5, 7, dtype=torch.float64)\n self.run_test(Celu(), (input,))\n\n @skipIfUnsupportedMinOpsetVersion(9)\n def test_where(self):\n class Model(torch.nn.Module):\n def forward(self, cond, input, other):\n return torch.where(cond, input, other)\n\n x = torch.randint(0, 1, (2, 3, 4), dtype=torch.bool)\n y = torch.randn(2, 1, 4)\n z = torch.ones(2, 3, 1)\n self.run_test(Model(), (x, y, z))\n\n @skipIfUnsupportedMinOpsetVersion(9)\n @disableScriptTest() # scripting tests run for opsets > 11. See: test_where_condition_script\n def test_where_condition(self):\n class Model1(torch.nn.Module):\n def forward(self, input):\n return torch.stack(torch.where(input > 0.5), dim=1)\n\n x = torch.randint(0, 2, (2, 3, 4), dtype=bool)\n self.run_test(Model1(), (x))\n\n class Model2(torch.nn.Module):\n def forward(self, input, other):\n return torch.stack(torch.where(input > other), dim=1)\n\n x = torch.randint(0, 1, (2, 3, 4), dtype=bool)\n y = torch.randint(1, 2, (2, 3, 4), dtype=bool)\n self.run_test(Model2(), (x, y))\n\n @skipIfUnsupportedOpsetVersion([13])\n @skipIfUnsupportedMinOpsetVersion(11)\n def test_where_condition_script(self):\n class Model1(torch.nn.Module):\n def forward(self, input):\n return torch.stack(torch.where(input > 0.5), dim=1)\n\n x = torch.randint(0, 2, (2, 3, 4), dtype=bool)\n self.run_test(Model1(), (x))\n\n class Model2(torch.nn.Module):\n def forward(self, input, other):\n return torch.stack(torch.where(input > other), dim=1)\n\n x = torch.randint(0, 1, (2, 3, 4), dtype=bool)\n y = torch.randint(1, 2, (2, 3, 4), dtype=bool)\n self.run_test(Model2(), (x, y))\n\n def test_empty_branch(self):\n class EmptyBranchModel(torch.jit.ScriptModule):\n @torch.jit.script_method\n def forward(self, input):\n out = input + 1\n if out.dim() > 2:\n if out.dim() > 3:\n out += 3\n else:\n pass\n else:\n pass\n return out\n\n x = torch.randn(1, 2, 3, requires_grad=True)\n self.run_test(EmptyBranchModel(), x)\n\n @disableScriptTest()\n def test_derive_index(self):\n class MyModule(torch.nn.Module):\n def forward(self, x: torch.Tensor):\n j = []\n for idx in range(len(x) - 1, -len(x), -2):\n y = x[idx]\n j += [x * y]\n return j\n\n x = torch.randn(5, 13)\n self.run_test(MyModule(), x)\n\n class MyModule(torch.nn.Module):\n def forward(self, x: torch.Tensor):\n j = []\n for idx in range(-len(x), len(x) - 1, 2):\n y = x[idx]\n j += [x * y]\n return j\n\n x = torch.randn(5, 13)\n self.run_test(MyModule(), x)\n\n class MyModule(torch.nn.Module):\n def forward(self, x: torch.Tensor):\n j = []\n for idx in range(len(x) - 1, -len(x), -3):\n y = x[idx]\n j += [x * y]\n return j\n\n self.run_test(MyModule(), x)\n\n class MyModule(torch.nn.Module):\n def forward(self, x: torch.Tensor):\n j = []\n for idx in range(-len(x), len(x) 
- 1, 3):\n y = x[idx]\n j += [x * y]\n return j\n\n self.run_test(MyModule(), x)\n\n @skipIfONNXShapeInference(False)\n @skipIfUnsupportedMinOpsetVersion(11)\n def test_if_transpose(self):\n class IfModel(torch.nn.Module):\n def forward(self, x):\n x = x.transpose(0, 1)\n if x.size(0) == 2:\n return x.transpose(0, 1)\n else:\n return x\n\n x = torch.randn(2, 3)\n self.run_test(torch.jit.script(IfModel()), x,\n output_names=['output_1'],\n dynamic_axes={'output_1': [0, 1]})\n\n @skipIfONNXShapeInference(False)\n @skipIfUnsupportedMinOpsetVersion(13)\n def test_if_list(self):\n class IfModel(torch.nn.Module):\n def forward(self, x, y, cond):\n res = []\n if cond:\n res = res + [x]\n else:\n res = res + [y]\n return res\n\n x = torch.randn(2, 3)\n y = torch.randn(3, 3)\n cond = torch.tensor(1, dtype=torch.bool)\n self.run_test(torch.jit.script(IfModel()), (x, y, cond))\n\n @skipIfUnsupportedMinOpsetVersion(13)\n def test_if_view(self):\n class IfModel(torch.nn.Module):\n def forward(self, x, y, cond):\n bs, seq = y.shape[:2]\n if cond:\n res = x.view(bs, seq, -1)\n else:\n res = y\n return res.transpose(1, 2)\n\n x = torch.randn(2, 16, 2, 2)\n y = torch.randn(2, 16, 8)\n cond = torch.tensor(1, dtype=torch.bool)\n self.run_test(torch.jit.script(IfModel()), (x, y, cond),\n output_names=['output_1'],\n dynamic_axes={'output_1': [1]})\n\n def test_onnx_proto_checker(self):\n class Model(torch.nn.Module):\n def __init__(self):\n super(Model, self).__init__()\n\n def forward(self, x):\n return 2 * x\n\n x = torch.randn(1, 2, 3, requires_grad=True)\n f = io.BytesIO()\n torch.onnx._export(Model(), x, f)\n model = onnx.load(f)\n model.ir_version = 0\n\n def check_proto():\n torch._C._check_onnx_proto(model.SerializeToString())\n\n self.assertRaises(RuntimeError, check_proto)\n\n @disableScriptTest() # dtype mismatch\n def test_split_tensor_scalar(self):\n class SplitModel(torch.nn.Module):\n def forward(self, x):\n return torch.split(x, x.size(1))\n\n x = torch.randn(1, 2, 3, requires_grad=True)\n self.run_test(SplitModel(), x)\n\n def test_split_tensor_multi(self):\n class SplitModel(torch.nn.Module):\n def forward(self, x):\n return torch.split(x, torch.ones(3))\n\n x = torch.randn(1, 2, 3, requires_grad=True)\n\n def run_model():\n SplitModel(x)\n\n self.assertRaises(TypeError, run_model)\n\n @skipIfUnsupportedMinOpsetVersion(9)\n def test_embedding(self):\n class EmbedModel(torch.nn.Module):\n def forward(self, input, emb):\n return torch.nn.functional.embedding(input, emb, padding_idx=1)\n\n model = EmbedModel()\n x = torch.randint(4, (4,))\n x[2] = x[0] = 1\n embedding_matrix = torch.rand(10, 3)\n self.run_test(model, (x, embedding_matrix))\n\n x = torch.randint(4, (4, 3, 2))\n x[2] = 1\n x[0][1] = 1\n self.run_test(model, (x, embedding_matrix))\n self.run_test(model, (x, embedding_matrix), training=torch.onnx.TrainingMode.TRAINING)\n\n class EmbedModelWithoutPaddingIdx(torch.nn.Module):\n def forward(self, input, emb):\n return torch.nn.functional.embedding(input, emb)\n\n model = EmbedModelWithoutPaddingIdx()\n x = torch.randint(4, (4, 3, 2))\n self.run_test(model, (x, embedding_matrix))\n\n @skipIfUnsupportedMinOpsetVersion(9)\n def test_embedding_module(self):\n class EmbedModel(torch.nn.Module):\n def __init__(self):\n super().__init__()\n self.emb = torch.nn.Embedding(4, 3, padding_idx=1)\n self.emb2 = torch.nn.Embedding(4, 3, padding_idx=1)\n with torch.no_grad():\n self.emb2.weight[1] = torch.ones(3)\n\n def forward(self, input):\n return self.emb(input), self.emb2(input)\n\n model = 
EmbedModel()\n x = torch.randint(4, (4,))\n x[2] = x[0] = 1\n self.run_test(model, (x,))\n\n x = torch.randint(4, (4, 3, 2))\n x[2] = 1\n x[0][1] = 1\n self.run_test(model, (x,))\n\n class EmbedModelWithoutPaddingIdx(torch.nn.Module):\n def __init__(self):\n super().__init__()\n self.emb = torch.nn.Embedding(4, 3)\n\n def forward(self, input):\n return self.emb(input)\n\n model = EmbedModelWithoutPaddingIdx()\n x = torch.randint(4, (4, 3, 2))\n self.run_test(model, (x,))\n\n def _dispatch_rnn_test(self, name, *args, **kwargs):\n if name == 'elman':\n self._elman_rnn_test(*args, **kwargs)\n if name == 'lstm':\n self._lstm_test(*args, **kwargs)\n if name == 'gru':\n self._gru_test(*args, **kwargs)\n\n def _elman_rnn_test(self, layers, nonlinearity, bidirectional,\n initial_state, packed_sequence, dropout):\n\n class ElmanWithStateModel(torch.nn.Module):\n def __init__(self, layers, nonlinearity, bidirect, dropout, batch_first):\n super(ElmanWithStateModel, self).__init__()\n\n self.batch_first = batch_first\n self.inner_model = torch.nn.RNN(RNN_INPUT_SIZE, RNN_HIDDEN_SIZE, layers, nonlinearity=nonlinearity,\n bidirectional=bidirectional, dropout=dropout, batch_first=batch_first)\n\n def forward(self, input: PackedSequence, hx=None):\n return self.inner_model(input, hx)\n\n class ElmanWithoutStateModel(torch.nn.Module):\n def __init__(self, layers, nonlinearity, bidirect, dropout, batch_first):\n super(ElmanWithoutStateModel, self).__init__()\n self.batch_first = batch_first\n self.inner_model = torch.nn.RNN(RNN_INPUT_SIZE, RNN_HIDDEN_SIZE, layers, nonlinearity=nonlinearity,\n bidirectional=bidirectional, dropout=dropout, batch_first=batch_first)\n\n def forward(self, input: PackedSequence):\n return self.inner_model(input)\n\n batch_first = True if packed_sequence == 2 else False\n\n if initial_state:\n model = ElmanWithStateModel(layers=layers, bidirect=bidirectional, nonlinearity=nonlinearity,\n dropout=dropout, batch_first=batch_first)\n\n if packed_sequence == 1:\n model = RnnModelWithPackedSequenceWithState(model, False)\n if packed_sequence == 2:\n model = RnnModelWithPackedSequenceWithState(model, True)\n else:\n model = ElmanWithStateModel(layers=layers, bidirect=bidirectional,\n nonlinearity=nonlinearity, dropout=dropout,\n batch_first=batch_first)\n\n if packed_sequence == 1:\n model = RnnModelWithPackedSequenceWithoutState(model, False)\n if packed_sequence == 2:\n model = RnnModelWithPackedSequenceWithoutState(model, True)\n\n def make_input(batch_size):\n seq_lengths = np.random.randint(1, RNN_SEQUENCE_LENGTH + 1, size=batch_size)\n seq_lengths = list(reversed(sorted(map(int, seq_lengths))))\n inputs = [torch.randn(l, RNN_INPUT_SIZE) for l in seq_lengths]\n inputs = rnn_utils.pad_sequence(inputs, batch_first=batch_first)\n inputs = [inputs]\n\n directions = 2 if bidirectional else 1\n\n if initial_state:\n h0 = torch.randn(directions * layers, batch_size, RNN_HIDDEN_SIZE)\n inputs.append(h0)\n if packed_sequence != 0:\n inputs.append(torch.IntTensor(seq_lengths))\n if len(inputs) == 1:\n input = inputs[0]\n else:\n input = tuple(inputs)\n return input\n\n input = make_input(RNN_BATCH_SIZE)\n self.run_test(model, input, batch_size=RNN_BATCH_SIZE)\n\n # test that the model still runs with a different batch size\n other_input = make_input(RNN_BATCH_SIZE + 1)\n self.run_test(model, other_input, batch_size=RNN_BATCH_SIZE + 1)\n\n def _lstm_test(self, layers, bidirectional, initial_state,\n packed_sequence, dropout):\n batch_first = True if packed_sequence == 2 else False\n\n if 
packed_sequence == 0:\n model = LstmFlatteningResultWithoutSeqLength(RNN_INPUT_SIZE, RNN_HIDDEN_SIZE, layers,\n bidirectional, dropout, batch_first)\n else:\n model = LstmFlatteningResultWithSeqLength(RNN_INPUT_SIZE, RNN_HIDDEN_SIZE, layers,\n bidirectional, dropout, batch_first)\n if initial_state:\n if packed_sequence == 1:\n model = RnnModelWithPackedSequenceWithState(model, False)\n if packed_sequence == 2:\n model = RnnModelWithPackedSequenceWithState(model, True)\n else:\n if packed_sequence == 1:\n model = RnnModelWithPackedSequenceWithoutState(model, False)\n if packed_sequence == 2:\n model = RnnModelWithPackedSequenceWithoutState(model, True)\n\n def make_input(batch_size):\n seq_lengths = np.random.randint(1, RNN_SEQUENCE_LENGTH + 1, size=batch_size)\n seq_lengths = list(reversed(sorted(map(int, seq_lengths))))\n inputs = [torch.randn(l, RNN_INPUT_SIZE) for l in seq_lengths]\n inputs = rnn_utils.pad_sequence(inputs, batch_first=batch_first)\n inputs = [inputs]\n\n directions = 2 if bidirectional else 1\n\n if initial_state:\n h0 = torch.randn(directions * layers, batch_size, RNN_HIDDEN_SIZE)\n c0 = torch.randn(directions * layers, batch_size, RNN_HIDDEN_SIZE)\n inputs.append((h0, c0))\n if packed_sequence != 0:\n inputs.append(torch.IntTensor(seq_lengths))\n if len(inputs) == 1:\n input = inputs[0]\n else:\n input = tuple(inputs)\n return input\n\n input = make_input(RNN_BATCH_SIZE)\n self.run_test(model, input, batch_size=RNN_BATCH_SIZE)\n\n # test that the model still runs with a different batch size\n other_input = make_input(RNN_BATCH_SIZE + 1)\n self.run_test(model, other_input, batch_size=RNN_BATCH_SIZE + 1)\n\n def _gru_test(self, layers, bidirectional, initial_state,\n packed_sequence, dropout):\n\n class GRUWithStateModel(torch.nn.Module):\n def __init__(self, layers, bidirect, dropout, batch_first):\n super(GRUWithStateModel, self).__init__()\n\n self.batch_first = batch_first\n self.inner_model = torch.nn.GRU(RNN_INPUT_SIZE, RNN_HIDDEN_SIZE, num_layers=layers,\n bidirectional=bidirectional, dropout=dropout,\n batch_first=batch_first)\n\n def forward(self, input: PackedSequence, hx):\n return self.inner_model(input, hx)\n\n class GRUWithoutStateModel(torch.nn.Module):\n def __init__(self, layers, bidirect, dropout, batch_first):\n super(GRUWithoutStateModel, self).__init__()\n self.batch_first = batch_first\n self.inner_model = torch.nn.GRU(RNN_INPUT_SIZE, RNN_HIDDEN_SIZE, num_layers=layers,\n bidirectional=bidirectional, dropout=dropout,\n batch_first=batch_first)\n\n def forward(self, input: PackedSequence):\n return self.inner_model(input)\n\n class GRUNoSeqLengthWithoutStateModel(torch.nn.Module):\n def __init__(self, layers, bidirect, dropout, batch_first):\n super(GRUNoSeqLengthWithoutStateModel, self).__init__()\n self.batch_first = batch_first\n self.inner_model = torch.nn.GRU(RNN_INPUT_SIZE, RNN_HIDDEN_SIZE, num_layers=layers,\n bidirectional=bidirectional, dropout=dropout,\n batch_first=batch_first)\n\n def forward(self, input):\n return self.inner_model(input)\n\n class GRUNoSeqLengthWithStateModel(torch.nn.Module):\n def __init__(self, layers, bidirect, dropout, batch_first):\n super(GRUNoSeqLengthWithStateModel, self).__init__()\n self.batch_first = batch_first\n self.inner_model = torch.nn.GRU(RNN_INPUT_SIZE, RNN_HIDDEN_SIZE, num_layers=layers,\n bidirectional=bidirectional, dropout=dropout,\n batch_first=batch_first)\n\n def forward(self, input, hx):\n return self.inner_model(input, hx)\n\n batch_first = True if packed_sequence == 2 else False\n\n if 
packed_sequence == 0:\n if initial_state:\n model = GRUNoSeqLengthWithStateModel(layers=layers, bidirect=bidirectional,\n dropout=dropout, batch_first=batch_first)\n else:\n model = GRUNoSeqLengthWithoutStateModel(layers=layers, bidirect=bidirectional,\n dropout=dropout, batch_first=batch_first)\n else:\n if initial_state:\n model = GRUWithStateModel(layers=layers, bidirect=bidirectional, dropout=dropout,\n batch_first=batch_first)\n if packed_sequence == 1:\n model = RnnModelWithPackedSequenceWithState(model, False)\n if packed_sequence == 2:\n model = RnnModelWithPackedSequenceWithState(model, True)\n else:\n model = GRUWithoutStateModel(layers=layers, bidirect=bidirectional, dropout=dropout,\n batch_first=batch_first)\n if packed_sequence == 1:\n model = RnnModelWithPackedSequenceWithoutState(model, False)\n if packed_sequence == 2:\n model = RnnModelWithPackedSequenceWithoutState(model, True)\n\n def make_input(batch_size):\n seq_lengths = np.random.randint(1, RNN_SEQUENCE_LENGTH + 1, size=batch_size)\n seq_lengths = list(reversed(sorted(map(int, seq_lengths))))\n inputs = [torch.randn(l, RNN_INPUT_SIZE) for l in seq_lengths]\n inputs = rnn_utils.pad_sequence(inputs, batch_first=batch_first)\n inputs = [inputs]\n\n directions = 2 if bidirectional else 1\n\n if initial_state:\n h0 = torch.randn(directions * layers, batch_size, RNN_HIDDEN_SIZE)\n inputs.append(h0)\n if packed_sequence != 0:\n inputs.append(torch.IntTensor(seq_lengths))\n if len(inputs) == 1:\n input = inputs[0]\n else:\n input = tuple(inputs)\n return input\n\n input = make_input(RNN_BATCH_SIZE)\n self.run_test(model, input, batch_size=RNN_BATCH_SIZE)\n\n # test that the model still runs with a different batch size\n other_input = make_input(RNN_BATCH_SIZE + 1)\n self.run_test(model, other_input, batch_size=RNN_BATCH_SIZE + 1)\n\n @disableScriptTest() # TODO: RuntimeError: Exporting the operator __is_ to ONNX is not supported\n def test_transformer_encoder(self):\n from torch.nn import TransformerEncoderLayer, TransformerEncoder\n\n class MyModule(torch.nn.Module):\n def __init__(self, ninp, nhead, nhid, dropout, nlayers):\n super(MyModule, self).__init__()\n encoder_layers = TransformerEncoderLayer(ninp, nhead, nhid, dropout)\n self.transformer_encoder = TransformerEncoder(encoder_layers, nlayers)\n\n def forward(self, input):\n return self.transformer_encoder(input)\n\n x = torch.rand(10, 32, 512)\n self.run_test(MyModule(512, 8, 2048 , 0., 3), (x,), atol=1e-6)\n\n @skipIfUnsupportedMinOpsetVersion(10)\n def test_fake_quantize_per_tensor(self):\n class FakeQuantizePerTensorModel(torch.nn.Module):\n def forward(self, input):\n scale = 1. 
/ 127\n zero_point = 0\n quant_min = -128\n quant_max = 127\n return torch.fake_quantize_per_tensor_affine(input, scale, zero_point, quant_min, quant_max)\n\n x = torch.randn(6, 4, 3, 3)\n self.run_test(FakeQuantizePerTensorModel(), (x))\n\n @skipIfUnsupportedMinOpsetVersion(13)\n def test_fake_quantize_per_channel(self):\n class FakeQuantizePerChannelModel(torch.nn.Module):\n def forward(self, input):\n amax = torch.ones(4)\n scale = amax / 127.\n zero_point = torch.zeros_like(amax, dtype=torch.long)\n # Quantize twice to test differnet branches\n y = torch.fake_quantize_per_channel_affine(input, scale, zero_point, 1, 0, 255)\n return torch.fake_quantize_per_channel_affine(y, scale, zero_point, 1, -128, 127)\n\n x = torch.randn(6, 4, 3, 3)\n self.run_test(FakeQuantizePerChannelModel(), (x))\n\n def test_batchnorm_training(self):\n class MyModule(torch.nn.Module):\n def __init__(self):\n super(MyModule, self).__init__()\n self.bn = torch.nn.BatchNorm2d(3, affine=True)\n\n def forward(self, x):\n bn = self.bn(x)\n return bn\n\n model = MyModule()\n x = torch.randn(10, 3, 128, 128)\n\n model.train()\n out = model(x)\n\n # state after 1 train epoch\n running_mean = model.bn.running_mean\n running_var = model.bn.running_var\n saved_mean = x.mean((0, 2, 3))\n saved_var = x.var((0, 2, 3))\n\n pytorch_out = [out.detach().numpy(),\n running_mean.cpu().numpy(), running_var.cpu().numpy(),\n saved_mean.cpu().numpy(), saved_var.cpu().numpy()]\n\n model_export = MyModule()\n f = io.BytesIO()\n\n ort_sess = convert_to_onnx(model_export, input=(x,), opset_version=self.opset_version,\n training=torch.onnx.TrainingMode.TRAINING)\n ort_outs = run_ort(ort_sess, input=(x,))\n [np.testing.assert_allclose(p_out, ort_out, atol=10e-3, rtol=10e-3) for p_out, ort_out in zip(pytorch_out, ort_outs)]\n\n model_export = torch.jit.script(MyModule())\n ort_sess = convert_to_onnx(model_export, input=(x,), opset_version=self.opset_version,\n example_outputs=out,\n training=torch.onnx.TrainingMode.TRAINING,\n onnx_shape_inference=True)\n ort_outs = run_ort(ort_sess, input=(x,))\n [np.testing.assert_allclose(p_out, ort_out, atol=10e-3, rtol=10e-3) for p_out, ort_out in\n zip(pytorch_out, ort_outs)]\n\n @skipIfUnsupportedMinOpsetVersion(12)\n def test_dropout_training(self):\n class MyModule(torch.nn.Module):\n def __init__(self):\n super(MyModule, self).__init__()\n self.dropout = torch.nn.Dropout(0.4)\n\n def forward(self, x):\n dropout = self.dropout(x)\n return dropout\n\n model = MyModule()\n x = torch.randn(10)\n\n model.train()\n\n ort_sess = convert_to_onnx(model, input=(x,), opset_version=self.opset_version,\n training=torch.onnx.TrainingMode.TRAINING)\n ort_outs = run_ort(ort_sess, input=(x,))\n assert not torch.all(torch.eq(x, torch.from_numpy(ort_outs[0])))\n\n script_model = torch.jit.script(model)\n output = model(x)\n ort_sess = convert_to_onnx(script_model, input=(x,), opset_version=self.opset_version,\n example_outputs=output,\n training=torch.onnx.TrainingMode.TRAINING)\n ort_outs = run_ort(ort_sess, input=(x,))\n assert not torch.all(torch.eq(x, torch.from_numpy(ort_outs[0])))\n\n @skipIfUnsupportedMinOpsetVersion(12)\n def test_dropout_training_zero(self):\n class MyModule(torch.nn.Module):\n def __init__(self):\n super(MyModule, self).__init__()\n self.dropout = torch.nn.Dropout(0.5)\n\n def forward(self, x):\n dropout = self.dropout(x)\n return dropout\n\n model = MyModule()\n\n # ensure there are no zeros in the input\n x = torch.randn(10, 3, 128, 128)\n y = x.numpy()\n y_mask = np.where(y == 0, 1, y)\n 
input = torch.from_numpy(y_mask)\n nb_elements = torch.numel(input)\n\n model.train()\n\n ort_sess = convert_to_onnx(model, input=(x,), opset_version=self.opset_version,\n training=torch.onnx.TrainingMode.TRAINING)\n ort_outs = run_ort(ort_sess, input=(x,))\n\n y = model(input)\n output = y.cpu().numpy()\n ort_mask = np.where(ort_outs[0] != 0, 1, 0)\n pyt_mask = np.where(output != 0, 1, 0)\n\n ratio_pytorch = np.sum(pyt_mask) / nb_elements\n ratio_ort = np.sum(ort_mask) / nb_elements\n\n np.testing.assert_allclose(ratio_pytorch, ratio_ort, rtol=0.01, atol=0.01)\n\n script_model = torch.jit.script(model)\n y = model(input)\n output = y.cpu().numpy()\n ort_sess = convert_to_onnx(script_model, input=(x,), opset_version=self.opset_version,\n example_outputs=y,\n training=torch.onnx.TrainingMode.TRAINING)\n ort_outs = run_ort(ort_sess, input=(x,))\n ort_mask = np.where(ort_outs[0] != 0, 1, 0)\n pyt_mask = np.where(output != 0, 1, 0)\n\n ratio_pytorch = np.sum(pyt_mask) / nb_elements\n ratio_ort = np.sum(ort_mask) / nb_elements\n\n np.testing.assert_allclose(ratio_pytorch, ratio_ort, rtol=0.01, atol=0.01)\n\n def test_conv_bn(self):\n class MyModule(torch.nn.Module):\n def __init__(self):\n super(MyModule, self).__init__()\n self.conv = torch.nn.Conv2d(3, 16, kernel_size=1, stride=2, padding=3, bias=True)\n self.bn = torch.nn.BatchNorm2d(16, affine=True)\n\n def forward(self, x):\n x = self.conv(x)\n bn = self.bn(x)\n return bn\n\n model = MyModule()\n x = torch.randn(10, 3, 128, 128)\n ort_sess1 = convert_to_onnx(model, input=(x,), opset_version=self.opset_version,\n training=torch.onnx.TrainingMode.TRAINING)\n ort_outs1 = run_ort(ort_sess1, input=(x,))\n ort_sess2 = convert_to_onnx(model, input=(x,), opset_version=self.opset_version,\n training=torch.onnx.TrainingMode.EVAL)\n ort_outs2 = run_ort(ort_sess2, input=(x,))\n [np.testing.assert_allclose(ort_out1, ort_out2, atol=1e-7, rtol=0.001) for ort_out1, ort_out2 in\n zip(ort_outs1, ort_outs2)]\n\n script_model = torch.jit.script(model)\n outputs = model(x)\n ort_sess1 = convert_to_onnx(script_model, input=(x,), opset_version=self.opset_version,\n example_outputs=outputs,\n training=torch.onnx.TrainingMode.TRAINING)\n ort_outs1 = run_ort(ort_sess1, input=(x,))\n ort_sess2 = convert_to_onnx(script_model, input=(x,), opset_version=self.opset_version,\n example_outputs=outputs,\n training=torch.onnx.TrainingMode.EVAL)\n ort_outs2 = run_ort(ort_sess2, input=(x,))\n [np.testing.assert_allclose(ort_out1, ort_out2, atol=1e-7, rtol=0.001) for ort_out1, ort_out2 in\n zip(ort_outs1, ort_outs2)]\n\n def test_multiple_conv_bn(self):\n class MyModule(torch.nn.Module):\n def __init__(self):\n super(MyModule, self).__init__()\n self.conv1 = torch.nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3, bias=False)\n self.conv2 = torch.nn.Conv2d(64, 2, kernel_size=1, stride=1, padding=0, bias=False)\n self.conv3 = torch.nn.Conv2d(2, 2, kernel_size=3, stride=1, padding=1, bias=False)\n self.bn = torch.nn.BatchNorm2d(64)\n self.bn2 = torch.nn.BatchNorm2d(2)\n self.relu = torch.nn.ReLU(inplace=True)\n self.maxpool = torch.nn.MaxPool2d(kernel_size=3, stride=2, padding=1)\n\n def forward(self, x):\n x = self.conv1(x)\n x = self.bn(x)\n x = self.relu(x)\n x = self.maxpool(x)\n x = self.conv2(x)\n x = self.bn2(x)\n x = self.relu(x)\n x = self.conv3(x)\n x = self.bn2(x)\n x = self.relu(x)\n return x\n\n model = MyModule()\n x = torch.randn(2, 3, 224, 224)\n ort_sess1 = convert_to_onnx(model, input=(x,), opset_version=self.opset_version,\n 
training=torch.onnx.TrainingMode.TRAINING)\n ort_outs1 = run_ort(ort_sess1, input=(x,))\n ort_sess2 = convert_to_onnx(model, input=(x,), opset_version=self.opset_version,\n training=torch.onnx.TrainingMode.EVAL)\n ort_outs2 = run_ort(ort_sess2, input=(x,))\n [np.testing.assert_allclose(ort_out1, ort_out2, atol=1e-7, rtol=0.001) for ort_out1, ort_out2 in\n zip(ort_outs1, ort_outs2)]\n\n def test_script_custom_class_error(self):\n class BoxCoder(object):\n def __init__(self, bbox_xform_clip: float):\n # type: (float) -> None\n self.bbox_xform_clip = bbox_xform_clip\n\n def decode(self, rel_codes, boxes):\n # type: (Tensor, List[Tensor]) -> Tensor\n boxes = torch.cat(boxes, dim=0)\n pred_ctr_x = torch.clamp(rel_codes[:, 0::4], max=self.bbox_xform_clip) * boxes[:, 2]\n return pred_ctr_x\n\n class MyModule(torch.nn.Module):\n __annotations__ = {\n 'box_coder': BoxCoder,\n }\n\n def __init__(self):\n super(MyModule, self).__init__()\n self.box_coder = BoxCoder(1.4)\n\n def forward(self, box_regression: torch.Tensor, proposals: List[torch.Tensor]):\n return self.box_coder.decode(box_regression, proposals)\n\n model = torch.jit.script(MyModule())\n box_regression = torch.randn([4, 4])\n proposal = [torch.randn(2, 4), torch.randn(2, 4)]\n outputs = model(box_regression, proposal)\n\n with self.assertRaises(RuntimeError) as cm:\n convert_to_onnx(model, input=(box_regression, proposal),\n example_outputs=outputs)\n\n def test_initializer_sequence(self):\n class MyModule(torch.nn.Module):\n def __init__(self, input_size, hidden_size, num_classes):\n super(MyModule, self).__init__()\n self.fc1 = torch.nn.Linear(input_size, hidden_size)\n self.relu = torch.nn.ReLU()\n self.fc2 = torch.nn.Linear(hidden_size, num_classes)\n\n def forward(self, x):\n out = self.fc1(x)\n out = self.relu(out)\n out = self.fc2(out)\n return out\n\n test_model = MyModule(3, 4, 10)\n state_dict_list = [k for (k, v) in test_model.state_dict().items()]\n named_params_list = [k for (k, v) in test_model.named_parameters()]\n\n x = torch.randn(32, 3)\n f = io.BytesIO()\n torch.onnx._export(test_model, (x,), f, _retain_param_name=True, do_constant_folding=False)\n loaded_model = onnx.load_from_string(f.getvalue())\n\n actual_list = [p.name for p in loaded_model.graph.initializer]\n assert actual_list == state_dict_list, \\\n \"Initializers' sequence is not as same as state_dict(). Expected: (\" \\\n + ', '.join(state_dict_list) + \"). Actual:(\" + ', '.join(actual_list) + \").\"\n assert actual_list == named_params_list, \\\n \"Initializers' sequence is not as same as named_parameters(). Expected: (\" \\\n + ', '.join(named_params_list) + \"). 
Actual:(\" + ', '.join(actual_list) + \").\"\n\n def test_initializer_sequence_script_model(self):\n def list_is_expected(short_list, long_list) -> bool:\n if (len(short_list) > len(long_list)):\n return False\n\n for i in range(len(short_list)):\n if (short_list[i] not in long_list[i]):\n return False\n\n return True\n\n def loop(x, y):\n for i in range(int(y)):\n x = x + i\n return x\n\n class MyModule(torch.nn.Module):\n def __init__(self, input_size, hidden_size, num_classes):\n super(MyModule, self).__init__()\n self.fc1 = torch.nn.Linear(input_size, hidden_size)\n self.relu = torch.nn.ReLU()\n self.fc2 = torch.nn.Linear(hidden_size, num_classes)\n\n def forward(self, x, y):\n x = loop(x, y)\n out = self.fc1(x)\n out = self.relu(out)\n out = self.fc2(out)\n return out\n\n test_model = torch.jit.script(MyModule(3, 4, 10))\n state_dict_list = [k for (k, v) in test_model.state_dict().items()]\n named_params_list = [k for (k, v) in test_model.named_parameters()]\n\n x = torch.ones(2, 3, dtype=torch.float)\n y = torch.tensor(5, dtype=torch.long)\n example_output = (test_model(x, y),)\n f = io.BytesIO()\n\n torch.onnx.export(test_model, (x, y), f, example_outputs=example_output, _retain_param_name=True, do_constant_folding=False)\n loaded_model = onnx.load_from_string(f.getvalue())\n\n actual_list = [p.name for p in loaded_model.graph.initializer]\n assert list_is_expected(state_dict_list, actual_list), \\\n \"ScriptModel - Initializers' sequence is not as same as state_dict(). Expected: (\" \\\n + ', '.join(state_dict_list) + \"). Actual:(\" + ', '.join(actual_list) + \").\"\n assert list_is_expected(named_params_list, actual_list), \\\n \"ScriptModel - Initializers' sequence is not as same as named_parameters(). Expected: (\" \\\n + ', '.join(named_params_list) + \"). 
Actual:(\" + ', '.join(actual_list) + \").\"\n\n @skipIfUnsupportedMinOpsetVersion(11)\n def test_nms(self):\n boxes = torch.rand(5, 4)\n boxes[:, 2:] += torch.rand(5, 2)\n scores = torch.randn(5)\n\n class Module(torch.nn.Module):\n def forward(self, boxes, scores):\n return ops.nms(boxes, scores, 0.5)\n\n self.run_test(Module(), (boxes, scores))\n\n @skipIfUnsupportedMinOpsetVersion(11)\n @disableScriptTest()\n def test_clip_boxes_to_image(self):\n boxes = torch.randn(5, 4) * 500\n boxes[:, 2:] += boxes[:, :2]\n size = torch.randn(200, 300)\n\n size_2 = torch.randn(300, 400)\n\n class Module(torch.nn.Module):\n def forward(self, boxes, size):\n shape = (size.shape[0], size.shape[1])\n return ops.boxes.clip_boxes_to_image(boxes, shape)\n\n self.run_test(Module(), (boxes, size),\n input_names=[\"boxes\", \"size\"],\n dynamic_axes={\"size\": [0, 1]},\n test_with_inputs=[(boxes, size), (boxes, size_2)])\n\n @skipIfUnsupportedMinOpsetVersion(11)\n def test_roi_align(self):\n x = torch.rand(1, 1, 10, 10, dtype=torch.float32)\n single_roi = torch.tensor([[0, 0, 0, 4, 4]], dtype=torch.float32)\n model = ops.RoIAlign((5, 5), 1., 2)\n self.run_test(model, (x, single_roi))\n\n @skipIfUnsupportedMinOpsetVersion(11)\n def test_roi_align_aligned(self):\n x = torch.rand(1, 1, 10, 10, dtype=torch.float32)\n single_roi = torch.tensor([[0, 1.5, 1.5, 3, 3]], dtype=torch.float32)\n model1 = ops.RoIAlign((5, 5), 1., 2, aligned=True)\n self.run_test(model1, (x, single_roi))\n\n x = torch.rand(1, 1, 10, 10, dtype=torch.float32)\n single_roi = torch.tensor([[0, 0.2, 0.3, 4.5, 3.5]], dtype=torch.float32)\n model2 = ops.RoIAlign((5, 5), 0.5, 3, aligned=True)\n self.run_test(model2, (x, single_roi))\n\n x = torch.rand(1, 1, 10, 10, dtype=torch.float32)\n single_roi = torch.tensor([[0, 0.2, 0.3, 4.5, 3.5]], dtype=torch.float32)\n model3 = ops.RoIAlign((5, 5), 1.8, 2, aligned=True)\n self.run_test(model3, (x, single_roi))\n\n x = torch.rand(1, 1, 10, 10, dtype=torch.float32)\n single_roi = torch.tensor([[0, 0.2, 0.3, 4.5, 3.5]], dtype=torch.float32)\n model4 = ops.RoIAlign((2, 2), 2.5, 0, aligned=True)\n self.run_test(model4, (x, single_roi))\n\n @skipIfUnsupportedMinOpsetVersion(11)\n def test_roi_pool(self):\n x = torch.rand(1, 1, 10, 10, dtype=torch.float32)\n rois = torch.tensor([[0, 0, 0, 4, 4]], dtype=torch.float32)\n pool_h = 5\n pool_w = 5\n model = ops.RoIPool((pool_h, pool_w), 2.)\n self.run_test(model, (x, rois))\n\n @skipIfUnsupportedMinOpsetVersion(11)\n def test_resize_images(self):\n class TransformModule(torch.nn.Module):\n def __init__(self):\n super(TransformModule, self).__init__()\n self.transform = _init_test_generalized_rcnn_transform()\n\n def forward(self, images):\n return self.transform.resize(images, None)[0]\n\n input = torch.rand(3, 10, 20)\n input_test = torch.rand(3, 100, 150)\n self.run_test(TransformModule(), (input,),\n input_names=[\"input1\"], dynamic_axes={\"input1\": [0, 1, 2]},\n test_with_inputs=[(input,), (input_test,)])\n\n @skipIfUnsupportedMinOpsetVersion(11)\n @disableScriptTest()\n def test_transform_images(self):\n\n class TransformModule(torch.nn.Module):\n def __init__(self):\n super(TransformModule, self).__init__()\n self.transform = _init_test_generalized_rcnn_transform()\n\n def forward(self, images: List[torch.Tensor]):\n return self.transform(images)[0].tensors\n\n input = torch.rand(3, 100, 200), torch.rand(3, 200, 200)\n input_test = torch.rand(3, 100, 200), torch.rand(3, 200, 200)\n self.run_test(TransformModule(), (input,), test_with_inputs=[(input,), 
(input_test,)])\n\n def get_features(self, images):\n s0, s1 = images.shape[-2:]\n features = [\n ('0', torch.rand(2, 256, s0 // 4, s1 // 4)),\n ('1', torch.rand(2, 256, s0 // 8, s1 // 8)),\n ('2', torch.rand(2, 256, s0 // 16, s1 // 16)),\n ('3', torch.rand(2, 256, s0 // 32, s1 // 32)),\n ('4', torch.rand(2, 256, s0 // 64, s1 // 64)),\n ]\n features = OrderedDict(features)\n return features\n\n @skipIfUnsupportedMinOpsetVersion(11)\n @disableScriptTest()\n def test_rpn(self):\n set_rng_seed(0)\n\n class RPNModule(torch.nn.Module):\n def __init__(self):\n super(RPNModule, self).__init__()\n self.rpn = _init_test_rpn()\n\n def forward(self, images, features: Dict[str, torch.Tensor]):\n images_m = ImageList(images, [(i.shape[-1], i.shape[-2]) for i in images])\n return self.rpn(images_m, features)\n\n images = torch.rand(2, 3, 150, 150)\n features = self.get_features(images)\n images2 = torch.rand(2, 3, 80, 80)\n test_features = self.get_features(images2)\n\n model = RPNModule()\n model.eval()\n model(images, features)\n self.run_test(model, (images, features),\n input_names=[\"input1\", \"input2\", \"input3\", \"input4\", \"input5\", \"input6\"],\n dynamic_axes={\"input1\": [0, 1, 2, 3], \"input2\": [0, 1, 2, 3],\n \"input3\": [0, 1, 2, 3], \"input4\": [0, 1, 2, 3],\n \"input5\": [0, 1, 2, 3], \"input6\": [0, 1, 2, 3]},\n test_with_inputs=[(images, features), (images2, test_features)],\n dict_check=False)\n\n @skipIfUnsupportedMinOpsetVersion(11)\n @disableScriptTest()\n def test_multi_scale_roi_align(self):\n\n class TransformModule(torch.nn.Module):\n def __init__(self):\n super(TransformModule, self).__init__()\n self.model = ops.MultiScaleRoIAlign(['feat1', 'feat2'], 3, 2)\n self.image_sizes = [(512, 512)]\n\n def forward(self, input, boxes):\n # type: (Dict[str, torch.Tensor], List[torch.Tensor]) -> torch.Tensor\n return self.model(input, boxes, self.image_sizes)\n\n i = OrderedDict()\n i['feat1'] = torch.rand(1, 5, 64, 64)\n i['feat2'] = torch.rand(1, 5, 16, 16)\n boxes = torch.rand(6, 4) * 256\n boxes[:, 2:] += boxes[:, :2]\n\n i1 = OrderedDict()\n i1['feat1'] = torch.rand(1, 5, 64, 64)\n i1['feat2'] = torch.rand(1, 5, 16, 16)\n boxes1 = torch.rand(6, 4) * 256\n boxes1[:, 2:] += boxes1[:, :2]\n\n self.run_test(TransformModule(), (i, [boxes],), test_with_inputs=[(i, [boxes],), (i1, [boxes1],)])\n\n @skipIfUnsupportedMinOpsetVersion(11)\n @disableScriptTest()\n def test_roi_heads(self):\n class RoiHeadsModule(torch.nn.Module):\n def __init__(self):\n super(RoiHeadsModule, self).__init__()\n self.transform = _init_test_generalized_rcnn_transform()\n self.rpn = _init_test_rpn()\n self.roi_heads = _init_test_roi_heads_faster_rcnn()\n\n def forward(self, images, features: Dict[str, torch.Tensor]):\n original_image_sizes = [(img.shape[-1], img.shape[-2]) for img in images]\n\n images_m = ImageList(images, [(i.shape[-1], i.shape[-2]) for i in images])\n proposals, _ = self.rpn(images_m, features)\n detections, _ = self.roi_heads(features, proposals, images_m.image_sizes)\n detections = self.transform.postprocess(detections,\n images_m.image_sizes,\n original_image_sizes)\n return detections\n\n images = torch.rand(2, 3, 100, 100)\n features = self.get_features(images)\n images2 = torch.rand(2, 3, 150, 150)\n test_features = self.get_features(images2)\n\n model = RoiHeadsModule()\n model.eval()\n model(images, features)\n\n self.run_test(model, (images, features),\n input_names=[\"input1\", \"input2\", \"input3\", \"input4\", \"input5\", \"input6\"],\n dynamic_axes={\"input1\": [0, 1, 2, 3], 
\"input2\": [0, 1, 2, 3], \"input3\": [0, 1, 2, 3],\n \"input4\": [0, 1, 2, 3], \"input5\": [0, 1, 2, 3], \"input6\": [0, 1, 2, 3]},\n test_with_inputs=[(images, features), (images2, test_features)],\n dict_check=False)\n\n @skipIfUnsupportedMinOpsetVersion(9)\n def test_set_attr_modules(self):\n class InnerModule2(torch.nn.Module):\n def __init__(self, embedding_dim):\n super().__init__()\n self.weights = InnerModule2.get_embedding(embedding_dim)\n self.register_buffer(\"_float_tensor\", torch.FloatTensor(1))\n self.const = 2\n\n @staticmethod\n def get_embedding(embedding_dim: int):\n emb = 4 / ((embedding_dim // 2) - 1)\n emb = torch.exp(torch.arange((embedding_dim // 2), dtype=torch.float) * -emb)\n return emb\n\n def forward(self, input, incremental_state: Optional[torch.Tensor] = None):\n bsz, seq_len = input.shape[0], input.shape[1]\n self.const = 3\n if self.weights is None:\n self.weights = InnerModule.get_embedding(self.embedding_dim)\n self.weights = self.weights.to(self._float_tensor)\n self.weights = self.weights * self.const\n if incremental_state is not None:\n pos = seq_len\n return self.weights[1 + pos, :].expand(bsz, 1, -1)\n return (\n self.weights.index_select(0, torch.ones((bsz * seq_len), dtype=torch.int64)).view(bsz, seq_len, -1)\n )\n\n class InnerModule(torch.nn.Module):\n def __init__(self, embedding_dim):\n super().__init__()\n self.weights = InnerModule.get_embedding(embedding_dim)\n self.module = InnerModule2(embedding_dim=8)\n\n @staticmethod\n def get_embedding(embedding_dim: int):\n emb = 4 / ((embedding_dim // 2) - 1)\n emb = torch.exp(torch.arange((embedding_dim // 2), dtype=torch.float) * -emb)\n return emb\n\n def forward(self, x):\n return self.module(x) + self.weights\n\n class Module(torch.nn.Module):\n def __init__(self):\n super(Module, self).__init__()\n self.module = InnerModule(embedding_dim=8)\n\n def forward(self, x):\n return self.module(x)\n\n x = torch.randn(3, 256)\n self.run_test(Module(), (x, ))\n\n @skipIfUnsupportedMinOpsetVersion(9)\n def test_set_attr_modules_2(self):\n class InnerModule(torch.nn.Module):\n def __init__(self, embedding_dim):\n super().__init__()\n self.embedding_dim = embedding_dim\n self.const = 2.5\n self.weights = InnerModule.get_embedding(self.embedding_dim)\n self.register_buffer(\"_float_tensor\", torch.FloatTensor(1))\n\n @staticmethod\n def get_embedding(embedding_dim: int):\n emb = 4 / ((embedding_dim // 2) - 1)\n emb = torch.exp(torch.arange((embedding_dim // 2), dtype=torch.float) * -emb)\n return emb\n\n def forward(self, input, incremental_state: Optional[torch.Tensor] = None):\n bsz, seq_len = input.shape[0], input.shape[1]\n self.const = 1.5\n self.weights = InnerModule.get_embedding(self.embedding_dim)\n return (\n self.weights.index_select(0, torch.ones((bsz * seq_len), dtype=torch.int64)).view(bsz, seq_len, -1)\n ) * self.const\n\n class Module(torch.nn.Module):\n def __init__(self):\n super(Module, self).__init__()\n self.module = InnerModule(embedding_dim=8)\n\n def forward(self, x):\n return self.module(x)\n\n x = torch.randn(3, 256)\n self.run_test(Module(), (x, ))\n\n def test_set_attr(self):\n class MyModule(torch.nn.Module):\n def __init__(self):\n super(MyModule, self).__init__()\n self.conv = torch.nn.Conv1d(3, 10, 2)\n self.b = False\n\n def forward(self, box_regression, weight):\n self.b = True\n self.conv.weight = weight\n w = torch.softmax(self.conv.weight, dim=0)\n self.conv.weight = w + w\n if self.b:\n return box_regression + self.conv.weight\n else:\n return box_regression - 
self.conv.weight\n\n model = torch.jit.script(MyModule())\n weight = torch.ones(3, 2)\n box_regression = torch.randn(3, 2)\n self.run_test(model, (box_regression, weight))\n\n @skipIfUnsupportedMinOpsetVersion(11)\n def test_set_attr_2(self):\n class MyModule(torch.nn.Module):\n def __init__(self):\n super(MyModule, self).__init__()\n self.conv = torch.nn.Conv1d(10, 3, 3)\n self.conv.bias = torch.nn.Parameter(torch.zeros(3, 10, 3))\n\n def set_cell_anchors(self, anchors):\n if self.conv.bias is not None:\n b = self.conv.bias\n assert b is not None\n self.conv.bias = anchors + b\n elif self.conv.weight is not None:\n self.conv.weight = torch.randn(3, 10)\n self.conv.bias = self.conv.weight[:]\n\n def forward(self, anchors) -> Optional[torch.Tensor]:\n self.set_cell_anchors(anchors)\n return self.conv.bias\n\n model = torch.jit.script(MyModule())\n anchors = torch.ones(3, 10, 3)\n self.run_test(model, (anchors))\n\n @skipIfUnsupportedMinOpsetVersion(11)\n def test_set_attr_3(self):\n class MyModule(torch.nn.Module):\n def __init__(self):\n super(MyModule, self).__init__()\n self.conv = torch.nn.Conv1d(10, 3, 3)\n self.conv.weight = torch.nn.Parameter(torch.zeros(3, 10))\n self.conv.bias = torch.nn.Parameter(torch.zeros(3, 10, 3))\n\n def set_cell_anchors(self, anchors, boxes):\n self.conv.weight = torch.ones(3, 10)\n if self.conv.bias is not None:\n self.conv.bias = torch.randn(3, 10, 3)\n self.conv.weight = anchors + self.conv.weight\n boxes[:] = torch.zeros(2, 3)\n\n def forward(self, anchors) -> Tuple[torch.Tensor, torch.Tensor]:\n boxes = torch.ones(2, 2, 3)\n self.set_cell_anchors(anchors, boxes)\n if self.conv.bias is not None:\n return self.conv.weight, boxes\n return anchors, boxes\n\n model = torch.jit.script(MyModule())\n anchors = torch.rand(3, 10)\n self.run_test(model, (anchors))\n\n @skipIfUnsupportedMinOpsetVersion(11)\n def test_set_attr_4(self):\n class MyModule(torch.nn.Module):\n def __init__(self):\n super(MyModule, self).__init__()\n self.conv = torch.nn.Conv1d(10, 3, 3)\n self.conv.bias = torch.nn.Parameter(torch.zeros(3, 10, 3))\n\n def set_cell_anchors(self, anchors):\n self.conv.weight = torch.zeros(10, 3)\n if self.conv.bias is not None:\n w = self.conv.bias\n assert w is not None\n self.conv.bias = anchors + w\n else:\n self.conv.bias = torch.ones(3, 10, 3)\n\n def forward(self, feature_maps, anchors) -> Tuple[torch.Tensor, torch.Tensor]:\n self.set_cell_anchors(anchors)\n result = []\n if self.conv.bias is not None:\n a = self.conv.bias\n assert a is not None\n result += [a]\n result += [feature_maps]\n return result[0], result[1]\n\n model = torch.jit.script(MyModule())\n x = torch.rand(5, 11, 30)\n anchors = torch.ones(3, 10, 3)\n self.run_test(model, (x, anchors))\n\n @skipIfUnsupportedMinOpsetVersion(11)\n def test_set_attr_5(self):\n class MyModule(torch.nn.Module):\n def __init__(self):\n super(MyModule, self).__init__()\n self.conv = torch.nn.Conv1d(10, 3, 3)\n self.conv.bias = torch.nn.Parameter(torch.zeros(3, 10, 3))\n\n def set_cell_anchors(self, anchors):\n self.conv.weight = torch.arange(10)\n for i in range(10):\n if i == 3:\n for j in range(10):\n w = self.conv.weight\n self.conv.weight = torch.arange(10) + w\n\n self.conv.weight = self.conv.weight + torch.arange(10)\n # NOTE: `is not None` and `assert` is for passing torchscript.\n if self.conv.bias is not None:\n a = self.conv.bias\n assert a is not None\n self.conv.bias = anchors + a\n\n def forward(self, anchors):\n self.set_cell_anchors(anchors)\n return self.conv.weight, self.conv.bias\n\n 
model = torch.jit.script(MyModule())\n anchors = torch.ones(3, 10, 3)\n self.run_test(model, (anchors))\n\n @skipIfUnsupportedMinOpsetVersion(11)\n def test_set_attr_in_loop(self):\n class MyModule(torch.nn.Module):\n def __init__(self):\n super(MyModule, self).__init__()\n self.conv = torch.nn.Conv1d(10, 3, 3)\n self.conv.weight = torch.nn.Parameter(torch.zeros(3, 10))\n self.conv.bias = torch.nn.Parameter(torch.zeros(3, 10, 3))\n\n def set_cell_anchors(self, anchors, boxes):\n self.conv.weight = torch.randn(3, 10)\n for i in range(self.conv.weight.size(0)):\n for j in range(10):\n self.conv.bias = torch.randn(3, 10, 3)\n self.conv.weight = anchors * i\n boxes[j] += torch.ones(3, 3)\n\n def forward(self, anchors) -> Tuple[torch.Tensor, torch.Tensor]:\n boxes = torch.ones(10, 3, 3)\n self.set_cell_anchors(anchors, boxes)\n if self.conv.bias is not None:\n return self.conv.weight, boxes\n return anchors, boxes\n\n model = torch.jit.script(MyModule())\n anchors = torch.rand(10)\n self.run_test(model, anchors)\n\n @skipIfUnsupportedMinOpsetVersion(11)\n def test_index_put_if(self):\n @torch.jit.script\n def check_init(input_data, hidden_size, prev_state):\n # type: (torch.Tensor, int, torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]\n batch_size = input_data.size(0)\n spatial_size_0 = input_data.size(2)\n spatial_size_1 = input_data.size(3)\n # generate empty prev_state, if None is provided\n state_size = (2, batch_size, hidden_size, spatial_size_0, spatial_size_1)\n state = torch.zeros(state_size, device=input_data.device)\n state_copy = torch.zeros(state_size, device=input_data.device)\n if prev_state.size(0) == 0:\n state[:] = torch.zeros(batch_size, hidden_size, spatial_size_0, spatial_size_1) + state[:]\n state_copy[:] = torch.ones(batch_size, hidden_size, spatial_size_0, spatial_size_1) * 2\n state_copy[:] = torch.zeros(batch_size, hidden_size, spatial_size_0, spatial_size_1) * 2\n else:\n state[:] = torch.ones(batch_size, hidden_size, spatial_size_0, spatial_size_1) * 4\n return state, state_copy\n\n class Example(torch.nn.Module):\n def __init__(self, hidden_size):\n super().__init__()\n self.hidden_size = hidden_size\n\n def forward(self, input_data, prev_state):\n prev_state = check_init(input_data, self.hidden_size, prev_state)\n return prev_state[0], prev_state[1]\n\n model = Example(10)\n random_data = torch.rand((1, 5, 30, 30))\n empty_tensor = torch.tensor([], dtype=torch.float).view(0, 0, 0, 0, 0)\n self.run_test(model, (random_data, empty_tensor))\n\n @skipIfUnsupportedMinOpsetVersion(11)\n def test_index_put_if_2(self):\n @torch.jit.script\n def check_init(input_data, hidden_size, prev_state):\n # type: (torch.Tensor, int, torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]\n batch_size = input_data.size(0)\n spatial_size_0 = input_data.size(2)\n spatial_size_1 = input_data.size(3)\n # generate empty prev_state, if None is provided\n state_size = (2, batch_size, hidden_size, spatial_size_0, spatial_size_1)\n state = torch.zeros(state_size, device=input_data.device)\n state_copy = torch.zeros(state_size, device=input_data.device)\n if prev_state.size(0) == 0:\n for i in range(2):\n state[:] = torch.ones(batch_size, hidden_size, spatial_size_0, spatial_size_1) * i\n state_copy[:] = torch.ones(batch_size, hidden_size, spatial_size_0, spatial_size_1) * i\n elif prev_state.size(0) == 1:\n s = state[:]\n state[:] = prev_state + s\n elif prev_state.size(0) == 2:\n state[:] = torch.ones(batch_size, hidden_size, spatial_size_0, spatial_size_1) * 4\n return state, state_copy\n\n 
class Example(torch.nn.Module):\n def __init__(self, hidden_size):\n super().__init__()\n self.hidden_size = hidden_size\n\n def forward(self, input_data, prev_state):\n prev_state = check_init(input_data, self.hidden_size, prev_state)\n return prev_state[0], prev_state[1]\n\n model = Example(10)\n random_data = torch.rand((1, 5, 30, 30))\n empty_tensor = torch.tensor([], dtype=torch.float).view(0, 0, 0, 0, 0)\n random_state = torch.rand((1, 1, 10, 30, 30))\n self.run_test(model, (random_data, empty_tensor),\n input_names=['data', 'state'],\n dynamic_axes={'state': [0, 1, 2, 3, 4]},\n test_with_inputs=[(random_data, random_state)])\n\n @skipIfUnsupportedMinOpsetVersion(11)\n def test_index_put_if_3(self):\n @torch.jit.script\n def check_init(input_data, hidden_size, prev_state):\n # type: (torch.Tensor, int, torch.Tensor) -> torch.Tensor\n batch_size = input_data.size(0)\n spatial_size_0 = input_data.size(2)\n spatial_size_1 = input_data.size(3)\n # generate empty prev_state, if None is provided\n state_size = (2, batch_size, hidden_size, spatial_size_0, spatial_size_1)\n state = torch.zeros(state_size, device=input_data.device)\n if prev_state.size(0) < 2:\n state = state * 3\n if prev_state.size(0) == 0:\n state[:] = torch.ones(batch_size, hidden_size, spatial_size_0, spatial_size_1) * 3\n else:\n state = state + 2\n\n return state\n\n class Example(torch.nn.Module):\n def __init__(self, hidden_size):\n super().__init__()\n self.hidden_size = hidden_size\n\n def forward(self, input_data, prev_state):\n prev_state = check_init(input_data, self.hidden_size, prev_state)\n return prev_state\n\n model = Example(4)\n random_data = torch.rand((1, 5, 4, 4))\n empty_tensor = torch.tensor([], dtype=torch.float).view(0, 0, 0, 0, 0)\n self.run_test(model, (random_data, empty_tensor))\n\n @skipIfUnsupportedMinOpsetVersion(11)\n def test_index_put_if_4(self):\n @torch.jit.script\n def check_init(input_data, hidden_size, prev_state):\n # type: (torch.Tensor, int, torch.Tensor) -> torch.Tensor\n batch_size = input_data.size(0)\n spatial_size_0 = input_data.size(2)\n spatial_size_1 = input_data.size(3)\n # generate empty prev_state, if None is provided\n state_size = (2, batch_size, hidden_size, spatial_size_0, spatial_size_1)\n state = torch.zeros(state_size, device=input_data.device)\n if prev_state.size(0) == 0:\n state = state + 3\n state[:] = torch.ones(batch_size, hidden_size, spatial_size_0, spatial_size_1) * 3\n state = state + 3\n state[:] = torch.ones(batch_size, hidden_size, spatial_size_0, spatial_size_1) * 4\n else:\n state = state + 2\n return state\n\n class Example(torch.nn.Module):\n def __init__(self, hidden_size):\n super().__init__()\n self.hidden_size = hidden_size\n\n def forward(self, input_data, prev_state):\n prev_state = check_init(input_data, self.hidden_size, prev_state)\n return prev_state\n\n model = Example(4)\n random_data = torch.rand((1, 5, 4, 4))\n empty_tensor = torch.tensor([], dtype=torch.float).view(0, 0, 0, 0, 0)\n self.run_test(model, (random_data, empty_tensor))\n\n\n @skipIfUnsupportedMinOpsetVersion(11)\n def test_index_put_if_5(self):\n @torch.jit.script\n def check_init(input_data, hidden_size, prev_state):\n # type: (torch.Tensor, int, torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]\n batch_size = input_data.size(0)\n spatial_size_0 = input_data.size(2)\n spatial_size_1 = input_data.size(3)\n # generate empty prev_state, if None is provided\n state_size = (2, batch_size, hidden_size, spatial_size_0, spatial_size_1)\n state = torch.zeros(state_size, 
device=input_data.device)\n state_ref = state\n if prev_state.size(0) == 0:\n state[:] = torch.ones(batch_size, hidden_size, spatial_size_0, spatial_size_1) * 3\n state = state + 3\n state[:] = torch.ones(batch_size, hidden_size, spatial_size_0, spatial_size_1) * 4\n else:\n state = state + 2\n return state, state_ref\n\n class Example(torch.nn.Module):\n def __init__(self, hidden_size):\n super().__init__()\n self.hidden_size = hidden_size\n\n def forward(self, input_data, prev_state):\n prev_state, state_ref = check_init(input_data, self.hidden_size, prev_state)\n return prev_state, state_ref\n\n model = Example(4)\n random_data = torch.rand((1, 5, 4, 4))\n empty_tensor = torch.tensor([], dtype=torch.float).view(0, 0, 0, 0, 0)\n self.run_test(model, (random_data, empty_tensor))\n\n @skipIfUnsupportedMinOpsetVersion(11)\n def test_list_append_in_block(self):\n class ListModel(torch.nn.Module):\n def forward(self, x, y):\n res = []\n for i in range(x.size(0)):\n res.append(torch.matmul(x[i], y))\n return res\n\n model = torch.jit.script(ListModel())\n x = torch.randn(16, 3, 4)\n y = torch.randn(4, 5)\n self.run_test(model, (x, y))\n\n @skipIfUnsupportedMinOpsetVersion(13)\n def test_list_append_in_nested_block(self):\n class ListModel(torch.nn.Module):\n def forward(self, x, y):\n res = []\n for i in range(x.size(0)):\n for j in range(x.size(1)):\n res.append(torch.matmul(x[i][j], y))\n return res\n\n model = torch.jit.script(ListModel())\n x = torch.randn(4, 4, 3, 4)\n y = torch.randn(4, 5)\n self.run_test(model, (x, y))\n\n @skipIfUnsupportedMinOpsetVersion(13)\n def test_list_pop_in_block(self):\n class ListModel(torch.nn.Module):\n def forward(self, x, y):\n res = []\n elem = torch.matmul(x[0], y)\n for i in range(x.size(0)):\n res.append(torch.matmul(x[i], y))\n for i in range(x.size(0)):\n elem = res.pop()\n for i in range(x.size(0)):\n res.append(torch.matmul(x[i], y))\n elem = res.pop()\n return res.append(elem)\n\n model = torch.jit.script(ListModel())\n x = torch.randn(16, 3, 4)\n y = torch.randn(4, 5)\n self.run_test(model, (x, y))\n\n\n @skipIfUnsupportedMinOpsetVersion(13)\n def test_list_del_in_block(self):\n class ListModel(torch.nn.Module):\n def forward(self, x, y):\n res = []\n elem = torch.matmul(x[0], y)\n for i in range(x.size(0)):\n res.append(torch.matmul(x[i], y))\n for i in range(x.size(0)):\n del res[0]\n for i in range(x.size(0)):\n res.append(torch.matmul(x[i], y))\n del res[0]\n return res.append(elem)\n\n model = torch.jit.script(ListModel())\n x = torch.randn(16, 3, 4)\n y = torch.randn(4, 5)\n self.run_test(model, (x, y))\n\n @skipIfUnsupportedMinOpsetVersion(11)\n def test_index_put_inplace_ops(self):\n @torch.jit.script\n def check_init(input_data, hidden_size):\n # type: (torch.Tensor, int) -> torch.Tensor\n batch_size = input_data.size(0)\n spatial_size_0 = input_data.size(2)\n spatial_size_1 = input_data.size(3)\n # generate empty prev_state, if None is provided\n state_size = (2, batch_size, hidden_size, spatial_size_0, spatial_size_1)\n state = torch.zeros(state_size, device=input_data.device)\n if input_data.size(0) == 1:\n state[1] += torch.ones(batch_size, hidden_size, spatial_size_0, spatial_size_1) * 2\n state[1] /= torch.ones(batch_size, hidden_size, spatial_size_0, spatial_size_1) * 3\n for i in range(input_data.size(0)):\n state[1] += torch.ones(batch_size, hidden_size, spatial_size_0, spatial_size_1)\n state[1] /= torch.ones(batch_size, hidden_size, spatial_size_0, spatial_size_1) * i\n return state\n\n class Example(torch.nn.Module):\n def 
__init__(self, hidden_size):\n super().__init__()\n self.hidden_size = hidden_size\n\n def forward(self, input_data):\n state = check_init(input_data, self.hidden_size)\n return state\n\n model = Example(10)\n random_data = torch.rand((1, 5, 30, 30))\n self.run_test(model, (random_data))\n\n @disableScriptTest()\n def test_unsafe_chunk(self):\n class ChunkModel(torch.nn.Module):\n def forward(self, x):\n return torch.unsafe_chunk(x, 3, dim=1)\n\n model = ChunkModel()\n model.eval()\n x = torch.randn(1, 18)\n self.run_test(model, x, input_names=['x'])\n\n def test_symbolic_shape_inference(self):\n # ConstantOfShape is tested in test_embedding_bag\n # Tile is tested in test_repeat\n # test Shape, Reshape, Transpose, Gather\n class ShapeModel(torch.nn.Module):\n def forward(self, x, y):\n shape = x.size()[:3] + (-1,) # shape [4], ('batch', 3, 4, -1)\n y = y.reshape(shape) # batch, 3, 4, 10/batch\n return y.transpose(1, 2)\n\n model = ShapeModel()\n model.eval()\n x = torch.ones(2, 3, 4, 5)\n y = torch.ones(3, 4, 5, 2)\n self.run_test(model, (x, y))\n\n class ViewModel(torch.nn.Module):\n def forward(self, x):\n return x.view(-1)\n\n model = ViewModel()\n model.eval()\n x = torch.tensor(2.)\n self.run_test(model, (x,))\n\n # test prim::ListConstruct for Reshape input 1\n class ViewModel_2(torch.nn.Module):\n def forward(self, x):\n N, C, H, W = x.shape[0], x.shape[2], x.shape[3], x.shape[4]\n x1 = x.view(N, -1, C, H, W)\n x2 = x1.permute(0, 3, 4, 1, 2)\n return x2.reshape(N, -1, C)\n\n model = ViewModel_2()\n model.eval()\n x = torch.ones(2, 3, 4, 5, 6)\n self.run_test(model, x)\n\n @skipIfUnsupportedMinOpsetVersion(9)\n def test_symbolic_shape_inference_arange(self):\n # test Range\n class ArangeModel(torch.nn.Module):\n def forward(self, signal):\n frame_step = 2\n outer_dimensions = signal.size()[:-2]\n frames, frame_length = signal.size()[-2:]\n\n subframe_length = signal.size()[0]\n subframe_step = frame_step // subframe_length\n subframes_per_frame = frame_length // subframe_length\n output_size = frame_step * (frames - 1) + frame_length\n output_subframes = output_size // subframe_length\n\n frame = torch.arange(0, output_subframes)\n return frame\n\n model = ArangeModel()\n model.eval()\n M, C, K, N = 1, 2, 3, 4\n x = torch.randint(5, (M, C, K, N))\n y = torch.randint(5, (M, C + 1, K + 1, N + 1))\n self.run_test(model, x)\n self.run_test(model, x, input_names=['x'],\n dynamic_axes={'x' : [0, 1, 2, 3]}, test_with_inputs=[(x,), (y,)])\n\n @skipIfUnsupportedMinOpsetVersion(11)\n def test_symbolic_shape_inference_box(self):\n # test NonZero\n class BoxModel(torch.nn.Module):\n def forward(self, boxes):\n min_size = 1e-2\n ws, hs = boxes[:, 2] - boxes[:, 0], boxes[:, 3] - boxes[:, 1]\n keep = (ws >= min_size) & (hs >= min_size)\n keep = torch.where(keep)[0]\n return keep\n\n model = BoxModel()\n model.eval()\n x = torch.ones(2, 4)\n y = torch.ones(3, 5)\n self.run_test(model, x)\n self.run_test(model, x, input_names=['x'],\n dynamic_axes={'x' : [0, 1]}, test_with_inputs=[(x,), (y,)])\n\n @skipIfUnsupportedMinOpsetVersion(11)\n def test_symbolic_shape_inference_box_if(self):\n # test If\n class BoxIfModel(torch.nn.Module):\n def forward(self, boxes, scores):\n score_thresh = 0.0\n inds = torch.where(scores > score_thresh)[0]\n boxes_1 = boxes[inds]\n if boxes_1.numel() > 3:\n return boxes_1\n else:\n return boxes_1 * 2\n\n model = BoxIfModel()\n model.eval()\n boxes = torch.ones(2, 4)\n scores = torch.ones(1, 4)\n self.run_test(model, (boxes, scores))\n\n 
@skipIfUnsupportedMinOpsetVersion(11)\n def test_symbolic_shape_inference_arange_2(self):\n # test Range\n class ArangeModel(torch.nn.Module):\n def forward(self, start):\n return torch.arange(start.size(0), 8.5, 1.5, dtype=torch.int64)\n x = torch.randn(2, 3, 4)\n self.run_test(ArangeModel(), (x,))\n\n class ArangeModel2(torch.nn.Module):\n def forward(self, start):\n return torch.arange(start.size(0), 8.5, 1.5, dtype=torch.double)\n x = torch.randn(2, 3, 4)\n self.run_test(ArangeModel2(), (x,))\n\n @skipIfUnsupportedMinOpsetVersion(9)\n def test_symbolic_shape_inference_nonzero(self):\n class OneLikeModel(torch.nn.Module):\n def forward(self, x):\n ones = torch.ones_like(x, dtype=torch.float, layout=torch.strided, device=torch.device('cpu'))\n return torch.nonzero(ones)\n\n x = torch.randn(2)\n self.run_test(OneLikeModel(), x)\n x = torch.randn(2, 3, 4)\n self.run_test(OneLikeModel(), x)\n\n class ZeroLikeModel(torch.nn.Module):\n def forward(self, x):\n zeros = torch.zeros_like(x, dtype=torch.float, layout=torch.strided, device=torch.device('cpu'))\n return torch.nonzero(zeros)\n\n x = torch.randn(2)\n self.run_test(ZeroLikeModel(), x)\n x = torch.randn(2, 3, 4)\n self.run_test(ZeroLikeModel(), x)\n\n @skipIfUnsupportedMinOpsetVersion(9)\n def test_symbolic_shape_inference_expand_1(self):\n class ExpandModel(torch.nn.Module):\n def forward(self, x):\n return x.expand(4, 6, 2)\n x = torch.randn(6, 1, requires_grad=True)\n self.run_test(ExpandModel(), (x,))\n\n @skipIfUnsupportedMinOpsetVersion(9)\n @disableScriptTest() # Test code not scriptable\n def test_symbolic_shape_inference_expand_2(self):\n class M(torch.nn.Module):\n def forward(self, x):\n input_shape = x.size()\n batch_size, seq_length = input_shape\n seq_ids = torch.arange(seq_length)\n causal_mask = seq_ids[None, None, :].repeat(batch_size, seq_length, 1) <= seq_ids[None, :, None]\n return causal_mask.transpose(0, 1)\n x = torch.randn(3, 16)\n self.run_test(M(), (x,))\n\n @skipIfUnsupportedMinOpsetVersion(10)\n @disableScriptTest() # Test code not scriptable\n def test_symbolic_shape_inference_slice(self):\n class M(torch.nn.Module):\n def forward(self, x, position_bias):\n input_shape = x.size()\n batch_size, seq_length = input_shape\n position_bias = position_bias[:, :, -seq_length:, :]\n return position_bias.transpose(0, 1)\n x = torch.randn(3, 16)\n position_bias = torch.randn(1, 3, 20, 8)\n self.run_test(M(), (x, position_bias))\n\n def test_symbolic_shape_inference_slice_2(self):\n class M(torch.nn.Module):\n def forward(self, position_bias):\n position_bias = position_bias[:, :, -2:, :]\n return position_bias.transpose(0, 1)\n position_bias = torch.randn(1, 3, 20, 8)\n self.run_test(M(), (position_bias,))\n\n @skipIfUnsupportedMinOpsetVersion(9)\n @disableScriptTest()\n def test_symbolic_shape_inference_time(self):\n input = torch.randn(RNN_SEQUENCE_LENGTH, BATCH_SIZE, RNN_INPUT_SIZE)\n h0 = torch.randn(1, BATCH_SIZE, RNN_HIDDEN_SIZE)\n c0 = torch.randn(1, BATCH_SIZE, RNN_HIDDEN_SIZE)\n model_lstm = torch.nn.LSTM(RNN_INPUT_SIZE, RNN_HIDDEN_SIZE, 1, bidirectional=False)\n self.run_test(model_lstm, (input, (h0, c0)), input_names=['x', 'y'],\n dynamic_axes={'x' : [0, 1]})\n model_gru = torch.nn.GRU(RNN_INPUT_SIZE, RNN_HIDDEN_SIZE, 1, bidirectional=False, bias=False)\n self.run_test(model_gru, (input, h0), input_names=['x', 'y'],\n dynamic_axes={'x' : [0, 1]})\n model_rnn = torch.nn.RNN(RNN_INPUT_SIZE, RNN_HIDDEN_SIZE, 1, bidirectional=False, bias=False)\n self.run_test(model_rnn, (input, h0), input_names=['x', 'y'],\n 
dynamic_axes={'x' : [0, 1]})\n\n def test_symbolic_shape_inference_dynamic_axes(self):\n class M(torch.nn.Module):\n def forward(self, input_ids):\n input_shape = input_ids.size()\n input_ids = input_ids.view(-1, input_shape[-1])\n return input_ids.transpose(0, 1)\n x = torch.randn(3, 16)\n self.run_test(M(), (x,), input_names=['input_ids'],\n dynamic_axes={'input_ids': {0: 'batch', 1: 'sequence'}})\n\n @skipIfUnsupportedMinOpsetVersion(9)\n def test_hann_window_periodic(self):\n class HannWindowModule_Periodic(torch.nn.Module):\n def __init__(self):\n super(HannWindowModule_Periodic, self).__init__()\n self.window_length = 0\n\n def forward(self, x, window_length: int):\n self.window_length = window_length\n return torch.add(x, torch.hann_window(self.window_length, periodic=True, dtype=torch.float))\n\n win_length = 100\n x = torch.randn(win_length)\n\n module = HannWindowModule_Periodic()\n self.run_test(module, (x, win_length))\n\n @skipIfUnsupportedMinOpsetVersion(9)\n def test_hann_window_not_periodic(self):\n class HannWindowModule_NotPeriodic(torch.nn.Module):\n def __init__(self):\n super(HannWindowModule_NotPeriodic, self).__init__()\n self.window_length = 0\n\n def forward(self, x, window_length: int):\n self.window_length = window_length\n return torch.add(x, torch.hann_window(self.window_length, periodic=False, dtype=torch.float))\n\n win_length = 100\n x = torch.randn(win_length)\n\n module = HannWindowModule_NotPeriodic()\n self.run_test(module, (x, win_length))\n\n @skipIfUnsupportedMinOpsetVersion(9)\n @disableScriptTest()\n def test_hann_window_default_values(self):\n class HannWindowModule(torch.nn.Module):\n def __init__(self):\n super(HannWindowModule, self).__init__()\n self.window_length = 0\n\n def forward(self, x, window_length: int):\n import torch.nn.functional as F\n self.window_length = window_length\n return torch.add(x, F.relu(torch.hann_window(self.window_length)))\n\n win_length = 100\n x = torch.randn(win_length, dtype=torch.float)\n module = HannWindowModule()\n\n output = module(x, win_length)\n self.run_test(module, (x, win_length))\n\n @skipIfUnsupportedMinOpsetVersion(12)\n @disableScriptTest()\n def test_tensordot_dim_count(self):\n class M(torch.nn.Module):\n def forward(self, x, y):\n output = torch.tensordot(x, y, 2)\n return output\n\n x = torch.randint(6, (7, 5, 3, 4))\n y = torch.randint(6, (3, 4, 9, 2))\n\n self.run_test(M(), (x, y))\n\n @skipIfUnsupportedMinOpsetVersion(12)\n def test_tensordot_dim_list(self):\n class M(torch.nn.Module):\n def forward(self, x, y):\n output = torch.tensordot(x, y, ([1, -2, -1], [1, 0, 3]))\n return output\n\n x = torch.randint(6, (7, 4, 3, 5, 2))\n y = torch.randint(6, (5, 4, 4, 2, 6))\n\n self.run_test(M(), (x, y))\n\n @skipIfUnsupportedMinOpsetVersion(12)\n @disableScriptTest()\n def test_tensordot_dynamic_dim(self):\n class M(torch.nn.Module):\n def forward(self, x, y):\n output = torch.tensordot(x, y, 2)\n return output\n\n x = torch.randint(6, (7, 5, 3, 4))\n y = torch.randint(6, (3, 4, 9, 2))\n\n new_x = torch.randint(6, (8, 6, 2, 5))\n new_y = torch.randint(6, (2, 5, 3, 4))\n\n self.run_test(M(), (x, y), test_with_inputs=[(new_x, new_y)],\n input_names=['input_x', 'input_y'],\n dynamic_axes={'input_x': [0, 1, 2, 3], 'input_y': [0, 1, 2, 3]})\n\ndef make_test(name, base, layer, bidirectional, initial_state,\n variable_length, dropout,\n **extra_kwargs):\n test_name = str('_'.join([\n 'test', name, layer[1],\n bidirectional[1], initial_state[1],\n variable_length[1], dropout[1]\n ]))\n\n # Cannot export 
with older opsets because of 'ConstantFill' op\n # ConstantFill was a temp op removed at opset 8. This is no longer supported by onnxruntime\n # There are still some issues prevent us from enabling script test for these scenarios:\n # test_gru_*:\n # Operator aten::as_tensor is not supported by exporter yet.\n # - https://msdata.visualstudio.com/Vienna/_workitems/edit/1055382\n # Operator aten::_pack_padded_sequence is not supported by exporter yet.\n # - https://msdata.visualstudio.com/Vienna/_workitems/edit/1055384\n @disableScriptTest()\n @skipIfUnsupportedMinOpsetVersion(9)\n def f(self):\n self._dispatch_rnn_test(\n base,\n layers=layer[0],\n bidirectional=bidirectional[0],\n initial_state=initial_state[0],\n packed_sequence=variable_length[0],\n dropout=dropout[0],\n **extra_kwargs)\n\n f.__name__ = test_name\n setattr(TestONNXRuntime, f.__name__, f)\n\ndef setup_rnn_tests():\n layers_opts = [\n (1, 'unilayer'),\n (3, 'trilayer')\n ]\n bidirectional_opts = [\n (False, 'forward'),\n (True, 'bidirectional')\n ]\n initial_state_opts = [\n (True, 'with_initial_state'),\n (False, 'no_initial_state')\n ]\n variable_length_opts = [\n (0, 'without_sequence_lengths'),\n (1, 'with_variable_length_sequences'),\n (2, 'with_batch_first_sequence_lengths')\n ]\n dropout_opts = [\n (0.2, 'with_dropout'),\n (0.0, 'without_dropout')\n ]\n test_count = 0\n for (layer, bidirectional, initial_state, variable_length, dropout) in \\\n itertools.product(\n layers_opts,\n bidirectional_opts,\n initial_state_opts,\n variable_length_opts,\n dropout_opts,):\n\n for base, name, extra_kwargs in (\n ('elman', 'elman_relu', {'nonlinearity': u'relu'}),\n ('elman', 'elman_tanh', {'nonlinearity': u'tanh'}),\n ('lstm', 'lstm', {}),\n ('gru', 'gru', {})\n ):\n make_test(name, base, layer, bidirectional, initial_state,\n variable_length, dropout,\n **extra_kwargs)\n test_count += 1\n\n # sanity check that a representative example does exist\n TestONNXRuntime.test_gru_trilayer_forward_with_initial_state_without_sequence_lengths_with_dropout\n\n # make sure no one accidentally disables all the tests without\n # noticing\n if test_count != 192:\n raise ValueError('Expected 192 tests but found {}'.format(test_count))\n\nsetup_rnn_tests()\n\n\n# opset 7 tests\nTestONNXRuntime_opset7 = type(str(\"TestONNXRuntime_opset7\"),\n (unittest.TestCase,),\n dict(TestONNXRuntime.__dict__, opset_version=7))\n\n# opset 8 tests\nTestONNXRuntime_opset8 = type(str(\"TestONNXRuntime_opset8\"),\n (unittest.TestCase,),\n dict(TestONNXRuntime.__dict__, opset_version=8))\n\n\n# opset 10 tests\nTestONNXRuntime_opset10 = type(str(\"TestONNXRuntime_opset10\"),\n (unittest.TestCase,),\n dict(TestONNXRuntime.__dict__, opset_version=10))\n\n# opset 11 tests\nTestONNXRuntime_opset11 = type(str(\"TestONNXRuntime_opset11\"),\n (unittest.TestCase,),\n dict(TestONNXRuntime.__dict__, opset_version=11))\n\n# opset 12 tests\nTestONNXRuntime_opset12 = type(str(\"TestONNXRuntime_opset12\"),\n (unittest.TestCase,),\n dict(TestONNXRuntime.__dict__, opset_version=12))\n\n# opset 9 tests, with keep_initializers_as_inputs=False for\n# IR version 4 style export.\nTestONNXRuntime_opset9_IRv4 = type(str(\"TestONNXRuntime_opset9_IRv4\"),\n (unittest.TestCase,),\n dict(TestONNXRuntime.__dict__,\n keep_initializers_as_inputs=False))\n\n\n# opset 10 tests, with keep_initializers_as_inputs=False for\n# IR version 4 style export.\nTestONNXRuntime_opset10_IRv4 = type(str(\"TestONNXRuntime_opset10_IRv4\"),\n (unittest.TestCase,),\n dict(TestONNXRuntime.__dict__, 
opset_version=10,\n keep_initializers_as_inputs=False))\n\n\n# opset 11 tests, with keep_initializers_as_inputs=False for\n# IR version 4 style export.\nTestONNXRuntime_opset11_IRv4 = type(str(\"TestONNXRuntime_opset11_IRv4\"),\n (unittest.TestCase,),\n dict(TestONNXRuntime.__dict__, opset_version=11,\n keep_initializers_as_inputs=False))\n\n# opset 12 tests, with keep_initializers_as_inputs=False for\n# IR version 4 style export.\nTestONNXRuntime_opset12_IRv4 = type(str(\"TestONNXRuntime_opset12_IRv4\"),\n (unittest.TestCase,),\n dict(TestONNXRuntime.__dict__, opset_version=12,\n keep_initializers_as_inputs=False))\n\n# opset 13 tests\nTestONNXRuntime_opset13 = type(str(\"TestONNXRuntime_opset13\"),\n (unittest.TestCase,),\n dict(TestONNXRuntime.__dict__, opset_version=13,\n keep_initializers_as_inputs=False,\n onnx_shape_inference=True))\n\nif __name__ == '__main__':\n unittest.main()\n", "# -*- coding: utf-8 -*-\nimport inspect\nfrom io import BytesIO\nfrom sys import version_info\nfrom textwrap import dedent\nfrom unittest import skipIf\n\nfrom torch.package import PackageExporter, PackageImporter, is_from_package\nfrom torch.testing._internal.common_utils import run_tests\n\ntry:\n from .common import PackageTestCase\nexcept ImportError:\n # Support the case where we run this file directly.\n from common import PackageTestCase\n\n\nclass TestMisc(PackageTestCase):\n \"\"\"Tests for one-off or random functionality. Try not to add to this!\"\"\"\n\n def test_file_structure(self):\n \"\"\"\n Tests package's Directory structure representation of a zip file. Ensures\n that the returned Directory prints what is expected and filters\n inputs/outputs correctly.\n \"\"\"\n buffer = BytesIO()\n\n export_plain = dedent(\n \"\"\"\\\n ├── .data\n │ ├── extern_modules\n │ └── version\n ├── main\n │ └── main\n ├── obj\n │ └── obj.pkl\n ├── package_a\n │ ├── __init__.py\n │ └── subpackage.py\n └── module_a.py\n \"\"\"\n )\n export_include = dedent(\n \"\"\"\\\n ├── obj\n │ └── obj.pkl\n └── package_a\n └── subpackage.py\n \"\"\"\n )\n import_exclude = dedent(\n \"\"\"\\\n ├── .data\n │ ├── extern_modules\n │ └── version\n ├── main\n │ └── main\n ├── obj\n │ └── obj.pkl\n ├── package_a\n │ ├── __init__.py\n │ └── subpackage.py\n └── module_a.py\n \"\"\"\n )\n\n with PackageExporter(buffer, verbose=False) as he:\n import module_a\n import package_a\n import package_a.subpackage\n\n obj = package_a.subpackage.PackageASubpackageObject()\n he.save_module(module_a.__name__)\n he.save_module(package_a.__name__)\n he.save_pickle(\"obj\", \"obj.pkl\", obj)\n he.save_text(\"main\", \"main\", \"my string\")\n\n\n buffer.seek(0)\n hi = PackageImporter(buffer)\n\n file_structure = hi.file_structure()\n # remove first line from testing because WINDOW/iOS/Unix treat the buffer differently\n self.assertEqual(\n dedent(\"\\n\".join(str(file_structure).split(\"\\n\")[1:])),\n export_plain,\n )\n file_structure = hi.file_structure(\n include=[\"**/subpackage.py\", \"**/*.pkl\"]\n )\n self.assertEqual(\n dedent(\"\\n\".join(str(file_structure).split(\"\\n\")[1:])),\n export_include,\n )\n\n file_structure = hi.file_structure(exclude=\"**/*.storage\")\n self.assertEqual(\n dedent(\"\\n\".join(str(file_structure).split(\"\\n\")[1:])),\n import_exclude,\n )\n\n def test_file_structure_has_file(self):\n \"\"\"\n Test Directory's has_file() method.\n \"\"\"\n buffer = BytesIO()\n with PackageExporter(buffer, verbose=False) as he:\n import package_a.subpackage\n\n obj = package_a.subpackage.PackageASubpackageObject()\n 
he.save_pickle(\"obj\", \"obj.pkl\", obj)\n\n buffer.seek(0)\n\n importer = PackageImporter(buffer)\n file_structure = importer.file_structure()\n self.assertTrue(file_structure.has_file(\"package_a/subpackage.py\"))\n self.assertFalse(file_structure.has_file(\"package_a/subpackage\"))\n\n def test_is_from_package(self):\n \"\"\"is_from_package should work for objects and modules\"\"\"\n import package_a.subpackage\n\n buffer = BytesIO()\n obj = package_a.subpackage.PackageASubpackageObject()\n\n with PackageExporter(buffer, verbose=False) as pe:\n pe.save_pickle(\"obj\", \"obj.pkl\", obj)\n\n buffer.seek(0)\n pi = PackageImporter(buffer)\n mod = pi.import_module(\"package_a.subpackage\")\n loaded_obj = pi.load_pickle(\"obj\", \"obj.pkl\")\n\n self.assertFalse(is_from_package(package_a.subpackage))\n self.assertTrue(is_from_package(mod))\n\n self.assertFalse(is_from_package(obj))\n self.assertTrue(is_from_package(loaded_obj))\n\n\n @skipIf(version_info < (3, 7), \"mock uses __getattr__ a 3.7 feature\")\n def test_custom_requires(self):\n buffer = BytesIO()\n\n class Custom(PackageExporter):\n def require_module(self, name, dependencies):\n if name == \"module_a\":\n self.save_mock_module(\"module_a\")\n elif name == \"package_a\":\n self.save_source_string(\n \"package_a\", \"import module_a\\nresult = 5\\n\"\n )\n else:\n raise NotImplementedError(\"wat\")\n\n with Custom(buffer, verbose=False) as he:\n he.save_source_string(\"main\", \"import package_a\\n\")\n\n buffer.seek(0)\n hi = PackageImporter(buffer)\n hi.import_module(\"module_a\").should_be_mocked\n bar = hi.import_module(\"package_a\")\n self.assertEqual(bar.result, 5)\n\n def test_inspect_class(self):\n \"\"\"Should be able to retrieve source for a packaged class.\"\"\"\n import package_a.subpackage\n\n buffer = BytesIO()\n obj = package_a.subpackage.PackageASubpackageObject()\n\n with PackageExporter(buffer, verbose=False) as pe:\n pe.save_pickle(\"obj\", \"obj.pkl\", obj)\n\n buffer.seek(0)\n pi = PackageImporter(buffer)\n packaged_class = pi.import_module(\n \"package_a.subpackage\"\n ).PackageASubpackageObject\n regular_class = package_a.subpackage.PackageASubpackageObject\n\n packaged_src = inspect.getsourcelines(packaged_class)\n regular_src = inspect.getsourcelines(regular_class)\n self.assertEqual(packaged_src, regular_src)\n\n def test_dunder_package_present(self):\n \"\"\"\n The attribute '__torch_package__' should be populated on imported modules.\n \"\"\"\n import package_a.subpackage\n\n buffer = BytesIO()\n obj = package_a.subpackage.PackageASubpackageObject()\n\n with PackageExporter(buffer, verbose=False) as pe:\n pe.save_pickle(\"obj\", \"obj.pkl\", obj)\n\n buffer.seek(0)\n pi = PackageImporter(buffer)\n mod = pi.import_module(\n \"package_a.subpackage\"\n )\n self.assertTrue(hasattr(mod, \"__torch_package__\"))\n\n def test_dunder_package_works_from_package(self):\n \"\"\"\n The attribute '__torch_package__' should be accessible from within\n the module itself, so that packaged code can detect whether it's\n being used in a packaged context or not.\n \"\"\"\n import package_a.use_dunder_package as mod\n\n buffer = BytesIO()\n\n with PackageExporter(buffer, verbose=False) as pe:\n pe.save_module(mod.__name__)\n\n buffer.seek(0)\n pi = PackageImporter(buffer)\n imported_mod = pi.import_module(\n mod.__name__\n )\n self.assertTrue(imported_mod.is_from_package())\n self.assertFalse(mod.is_from_package())\n\n\nif __name__ == \"__main__\":\n run_tests()\n" ]
[ [ "torch.fmod", "torch.randint", "torch.max", "torch.zeros", "torch.nn.GRU", "torch.narrow", "torch.multinomial", "torch.numel", "torch.where", "torch.topk", "torch.device", "numpy.where", "torch.pow", "torch.nn.EmbeddingBag", "torch.outer", "torch.sqrt", "torch.randn", "torch.logical_xor", "torch.square", "torch.nn.functional.hardswish", "torch.linalg.det", "torch.nn.GroupNorm", "torch.squeeze", "torch.var_mean", "torch.full", "torch.nn.ReplicationPad1d", "torch.nn.functional.avg_pool2d", "torch.nn.Conv2d", "torch.nn.functional.binary_cross_entropy_with_logits", "torch.min", "torch.nn.Linear", "torch.nn.AvgPool2d", "torch.log1p", "torch.nn.BatchNorm2d", "numpy.sum", "torch.take", "torch.nn.ReflectionPad2d", "torch.nn.KLDivLoss", "torch._C._jit_set_profiling_executor", "torch.nn.functional.log_softmax", "torch.nn.ConvTranspose3d", "torch.unsafe_chunk", "torch.nn.functional.glu", "torch.nn.Hardswish", "torch.nn.RNN", "torch.nn.utils.rnn.pad_sequence", "torch.sum", "torch.repeat_interleave", "torch.jit._flatten", "torch.FloatTensor", "torch.cuda.is_available", "torch.split", "torch.logical_and", "torch.nn.ReplicationPad2d", "torch.norm", "torch.nn.ConstantPad2d", "torch.einsum", "torch.argmin", "torch.tensor", "torch.logdet", "torch.fake_quantize_per_tensor_affine", "torch.rand", "torch.reciprocal", "torch.nn.TransformerEncoder", "torch.nonzero", "torch.sort", "torch.tensordot", "torch.nn.AdaptiveMaxPool1d", "torch.nn.LogSoftmax", "torch.mv", "torch.std_mean", "torch.zeros_like", "torch.nn.functional.hardtanh", "torch.stack", "torch.nn.ReflectionPad1d", "torch.nn.LSTM", "torch.manual_seed", "torch.matmul", "torch.onnx._export", "torch.IntTensor", "torch.nn.functional.unfold", "torch.nn.ReLU", "torch.meshgrid", "torch.nn.BatchNorm3d", "torch.hann_window", "torch.nn.AvgPool1d", "torch.randn_like", "torch.cat", "torch.set_default_dtype", "torch.nn.Embedding", "torch.unique", "torch.cuda.manual_seed_all", "torch.nn.functional.interpolate", "torch.full_like", "numpy.random.randint", "torch.nn.AvgPool3d", "torch.softmax", "torch.mm", "torch.jit.trace", "torch.ones", "torch.add", "torch.round", "torch.from_numpy", "torch.nn.MaxPool1d", "torch.nn.CELU", "torch.std", "torch.arange", "torch.index_select", "torch.nn.functional.pad", "torch._dim_arange", "torch.nn.ConvTranspose2d", "torch.pixel_shuffle", "torch.unsqueeze", "torch.nn.TransformerEncoderLayer", "torch.nn.Conv3d", "torch.log", "numpy.testing.assert_allclose", "torch.nn.Hardsigmoid", "torch.nn.SiLU", "torch.logical_or", "torch.fake_quantize_per_channel_affine", "torch.nn.functional.normalize", "torch.nn.functional.embedding", "torch.true_divide", "torch.chunk", "torch.cumsum", "torch.nn.ConstantPad1d", "torch.nn.Softmax", "torch.rand_like", "torch.remainder", "torch._C._jit_set_profiling_mode", "torch.nn.functional.one_hot", "torch.no_grad", "torch.flatten", "torch.logsumexp", "torch.jit.script", "torch.onnx.export", "torch.nn.Dropout", "torch.nn.CrossEntropyLoss", "torch.nn.functional.embedding_bag", "numpy.arange", "torch.eye", "torch._shape_as_tensor", "torch.get_default_dtype", "torch.nn.ZeroPad2d", "torch.nn.functional.max_pool2d", "torch.nn.functional.linear", "torch.nn.BatchNorm1d", "torch.nn.Sequential", "torch.nn.NLLLoss", "torch.div", "torch.empty", "torch.nn.PReLU", "torch.nn.functional.gelu", "torch.nn.Conv1d", "torch.flip", "torch.baddbmm", "torch.nn.Hardtanh", "numpy.random.seed", "torch.nn.LayerNorm", "torch.nn.MaxPool2d", "torch.nn.MaxPool3d", "torch.var", "torch.nn.ConvTranspose1d", "torch.clamp", "torch.argmax" ], [ 
"torch.package.PackageExporter", "torch.package.PackageImporter", "torch.testing._internal.common_utils.run_tests", "torch.package.is_from_package" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
CALFEM/calfem-py
[ "26d4082ca6b907c48ad814733c733ae30a959657", "26d4082ca6b907c48ad814733c733ae30a959657", "26d4082ca6b907c48ad814733c733ae30a959657" ]
[ "calfem/_export.py", "examples/exs_flw_diff2.py", "examples/exv4.py" ]
[ "import pickle\nimport scipy.io\nimport numpy as np\n\n'''\nHandle reading and writing of geometry and generated mesh from the program\n'''\n\n\ndef loadGeometry(name):\n with open(name, 'rb') as file:\n test = pickle.load(file)\n return test\n\n\ndef saveGeometry(g, name=\"Untitled\"):\n if not name.endswith(\".cfg\"):\n name = name + \".cfg\"\n with open(name, 'wb') as file:\n pickle.dump(g, file)\n\n\ndef loadMesh(name):\n with open(name, 'rb') as file:\n mesh = pickle.load(file)\n return mesh\n\n\ndef saveMesh(mesh, name=\"Untitled\"):\n if not name.endswith(\".cfm\"):\n name = name + \".cfm\"\n with open(name, 'wb') as file:\n pickle.dump(mesh, file)\n\n\ndef saveArrays(coords, edof, dofs, bdofs, elementmarkers, boundaryElements, markerDict ,name=\"Untitled\"):\n if not name.endswith(\".cfma\"):\n name = name + \".cfma\"\n with open(name, 'wb') as file:\n pickle.dump(coords, file)\n pickle.dump(edof, file)\n pickle.dump(dofs, file)\n #for key in bdofs.items():\n # print(key, markerDict[key])\n pickle.dump(bdofs, file)\n pickle.dump(elementmarkers, file)\n pickle.dump(boundaryElements, file)\n pickle.dump(markerDict, file)\n\n\ndef loadArrays(name):\n with open(name, 'rb') as file:\n coords = pickle.load(file)\n edof= pickle.load(file)\n dofs = pickle.load(file)\n bdofs = pickle.load(file)\n elementmarkers = pickle.load(file)\n boundaryElements = pickle.load(file)\n markerDict = pickle.load(file)\n\n return coords, edof, dofs, bdofs, elementmarkers, boundaryElements, markerDict\n\n\ndef saveMatlabArrays(coords, edof, dofs, bdofs, elementmarkers, boundaryElements, markerDict, name=\"Untitled\"):\n if not name.endswith(\".mat\"):\n name = name + \".mat\"\n saveDict = {}\n saveDict[\"coords\"] = coords.astype('double')\n # Convert to CALFEM Edof definition with element number as first index\n new_column = np.arange(1, np.size(edof, 0) + 1)[:, np.newaxis]\n edof = np.append(new_column, edof, axis=1)\n\n saveDict[\"edof\"] = edof.astype('double')\n saveDict[\"dofs\"] = dofs.astype('double')\n # bdofs = {str(k): v for k, v in bdofs.items()} # MATLAB struct needs keys as strings\n #print(markerDict)\n newBdof = {}\n for name, index in bdofs.items():\n print(name, index)\n if index == 0:\n newBdof[\"None\"] = 0\n else:\n newBdof[markerDict[index]] = name\n\n saveDict[\"bdofs\"] = newBdof\n elementmarkers = np.asarray(elementmarkers)\n elementmarkers = elementmarkers + 1 # To avoid problems with one indexing in MATLAB\n saveDict[\"elementmarkers\"] = elementmarkers\n scipy.io.savemat(name, saveDict)\n\n", "# -*- coding: utf-8 -*-\n\n# example exs8\n#----------------------------------------------------------------\n# PURPOSE \n# Analysis of two dimensional diffusion\n#----------------------------------------------------------------\n\n# REFERENCES\n# Karl-Gunnar Olsson 1995-10-08\n# Ola Dahlblom 2004-09-14\n#----------------------------------------------------------------\n\nimport numpy as np\nimport calfem.vis_mpl as cfv\nimport calfem.core as cfc\n\n# ----- System matrices -----\n\nK = np.zeros((15,15))\nf = np.zeros((15,1))\nCoord = np.array([\n [0, 0 ],[0.025, 0 ],\n [0.05, 0 ],[0, 0.025],\n [0.025, 0.025],[0.05, 0.025],\n [0, 0.05 ],[0.025, 0.05 ],\n [0.05, 0.05 ],[0, 0.075],\n [0.025, 0.075],[0.05, 0.075],\n [0, 0.1 ],[0.025, 0.1 ],\n [0.05, 0.1 ]\n])\n\nDof = np.array([\n [1 ],[2 ],[3 ],\n [4 ],[5 ],[6 ],\n [7 ],[8 ],[9 ],\n [10],[11],[12],\n [13],[14],[15]\n])\n\n# ----- Element properties, topology and coordinates -----\n\nep = np.array([1])\nD = np.array([\n [1, 0],\n [0, 
1]\n])\nEdof = np.array([\n [ 1, 2, 5, 4],\n [ 2, 3, 6, 5],\n [ 4, 5, 8, 7],\n [ 5, 6, 9, 8],\n [ 7, 8,11,10],\n [ 8, 9,12,11],\n [10,11,14,13],\n [11,12,15,14],\n])\nEx,Ey = cfc.coordxtr(Edof,Coord,Dof)\n\n# ----- Generate FE-mesh -----\n\n#clf; eldraw2(Ex,Ey,[1 3 0],Edof(:,1));\n#disp('PRESS ENTER TO CONTINUE'); pause; clf;\n\n# ----- Create and assemble element matrices -----\n\nfor i in range(8):\n Ke = cfc.flw2qe(Ex[i],Ey[i],ep,D)\n K = cfc.assem(Edof[i],K,Ke)\n\n# ----- Solve equation system -----\n\nbcPrescr = np.array([1,2,3,4,7,10,13,14,15])\nbcVal = np.array([0,0,0,0,0,0,0.5e-3,1e-3,1e-3])\na,r = cfc.solveq(K,f,bcPrescr,bcVal)\n\n# ----- Compute element flux vector -----\n\nEd = cfc.extractEldisp(Edof,a)\nEs = np.zeros((8,2))\nfor i in range(8):\n Es[i],Et = cfc.flw2qs(Ex[i],Ey[i],ep,D,Ed[i])\n\n# ----- Draw flux vectors and contourlines -----\n\nprint(Ex)\nprint(Ey)\nprint(a)\nprint(Ed)\n\ncfv.eldraw2(Ex, Ey, [1, 2, 1], range(1,Ex.shape[0]+1))\ncfv.eliso2_mpl(Ex,Ey,Ed)\ncfv.showAndWaitMpl()\n\n#cfv.showAndWait()\n#sfac=scalfact2(Ex,Ey,Es,0.5);\n#eldraw2(Ex,Ey); \n#elflux2(Ex,Ey,Es,[1,4],sfac); \n#pltscalb2(sfac,[2e-2 0.06 0.01],4);\n#disp('PRESS ENTER TO CONTINUE'); pause; clf;\n#eldraw2(Ex,Ey,[1,3,0]); \n#eliso2(Ex,Ey,Ed,5,[1,4]);\n#hold off; \n#echo off;\n\n# ----------------- End --------------------------------\n\n", "# -*- coding: utf-8 -*-\n\"\"\"\n3D example using Vedo, solid elements\n\n@author: Andreas Åmand\n\"\"\"\n\nimport numpy as np\nimport calfem.core as cfc\nimport calfem.vedo as cfv\nimport calfem.vedo_utils as cfvu\n\nedof,coord,dof,a,ed,bc,f_dofs,Stress_tensors,vM_el,vM_n,lamb,eig = cfvv.import_mat('exv4',['edof','coord','dof','a','ed','bc','force_dofs','Stress_tensors','vM_el','vM_n','lambda','eig'])\n\nex,ey,ez = cfc.coordxtr(edof,coord,dof)\n\neigenmode = 0 # Choose what eigenmode to display in figure 5/6\n\nndof = np.size(dof, axis = 0)*np.size(dof, axis = 1)\nncoord = np.size(coord, axis = 0)\nnel = np.size(edof, axis = 0)\n\nmode_a = np.zeros((nel, 1))\ntot_deform = np.zeros(8)\nfor i in range(nel):\n\tcoords = cfvu.get_coord_from_edof(edof[i,:],dof,4)\n\tfor j in range(8):\n\t\tdeform = cfvu.get_a_from_coord(coords[j],3,eig[:,eigenmode])\n\t\ttot_deform[j] = np.sqrt(deform[0]**2 + deform[1]**2 + deform[2]**2)\n\n\tmode_a[i,:] = np.average(tot_deform)\n\nFreq=np.sqrt(lamb[eigenmode]/(2*np.pi))\n\n''' Principal stresses '''\n\nps_val = np.zeros((nel,3))\nps_vec = np.zeros((nel,3,3))\nfor i in range(nel):\n ps_val[i,:], ps_vec[i,:,:] = np.linalg.eig(Stress_tensors[:,:,i])\n\n\n\nupd_ed = np.zeros((nel,8))\nfor i in range(nel):\n upd_ed[i,0] = np.sqrt( ed[i,0]**2 + ed[i,1]**2 + ed[i,2]**2 )\n upd_ed[i,1] = np.sqrt( ed[i,3]**2 + ed[i,4]**2 + ed[i,5]**2 )\n\n upd_ed[i,2] = np.sqrt( ed[i,6]**2 + ed[i,7]**2 + ed[i,8]**2 )\n upd_ed[i,3] = np.sqrt( ed[i,9]**2 + ed[i,10]**2 + ed[i,11]**2 )\n\n upd_ed[i,4] = np.sqrt( ed[i,12]**2 + ed[i,13]**2 + ed[i,14]**2 )\n upd_ed[i,5] = np.sqrt( ed[i,15]**2 + ed[i,16]**2 + ed[i,17]**2 )\n\n upd_ed[i,6] = np.sqrt( ed[i,18]**2 + ed[i,19]**2 + ed[i,20]**2 )\n upd_ed[i,7] = np.sqrt( ed[i,21]**2 + ed[i,22]**2 + ed[i,23]**2 )\n\nbcPrescr = bc\nbc = np.zeros((np.size(bc[:,0]),1))\nf = -5000*np.ones((np.size(f_dofs[:,0]),1))\n\n# First plot, undeformed mesh\ncfv.figure(1)\n\ncfv.draw_mesh(edof,coord,dof,4,scale=0.005,bcPrescr=bcPrescr[:,0],bc=bc[:,0],fPrescr=f_dofs[:,0],f=f[:,0])\ncfv.add_text('Undeformed mesh + Forces & BCs for static analysis')\ncfv.show_and_wait()\n\n# Second plot, deformed mesh with element 
stresses\ncfv.figure(2)\n\nscalefact = 3 #deformation scale factor\nstatic = cfv.draw_displaced_mesh(edof,coord,dof,4,a,vM_el/1000000,def_scale=scalefact,scalar_title='von Mises [MPa]')\n\ncfv.add_text('Static analysis: self-weight & ecc. vertical load', pos='top-left')\ncfv.add_text(f'Deformation scalefactor: {scalefact}',pos='top-right')\ncfv.add_scalar_bar('von Mises [MPa]')\ncfv.show_and_wait()\n\n# Third plot, animation of figure 2\ncfv.figure(3)\nscalefact = 3 #deformation scale factor\n\ncfv.animation(edof,coord,dof,4,a,vM_el/1000000,def_scale=scalefact)\n\ncfv.add_text('Static analysis: self-weight & ecc. vertical load', pos='top-left')\ncfv.add_text(f'Deformation scalefactor: {scalefact}',pos='top-right')\n\ncfv.add_scalar_bar('von Mises [MPa]')\n\n#Start Calfem-vedo visualization\ncfv.show_and_wait()\n\n# Fourth plot, principal stresses for static analysis\ncfv.figure(4)\n\n# Return the mesh for export\ncfv.draw_displaced_mesh(edof,coord,dof,4,a,upd_ed*1000,wireframe=True)\ncfv.elprinc(ex,ey,ez,ps_val/1000000,ps_vec,ed,colormap='coolwarm',unit='MPa')\ncfv.add_scalar_bar('Deformation [mm]')\ncfv.add_text('Static analysis',pos='top-left')\ncfv.add_text('Deformation scalefactor: 1',pos='top-right')\ncfv.add_text('Princ. stress vectors',pos='top-middle')\n\n#Start Calfem-vedo visualization\ncfv.show_and_wait()\n\n# Fifth plot, first mode from eigenvalue analysis\ncfv.figure(5)\n\nscalefact = 100 #deformation scale factor\nmodal = cfv.draw_displaced_mesh(edof,coord,dof,4,eig[:,eigenmode],mode_a*1000,def_scale=scalefact,lines=True,scalar_title='Tot. el. displacement [mm]')\ncfv.add_text(f'Modal analysis: {eigenmode+1}st mode',pos='top-left')\ncfv.add_text(f'Frequency: {round(Freq[0],2)} Hz')\ncfv.add_text(f'Deformation scalefactor: {scalefact}',pos='top-right')\ncfv.add_scalar_bar('Tot. el. displacement [mm]')\ncfv.add_projection(plane='xz',rulers=True)\ncfv.show_and_wait()\n\n# Sixth plot, animation of figure 5\ncfv.figure(6)\n\ncfv.add_text(f'Modal analysis: {eigenmode+1}st mode',pos='top-left')\ncfv.add_text(f'Frequency: {round(Freq[0],2)} Hz')\ncfv.add_text(f'Deformation scalefactor: {scalefact}',pos='top-right')\n\ncfv.animation(edof,coord,dof,4,eig[:,eigenmode],mode_a*1000,def_scale=scalefact,negative=True,scalar_title='Tot. el. displacement [mm]',export=True,file='export/exv4/anim/exv4_modal')\n\ncfv.add_scalar_bar('Tot. el. displacement [mm]')\n\n#Start Calfem-vedo visualization\ncfv.show_and_wait()\n\n# Export the two meshes\ncfv.export_vtk('export/exv4/exv4_static', static)\ncfv.export_vtk('export/exv4/exv4_modal', modal)\n\n# For not exporting animation\n#cfv.animation(edof,coord,dof,4,eig[:,eigenmode],mode_a*1000,def_scale=scalefact,negative=True)\n\n" ]
[ [ "numpy.asarray", "numpy.append", "numpy.size" ], [ "numpy.array", "numpy.zeros" ], [ "numpy.sqrt", "numpy.linalg.eig", "numpy.size", "numpy.average", "numpy.zeros" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
langner/mahotas
[ "1840b629fab325e7fb700f90cf2b662ff35e4205", "1840b629fab325e7fb700f90cf2b662ff35e4205", "1840b629fab325e7fb700f90cf2b662ff35e4205" ]
[ "mahotas/tests/test_center_of_mass.py", "mahotas/tests/test_freeimage.py", "mahotas/thin.py" ]
[ "import numpy as np\nfrom scipy import ndimage\nimport mahotas.center_of_mass\n\nnp.random.seed(2321)\ndef _mean_out(img, axis):\n if len(img.shape) == 2: return img.mean(1-axis)\n if axis == 0:\n return _mean_out(img.mean(1), 0)\n return _mean_out(img.mean(0), axis - 1)\n\ndef slow_center_of_mass(img):\n '''\n Returns the center of mass of img.\n '''\n xs = []\n for axis,si in enumerate(img.shape):\n xs.append(np.mean(_mean_out(img, axis) * np.arange(si)))\n xs = np.array(xs)\n xs /= img.mean()\n return xs\n\n\ndef test_cmp_ndimage():\n R = (255*np.random.rand(128,256)).astype(np.uint16)\n R += np.arange(256, dtype=np.uint16)\n m0,m1 = mahotas.center_of_mass(R)\n n0,n1 = ndimage.center_of_mass(R)\n assert np.abs(n0 - m0) < 1.\n assert np.abs(n1 - m1) < 1.\n\ndef test_cmp_ndimage3():\n R = (255*np.random.rand(32,128,8,16)).astype(np.uint16)\n R += np.arange(16, dtype=np.uint16)\n m = mahotas.center_of_mass(R)\n n = ndimage.center_of_mass(R)\n p = slow_center_of_mass(R)\n assert np.abs(n - m).max() < 1.\n assert np.abs(p - m).max() < 1.\n\ndef test_simple():\n R = (255*np.random.rand(128,256)).astype(np.uint16)\n R += np.arange(256, dtype=np.uint16)\n m0,m1 = mahotas.center_of_mass(R)\n\n assert 0 < m0 < 128\n assert 0 < m1 < 256\n\n\ndef test_labels():\n R = (255*np.random.rand(128,256)).astype(np.uint16)\n labels = np.zeros(R.shape, np.intc)\n labels[100:,:] += 1\n labels[100:,100:] += 1\n centres = mahotas.center_of_mass(R, labels)\n for label,cm in enumerate(centres):\n assert np.all(cm == mahotas.center_of_mass(R * (labels == label)))\n\n\n\ndef test_labels_not_intc():\n img = np.arange(256).reshape((16,16))\n labels = img.copy()\n labels %= 3\n cm = mahotas.center_of_mass(img, labels)\n assert cm.shape == (3,2)\n\n labels = labels.T.copy()\n cm = mahotas.center_of_mass(img, labels.T)\n assert cm.shape == (3,2)\n\n labels = labels.T.copy()\n labels = labels.astype(np.uint16)\n cm = mahotas.center_of_mass(img, labels)\n assert cm.shape == (3,2)\n\n", "import numpy as np\nfrom os import path\nfrom nose.tools import with_setup\n\n_test_dir = None\n_testimgname = None\n\ntry:\n from mahotas.io import freeimage\nexcept OSError:\n from nose import SkipTest\n raise SkipTest(\"FreeImage not found\")\n\ndef _create_tempdir():\n import tempfile\n global _test_dir, _testimgname\n _test_dir = tempfile.mkdtemp(prefix='mh_test')\n _testimgname = path.join(_test_dir, \"mahotas_test.png\")\n\ndef _remove_tempdir():\n from shutil import rmtree\n rmtree(_test_dir)\n\ncreate_remove = with_setup(setup=_create_tempdir, teardown=_remove_tempdir)\n\n@create_remove\ndef test_freeimage():\n img = np.arange(256).reshape((16,16)).astype(np.uint8)\n\n freeimage.imsave(_testimgname, img)\n img_ = freeimage.imread(_testimgname)\n assert img.shape == img_.shape\n assert np.all(img == img_)\n\n\n@create_remove\ndef test_as_grey():\n colour = np.arange(16*16*3).reshape((16,16,3))\n freeimage.imsave(_testimgname, colour.astype(np.uint8))\n c2 = freeimage.imread(_testimgname, as_grey=True)\n assert len(c2.shape) == 2\n assert c2.shape == colour.shape[:-1]\n\ndef test_rgba():\n rgba = path.join(\n path.dirname(__file__),\n 'data',\n 'rgba.png')\n rgba = freeimage.imread(rgba)\n assert np.all(np.diff(rgba[:,:,3].mean(1)) < 0 ) # the image contains an alpha gradient\n\n\n@create_remove\ndef test_save_load_rgba():\n img = np.arange(256).reshape((8,8,4)).astype(np.uint8)\n freeimage.imsave(_testimgname, img)\n img_ = freeimage.imread(_testimgname)\n assert img.shape == img_.shape\n assert np.all(img == img_)\n\ndef 
test_fromblob():\n img = np.arange(100, dtype=np.uint8).reshape((10,10))\n s = freeimage.imsavetoblob(img, 't.png')\n assert np.all(freeimage.imreadfromblob(s) == img)\n\n s = freeimage.imsavetoblob(img, 't.bmp')\n assert np.all(freeimage.imreadfromblob(s) == img)\n\n\ndef test_1bpp():\n bpp = path.join(\n path.dirname(__file__),\n 'data',\n '1bpp.bmp')\n bpp = freeimage.imread(bpp)\n assert bpp.sum()\n assert bpp.sum() < bpp.size\n\n@create_remove\ndef test_multi():\n testtif = _test_dir + '/mahotas_test.tif'\n f = np.zeros((16,16), np.uint8)\n fs = []\n for t in range(8):\n f[:t,:t] = t\n fs.append(f.copy())\n freeimage.write_multipage(fs, testtif)\n fs2 = freeimage.read_multipage(testtif)\n for f,f2 in zip(fs,fs2):\n assert np.all(f == f2)\n\n\n@create_remove\ndef test_uint16():\n img = np.zeros((32,32), dtype=np.uint16)\n freeimage.imsave(_testimgname, img)\n img_ = freeimage.imread(_testimgname)\n\n assert img.shape == img_.shape\n assert img.dtype == img_.dtype\n assert np.all(img == img_)\n\n", "# -*- coding: utf-8 -*-\n# Copyright (C) 2006-2015 Luis Pedro Coelho <[email protected]>\n# vim: set ts=4 sts=4 sw=4 expandtab smartindent:\n#\n# License: MIT (see COPYING file)\n\nfrom __future__ import division\nimport numpy as np\n\n__all__ = ['thin']\n\ndef thin(binimg, max_iter=-1):\n \"\"\"\n skel = thin(binimg)\n\n Skeletonisation by thinning\n\n Parameters\n ----------\n binimg : ndarray\n Binary input image\n max_iter : int, optional\n Maximum number of iterations (set to a negative number, the default, to\n run full skeletonization)\n\n Returns\n -------\n skel : Skeletonised version of `binimg`\n \"\"\"\n from .bbox import bbox\n from ._thin import thin as _thin\n\n res = np.zeros_like(binimg)\n min0,max0,min1,max1 = bbox(binimg)\n r,c = (max0-min0,max1-min1)\n\n image_exp = np.zeros((r+2, c+2), bool)\n image_exp[1:r+1, 1:c+1] = binimg[min0:max0,min1:max1]\n imagebuf = np.empty((r+2,c+2), bool)\n\n _thin(image_exp, imagebuf, int(max_iter))\n res[min0:max0,min1:max1] = image_exp[1:r+1, 1:c+1]\n return res\n\n" ]
[ [ "numpy.abs", "numpy.random.seed", "numpy.arange", "scipy.ndimage.center_of_mass", "numpy.random.rand", "numpy.array", "numpy.zeros" ], [ "numpy.all", "numpy.arange", "numpy.zeros" ], [ "numpy.empty", "numpy.zeros_like", "numpy.zeros" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "1.7", "1.0", "0.10", "1.2", "0.14", "0.19", "1.5", "0.12", "0.17", "0.13", "1.6", "1.4", "1.9", "1.3", "1.10", "0.15", "0.18", "0.16", "1.8" ], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
wjh720/pymarl
[ "9392407568d440c4808a1c7c98ddf1ef52e0c009" ]
[ "src/modules/mixers/point_like.py" ]
[ "import torch as th\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport numpy as np\n\n\nclass PointLikeMixer(nn.Module):\n def __init__(self, args):\n super(PointLikeMixer, self).__init__()\n\n self.args = args\n self.n_agents = args.n_agents\n self.n_groups = args.mixing_group_dim\n self.state_dim = int(np.prod(args.state_shape))\n\n self.embed_dim = args.mixing_embed_dim\n self.group_dim = args.mixing_group_dim\n\n self.hyper_w_1 = nn.Linear(self.state_dim, self.embed_dim * self.n_groups)\n self.hyper_w_final = nn.Linear(self.state_dim, self.embed_dim)\n\n # State dependent bias for hidden layer\n self.hyper_b_1 = nn.Linear(self.state_dim, self.embed_dim)\n\n # V(s) instead of a bias for the last layers\n self.V = nn.Sequential(nn.Linear(self.state_dim, self.embed_dim),\n nn.ReLU(),\n nn.Linear(self.embed_dim, 1))\n\n def forward(self, agent_qs, states):\n bs = agent_qs.size(0)\n states = states.reshape(-1, self.state_dim)\n agent_qs = agent_qs.view(-1, self.group_dim, self.n_agents)\n group_qs = agent_qs.sum(dim=2).view(-1, 1, self.group_dim)\n # First layer\n w1 = th.abs(self.hyper_w_1(states))\n b1 = self.hyper_b_1(states)\n w1 = w1.view(-1, self.n_groups, self.embed_dim)\n b1 = b1.view(-1, 1, self.embed_dim)\n hidden = F.elu(th.bmm(group_qs, w1) + b1)\n # Second layer\n w_final = th.abs(self.hyper_w_final(states))\n w_final = w_final.view(-1, self.embed_dim, 1)\n # State-dependent bias\n v = self.V(states).view(-1, 1, 1)\n # Compute final output\n y = th.bmm(hidden, w_final) + v\n # Reshape and return\n q_tot = y.view(bs, -1, 1)\n return q_tot\n" ]
[ [ "torch.nn.Linear", "torch.nn.ReLU", "torch.bmm", "numpy.prod" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
DustinAngerhofer/muzero-general-1
[ "aab58da92f5793fdfd6850af6a1028d7e31fd234" ]
[ "games/twentyone.py" ]
[ "\"\"\"\nThis is a very simple form of twenty one. Ace only counts as value 1 not 1 or\n11 for simplicity. This means that there is no such thing as a natural or two\ncard 21. This is a good example of showing how it can provide a good solution\nto even luck based games.\n\"\"\"\n\nimport datetime\nimport os\nfrom random import randint\n\nimport gym\nimport numpy\nimport torch\n\nfrom .abstract_game import AbstractGame\n\n\nclass MuZeroConfig:\n def __init__(self):\n self.seed = 0 # Seed for numpy, torch and the game\n\n\n\n ### Game\n self.observation_shape = (3,3,3) # Dimensions of the game observation, must be 3D (channel, height, width). For a 1D array, please reshape it to (1, 1, length of array)\n self.action_space = [i for i in range(2)] # Fixed list of all possible actions. You should only edit the length\n self.players = [i for i in range(1)] # List of players. You should only edit the length\n self.stacked_observations = 0 # Number of previous observations and previous actions to add to the current observation\n\n # Evaluate\n self.muzero_player = 0 # Turn Muzero begins to play (0: MuZero plays first, 1: MuZero plays second)\n self.opponent = None # Hard coded agent that MuZero faces to assess his progress in multiplayer games. It doesn't influence training. None, \"random\" or \"expert\" if implemented in the Game class\n\n\n\n ### Self-Play\n self.num_actors = 4 # Number of simultaneous threads self-playing to feed the replay buffer\n self.max_moves = 21 # Maximum number of moves if game is not finished before\n self.num_simulations = 21 # Number of future moves self-simulated\n self.discount = 1 # Chronological discount of the reward\n self.temperature_threshold = None # Number of moves before dropping temperature to 0 (ie playing according to the max)\n\n # Root prior exploration noise\n self.root_dirichlet_alpha = 0.25\n self.root_exploration_fraction = 0.25\n\n # UCB formula\n self.pb_c_base = 19652\n self.pb_c_init = 1.25\n\n\n\n ### Network\n self.network = \"resnet\" # \"resnet\" / \"fullyconnected\"\n self.support_size = 10 # Value and reward are scaled (with almost sqrt) and encoded on a vector with a range of -support_size to support_size\n\n # Residual Network\n self.downsample = False # Downsample observations before representation network (See paper appendix Network Architecture)\n self.blocks = 2 # Number of blocks in the ResNet\n self.channels = 32 # Number of channels in the ResNet\n self.reduced_channels_reward = 32 # Number of channels in reward head\n self.reduced_channels_value = 32 # Number of channels in value head\n self.reduced_channels_policy = 32 # Number of channels in policy head\n self.resnet_fc_reward_layers = [16] # Define the hidden layers in the reward head of the dynamic network\n self.resnet_fc_value_layers = [16] # Define the hidden layers in the value head of the prediction network\n self.resnet_fc_policy_layers = [16] # Define the hidden layers in the policy head of the prediction network\n\n # Fully Connected Network\n self.encoding_size = 32\n self.fc_representation_layers = [16] # Define the hidden layers in the representation network\n self.fc_dynamics_layers = [16] # Define the hidden layers in the dynamics network\n self.fc_reward_layers = [16] # Define the hidden layers in the reward network\n self.fc_value_layers = [16] # Define the hidden layers in the value network\n self.fc_policy_layers = [16] # Define the hidden layers in the policy network\n\n\n\n ### Training\n self.results_path = os.path.join(os.path.dirname(__file__), 
\"../results\", os.path.basename(__file__)[:-3], datetime.datetime.now().strftime(\"%Y-%m-%d--%H-%M-%S\")) # Path to store the model weights and TensorBoard logs\n self.training_steps = 15000 # Total number of training steps (ie weights update according to a batch)\n self.batch_size = 64 # Number of parts of games to train on at each training step\n self.checkpoint_interval = 10 # Number of training steps before using the model for self-playing\n self.value_loss_weight = 0.25 # Scale the value loss to avoid overfitting of the value function, paper recommends 0.25 (See paper appendix Reanalyze)\n self.training_device = \"cuda\" if torch.cuda.is_available() else \"cpu\" # Train on GPU if available\n\n self.optimizer = \"SGD\" # \"Adam\" or \"SGD\". Paper uses SGD\n self.weight_decay = 1e-4 # L2 weights regularization\n self.momentum = 0.9 # Used only if optimizer is SGD\n\n # Exponential learning rate schedule\n self.lr_init = 0.03 # Initial learning rate\n self.lr_decay_rate = 0.75 # Set it to 1 to use a constant learning rate\n self.lr_decay_steps = 150000\n\n\n\n ### Replay Buffer\n self.window_size = 10000 # Number of self-play games to keep in the replay buffer\n self.num_unroll_steps = 20 # Number of game moves to keep for every batch element\n self.td_steps = 50 # Number of steps in the future to take into account for calculating the target value\n self.use_last_model_value = True # Use the last model to provide a fresher, stable n-step value (See paper appendix Reanalyze)\n\n # Prioritized Replay (See paper appendix Training)\n self.PER = True # Select in priority the elements in the replay buffer which are unexpected for the network\n self.use_max_priority = False # If False, use the n-step TD error as initial priority. Better for large replay buffer\n self.PER_alpha = 0.5 # How much prioritization is used, 0 corresponding to the uniform case, paper suggests 1\n self.PER_beta = 1.0\n\n\n\n ### Adjust the self play / training ratio to avoid over/underfitting\n self.self_play_delay = 0 # Number of seconds to wait after each played game\n self.training_delay = 0 # Number of seconds to wait after each training step\n self.ratio = None # Desired self played games per training step ratio. Equivalent to a synchronous version, training can take much longer. Set it to None to disable it\n\n\n def visit_softmax_temperature_fn(self, trained_steps):\n \"\"\"\n Parameter to alter the visit count distribution to ensure that the action selection becomes greedier as training progresses.\n The smaller it is, the more likely the best action (ie with the highest visit count) is chosen.\n\n Returns:\n Positive float.\n \"\"\"\n if trained_steps < 500e3:\n return 1.0\n elif trained_steps < 750e3:\n return 0.5\n else:\n return 0.25\n\nclass Game(AbstractGame):\n \"\"\"\n Game wrapper.\n \"\"\"\n\n def __init__(self, seed=None):\n self.env = TwentyOne()\n\n def step(self, action):\n \"\"\"\n Apply action to the game.\n \n Args:\n action : action of the action_space to take.\n\n Returns:\n The new observation, the reward and a boolean if the game has ended.\n \"\"\"\n observation, reward, done = self.env.step(action)\n return observation, reward * 10, done\n\n def to_play(self):\n \"\"\"\n Return the current player.\n\n Returns:\n The current player, it should be an element of the players list in the config. \n \"\"\"\n return self.env.to_play()\n\n def legal_actions(self):\n \"\"\"\n Should return the legal actions at each turn, if it is not available, it can return\n the whole action space. 
At each turn, the game have to be able to handle one of returned actions.\n \n For complex game where calculating legal moves is too long, the idea is to define the legal actions\n equal to the action space but to return a negative reward if the action is illegal.\n\n Returns:\n An array of integers, subset of the action space.\n \"\"\"\n return self.env.legal_actions()\n\n def reset(self):\n \"\"\"\n Reset the game for a new game.\n \n Returns:\n Initial observation of the game.\n \"\"\"\n return self.env.reset()\n\n def render(self):\n \"\"\"\n Display the game observation.\n \"\"\"\n self.env.render()\n input(\"Press enter to take a step \")\n\n def human_to_action(self):\n \"\"\"\n For multiplayer games, ask the user for a legal action\n and return the corresponding action number.\n\n Returns:\n An integer from the action space.\n \"\"\"\n choice = input(\"Enter the action (0) Hit, or (1) Stand for the player {}: \".format(self.to_play()))\n while choice not in [str(action) for action in self.legal_actions()]:\n choice = input(\"Enter either (0) Hit or (1) Stand : \")\n return int(choice)\n\n def action_to_string(self, action_number):\n \"\"\"\n Convert an action number to a string representing the action.\n\n Args:\n action_number: an integer from the action space.\n\n Returns:\n String representing the action.\n \"\"\"\n actions = {\n 0: \"Hit\",\n 1: \"Stand\",\n }\n return \"{}. {}\".format(action_number, actions[action_number])\n\nclass TwentyOne:\n def __init__(self):\n self.player_hand = self.deal_card_value()\n self.dealer_hand = self.deal_card_value()\n\n self.player = 1\n\n def to_play(self):\n return 0 if self.player == 1 else 1\n\n def reset(self):\n self.player_hand = self.deal_card_value()\n self.dealer_hand = self.deal_card_value()\n self.player = 1\n return self.get_observation()\n\n \"\"\"\n Action: 0 = Hit\n Action: 1 = Stand\n \"\"\"\n def step(self, action):\n\n if action == 0:\n self.player_hand += self.deal_card_value()\n\n done = self.is_busted() or action == 1 or self.player_hand == 21\n\n reward = 0\n\n if done:\n self.dealer_plays() \n reward = self.get_reward(True)\n\n return self.get_observation(), self.get_reward(done), done\n\n def get_observation(self):\n return [\n numpy.array(numpy.full((3, 3), self.player_hand).astype(float)),\n numpy.array(numpy.full((3, 3), self.dealer_hand).astype(float)),\n numpy.array(numpy.full((3, 3), 0))\n ]\n\n def legal_actions(self):\n # 0 = hit\n # 1 = stand\n return [0, 1]\n\n def get_reward(self, done):\n if not done:\n return 0\n if self.player_hand <= 21 and self.dealer_hand < self.player_hand:\n return 1\n if self.player_hand <= 21 and self.dealer_hand > 21:\n return 1\n if self.player_hand > 21:\n return -1\n if self.player_hand == self.dealer_hand:\n return 0\n return -1\n\n\n def deal_card_value(self):\n card = randint(1,13)\n if card >= 10:\n value = 10\n else:\n value = card\n return value\n\n def dealer_plays(self):\n if self.player_hand > 21:\n return\n while self.dealer_hand<=16:\n self.dealer_hand += self.deal_card_value()\n\n def is_busted(self):\n if self.player_hand > 21:\n return True\n\n def render(self):\n print(\"Dealer hand: \" + str(self.dealer_hand))\n print(\"Player hand: \" + str(self.player_hand))\n" ]
[ [ "torch.cuda.is_available", "numpy.full" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
hebamohsen04/sqlalchemy-challenge
[ "d084705436c60a6fda0dc211c9668ed90f66882f" ]
[ "app.py" ]
[ "import numpy as np\n\nimport sqlalchemy\nfrom sqlalchemy.ext.automap import automap_base\nfrom sqlalchemy.orm import Session\nfrom sqlalchemy import create_engine, func\n\nimport datetime as dt \n\nfrom flask import Flask, jsonify\n\nengine = create_engine(\"sqlite:///Resources/hawaii.sqlite\")\n\n# reflect an existing database into a new model\nBase = automap_base()\n# reflect the tables\nBase.prepare(engine, reflect=True)\n\nMeasurement = Base.classes.measurement\nStation = Base.classes.station\n\n# 2. Create an app, being sure to pass __name__\napp = Flask(__name__)\n\n\n# 3. Define what to do when a user hits the index route\[email protected](\"/\")\ndef home():\n \"\"\"List all available api routes.\"\"\"\n return (\n f\"Available Routes:<br/>\"\n f\"/<br/>\"\n f\"/api/v1.0/precipitation<br/>\"\n f\"/api/v1.0/stations<br/>\"\n f\"/api/v1.0/tobs<br/>\"\n f\"/api/v1.0/2012-02-28<br/>\"\n f\"/api/v1.0/2012-02-28/2012-03-05<br/>\"\n )\n\n\n# 4. Define what to do when a user hits the /about route\[email protected](\"/api/v1.0/precipitation\")\ndef precipitation():\n session = Session(engine)\n \n last_date_record = session.query(Measurement.date).order_by(Measurement.date.desc()).first()\n last_date = dt.date.fromisoformat(last_date_record[0])\n\n # Calculate the date 1 year ago from the last data point in the database\n last_year = last_date - dt.timedelta(days=365)\n\n # Perform a query to retrieve the data and precipitation scores\n results = session.query(Measurement.date, Measurement.prcp).filter(Measurement.date >= last_year).all()\n session.close()\n \n all_prcp = []\n for date, prcp in results:\n prcp_dict = {}\n prcp_dict[\"date\"] = date\n prcp_dict[\"prcp\"] = prcp\n all_prcp.append(prcp_dict)\n \n return jsonify(all_prcp)\n \[email protected](\"/api/v1.0/stations\")\ndef stations():\n session = Session(engine)\n \n results= session.query(Station.station).all()\n session.close()\n \n all_names = list(np.ravel(results))\n \n return jsonify(all_names)\n\[email protected](\"/api/v1.0/tobs\")\ndef tobs():\n session = Session(engine)\n \n most_active_stations = session.query(Measurement.station, func.count(Measurement.station)).\\\n group_by(Measurement.station).order_by(func.count(Measurement.station).desc()).all()\n \n most_active_station = most_active_stations[0]\n \n last_date_record = session.query(Measurement.date).\\\n filter(Measurement.station == most_active_station[0]).\\\n order_by(Measurement.date.desc()).first()\n \n last_date = dt.date.fromisoformat(last_date_record[0])\n \n # Calculate the date 1 year ago from the last data point in the database\n last_year = last_date - dt.timedelta(days=365)\n \n # Perform a query to retrieve the data and precipitation scores\n results = session.query(Measurement.date, Measurement.tobs).filter(Measurement.date >= last_year).all()\n session.close()\n \n all_tobs = []\n for date, tobs in results:\n tobs_dict = {}\n tobs_dict[\"date\"] = date\n tobs_dict[\"tobs\"] = tobs\n all_tobs.append(tobs_dict)\n \n return jsonify(all_tobs)\n\[email protected](\"/api/v1.0/<start>\")\ndef temps_with_start(start):\n result = calc_temps_start(start)\n #TMIN, TAVG, and TMAX\n dict = {\n \"TMIN\" : result[0][0],\n \"TAVG\" : result[0][1],\n \"TMAX\" : result[0][2],\n \"start\" : start\n }\n return jsonify(dict)\n\[email protected](\"/api/v1.0/<start>/<end>\")\ndef temps_with_start_end(start, end):\n result = calc_temps_start_end(start, end)\n #TMIN, TAVG, and TMAX\n dict = {\n \"TMIN\" : result[0][0],\n \"TAVG\" : result[0][1],\n \"TMAX\" : 
result[0][2],\n \"start\" : start,\n \"end\" : end\n }\n return jsonify(dict)\n\ndef calc_temps_start(start_date):\n session = Session(engine)\n results = session.query(func.min(Measurement.tobs), func.avg(Measurement.tobs), func.max(Measurement.tobs)).\\\n filter(Measurement.date >= start_date).all()\n session.close()\n return results\n\ndef calc_temps_start_end(start_date, end_date):\n session = Session(engine)\n results = session.query(func.min(Measurement.tobs), func.avg(Measurement.tobs), func.max(Measurement.tobs)).\\\n filter(Measurement.date >= start_date).filter(Measurement.date <= end_date).all()\n session.close()\n return results\n\n\nif __name__ == \"__main__\":\n app.run(debug=True)\n" ]
[ [ "numpy.ravel" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
nsavinov/gym-vizdoom
[ "f098f297aeff79875c59861478c54e7727588bf7" ]
[ "gym_vizdoom/envs/constants.py" ]
[ "from os import path as osp\nimport numpy as np\n\n# vizdoom\nMAP_NAME_TEMPLATE = 'map%02d'\nMOVE_FORWARD = [0, 0, 0, 1, 0, 0, 0]\nMOVE_BACKWARD = [0, 0, 0, 0, 1, 0, 0]\nMOVE_LEFT = [1, 0, 0, 0, 0, 0, 0]\nMOVE_RIGHT = [0, 1, 0, 0, 0, 0, 0]\nSTAY_IDLE = [0, 0, 0, 0, 0, 0, 0]\nTURN_LEFT = [0, 0, 0, 0, 0, 1, 0]\nTURN_RIGHT = [0, 0, 0, 0, 0, 0, 1]\nACTIONS_LIST = [MOVE_FORWARD, MOVE_BACKWARD, MOVE_LEFT, MOVE_RIGHT, STAY_IDLE, TURN_LEFT, TURN_RIGHT]\nACTION_NAMES = ['MOVE_FORWARD', 'MOVE_BACKWARD', 'MOVE_LEFT', 'MOVE_RIGHT', 'STAY_IDLE', 'TURN_LEFT', 'TURN_RIGHT']\n# ACTIONS_LIST = [MOVE_FORWARD, TURN_LEFT, TURN_RIGHT, STAY_IDLE]\n# ACTION_NAMES = ['MOVE_FORWARD', 'TURN_LEFT', 'TURN_RIGHT', 'STAY_IDLE']\nVIZDOOM_TO_TF = [1, 2, 0]\nACTION_CLASSES = len(ACTIONS_LIST)\nMIN_RANDOM_TEXTURE_MAP_INDEX = 2\nMAX_RANDOM_TEXTURE_MAP_INDEX = 401\nREPEAT = 4\nNET_WIDTH = 160\nNET_HEIGHT = 120\nNET_CHANNELS = 3\n\n# paths\nDIR = osp.dirname(__file__)\nDEFAULT_CONFIG = osp.join(DIR, 'data', 'default.cfg')\n\n# test envs\nSTATE_AFTER_GAME_END = np.zeros((NET_HEIGHT, NET_WIDTH, NET_CHANNELS), dtype=np.uint8)\nEXPLORATION_GOAL_FRAME = np.zeros((NET_HEIGHT, NET_WIDTH, NET_CHANNELS), dtype=np.uint8) - 1\nMAX_STEP_NAVIGATION = 5000 // REPEAT\nMAX_STEP_EXPLORATION = 10000 // REPEAT\nGOAL_DISTANCE_ALLOWANCE = 63\nEXPLORATION_STATUS = 0\nNAVIGATION_STATUS = 1\nGOAL_REACHING_REWARD = 800.0\n# test naming\nDATA_PATH = 'data'\nDEFAULT_TEST_MAPS = ['map02', 'map03', 'map04', 'map05']\nDEFAULT_TEST_EXPLORATION_MAP = 'map06'\nDEFAULT_TEST_GOAL_NAMES = ['tall_red_pillar',\n 'candelabra',\n 'tall_blue_torch',\n 'short_green_pillar']\nGOAL_EXTENDED_OBSERVATION_SHAPE = [NET_HEIGHT, NET_WIDTH, 2 * NET_CHANNELS]\n" ]
[ [ "numpy.zeros" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
tombury182/ews_functions
[ "1c2569a4671dee4a6cd29d5681f6a280469b3210" ]
[ "ewstools/helpers.py" ]
[ "#################################################################################################################\n# ewstools\n# Description: Python package for computing, analysing and visualising \n# early warning signals (EWS) in time-series data\n# Author: Thomas M Bury\n# Web: http://www.math.uwaterloo.ca/~tbury/\n# Code repo: https://github.com/ThomasMBury/ewstools\n# Documentation: https://ewstools.readthedocs.io/\n#\n# The MIT License (MIT)\n#\n# Copyright (c) 2019 Thomas Bury http://www.math.uwaterloo.ca/~tbury/\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in all\n# copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n#################################################################################################################\n\n\n#---------------------------------\n# Import relevant packages\n#--------------------------------\n\n# For numeric computation and DataFrames\nimport numpy as np\nimport pandas as pd\n\n# To compute power spectrum using Welch's method\nfrom scipy import signal\n\nimport scipy.linalg\n\n# For fitting power spectrum models and computing AIC weights\nfrom lmfit import Model\n \n\n\ndef pspec_welch(yVals,\n dt,\n ham_length=40,\n ham_offset=0.5,\n w_cutoff=1,\n scaling='spectrum'):\n\n '''\n Computes the power spectrum of a time-series using Welch's method.\n \n The time-series is assumed to be stationary and to have equally spaced\n measurements in time. The power spectrum is computed using Welch's method,\n which computes the power spectrum over a rolling window of subsets of the\n time-series and then takes the average.\n \n Args\n ----\n yVals: array of floats\n Array of time-series values.\n dt: float\n Seperation between data points.\n ham_length: int\n Length of Hamming window (number of data points).\n ham_offset: float\n Hamming offset as a proportion of the Hamming window size.\n w_cutoff: float\n Cutoff frequency used in power spectrum. Given as a proportion of the \n maximum permissable frequency in the empirical\n power spectrum.\n scaling: {'spectrum', 'density'}\n Whether to compute the power spectrum ('spectrum') or\n the power spectral density ('density'). The power spectral density\n is the power spectrum normalised (such that the area underneath equals one). 
\n \n Returns\n -------\n pd.Series: \n Power values indexed by frequency\n \n '''\n\n ## Assign properties of *series* to parameters\n \n # Compute the sampling frequency \n fs = 1/dt\n # Number of data points\n num_points = len(yVals)\n # If ham_length given as a proportion - compute number of data points in ham_length\n if 0 < ham_length <= 1:\n ham_length = num_points * ham_length\n # If Hamming length given is less than the length of the t-series, make ham_length=length of tseries.\n if ham_length >= num_points:\n ham_length = num_points\n # Compute number of points in offset\n ham_offset_points = int(ham_offset*ham_length)\n \n ## Compute the periodogram using Welch's method (scipy.signal function)\n pspec_raw = signal.welch(yVals,\n fs,\n nperseg=ham_length,\n noverlap=ham_offset_points,\n return_onesided=False,\n scaling=scaling)\n \n # Put into a pandas series and index by frequency (scaled by 2*pi)\n pspec_series = pd.Series(pspec_raw[1], index=2*np.pi*pspec_raw[0], name='Power spectrum')\n pspec_series.index.name = 'Frequency'\n \n # Sort into ascending frequency\n pspec_series.sort_index(inplace=True)\n \n # Append power spectrum with first value (by symmetry)\n pspec_series.at[-min(pspec_series.index)] = pspec_series.iat[0]\n \n # Impose cutoff frequency\n wmax = w_cutoff*max(pspec_series.index) # cutoff frequency\n pspec_output = pspec_series[-wmax:wmax] # subset of power spectrum\n \n \n return pspec_output\n\n\n\n\n\n\n#------------Functional forms of power spectra to fit------------#\n \ndef psd_fold(w,sigma,lam):\n\t'''\n\tAnalytical approximation for the power spectrum prior to a Fold bifurcation\n\n\t'''\n\treturn (sigma**2 / (2*np.pi))*(1/(w**2+lam**2))\n \n\n\ndef psd_flip(w,sigma,r):\n\t'''\n\tAnalytical approximation for the power spectrum prior to a Flip bifurcation\n\t'''\n\treturn (sigma**2 / (2*np.pi))*(1/(1 + r**2 - 2*r*np.cos(w)))\n\n\n\ndef psd_hopf(w,sigma,mu,w0):\n\t'''\n\tAnalytical approximation for the power spectrum prior to a Hopf bifurcation\n\n\t'''\n\treturn (sigma**2/(4*np.pi))*(1/((w+w0)**2+mu**2)+1/((w-w0)**2 +mu**2))\n \n \n\n\ndef psd_null(w,sigma):\n\t'''\n\tPower spectrum of white noise (flat).\n\t'''\n\treturn sigma**2/(2*np.pi) * w**0\n \n \n \n\n\n\n#-------Obtain 'best guess' intitialisation parameters for optimisation------%\n\n\ndef shopf_init(smax, stot, wdom):\n '''\n Compute the 'best guess' initialisation values for sigma, mu and w0,\n when fitting sHopf to the empirical power spectrum.\n \n Args\n ----\n smax: float\n Maximum power in the power spectrum.\n stot: float\n Total power in the power spectrum.\n wdom: float\n Frequency that has the highest power.\n \n Return\n ------\n list of floats: \n List containing the initialisation parameters [sigma, mu, w0]\n \n '''\n \n # Define chunky term (use \\ to continue eqn to new line)\n def alpha(smax, stot, wdom):\n return stot**3 \\\n + 9*(np.pi**2)*(wdom**2)*(smax**2)*stot \\\n +3*np.sqrt(3)*np.pi*np.sqrt(\n 64*(np.pi**4)*(wdom**6)*(smax**6) \\\n -13*(np.pi**2)*(wdom**4)*(smax**4)*(stot**2) \\\n +2*(wdom**2)*(smax**2)*(stot**4) \\\n )\n \n # Initialisation for mu \n mu = -(1/(3*np.pi*smax))*(stot \\\n +alpha(smax,stot,wdom)**(1/3) \\\n +(stot**2-12*(np.pi**2)*(wdom**2)*(smax**2))/(alpha(smax,stot,wdom)**(1/3)))\n \n \n # Initialisation for sigma\n sigma = np.sqrt(\n -2*mu*stot)\n \n # Initialisation for w0\n w0 = wdom\n \n # Return list\n return [sigma, mu, w0]\n\n\n\n \n \ndef sfold_init(smax, stot):\n '''\n Compute the 'best guess' initialisation values for sigma and lamda\n when 
fitting sfold to the empirical power spectrum.\n \n Args\n --------------\n smax: float\n Maximum power in the power spectrum.\n stot: float\n Total power in the power spectrum.\n \n Return\n -----------------\n list of floats: \n List containing the initialisation parameters [sigma, lambda]\n \n '''\n \n # Initialisation for sigma\n sigma = np.sqrt(2*stot**2/(np.pi*smax))\n \n # Initialisation for lamda\n lamda = -stot/(np.pi*smax)\n\n # Return list\n return [sigma, lamda]\n\n\n\ndef sflip_init(smax, stot):\n '''\n Compute the 'best guess' initialisation values for sigma and r\n when fitting sflip to the empirical power spectrum.\n \n Args\n --------------\n smax: float\n Maximum power in the power spectrum.\n stot: float\n Total power in the power spectrum.\n \n Return\n -----------------\n list of floats: \n List containing the initialisation parameters [sigma, r]\n \n '''\n \n \n # Initialisation for r\n r =(stot - 2*np.pi*smax)/(stot + 2*np.pi*smax)\n \n # Initialisation for sigma\n sigma = np.sqrt(stot*(1-r**2))\n \n # Return list\n return [sigma, r]\n\n\n\ndef snull_init(stot):\n '''\n Compute the 'best guess' initialisation values for sigma\n when fitting snull to the empirical power spectrum.\n \n Args\n --------------\n stot: float\n Total power in the power spectrum.\n \n Return\n -----------------\n list of floats: \n List containing the initialisation parameters [sigma].\n \n '''\n \n # Initialisation for sigma\n sigma = np.sqrt(stot)\n\n # Return list\n return [sigma]\n\n\n\n\n\n#---------Run optimisation to compute best fits-----------#\n \n# Fold fit\ndef fit_fold(pspec, init):\n '''\n Fit the Fold power spectrum model to pspec and compute AIC score.\n Uses the package LMFIT for optimisation.\n \n Args\n --------------\n pspec: pd.Series\n Power spectrum data as a Series indexed by frequency.\n init: list of floats\n Initial parameter guesses of the form [sigma_init, lambda_init].\n \n Returns\n ----------------\n list:\n Form [aic, result] where aic is the AIC score for the model fit,\n and result is a handle that contains further information on the fit.\n\n '''\n \n \n # Put frequency values and power values as a list to use LMFIT\n freq_vals = pspec.index.tolist()\n power_vals = pspec.tolist()\n \n sigma_init, lambda_init = init\n # Assign model object\n model = Model(psd_fold)\n # Set up constraint S(wMax) < psi_fold*S(0)\n psi_fold = 0.5\n wMax = max(freq_vals)\n # Parameter constraints for sigma\n model.set_param_hint('sigma', value=sigma_init, min=0, max=10*sigma_init)\n # Parameter constraints for lambda\n model.set_param_hint('lam', min=-np.sqrt(psi_fold/(1-psi_fold))*wMax, max=0, value=lambda_init)\n \n # Assign initial parameter values and constraints\n params = model.make_params() \n # Fit model to the empircal spectrum\n result = model.fit(power_vals, params, w=freq_vals)\n # Compute AIC score\n aic = result.aic\n \n # Export AIC score and model fit\n return [aic, result]\n\n\n\n\n\n# Fold fit\ndef fit_flip(pspec, init):\n '''\n Fit the Flip power spectrum model to pspec and compute AIC score.\n Uses the package LMFIT for optimisation.\n \n Args\n --------------\n pspec: pd.Series\n Power spectrum data as a Series indexed by frequency.\n init: list of floats\n Initial parameter guesses of the form [sigma_init, r_init].\n \n Returns\n ----------------\n list:\n Form [aic, result] where aic is the AIC score for the model fit,\n and result is a handle that contains further information on the fit.\n\n '''\n \n \n # Put frequency values and power values as a list to 
use LMFIT\n freq_vals = pspec.index.tolist()\n power_vals = pspec.tolist()\n \n sigma_init, r_init = init\n # Assign model object\n model = Model(psd_flip)\n # Parameter constraints for sigma\n model.set_param_hint('sigma', value=sigma_init, min=0, max=10*sigma_init)\n # Parameter constraints for r\n model.set_param_hint('r', min=-1, max=0, value=r_init)\n \n # Assign initial parameter values and constraints\n params = model.make_params() \n # Fit model to the empircal spectrum\n result = model.fit(power_vals, params, w=freq_vals)\n # Compute AIC score\n aic = result.aic\n# print('flip aic is {}'.format(aic))\n\n # Export AIC score and model fit\n return [aic, result]\n\n\n\n\n\n# Function to fit Hopf model to empirical specrum with specified initial parameter guess\ndef fit_hopf(pspec, init): \n \n '''\n Fit the Hopf power spectrum model to pspec and compute AIC score.\n Uses the package LMFIT for optimisation.\n \n Args\n --------------\n pspec: pd.Series\n Power spectrum data as a Series indexed by frequency\n init: list of floats\n Initial parameter guesses of the form [sigma_init, mu_init, w0_init]\n \n Returns\n ----------------\n list:\n Form [aic, result] where aic is the AIC score for the model fit,\n and result is a handle that contains further information on the fit.\n\n '''\n \n \n # Put frequency values and power values as a list to use LMFIT\n freq_vals = pspec.index.tolist()\n power_vals = pspec.tolist()\n \n # Assign labels to initialisation values\n sigma_init, mu_init, w0_init = init\n \n \n # If any labels are nan, resort to default values \n if np.isnan(sigma_init) or np.isnan(mu_init) or np.isnan(w0_init):\n sigma_init, mu_init, w0_init = [1,-0.1,1]\n \n # Constraint parameter\n psi_hopf = 0.2\n \n # Compute initialisation value for the dummy variable delta (direct map with w0)\n # It must be positive to adhere to constraint - thus if negative set to 0.\n delta_init = max(\n w0_init + (mu_init/(2*np.sqrt(psi_hopf)))*np.sqrt(4-3*psi_hopf + np.sqrt(psi_hopf**2-16*psi_hopf+16)),\n 0.0001)\n \n\n # Assign model object \n model = Model(psd_hopf)\n \n ## Set initialisations parameters in model attributes\n \n # Sigma must be positive, and set a (high) upper bound to avoid runaway computation\n model.set_param_hint('sigma', value=sigma_init, min=0)\n # Psi is a fixed parameter (not used in optimisation)\n model.set_param_hint('psi', value=psi_hopf, vary=False)\n # Mu must be negative \n model.set_param_hint('mu', value=mu_init, max=0, vary=True)\n # Delta is a dummy parameter, satisfying d = w0 - wThresh (see paper for wThresh). 
It is allowed to vary, in place of w0.\n model.set_param_hint('delta', value = delta_init, min=0, vary=True)\n # w0 is a fixed parameter dependent on delta (w0 = delta + wThresh)\n model.set_param_hint('w0',expr='delta - (mu/(2*sqrt(psi)))*sqrt(4-3*psi + sqrt(psi**2-16*psi+16))',max=2.5,vary=False)\n \n # Assign initial parameter values and constraints\n params = model.make_params() \n # Fit model to the empircal spectrum\n result = model.fit(power_vals, params, w=freq_vals)\n # Compute AIC score\n aic = result.aic\n# print('hopf aic is {}'.format(aic))\n # Export AIC score and model fit\n return [aic, result]\n\n\n\n# Function to fit Null model to empirical specrum with specified initial parameter guess\ndef fit_null(pspec, init):\n '''\n Fit the Null power spectrum model to pspec and compute AIC score.\n Uses the package LMFIT for optimisation.\n \n Args\n --------------\n pspec: pd.Series\n Power spectrum data as a Series indexed by frequency\n init: list of floats\n Initial parameter guesses of the form [sigma_init]\n \n Returns\n ----------------\n list:\n Form [aic, result] where aic is the AIC score for the model fit,\n and result is a handle that contains further information on the fit.\n\n '''\n \n # Put frequency values and power values as a list to use LMFIT\n freq_vals = pspec.index.tolist()\n power_vals = pspec.tolist()\n \n sigma_init = init[0]\n \n # Assign model object\n model = Model(psd_null)\n \n # Initial parameter value for Null fit \n model.set_param_hint('sigma', value=sigma_init, vary=True, min=0, max=10*sigma_init)\n \n # Assign initial parameter values and constraints\n params = model.make_params() \n # Fit model to the empircal spectrum\n result = model.fit(power_vals, params, w=freq_vals)\n # Compute AIC score\n aic = result.aic\n \n # Export AIC score and model fit\n return [aic, result]\n\n\n\n\n\ndef aic_weights(aic_scores):\n '''\n Computes AIC weights, given AIC scores.\n \n Args\n ----------------\n aic_scores: np.array\n An array of AIC scores\n \n Returns\n -----------------\n np.array\n Array of the corresponding AIC weights\n \n '''\n \n \n # Best AIC score\n aic_best = min(aic_scores)\n \n # Differences in score from best model\n aic_diff = aic_scores - aic_best\n \n # Likelihoods for each model\n llhd = np.exp(-(1/2)*aic_diff)\n \n # Normalise to get AIC weights\n return llhd/sum(llhd)\n \n \n \n\n#-----------Compute spectral metrics (EWS) from power spectrum------#\n\n\ndef pspec_metrics(pspec,\n ews = ['smax','cf','aic'],\n aic = ['Fold','Hopf','Null'],\n sweep = False):\n\n\n '''\n Compute the metrics associated with pspec that can be\n used as EWS.\n \n Args\n -------------------\n pspec: pd.Series\n Power spectrum as a Series indexed by frequency\n ews: list of {'smax', 'cf', 'aic'}\n EWS to be computed. Options include peak in the power spectrum ('smax'),\n coherence factor ('cf'), AIC weights ('aic').\n aic: AIC weights to compute\n sweep: bool\n If 'True', sweep over a range of intialisation \n parameters when optimising to compute AIC scores, at the expense of \n longer computation. 
If 'False', intialisation parameter is taken as the\n 'best guess'.\n \n Return\n -------------------\n dict:\n A dictionary of spectral EWS obtained from pspec\n \n '''\n \n \n # Initialise a dictionary for EWS\n spec_ews = {}\n \n ## Compute Smax\n if 'smax' in ews:\n smax = max(pspec)\n # add to DataFrame\n spec_ews['Smax'] = smax\n \n \n \n ## Compute the coherence factor\n if 'cf' in ews:\n \n # frequency at which peak occurs\n w_peak = abs(pspec.idxmax())\n \n # power of peak frequency\n power_peak = pspec.max()\n \n # compute the first frequency from -w_peak at which power<power_peak/2\n w_half = next( (w for w in pspec[-w_peak:].index if pspec.loc[w] < power_peak/2 ), 'None')\n \n # if there was no such frequency, or if peak crosses zero frequency,\n # set w_peak = 0 (makes CF=0) \n if w_half == 'None' or w_half > 0:\n w_peak = 0\n \n else:\n # double the difference between w_half and -w_peak to get the width of the peak\n w_width = 2*(w_half - (-w_peak))\n \n # compute coherence factor (height/relative width)\n coher_factor = power_peak/(w_width/w_peak) if w_peak != 0 else 0\n\n # add to dataframe\n spec_ews['Coherence factor'] = coher_factor\n \n\n ## Compute AIC weights of fitted analytical forms\n if 'aic' in ews:\n \n # Compute the empirical metrics that allow us to choose sensible initialisation parameters\n # Peak in power spectrum\n smax = pspec.max()\n # Area underneath power spectrum (~ variance)\n stot = pspec.sum()*(pspec.index[1]-pspec.index[0])\n # Dominant frequency (take positive value)\n wdom = abs(pspec.idxmax())\n \n ## Create array of initialisation parmaeters \n \n # Sweep values (as proportion of baseline guess) if sweep = True\n sweep_vals = np.array([0.5,1,1.5]) if sweep else np.array([1])\n\n \n # Baseline parameter initialisations (computed using empirical spectrum)\n # Sfold\n [sigma_init_fold, lambda_init] = sfold_init(smax,stot)\n # Sflip\n [sigma_init_flip, r_init] = sflip_init(smax,stot)\n # Shopf\n [sigma_init_hopf, mu_init, w0_init] = shopf_init(smax,stot,wdom)\n # Snull\n [sigma_init_null] = snull_init(stot)\n \n \n # Arrays of initial values\n init_fold_array = {'sigma': sweep_vals*sigma_init_fold,\n 'lambda': sweep_vals*lambda_init}\n \n # r parameter cannot go below -1\n r_sweep_vals = [0.5*r_init,r_init,0.5*r_init+0.5] if sweep else [r_init]\n init_flip_array = {'sigma': sweep_vals*sigma_init_flip,\n 'r': r_sweep_vals} \n \n init_hopf_array = {'sigma': sweep_vals*sigma_init_hopf,\n 'mu': sweep_vals*mu_init,\n 'w0': sweep_vals*w0_init}\n\n init_null_array = {'sigma': sweep_vals*sigma_init_null}\n\n\n ## Compute AIC values and fits\n \n ## Fold\n \n # Initialise list to store AIC and model fits\n fold_aic_fits = []\n\n # Sweep over initial parameter guesses and pick best convergence\n for i in range(len(init_fold_array['sigma'])):\n for j in range(len(init_fold_array['lambda'])):\n # Initial parameter guess\n init_fold = [init_fold_array['sigma'][i],init_fold_array['lambda'][j]]\n # Compute fold fit and AIC score\n [aic_temp, model_temp] = fit_fold(pspec, init_fold)\n # Store in list\n fold_aic_fits.append([aic_temp, model_temp])\n # Put list into array\n array_temp = np.array(fold_aic_fits)\n # Pick out the best model\n [aic_fold, model_fold] = array_temp[array_temp[:,0].argmin()] \n \n \n \n ## Flip\n \n # Initialise list to store AIC and model fits\n flip_aic_fits = []\n\n # Sweep over initial parameter guesses and pick best convergence\n for i in range(len(init_flip_array['sigma'])):\n for j in range(len(init_flip_array['r'])):\n # Initial 
parameter guess\n init_flip = [init_flip_array['sigma'][i],init_flip_array['r'][j]]\n # Compute fold fit and AIC score\n [aic_temp, model_temp] = fit_flip(pspec, init_flip)\n # Store in list\n flip_aic_fits.append([aic_temp, model_temp])\n # Put list into array\n array_temp = np.array(flip_aic_fits)\n # Pick out the best model\n [aic_flip, model_flip] = array_temp[array_temp[:,0].argmin()] \n \n \n \n \n \n \n ## Hopf\n \n # Initialise list to store AIC and model fits\n hopf_aic_fits = []\n\n # Sweep over initial parameter guesses and pick best convergence\n for i in range(len(init_hopf_array['sigma'])):\n for j in range(len(init_hopf_array['mu'])):\n for k in range(len(init_hopf_array['w0'])):\n # Initial parameter guess\n init_hopf = [init_hopf_array['sigma'][i],init_hopf_array['mu'][j],init_hopf_array['w0'][k]]\n # Compute fold fit and AIC score\n [aic_temp, model_temp] = fit_hopf(pspec, init_hopf)\n # Store in list\n hopf_aic_fits.append([aic_temp, model_temp])\n # Put list into array\n array_temp = np.array(hopf_aic_fits)\n # Pick out the best model\n [aic_hopf, model_hopf] = array_temp[array_temp[:,0].argmin()] \n \n \n \n \n ## Null\n \n # Initialise list to store AIC and model fits\n null_aic_fits = []\n\n # Sweep over initial parameter guesses and pick best convergence\n for i in range(len(init_null_array['sigma'])):\n # Initial parameter guess\n init_null = [init_null_array['sigma'][i]]\n # Compute fold fit and AIC score\n [aic_temp, model_temp] = fit_null(pspec, init_null)\n # Store in list\n null_aic_fits.append([aic_temp, model_temp])\n # Put list into array\n array_temp = np.array(null_aic_fits)\n # Pick out the best model\n [aic_null, model_null] = array_temp[array_temp[:,0].argmin()] \n \n \n # Compute chosen AIC weights from the AIC scores\n aic_scores = {}\n if 'Fold' in aic:\n aic_scores['Fold']=aic_fold\n if 'Flip' in aic:\n aic_scores['Flip']=aic_flip\n if 'Hopf' in aic:\n aic_scores['Hopf']=aic_hopf\n if 'Null' in aic:\n aic_scores['Null']=aic_null\n \n aicw = aic_weights(np.array([aic_scores[x] for x in aic]))\n aic_dict = dict(zip(aic,aicw)) \n \n # Add to Dataframe\n if 'Fold' in aic:\n spec_ews['AIC fold'] = aic_dict['Fold']\n if 'Flip' in aic:\n spec_ews['AIC flip'] = aic_dict['Flip']\n if 'Hopf' in aic:\n spec_ews['AIC hopf'] = aic_dict['Hopf']\n if 'Null' in aic:\n spec_ews['AIC null'] = aic_dict['Null']\n \n \n # Add fitted parameter values to DataFrame\n spec_ews['Params fold'] = dict((k, model_fold.values[k]) for k in ('sigma','lam')) # don't include dummy params\n spec_ews['Params flip'] = dict((k, model_flip.values[k]) for k in ('sigma','r'))\n spec_ews['Params hopf'] = dict((k, model_hopf.values[k]) for k in ('sigma','mu','w0','delta','psi'))\n spec_ews['Params null'] = model_null.values\n\n\n # Return DataFrame of metrics\n return spec_ews\n\n \n\n\n\n\n#------------------------\n## Function to compute lag-1 autocovariance matrix\n\ndef compute_autocov(df_in):\n '''\n Computes the autocovariance (lag-1) matrix of n \n time series provided in df_in.\n Using the definition\n phi_ij = < X_i(t+1) X_j(t) >\n for each element of the autocovariance matrix phi.\n \n Args\n -------------------\n df_in: DataFrame with n columns indexed by time\n\n \n Return\n -------------------\n np.array:\n autocovariance matrix\n \n '''\n\n\n\n \n # Obtain column names of df_in\n col_names = df_in.columns\n # Number of variables\n n = len(col_names)\n \n \n # Define function to compute autocovariance of two columns\n def autocov_cols(a,b):\n '''\n Computes autocovariance of two 
columns (can be the same)\n Note that this does not commute (a<->b) in general\n Input:\n a,b: Series indexed by time\n Output:\n float: autocovariance between the columns\n '''\n \n # Shift the column of a by 1\n a_shift = a.shift(1)\n \n # Put into a dataframe\n df_temp = pd.concat([a_shift,b], axis=1)\n \n # Compute covariance of columns a and b_shift\n cov = df_temp.cov().iloc[0,1]\n \n # Output\n return cov\n \n \n # Compute elements of autocovariance matrix\n list_elements = []\n \n for i in range(n):\n for j in range(n):\n a = df_in[col_names[i]]\n b = df_in[col_names[j]]\n # Compute autocovaraince between cols\n autocov = autocov_cols(a,b)\n # Append to list of elements\n list_elements.append(autocov)\n \n # Create autocovariance matrix from list of elements\n ar_autocov = np.array(list_elements).reshape(n,n)\n\n # Output\n return ar_autocov\n\n\n\n\n '''\n Computes the autocovariance (lag-1) matrix of n \n time series provided in df_in.\n Using the definition\n phi_ij = < X_i(t+1) X_j(t) >\n for each element of the autocovariance matrix phi.\n \n Args\n -------------------\n df_in: DataFrame with n columns indexed by time\n\n \n Return\n -------------------\n np.array:\n autocovariance matrix\n \n '''\n\n\n\n\n#---------------------------------------\n## Function to do Jacobian and eval reconstruction \n\n\ndef eval_recon(df_in):\n '''\n Constructs estimate of Jacobian matrix from stationary time-series data\n and outputs the eigenvalues, eigenvectors and jacobian.\n \n Args\n -------------------\n df_in: DataFrame with two columns indexed by time\n \n \n Return\n -------------------\n dict\n\t\tConsists of\n\t\t\t- 'Eigenvalues': np.array of eigenvalues\n \t\t- 'Eigenvectors': np.array of eigenvectors\n \t\t- 'Jacobian': pd.DataFrame of Jacobian entries\n \t\t\n '''\n \n # Get the time-separation between data points\n dt = df_in.index[1] -df_in.index[0]\n \n # Compute autocovaraince matrix from columns\n ar_autocov = compute_autocov(df_in)\n \n # Compute the covariance matrix (built in function)\n ar_cov = df_in.cov()\n \n # Estimate of discrete Jacobian (formula in Williamson (2015))\n # Requires computation of an inverse matrix\n jac = np.matmul(ar_autocov, np.linalg.inv(ar_cov))\n\n # Write the Jacobian as a df for output (so we have col lables)\n df_jac = pd.DataFrame(jac, columns = df_in.columns, index=df_in.columns)\n \n # Compute eigenvalues and eigenvectors\n evals, evecs = np.linalg.eig(jac)\n\t\n # Dictionary of data output\n dic_out = {'Eigenvalues':evals, \n 'Eigenvectors':evecs,\n 'Jacobian':df_jac}\n \n return dic_out\n\n\n\n\n\n\n\n \n\n\n \n \n \n" ]
[ [ "pandas.concat", "numpy.sqrt", "pandas.Series", "numpy.isnan", "numpy.linalg.eig", "numpy.linalg.inv", "numpy.cos", "pandas.DataFrame", "numpy.array", "numpy.exp", "scipy.signal.welch" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "1.3", "0.19", "1.1", "1.5", "0.24", "0.20", "1.0", "0.25", "1.2" ], "scipy": [ "0.13", "1.6", "0.14", "1.10", "0.15", "1.4", "0.16", "1.9", "0.19", "1.5", "0.18", "1.2", "1.7", "0.12", "1.0", "0.17", "1.3", "1.8" ], "tensorflow": [] } ]
TarekAloui/pytorchfi
[ "29915e158941a21fc786e6a59c958ec751a59167" ]
[ "test/unit_tests/test_weight_fi.py" ]
[ "import torch\nfrom pytorchfi.core import fault_injection as pfi_core\n\nfrom .util_test import helper_setUp_CIFAR10_same\n\n\nclass TestWeightFIcpu:\n \"\"\"\n Testing focuses on weight perturbations.\n \"\"\"\n\n def setup_class(self):\n torch.manual_seed(0)\n\n self.BATCH_SIZE = 1\n self.WORKERS = 1\n self.channels = 3\n self.img_size = 32\n self.USE_GPU = False\n\n self.model, self.dataset = helper_setUp_CIFAR10_same(\n self.BATCH_SIZE, self.WORKERS\n )\n self.dataiter = iter(self.dataset)\n\n self.images, self.labels = self.dataiter.next()\n\n self.model.eval()\n with torch.no_grad():\n self.output = self.model(self.images)\n\n self.p = pfi_core(\n self.model,\n self.BATCH_SIZE,\n input_shape=[self.channels, self.img_size, self.img_size],\n use_cuda=self.USE_GPU,\n )\n\n def test_neuronFI_singleElement(self):\n layer_i = 1\n k = 15\n c_i = 20\n h_i = 2\n w_i = 3\n\n inj_value_i = 10000.0\n\n self.inj_model = self.p.declare_weight_fi(\n layer_num=layer_i, k=k, dim1=c_i, dim2=h_i, dim3=w_i, value=inj_value_i\n )\n\n self.inj_model.eval()\n with torch.no_grad():\n corrupted_output_1 = self.inj_model(self.images)\n\n if torch.all(corrupted_output_1.eq(self.output)):\n raise AssertionError\n\n self.inj_model = self.p.declare_weight_fi(\n layer_num=layer_i,\n k=k,\n dim1=c_i,\n dim2=h_i,\n dim3=w_i,\n value=0.01388985849916935,\n )\n\n self.inj_model.eval()\n with torch.no_grad():\n uncorrupted_output = self.inj_model(self.images)\n\n if not torch.all(uncorrupted_output.eq(self.output)):\n raise AssertionError\n\n self.inj_model = self.p.declare_weight_fi(\n layer_num=layer_i,\n k=k,\n dim1=c_i,\n dim2=h_i,\n dim3=w_i,\n value=inj_value_i * 2,\n )\n\n self.inj_model.eval()\n with torch.no_grad():\n corrupted_output_2 = self.inj_model(self.images)\n\n if torch.all(corrupted_output_2.eq(self.output)):\n raise AssertionError\n if not torch.all(corrupted_output_2.eq(corrupted_output_2)):\n raise AssertionError\n\n def test_neuronFI_singleElement_noErr(self):\n layer_i = 4\n k = 153\n c_i = 254\n h_i = 0\n w_i = 0\n\n inj_value_i = 10000.0\n\n self.inj_model = self.p.declare_weight_fi(\n layer_num=layer_i, k=k, dim1=c_i, dim2=h_i, dim3=w_i, value=inj_value_i\n )\n\n self.inj_model.eval()\n with torch.no_grad():\n corrupted_output_1 = self.inj_model(self.images)\n\n if not torch.all(corrupted_output_1.eq(self.output)):\n raise AssertionError\n" ]
[ [ "torch.manual_seed", "torch.no_grad" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
Licht-T/vision
[ "052edcecef3eb0ae9fe9e4b256fa2a488f9f395b" ]
[ "torchvision/ops/roi_pool.py" ]
[ "import torch\nfrom torch import nn, Tensor\n\nfrom torch.nn.modules.utils import _pair\nfrom torch.jit.annotations import List, BroadcastingList2\n\nfrom torchvision.extension import _assert_has_ops\nfrom ._utils import convert_boxes_to_roi_format, check_roi_boxes_shape\n\n\ndef roi_pool(\n input: Tensor,\n boxes: Tensor,\n output_size: BroadcastingList2[int],\n spatial_scale: float = 1.0,\n) -> Tensor:\n \"\"\"\n Performs Region of Interest (RoI) Pool operator described in Fast R-CNN\n\n Arguments:\n input (Tensor[N, C, H, W]): input tensor\n boxes (Tensor[K, 5] or List[Tensor[L, 4]]): the box coordinates in (x1, y1, x2, y2)\n format where the regions will be taken from. If a single Tensor is passed,\n then the first column should contain the batch index. If a list of Tensors\n is passed, then each Tensor will correspond to the boxes for an element i\n in a batch\n output_size (int or Tuple[int, int]): the size of the output after the cropping\n is performed, as (height, width)\n spatial_scale (float): a scaling factor that maps the input coordinates to\n the box coordinates. Default: 1.0\n\n Returns:\n output (Tensor[K, C, output_size[0], output_size[1]])\n \"\"\"\n _assert_has_ops()\n check_roi_boxes_shape(boxes)\n rois = boxes\n output_size = _pair(output_size)\n if not isinstance(rois, torch.Tensor):\n rois = convert_boxes_to_roi_format(rois)\n output, _ = torch.ops.torchvision.roi_pool(input, rois, spatial_scale,\n output_size[0], output_size[1])\n return output\n\n\nclass RoIPool(nn.Module):\n \"\"\"\n See roi_pool\n \"\"\"\n def __init__(self, output_size: BroadcastingList2[int], spatial_scale: float):\n super(RoIPool, self).__init__()\n self.output_size = output_size\n self.spatial_scale = spatial_scale\n\n def forward(self, input: Tensor, rois: Tensor) -> Tensor:\n return roi_pool(input, rois, self.output_size, self.spatial_scale)\n\n def __repr__(self) -> str:\n tmpstr = self.__class__.__name__ + '('\n tmpstr += 'output_size=' + str(self.output_size)\n tmpstr += ', spatial_scale=' + str(self.spatial_scale)\n tmpstr += ')'\n return tmpstr\n" ]
[ [ "torch.ops.torchvision.roi_pool", "torch.nn.modules.utils._pair" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
mariodmtrv/models
[ "006ed77052b56bbd60965a833dc54ce9dbda39db" ]
[ "research/deeplab/datasets/data_generator.py" ]
[ "# Lint as: python2, python3\n# Copyright 2018 The TensorFlow Authors All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Wrapper for providing semantic segmentaion data.\n\nThe SegmentationDataset class provides both images and annotations (semantic\nsegmentation and/or instance segmentation) for TensorFlow. Currently, we\nsupport the following datasets:\n\n1. PASCAL VOC 2012 (http://host.robots.ox.ac.uk/pascal/VOC/voc2012/).\n\nPASCAL VOC 2012 semantic segmentation dataset annotates 20 foreground objects\n(e.g., bike, person, and so on) and leaves all the other semantic classes as\none background class. The dataset contains 1464, 1449, and 1456 annotated\nimages for the training, validation and test respectively.\n\n2. Cityscapes dataset (https://www.cityscapes-dataset.com)\n\nThe Cityscapes dataset contains 19 semantic labels (such as road, person, car,\nand so on) for urban street scenes.\n\n3. ADE20K dataset (http://groups.csail.mit.edu/vision/datasets/ADE20K)\n\nThe ADE20K dataset contains 150 semantic labels both urban street scenes and\nindoor scenes.\n\nReferences:\n M. Everingham, S. M. A. Eslami, L. V. Gool, C. K. I. Williams, J. Winn,\n and A. Zisserman, The pascal visual object classes challenge a retrospective.\n IJCV, 2014.\n\n M. Cordts, M. Omran, S. Ramos, T. Rehfeld, M. Enzweiler, R. Benenson,\n U. Franke, S. Roth, and B. Schiele, \"The cityscapes dataset for semantic urban\n scene understanding,\" In Proc. of CVPR, 2016.\n\n B. Zhou, H. Zhao, X. Puig, S. Fidler, A. Barriuso, A. Torralba, \"Scene Parsing\n through ADE20K dataset\", In Proc. of CVPR, 2017.\n\"\"\"\n\nimport collections\nimport os\nimport tensorflow as tf\nfrom deeplab import common\nfrom deeplab import input_preprocess\n\n# Named tuple to describe the dataset properties.\nDatasetDescriptor = collections.namedtuple(\n 'DatasetDescriptor',\n [\n 'splits_to_sizes', # Splits of the dataset into training, val and test.\n 'num_classes', # Number of semantic classes, including the\n # background class (if exists). For example, there\n # are 20 foreground classes + 1 background class in\n # the PASCAL VOC 2012 dataset. 
Thus, we set\n # num_classes=21.\n 'ignore_label', # Ignore label value.\n ])\n\n_CITYSCAPES_INFORMATION = DatasetDescriptor(\n splits_to_sizes={'train': 2975,\n 'train_coarse': 22973,\n 'trainval_fine': 3475,\n 'trainval_coarse': 23473,\n 'val': 500,\n 'test_fine': 1525},\n num_classes=19,\n ignore_label=255,\n)\n\n_PASCAL_VOC_SEG_INFORMATION = DatasetDescriptor(\n splits_to_sizes={\n 'train': 1464,\n 'train_aug': 10582,\n 'trainval': 2913,\n 'val': 1449,\n },\n num_classes=21,\n ignore_label=255,\n)\n\n_ADE20K_INFORMATION = DatasetDescriptor(\n splits_to_sizes={\n 'train': 20210, # num of samples in images/training\n 'val': 2000, # num of samples in images/validation\n },\n num_classes=151,\n ignore_label=0,\n)\n\n_DATASETS_INFORMATION = {\n 'cityscapes': _CITYSCAPES_INFORMATION,\n 'pascal_voc_seg': _PASCAL_VOC_SEG_INFORMATION,\n 'ade20k': _ADE20K_INFORMATION,\n}\n\n# Default file pattern of TFRecord of TensorFlow Example.\n_FILE_PATTERN = '%s-*'\n\n\ndef get_cityscapes_dataset_name():\n return 'cityscapes'\n\n\nclass Dataset(object):\n \"\"\"Represents input dataset for deeplab model.\"\"\"\n\n def __init__(self,\n dataset_name,\n split_name,\n dataset_dir,\n batch_size,\n crop_size,\n min_resize_value=None,\n max_resize_value=None,\n resize_factor=None,\n min_scale_factor=1.,\n max_scale_factor=1.,\n scale_factor_step_size=0,\n model_variant=None,\n num_readers=1,\n is_training=False,\n should_shuffle=False,\n should_repeat=False):\n \"\"\"Initializes the dataset.\n\n Args:\n dataset_name: Dataset name.\n split_name: A train/val Split name.\n dataset_dir: The directory of the dataset sources.\n batch_size: Batch size.\n crop_size: The size used to crop the image and label.\n min_resize_value: Desired size of the smaller image side.\n max_resize_value: Maximum allowed size of the larger image side.\n resize_factor: Resized dimensions are multiple of factor plus one.\n min_scale_factor: Minimum scale factor value.\n max_scale_factor: Maximum scale factor value.\n scale_factor_step_size: The step size from min scale factor to max scale\n factor. The input is randomly scaled based on the value of\n (min_scale_factor, max_scale_factor, scale_factor_step_size).\n model_variant: Model variant (string) for choosing how to mean-subtract\n the images. See feature_extractor.network_map for supported model\n variants.\n num_readers: Number of readers for data provider.\n is_training: Boolean, if dataset is for training or not.\n should_shuffle: Boolean, if should shuffle the input data.\n should_repeat: Boolean, if should repeat the input data.\n\n Raises:\n ValueError: Dataset name and split name are not supported.\n \"\"\"\n if dataset_name not in _DATASETS_INFORMATION:\n raise ValueError('The specified dataset is not supported yet.')\n self.dataset_name = dataset_name\n\n splits_to_sizes = _DATASETS_INFORMATION[dataset_name].splits_to_sizes\n\n if split_name not in splits_to_sizes:\n raise ValueError('data split name %s not recognized' % split_name)\n\n if model_variant is None:\n tf.logging.warning('Please specify a model_variant. 
See '\n 'feature_extractor.network_map for supported model '\n 'variants.')\n\n self.split_name = split_name\n self.dataset_dir = dataset_dir\n self.batch_size = batch_size\n self.crop_size = crop_size\n self.min_resize_value = min_resize_value\n self.max_resize_value = max_resize_value\n self.resize_factor = resize_factor\n self.min_scale_factor = min_scale_factor\n self.max_scale_factor = max_scale_factor\n self.scale_factor_step_size = scale_factor_step_size\n self.model_variant = model_variant\n self.num_readers = num_readers\n self.is_training = is_training\n self.should_shuffle = should_shuffle\n self.should_repeat = should_repeat\n\n self.num_of_classes = _DATASETS_INFORMATION[self.dataset_name].num_classes\n self.ignore_label = _DATASETS_INFORMATION[self.dataset_name].ignore_label\n\n def _parse_function(self, example_proto):\n \"\"\"Function to parse the example proto.\n\n Args:\n example_proto: Proto in the format of tf.Example.\n\n Returns:\n A dictionary with parsed image, label, height, width and image name.\n\n Raises:\n ValueError: Label is of wrong shape.\n \"\"\"\n\n # Currently only supports jpeg and png.\n # Need to use this logic because the shape is not known for\n # tf.image.decode_image and we rely on this info to\n # extend label if necessary.\n def _decode_image(content, channels):\n return tf.cond(\n tf.image.is_jpeg(content),\n lambda: tf.image.decode_jpeg(content, channels),\n lambda: tf.image.decode_png(content, channels))\n\n features = {\n 'image/encoded':\n tf.FixedLenFeature((), tf.string, default_value=''),\n 'image/filename':\n tf.FixedLenFeature((), tf.string, default_value=''),\n 'image/format':\n tf.FixedLenFeature((), tf.string, default_value='jpeg'),\n 'image/height':\n tf.FixedLenFeature((), tf.int64, default_value=0),\n 'image/width':\n tf.FixedLenFeature((), tf.int64, default_value=0),\n 'image/segmentation/class/encoded':\n tf.FixedLenFeature((), tf.string, default_value=''),\n 'image/segmentation/class/format':\n tf.FixedLenFeature((), tf.string, default_value='png'),\n }\n\n parsed_features = tf.parse_single_example(example_proto, features)\n\n image = _decode_image(parsed_features['image/encoded'], channels=3)\n\n label = None\n if self.split_name != common.TEST_SET:\n label = _decode_image(\n parsed_features['image/segmentation/class/encoded'], channels=1)\n\n image_name = parsed_features['image/filename']\n if image_name is None:\n image_name = tf.constant('')\n\n sample = {\n common.IMAGE: image,\n common.IMAGE_NAME: image_name,\n common.HEIGHT: parsed_features['image/height'],\n common.WIDTH: parsed_features['image/width'],\n }\n\n if label is not None:\n if label.get_shape().ndims == 2:\n label = tf.expand_dims(label, 2)\n elif label.get_shape().ndims == 3 and label.shape.dims[2] == 1:\n pass\n else:\n raise ValueError('Input label shape must be [height, width], or '\n '[height, width, 1].')\n\n label.set_shape([None, None, 1])\n\n sample[common.LABELS_CLASS] = label\n\n return sample\n\n def _preprocess_image(self, sample):\n \"\"\"Preprocesses the image and label.\n\n Args:\n sample: A sample containing image and label.\n\n Returns:\n sample: Sample with preprocessed image and label.\n\n Raises:\n ValueError: Ground truth label not provided during training.\n \"\"\"\n image = sample[common.IMAGE]\n label = sample[common.LABELS_CLASS]\n\n original_image, image, label = input_preprocess.preprocess_image_and_label(\n image=image,\n label=label,\n crop_height=self.crop_size[0],\n crop_width=self.crop_size[1],\n 
min_resize_value=self.min_resize_value,\n max_resize_value=self.max_resize_value,\n resize_factor=self.resize_factor,\n min_scale_factor=self.min_scale_factor,\n max_scale_factor=self.max_scale_factor,\n scale_factor_step_size=self.scale_factor_step_size,\n ignore_label=self.ignore_label,\n is_training=self.is_training,\n model_variant=self.model_variant)\n\n sample[common.IMAGE] = image\n\n if not self.is_training:\n # Original image is only used during visualization.\n sample[common.ORIGINAL_IMAGE] = original_image\n\n if label is not None:\n sample[common.LABEL] = label\n\n # Remove common.LABEL_CLASS key in the sample since it is only used to\n # derive label and not used in training and evaluation.\n sample.pop(common.LABELS_CLASS, None)\n\n return sample\n\n def get_one_shot_iterator(self):\n \"\"\"Gets an iterator that iterates across the dataset once.\n\n Returns:\n An iterator of type tf.data.Iterator.\n \"\"\"\n\n files = self._get_all_files()\n\n dataset = (\n tf.data.TFRecordDataset(files, num_parallel_reads=self.num_readers)\n .map(self._parse_function, num_parallel_calls=self.num_readers)\n .map(self._preprocess_image, num_parallel_calls=self.num_readers))\n\n if self.should_shuffle:\n dataset = dataset.shuffle(buffer_size=100)\n\n if self.should_repeat:\n dataset = dataset.repeat() # Repeat forever for training.\n else:\n dataset = dataset.repeat(1)\n\n dataset = dataset.batch(self.batch_size).prefetch(self.batch_size)\n return dataset.make_one_shot_iterator()\n\n def _get_all_files(self):\n \"\"\"Gets all the files to read data from.\n\n Returns:\n A list of input files.\n \"\"\"\n file_pattern = _FILE_PATTERN\n file_pattern = os.path.join(self.dataset_dir,\n file_pattern % self.split_name)\n return tf.gfile.Glob(file_pattern)\n" ]
[ [ "tensorflow.logging.warning", "tensorflow.constant", "tensorflow.FixedLenFeature", "tensorflow.data.TFRecordDataset", "tensorflow.image.decode_png", "tensorflow.expand_dims", "tensorflow.image.is_jpeg", "tensorflow.gfile.Glob", "tensorflow.parse_single_example", "tensorflow.image.decode_jpeg" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10" ] } ]
jvstinian/Python-Reinforcement-Learning-Projects
[ "6c97c68351fc4af426cb5c3583d75aebfabac8aa" ]
[ "Chapter04/dpg.py" ]
[ "'''\nCreated on Apr 12, 2018\n\n@author: ywz\n'''\nimport numpy, os\nimport tensorflow as tf\nfrom replay_memory import ReplayMemory\nfrom optimizer import Optimizer\nfrom actor_critic_net import ActorCriticNet\n\n\nclass DPG:\n \n def __init__(self, config, task, directory, callback=None, summary_writer=None):\n \n self.task = task\n self.directory = directory\n self.callback = callback\n self.summary_writer = summary_writer\n \n self.config = config\n self.batch_size = config['batch_size']\n self.n_episode = config['num_episode']\n self.capacity = config['capacity']\n self.history_len = config['history_len']\n self.epsilon_decay = config['epsilon_decay']\n self.epsilon_min = config['epsilon_min']\n self.time_between_two_copies = config['time_between_two_copies']\n self.update_interval = config['update_interval']\n self.tau = config['tau']\n \n self.action_dim = task.get_action_dim()\n self.state_dim = task.get_state_dim() * self.history_len\n self.critic_layers = [50, 50]\n self.actor_layers = [50, 50]\n self.actor_activation = task.get_activation_fn()\n \n self._init_modules()\n \n def _init_modules(self):\n \n # Replay memory\n self.replay_memory = ReplayMemory(history_len=self.history_len, \n capacity=self.capacity)\n # Actor critic network\n self.ac_network = ActorCriticNet(input_dim=self.state_dim, \n action_dim=self.action_dim, \n critic_layers=self.critic_layers, \n actor_layers=self.actor_layers, \n actor_activation=self.actor_activation,\n scope='ac_network')\n # Target network\n self.target_network = ActorCriticNet(input_dim=self.state_dim, \n action_dim=self.action_dim, \n critic_layers=self.critic_layers, \n actor_layers=self.actor_layers, \n actor_activation=self.actor_activation,\n scope='target_network')\n # Optimizer\n self.optimizer = Optimizer(config=self.config, \n ac_network=self.ac_network, \n target_network=self.target_network, \n replay_memory=self.replay_memory)\n # Ops for updating target network\n self.clone_op = self.target_network.get_clone_op(self.ac_network, tau=self.tau)\n # For tensorboard\n self.t_score = tf.placeholder(dtype=tf.float32, shape=[], name='new_score')\n tf.summary.scalar(\"score\", self.t_score, collections=['dpg'])\n self.summary_op = tf.summary.merge_all('dpg')\n \n def set_summary_writer(self, summary_writer=None):\n self.summary_writer = summary_writer\n self.optimizer.set_summary_writer(summary_writer)\n \n def choose_action(self, sess, state, epsilon=0.1):\n x = numpy.asarray(numpy.expand_dims(state, axis=0), dtype=numpy.float32)\n action = self.ac_network.get_action(sess, x)[0]\n return action + epsilon * numpy.random.randn(len(action))\n \n def play(self, action):\n r, new_state, termination = self.task.play_action(action)\n return r, new_state, termination\n \n def update_target_network(self, sess):\n sess.run(self.clone_op)\n \n def train(self, sess, saver=None):\n \n num_of_trials = -1\n for episode in range(self.n_episode):\n frame = self.task.reset()\n for _ in range(self.history_len+1):\n self.replay_memory.add(frame, 0, 0, 0)\n \n for _ in range(self.config['T']):\n num_of_trials += 1\n epsilon = self.epsilon_min + \\\n max(self.epsilon_decay - num_of_trials, 0) / \\\n self.epsilon_decay * (1 - self.epsilon_min)\n print(\"epi {}, frame {}k, epsilon {}\".format(episode, num_of_trials // 1000, epsilon))\n if num_of_trials % self.update_interval == 0:\n self.optimizer.train_one_step(sess, num_of_trials, self.batch_size)\n \n state = self.replay_memory.phi(frame)\n action = self.choose_action(sess, state, epsilon) \n r, 
new_frame, termination = self.play(action)\n self.replay_memory.add(frame, action, r, termination)\n frame = new_frame\n \n if num_of_trials % self.time_between_two_copies == 0:\n self.update_target_network(sess)\n self.save(sess, saver)\n \n if self.callback:\n self.callback()\n if termination:\n score = self.task.get_total_reward()\n summary_str = sess.run(self.summary_op, feed_dict={self.t_score: score})\n self.summary_writer.add_summary(summary_str, num_of_trials)\n self.summary_writer.flush()\n break\n \n def evaluate(self, sess):\n \n for episode in range(self.n_episode):\n frame = self.task.reset()\n for _ in range(self.history_len+1):\n self.replay_memory.add(frame, 0, 0, 0)\n \n for _ in range(self.config['T']):\n print(\"episode {}, total reward {}\".format(episode, \n self.task.get_total_reward()))\n \n state = self.replay_memory.phi(frame)\n action = self.choose_action(sess, state, self.epsilon_min) \n r, new_frame, termination = self.play(action)\n self.replay_memory.add(frame, action, r, termination)\n frame = new_frame\n\n if self.callback:\n self.callback()\n if termination:\n break\n \n def save(self, sess, saver, model_name='model.ckpt'):\n if saver:\n try:\n checkpoint_path = os.path.join(self.directory, model_name)\n saver.save(sess, checkpoint_path)\n except:\n pass\n \n def load(self, sess, saver, model_name='model.ckpt'):\n if saver:\n try:\n checkpoint_path = os.path.join(self.directory, model_name)\n saver.restore(sess, checkpoint_path)\n except:\n pass\n " ]
[ [ "numpy.expand_dims", "tensorflow.summary.scalar", "tensorflow.placeholder", "tensorflow.summary.merge_all" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10" ] } ]
slowy07/pythonApps
[ "f90b8823f939b98f7bf1dea7ed35fe6e22e2f730" ]
[ "pythonProject/face_landmark/train.py" ]
[ "from imutils import face_utils\nimport dlib\nimport cv2\nimport numpy as np\n\n\npre_trained_model = 'classifier/shape_predictor_68_face_landmarks.dat'\ndetector = dlib.get_frontal_face_detector()\npredictor = dlib.shape_predictor(pre_trained_model)\n\nvideo = cv2.VideoCapture('video/somi.mp4')\n\nwhile video.read():\n _, image_input = video.read()\n\n resize = cv2.resize(image_input, (1050,600)) \n image = resize\n \n \n out_face = np.zeros_like(image)\n gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n \n rects = detector(gray, 1)\n\n for (i, rect) in enumerate(rects):\n \n shape = predictor(gray, rect)\n shape = face_utils.shape_to_np(shape)\n\n \n for (x, y) in shape:\n cv2.circle(image, (x, y), 1, (0, 255, 5), -5)\n \n #face extraction\n remapped_shape = np.zeros_like(shape) \n feature_mask = np.zeros((image.shape[0], image.shape[1])) \n remapped_shape = cv2.convexHull(shape)\n cv2.fillConvexPoly(feature_mask, remapped_shape[0:27], 1)\n feature_mask = feature_mask.astype(np.bool)\n out_face[feature_mask] = image[feature_mask]\n\n\n #output window\n cv2.imshow(\"Output\", out_face)\n cv2.resizeWindow('Output', 30,30)\n \n k = cv2.waitKey(5) & 0xFF\n if k == 27:\n break\n\ncv2.destroyAllWindows()\ncap.release()" ]
[ [ "numpy.zeros_like", "numpy.zeros" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
TexasInstruments/vision
[ "abaf29de0798e8e8d3f996dc272cd3c515562695" ]
[ "references/edgeailite/engine/train_pixel2pixel.py" ]
[ "# Copyright (c) 2018-2021, Texas Instruments\n# All Rights Reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n#\n# * Redistributions of source code must retain the above copyright notice, this\n# list of conditions and the following disclaimer.\n#\n# * Redistributions in binary form must reproduce the above copyright notice,\n# this list of conditions and the following disclaimer in the documentation\n# and/or other materials provided with the distribution.\n#\n# * Neither the name of the copyright holder nor the names of its\n# contributors may be used to endorse or promote products derived from\n# this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE\n# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\n# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\n# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\n# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\n# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\nimport os\nimport shutil\nimport time\nimport math\nimport copy\n\nimport torch\nimport torch.nn.parallel\nimport torch.backends.cudnn as cudnn\nimport torch.optim\nimport torch.utils.data\nimport torch.onnx\nimport onnx\n\nimport datetime\nfrom torch.utils.tensorboard import SummaryWriter\nimport numpy as np\nimport random\nimport cv2\nfrom colorama import Fore\nimport progiter\nfrom packaging import version\nimport warnings\n\nfrom torchvision.edgeailite import xnn\nfrom torchvision.edgeailite import xvision\nfrom torchvision.edgeailite.xvision.transforms import image_transforms\nfrom torchvision.edgeailite.xvision import losses as pixel2pixel_losses\nfrom .infer_pixel2pixel import compute_accuracy\n\n##################################################\nwarnings.filterwarnings('ignore', category=torch.jit.TracerWarning)\n\n##################################################\ndef get_config():\n args = xnn.utils.ConfigNode()\n\n args.dataset_config = xnn.utils.ConfigNode()\n args.dataset_config.split_name = 'val'\n args.dataset_config.max_depth_bfr_scaling = 80\n args.dataset_config.depth_scale = 1\n args.dataset_config.train_depth_log = 1\n args.use_semseg_for_depth = False\n\n # model config\n args.model_config = xnn.utils.ConfigNode()\n args.model_config.output_type = ['segmentation'] # the network is used to predict flow or depth or sceneflow\n args.model_config.output_channels = None # number of output channels\n args.model_config.prediction_channels = None # intermediate number of channels before final output_channels\n args.model_config.input_channels = None # number of input channels\n args.model_config.final_upsample = True # use final upsample to input resolution or not\n args.model_config.output_range = None # max range of output\n args.model_config.num_decoders = None # number of decoders to use. 
[options: 0, 1, None]\n args.model_config.freeze_encoder = False # do not update encoder weights\n args.model_config.freeze_decoder = False # do not update decoder weights\n args.model_config.multi_task_type = 'learned' # find out loss multiplier by learning, choices=[None, 'learned', 'uncertainty', 'grad_norm', 'dwa_grad_norm']\n args.model_config.target_input_ratio = 1 # Keep target size same as input size\n args.model_config.input_nv12 = False # convert input to nv12 format\n args.model_config.enable_fp16 = False # faster training if the GPU supports fp16\n\n args.model = None # the model itself can be given from ouside\n args.model_name = 'deeplabv2lite_mobilenetv2' # model architecture, overwritten if pretrained is specified\n args.dataset_name = 'cityscapes_segmentation' # dataset type\n args.transforms = None # the transforms itself can be given from outside\n args.input_channel_reverse = False # reverse input channels, for example RGB to BGR\n\n args.data_path = './data/cityscapes' # 'path to dataset'\n args.save_path = None # checkpoints save path\n args.phase = 'training' # training/calibration/validation\n args.date = None # date to add to save path. if this is None, current date will be added.\n\n args.logger = None # logger stream to output into\n args.show_gpu_usage = False # Shows gpu usage at the begining of each training epoch\n\n args.split_file = None # train_val split file\n args.split_files = None # split list files. eg: train.txt val.txt\n args.split_value = None # test_val split proportion (between 0 (only test) and 1 (only train))\n\n args.optimizer = 'adam' # optimizer algorithms, choices=['adam','sgd']\n args.scheduler = 'step' # scheduler algorithms, choices=['step','poly', 'cosine']\n args.workers = 8 # number of data loading workers\n\n args.epochs = 250 # number of total epochs to run\n args.start_epoch = 0 # manual epoch number (useful on restarts)\n\n args.epoch_size = 0 # manual epoch size (will match dataset size if not specified)\n args.epoch_size_val = 0 # manual epoch size (will match dataset size if not specified)\n args.batch_size = 12 # mini_batch size\n args.total_batch_size = None # accumulated batch size. total_batch_size = batch_size*iter_size\n args.iter_size = 1 # iteration size. 
total_batch_size = batch_size*iter_size\n\n args.lr = 1e-4 # initial learning rate\n args.lr_clips = None # use args.lr itself if it is None\n args.lr_calib = 0.05 # lr for bias calibration\n args.warmup_epochs = 5 # number of epochs to warmup\n args.warmup_factor = 1e-3 # max lr allowed for the first epoch during warmup (as a factor of initial lr)\n\n args.momentum = 0.9 # momentum for sgd, alpha parameter for adam\n args.beta = 0.999 # beta parameter for adam\n args.weight_decay = 1e-4 # weight decay\n args.bias_decay = None # bias decay\n\n args.sparse = True # avoid invalid/ignored target pixels from loss computation, use NEAREST for interpolation\n\n args.tensorboard_num_imgs = 5 # number of imgs to display in tensorboard\n args.pretrained = None # path to pre_trained model\n args.resume = None # path to latest checkpoint (default: none)\n args.no_date = False # don\\'t append date timestamp to folder\n args.print_freq = 100 # print frequency (default: 100)\n\n args.milestones = (100, 200) # epochs at which learning rate is divided by 2\n\n args.losses = ['segmentation_loss'] # loss functions to mchoices=['step','poly', 'cosine'],loss multiplication factor')\n args.metrics = ['segmentation_metrics'] # metric/measurement/error functions for train/validation\n args.multi_task_factors = None # loss mult factors\n args.class_weights = None # class weights\n\n args.loss_mult_factors = None # fixed loss mult factors - per loss - not: this is different from multi_task_factors (which is per task)\n\n args.multistep_gamma = 0.5 # steps for step scheduler\n args.polystep_power = 1.0 # power for polynomial scheduler\n\n args.rand_seed = 1 # random seed\n args.img_border_crop = None # image border crop rectangle. can be relative or absolute\n args.target_mask = None # mask rectangle. can be relative or absolute. last value is the mask value\n\n args.rand_resize = None # random image size to be resized to during training\n args.rand_output_size = None # output size to be resized to during training\n args.rand_scale = (1.0, 2.0) # random scale range for training\n args.rand_crop = None # image size to be cropped to\n\n args.img_resize = None # image size to be resized to during evaluation\n args.output_size = None # target output size to be resized to\n\n args.count_flops = True # count flops and report\n\n args.shuffle = True # shuffle or not\n args.shuffle_val = True # shuffle val dataset or not\n\n args.transform_rotation = 0. # apply rotation augumentation. value is rotation in degrees. 
0 indicates no rotation\n args.is_flow = None # whether entries in images and targets lists are optical flow or not\n\n args.upsample_mode = 'bilinear' # upsample mode to use, choices=['nearest','bilinear']\n\n args.image_prenorm = True # whether normalization is done before all other the transforms\n args.image_mean = (128.0,) # image mean for input image normalization\n args.image_scale = (1.0 / (0.25 * 256),) # image scaling/mult for input iamge normalization\n\n args.max_depth = 80 # maximum depth to be used for visualization\n\n args.pivot_task_idx = 0 # task id to select best model\n\n args.parallel_model = True # Usedata parallel for model\n args.parallel_criterion = True # Usedata parallel for loss and metric\n\n args.evaluate_start = True # evaluate right at the begining of training or not\n args.save_onnx = True # apply quantized inference or not\n args.print_model = False # print the model to text\n args.run_soon = True # To start training after generating configs/models\n\n args.quantize = False # apply quantized inference or not\n #args.model_surgery = None # replace activations with PAct2 activation module. Helpful in quantized training.\n args.bitwidth_weights = 8 # bitwidth for weights\n args.bitwidth_activations = 8 # bitwidth for activations\n args.histogram_range = True # histogram range for calibration\n args.bias_calibration = True # apply bias correction during quantized inference calibration\n args.per_channel_q = False # apply separate quantizion factor for each channel in depthwise or not\n args.constrain_bias = None # constrain bias according to the constraints of convolution engine\n\n args.save_mod_files = False # saves modified files after last commit. Also stores commit id.\n args.make_score_zero_mean = False # make score zero mean while learning\n args.no_q_for_dws_layer_idx = 0 # no_q_for_dws_layer_idx\n\n args.viz_colormap = 'rainbow' # colormap for tensorboard: 'rainbow', 'plasma', 'magma', 'bone'\n\n args.freeze_bn = False # freeze the statistics of bn\n args.tensorboard_enable = True # en/disable of TB writing\n args.print_train_class_iou = False\n args.print_val_class_iou = False\n args.freeze_layers = None\n args.opset_version = 11 # onnx opset_version\n args.prob_color_to_gray = (0.0,0.0) # this will be used for controlling color 2 gray augmentation\n\n args.interpolation = None # interpolation method to be used for resize. one of cv2.INTER_\n return args\n\n\n# ################################################\n# to avoid hangs in data loader with multi threads\n# this was observed after using cv2 image processing functions\n# https://github.com/pytorch/pytorch/issues/1355\ncv2.setNumThreads(0)\n\n# ################################################\ndef main(args):\n # ensure pytorch version is 1.2 or higher\n assert version.parse(torch.__version__) >= version.parse('1.1'), \\\n 'torch version must be 1.1 or higher, due to the change in scheduler.step() and optimiser.step() call order'\n\n assert (not hasattr(args, 'evaluate')), 'args.evaluate is deprecated. 
use args.phase=training or calibration or validation'\n assert is_valid_phase(args.phase), f'invalid phase {args.phase}'\n assert not hasattr(args, 'model_surgery'), 'the argument model_surgery is deprecated, it is not needed now - remove it'\n\n if (args.phase == 'validation' and args.bias_calibration):\n args.bias_calibration = False\n warnings.warn('switching off bias calibration in validation')\n #\n\n #################################################\n args.rand_resize = args.img_resize if args.rand_resize is None else args.rand_resize\n args.rand_crop = args.img_resize if args.rand_crop is None else args.rand_crop\n args.output_size = args.img_resize if args.output_size is None else args.output_size\n # resume has higher priority\n args.pretrained = None if (args.resume is not None) else args.pretrained\n\n # prob_color_to_gray will be used for controlling color 2 gray augmentation\n if 'tiad' in args.dataset_name and args.prob_color_to_gray == (0.0, 0.0):\n #override in case of 'tiad' if default values are used\n args.prob_color_to_gray = (0.5, 0.0)\n\n if args.save_path is None:\n save_path = get_save_path(args)\n else:\n save_path = args.save_path\n #\n if not os.path.exists(save_path):\n os.makedirs(save_path)\n\n if args.save_mod_files:\n #store all the files after the last commit.\n mod_files_path = save_path+'/mod_files'\n os.makedirs(mod_files_path)\n \n cmd = \"git ls-files --modified | xargs -i cp {} {}\".format(\"{}\", mod_files_path)\n print(\"cmd:\", cmd) \n os.system(cmd)\n\n #stoe last commit id. \n cmd = \"git log -n 1 >> {}\".format(mod_files_path + '/commit_id.txt')\n print(\"cmd:\", cmd) \n os.system(cmd)\n\n #################################################\n if args.logger is None:\n log_file = os.path.splitext(os.path.basename(__file__))[0] + '.log'\n args.logger = xnn.utils.TeeLogger(filename=os.path.join(save_path,log_file))\n\n #################################################\n # global settings. 
rand seeds for repeatability\n random.seed(args.rand_seed)\n np.random.seed(args.rand_seed)\n torch.manual_seed(args.rand_seed)\n torch.cuda.manual_seed(args.rand_seed)\n\n ################################\n # args check and config\n if args.iter_size != 1 and args.total_batch_size is not None:\n warnings.warn(\"only one of --iter_size or --total_batch_size must be set\")\n #\n if args.total_batch_size is not None:\n args.iter_size = args.total_batch_size//args.batch_size\n else:\n args.total_batch_size = args.batch_size*args.iter_size\n\n #################################################\n # set some global flags and initializations\n # keep it in args for now - although they don't belong here strictly\n # using pin_memory is seen to cause issues, especially when when lot of memory is used.\n args.use_pinned_memory = False\n args.n_iter = 0\n args.best_metric = -1\n cudnn.benchmark = True\n # torch.autograd.set_detect_anomaly(True)\n\n ################################\n # reset character color, in case it is different\n print('{}'.format(Fore.RESET))\n # print everything for log\n print('=> args: {}'.format(args))\n print('\\n'.join(\"%s: %s\" % item for item in sorted(vars(args).items())))\n\n print('=> will save everything to {}'.format(save_path))\n\n #################################################\n train_writer = SummaryWriter(os.path.join(save_path,'train')) if args.tensorboard_enable else None\n val_writer = SummaryWriter(os.path.join(save_path,'val')) if args.tensorboard_enable else None\n transforms = get_transforms(args) if args.transforms is None else args.transforms\n assert isinstance(transforms, (list,tuple)) and len(transforms) == 2, 'incorrect transforms were given'\n\n print(\"=> fetching images in '{}'\".format(args.data_path))\n split_arg = args.split_file if args.split_file else (args.split_files if args.split_files else args.split_value)\n train_dataset, val_dataset = xvision.datasets.__dict__[args.dataset_name](args.dataset_config, args.data_path, split=split_arg, transforms=transforms)\n\n #################################################\n print('=> {} samples found, {} train samples and {} test samples '.format(len(train_dataset)+len(val_dataset),\n len(train_dataset), len(val_dataset)))\n train_sampler = get_dataset_sampler(train_dataset, args.epoch_size) if args.epoch_size != 0 else None\n shuffle_train = args.shuffle and (train_sampler is None)\n train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=args.batch_size,\n num_workers=args.workers, pin_memory=args.use_pinned_memory, sampler=train_sampler, shuffle=shuffle_train)\n\n val_sampler = get_dataset_sampler(val_dataset, args.epoch_size_val) if args.epoch_size_val != 0 else None\n shuffle_val = args.shuffle_val and (val_sampler is None)\n val_loader = torch.utils.data.DataLoader(val_dataset, batch_size=args.batch_size,\n num_workers=args.workers, pin_memory=args.use_pinned_memory, sampler=val_sampler, shuffle=shuffle_val)\n\n #################################################\n if (args.model_config.input_channels is None):\n args.model_config.input_channels = (3,)\n print(\"=> input channels is not given - setting to {}\".format(args.model_config.input_channels))\n\n if (args.model_config.output_channels is None):\n if ('num_classes' in dir(train_dataset)):\n args.model_config.output_channels = train_dataset.num_classes()\n else:\n args.model_config.output_channels = (2 if args.model_config.output_type == 'flow' else args.model_config.output_channels)\n xnn.utils.print_yellow(\"=> output 
channels is not given - setting to {} - not sure to work\".format(args.model_config.output_channels))\n #\n if not isinstance(args.model_config.output_channels,(list,tuple)):\n args.model_config.output_channels = [args.model_config.output_channels]\n\n if (args.class_weights is None) and ('class_weights' in dir(train_dataset)):\n args.class_weights = train_dataset.class_weights()\n if not isinstance(args.class_weights, (list,tuple)):\n args.class_weights = [args.class_weights]\n #\n print(\"=> class weights available for dataset: {}\".format(args.class_weights))\n\n #################################################\n pretrained_data = None\n model_surgery_quantize = False\n pretrained_data = None\n if args.pretrained and args.pretrained != \"None\":\n pretrained_data = []\n pretrained_files = args.pretrained if isinstance(args.pretrained,(list,tuple)) else [args.pretrained]\n for p in pretrained_files:\n if isinstance(p, dict):\n p_data = p\n else:\n if p.startswith('http://') or p.startswith('https://'):\n p_file = xnn.utils.download_url(p, './data/downloads')\n else:\n p_file = p\n #\n print(f'=> loading pretrained weights file: {p}')\n p_data = torch.load(p_file)\n #\n pretrained_data.append(p_data)\n model_surgery_quantize = p_data['quantize'] if 'quantize' in p_data else False\n #\n\n #################################################\n # create model\n is_onnx_model = False\n if isinstance(args.model, torch.nn.Module):\n model, change_names_dict = args.model if isinstance(args.model, (list, tuple)) else (args.model, None)\n assert isinstance(model, torch.nn.Module), 'args.model, if provided must be a valid torch.nn.Module'\n elif isinstance(args.model, str) and args.model.endswith('.onnx'):\n model = xnn.onnx.import_onnx(args.model)\n is_onnx_model = True\n else:\n xnn.utils.print_yellow(\"=> creating model '{}'\".format(args.model_name))\n model = xvision.models.pixel2pixel.__dict__[args.model_name](args.model_config)\n # check if we got the model as well as parameters to change the names in pretrained\n model, change_names_dict = model if isinstance(model, (list,tuple)) else (model,None)\n #\n\n if args.quantize:\n # dummy input is used by quantized models to analyze graph\n is_cuda = next(model.parameters()).is_cuda\n dummy_input = create_rand_inputs(args, is_cuda=is_cuda)\n #\n if 'training' in args.phase:\n model = xnn.quantize.QuantTrainModule(model, per_channel_q=args.per_channel_q,\n histogram_range=args.histogram_range, bitwidth_weights=args.bitwidth_weights,\n bitwidth_activations=args.bitwidth_activations, constrain_bias=args.constrain_bias,\n dummy_input=dummy_input)\n elif 'calibration' in args.phase:\n model = xnn.quantize.QuantCalibrateModule(model, per_channel_q=args.per_channel_q,\n bitwidth_weights=args.bitwidth_weights, bitwidth_activations=args.bitwidth_activations,\n histogram_range=args.histogram_range, constrain_bias=args.constrain_bias,\n bias_calibration=args.bias_calibration, dummy_input=dummy_input, lr_calib=args.lr_calib)\n elif 'validation' in args.phase:\n # Note: bias_calibration is not emabled\n model = xnn.quantize.QuantTestModule(model, per_channel_q=args.per_channel_q,\n bitwidth_weights=args.bitwidth_weights, bitwidth_activations=args.bitwidth_activations,\n histogram_range=args.histogram_range, constrain_bias=args.constrain_bias,\n dummy_input=dummy_input, model_surgery_quantize=model_surgery_quantize)\n else:\n assert False, f'invalid phase {args.phase}'\n #\n\n # load pretrained model\n if pretrained_data is not None and not is_onnx_model:\n 
model_orig = get_model_orig(model)\n for (p_data,p_file) in zip(pretrained_data, pretrained_files):\n print(\"=> using pretrained weights from: {}\".format(p_file))\n if hasattr(model_orig, 'load_weights'):\n model_orig.load_weights(pretrained=p_data, change_names_dict=change_names_dict)\n else:\n xnn.utils.load_weights(get_model_orig(model), pretrained=p_data, change_names_dict=change_names_dict)\n #\n #\n #\n\n #################################################\n if args.count_flops:\n count_flops(args, model)\n\n #################################################\n if args.save_onnx:\n write_onnx_model(args, get_model_orig(model), save_path, save_traced_model=False)\n #\n\n #################################################\n if args.print_model:\n print(model)\n print('\\n')\n else:\n args.logger.debug(str(model))\n args.logger.debug('\\n')\n\n #################################################\n if (not args.run_soon):\n print(\"Training not needed for now\")\n close(args)\n exit()\n\n #################################################\n # DataParallel does not work for QuantCalibrateModule or QuantTestModule\n if args.parallel_model and (not isinstance(model, (xnn.quantize.QuantCalibrateModule, xnn.quantize.QuantTestModule))):\n model = torch.nn.DataParallel(model)\n\n #################################################\n model = model.cuda()\n\n #################################################\n # for help in debug/print\n for name, module in model.named_modules():\n module.name = name\n\n #################################################\n args.loss_modules = copy.deepcopy(args.losses)\n for task_dx, task_losses in enumerate(args.losses):\n for loss_idx, loss_fn in enumerate(task_losses):\n kw_args = {}\n loss_args = pixel2pixel_losses.__dict__[loss_fn].args()\n for arg in loss_args:\n if arg == 'weight' and (args.class_weights is not None):\n kw_args.update({arg:args.class_weights[task_dx]})\n elif arg == 'num_classes':\n kw_args.update({arg:args.model_config.output_channels[task_dx]})\n elif arg == 'sparse':\n kw_args.update({arg:args.sparse})\n elif arg == 'enable_fp16':\n kw_args.update({arg:args.model_config.enable_fp16})\n #\n #\n loss_fn_raw = pixel2pixel_losses.__dict__[loss_fn](**kw_args)\n if args.parallel_criterion:\n loss_fn = torch.nn.DataParallel(loss_fn_raw).cuda() if args.parallel_criterion else loss_fn_raw.cuda()\n loss_fn.info = loss_fn_raw.info\n loss_fn.clear = loss_fn_raw.clear\n else:\n loss_fn = loss_fn_raw.cuda()\n #\n args.loss_modules[task_dx][loss_idx] = loss_fn\n #\n\n args.metric_modules = copy.deepcopy(args.metrics)\n for task_dx, task_metrics in enumerate(args.metrics):\n for midx, metric_fn in enumerate(task_metrics):\n kw_args = {}\n loss_args = pixel2pixel_losses.__dict__[metric_fn].args()\n for arg in loss_args:\n if arg == 'weight':\n kw_args.update({arg:args.class_weights[task_dx]})\n elif arg == 'num_classes':\n kw_args.update({arg:args.model_config.output_channels[task_dx]})\n elif arg == 'sparse':\n kw_args.update({arg:args.sparse})\n elif arg == 'enable_fp16':\n kw_args.update({arg:args.model_config.enable_fp16})\n #\n #\n metric_fn_raw = pixel2pixel_losses.__dict__[metric_fn](**kw_args)\n if args.parallel_criterion:\n metric_fn = torch.nn.DataParallel(metric_fn_raw).cuda()\n metric_fn.info = metric_fn_raw.info\n metric_fn.clear = metric_fn_raw.clear\n else:\n metric_fn = metric_fn_raw.cuda()\n #\n args.metric_modules[task_dx][midx] = metric_fn\n #\n\n #################################################\n if args.phase=='validation':\n with 
torch.no_grad():\n validate(args, val_dataset, val_loader, model, 0, val_writer)\n #\n close(args)\n return\n\n #################################################\n assert(args.optimizer in ['adam', 'sgd'])\n print('=> setting {} optimizer'.format(args.optimizer))\n if args.lr_clips is not None:\n learning_rate_clips = args.lr_clips if 'training' in args.phase else 0.0\n clips_decay = args.bias_decay if (args.bias_decay is not None and args.bias_decay != 0.0) else args.weight_decay\n clips_params = [p for n,p in model.named_parameters() if 'clips' in n]\n other_params = [p for n,p in model.named_parameters() if 'clips' not in n]\n param_groups = [{'params': clips_params, 'weight_decay': clips_decay, 'lr': learning_rate_clips},\n {'params': other_params, 'weight_decay': args.weight_decay}]\n else:\n param_groups = [{'params': filter(lambda p: p.requires_grad, model.parameters()), 'weight_decay': args.weight_decay}]\n #\n\n learning_rate = args.lr if ('training'in args.phase) else 0.0\n if args.optimizer == 'adam':\n optimizer = torch.optim.Adam(param_groups, learning_rate, betas=(args.momentum, args.beta))\n elif args.optimizer == 'sgd':\n optimizer = torch.optim.SGD(param_groups, learning_rate, momentum=args.momentum)\n else:\n raise ValueError('Unknown optimizer type{}'.format(args.optimizer))\n #\n\n #################################################\n max_iter = args.epochs * len(train_loader)\n scheduler = xnn.optim.lr_scheduler.SchedulerWrapper(scheduler_type=args.scheduler, optimizer=optimizer,\n epochs=args.epochs, start_epoch=args.start_epoch,\n warmup_epochs=args.warmup_epochs, warmup_factor=args.warmup_factor,\n max_iter=max_iter, polystep_power=args.polystep_power,\n milestones=args.milestones, multistep_gamma=args.multistep_gamma)\n\n # optionally resume from a checkpoint\n if args.resume:\n if not os.path.isfile(args.resume):\n print(\"=> no checkpoint found at '{}'\".format(args.resume)) \n else:\n print(\"=> loading checkpoint '{}'\".format(args.resume))\n\n checkpoint = torch.load(args.resume)\n model = xnn.utils.load_weights(model, checkpoint)\n \n if args.start_epoch == 0:\n args.start_epoch = checkpoint['epoch']\n \n if 'best_metric' in list(checkpoint.keys()): \n args.best_metric = checkpoint['best_metric']\n\n if 'optimizer' in list(checkpoint.keys()): \n optimizer.load_state_dict(checkpoint['optimizer'])\n\n if 'scheduler' in list(checkpoint.keys()):\n scheduler.load_state_dict(checkpoint['scheduler'])\n\n if 'multi_task_factors' in list(checkpoint.keys()):\n args.multi_task_factors = checkpoint['multi_task_factors']\n\n print(\"=> loaded checkpoint '{}' (epoch {})\".format(args.resume, checkpoint['epoch']))\n\n #################################################\n if args.evaluate_start:\n with torch.no_grad():\n validate(args, val_dataset, val_loader, model, args.start_epoch, val_writer)\n\n grad_scaler = torch.cuda.amp.GradScaler() if args.model_config.enable_fp16 else None\n\n for epoch in range(args.start_epoch, args.epochs):\n # epoch is needed to seed shuffling in DistributedSampler, every epoch.\n # otherwise seed of 0 is used every epoch, which seems incorrect.\n if train_sampler and isinstance(train_sampler, torch.utils.data.DistributedSampler):\n train_sampler.set_epoch(epoch)\n if val_sampler and isinstance(val_sampler, torch.utils.data.DistributedSampler):\n val_sampler.set_epoch(epoch)\n\n # train for one epoch\n train(args, train_dataset, train_loader, model, optimizer, epoch, train_writer, scheduler, grad_scaler)\n\n # evaluate on validation set\n with 
torch.no_grad():\n val_metric, metric_name = validate(args, val_dataset, val_loader, model, epoch, val_writer)\n\n if args.best_metric < 0:\n args.best_metric = val_metric\n\n if \"iou\" in metric_name.lower() or \"acc\" in metric_name.lower():\n is_best = val_metric >= args.best_metric\n args.best_metric = max(val_metric, args.best_metric)\n elif \"error\" in metric_name.lower() or \"diff\" in metric_name.lower() or \"norm\" in metric_name.lower() \\\n or \"loss\" in metric_name.lower() or \"outlier\" in metric_name.lower():\n is_best = val_metric <= args.best_metric\n args.best_metric = min(val_metric, args.best_metric)\n else:\n raise ValueError(\"Metric is not known. Best model could not be saved.\")\n #\n\n checkpoint_dict = { 'epoch': epoch + 1, 'model_name': args.model_name,\n 'state_dict': get_model_orig(model).state_dict(),\n 'optimizer': optimizer.state_dict(),\n 'scheduler': scheduler.state_dict(),\n 'best_metric': args.best_metric,\n 'multi_task_factors': args.multi_task_factors,\n 'quantize' : args.quantize}\n\n save_checkpoint(args, save_path, get_model_orig(model), checkpoint_dict, is_best)\n\n if args.tensorboard_enable:\n train_writer.file_writer.flush()\n val_writer.file_writer.flush()\n\n # adjust the learning rate using lr scheduler\n if 'training' in args.phase:\n scheduler.step()\n #\n #\n\n # close and cleanup\n close(args)\n#\n\n###################################################################\ndef is_valid_phase(phase):\n phases = ('training', 'calibration', 'validation')\n return any(p in phase for p in phases)\n\n\n###################################################################\ndef train(args, train_dataset, train_loader, model, optimizer, epoch, train_writer, scheduler, grad_scaler):\n batch_time = xnn.utils.AverageMeter()\n data_time = xnn.utils.AverageMeter()\n # if the loss/ metric is already an average, no need to further average\n avg_loss = [xnn.utils.AverageMeter(print_avg=(not task_loss[0].info()['is_avg'])) for task_loss in args.loss_modules]\n avg_loss_orig = [xnn.utils.AverageMeter(print_avg=(not task_loss[0].info()['is_avg'])) for task_loss in args.loss_modules]\n avg_metric = [xnn.utils.AverageMeter(print_avg=(not task_metric[0].info()['is_avg'])) for task_metric in args.metric_modules]\n\n ##########################\n # switch to train mode\n model.train()\n\n # freeze bn and range after some epochs during quantization\n if args.freeze_bn or (args.quantize and epoch > 2 and epoch >= ((args.epochs//2)-1)):\n xnn.utils.print_once('Freezing BN for subsequent epochs')\n xnn.utils.freeze_bn(model)\n #\n if (args.quantize and epoch > 4 and epoch >= ((args.epochs//2)+1)):\n xnn.utils.print_once('Freezing ranges for subsequent epochs')\n xnn.layers.freeze_quant_range(model)\n #\n\n #freeze layers \n if args.freeze_layers is not None:\n # 'freeze_layer_name' could be part of 'name', i.e. 'name' need not be exact same as 'freeze_layer_name'\n # e.g. 
freeze_layer_name = 'encoder.0' then all layers like, 'encoder.0.0' 'encoder.0.1' will be frozen\n for freeze_layer_name in args.freeze_layers:\n for name, module in model.named_modules():\n if freeze_layer_name in name:\n xnn.utils.print_once(\"Freezing the module : {}\".format(name))\n module.eval()\n for param in module.parameters():\n param.requires_grad = False\n\n ##########################\n for task_dx, task_losses in enumerate(args.loss_modules):\n for loss_idx, loss_fn in enumerate(task_losses):\n loss_fn.clear()\n for task_dx, task_metrics in enumerate(args.metric_modules):\n for midx, metric_fn in enumerate(task_metrics):\n metric_fn.clear()\n\n num_iter = len(train_loader)\n progress_bar = progiter.ProgIter(np.arange(num_iter), chunksize=1)\n metric_name = \"Metric\"\n metric_ctx = [None] * len(args.metric_modules)\n end_time = time.time()\n writer_idx = 0\n last_update_iter = -1\n\n # change color to yellow for calibration\n progressbar_color = (Fore.YELLOW if (('calibration' in args.phase) or ('training' in args.phase and args.quantize)) else Fore.WHITE)\n print('{}'.format(progressbar_color), end='')\n\n ##########################\n for iter_id, (inputs, targets) in enumerate(train_loader):\n # measure data loading time\n data_time.update(time.time() - end_time)\n\n lr = scheduler.get_lr()[0]\n\n input_list = [[jj.cuda() for jj in img] if isinstance(img,(list,tuple)) else img.cuda() for img in inputs]\n target_list = [tgt.cuda(non_blocking=True) for tgt in targets]\n target_sizes = [tgt.shape for tgt in target_list]\n batch_size_cur = target_sizes[0][0]\n\n ##########################\n # compute output\n task_outputs = model(input_list)\n\n task_outputs = task_outputs if isinstance(task_outputs,(list,tuple)) else [task_outputs]\n # upsample output to target resolution\n if args.upsample_mode is not None:\n task_outputs = upsample_tensors(task_outputs, target_sizes, args.upsample_mode)\n\n if args.model_config.multi_task_type is not None and len(args.model_config.output_channels) > 1:\n args.multi_task_factors, args.multi_task_offsets = xnn.layers.get_loss_scales(model)\n else:\n args.multi_task_factors = None\n args.multi_task_offsets = None\n\n loss_total, loss_list, loss_names, loss_types, loss_list_orig = \\\n compute_task_objectives(args, args.loss_modules, input_list, task_outputs, target_list,\n task_mults=args.multi_task_factors, task_offsets=args.multi_task_offsets,\n loss_mult_factors=args.loss_mult_factors)\n\n if args.print_train_class_iou:\n metric_total, metric_list, metric_names, metric_types, _, confusion_matrix = \\\n compute_task_objectives(args, args.metric_modules, input_list, task_outputs, target_list, \n get_confusion_matrix=args.print_train_class_iou)\n else: \n metric_total, metric_list, metric_names, metric_types, _ = \\\n compute_task_objectives(args, args.metric_modules, input_list, task_outputs, target_list, \n get_confusion_matrix=args.print_train_class_iou)\n\n if args.model_config.multi_task_type is not None and len(args.model_config.output_channels) > 1:\n xnn.layers.set_losses(model, loss_list_orig)\n\n if 'training' in args.phase:\n # accumulate gradients\n if args.model_config.enable_fp16:\n grad_scaler.scale(loss_total).backward()\n else:\n loss_total.backward()\n #\n\n # optimization step\n if ((iter_id+1) % args.iter_size) == 0:\n if args.model_config.enable_fp16:\n grad_scaler.step(optimizer)\n grad_scaler.update()\n else:\n optimizer.step()\n #\n # zero gradients so that we can accumulate gradients\n # setting grad=None is a faster 
alternative instead of optimizer.zero_grad()\n xnn.utils.clear_grad(model)\n #\n #\n\n # record loss.\n for task_idx, task_losses in enumerate(args.loss_modules):\n avg_loss[task_idx].update(float(loss_list[task_idx].cpu()), batch_size_cur)\n avg_loss_orig[task_idx].update(float(loss_list_orig[task_idx].cpu()), batch_size_cur)\n if args.tensorboard_enable:\n train_writer.add_scalar('Training/Task{}_{}_Loss_Iter'.format(task_idx,loss_names[task_idx]), float(loss_list[task_idx]), args.n_iter)\n if args.model_config.multi_task_type is not None and len(args.model_config.output_channels) > 1:\n train_writer.add_scalar('Training/multi_task_Factor_Task{}_{}'.format(task_idx,loss_names[task_idx]), float(args.multi_task_factors[task_idx]), args.n_iter)\n\n # record error/accuracy.\n for task_idx, task_metrics in enumerate(args.metric_modules):\n avg_metric[task_idx].update(float(metric_list[task_idx].cpu()), batch_size_cur)\n\n ##########################\n if args.tensorboard_enable:\n write_output(args, 'Training_', num_iter, iter_id, epoch, train_dataset, train_writer, input_list, task_outputs, target_list, metric_name, writer_idx)\n\n if ((iter_id % args.print_freq) == 0) or (iter_id == (num_iter-1)):\n output_string = ''\n for task_idx, task_metrics in enumerate(args.metric_modules):\n output_string += '[{}={}]'.format(metric_names[task_idx], str(avg_metric[task_idx]))\n\n epoch_str = '{}/{}'.format(epoch + 1, args.epochs)\n progress_bar.set_description(\"{}=> {} \".format(progressbar_color, args.phase))\n multi_task_factors_print = ['{:.3f}'.format(float(lmf)) for lmf in args.multi_task_factors] if args.multi_task_factors is not None else None\n progress_bar.set_postfix(Epoch=epoch_str, LR=lr, DataTime=str(data_time), LossMult=multi_task_factors_print, Loss=avg_loss, Output=output_string)\n progress_bar.update(iter_id-last_update_iter)\n last_update_iter = iter_id\n\n args.n_iter += 1\n end_time = time.time()\n writer_idx = (writer_idx + 1) % args.tensorboard_num_imgs\n\n # add onnx graph to tensorboard\n # commenting out due to issues in transitioning to pytorch 0.4\n # (bilinear mode in upsampling causes hang or crash - may be due to align_borders change, nearest is fine)\n #if epoch == 0 and iter_id == 0:\n # input_zero = torch.zeros(input_var.shape)\n # train_writer.add_graph(model, input_zero)\n #This cache operation slows down tranining \n #torch.cuda.empty_cache()\n #\n\n if args.print_train_class_iou:\n print_class_iou(args=args, confusion_matrix=confusion_matrix, task_idx=task_idx)\n \n progress_bar.close()\n\n # to print a new line - do not provide end=''\n print('{}'.format(Fore.RESET), end='')\n\n if args.tensorboard_enable:\n for task_idx, task_losses in enumerate(args.loss_modules):\n train_writer.add_scalar('Training/Task{}_{}_Loss_Epoch'.format(task_idx,loss_names[task_idx]), float(avg_loss[task_idx]), epoch)\n\n for task_idx, task_metrics in enumerate(args.metric_modules):\n train_writer.add_scalar('Training/Task{}_{}_Metric_Epoch'.format(task_idx,metric_names[task_idx]), float(avg_metric[task_idx]), epoch)\n\n output_name = metric_names[args.pivot_task_idx]\n output_metric = float(avg_metric[args.pivot_task_idx])\n\n ##########################\n if args.quantize:\n def debug_format(v):\n return ('{:.3f}'.format(v) if v is not None else 'None')\n #\n clips_act = [m.get_clips_act()[1] for n,m in model.named_modules() if isinstance(m,xnn.layers.PAct2)]\n if len(clips_act) > 0:\n args.logger.debug('\\nclips_act : ' + ' '.join(map(debug_format, clips_act)))\n 
args.logger.debug('')\n #\n return output_metric, output_name\n\n\n###################################################################\ndef validate(args, val_dataset, val_loader, model, epoch, val_writer):\n data_time = xnn.utils.AverageMeter()\n # if the loss/ metric is already an average, no need to further average\n avg_metric = [xnn.utils.AverageMeter(print_avg=(not task_metric[0].info()['is_avg'])) for task_metric in args.metric_modules]\n\n ##########################\n # switch to evaluate mode\n model.eval()\n\n ##########################\n for task_dx, task_metrics in enumerate(args.metric_modules):\n for midx, metric_fn in enumerate(task_metrics):\n metric_fn.clear()\n\n metric_name = \"Metric\"\n end_time = time.time()\n writer_idx = 0\n last_update_iter = -1\n metric_ctx = [None] * len(args.metric_modules)\n\n num_iter = len(val_loader)\n progress_bar = progiter.ProgIter(np.arange(num_iter), chunksize=1)\n\n # change color to green\n print('{}'.format(Fore.GREEN), end='')\n\n ##########################\n for iter_id, (inputs, targets) in enumerate(val_loader):\n data_time.update(time.time() - end_time)\n input_list = [[jj.cuda() for jj in img] if isinstance(img,(list,tuple)) else img.cuda() for img in inputs]\n target_list = [j.cuda(non_blocking=True) for j in targets]\n target_sizes = [tgt.shape for tgt in target_list]\n batch_size_cur = target_sizes[0][0]\n\n # compute output\n task_outputs = model(input_list)\n\n task_outputs = task_outputs if isinstance(task_outputs, (list, tuple)) else [task_outputs]\n if args.upsample_mode is not None:\n task_outputs = upsample_tensors(task_outputs, target_sizes, args.upsample_mode)\n \n if args.print_val_class_iou:\n metric_total, metric_list, metric_names, metric_types, _, confusion_matrix = \\\n compute_task_objectives(args, args.metric_modules, input_list, task_outputs, target_list, \n get_confusion_matrix = args.print_val_class_iou)\n else: \n metric_total, metric_list, metric_names, metric_types, _ = \\\n compute_task_objectives(args, args.metric_modules, input_list, task_outputs, target_list, \n get_confusion_matrix = args.print_val_class_iou)\n\n # record error/accuracy.\n for task_idx, task_metrics in enumerate(args.metric_modules):\n avg_metric[task_idx].update(float(metric_list[task_idx].cpu()), batch_size_cur)\n\n if args.tensorboard_enable:\n write_output(args, 'Validation_', num_iter, iter_id, epoch, val_dataset, val_writer, input_list, task_outputs, target_list, metric_names, writer_idx)\n\n if ((iter_id % args.print_freq) == 0) or (iter_id == (num_iter-1)):\n output_string = ''\n for task_idx, task_metrics in enumerate(args.metric_modules):\n output_string += '[{}={}]'.format(metric_names[task_idx], str(avg_metric[task_idx]))\n\n epoch_str = '{}/{}'.format(epoch + 1, args.epochs)\n progress_bar.set_description(\"=> validation\")\n progress_bar.set_postfix(Epoch=epoch_str, DataTime=data_time, Output=\"{}\".format(output_string))\n progress_bar.update(iter_id-last_update_iter)\n last_update_iter = iter_id\n #\n\n end_time = time.time()\n writer_idx = (writer_idx + 1) % args.tensorboard_num_imgs\n #\n\n if args.print_val_class_iou:\n print_class_iou(args = args, confusion_matrix = confusion_matrix, task_idx=task_idx)\n #\n\n #print_conf_matrix(conf_matrix=conf_matrix, en=False)\n progress_bar.close()\n\n # to print a new line - do not provide end=''\n print('{}'.format(Fore.RESET), end='')\n\n if args.tensorboard_enable:\n for task_idx, task_metrics in enumerate(args.metric_modules):\n 
val_writer.add_scalar('Validation/Task{}_{}_Metric_Epoch'.format(task_idx,metric_names[task_idx]), float(avg_metric[task_idx]), epoch)\n\n output_name = metric_names[args.pivot_task_idx]\n output_metric = float(avg_metric[args.pivot_task_idx])\n return output_metric, output_name\n\n\n###################################################################\ndef close(args):\n if args.logger is not None:\n del args.logger\n args.logger = None\n #\n args.best_metric = -1\n#\n\n\ndef get_save_path(args, phase=None):\n date = args.date if args.date else datetime.datetime.now().strftime(\"%Y-%m-%d_%H-%M-%S\")\n save_path = os.path.join('./data/checkpoints/edgeailite', args.dataset_name, date + '_' + args.dataset_name + '_' + args.model_name)\n save_path += '_resize{}x{}_traincrop{}x{}'.format(args.img_resize[1], args.img_resize[0], args.rand_crop[1], args.rand_crop[0])\n phase = phase if (phase is not None) else args.phase\n save_path = os.path.join(save_path, phase)\n return save_path\n\n\ndef get_model_orig(model):\n is_parallel_model = isinstance(model, (torch.nn.DataParallel, torch.nn.parallel.DistributedDataParallel))\n model_orig = (model.module if is_parallel_model else model)\n model_orig = (model_orig.module if isinstance(model_orig, (xnn.quantize.QuantBaseModule)) else model_orig)\n return model_orig\n\n\ndef create_rand_inputs(args, is_cuda):\n dummy_input = []\n if not args.model_config.input_nv12:\n for i_ch in args.model_config.input_channels:\n x = torch.rand((1, i_ch, args.img_resize[0], args.img_resize[1]))\n x = x.cuda() if is_cuda else x\n dummy_input.append(x)\n else: #nv12 \n for i_ch in args.model_config.input_channels:\n y = torch.rand((1, 1, args.img_resize[0], args.img_resize[1]))\n uv = torch.rand((1, 1, args.img_resize[0]//2, args.img_resize[1]))\n y = y.cuda() if is_cuda else y\n uv = uv.cuda() if is_cuda else uv\n dummy_input.append([y,uv])\n\n return dummy_input\n\ndef count_flops(args, model):\n is_cuda = next(model.parameters()).is_cuda\n dummy_input = create_rand_inputs(args, is_cuda)\n #\n model.eval()\n flops = xnn.utils.forward_count_flops(model, dummy_input)\n gflops = flops/1e9\n print('=> Size = {}, GFLOPs = {}, GMACs = {}'.format(args.img_resize, gflops, gflops/2))\n\n\ndef derive_node_name(input_name):\n #take last entry of input names for deciding node name\n #print(\"input_name[-1]: \", input_name[-1])\n node_name = input_name[-1].rsplit('.', 1)[0]\n #print(\"formed node_name: \", node_name)\n return node_name\n\n\n#torch onnx export does not update names. 
Do it using onnx.save\ndef add_node_names(onnx_model_name):\n onnx_model = onnx.load(onnx_model_name)\n for i in range(len(onnx_model.graph.node)):\n for j in range(len(onnx_model.graph.node[i].input)):\n #print('-'*60)\n #print(\"name: \", onnx_model.graph.node[i].name)\n #print(\"input: \", onnx_model.graph.node[i].input)\n #print(\"output: \", onnx_model.graph.node[i].output)\n onnx_model.graph.node[i].input[j] = onnx_model.graph.node[i].input[j].split(':')[0]\n onnx_model.graph.node[i].name = derive_node_name(onnx_model.graph.node[i].input)\n #\n #\n #update model inplace\n onnx.save(onnx_model, onnx_model_name)\n\n\ndef write_onnx_model(args, model, save_path, name='checkpoint.onnx', save_traced_model=False):\n is_cuda = next(model.parameters()).is_cuda\n input_list = create_rand_inputs(args, is_cuda=is_cuda)\n onnx_file = os.path.join(save_path, name)\n model.eval()\n torch.onnx.export(model, input_list, onnx_file, export_params=True, verbose=False,\n do_constant_folding=True, opset_version=args.opset_version)\n\n #torch onnx export does not update names. Do it using onnx.save\n add_node_names(onnx_model_name=onnx_file)\n # infer shapes\n onnx.shape_inference.infer_shapes_path(onnx_file, onnx_file)\n\n if save_traced_model:\n traced_model = torch.jit.trace(model, (input_list,))\n traced_save_path = os.path.join(save_path, 'traced_model.pth')\n torch.jit.save(traced_model, traced_save_path)\n #\n\n\n###################################################################\ndef write_output(args, prefix, val_epoch_size, iter_id, epoch, dataset, output_writer, input_images, task_outputs, task_targets, metric_names, writer_idx):\n write_freq = (args.tensorboard_num_imgs / float(val_epoch_size))\n write_prob = np.random.random()\n if (write_prob > write_freq):\n return\n if args.model_config.input_nv12:\n batch_size = input_images[0][0].shape[0]\n else:\n batch_size = input_images[0].shape[0]\n b_index = random.randint(0, batch_size - 1)\n\n input_image = None\n for img_idx, img in enumerate(input_images):\n if args.model_config.input_nv12:\n #convert NV12 to BGR for tensorboard\n input_image = xvision.transforms.image_transforms_xv12.nv12_to_bgr_image(Y = input_images[img_idx][0][b_index], UV = input_images[img_idx][1][b_index],\n image_scale=args.image_scale, image_mean=args.image_mean)\n else:\n input_image = input_images[img_idx][b_index].cpu().numpy().transpose((1, 2, 0))\n # convert back to original input range (0-255)\n input_image = input_image / args.image_scale + args.image_mean\n\n if args.is_flow and args.is_flow[0][img_idx]:\n #input corresponding to flow is assumed to have been generated by adding 128\n flow = input_image - 128\n flow_hsv = xnn.utils.flow2hsv(flow.transpose(2, 0, 1), confidence=False).transpose(2, 0, 1)\n #flow_hsv = (flow_hsv / 255.0).clip(0, 1) #TODO: check this\n output_writer.add_image(prefix +'Input{}/{}'.format(img_idx, writer_idx), flow_hsv, epoch)\n else:\n input_image = (input_image/255.0).clip(0,1) #.astype(np.uint8)\n output_writer.add_image(prefix + 'Input{}/{}'.format(img_idx, writer_idx), input_image.transpose((2,0,1)), epoch)\n\n # for sparse data, chroma blending does not look good\n for task_idx, output_type in enumerate(args.model_config.output_type):\n # metric_name = metric_names[task_idx]\n output = task_outputs[task_idx]\n target = task_targets[task_idx]\n if (output_type == 'segmentation') and hasattr(dataset, 'decode_segmap'):\n segmentation_target = dataset.decode_segmap(target[b_index,0].cpu().numpy())\n segmentation_output = 
output.max(dim=1,keepdim=True)[1].data.cpu().numpy() if(output.shape[1]>1) else output.data.cpu().numpy()\n segmentation_output = dataset.decode_segmap(segmentation_output[b_index,0])\n segmentation_output_blend = xnn.utils.chroma_blend(input_image, segmentation_output)\n #\n output_writer.add_image(prefix+'Task{}_{}_GT/{}'.format(task_idx,output_type,writer_idx), segmentation_target.transpose(2,0,1), epoch)\n if not args.sparse:\n segmentation_target_blend = xnn.utils.chroma_blend(input_image, segmentation_target)\n output_writer.add_image(prefix + 'Task{}_{}_GT_ColorBlend/{}'.format(task_idx, output_type, writer_idx), segmentation_target_blend.transpose(2, 0, 1), epoch)\n #\n output_writer.add_image(prefix+'Task{}_{}_Output/{}'.format(task_idx,output_type,writer_idx), segmentation_output.transpose(2,0,1), epoch)\n output_writer.add_image(prefix+'Task{}_{}_Output_ColorBlend/{}'.format(task_idx,output_type,writer_idx), segmentation_output_blend.transpose(2,0,1), epoch)\n elif (output_type in ('depth', 'disparity')):\n depth_chanidx = 0\n output_writer.add_image(prefix+'Task{}_{}_GT_Color_Visualization/{}'.format(task_idx,output_type,writer_idx), xnn.utils.tensor2array(target[b_index][depth_chanidx].cpu(), max_value=args.max_depth, colormap=args.viz_colormap).transpose(2,0,1), epoch)\n if not args.sparse:\n output_writer.add_image(prefix + 'Task{}_{}_GT_ColorBlend_Visualization/{}'.format(task_idx, output_type, writer_idx), xnn.utils.tensor2array(target[b_index][depth_chanidx].cpu(), max_value=args.max_depth, colormap=args.viz_colormap, input_blend=input_image).transpose(2, 0, 1), epoch)\n #\n output_writer.add_image(prefix+'Task{}_{}_Output_Color_Visualization/{}'.format(task_idx,output_type,writer_idx), xnn.utils.tensor2array(output.data[b_index][depth_chanidx].cpu(), max_value=args.max_depth, colormap=args.viz_colormap).transpose(2,0,1), epoch)\n output_writer.add_image(prefix + 'Task{}_{}_Output_ColorBlend_Visualization/{}'.format(task_idx, output_type, writer_idx),xnn.utils.tensor2array(output.data[b_index][depth_chanidx].cpu(), max_value=args.max_depth, colormap=args.viz_colormap, input_blend=input_image).transpose(2, 0, 1), epoch)\n elif (output_type == 'flow'):\n max_value_flow = 10.0 # only for visualization\n output_writer.add_image(prefix+'Task{}_{}_GT/{}'.format(task_idx,output_type,writer_idx), xnn.utils.flow2hsv(target[b_index][:2].cpu().numpy(), max_value=max_value_flow).transpose(2,0,1), epoch)\n output_writer.add_image(prefix+'Task{}_{}_Output/{}'.format(task_idx,output_type,writer_idx), xnn.utils.flow2hsv(output.data[b_index][:2].cpu().numpy(), max_value=max_value_flow).transpose(2,0,1), epoch)\n elif (output_type == 'interest_pt'):\n score_chanidx = 0\n target_score_to_write = target[b_index][score_chanidx].cpu()\n output_score_to_write = output.data[b_index][score_chanidx].cpu()\n \n #if score is learnt as zero mean add offset to make it [0-255]\n if args.make_score_zero_mean:\n # target_score_to_write!=0 : value 0 indicates GT unavailble. 
Leave them to be 0.\n target_score_to_write[target_score_to_write!=0] += 128.0\n output_score_to_write += 128.0\n\n max_value_score = float(torch.max(target_score_to_write)) #0.002\n output_writer.add_image(prefix+'Task{}_{}_GT_Bone_Visualization/{}'.format(task_idx,output_type,writer_idx), xnn.utils.tensor2array(target_score_to_write, max_value=max_value_score, colormap='bone').transpose(2,0,1), epoch)\n output_writer.add_image(prefix+'Task{}_{}_Output_Bone_Visualization/{}'.format(task_idx,output_type,writer_idx), xnn.utils.tensor2array(output_score_to_write, max_value=max_value_score, colormap='bone').transpose(2,0,1), epoch)\n #\n\ndef print_conf_matrix(conf_matrix = [], en = False):\n if not en:\n return\n num_rows = conf_matrix.shape[0]\n num_cols = conf_matrix.shape[1]\n print(\"-\"*64)\n num_ele = 1\n for r_idx in range(num_rows):\n print(\"\\n\")\n for c_idx in range(0,num_cols,num_ele):\n print(conf_matrix[r_idx][c_idx:c_idx+num_ele], end=\"\")\n print(\"\\n\")\n print(\"-\" * 64)\n\ndef compute_task_objectives(args, objective_fns, input_var, task_outputs, task_targets, task_mults=None, \n task_offsets=None, loss_mult_factors=None, get_confusion_matrix = False):\n \n ##########################\n objective_total = torch.zeros_like(task_outputs[0].view(-1)[0])\n objective_list = []\n objective_list_orig = []\n objective_names = []\n objective_types = []\n for task_idx, task_objectives in enumerate(objective_fns):\n output_type = args.model_config.output_type[task_idx]\n objective_sum_value = torch.zeros_like(task_outputs[task_idx].view(-1)[0])\n objective_sum_name = ''\n objective_sum_type = ''\n\n task_mult = task_mults[task_idx] if task_mults is not None else 1.0\n task_offset = task_offsets[task_idx] if task_offsets is not None else 0.0\n\n for oidx, objective_fn in enumerate(task_objectives):\n objective_batch = objective_fn(input_var, task_outputs[task_idx], task_targets[task_idx])\n objective_batch = objective_batch.mean() if isinstance(objective_fn, torch.nn.DataParallel) else objective_batch\n objective_name = objective_fn.info()['name']\n objective_type = objective_fn.info()['is_avg']\n if get_confusion_matrix:\n confusion_matrix = objective_fn.info()['confusion_matrix']\n\n loss_mult = loss_mult_factors[task_idx][oidx] if (loss_mult_factors is not None) else 1.0\n # --\n objective_batch_not_nan = (objective_batch if not torch.isnan(objective_batch) else 0.0)\n objective_sum_value = objective_batch_not_nan*loss_mult + objective_sum_value\n objective_sum_name += (objective_name if (objective_sum_name == '') else ('+' + objective_name))\n assert (objective_sum_type == '' or objective_sum_type == objective_type), 'metric types (avg/val) for a given task should match'\n objective_sum_type = objective_type\n\n objective_list.append(objective_sum_value)\n objective_list_orig.append(objective_sum_value)\n objective_names.append(objective_sum_name)\n objective_types.append(objective_sum_type)\n\n objective_total = objective_sum_value*task_mult + task_offset + objective_total\n\n return_list = [objective_total, objective_list, objective_names, objective_types, objective_list_orig]\n if get_confusion_matrix:\n return_list.append(confusion_matrix)\n\n return return_list \n\n\ndef save_checkpoint(args, save_path, model, checkpoint_dict, is_best, filename='checkpoint.pth'):\n torch.save(checkpoint_dict, os.path.join(save_path,filename))\n if is_best:\n shutil.copyfile(os.path.join(save_path,filename), os.path.join(save_path,'model_best.pth'))\n #\n if args.save_onnx:\n 
write_onnx_model(args, model, save_path, name='checkpoint.onnx')\n if is_best:\n write_onnx_model(args, model, save_path, name='model_best.onnx')\n #\n\n\ndef get_dataset_sampler(dataset_object, epoch_size):\n print('=> creating a random sampler as epoch_size is specified')\n num_samples = len(dataset_object)\n epoch_size = int(epoch_size * num_samples) if epoch_size < 1 else int(epoch_size)\n dataset_sampler = torch.utils.data.sampler.RandomSampler(data_source=dataset_object, replacement=True, num_samples=epoch_size)\n return dataset_sampler\n\n\ndef get_train_transform(args):\n # image normalization can be at the beginning of transforms or at the end\n image_mean = np.array(args.image_mean, dtype=np.float32)\n image_scale = np.array(args.image_scale, dtype=np.float32)\n image_prenorm = image_transforms.NormalizeMeanScale(mean=image_mean, scale=image_scale) if args.image_prenorm else None\n image_postnorm = image_transforms.NormalizeMeanScale(mean=image_mean, scale=image_scale) if (not image_prenorm) else None\n reverse_channels = image_transforms.ReverseImageChannels() if args.input_channel_reverse else None\n color_2_gray = image_transforms.RandomColor2Gray(is_flow=args.is_flow, random_threshold=args.prob_color_to_gray[0]) if args.prob_color_to_gray[0] != 0.0 else None\n\n # crop size used only for training\n image_train_output_scaling = image_transforms.Scale(args.rand_resize, target_size=args.rand_output_size, is_flow=args.is_flow) \\\n if (args.rand_output_size is not None and args.rand_output_size != args.rand_resize) else None\n train_transform = image_transforms.Compose([\n reverse_channels,\n image_prenorm,\n image_transforms.AlignImages(interpolation=args.interpolation),\n image_transforms.MaskTarget(args.target_mask, 0),\n image_transforms.CropRect(args.img_border_crop),\n image_transforms.RandomRotate(args.transform_rotation, is_flow=args.is_flow) if args.transform_rotation else None,\n image_transforms.RandomScaleCrop(args.rand_resize, scale_range=args.rand_scale, is_flow=args.is_flow, interpolation=args.interpolation),\n image_transforms.RandomHorizontalFlip(is_flow=args.is_flow),\n image_transforms.RandomCrop(args.rand_crop),\n color_2_gray,\n image_train_output_scaling,\n image_postnorm,\n image_transforms.ConvertToTensor()\n ])\n return train_transform\n\n\ndef get_validation_transform(args):\n # image normalization can be at the beginning of transforms or at the end\n image_mean = np.array(args.image_mean, dtype=np.float32)\n image_scale = np.array(args.image_scale, dtype=np.float32)\n image_prenorm = image_transforms.NormalizeMeanScale(mean=image_mean, scale=image_scale) if args.image_prenorm else None\n image_postnorm = image_transforms.NormalizeMeanScale(mean=image_mean, scale=image_scale) if (not image_prenorm) else None\n reverse_channels = image_transforms.ReverseImageChannels() if args.input_channel_reverse else None\n color_2_gray = image_transforms.RandomColor2Gray(is_flow=args.is_flow, random_threshold=args.prob_color_to_gray[1]) if args.prob_color_to_gray[1] != 0.0 else None\n\n # prediction is resized to output_size before evaluation.\n val_transform = image_transforms.Compose([\n reverse_channels,\n image_prenorm,\n image_transforms.AlignImages(interpolation=args.interpolation),\n image_transforms.MaskTarget(args.target_mask, 0),\n image_transforms.CropRect(args.img_border_crop),\n image_transforms.Scale(args.img_resize, target_size=args.output_size, is_flow=args.is_flow, interpolation=args.interpolation),\n color_2_gray,\n image_postnorm,\n 
image_transforms.ConvertToTensor()\n ])\n return val_transform\n\n\ndef get_transforms(args):\n # Provision to train with val transform - provide rand_scale as (0, 0)\n # Fixing the train-test resolution discrepancy, https://arxiv.org/abs/1906.06423\n always_use_val_transform = (args.rand_scale[0] == 0)\n train_transform = get_validation_transform(args) if always_use_val_transform else get_train_transform(args)\n val_transform = get_validation_transform(args)\n return train_transform, val_transform\n\n\ndef _upsample_impl(tensor, output_size, upsample_mode):\n # upsample of long tensor is not supported currently. covert to float, just to avoid error.\n # we can do thsi only in the case of nearest mode, otherwise output will have invalid values.\n convert_to_float = False\n if isinstance(tensor, (torch.LongTensor,torch.cuda.LongTensor)):\n convert_to_float = True\n original_dtype = tensor.dtype\n tensor = tensor.float()\n upsample_mode = 'nearest'\n\n dim_added = False\n if len(tensor.shape) < 4:\n tensor = tensor[np.newaxis,...]\n dim_added = True\n\n if (tensor.size()[-2:] != output_size):\n tensor = torch.nn.functional.interpolate(tensor, output_size, mode=upsample_mode)\n\n if dim_added:\n tensor = tensor[0,...]\n\n if convert_to_float:\n tensor = tensor.long() #tensor.astype(original_dtype)\n\n return tensor\n\n\ndef upsample_tensors(tensors, output_sizes, upsample_mode):\n if isinstance(tensors, (list,tuple)):\n for tidx, tensor in enumerate(tensors):\n tensors[tidx] = _upsample_impl(tensor, output_sizes[tidx][-2:], upsample_mode)\n #\n else:\n tensors = _upsample_impl(tensors, output_sizes[0][-2:], upsample_mode)\n return tensors\n\n#print IoU for each class\ndef print_class_iou(args = None, confusion_matrix = None, task_idx = 0): \n n_classes = args.model_config.output_channels[task_idx]\n [accuracy, mean_iou, iou, f1_score] = compute_accuracy(args, confusion_matrix, n_classes)\n print(\"\\n Class IoU: [\", end = \"\")\n for class_iou in iou:\n print(\"{:0.3f}\".format(class_iou), end=\",\")\n print(\"]\") \n\nif __name__ == '__main__':\n train_args = get_config()\n main(train_args)\n" ]
[ [ "torch.max", "torch.load", "torch.utils.data.DataLoader", "torch.no_grad", "torch.nn.functional.interpolate", "torch.onnx.export", "torch.jit.trace", "numpy.arange", "torch.rand", "torch.optim.SGD", "torch.utils.data.sampler.RandomSampler", "torch.optim.Adam", "torch.cuda.amp.GradScaler", "numpy.array", "torch.jit.save", "numpy.random.random", "torch.cuda.manual_seed", "numpy.random.seed", "torch.isnan", "torch.manual_seed", "torch.nn.DataParallel" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
tedzhouhk/GCNP
[ "3b0646450620419dc6e2de457fc8f1afcbf03754" ]
[ "GNN/graph_samplers.py" ]
[ "from GNN.globals import *\r\nimport numpy as np\r\nimport scipy.sparse\r\nimport abc\r\nimport time\r\nimport math\r\nimport pdb\r\nfrom math import ceil\r\nimport GNN.cython_sampler as cy\r\n\r\n\r\nclass graph_sampler:\r\n __metaclass__ = abc.ABCMeta\r\n def __init__(self,adj_train,node_train,size_subgraph,args_preproc):\r\n self.adj_train = adj_train\r\n self.node_train = np.unique(node_train).astype(np.int32)\r\n # size in terms of number of vertices in subgraph\r\n self.size_subgraph = size_subgraph\r\n self.name_sampler = 'None'\r\n self.node_subgraph = None\r\n self.preproc(**args_preproc)\r\n\r\n @abc.abstractmethod\r\n def preproc(self,**kwargs):\r\n pass\r\n\r\n def par_sample(self,stage,**kwargs):\r\n return self.cy_sampler.par_sample()\r\n\r\n\r\nclass rw_sampling(graph_sampler):\r\n def __init__(self,adj_train,node_train,size_subgraph,size_root,size_depth):\r\n self.size_root = size_root\r\n self.size_depth = size_depth\r\n size_subgraph = size_root*size_depth\r\n super().__init__(adj_train,node_train,size_subgraph,dict())\r\n self.cy_sampler = cy.RW(self.adj_train.indptr,self.adj_train.indices,self.node_train,\\\r\n NUM_PAR_SAMPLER,SAMPLES_PER_PROC,self.size_root,self.size_depth)\r\n def preproc(self,**kwargs):\r\n pass\r\n\r\nclass edge_sampling(graph_sampler):\r\n def __init__(self,adj_train,node_train,num_edges_subgraph):\r\n \"\"\"\r\n num_edges_subgraph: specify the size of subgraph by the edge budget. NOTE: other samplers specify node budget.\r\n \"\"\"\r\n self.num_edges_subgraph = num_edges_subgraph\r\n self.size_subgraph = num_edges_subgraph*2 # this may not be true in many cases. But for now just use this.\r\n self.deg_train = np.array(adj_train.sum(1)).flatten()\r\n self.adj_train_norm = scipy.sparse.dia_matrix((1/self.deg_train,0),shape=adj_train.shape).dot(adj_train)\r\n super().__init__(adj_train,node_train,self.size_subgraph,dict())\r\n #self.cy_sampler = cy.Edge(self.adj_train.indptr,self.adj_train.indices,self.node_train,\\\r\n # NUM_PAR_SAMPLER,SAMPLES_PER_PROC,self.edge_prob_tri.row,self.edge_prob_tri.col,self.edge_prob_tri.data)\r\n self.cy_sampler = cy.Edge2(self.adj_train.indptr,self.adj_train.indices,self.node_train,\\\r\n NUM_PAR_SAMPLER,SAMPLES_PER_PROC,self.edge_prob_tri.row,self.edge_prob_tri.col,self.edge_prob_tri.data.cumsum(),self.num_edges_subgraph)\r\n def preproc(self,**kwargs):\r\n self.edge_prob = scipy.sparse.csr_matrix((np.zeros(self.adj_train.size),\\\r\n self.adj_train.indices,self.adj_train.indptr),shape=self.adj_train.shape)\r\n self.edge_prob.data[:] = self.adj_train_norm.data[:]\r\n _adj_trans = scipy.sparse.csr_matrix.tocsc(self.adj_train_norm)\r\n self.edge_prob.data += _adj_trans.data # P_e \\propto a_{u,v} + a_{v,u}\r\n self.edge_prob.data *= 2*self.num_edges_subgraph/self.edge_prob.data.sum()\r\n # now edge_prob is a symmetric matrix, we only keep the upper triangle part, since adj is assumed to be undirected.\r\n self.edge_prob_tri = scipy.sparse.triu(self.edge_prob).astype(np.float32) # NOTE: in coo format\r\n\r\n\r\n\r\nclass mrw_sampling(graph_sampler):\r\n\r\n def __init__(self,adj_train,node_train,size_subgraph,size_frontier,max_deg=10000):\r\n self.p_dist = None\r\n super().__init__(adj_train,node_train,size_subgraph,dict())\r\n self.size_frontier = size_frontier\r\n self.deg_train = np.bincount(self.adj_train.nonzero()[0])\r\n self.name_sampler = 'MRW'\r\n self.max_deg = int(max_deg)\r\n self.cy_sampler = cy.MRW(self.adj_train.indptr,self.adj_train.indices,self.node_train,\\\r\n 
NUM_PAR_SAMPLER,SAMPLES_PER_PROC,self.p_dist,self.max_deg,self.size_frontier,self.size_subgraph)\r\n\r\n def preproc(self,**kwargs):\r\n _adj_hop = self.adj_train\r\n self.p_dist = np.array([_adj_hop.data[_adj_hop.indptr[v]:_adj_hop.indptr[v+1]].sum() for v in range(_adj_hop.shape[0])], dtype=np.int32)\r\n\r\n\r\n\r\n\r\nclass node_sampling(graph_sampler):\r\n \r\n def __init__(self,adj_train,node_train,size_subgraph):\r\n self.p_dist = np.zeros(len(node_train))\r\n super().__init__(adj_train,node_train,size_subgraph,dict())\r\n self.cy_sampler = cy.Node(self.adj_train.indptr,self.adj_train.indices,self.node_train,\\\r\n NUM_PAR_SAMPLER,SAMPLES_PER_PROC,self.p_dist,self.size_subgraph)\r\n\r\n def preproc(self,**kwargs):\r\n _p_dist = np.array([self.adj_train.data[self.adj_train.indptr[v]:self.adj_train.indptr[v+1]].sum() for v in self.node_train], dtype=np.int64)\r\n self.p_dist = _p_dist.cumsum()\r\n if self.p_dist[-1] > 2**31-1:\r\n print('warning: total deg exceeds 2**31')\r\n self.p_dist = self.p_dist.astype(np.float64)\r\n self.p_dist /= self.p_dist[-1]/(2**31-1)\r\n self.p_dist = self.p_dist.astype(np.int32)\r\n\r\n\r\nclass full_batch_sampling(graph_sampler):\r\n \r\n def __init__(self,adj_train,node_train,size_subgraph):\r\n super().__init__(adj_train,node_train,size_subgraph,dict())\r\n self.cy_sampler = cy.FullBatch(self.adj_train.indptr,self.adj_train.indices,self.node_train,\\\r\n NUM_PAR_SAMPLER,SAMPLES_PER_PROC)\r\n\r\n" ]
[ [ "numpy.zeros", "numpy.unique" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
malarinv/seq2seq-keyphrase-pytorch
[ "14350477867bbaafe285d6ac0e7a814f4cda1bdf" ]
[ "evaluate.py" ]
[ "import json\nimport math\nimport logging\nimport string\n\nimport nltk\nimport scipy\nimport torch\nfrom nltk.stem.porter import *\nimport numpy as np\nfrom collections import Counter\n\nimport os\n\nfrom torch.autograd import Variable\n\nimport config\nimport pykp\nfrom utils import Progbar\nfrom pykp.metric.bleu import bleu\n\nstemmer = PorterStemmer()\n\ndef process_predseqs(pred_seqs, oov, id2word, opt):\n '''\n :param pred_seqs:\n :param src_str:\n :param oov:\n :param id2word:\n :param opt:\n :return:\n '''\n processed_seqs = []\n if_valid = []\n\n for seq in pred_seqs:\n # print('-' * 50)\n # print('seq.sentence: ' + str(seq.sentence))\n # print('oov: ' + str(oov))\n #\n # for x in seq.sentence[:-1]:\n # if x >= opt.vocab_size and len(oov)==0:\n # print('ERROR')\n\n # convert to words and remove the EOS token\n seq_sentence_np = [int(x) for x in seq.sentence]\n processed_seq = [id2word[x] if x < opt.vocab_size else oov[x - opt.vocab_size] for x in seq_sentence_np[:-1]]\n # print('processed_seq: ' + str(processed_seq))\n\n # print('%s - %s' % (str(seq.sentence[:-1]), str(processed_seq)))\n\n keep_flag = True\n\n if len(processed_seq) == 0:\n keep_flag = False\n\n if keep_flag and any([w == pykp.io.UNK_WORD for w in processed_seq]):\n keep_flag = False\n\n if keep_flag and any([w == '.' or w == ',' for w in processed_seq]):\n keep_flag = False\n\n if_valid.append(keep_flag)\n processed_seqs.append((seq, processed_seq, seq.score))\n\n unzipped = list(zip(*(processed_seqs)))\n processed_seqs, processed_str_seqs, processed_scores = unzipped if len(processed_seqs) > 0 and len(unzipped) == 3 else ([], [], [])\n\n assert len(processed_seqs) == len(processed_str_seqs) == len(processed_scores) == len(if_valid)\n return if_valid, processed_seqs, processed_str_seqs, processed_scores\n\n\ndef post_process_predseqs(seqs, num_oneword_seq=1):\n processed_seqs = []\n\n # -1 means no filter applied\n if num_oneword_seq == -1:\n return seqs\n\n for seq, str_seq, score in zip(*seqs):\n keep_flag = True\n\n if len(str_seq) == 1 and num_oneword_seq <= 0:\n keep_flag = False\n\n if keep_flag:\n processed_seqs.append((seq, str_seq, score))\n # update the number of one-word sequeces to keep\n if len(str_seq) == 1:\n num_oneword_seq -= 1\n\n unzipped = list(zip(*(processed_seqs)))\n if len(unzipped) != 3:\n return ([], [], [])\n else:\n return unzipped\n\n\ndef if_present_phrase(src_str_tokens, phrase_str_tokens):\n \"\"\"\n\n :param src_str_tokens: a list of strings (words) of source text\n :param phrase_str_tokens: a list of strings (words) of a phrase\n :return:\n \"\"\"\n match_pos_idx = -1\n for src_start_idx in range(len(src_str_tokens) - len(phrase_str_tokens) + 1):\n match_flag = True\n # iterate each word in target, if one word does not match, set match=False and break\n for seq_idx, seq_w in enumerate(phrase_str_tokens):\n src_w = src_str_tokens[src_start_idx + seq_idx]\n if src_w != seq_w:\n match_flag = False\n break\n if match_flag:\n match_pos_idx = src_start_idx\n break\n\n return match_flag, match_pos_idx\n\n\ndef if_present_duplicate_phrases(src_str, trgs_str, do_stemming=True, check_duplicate=True):\n if do_stemming:\n src_to_match = stem_word_list(src_str)\n else:\n src_to_match = src_str\n\n present_indices = []\n present_flags = []\n phrase_set = set() # some phrases are duplicate after stemming, like \"model\" and \"models\" would be same after stemming, thus we ignore the following ones\n\n for trg_str in trgs_str:\n if do_stemming:\n trg_to_match = stem_word_list(trg_str)\n else:\n 
trg_to_match = trg_str\n\n # check if the phrase appears in source text\n # iterate each word in source\n match_flag, match_pos_idx = if_present_phrase(src_to_match, trg_to_match)\n\n # check if it is duplicate, if true then ignore it\n if check_duplicate and '_'.join(trg_to_match) in phrase_set:\n present_flags.append(False)\n present_indices.append(match_pos_idx)\n continue\n else:\n # if it reaches the end of source and no match, means it doesn't appear in the source\n present_flags.append(match_flag)\n present_indices.append(match_pos_idx)\n\n phrase_set.add('_'.join(trg_to_match))\n\n assert len(present_flags) == len(present_indices)\n\n return present_flags, present_indices\n\n\ndef evaluate_beam_search(generator, data_loader, opt, title='', epoch=1, predict_save_path=None):\n logger = config.init_logging(title, predict_save_path + '/%s.log' % title, redirect_to_stdout=False)\n progbar = Progbar(logger=logger, title=title, target=len(data_loader.dataset.examples), batch_size=data_loader.batch_size,\n total_examples=len(data_loader.dataset.examples))\n\n topk_range = [5, 10]\n score_names = ['precision', 'recall', 'f_score']\n\n example_idx = 0\n score_dict = {} # {'precision@5':[],'recall@5':[],'f1score@5':[], 'precision@10':[],'recall@10':[],'f1score@10':[]}\n for i, batch in enumerate(data_loader):\n # if i > 5:\n # break\n one2many_batch, one2one_batch = batch\n src_list, src_len, trg_list, _, trg_copy_target_list, src_oov_map_list, oov_list, src_str_list, trg_str_list = one2many_batch\n\n if torch.cuda.is_available():\n src_list = src_list.cuda()\n src_oov_map_list = src_oov_map_list.cuda()\n\n print(\"batch size - %s\" % str(src_list.size(0)))\n print(\"src size - %s\" % str(src_list.size()))\n print(\"target size - %s\" % len(trg_copy_target_list))\n\n pred_seq_list = generator.beam_search(src_list, src_len, src_oov_map_list, oov_list, opt.word2id)\n\n '''\n process each example in current batch\n '''\n for src, src_str, trg, trg_str_seqs, trg_copy, pred_seq, oov in zip(src_list, src_str_list, trg_list, trg_str_list, trg_copy_target_list, pred_seq_list, oov_list):\n logger.info('====================== %d =========================' % (i))\n print_out = ''\n print_out += '[Source][%d]: %s \\n' % (len(src_str), ' '.join(src_str))\n src = src.cpu().data.numpy() if torch.cuda.is_available() else src.data.numpy()\n print_out += '\\nSource Input: \\n %s\\n' % (' '.join([opt.id2word[x] for x in src[:len(src_str) + 5]]))\n print_out += 'Real Target String [%d] \\n\\t\\t%s \\n' % (len(trg_str_seqs), trg_str_seqs)\n print_out += 'Real Target Input: \\n\\t\\t%s \\n' % str([[opt.id2word[x] for x in t] for t in trg])\n print_out += 'Real Target Copy: \\n\\t\\t%s \\n' % str([[opt.id2word[x] if x < opt.vocab_size else oov[x - opt.vocab_size] for x in t] for t in trg_copy])\n trg_str_is_present_flags, _ = if_present_duplicate_phrases(src_str, trg_str_seqs)\n\n # ignore the cases that there's no present phrases\n if opt.must_appear_in_src and np.sum(trg_str_is_present_flags) == 0:\n logger.error('found no present targets')\n continue\n\n print_out += '[GROUND-TRUTH] #(present)/#(all targets)=%d/%d\\n' % (sum(trg_str_is_present_flags), len(trg_str_is_present_flags))\n print_out += '\\n'.join(['\\t\\t[%s]' % ' '.join(phrase) if is_present else '\\t\\t%s' % ' '.join(phrase) for phrase, is_present in zip(trg_str_seqs, trg_str_is_present_flags)])\n print_out += '\\noov_list: \\n\\t\\t%s \\n' % str(oov)\n\n # 1st filtering\n pred_is_valid_flags, processed_pred_seqs, processed_pred_str_seqs, 
processed_pred_score = process_predseqs(pred_seq, oov, opt.id2word, opt)\n # 2nd filtering: if filter out phrases that don't appear in text, and keep unique ones after stemming\n if opt.must_appear_in_src:\n pred_is_present_flags, _ = if_present_duplicate_phrases(src_str, processed_pred_str_seqs)\n filtered_trg_str_seqs = np.asarray(trg_str_seqs)[trg_str_is_present_flags]\n else:\n pred_is_present_flags = [True] * len(processed_pred_str_seqs)\n\n valid_and_present = np.asarray(pred_is_valid_flags) * np.asarray(pred_is_present_flags)\n match_list = get_match_result(true_seqs=filtered_trg_str_seqs, pred_seqs=processed_pred_str_seqs)\n print_out += '[PREDICTION] #(valid)=%d, #(present)=%d, #(retained&present)=%d, #(all)=%d\\n' % (sum(pred_is_valid_flags), sum(pred_is_present_flags), sum(valid_and_present), len(pred_seq))\n print_out += ''\n '''\n Print and export predictions\n '''\n preds_out = ''\n for p_id, (seq, word, score, match, is_valid, is_present) in enumerate(\n zip(processed_pred_seqs, processed_pred_str_seqs, processed_pred_score, match_list, pred_is_valid_flags, pred_is_present_flags)):\n # if p_id > 5:\n # break\n\n preds_out += '%s\\n' % (' '.join(word))\n if is_present:\n print_phrase = '[%s]' % ' '.join(word)\n else:\n print_phrase = ' '.join(word)\n\n if is_valid:\n print_phrase = '*%s' % print_phrase\n\n if match == 1.0:\n correct_str = '[correct!]'\n else:\n correct_str = ''\n if any([t >= opt.vocab_size for t in seq.sentence]):\n copy_str = '[copied!]'\n else:\n copy_str = ''\n\n print_out += '\\t\\t[%.4f]\\t%s \\t %s %s%s\\n' % (-score, print_phrase, str(seq.sentence), correct_str, copy_str)\n\n '''\n Evaluate predictions w.r.t different filterings and metrics\n '''\n processed_pred_seqs = np.asarray(processed_pred_seqs)[valid_and_present]\n filtered_processed_pred_str_seqs = np.asarray(processed_pred_str_seqs)[valid_and_present]\n filtered_processed_pred_score = np.asarray(processed_pred_score)[valid_and_present]\n\n # 3rd round filtering (one-word phrases)\n num_oneword_seq = -1\n filtered_pred_seq, filtered_pred_str_seqs, filtered_pred_score = post_process_predseqs((processed_pred_seqs, filtered_processed_pred_str_seqs, filtered_processed_pred_score), num_oneword_seq)\n\n match_list_exact = get_match_result(true_seqs=filtered_trg_str_seqs, pred_seqs=filtered_pred_str_seqs, type='exact')\n match_list_soft = get_match_result(true_seqs=filtered_trg_str_seqs, pred_seqs=filtered_pred_str_seqs, type='partial')\n\n assert len(filtered_pred_seq) == len(filtered_pred_str_seqs) == len(filtered_pred_score) == len(match_list_exact) == len(match_list_soft)\n\n print_out += \"\\n =======================================================\"\n print_pred_str_seqs = [\" \".join(item) for item in filtered_pred_str_seqs]\n print_trg_str_seqs = [\" \".join(item) for item in filtered_trg_str_seqs]\n # print_out += \"\\n PREDICTION: \" + \" / \".join(print_pred_str_seqs)\n # print_out += \"\\n GROUND TRUTH: \" + \" / \".join(print_trg_str_seqs)\n for topk in topk_range:\n results_exact = evaluate(match_list_exact, filtered_pred_str_seqs, filtered_trg_str_seqs, topk=topk)\n for k, v in zip(score_names, results_exact):\n if '%s@%d_exact' % (k, topk) not in score_dict:\n score_dict['%s@%d_exact' % (k, topk)] = []\n score_dict['%s@%d_exact' % (k, topk)].append(v)\n\n print_out += \"\\n ------------------------------------------------- EXACT, k=%d\" % (topk)\n print_out += \"\\n --- batch precision, recall, fscore: \" + str(results_exact[0]) + \" , \" + str(results_exact[1]) + \" , \" + 
str(results_exact[2])\n print_out += \"\\n --- total precision, recall, fscore: \" + str(np.average(score_dict['precision@%d_exact' % (topk)])) + \" , \" +\\\n str(np.average(score_dict['recall@%d_exact' % (topk)])) + \" , \" +\\\n str(np.average(score_dict['f_score@%d_exact' % (topk)]))\n\n for topk in topk_range:\n results_soft = evaluate(match_list_soft, filtered_pred_str_seqs, filtered_trg_str_seqs, topk=topk)\n for k, v in zip(score_names, results_soft):\n if '%s@%d_soft' % (k, topk) not in score_dict:\n score_dict['%s@%d_soft' % (k, topk)] = []\n score_dict['%s@%d_soft' % (k, topk)].append(v)\n\n print_out += \"\\n ------------------------------------------------- SOFT, k=%d\" % (topk)\n print_out += \"\\n --- batch precision, recall, fscore: \" + str(results_soft[0]) + \" , \" + str(results_soft[1]) + \" , \" + str(results_soft[2])\n print_out += \"\\n --- total precision, recall, fscore: \" + str(np.average(score_dict['precision@%d_soft' % (topk)])) + \" , \" +\\\n str(np.average(score_dict['recall@%d_soft' % (topk)])) + \" , \" +\\\n str(np.average(score_dict['f_score@%d_soft' % (topk)]))\n\n print_out += \"\\n =======================================================\"\n logger.info(print_out)\n\n '''\n write predictions to disk\n '''\n if predict_save_path:\n if not os.path.exists(os.path.join(predict_save_path, title + '_detail')):\n os.makedirs(os.path.join(predict_save_path, title + '_detail'))\n with open(os.path.join(predict_save_path, title + '_detail', str(example_idx) + '_print.txt'), 'w') as f_:\n f_.write(print_out)\n with open(os.path.join(predict_save_path, title + '_detail', str(example_idx) + '_prediction.txt'), 'w') as f_:\n f_.write(preds_out)\n\n out_dict = {}\n out_dict['src_str'] = src_str\n out_dict['trg_str'] = trg_str_seqs\n out_dict['trg_present_flag'] = trg_str_is_present_flags\n out_dict['pred_str'] = processed_pred_str_seqs\n out_dict['pred_score'] = [float(s) for s in processed_pred_score]\n out_dict['present_flag'] = pred_is_present_flags\n out_dict['valid_flag'] = pred_is_valid_flags\n out_dict['match_flag'] = [float(m) for m in match_list]\n\n for k,v in out_dict.items():\n out_dict[k] = list(v)\n # print('len(%s) = %d' % (k, len(v)))\n\n # print(out_dict)\n\n assert len(out_dict['trg_str']) == len(out_dict['trg_present_flag'])\n assert len(out_dict['pred_str']) == len(out_dict['present_flag']) \\\n == len(out_dict['valid_flag']) == len(out_dict['match_flag']) == len(out_dict['pred_score'])\n\n with open(os.path.join(predict_save_path, title + '_detail', str(example_idx) + '.json'), 'w') as f_:\n f_.write(json.dumps(out_dict))\n\n progbar.update(epoch, example_idx, [('f_score@5_exact', np.average(score_dict['f_score@5_exact'])),\n ('f_score@5_soft', np.average(score_dict['f_score@5_soft'])),\n ('f_score@10_exact', np.average(score_dict['f_score@10_exact'])),\n ('f_score@10_soft', np.average(score_dict['f_score@10_soft'])),])\n\n example_idx += 1\n\n # print('#(f_score@5#oneword=-1)=%d, sum=%f' % (len(score_dict['f_score@5#oneword=-1']), sum(score_dict['f_score@5#oneword=-1'])))\n # print('#(f_score@10#oneword=-1)=%d, sum=%f' % (len(score_dict['f_score@10#oneword=-1']), sum(score_dict['f_score@10#oneword=-1'])))\n # print('#(f_score@5#oneword=1)=%d, sum=%f' % (len(score_dict['f_score@5#oneword=1']), sum(score_dict['f_score@5#oneword=1'])))\n # print('#(f_score@10#oneword=1)=%d, sum=%f' % (len(score_dict['f_score@10#oneword=1']), sum(score_dict['f_score@10#oneword=1'])))\n\n if predict_save_path:\n # export scores. 
Each row is scores (precision, recall and f-score) of different way of filtering predictions (how many one-word predictions to keep)\n with open(predict_save_path + os.path.sep + title + '_result.csv', 'w') as result_csv:\n csv_lines = []\n for mode in [\"exact\", \"soft\"]:\n for topk in topk_range:\n csv_line = \"\"\n for k in score_names:\n csv_line += ',%f' % np.average(score_dict['%s@%d_%s' % (k, topk, mode)])\n csv_lines.append(csv_line + '\\n')\n\n result_csv.writelines(csv_lines)\n\n # precision, recall, f_score = macro_averaged_score(precisionlist=score_dict['precision'], recalllist=score_dict['recall'])\n # logging.info(\"Macro@5\\n\\t\\tprecision %.4f\\n\\t\\tmacro recall %.4f\\n\\t\\tmacro fscore %.4f \" % (np.average(score_dict['precision@5']), np.average(score_dict['recall@5']), np.average(score_dict['f1score@5'])))\n # logging.info(\"Macro@10\\n\\t\\tprecision %.4f\\n\\t\\tmacro recall %.4f\\n\\t\\tmacro fscore %.4f \" % (np.average(score_dict['precision@10']), np.average(score_dict['recall@10']), np.average(score_dict['f1score@10'])))\n # precision, recall, f_score = evaluate(true_seqs=target_all, pred_seqs=prediction_all, topn=5)\n # logging.info(\"micro precision %.4f , micro recall %.4f, micro fscore %.4f \" % (precision, recall, f_score))\n\n for k,v in score_dict.items():\n print('#(%s) = %d' % (k, len(v)))\n\n return score_dict\n\ndef predict_beam_search(generator, data_loader, opt, title='', epoch=1, predict_save_path=None):\n if predict_save_path:\n logger = config.init_logging(title, predict_save_path + '/%s.log' % title, redirect_to_stdout=False)\n else:\n logger = config.init_logging(title, '', redirect_to_stdout=False)\n progbar = Progbar(logger=logger, title=title, target=len(data_loader.dataset.examples), batch_size=data_loader.batch_size,\n total_examples=len(data_loader.dataset.examples))\n\n topk_range = [5, 10]\n score_names = ['precision', 'recall', 'f_score']\n\n example_idx = 0\n score_dict = {} # {'precision@5':[],'recall@5':[],'f1score@5':[], 'precision@10':[],'recall@10':[],'f1score@10':[]}\n for i, batch in enumerate(data_loader):\n # if i > 5:\n # break\n one2many_batch, one2one_batch = batch\n src_list, src_len, trg_list, _, trg_copy_target_list, src_oov_map_list, oov_list, src_str_list, trg_str_list = one2many_batch\n\n if torch.cuda.is_available():\n src_list = src_list.cuda()\n src_oov_map_list = src_oov_map_list.cuda()\n\n print(\"batch size - %s\" % str(src_list.size(0)))\n print(\"src size - %s\" % str(src_list.size()))\n print(\"target size - %s\" % len(trg_copy_target_list))\n\n pred_seq_list = generator.beam_search(src_list, src_len, src_oov_map_list, oov_list, opt.word2id)\n\n '''\n process each example in current batch\n '''\n for src, src_str, trg, trg_str_seqs, trg_copy, pred_seq, oov in zip(src_list, src_str_list, trg_list, trg_str_list, trg_copy_target_list, pred_seq_list, oov_list):\n logger.info('====================== %d =========================' % (i))\n print_out = ''\n print_out += '[Source][%d]: %s \\n' % (len(src_str), ' '.join(src_str))\n src = src.cpu().data.numpy() if torch.cuda.is_available() else src.data.numpy()\n print_out += '\\nSource Input: \\n %s\\n' % (' '.join([opt.id2word[x] for x in src[:len(src_str) + 5]]))\n print_out += 'Real Target String [%d] \\n\\t\\t%s \\n' % (len(trg_str_seqs), trg_str_seqs)\n print_out += 'Real Target Input: \\n\\t\\t%s \\n' % str([[opt.id2word[x] for x in t] for t in trg])\n print_out += 'Real Target Copy: \\n\\t\\t%s \\n' % str([[opt.id2word[x] if x < opt.vocab_size else oov[x - 
opt.vocab_size] for x in t] for t in trg_copy])\n trg_str_is_present_flags, _ = if_present_duplicate_phrases(src_str, trg_str_seqs)\n\n # ignore the cases that there's no present phrases\n # if opt.must_appear_in_src and np.sum(trg_str_is_present_flags) == 0:\n # logger.error('found no present targets')\n # continue\n\n print_out += '[GROUND-TRUTH] #(present)/#(all targets)=%d/%d\\n' % (sum(trg_str_is_present_flags), len(trg_str_is_present_flags))\n print_out += '\\n'.join(['\\t\\t[%s]' % ' '.join(phrase) if is_present else '\\t\\t%s' % ' '.join(phrase) for phrase, is_present in zip(trg_str_seqs, trg_str_is_present_flags)])\n print_out += '\\noov_list: \\n\\t\\t%s \\n' % str(oov)\n\n # 1st filtering\n pred_is_valid_flags, processed_pred_seqs, processed_pred_str_seqs, processed_pred_score = process_predseqs(pred_seq, oov, opt.id2word, opt)\n # 2nd filtering: if filter out phrases that don't appear in text, and keep unique ones after stemming\n if opt.must_appear_in_src:\n pred_is_present_flags, _ = if_present_duplicate_phrases(src_str, processed_pred_str_seqs)\n filtered_trg_str_seqs = np.asarray(trg_str_seqs)[trg_str_is_present_flags]\n else:\n pred_is_present_flags = [True] * len(processed_pred_str_seqs)\n\n valid_and_present = np.asarray(pred_is_valid_flags) * np.asarray(pred_is_present_flags)\n match_list = get_match_result(true_seqs=filtered_trg_str_seqs, pred_seqs=processed_pred_str_seqs)\n print_out += '[PREDICTION] #(valid)=%d, #(present)=%d, #(retained&present)=%d, #(all)=%d\\n' % (sum(pred_is_valid_flags), sum(pred_is_present_flags), sum(valid_and_present), len(pred_seq))\n print_out += ''\n '''\n Print and export predictions\n '''\n preds_out = ''\n output_keywords = list(map(lambda x:' '.join(x),processed_pred_str_seqs))\n return output_keywords\n return []\n\ndef evaluate_greedy(model, data_loader, test_examples, opt):\n model.eval()\n\n logging.info('====================== Checking GPU Availability =========================')\n if torch.cuda.is_available():\n logging.info('Running on GPU!')\n model.cuda()\n else:\n logging.info('Running on CPU!')\n\n logging.info('====================== Start Predicting =========================')\n progbar = Progbar(title='Testing', target=len(data_loader), batch_size=data_loader.batch_size,\n total_examples=len(data_loader.dataset))\n\n '''\n Note here each batch only contains one data example, thus decoder_probs is flattened\n '''\n for i, (batch, example) in enumerate(zip(data_loader, test_examples)):\n src = batch.src\n\n logging.info('====================== %d =========================' % (i + 1))\n logging.info('\\nSource text: \\n %s\\n' % (' '.join([opt.id2word[wi] for wi in src.data.numpy()[0]])))\n\n if torch.cuda.is_available():\n src.cuda()\n\n # trg = Variable(torch.from_numpy(np.zeros((src.size(0), opt.max_sent_length), dtype='int64')))\n trg = Variable(torch.LongTensor([[opt.word2id[pykp.io.BOS_WORD]] * opt.max_sent_length]))\n\n max_words_pred = model.greedy_predict(src, trg)\n progbar.update(None, i, [])\n\n sentence_pred = [opt.id2word[x] for x in max_words_pred]\n sentence_real = example['trg_str']\n\n if '</s>' in sentence_real:\n index = sentence_real.index('</s>')\n sentence_pred = sentence_pred[:index]\n\n logging.info('\\t\\tPredicted : %s ' % (' '.join(sentence_pred)))\n logging.info('\\t\\tReal : %s ' % (sentence_real))\n\n\ndef stem_word_list(word_list):\n return [stemmer.stem(w.strip().lower()) for w in word_list]\n\n\ndef macro_averaged_score(precisionlist, recalllist):\n precision = np.average(precisionlist)\n 
recall = np.average(recalllist)\n f_score = 0\n if(precision or recall):\n f_score = round((2 * (precision * recall)) / (precision + recall), 2)\n return precision, recall, f_score\n\n\ndef get_match_result(true_seqs, pred_seqs, do_stem=True, type='exact'):\n '''\n :param true_seqs:\n :param pred_seqs:\n :param do_stem:\n :param topn:\n :param type: 'exact' or 'partial'\n :return:\n '''\n micro_metrics = []\n micro_matches = []\n\n # do processing to baseline predictions\n match_score = np.asarray([0.0] * len(pred_seqs), dtype='float32')\n target_number = len(true_seqs)\n predicted_number = len(pred_seqs)\n\n metric_dict = {'target_number': target_number, 'prediction_number': predicted_number, 'correct_number': match_score}\n\n # convert target index into string\n if do_stem:\n true_seqs = [stem_word_list(seq) for seq in true_seqs]\n pred_seqs = [stem_word_list(seq) for seq in pred_seqs]\n\n for pred_id, pred_seq in enumerate(pred_seqs):\n if type == 'exact':\n match_score[pred_id] = 0\n for true_id, true_seq in enumerate(true_seqs):\n match = True\n if len(pred_seq) != len(true_seq):\n continue\n for pred_w, true_w in zip(pred_seq, true_seq):\n # if one two words are not same, match fails\n if pred_w != true_w:\n match = False\n break\n # if every word in pred_seq matches one true_seq exactly, match succeeds\n if match:\n match_score[pred_id] = 1\n break\n elif type == 'partial':\n max_similarity = 0.\n pred_seq_set = set(pred_seq)\n # use the jaccard coefficient as the degree of partial match\n for true_id, true_seq in enumerate(true_seqs):\n true_seq_set = set(true_seq)\n jaccard = len(set.intersection(*[set(true_seq_set), set(pred_seq_set)])) / float(len(set.union(*[set(true_seq_set), set(pred_seq_set)])))\n if jaccard > max_similarity:\n max_similarity = jaccard\n match_score[pred_id] = max_similarity\n\n elif type == 'bleu':\n # account for the match of subsequences, like n-gram-based (BLEU) or LCS-based\n match_score[pred_id] = bleu(pred_seq, true_seqs, [0.1, 0.3, 0.6])\n\n return match_score\n\n\ndef evaluate(match_list, predicted_list, true_list, topk=5):\n if len(match_list) > topk:\n match_list = match_list[:topk]\n if len(predicted_list) > topk:\n predicted_list = predicted_list[:topk]\n\n # Micro-Averaged Method\n micropk = float(sum(match_list)) / float(len(predicted_list)) if len(predicted_list) > 0 else 0.0\n micrork = float(sum(match_list)) / float(len(true_list)) if len(true_list) > 0 else 0.0\n\n if micropk + micrork > 0:\n microf1 = float(2 * (micropk * micrork)) / (micropk + micrork)\n else:\n microf1 = 0.0\n\n return micropk, micrork, microf1\n\n\ndef f1_score(prediction, ground_truth):\n # both prediction and grount_truth should be list of words\n common = Counter(prediction) & Counter(ground_truth)\n num_same = sum(common.values())\n if num_same == 0:\n return 0\n precision = 1.0 * num_same / len(prediction)\n recall = 1.0 * num_same / len(ground_truth)\n f1 = (2 * precision * recall) / (precision + recall)\n return f1\n\n\ndef self_redundancy(_input):\n # _input shoule be list of list of words\n if len(_input) == 0:\n return None\n _len = len(_input)\n scores = np.ones((_len, _len), dtype=\"float32\") * -1.0\n for i in range(_len):\n for j in range(_len):\n if scores[i][j] != -1:\n continue\n elif i == j:\n scores[i][j] = 0.0\n else:\n f1 = f1_score(_input[i], _input[j])\n scores[i][j] = f1\n scores[j][i] = f1\n res = np.max(scores, 1)\n res = np.mean(res)\n return res\n" ]
[ [ "torch.LongTensor", "numpy.asarray", "numpy.ones", "numpy.max", "numpy.mean", "torch.cuda.is_available", "numpy.average", "numpy.sum" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
choderalab/autonomous-molecular-design
[ "8536382c3006d0270a4ca544cf6de1a67beea954" ]
[ "scripts/simpleaddscenariocolab.py" ]
[ "# -*- coding: utf-8 -*-\n\"\"\"SimpleADDScenarioColab.ipynb\n\nAutomatically generated by Colaboratory.\n\nOriginal file is located at\n https://colab.research.google.com/drive/1yBHXw30_rc1E3NI6g8NsvbZ54_t1DMXX\n\nRetrieve/Import Necessary Packages\n\"\"\"\n\nimport sys\nsys.path.append('/usr/local/lib/python3.7/site-packages/')\nimport os\n\nimport math\nimport numpy as np\nnp.random.seed(0)\nimport pandas as pd\nimport deepchem as dc\nfrom deepchem.utils.save import load_from_disk\nfrom deepchem.data import data_loader\nimport pickle\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n\n\n\"\"\"\nPrepare Data and Model Definitions\n\nLoad and Featurize Data\n\"\"\"\ndataset_file = \"./enamineSubset100KGroundTruth.csv\"\nground_truth_dataset = pd.read_csv(dataset_file)\nlow_bace_dataset = ground_truth_dataset.sort_values(by=\"bace\")[ : len(ground_truth_dataset)//4 ] #take quarter worst binder potential starters\n\ntop_5_percent_index = len(ground_truth_dataset) // 20\ntop_5_percent_bace_cutoff = ground_truth_dataset.sort_values(by=\"bace\", ascending=False)[\"bace\"].tolist()[top_5_percent_index]\n\n###featurized ground truth for scoring\nfeaturizer = dc.feat.ConvMolFeaturizer()\nloader = dc.data.CSVLoader(tasks=[\"bace\", \"esol\", \"logD\"], smiles_field=\"SMILES\", featurizer=featurizer)\ndataset_feat = loader.featurize(dataset_file) #featurize the molecules from the ground truth dataset\ntransformer = dc.trans.NormalizationTransformer(transform_y=True, dataset=dataset_feat)\nground_truth_for_scoring = transformer.transform(dataset_feat)\n\n\n\"\"\"Define Main Experimenter Model\"\"\"\n\n###define Abstract Data Type to hold search information, including ensemble\n\nclass Experimenter():\n \"\"\"Class representing a research scientist/team going through the drug development process.\n \n Parameters\n ----------\n N : int\n Number of samples to initially train the experimenter ensemble on.\n M : int\n Number of molecules to purchase in each batch.\n ensemble_size : int, optional\n Number of models in experimenter ensemble.\n epochs : int, optional\n Number of epochs to train ensemble models for at each stage.\n molecule_cost : int or float, optional\n Monetary cost of purchasing a single molecule.\n target_bounds : dictionary of str:tuples(floats), optional\n Desired range for each property.\n sampling_mode : string {\"thompson\", \"highest_mean\", \"random\"}\n The means of choosing the ensemble outputs/molecules.\n \n Attributes\n ----------\n ensemble : dictionary of deepchem.models.GrachConvModel\n Models representing the experimenter knowledge/predictions and uncertainty.\n history : list of dictionaries storing model attributes\n Snapshots of the model state at each time step.\n samples_seen : pandas.DataFrame\n Ground truth values of the molecules seen before. Includes initial training set.\n smiles_seen : list of str\n SMILES strings of the molecules seen before.\n selected_prediction : pandas.DataFrame\n The molecule values used to make the next decision.\n all_predictions : dict<int,pandas.DataFrame>\n Predicted values of entire ensemble at this time step. 
Ensemble model keys (random seeds) map to model's prediction.\n cost : int or float\n Total monetary cost incurred at the current time.\n number_molecules : int\n Total number of molecules purchased at the current time.\n time : int\n Total number of days spent up to the current time.\n \n \"\"\"\n def __init__(self, N, M, ensemble_size=3, epochs=10, molecule_cost=200,\n target_bounds={\"bace\":(4, math.inf), \"esol\":(-5, math.inf), \"logD\":(-0.4, 5.6)}, sampling_method=\"highest_mean\"):\n self.N = N #initial samples\n self.M = M #batch size\n self.ensemble_size = ensemble_size\n self.epochs = epochs\n self.molecule_cost = molecule_cost\n self.target_bounds = target_bounds\n if sampling_method == \"thompson\" or sampling_method == \"highest_mean\" or sampling_method == \"random\":\n self.sampling_method = sampling_method\n else:\n raise ValueError(\"Input for sampling method was not allowed argument. Choices are thompson, highest_mean, and random.\")\n \n self.ensemble = {i:dc.models.GraphConvModel(n_tasks=3, mode='regression', batch_size=20, random_seed=i, tensorboard=True) \n for i in range(self.ensemble_size)} #map each model to its seed\n self.history = [] #save snapshot of model, on disk\n self.samples_seen = None\n self.smiles_seen = []\n self.selected_prediction = pd.DataFrame()\n self.all_predictions = {}\n self.cost = 0\n self.number_molecules = 0\n self.time = 0 #days\n \n \n def train_model(self, model, dataset):\n \"\"\"Helper function to train a given ensemble model on a given dataset.\n \n Parameters\n ----------\n model : Keras model (generally deepchem.GraphConvModel)\n Model to be trained.\n dataset : pandas.DataFrame\n Dataset to train on. Must include \"SMILES\", \"bace\", \"esol\", and \"logD\" headers.\n \n \"\"\"\n #convert DataFrame to CSV and read in as deepchem.Dataset via deepchem.CSVLoader\n \n dataset.to_csv(\"training_dataset.csv\")\n \n featurizer = dc.feat.ConvMolFeaturizer()\n loader = dc.data.CSVLoader(tasks=[\"bace\", \"esol\", \"logD\"], smiles_field=\"SMILES\", featurizer=featurizer)\n\n dataset_feat = loader.featurize(\"training_dataset.csv\")\n \n transformer = dc.trans.NormalizationTransformer(transform_y=True, dataset=dataset_feat)\n dataset_feat = transformer.transform(dataset_feat)\n\n model.fit(dataset_feat, nb_epoch=self.epochs, deterministic=True, restore=False)\n \n \n def train_ensemble(self, dataset):\n \"\"\"Helper function to train model ensemble.\n \n Parameters\n ----------\n dataset : pandas.Dataset\n Dataset on which to train models. 
Must include \"SMILES\", \"bace\", \"esol\", and \"logD\" headers.\n \n \"\"\"\n for model in self.ensemble.values():\n self.train_model(model, dataset)\n\n \n def initial_training(self, verbose=False):\n \"\"\"Train model ensemble for the first time on self.N samples randomly chosen from the 2500 lowest bace affinity-scored \n molecules.\n \n Parameters\n ----------\n verbose : bool\n Whether to print progress updates.\n \n Notes\n -----\n If self.N > 1/4 of dataset, ensemble will be trained on 2500 samples.\n Records first history object.\n \n \"\"\"\n idx_range = self.N if self.N < low_bace_dataset.shape[0] else low_bace_dataset.shape[0]\n rand_indices = np.random.choice(range(low_bace_dataset.shape[0]), idx_range, replace=False) #select random row indices\n \n init_ensemble_dataset = pd.DataFrame()\n for idx in rand_indices:\n init_ensemble_dataset = init_ensemble_dataset.append( low_bace_dataset.iloc[idx], ignore_index=True )\n \n if verbose:\n print(\"Training set selected.\")\n \n self.samples_seen = init_ensemble_dataset ### collect the examples seen during initial training (ground truth values)\n self.smiles_seen = init_ensemble_dataset[\"SMILES\"].tolist()\n \n #cost/time to initially train? free initial knowledge?\n self.cost += self.molecule_cost * len(init_ensemble_dataset)\n self.number_molecules += len(init_ensemble_dataset)\n self.time = 0\n \n\n if self.sampling_method != \"random\":\n if verbose:\n print(\"Training ensemble...\")\n self.train_ensemble(init_ensemble_dataset) #train ensemble on initial dataset, unless we are randomly sampling and do not need to \n if verbose:\n print(\"Ensemble trained.\")\n \n self.record_history()\n\n \n def get_component_score(self, arr, keys):\n \"\"\"Helper function to get the scaled \"goodness\" of the input scores.\n \n Parameters\n ----------\n array : numpy.array\n Array with bace, esol, and logD scores.\n keys : collection of strings from {\"bace\", \"esol\", \"logD\"}\n Which scores to incorporate into the overall goodness.\n \n Returns\n -------\n numpy.array\n Sum of component scores.\n \n \"\"\"\n scores = []\n if \"bace\" in keys:\n #higher bace => higher score\n bace = arr[:,0]\n bace_range = self.target_bounds[\"bace\"]\n scores.append( np.where(bace < bace_range[0], 0.2*bace-0.8, 0.05*bace-0.2) )\n #dec penalty when score>low end of range\n \n if \"esol\" in keys:\n esol = arr[:,1]\n esol_range = self.target_bounds[\"esol\"]\n scores.append( np.where(esol < esol_range[0], esol - np.absolute(esol-esol_range[1])**2, esol) )\n \n if \"logD\" in keys:\n #logD within range is not penalized\n logD = arr[:,2]\n logD_range = self.target_bounds[\"logD\"]\n #handle lower end of range\n int_arr = np.where(logD < logD_range[0], logD - np.absolute(logD-logD_range[0]), logD)\n #handle upper end of range\n scores.append(np.where(int_arr > logD_range[1], int_arr - np.absolute(int_arr-logD_range[1]), int_arr) )\n\n return sum(scores)\n \n \n def score_and_select_top(self):\n \"\"\"Scores all molecules and selects the top M for \"purchase\".\n \n \"\"\"\n if self.sampling_method == \"highest_mean\":\n #generate and store all predictions\n predicted = np.zeros( (len(ground_truth_for_scoring),3) )\n for key in self.ensemble.keys():\n pred = self.ensemble[key].predict(ground_truth_for_scoring)\n pred = transformer.untransform(pred) #undo normalization on outputs\n predicted += pred #sum model predictions\n self.all_predictions[key] = self.prediction_array_to_dataframe(pred) #store each prediction as a labeled dataframe\n predicted /= 
len(self.ensemble) #avg model predictions\n results_df = self.prediction_array_to_dataframe(predicted)\n \n \n elif self.sampling_method == \"thompson\":\n #generate and store all predictions\n for key in self.ensemble.keys():\n pred = self.ensemble[key].predict(ground_truth_for_scoring)\n pred = transformer.untransform(pred) #undo normalization on outputs\n self.all_predictions = { key : self.prediction_array_to_dataframe( pred ) } #store all labeled dataframes \n\n \n #Thompson sampling\n results_df = pd.DataFrame()\n for row_idx in range( len(ground_truth_for_scoring) ):\n pred_key = np.random.randint(low=0, high=len(self.ensemble)) #select one random prediction array to select a row from\n pred_df = self.all_predictions[pred_key]\n pred_row = pred_df.iloc[[row_idx]]\n results_df = pd.concat([results_df, pred_row], sort=False) \n \n \n elif self.sampling_method == \"random\":\n ###randomly select up to M points from those not seen\n unseen = ground_truth_dataset.loc[~ground_truth_dataset['SMILES'].isin(self.smiles_seen)] #remove prev seen\n unseen = unseen.iloc[np.random.permutation(len(unseen))] #shuffle remaining samples\n unseen = unseen[:self.M] if (len(unseen) > self.M) else unseen #select up to self.M samples\n \n self.samples_seen = pd.concat([self.samples_seen,unseen], sort=False)\n self.smiles_seen = self.samples_seen[\"SMILES\"].tolist()\n self.cost += self.molecule_cost * len(unseen)\n self.number_molecules += len(unseen)\n self.time += 28 #4 weeks to buy and experiment \n return \n \n self.selected_prediction = results_df #also store the dataframe with the data we chose to make decisions with\n \n unseen_predicted_rows = results_df.loc[~results_df['SMILES'].isin(self.smiles_seen)] #also remove predicted values previously seen\n unseen_predicted_rows = unseen_predicted_rows.sort_values(by=\"goodness\", ascending=False) #sort predictions with highest goodness at top\n \n predicted_subset = unseen_predicted_rows[:self.M] if (len(unseen_predicted_rows) > self.M) else unseen_predicted_rows #select up to self.M samples from the predictions\n predicted_subset_smiles = predicted_subset[\"SMILES\"].tolist()\n \n new_batch_ground_truth = ground_truth_dataset.loc[ground_truth_dataset['SMILES'].isin(predicted_subset_smiles)]\n \n self.samples_seen = pd.concat([self.samples_seen,new_batch_ground_truth], sort=False)\n self.smiles_seen = self.samples_seen[\"SMILES\"].tolist()\n self.cost += self.molecule_cost * len(new_batch_ground_truth)\n self.number_molecules += len(new_batch_ground_truth)\n self.time += 28 #4 weeks to buy and experiment\n \n \n def prediction_array_to_dataframe(self, array):\n #copy SMILES and assign calculated scores, store in self.predictions\n df = pd.DataFrame()\n df[\"SMILES\"] = ground_truth_dataset[\"SMILES\"] \n goodness = self.get_component_score(array, [\"bace\", \"esol\", \"logD\"])\n df[\"bace\"] = array[:,0]\n df[\"esol\"] = array[:,1]\n df[\"logD\"] = array[:,2]\n df[\"goodness\"] = goodness\n return df\n \n \n def record_history(self):\n \"\"\"Stores model costs and experience for later analysis.\n \n Notes\n -----\n Does not save self.history attribute, in order to avoid redundantly storing the data in it.\n Only saves attributes that change in each time step.\n \n \"\"\"\n hist = {}\n hist[\"samples_seen\"] = self.samples_seen\n hist[\"smiles_seen\"] = self.smiles_seen\n hist[\"cost\"] = self.cost\n hist[\"number_molecules\"] = self.number_molecules\n hist[\"time\"] = self.time\n hist[\"selected_prediction\"] = self.selected_prediction\n 
hist[\"all_predictions\"] = self.all_predictions\n self.history.append(hist)\n \n\n def run(self):\n \"\"\"Simple wrapper to automate calls to select molecules and update models. \n \n Returns\n -------\n candidates : pandas.DataFrame\n The candidate compounds that satisfy the given criteria.\n\n Notes\n -----\n Must be preceded by initial training of model ensemble.\n \n \"\"\" \n itr = 0\n while len(self.samples_seen) < len(ground_truth_dataset): #search entire database, with early stopping \n self.score_and_select_top()\n self.record_history()\n\n print(\"PROGRESS:\",len(self.samples_seen),\"of\",len(ground_truth_dataset))\n if self.sampling_method != \"random\":\n self.train_ensemble(self.samples_seen)\n \n with open(f\"{self.sampling_method}_model_{itr}.pickle\", \"wb\") as f:\n pickle.dump(self.history,f)\n itr += 1\n\n\"\"\"#Run the Model and Obtain Data\"\"\"\n\n#N = [96, 384, 1536] #initial train set size\nN = [384]\n#M = [96, 384, 1536] #batch size -> 96 wells, multiples\nM = [384]\n\nnumber_reps = 10\n\nfor method in (\"highest_mean\", \"thompson\", \"random\"):\n for i in range(number_reps):\n os.chdir(f\"./{method}_models/{i}/\")\n\n print(\"\\n\",\"Iteration:\",i,\"\\n\")\n model = Experimenter(n, m, ensemble_size=5, epochs=10, sampling_method=method)\n model.initial_training()\n model.run()" ]
[ [ "pandas.concat", "pandas.read_csv", "numpy.absolute", "numpy.random.seed", "pandas.DataFrame", "numpy.where" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.1", "1.5", "1.2", "1.3" ], "scipy": [], "tensorflow": [] } ]
jasmainak/mne-python
[ "039cb1bf52770019bd48ac028795af0861792fa2", "039cb1bf52770019bd48ac028795af0861792fa2", "039cb1bf52770019bd48ac028795af0861792fa2", "039cb1bf52770019bd48ac028795af0861792fa2", "039cb1bf52770019bd48ac028795af0861792fa2", "039cb1bf52770019bd48ac028795af0861792fa2", "039cb1bf52770019bd48ac028795af0861792fa2" ]
[ "mne/io/proj.py", "mne/channels/channels.py", "tutorials/plot_mne_dspm_source_localization.py", "mne/io/array/tests/test_array.py", "mne/time_frequency/tests/test_tfr.py", "mne/decoding/tests/test_ems.py", "mne/stats/tests/test_cluster_level.py" ]
[ "# Authors: Alexandre Gramfort <[email protected]>\n# Matti Hamalainen <[email protected]>\n# Denis Engemann <[email protected]>\n# Teon Brooks <[email protected]>\n#\n# License: BSD (3-clause)\n\nfrom copy import deepcopy\nfrom itertools import count\nfrom math import sqrt\n\nimport numpy as np\nfrom scipy import linalg\n\nfrom .tree import dir_tree_find\nfrom .tag import find_tag\nfrom .constants import FIFF\nfrom .pick import pick_types\nfrom .write import (write_int, write_float, write_string, write_name_list,\n write_float_matrix, end_block, start_block)\nfrom ..utils import logger, verbose, warn\nfrom ..externals.six import string_types\n\n\nclass Projection(dict):\n \"\"\"Projection vector.\n\n A basic class to proj a meaningful print for projection vectors.\n \"\"\"\n\n def __repr__(self): # noqa: D105\n s = \"%s\" % self['desc']\n s += \", active : %s\" % self['active']\n s += \", n_channels : %s\" % self['data']['ncol']\n return \"<Projection | %s>\" % s\n\n # Can't use copy_ function here b/c of circular import\n def plot_topomap(self, layout=None, cmap=None, sensors=True,\n colorbar=False, res=64, size=1, show=True,\n outlines='head', contours=6, image_interp='bilinear',\n axes=None, info=None):\n \"\"\"Plot topographic maps of SSP projections.\n\n Parameters\n ----------\n layout : None | Layout | list of Layout\n Layout instance specifying sensor positions (does not need to be\n specified for Neuromag data). Or a list of Layout if projections\n are from different sensor types.\n cmap : matplotlib colormap | (colormap, bool) | 'interactive' | None\n Colormap to use. If tuple, the first value indicates the colormap to\n use and the second value is a boolean defining interactivity. In\n interactive mode (only works if ``colorbar=True``) the colors are\n adjustable by clicking and dragging the colorbar with left and right\n mouse button. Left mouse button moves the scale up and down and right\n mouse button adjusts the range. Hitting space bar resets the range. Up\n and down arrows can be used to change the colormap. If None (default),\n 'Reds' is used for all positive data, otherwise defaults to 'RdBu_r'.\n If 'interactive', translates to (None, True).\n sensors : bool | str\n Add markers for sensor locations to the plot. Accepts matplotlib plot\n format string (e.g., 'r+' for red plusses). If True, a circle will be\n used (via .add_artist). Defaults to True.\n colorbar : bool\n Plot a colorbar.\n res : int\n The resolution of the topomap image (n pixels along each side).\n size : scalar\n Side length of the topomaps in inches (only applies when plotting\n multiple topomaps at a time).\n show : bool\n Show figure if True.\n outlines : 'head' | 'skirt' | dict | None\n The outlines to be drawn. If 'head', the default head scheme will be\n drawn. If 'skirt' the head scheme will be drawn, but sensors are\n allowed to be plotted outside of the head circle. If dict, each key\n refers to a tuple of x and y positions, the values in 'mask_pos' will\n serve as image mask, and the 'autoshrink' (bool) field will trigger\n automated shrinking of the positions due to points outside the outline.\n Alternatively, a matplotlib patch object can be passed for advanced\n masking options, either directly or as a function that returns patches\n (required for multi-axis plots). If None, nothing will be drawn.\n Defaults to 'head'.\n contours : int | array of float\n The number of contour lines to draw. 
If 0, no contours will be drawn.\n When an integer, matplotlib ticker locator is used to find suitable\n values for the contour thresholds (may sometimes be inaccurate, use\n array for accuracy). If an array, the values represent the levels for\n the contours. Defaults to 6.\n image_interp : str\n The image interpolation to be used. All matplotlib options are\n accepted.\n axes : instance of Axes | list | None\n The axes to plot to. If list, the list must be a list of Axes of\n the same length as the number of projectors. If instance of Axes,\n there must be only one projector. Defaults to None.\n info : instance of Info | None\n The measurement information to use to determine the layout.\n If not None, ``layout`` must be None.\n\n Returns\n -------\n fig : instance of matplotlib figure\n Figure distributing one image per channel across sensor topography.\n\n Notes\n -----\n .. versionadded:: 0.15.0\n \"\"\" # noqa: E501\n from ..viz.topomap import plot_projs_topomap\n return plot_projs_topomap([self], layout, cmap, sensors, colorbar,\n res, size, show, outlines,\n contours, image_interp, axes, info)\n\n\nclass ProjMixin(object):\n \"\"\"Mixin class for Raw, Evoked, Epochs.\n\n Notes\n -----\n This mixin adds a proj attribute as a property to data containers.\n It is True if at least one proj is present and all of them are active.\n The projs might not be applied yet if data are not preloaded. In\n this case it's the _projector attribute that does the job.\n If a private _data attribute is present then the projs applied\n to it are the ones marked as active.\n\n A proj parameter passed in constructor of raw or epochs calls\n apply_proj and hence after the .proj attribute is True.\n\n As soon as you've applied the projs it will stay active in the\n remaining pipeline.\n\n The suggested pipeline is proj=True in epochs (it's cheaper than for raw).\n\n When you use delayed SSP in Epochs, projs are applied when you call\n get_data() method. They are not applied to the evoked._data unless you call\n apply_proj(). The reason is that you want to reject with projs although\n it's not stored in proj mode.\n \"\"\"\n\n @property\n def proj(self):\n \"\"\"Whether or not projections are active.\"\"\"\n return (len(self.info['projs']) > 0 and\n all(p['active'] for p in self.info['projs']))\n\n @verbose\n def add_proj(self, projs, remove_existing=False, verbose=None):\n \"\"\"Add SSP projection vectors.\n\n Parameters\n ----------\n projs : list\n List with projection vectors.\n remove_existing : bool\n Remove the projection vectors currently in the file.\n verbose : bool, str, int, or None\n If not None, override default verbose level (see\n :func:`mne.verbose` and :ref:`Logging documentation <tut_logging>`\n for more).\n\n Returns\n -------\n self : instance of Raw | Epochs | Evoked\n The data container.\n \"\"\"\n if isinstance(projs, Projection):\n projs = [projs]\n\n if (not isinstance(projs, list) and\n not all(isinstance(p, Projection) for p in projs)):\n raise ValueError('Only projs can be added. 
You supplied '\n 'something else.')\n\n # mark proj as inactive, as they have not been applied\n projs = deactivate_proj(projs, copy=True, verbose=self.verbose)\n if remove_existing:\n # we cannot remove the proj if they are active\n if any(p['active'] for p in self.info['projs']):\n raise ValueError('Cannot remove projectors that have '\n 'already been applied')\n self.info['projs'] = projs\n else:\n self.info['projs'].extend(projs)\n # We don't want to add projectors that are activated again.\n self.info['projs'] = _uniquify_projs(self.info['projs'],\n check_active=False, sort=False)\n return self\n\n def apply_proj(self):\n \"\"\"Apply the signal space projection (SSP) operators to the data.\n\n Notes\n -----\n Once the projectors have been applied, they can no longer be\n removed. It is usually not recommended to apply the projectors at\n too early stages, as they are applied automatically later on\n (e.g. when computing inverse solutions).\n Hint: using the copy method individual projection vectors\n can be tested without affecting the original data.\n With evoked data, consider the following example::\n\n projs_a = mne.read_proj('proj_a.fif')\n projs_b = mne.read_proj('proj_b.fif')\n # add the first, copy, apply and see ...\n evoked.add_proj(a).copy().apply_proj().plot()\n # add the second, copy, apply and see ...\n evoked.add_proj(b).copy().apply_proj().plot()\n # drop the first and see again\n evoked.copy().del_proj(0).apply_proj().plot()\n evoked.apply_proj() # finally keep both\n\n Returns\n -------\n self : instance of Raw | Epochs | Evoked\n The instance.\n \"\"\"\n from ..epochs import BaseEpochs\n from ..evoked import Evoked\n from .base import BaseRaw\n if self.info['projs'] is None or len(self.info['projs']) == 0:\n logger.info('No projector specified for this dataset. '\n 'Please consider the method self.add_proj.')\n return self\n\n # Exit delayed mode if you apply proj\n if isinstance(self, BaseEpochs) and self._do_delayed_proj:\n logger.info('Leaving delayed SSP mode.')\n self._do_delayed_proj = False\n\n if all(p['active'] for p in self.info['projs']):\n logger.info('Projections have already been applied. '\n 'Setting proj attribute to True.')\n return self\n\n _projector, info = setup_proj(deepcopy(self.info), add_eeg_ref=False,\n activate=True, verbose=self.verbose)\n # let's not raise a RuntimeError here, otherwise interactive plotting\n if _projector is None: # won't be fun.\n logger.info('The projections don\\'t apply to these data.'\n ' Doing nothing.')\n return self\n self._projector, self.info = _projector, info\n if isinstance(self, (BaseRaw, Evoked)):\n if self.preload:\n self._data = np.dot(self._projector, self._data)\n else: # BaseEpochs\n if self.preload:\n for ii, e in enumerate(self._data):\n self._data[ii] = self._project_epoch(e)\n else:\n self.load_data() # will automatically apply\n logger.info('SSP projectors applied...')\n return self\n\n def del_proj(self, idx='all'):\n \"\"\"Remove SSP projection vector.\n\n Note: The projection vector can only be removed if it is inactive\n (has not been applied to the data).\n\n Parameters\n ----------\n idx : int | list of int | str\n Index of the projector to remove. 
Can also be \"all\" (default)\n to remove all projectors.\n\n Returns\n -------\n self : instance of Raw | Epochs | Evoked\n \"\"\"\n if isinstance(idx, string_types) and idx == 'all':\n idx = list(range(len(self.info['projs'])))\n idx = np.atleast_1d(np.array(idx, int)).ravel()\n if any(self.info['projs'][ii]['active'] for ii in idx):\n raise ValueError('Cannot remove projectors that have already '\n 'been applied')\n keep = np.ones(len(self.info['projs']))\n keep[idx] = False # works with negative indexing and does checks\n self.info['projs'] = [p for p, k in zip(self.info['projs'], keep) if k]\n return self\n\n def plot_projs_topomap(self, ch_type=None, layout=None, axes=None):\n \"\"\"Plot SSP vector.\n\n Parameters\n ----------\n ch_type : 'mag' | 'grad' | 'planar1' | 'planar2' | 'eeg' | None | List\n The channel type to plot. For 'grad', the gradiometers are collec-\n ted in pairs and the RMS for each pair is plotted. If None\n (default), it will return all channel types present. If a list of\n ch_types is provided, it will return multiple figures.\n layout : None | Layout | List of Layouts\n Layout instance specifying sensor positions (does not need to\n be specified for Neuromag data). If possible, the correct\n layout file is inferred from the data; if no appropriate layout\n file was found, the layout is automatically generated from the\n sensor locations. Or a list of Layout if projections\n are from different sensor types.\n axes : instance of Axes | list | None\n The axes to plot to. If list, the list must be a list of Axes of\n the same length as the number of projectors. If instance of Axes,\n there must be only one projector. Defaults to None.\n\n Returns\n -------\n fig : instance of matplotlib figure\n Figure distributing one image per channel across sensor topography.\n \"\"\"\n if self.info['projs'] is not None or len(self.info['projs']) != 0:\n from ..viz.topomap import plot_projs_topomap\n from ..channels.layout import find_layout\n if layout is None:\n layout = []\n if ch_type is None:\n ch_type = [ch for ch in ['meg', 'eeg'] if ch in self]\n elif isinstance(ch_type, string_types):\n ch_type = [ch_type]\n for ch in ch_type:\n if ch in self:\n layout.append(find_layout(self.info, ch, exclude=[]))\n else:\n warn('Channel type %s is not found in info.' % ch)\n fig = plot_projs_topomap(self.info['projs'], layout, axes=axes)\n else:\n raise ValueError(\"Info is missing projs. 
Nothing to plot.\")\n\n return fig\n\n\ndef _proj_equal(a, b, check_active=True):\n \"\"\"Test if two projectors are equal.\"\"\"\n equal = ((a['active'] == b['active'] or not check_active) and\n a['kind'] == b['kind'] and\n a['desc'] == b['desc'] and\n a['data']['col_names'] == b['data']['col_names'] and\n a['data']['row_names'] == b['data']['row_names'] and\n a['data']['ncol'] == b['data']['ncol'] and\n a['data']['nrow'] == b['data']['nrow'] and\n np.all(a['data']['data'] == b['data']['data']))\n return equal\n\n\n@verbose\ndef _read_proj(fid, node, verbose=None):\n \"\"\"Read spatial projections from a FIF file.\n\n Parameters\n ----------\n fid : file\n The file descriptor of the open file.\n node : tree node\n The node of the tree where to look.\n verbose : bool, str, int, or None\n If not None, override default verbose level (see :func:`mne.verbose`\n and :ref:`Logging documentation <tut_logging>` for more).\n\n Returns\n -------\n projs : list of Projection\n The list of projections.\n \"\"\"\n projs = list()\n\n # Locate the projection data\n nodes = dir_tree_find(node, FIFF.FIFFB_PROJ)\n if len(nodes) == 0:\n return projs\n\n tag = find_tag(fid, nodes[0], FIFF.FIFF_NCHAN)\n if tag is not None:\n global_nchan = int(tag.data)\n\n items = dir_tree_find(nodes[0], FIFF.FIFFB_PROJ_ITEM)\n for item in items:\n # Find all desired tags in one item\n tag = find_tag(fid, item, FIFF.FIFF_NCHAN)\n if tag is not None:\n nchan = int(tag.data)\n else:\n nchan = global_nchan\n\n tag = find_tag(fid, item, FIFF.FIFF_DESCRIPTION)\n if tag is not None:\n desc = tag.data\n else:\n tag = find_tag(fid, item, FIFF.FIFF_NAME)\n if tag is not None:\n desc = tag.data\n else:\n raise ValueError('Projection item description missing')\n\n # XXX : is this useful ?\n # tag = find_tag(fid, item, FIFF.FIFF_PROJ_ITEM_CH_NAME_LIST)\n # if tag is not None:\n # namelist = tag.data\n # else:\n # raise ValueError('Projection item channel list missing')\n\n tag = find_tag(fid, item, FIFF.FIFF_PROJ_ITEM_KIND)\n if tag is not None:\n kind = int(tag.data)\n else:\n raise ValueError('Projection item kind missing')\n\n tag = find_tag(fid, item, FIFF.FIFF_PROJ_ITEM_NVEC)\n if tag is not None:\n nvec = int(tag.data)\n else:\n raise ValueError('Number of projection vectors not specified')\n\n tag = find_tag(fid, item, FIFF.FIFF_PROJ_ITEM_CH_NAME_LIST)\n if tag is not None:\n names = tag.data.split(':')\n else:\n raise ValueError('Projection item channel list missing')\n\n tag = find_tag(fid, item, FIFF.FIFF_PROJ_ITEM_VECTORS)\n if tag is not None:\n data = tag.data\n else:\n raise ValueError('Projection item data missing')\n\n tag = find_tag(fid, item, FIFF.FIFF_MNE_PROJ_ITEM_ACTIVE)\n if tag is not None:\n active = bool(tag.data)\n else:\n active = False\n\n tag = find_tag(fid, item, FIFF.FIFF_MNE_ICA_PCA_EXPLAINED_VAR)\n if tag is not None:\n explained_var = tag.data\n else:\n explained_var = None\n\n # handle the case when data is transposed for some reason\n if data.shape[0] == len(names) and data.shape[1] == nvec:\n data = data.T\n\n if data.shape[1] != len(names):\n raise ValueError('Number of channel names does not match the '\n 'size of data matrix')\n\n # Use exactly the same fields in data as in a named matrix\n one = Projection(kind=kind, active=active, desc=desc,\n data=dict(nrow=nvec, ncol=nchan, row_names=None,\n col_names=names, data=data),\n explained_var=explained_var)\n\n projs.append(one)\n\n if len(projs) > 0:\n logger.info(' Read a total of %d projection items:' % len(projs))\n for k in range(len(projs)):\n 
if projs[k]['active']:\n misc = 'active'\n else:\n misc = ' idle'\n logger.info(' %s (%d x %d) %s'\n % (projs[k]['desc'], projs[k]['data']['nrow'],\n projs[k]['data']['ncol'], misc))\n\n return projs\n\n\n###############################################################################\n# Write\n\ndef _write_proj(fid, projs):\n \"\"\"Write a projection operator to a file.\n\n Parameters\n ----------\n fid : file\n The file descriptor of the open file.\n projs : dict\n The projection operator.\n \"\"\"\n if len(projs) == 0:\n return\n start_block(fid, FIFF.FIFFB_PROJ)\n\n for proj in projs:\n start_block(fid, FIFF.FIFFB_PROJ_ITEM)\n write_int(fid, FIFF.FIFF_NCHAN, proj['data']['ncol'])\n write_name_list(fid, FIFF.FIFF_PROJ_ITEM_CH_NAME_LIST,\n proj['data']['col_names'])\n write_string(fid, FIFF.FIFF_NAME, proj['desc'])\n write_int(fid, FIFF.FIFF_PROJ_ITEM_KIND, proj['kind'])\n if proj['kind'] == FIFF.FIFFV_PROJ_ITEM_FIELD:\n write_float(fid, FIFF.FIFF_PROJ_ITEM_TIME, 0.0)\n\n write_int(fid, FIFF.FIFF_PROJ_ITEM_NVEC, proj['data']['nrow'])\n write_int(fid, FIFF.FIFF_MNE_PROJ_ITEM_ACTIVE, proj['active'])\n write_float_matrix(fid, FIFF.FIFF_PROJ_ITEM_VECTORS,\n proj['data']['data'])\n if proj['explained_var'] is not None:\n write_float(fid, FIFF.FIFF_MNE_ICA_PCA_EXPLAINED_VAR,\n proj['explained_var'])\n end_block(fid, FIFF.FIFFB_PROJ_ITEM)\n\n end_block(fid, FIFF.FIFFB_PROJ)\n\n\n###############################################################################\n# Utils\n\ndef _check_projs(projs, copy=True):\n \"\"\"Check that projs is a list of Projection.\"\"\"\n if not isinstance(projs, (list, tuple)):\n raise TypeError('projs must be a list or tuple, got %s'\n % (type(projs),))\n for pi, p in enumerate(projs):\n if not isinstance(p, Projection):\n raise TypeError('All entries in projs list must be Projection '\n 'instances, but projs[%d] is type %s'\n % (pi, type(p)))\n return deepcopy(projs) if copy else projs\n\n\ndef make_projector(projs, ch_names, bads=(), include_active=True):\n \"\"\"Create an SSP operator from SSP projection vectors.\n\n Parameters\n ----------\n projs : list\n List of projection vectors.\n ch_names : list of str\n List of channels to include in the projection matrix.\n bads : list of str\n Some bad channels to exclude. If bad channels were marked\n in the raw file when projs were calculated using mne-python,\n they should not need to be included here as they will\n have been automatically omitted from the projectors.\n include_active : bool\n Also include projectors that are already active.\n\n Returns\n -------\n proj : array of shape [n_channels, n_channels]\n The projection operator to apply to the data.\n nproj : int\n How many items in the projector.\n U : array\n The orthogonal basis of the projection vectors (optional).\n \"\"\"\n return _make_projector(projs, ch_names, bads, include_active)\n\n\ndef _make_projector(projs, ch_names, bads=(), include_active=True,\n inplace=False):\n \"\"\"Subselect projs based on ch_names and bads.\n\n Use inplace=True mode to modify ``projs`` inplace so that no\n warning will be raised next time projectors are constructed with\n the given inputs. 
If inplace=True, no meaningful data are returned.\n \"\"\"\n nchan = len(ch_names)\n if nchan == 0:\n raise ValueError('No channel names specified')\n\n default_return = (np.eye(nchan, nchan), 0, [])\n\n # Check trivial cases first\n if projs is None:\n return default_return\n\n nvec = 0\n nproj = 0\n for p in projs:\n if not p['active'] or include_active:\n nproj += 1\n nvec += p['data']['nrow']\n\n if nproj == 0:\n return default_return\n\n # Pick the appropriate entries\n vecs = np.zeros((nchan, nvec))\n nvec = 0\n nonzero = 0\n bads = set(bads)\n for k, p in enumerate(projs):\n if not p['active'] or include_active:\n if (len(p['data']['col_names']) !=\n len(np.unique(p['data']['col_names']))):\n raise ValueError('Channel name list in projection item %d'\n ' contains duplicate items' % k)\n\n # Get the two selection vectors to pick correct elements from\n # the projection vectors omitting bad channels\n sel = []\n vecsel = []\n p_set = set(p['data']['col_names']) # faster membership access\n for c, name in enumerate(ch_names):\n if name not in bads and name in p_set:\n sel.append(c)\n vecsel.append(p['data']['col_names'].index(name))\n\n # If there is something to pick, pickit\n nrow = p['data']['nrow']\n this_vecs = vecs[:, nvec:nvec + nrow]\n if len(sel) > 0:\n this_vecs[sel] = p['data']['data'][:, vecsel].T\n\n # Rescale for better detection of small singular values\n for v in range(p['data']['nrow']):\n psize = sqrt(np.sum(this_vecs[:, v] * this_vecs[:, v]))\n if psize > 0:\n orig_n = p['data']['data'].any(axis=0).sum()\n # Average ref still works if channels are removed\n if len(vecsel) < 0.9 * orig_n and not inplace and \\\n (p['kind'] != FIFF.FIFFV_MNE_PROJ_ITEM_EEG_AVREF or\n len(vecsel) == 1):\n warn('Projection vector \"%s\" has magnitude %0.2f '\n '(should be unity), applying projector with '\n '%s/%s of the original channels available may '\n 'be dangerous, consider recomputing and adding '\n 'projection vectors for channels that are '\n 'eventually used. 
If this is intentional, '\n 'consider using info.normalize_proj()'\n % (p['desc'], psize, len(vecsel), orig_n))\n this_vecs[:, v] /= psize\n nonzero += 1\n # If doing \"inplace\" mode, \"fix\" the projectors to only operate\n # on this subset of channels.\n if inplace:\n p['data']['data'] = this_vecs[sel].T\n p['data']['col_names'] = [p['data']['col_names'][ii]\n for ii in vecsel]\n nvec += p['data']['nrow']\n\n # Check whether all of the vectors are exactly zero\n if nonzero == 0 or inplace:\n return default_return\n\n # Reorthogonalize the vectors\n U, S, V = linalg.svd(vecs[:, :nvec], full_matrices=False)\n\n # Throw away the linearly dependent guys\n nproj = np.sum((S / S[0]) > 1e-2)\n U = U[:, :nproj]\n\n # Here is the celebrated result\n proj = np.eye(nchan, nchan) - np.dot(U, U.T)\n\n return proj, nproj, U\n\n\ndef _normalize_proj(info):\n \"\"\"Normalize proj after subselection to avoid warnings.\n\n This is really only useful for tests, and might not be needed\n eventually if we change or improve our handling of projectors\n with picks.\n \"\"\"\n # Here we do info.get b/c info can actually be a noise cov\n _make_projector(info['projs'], info.get('ch_names', info.get('names')),\n info['bads'], include_active=True, inplace=True)\n\n\ndef make_projector_info(info, include_active=True):\n \"\"\"Make an SSP operator using the measurement info.\n\n Calls make_projector on good channels.\n\n Parameters\n ----------\n info : dict\n Measurement info.\n include_active : bool\n Also include projectors that are already active.\n\n Returns\n -------\n proj : array of shape [n_channels, n_channels]\n The projection operator to apply to the data.\n nproj : int\n How many items in the projector.\n \"\"\"\n proj, nproj, _ = make_projector(info['projs'], info['ch_names'],\n info['bads'], include_active)\n return proj, nproj\n\n\n@verbose\ndef activate_proj(projs, copy=True, verbose=None):\n \"\"\"Set all projections to active.\n\n Useful before passing them to make_projector.\n\n Parameters\n ----------\n projs : list\n The projectors.\n copy : bool\n Modify projs in place or operate on a copy.\n verbose : bool, str, int, or None\n If not None, override default verbose level (see :func:`mne.verbose`\n and :ref:`Logging documentation <tut_logging>` for more).\n\n Returns\n -------\n projs : list\n The projectors.\n \"\"\"\n if copy:\n projs = deepcopy(projs)\n\n # Activate the projection items\n for proj in projs:\n proj['active'] = True\n\n logger.info('%d projection items activated' % len(projs))\n\n return projs\n\n\n@verbose\ndef deactivate_proj(projs, copy=True, verbose=None):\n \"\"\"Set all projections to inactive.\n\n Useful before saving raw data without projectors applied.\n\n Parameters\n ----------\n projs : list\n The projectors.\n copy : bool\n Modify projs in place or operate on a copy.\n verbose : bool, str, int, or None\n If not None, override default verbose level (see :func:`mne.verbose`\n and :ref:`Logging documentation <tut_logging>` for more).\n\n Returns\n -------\n projs : list\n The projectors.\n \"\"\"\n if copy:\n projs = deepcopy(projs)\n\n # Deactivate the projection items\n for proj in projs:\n proj['active'] = False\n\n logger.info('%d projection items deactivated' % len(projs))\n\n return projs\n\n\n@verbose\ndef make_eeg_average_ref_proj(info, activate=True, verbose=None):\n \"\"\"Create an EEG average reference SSP projection vector.\n\n Parameters\n ----------\n info : dict\n Measurement info.\n activate : bool\n If True projections are activated.\n verbose : 
bool, str, int, or None\n If not None, override default verbose level (see :func:`mne.verbose`\n and :ref:`Logging documentation <tut_logging>` for more).\n\n Returns\n -------\n eeg_proj: instance of Projection\n The SSP/PCA projector.\n \"\"\"\n if info.get('custom_ref_applied', False):\n raise RuntimeError('A custom reference has been applied to the '\n 'data earlier. Please use the '\n 'mne.io.set_eeg_reference function to move from '\n 'one EEG reference to another.')\n\n logger.info(\"Adding average EEG reference projection.\")\n eeg_sel = pick_types(info, meg=False, eeg=True, ref_meg=False,\n exclude='bads')\n ch_names = info['ch_names']\n eeg_names = [ch_names[k] for k in eeg_sel]\n n_eeg = len(eeg_sel)\n if n_eeg == 0:\n raise ValueError('Cannot create EEG average reference projector '\n '(no EEG data found)')\n vec = np.ones((1, n_eeg))\n vec /= n_eeg\n explained_var = None\n eeg_proj_data = dict(col_names=eeg_names, row_names=None,\n data=vec, nrow=1, ncol=n_eeg)\n eeg_proj = Projection(active=activate, data=eeg_proj_data,\n desc='Average EEG reference',\n kind=FIFF.FIFFV_MNE_PROJ_ITEM_EEG_AVREF,\n explained_var=explained_var)\n return eeg_proj\n\n\ndef _has_eeg_average_ref_proj(projs, check_active=False):\n \"\"\"Determine if a list of projectors has an average EEG ref.\n\n Optionally, set check_active=True to additionally check if the CAR\n has already been applied.\n \"\"\"\n for proj in projs:\n if (proj['desc'] == 'Average EEG reference' or\n proj['kind'] == FIFF.FIFFV_MNE_PROJ_ITEM_EEG_AVREF):\n if not check_active or proj['active']:\n return True\n return False\n\n\ndef _needs_eeg_average_ref_proj(info):\n \"\"\"Determine if the EEG needs an averge EEG reference.\n\n This returns True if no custom reference has been applied and no average\n reference projection is present in the list of projections.\n \"\"\"\n eeg_sel = pick_types(info, meg=False, eeg=True, ref_meg=False,\n exclude='bads')\n return (len(eeg_sel) > 0 and\n not info['custom_ref_applied'] and\n not _has_eeg_average_ref_proj(info['projs']))\n\n\n@verbose\ndef setup_proj(info, add_eeg_ref=True, activate=True, verbose=None):\n \"\"\"Set up projection for Raw and Epochs.\n\n Parameters\n ----------\n info : dict\n The measurement info.\n add_eeg_ref : bool\n If True, an EEG average reference will be added (unless one\n already exists).\n activate : bool\n If True projections are activated.\n verbose : bool, str, int, or None\n If not None, override default verbose level (see :func:`mne.verbose`\n and :ref:`Logging documentation <tut_logging>` for more).\n\n Returns\n -------\n projector : array of shape [n_channels, n_channels]\n The projection operator to apply to the data.\n info : dict\n The modified measurement info (Warning: info is modified inplace).\n \"\"\"\n # Add EEG ref reference proj if necessary\n if add_eeg_ref and _needs_eeg_average_ref_proj(info):\n eeg_proj = make_eeg_average_ref_proj(info, activate=activate)\n info['projs'].append(eeg_proj)\n\n # Create the projector\n projector, nproj = make_projector_info(info)\n if nproj == 0:\n if verbose:\n logger.info('The projection vectors do not apply to these '\n 'channels')\n projector = None\n else:\n logger.info('Created an SSP operator (subspace dimension = %d)'\n % nproj)\n\n # The projection items have been activated\n if activate:\n info['projs'] = activate_proj(info['projs'], copy=False)\n\n return projector, info\n\n\ndef _uniquify_projs(projs, check_active=True, sort=True):\n \"\"\"Make unique projs.\"\"\"\n final_projs = []\n for proj in 
projs: # flatten\n if not any(_proj_equal(p, proj, check_active) for p in final_projs):\n final_projs.append(proj)\n\n my_count = count(len(final_projs))\n\n def sorter(x):\n \"\"\"Sort in a nice way.\"\"\"\n digits = [s for s in x['desc'] if s.isdigit()]\n if digits:\n sort_idx = int(digits[-1])\n else:\n sort_idx = next(my_count)\n return (sort_idx, x['desc'])\n\n return sorted(final_projs, key=sorter) if sort else final_projs\n", "# Authors: Alexandre Gramfort <[email protected]>\n# Matti Hamalainen <[email protected]>\n# Denis Engemann <[email protected]>\n# Andrew Dykstra <[email protected]>\n# Teon Brooks <[email protected]>\n#\n# License: BSD (3-clause)\n\nimport os\nimport os.path as op\n\nimport numpy as np\nfrom scipy import sparse\n\nfrom ..externals.six import string_types\nfrom ..utils import verbose, logger, warn, copy_function_doc_to_method_doc\nfrom ..utils import _check_preload, _validate_type\nfrom ..io.compensator import get_current_comp\nfrom ..io.constants import FIFF\nfrom ..io.meas_info import anonymize_info, Info\nfrom ..io.pick import (channel_type, pick_info, pick_types, _picks_by_type,\n _check_excludes_includes, _PICK_TYPES_KEYS,\n channel_indices_by_type, pick_channels)\n\n\ndef _get_meg_system(info):\n \"\"\"Educated guess for the helmet type based on channels.\"\"\"\n system = '306m'\n for ch in info['chs']:\n if ch['kind'] == FIFF.FIFFV_MEG_CH:\n # Only take first 16 bits, as higher bits store CTF grad comp order\n coil_type = ch['coil_type'] & 0xFFFF\n if coil_type == FIFF.FIFFV_COIL_NM_122:\n system = '122m'\n break\n elif coil_type // 1000 == 3: # All Vectorview coils are 30xx\n system = '306m'\n break\n elif (coil_type == FIFF.FIFFV_COIL_MAGNES_MAG or\n coil_type == FIFF.FIFFV_COIL_MAGNES_GRAD):\n nmag = np.sum([c['kind'] == FIFF.FIFFV_MEG_CH\n for c in info['chs']])\n system = 'Magnes_3600wh' if nmag > 150 else 'Magnes_2500wh'\n break\n elif coil_type == FIFF.FIFFV_COIL_CTF_GRAD:\n system = 'CTF_275'\n break\n elif coil_type == FIFF.FIFFV_COIL_KIT_GRAD:\n system = 'KIT'\n break\n elif coil_type == FIFF.FIFFV_COIL_BABY_GRAD:\n system = 'BabySQUID'\n break\n elif coil_type == FIFF.FIFFV_COIL_ARTEMIS123_GRAD:\n system = 'ARTEMIS123'\n break\n return system\n\n\ndef _contains_ch_type(info, ch_type):\n \"\"\"Check whether a certain channel type is in an info object.\n\n Parameters\n ----------\n info : instance of Info\n The measurement information.\n ch_type : str\n the channel type to be checked for\n\n Returns\n -------\n has_ch_type : bool\n Whether the channel type is present or not.\n \"\"\"\n _validate_type(ch_type, 'str', \"ch_type\")\n\n meg_extras = ['mag', 'grad', 'planar1', 'planar2']\n fnirs_extras = ['hbo', 'hbr']\n valid_channel_types = sorted([key for key in _PICK_TYPES_KEYS\n if key != 'meg'] + meg_extras + fnirs_extras)\n if ch_type not in valid_channel_types:\n raise ValueError('ch_type must be one of %s, not \"%s\"'\n % (valid_channel_types, ch_type))\n if info is None:\n raise ValueError('Cannot check for channels of type \"%s\" because info '\n 'is None' % (ch_type,))\n return ch_type in [channel_type(info, ii) for ii in range(info['nchan'])]\n\n\ndef _get_ch_type(inst, ch_type):\n \"\"\"Choose a single channel type (usually for plotting).\n\n Usually used in plotting to plot a single datatype, e.g. look for mags,\n then grads, then ... 
to plot.\n \"\"\"\n if ch_type is None:\n for type_ in ['mag', 'grad', 'planar1', 'planar2', 'eeg']:\n if isinstance(inst, Info):\n if _contains_ch_type(inst, type_):\n ch_type = type_\n break\n elif type_ in inst:\n ch_type = type_\n break\n else:\n raise RuntimeError('No plottable channel types found')\n return ch_type\n\n\n@verbose\ndef equalize_channels(candidates, verbose=None):\n \"\"\"Equalize channel picks for a collection of MNE-Python objects.\n\n Parameters\n ----------\n candidates : list\n list Raw | Epochs | Evoked | AverageTFR\n verbose : bool, str, int, or None\n If not None, override default verbose level (see :func:`mne.verbose`\n and :ref:`Logging documentation <tut_logging>` for more).\n\n Notes\n -----\n This function operates inplace.\n \"\"\"\n from ..io.base import BaseRaw\n from ..epochs import BaseEpochs\n from ..evoked import Evoked\n from ..time_frequency import _BaseTFR\n\n for candidate in candidates:\n _validate_type(candidate,\n (BaseRaw, BaseEpochs, Evoked, _BaseTFR),\n \"Instances to be modified\",\n \"Raw, Epochs, Evoked or TFR\")\n\n chan_max_idx = np.argmax([c.info['nchan'] for c in candidates])\n chan_template = candidates[chan_max_idx].ch_names\n logger.info('Identifying common channels ...')\n channels = [set(c.ch_names) for c in candidates]\n common_channels = set(chan_template).intersection(*channels)\n dropped = list()\n for c in candidates:\n drop_them = list(set(c.ch_names) - common_channels)\n if drop_them:\n c.drop_channels(drop_them)\n dropped.extend(drop_them)\n if dropped:\n dropped = list(set(dropped))\n logger.info('Dropped the following channels:\\n%s' % dropped)\n else:\n logger.info('all channels are corresponding, nothing to do.')\n\n\nclass ContainsMixin(object):\n \"\"\"Mixin class for Raw, Evoked, Epochs.\"\"\"\n\n def __contains__(self, ch_type):\n \"\"\"Check channel type membership.\n\n Parameters\n ----------\n ch_type : str\n Channel type to check for. Can be e.g. 
'meg', 'eeg', 'stim', etc.\n\n Returns\n -------\n in : bool\n Whether or not the instance contains the given channel type.\n\n Examples\n --------\n Channel type membership can be tested as::\n\n >>> 'meg' in inst # doctest: +SKIP\n True\n >>> 'seeg' in inst # doctest: +SKIP\n False\n\n \"\"\"\n if ch_type == 'meg':\n has_ch_type = (_contains_ch_type(self.info, 'mag') or\n _contains_ch_type(self.info, 'grad'))\n else:\n has_ch_type = _contains_ch_type(self.info, ch_type)\n return has_ch_type\n\n @property\n def compensation_grade(self):\n \"\"\"The current gradient compensation grade.\"\"\"\n return get_current_comp(self.info)\n\n\n# XXX Eventually de-duplicate with _kind_dict of mne/io/meas_info.py\n_human2fiff = {'ecg': FIFF.FIFFV_ECG_CH,\n 'eeg': FIFF.FIFFV_EEG_CH,\n 'emg': FIFF.FIFFV_EMG_CH,\n 'eog': FIFF.FIFFV_EOG_CH,\n 'exci': FIFF.FIFFV_EXCI_CH,\n 'ias': FIFF.FIFFV_IAS_CH,\n 'misc': FIFF.FIFFV_MISC_CH,\n 'resp': FIFF.FIFFV_RESP_CH,\n 'seeg': FIFF.FIFFV_SEEG_CH,\n 'stim': FIFF.FIFFV_STIM_CH,\n 'syst': FIFF.FIFFV_SYST_CH,\n 'bio': FIFF.FIFFV_BIO_CH,\n 'ecog': FIFF.FIFFV_ECOG_CH,\n 'hbo': FIFF.FIFFV_FNIRS_CH,\n 'hbr': FIFF.FIFFV_FNIRS_CH}\n_human2unit = {'ecg': FIFF.FIFF_UNIT_V,\n 'eeg': FIFF.FIFF_UNIT_V,\n 'emg': FIFF.FIFF_UNIT_V,\n 'eog': FIFF.FIFF_UNIT_V,\n 'exci': FIFF.FIFF_UNIT_NONE,\n 'ias': FIFF.FIFF_UNIT_NONE,\n 'misc': FIFF.FIFF_UNIT_V,\n 'resp': FIFF.FIFF_UNIT_NONE,\n 'seeg': FIFF.FIFF_UNIT_V,\n 'stim': FIFF.FIFF_UNIT_NONE,\n 'syst': FIFF.FIFF_UNIT_NONE,\n 'bio': FIFF.FIFF_UNIT_V,\n 'ecog': FIFF.FIFF_UNIT_V,\n 'hbo': FIFF.FIFF_UNIT_MOL,\n 'hbr': FIFF.FIFF_UNIT_MOL}\n_unit2human = {FIFF.FIFF_UNIT_V: 'V',\n FIFF.FIFF_UNIT_T: 'T',\n FIFF.FIFF_UNIT_T_M: 'T/m',\n FIFF.FIFF_UNIT_MOL: 'M',\n FIFF.FIFF_UNIT_NONE: 'NA'}\n\n\ndef _check_set(ch, projs, ch_type):\n \"\"\"Ensure type change is compatible with projectors.\"\"\"\n new_kind = _human2fiff[ch_type]\n if ch['kind'] != new_kind:\n for proj in projs:\n if ch['ch_name'] in proj['data']['col_names']:\n raise RuntimeError('Cannot change channel type for channel %s '\n 'in projector \"%s\"'\n % (ch['ch_name'], proj['desc']))\n ch['kind'] = new_kind\n\n\nclass SetChannelsMixin(object):\n \"\"\"Mixin class for Raw, Evoked, Epochs.\"\"\"\n\n @verbose\n def set_eeg_reference(self, ref_channels='average', projection=False,\n verbose=None):\n \"\"\"Specify which reference to use for EEG data.\n\n By default, MNE-Python will automatically re-reference the EEG signal\n to use an average reference (see below). Use this function to\n explicitly specify the desired reference for EEG. This can be either an\n existing electrode or a new virtual channel. This function will\n re-reference the data according to the desired reference and prevent\n MNE-Python from automatically adding an average reference projection.\n\n Some common referencing schemes and the corresponding value for the\n ``ref_channels`` parameter:\n\n No re-referencing:\n If the EEG data is already using the proper reference, set\n ``ref_channels=[]``. This will prevent MNE-Python from\n automatically adding an average reference projection.\n\n Average reference:\n A new virtual reference electrode is created by averaging the\n current EEG signal by setting ``ref_channels='average'``. 
Bad EEG\n channels are automatically excluded if they are properly set in\n ``info['bads']``.\n\n A single electrode:\n Set ``ref_channels`` to a list containing the name of the channel\n that will act as the new reference, for example\n ``ref_channels=['Cz']``.\n\n The mean of multiple electrodes:\n A new virtual reference electrode is created by computing the\n average of the current EEG signal recorded from two or more\n selected channels. Set ``ref_channels`` to a list of channel names,\n indicating which channels to use. For example, to apply an average\n mastoid reference, when using the 10-20 naming scheme, set\n ``ref_channels=['M1', 'M2']``.\n\n Parameters\n ----------\n ref_channels : list of str | str\n The name(s) of the channel(s) used to construct the reference. To\n apply an average reference, specify ``'average'`` here (default).\n If an empty list is specified, the data is assumed to already have\n a proper reference and MNE will not attempt any re-referencing of\n the data. Defaults to an average reference.\n projection : bool\n If ``ref_channels='average'`` this argument specifies if the\n average reference should be computed as a projection (True) or not\n (False; default). If ``projection=True``, the average reference is\n added as a projection and is not applied to the data (it can be\n applied afterwards with the ``apply_proj`` method). If\n ``projection=False``, the average reference is directly applied to\n the data. If ``ref_channels`` is not ``'average'``, ``projection``\n must be set to ``False`` (the default in this case).\n verbose : bool, str, int, or None\n If not None, override default verbose level (see\n :func:`mne.verbose` and :ref:`Logging documentation <tut_logging>`\n for more).\n\n Returns\n -------\n inst : instance of Raw | Epochs | Evoked\n Data with EEG channels re-referenced. If ``ref_channels='average'``\n and ``projection=True`` a projection will be added instead of\n directly re-referencing the data.\n\n See Also\n --------\n mne.set_bipolar_reference : Convenience function for creating bipolar\n references.\n\n Notes\n -----\n 1. If a reference is requested that is not the average reference, this\n function removes any pre-existing average reference projections.\n\n 2. During source localization, the EEG signal should have an average\n reference.\n\n 3. In order to apply a reference, the data must be preloaded. This is\n not necessary if ``ref_channels='average'`` and ``projection=True``.\n\n 4. For an average reference, bad EEG channels are automatically\n excluded if they are properly set in ``info['bads']``.\n\n .. versionadded:: 0.9.0\n \"\"\"\n from ..io.reference import set_eeg_reference\n return set_eeg_reference(self, ref_channels=ref_channels, copy=False,\n projection=projection)[0]\n\n def _get_channel_positions(self, picks=None):\n \"\"\"Get channel locations from info.\n\n Parameters\n ----------\n picks : array-like of int | None\n Indices of channels to include. If None (default), all meg and eeg\n channels that are available are returned (bad channels excluded).\n\n Notes\n -----\n .. 
versionadded:: 0.9.0\n \"\"\"\n if picks is None:\n picks = pick_types(self.info, meg=True, eeg=True)\n chs = self.info['chs']\n pos = np.array([chs[k]['loc'][:3] for k in picks])\n n_zero = np.sum(np.sum(np.abs(pos), axis=1) == 0)\n if n_zero > 1: # XXX some systems have origin (0, 0, 0)\n raise ValueError('Could not extract channel positions for '\n '{} channels'.format(n_zero))\n return pos\n\n def _set_channel_positions(self, pos, names):\n \"\"\"Update channel locations in info.\n\n Parameters\n ----------\n pos : array-like | np.ndarray, shape (n_points, 3)\n The channel positions to be set.\n names : list of str\n The names of the channels to be set.\n\n Notes\n -----\n .. versionadded:: 0.9.0\n \"\"\"\n if len(pos) != len(names):\n raise ValueError('Number of channel positions not equal to '\n 'the number of names given.')\n pos = np.asarray(pos, dtype=np.float)\n if pos.shape[-1] != 3 or pos.ndim != 2:\n msg = ('Channel positions must have the shape (n_points, 3) '\n 'not %s.' % (pos.shape,))\n raise ValueError(msg)\n for name, p in zip(names, pos):\n if name in self.ch_names:\n idx = self.ch_names.index(name)\n self.info['chs'][idx]['loc'][:3] = p\n else:\n msg = ('%s was not found in the info. Cannot be updated.'\n % name)\n raise ValueError(msg)\n\n def set_channel_types(self, mapping):\n \"\"\"Define the sensor type of channels.\n\n Note: The following sensor types are accepted:\n ecg, eeg, emg, eog, exci, ias, misc, resp, seeg, stim, syst, ecog,\n hbo, hbr\n\n Parameters\n ----------\n mapping : dict\n a dictionary mapping a channel to a sensor type (str)\n {'EEG061': 'eog'}.\n\n Notes\n -----\n .. versionadded:: 0.9.0\n \"\"\"\n ch_names = self.info['ch_names']\n\n # first check and assemble clean mappings of index and name\n unit_changes = dict()\n for ch_name, ch_type in mapping.items():\n if ch_name not in ch_names:\n raise ValueError(\"This channel name (%s) doesn't exist in \"\n \"info.\" % ch_name)\n\n c_ind = ch_names.index(ch_name)\n if ch_type not in _human2fiff:\n raise ValueError('This function cannot change to this '\n 'channel type: %s. Accepted channel types '\n 'are %s.'\n % (ch_type,\n \", \".join(sorted(_human2unit.keys()))))\n # Set sensor type\n _check_set(self.info['chs'][c_ind], self.info['projs'], ch_type)\n unit_old = self.info['chs'][c_ind]['unit']\n unit_new = _human2unit[ch_type]\n if unit_old not in _unit2human:\n raise ValueError(\"Channel '%s' has unknown unit (%s). Please \"\n \"fix the measurement info of your data.\"\n % (ch_name, unit_old))\n if unit_old != _human2unit[ch_type]:\n this_change = (_unit2human[unit_old], _unit2human[unit_new])\n if this_change not in unit_changes:\n unit_changes[this_change] = list()\n unit_changes[this_change].append(ch_name)\n self.info['chs'][c_ind]['unit'] = _human2unit[ch_type]\n if ch_type in ['eeg', 'seeg', 'ecog']:\n coil_type = FIFF.FIFFV_COIL_EEG\n elif ch_type == 'hbo':\n coil_type = FIFF.FIFFV_COIL_FNIRS_HBO\n elif ch_type == 'hbr':\n coil_type = FIFF.FIFFV_COIL_FNIRS_HBR\n else:\n coil_type = FIFF.FIFFV_COIL_NONE\n self.info['chs'][c_ind]['coil_type'] = coil_type\n msg = \"The unit for channel(s) {0} has changed from {1} to {2}.\"\n for this_change, names in unit_changes.items():\n warn(msg.format(\", \".join(sorted(names)), *this_change))\n\n def rename_channels(self, mapping):\n \"\"\"Rename channels.\n\n Parameters\n ----------\n mapping : dict | callable\n a dictionary mapping the old channel to a new channel name\n e.g. {'EEG061' : 'EEG161'}. 
Can also be a callable function\n that takes and returns a string (new in version 0.10.0).\n\n Notes\n -----\n .. versionadded:: 0.9.0\n \"\"\"\n rename_channels(self.info, mapping)\n\n @verbose\n def set_montage(self, montage, set_dig=True, verbose=None):\n \"\"\"Set EEG sensor configuration and head digitization.\n\n Parameters\n ----------\n montage : instance of Montage | instance of DigMontage | str | None\n The montage to use (None removes any location information).\n set_dig : bool\n If True, update the digitization information (``info['dig']``)\n in addition to the channel positions (``info['chs'][idx]['loc']``).\n\n .. versionadded: 0.15\n verbose : bool, str, int, or None\n If not None, override default verbose level (see\n :func:`mne.verbose` and :ref:`Logging documentation <tut_logging>`\n for more).\n\n Notes\n -----\n Operates in place.\n\n .. versionadded:: 0.9.0\n \"\"\"\n from .montage import _set_montage\n _set_montage(self.info, montage, set_dig=set_dig)\n return self\n\n def plot_sensors(self, kind='topomap', ch_type=None, title=None,\n show_names=False, ch_groups=None, to_sphere=True,\n axes=None, block=False, show=True):\n \"\"\"Plot sensor positions.\n\n Parameters\n ----------\n kind : str\n Whether to plot the sensors as 3d, topomap or as an interactive\n sensor selection dialog. Available options 'topomap', '3d',\n 'select'. If 'select', a set of channels can be selected\n interactively by using lasso selector or clicking while holding\n control key. The selected channels are returned along with the\n figure instance. Defaults to 'topomap'.\n ch_type : None | str\n The channel type to plot. Available options 'mag', 'grad', 'eeg',\n 'seeg', 'ecog', 'all'. If ``'all'``, all the available mag, grad,\n eeg, seeg and ecog channels are plotted. If None (default), then\n channels are chosen in the order given above.\n title : str | None\n Title for the figure. If None (default), equals to ``'Sensor\n positions (%s)' % ch_type``.\n show_names : bool | array of str\n Whether to display all channel names. If an array, only the channel\n names in the array are shown. Defaults to False.\n ch_groups : 'position' | array of shape (ch_groups, picks) | None\n Channel groups for coloring the sensors. If None (default), default\n coloring scheme is used. If 'position', the sensors are divided\n into 8 regions. See ``order`` kwarg of :func:`mne.viz.plot_raw`. If\n array, the channels are divided by picks given in the array.\n\n .. versionadded:: 0.13.0\n\n to_sphere : bool\n Whether to project the 3d locations to a sphere. When False, the\n sensor array appears similar as to looking downwards straight above\n the subject's head. Has no effect when kind='3d'. Defaults to True.\n\n .. versionadded:: 0.14.0\n\n axes : instance of Axes | instance of Axes3D | None\n Axes to draw the sensors to. If ``kind='3d'``, axes must be an\n instance of Axes3D. If None (default), a new axes will be created.\n\n .. versionadded:: 0.13.0\n\n block : bool\n Whether to halt program execution until the figure is closed.\n Defaults to False.\n\n .. versionadded:: 0.13.0\n\n show : bool\n Show figure if True. Defaults to True.\n\n Returns\n -------\n fig : instance of matplotlib figure\n Figure containing the sensor topography.\n selection : list\n A list of selected channels. Only returned if ``kind=='select'``.\n\n See Also\n --------\n mne.viz.plot_layout\n\n Notes\n -----\n This function plots the sensor locations from the info structure using\n matplotlib. 
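For example, ``inst.plot_sensors(kind='topomap', show_names=True)`` draws a labelled 2d map of the sensor positions. 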
For drawing the sensors using mayavi see\n :func:`mne.viz.plot_alignment`.\n\n .. versionadded:: 0.12.0\n \"\"\"\n from ..viz.utils import plot_sensors\n return plot_sensors(self.info, kind=kind, ch_type=ch_type, title=title,\n show_names=show_names, ch_groups=ch_groups,\n to_sphere=to_sphere, axes=axes, block=block,\n show=show)\n\n @copy_function_doc_to_method_doc(anonymize_info)\n def anonymize(self):\n \"\"\"\n .. versionadded:: 0.13.0\n \"\"\"\n anonymize_info(self.info)\n return self\n\n\nclass UpdateChannelsMixin(object):\n \"\"\"Mixin class for Raw, Evoked, Epochs, AverageTFR.\"\"\"\n\n @verbose\n def pick_types(self, meg=True, eeg=False, stim=False, eog=False,\n ecg=False, emg=False, ref_meg='auto', misc=False,\n resp=False, chpi=False, exci=False, ias=False, syst=False,\n seeg=False, dipole=False, gof=False, bio=False, ecog=False,\n fnirs=False, include=(), exclude='bads', selection=None,\n verbose=None):\n \"\"\"Pick some channels by type and names.\n\n Parameters\n ----------\n meg : bool | str\n If True include all MEG channels. If False include None\n If string it can be 'mag', 'grad', 'planar1' or 'planar2' to select\n only magnetometers, all gradiometers, or a specific type of\n gradiometer.\n eeg : bool\n If True include EEG channels.\n stim : bool\n If True include stimulus channels.\n eog : bool\n If True include EOG channels.\n ecg : bool\n If True include ECG channels.\n emg : bool\n If True include EMG channels.\n ref_meg: bool | str\n If True include CTF / 4D reference channels. If 'auto', the\n reference channels are only included if compensations are present.\n misc : bool\n If True include miscellaneous analog channels.\n resp : bool\n If True include response-trigger channel. For some MEG systems this\n is separate from the stim channel.\n chpi : bool\n If True include continuous HPI coil channels.\n exci : bool\n Flux excitation channel used to be a stimulus channel.\n ias : bool\n Internal Active Shielding data (maybe on Triux only).\n syst : bool\n System status channel information (on Triux systems only).\n seeg : bool\n Stereotactic EEG channels.\n dipole : bool\n Dipole time course channels.\n gof : bool\n Dipole goodness of fit channels.\n bio : bool\n Bio channels.\n ecog : bool\n Electrocorticography channels.\n fnirs : bool | str\n Functional near-infrared spectroscopy channels. If True include all\n fNIRS channels. If False (default) include none. If string it can\n be 'hbo' (to include channels measuring oxyhemoglobin) or 'hbr' (to\n include channels measuring deoxyhemoglobin).\n include : list of string\n List of additional channels to include. If empty do not include\n any.\n exclude : list of string | str\n List of channels to exclude. If 'bads' (default), exclude channels\n in ``info['bads']``.\n selection : list of string\n Restrict sensor channels (MEG, EEG) to this list of channel names.\n verbose : bool, str, int, or None\n If not None, override default verbose level (see\n :func:`mne.verbose` and :ref:`Logging documentation <tut_logging>`\n for more).\n\n Returns\n -------\n inst : instance of Raw, Epochs, or Evoked\n The modified instance.\n\n See Also\n --------\n pick_channels\n\n Notes\n -----\n .. 
versionadded:: 0.9.0\n \"\"\"\n idx = pick_types(\n self.info, meg=meg, eeg=eeg, stim=stim, eog=eog, ecg=ecg, emg=emg,\n ref_meg=ref_meg, misc=misc, resp=resp, chpi=chpi, exci=exci,\n ias=ias, syst=syst, seeg=seeg, dipole=dipole, gof=gof, bio=bio,\n ecog=ecog, fnirs=fnirs, include=include, exclude=exclude,\n selection=selection)\n return self._pick_drop_channels(idx)\n\n def pick_channels(self, ch_names):\n \"\"\"Pick some channels.\n\n Parameters\n ----------\n ch_names : list\n The list of channels to select.\n\n Returns\n -------\n inst : instance of Raw, Epochs, or Evoked\n The modified instance.\n\n See Also\n --------\n drop_channels\n pick_types\n reorder_channels\n\n Notes\n -----\n The channel names given are assumed to be a set, i.e. the order\n does not matter. The original order of the channels is preserved.\n You can use ``reorder_channels`` to set channel order if necessary.\n\n .. versionadded:: 0.9.0\n \"\"\"\n return self._pick_drop_channels(\n pick_channels(self.info['ch_names'], ch_names))\n\n def reorder_channels(self, ch_names):\n \"\"\"Reorder channels.\n\n Parameters\n ----------\n ch_names : list\n The desired channel order.\n\n Returns\n -------\n inst : instance of Raw, Epochs, or Evoked\n The modified instance.\n\n See Also\n --------\n drop_channels\n pick_types\n pick_channels\n\n Notes\n -----\n Channel names must be unique. Channels that are not in ``ch_names``\n are dropped.\n\n .. versionadded:: 0.16.0\n \"\"\"\n _check_excludes_includes(ch_names)\n idx = list()\n for ch_name in ch_names:\n ii = self.ch_names.index(ch_name)\n if ii in idx:\n raise ValueError('Channel name repeated: %s' % (ch_name,))\n idx.append(ii)\n return self._pick_drop_channels(idx)\n\n def drop_channels(self, ch_names):\n \"\"\"Drop some channels.\n\n Parameters\n ----------\n ch_names : list\n List of the names of the channels to remove.\n\n Returns\n -------\n inst : instance of Raw, Epochs, or Evoked\n The modified instance.\n\n See Also\n --------\n reorder_channels\n pick_channels\n pick_types\n\n Notes\n -----\n .. 
versionadded:: 0.9.0\n \"\"\"\n msg = (\"'ch_names' should be a list of strings (the name[s] of the \"\n \"channel to be dropped), not a {0}.\")\n if isinstance(ch_names, string_types):\n raise ValueError(msg.format(\"string\"))\n else:\n if not all([isinstance(ch_name, string_types)\n for ch_name in ch_names]):\n raise ValueError(msg.format(type(ch_names[0])))\n\n missing = [ch_name for ch_name in ch_names\n if ch_name not in self.ch_names]\n if len(missing) > 0:\n msg = \"Channel(s) {0} not found, nothing dropped.\"\n raise ValueError(msg.format(\", \".join(missing)))\n\n bad_idx = [self.ch_names.index(ch_name) for ch_name in ch_names\n if ch_name in self.ch_names]\n idx = np.setdiff1d(np.arange(len(self.ch_names)), bad_idx)\n return self._pick_drop_channels(idx)\n\n def _pick_drop_channels(self, idx):\n # avoid circular imports\n from ..time_frequency import AverageTFR, EpochsTFR\n\n _check_preload(self, 'adding, dropping, or reordering channels')\n\n if getattr(self, 'picks', None) is not None:\n self.picks = self.picks[idx]\n\n if hasattr(self, '_cals'):\n self._cals = self._cals[idx]\n\n pick_info(self.info, idx, copy=False)\n\n if getattr(self, '_projector', None) is not None:\n self._projector = self._projector[idx][:, idx]\n\n # All others (Evoked, Epochs, Raw) have chs axis=-2\n axis = -3 if isinstance(self, (AverageTFR, EpochsTFR)) else -2\n self._data = self._data.take(idx, axis=axis)\n return self\n\n def add_channels(self, add_list, force_update_info=False):\n \"\"\"Append new channels to the instance.\n\n Parameters\n ----------\n add_list : list\n A list of objects to append to self. Must contain all the same\n type as the current object\n force_update_info : bool\n If True, force the info for objects to be appended to match the\n values in `self`. This should generally only be used when adding\n stim channels for which important metadata won't be overwritten.\n\n .. 
versionadded:: 0.12\n\n Returns\n -------\n inst : instance of Raw, Epochs, or Evoked\n The modified instance.\n\n See Also\n --------\n drop_channels\n \"\"\"\n # avoid circular imports\n from ..io import BaseRaw, _merge_info\n from ..epochs import BaseEpochs\n\n _validate_type(add_list, (list, tuple), 'Input')\n\n # Object-specific checks\n for inst in add_list + [self]:\n _check_preload(inst, \"adding channels\")\n if isinstance(self, BaseRaw):\n con_axis = 0\n comp_class = BaseRaw\n elif isinstance(self, BaseEpochs):\n con_axis = 1\n comp_class = BaseEpochs\n else:\n con_axis = 0\n comp_class = type(self)\n for inst in add_list:\n _validate_type(inst, comp_class, 'All input')\n data = [inst._data for inst in [self] + add_list]\n\n # Make sure that all dimensions other than channel axis are the same\n compare_axes = [i for i in range(data[0].ndim) if i != con_axis]\n shapes = np.array([dat.shape for dat in data])[:, compare_axes]\n for shape in shapes:\n if not ((shapes[0] - shape) == 0).all():\n raise AssertionError('All data dimensions except channels '\n 'must match, got %s != %s'\n % (shapes[0], shape))\n\n # Create final data / info objects\n data = np.concatenate(data, axis=con_axis)\n infos = [self.info] + [inst.info for inst in add_list]\n new_info = _merge_info(infos, force_update_to_first=force_update_info)\n\n # Now update the attributes\n self._data = data\n self.info = new_info\n if isinstance(self, BaseRaw):\n self._cals = np.concatenate([getattr(inst, '_cals')\n for inst in [self] + add_list])\n return self\n\n\nclass InterpolationMixin(object):\n \"\"\"Mixin class for Raw, Evoked, Epochs.\"\"\"\n\n @verbose\n def interpolate_bads(self, reset_bads=True, mode='accurate',\n verbose=None):\n \"\"\"Interpolate bad MEG and EEG channels.\n\n Operates in place.\n\n Parameters\n ----------\n reset_bads : bool\n If True, remove the bads from info.\n mode : str\n Either ``'accurate'`` or ``'fast'``, determines the quality of the\n Legendre polynomial expansion used for interpolation of MEG\n channels.\n verbose : bool, str, int, or None\n If not None, override default verbose level (see\n :func:`mne.verbose` and :ref:`Logging documentation <tut_logging>`\n for more).\n\n Returns\n -------\n inst : instance of Raw, Epochs, or Evoked\n The modified instance.\n\n Notes\n -----\n .. versionadded:: 0.9.0\n \"\"\"\n from .interpolation import _interpolate_bads_eeg, _interpolate_bads_meg\n\n _check_preload(self, \"interpolation\")\n\n if len(self.info['bads']) == 0:\n warn('No bad channels to interpolate. Doing nothing...')\n return self\n\n _interpolate_bads_eeg(self)\n _interpolate_bads_meg(self, mode=mode)\n\n if reset_bads is True:\n self.info['bads'] = []\n\n return self\n\n\ndef rename_channels(info, mapping):\n \"\"\"Rename channels.\n\n .. warning:: The channel names must have at most 15 characters\n\n Parameters\n ----------\n info : dict\n Measurement info.\n mapping : dict | callable\n a dictionary mapping the old channel to a new channel name\n e.g. {'EEG061' : 'EEG161'}. 
Can also be a callable function\n that takes and returns a string (new in version 0.10.0).\n \"\"\"\n info._check_consistency()\n bads = list(info['bads']) # make our own local copies\n ch_names = list(info['ch_names'])\n\n # first check and assemble clean mappings of index and name\n if isinstance(mapping, dict):\n orig_names = sorted(list(mapping.keys()))\n missing = [orig_name not in ch_names for orig_name in orig_names]\n if any(missing):\n raise ValueError(\"Channel name(s) in mapping missing from info: \"\n \"%s\" % np.array(orig_names)[np.array(missing)])\n new_names = [(ch_names.index(ch_name), new_name)\n for ch_name, new_name in mapping.items()]\n elif callable(mapping):\n new_names = [(ci, mapping(ch_name))\n for ci, ch_name in enumerate(ch_names)]\n else:\n raise ValueError('mapping must be callable or dict, not %s'\n % (type(mapping),))\n\n # check we got all strings out of the mapping\n for new_name in new_names:\n _validate_type(new_name[1], 'str', 'New channel mappings')\n\n bad_new_names = [name for _, name in new_names if len(name) > 15]\n if len(bad_new_names):\n raise ValueError('Channel names cannot be longer than 15 '\n 'characters. These channel names are not '\n 'valid : %s' % new_names)\n\n # do the remapping locally\n for c_ind, new_name in new_names:\n for bi, bad in enumerate(bads):\n if bad == ch_names[c_ind]:\n bads[bi] = new_name\n ch_names[c_ind] = new_name\n\n # check that all the channel names are unique\n if len(ch_names) != len(np.unique(ch_names)):\n raise ValueError('New channel names are not unique, renaming failed')\n\n # do the reampping in info\n info['bads'] = bads\n for ch, ch_name in zip(info['chs'], ch_names):\n ch['ch_name'] = ch_name\n info._update_redundant()\n info._check_consistency()\n\n\ndef _recursive_flatten(cell, dtype):\n \"\"\"Unpack mat files in Python.\"\"\"\n if len(cell) > 0:\n while not isinstance(cell[0], dtype):\n cell = [c for d in cell for c in d]\n return cell\n\n\ndef read_ch_connectivity(fname, picks=None):\n \"\"\"Parse FieldTrip neighbors .mat file.\n\n More information on these neighbor definitions can be found on the related\n FieldTrip documentation pages:\n http://fieldtrip.fcdonders.nl/template/neighbours\n\n Parameters\n ----------\n fname : str\n The file name. Example: 'neuromag306mag', 'neuromag306planar',\n 'ctf275', 'biosemi64', etc.\n picks : array-like of int, shape (n_channels,)\n The indices of the channels to include. Must match the template.\n Defaults to None.\n\n Returns\n -------\n ch_connectivity : scipy.sparse matrix, shape (n_channels, n_channels)\n The connectivity matrix.\n ch_names : list\n The list of channel names present in connectivity matrix.\n\n See Also\n --------\n find_ch_connectivity\n\n Notes\n -----\n This function is closely related to :func:`find_ch_connectivity`. 
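For example, ``read_ch_connectivity('neuromag306mag')`` loads the bundled neighbor template for Neuromag 306 magnetometers. 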
If you\n don't know the correct file for the neighbor definitions,\n :func:`find_ch_connectivity` can compute the connectivity matrix from 2d\n sensor locations.\n \"\"\"\n from scipy.io import loadmat\n if not op.isabs(fname):\n templates_dir = op.realpath(op.join(op.dirname(__file__),\n 'data', 'neighbors'))\n templates = os.listdir(templates_dir)\n for f in templates:\n if f == fname:\n break\n if f == fname + '_neighb.mat':\n fname += '_neighb.mat'\n break\n else:\n raise ValueError('I do not know about this neighbor '\n 'template: \"{}\"'.format(fname))\n\n fname = op.join(templates_dir, fname)\n\n nb = loadmat(fname)['neighbours']\n ch_names = _recursive_flatten(nb['label'], string_types)\n neighbors = [_recursive_flatten(c, string_types) for c in\n nb['neighblabel'].flatten()]\n assert len(ch_names) == len(neighbors)\n if picks is not None:\n if max(picks) >= len(ch_names):\n raise ValueError('The picks must be compatible with '\n 'channels. Found a pick ({}) which exceeds '\n 'the channel range ({})'\n .format(max(picks), len(ch_names)))\n connectivity = _ch_neighbor_connectivity(ch_names, neighbors)\n if picks is not None:\n # picking before constructing matrix is buggy\n connectivity = connectivity[picks][:, picks]\n ch_names = [ch_names[p] for p in picks]\n return connectivity, ch_names\n\n\ndef _ch_neighbor_connectivity(ch_names, neighbors):\n \"\"\"Compute sensor connectivity matrix.\n\n Parameters\n ----------\n ch_names : list of str\n The channel names.\n neighbors : list of list\n A list of list of channel names. The neighbors to\n which the channels in ch_names are connected with.\n Must be of the same length as ch_names.\n\n Returns\n -------\n ch_connectivity : scipy.sparse matrix\n The connectivity matrix.\n \"\"\"\n if len(ch_names) != len(neighbors):\n raise ValueError('`ch_names` and `neighbors` must '\n 'have the same length')\n set_neighbors = set([c for d in neighbors for c in d])\n rest = set_neighbors - set(ch_names)\n if len(rest) > 0:\n raise ValueError('Some of your neighbors are not present in the '\n 'list of channel names')\n\n for neigh in neighbors:\n if (not isinstance(neigh, list) and\n not all(isinstance(c, string_types) for c in neigh)):\n raise ValueError('`neighbors` must be a list of lists of str')\n\n ch_connectivity = np.eye(len(ch_names), dtype=bool)\n for ii, neigbs in enumerate(neighbors):\n ch_connectivity[ii, [ch_names.index(i) for i in neigbs]] = True\n ch_connectivity = sparse.csr_matrix(ch_connectivity)\n return ch_connectivity\n\n\ndef find_ch_connectivity(info, ch_type):\n \"\"\"Find the connectivity matrix for the given channels.\n\n This function tries to infer the appropriate connectivity matrix template\n for the given channels. If a template is not found, the connectivity matrix\n is computed using Delaunay triangulation based on 2d sensor locations.\n\n Parameters\n ----------\n info : instance of Info\n The measurement info.\n ch_type : str | None\n The channel type for computing the connectivity matrix. Currently\n supports 'mag', 'grad', 'eeg' and None. If None, the info must contain\n only one channel type.\n\n Returns\n -------\n ch_connectivity : scipy.sparse matrix, shape (n_channels, n_channels)\n The connectivity matrix.\n ch_names : list\n The list of channel names present in connectivity matrix.\n\n See Also\n --------\n read_ch_connectivity\n\n Notes\n -----\n .. versionadded:: 0.15\n\n Automatic detection of an appropriate connectivity matrix template only\n works for MEG data at the moment. 
This means that the connectivity matrix\n is always computed for EEG data and never loaded from a template file. If\n you want to load a template for a given montage use\n :func:`read_ch_connectivity` directly.\n \"\"\"\n if ch_type is None:\n picks = channel_indices_by_type(info)\n if sum([len(p) != 0 for p in picks.values()]) != 1:\n raise ValueError('info must contain only one channel type if '\n 'ch_type is None.')\n ch_type = channel_type(info, 0)\n elif ch_type not in ['mag', 'grad', 'eeg']:\n raise ValueError(\"ch_type must be 'mag', 'grad' or 'eeg'. \"\n \"Got %s.\" % ch_type)\n (has_vv_mag, has_vv_grad, is_old_vv, has_4D_mag, ctf_other_types,\n has_CTF_grad, n_kit_grads, has_any_meg, has_eeg_coils,\n has_eeg_coils_and_meg, has_eeg_coils_only) = _get_ch_info(info)\n conn_name = None\n if has_vv_mag and ch_type == 'mag':\n conn_name = 'neuromag306mag'\n elif has_vv_grad and ch_type == 'grad':\n conn_name = 'neuromag306planar'\n elif has_4D_mag:\n if 'MEG 248' in info['ch_names']:\n idx = info['ch_names'].index('MEG 248')\n grad = info['chs'][idx]['coil_type'] == FIFF.FIFFV_COIL_MAGNES_GRAD\n mag = info['chs'][idx]['coil_type'] == FIFF.FIFFV_COIL_MAGNES_MAG\n if ch_type == 'grad' and grad:\n conn_name = 'bti248grad'\n elif ch_type == 'mag' and mag:\n conn_name = 'bti248'\n elif 'MEG 148' in info['ch_names'] and ch_type == 'mag':\n idx = info['ch_names'].index('MEG 148')\n if info['chs'][idx]['coil_type'] == FIFF.FIFFV_COIL_MAGNES_MAG:\n conn_name = 'bti148'\n elif has_CTF_grad and ch_type == 'mag':\n if info['nchan'] < 100:\n conn_name = 'ctf64'\n elif info['nchan'] > 200:\n conn_name = 'ctf275'\n else:\n conn_name = 'ctf151'\n\n if conn_name is not None:\n logger.info('Reading connectivity matrix for %s.' % conn_name)\n return read_ch_connectivity(conn_name)\n logger.info('Could not find a connectivity matrix for the data. '\n 'Computing connectivity based on Delaunay triangulations.')\n return _compute_ch_connectivity(info, ch_type)\n\n\ndef _compute_ch_connectivity(info, ch_type):\n \"\"\"Compute channel connectivity matrix using Delaunay triangulations.\n\n Parameters\n ----------\n info : instance of mne.measuerment_info.Info\n The measurement info.\n ch_type : str\n The channel type for computing the connectivity matrix. Currently\n supports 'mag', 'grad' and 'eeg'.\n\n Returns\n -------\n ch_connectivity : scipy.sparse matrix, shape (n_channels, n_channels)\n The connectivity matrix.\n ch_names : list\n The list of channel names present in connectivity matrix.\n \"\"\"\n from scipy.spatial import Delaunay\n from .. import spatial_tris_connectivity\n from ..channels.layout import _auto_topomap_coords, _pair_grad_sensors\n combine_grads = (ch_type == 'grad' and FIFF.FIFFV_COIL_VV_PLANAR_T1 in\n np.unique([ch['coil_type'] for ch in info['chs']]))\n\n picks = dict(_picks_by_type(info, exclude=[]))[ch_type]\n ch_names = [info['ch_names'][pick] for pick in picks]\n if combine_grads:\n pairs = _pair_grad_sensors(info, topomap_coords=False, exclude=[])\n if len(pairs) != len(picks):\n raise RuntimeError('Cannot find a pair for some of the '\n 'gradiometers. 
Cannot compute connectivity '\n 'matrix.')\n xy = _auto_topomap_coords(info, picks[::2]) # only for one of the pair\n else:\n xy = _auto_topomap_coords(info, picks)\n tri = Delaunay(xy)\n neighbors = spatial_tris_connectivity(tri.simplices)\n\n if combine_grads:\n ch_connectivity = np.eye(len(picks), dtype=bool)\n for idx, neigbs in zip(neighbors.row, neighbors.col):\n for ii in range(2): # make sure each pair is included\n for jj in range(2):\n ch_connectivity[idx * 2 + ii, neigbs * 2 + jj] = True\n ch_connectivity[idx * 2 + ii, idx * 2 + jj] = True # pair\n ch_connectivity = sparse.csr_matrix(ch_connectivity)\n else:\n ch_connectivity = sparse.lil_matrix(neighbors)\n ch_connectivity.setdiag(np.repeat(1, ch_connectivity.shape[0]))\n ch_connectivity = ch_connectivity.tocsr()\n\n return ch_connectivity, ch_names\n\n\ndef fix_mag_coil_types(info):\n \"\"\"Fix magnetometer coil types.\n\n Parameters\n ----------\n info : dict\n The info dict to correct. Corrections are done in-place.\n\n Notes\n -----\n This function changes magnetometer coil types 3022 (T1: SQ20483N) and\n 3023 (T2: SQ20483-A) to 3024 (T3: SQ20950N) in the channel definition\n records in the info structure.\n\n Neuromag Vectorview systems can contain magnetometers with two\n different coil sizes (3022 and 3023 vs. 3024). The systems\n incorporating coils of type 3024 were introduced last and are used at\n the majority of MEG sites. At some sites with 3024 magnetometers,\n the data files have still defined the magnetometers to be of type\n 3022 to ensure compatibility with older versions of Neuromag software.\n In the MNE software as well as in the present version of Neuromag\n software coil type 3024 is fully supported. Therefore, it is now safe\n to upgrade the data files to use the true coil type.\n\n .. note:: The effect of the difference between the coil sizes on the\n current estimates computed by the MNE software is very small.\n Therefore the use of mne_fix_mag_coil_types is not mandatory.\n \"\"\"\n old_mag_inds = _get_T1T2_mag_inds(info)\n\n for ii in old_mag_inds:\n info['chs'][ii]['coil_type'] = FIFF.FIFFV_COIL_VV_MAG_T3\n logger.info('%d of %d T1/T2 magnetometer types replaced with T3.' 
%\n (len(old_mag_inds), len(pick_types(info, meg='mag'))))\n info._check_consistency()\n\n\ndef _get_T1T2_mag_inds(info):\n \"\"\"Find T1/T2 magnetometer coil types.\"\"\"\n picks = pick_types(info, meg='mag')\n old_mag_inds = []\n for ii in picks:\n ch = info['chs'][ii]\n if ch['coil_type'] in (FIFF.FIFFV_COIL_VV_MAG_T1,\n FIFF.FIFFV_COIL_VV_MAG_T2):\n old_mag_inds.append(ii)\n return old_mag_inds\n\n\ndef _get_ch_info(info):\n \"\"\"Get channel info for inferring acquisition device.\"\"\"\n chs = info['chs']\n # Only take first 16 bits, as higher bits store CTF comp order\n coil_types = set([ch['coil_type'] & 0xFFFF for ch in chs])\n channel_types = set([ch['kind'] for ch in chs])\n\n has_vv_mag = any(k in coil_types for k in\n [FIFF.FIFFV_COIL_VV_MAG_T1, FIFF.FIFFV_COIL_VV_MAG_T2,\n FIFF.FIFFV_COIL_VV_MAG_T3])\n has_vv_grad = any(k in coil_types for k in [FIFF.FIFFV_COIL_VV_PLANAR_T1,\n FIFF.FIFFV_COIL_VV_PLANAR_T2,\n FIFF.FIFFV_COIL_VV_PLANAR_T3])\n\n is_old_vv = ' ' in chs[0]['ch_name']\n\n has_4D_mag = FIFF.FIFFV_COIL_MAGNES_MAG in coil_types\n ctf_other_types = (FIFF.FIFFV_COIL_CTF_REF_MAG,\n FIFF.FIFFV_COIL_CTF_REF_GRAD,\n FIFF.FIFFV_COIL_CTF_OFFDIAG_REF_GRAD)\n has_CTF_grad = (FIFF.FIFFV_COIL_CTF_GRAD in coil_types or\n (FIFF.FIFFV_MEG_CH in channel_types and\n any(k in ctf_other_types for k in coil_types)))\n # hack due to MNE-C bug in IO of CTF\n # only take first 16 bits, as higher bits store CTF comp order\n n_kit_grads = sum(ch['coil_type'] & 0xFFFF == FIFF.FIFFV_COIL_KIT_GRAD\n for ch in chs)\n\n has_any_meg = any([has_vv_mag, has_vv_grad, has_4D_mag, has_CTF_grad,\n n_kit_grads])\n has_eeg_coils = (FIFF.FIFFV_COIL_EEG in coil_types and\n FIFF.FIFFV_EEG_CH in channel_types)\n has_eeg_coils_and_meg = has_eeg_coils and has_any_meg\n has_eeg_coils_only = has_eeg_coils and not has_any_meg\n\n return (has_vv_mag, has_vv_grad, is_old_vv, has_4D_mag, ctf_other_types,\n has_CTF_grad, n_kit_grads, has_any_meg, has_eeg_coils,\n has_eeg_coils_and_meg, has_eeg_coils_only)\n\n\ndef make_1020_channel_selections(info, midline=\"z\"):\n \"\"\"Return dict mapping from ROI names to lists of picks for 10/20 setups.\n\n This passes through all channel names, and uses a simple heuristic to\n separate channel names into three Region of Interest-based selections:\n Left, Midline and Right. The heuristic is that channels ending on any of\n the characters in `midline` are filed under that heading, otherwise those\n ending in odd numbers under \"Left\", those in even numbers under \"Right\".\n Other channels are ignored. This is appropriate for 10/20 files, but not\n for other channel naming conventions.\n If an info object is provided, lists are sorted from posterior to anterior.\n\n Parameters\n ----------\n info : instance of info\n Where to obtain the channel names from. The picks will\n be in relation to the position in `info[\"ch_names\"]`. If possible, this\n lists will be sorted by y value position of the channel locations,\n i.e., from back to front.\n midline : str\n Names ending in any of these characters are stored under the `Midline`\n key. Defaults to 'z'. 
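With the default, 10/20 channels such as ``Cz``, ``Pz`` or ``Oz`` therefore end up in the ``Midline`` selection. 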
Note that capitalization is ignored.\n\n Returns\n -------\n selections : dict\n A dictionary mapping from ROI names to lists of picks (integers).\n \"\"\"\n _validate_type(info, \"info\")\n\n try:\n from .layout import find_layout\n layout = find_layout(info)\n pos = layout.pos\n ch_names = layout.names\n except RuntimeError: # no channel positions found\n ch_names = info[\"ch_names\"]\n pos = None\n\n selections = dict(Left=[], Midline=[], Right=[])\n for pick, channel in enumerate(ch_names):\n last_char = channel[-1].lower() # in 10/20, last char codes hemisphere\n if last_char in midline:\n selection = \"Midline\"\n elif last_char.isdigit():\n selection = \"Left\" if int(last_char) % 2 else \"Right\"\n else: # ignore the channel\n continue\n selections[selection].append(pick)\n\n if pos is not None:\n # sort channels from front to center\n # (y-coordinate of the position info in the layout)\n selections = {selection: np.array(picks)[pos[picks, 1].argsort()]\n for selection, picks in selections.items()}\n\n return selections\n", "\"\"\"\nSource localization with MNE/dSPM/sLORETA/eLORETA\n=================================================\n\nThe aim of this tutorial is to teach you how to compute and apply a linear\ninverse method such as MNE/dSPM/sLORETA/eLORETA on evoked/raw/epochs data.\n\"\"\"\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nimport mne\nfrom mne.datasets import sample\nfrom mne.minimum_norm import make_inverse_operator, apply_inverse\n\n# sphinx_gallery_thumbnail_number = 9\n\n###############################################################################\n# Process MEG data\n\ndata_path = sample.data_path()\nraw_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw.fif'\n\nraw = mne.io.read_raw_fif(raw_fname) # already has an average reference\nevents = mne.find_events(raw, stim_channel='STI 014')\n\nevent_id = dict(aud_r=1) # event trigger and conditions\ntmin = -0.2 # start of each epoch (200ms before the trigger)\ntmax = 0.5 # end of each epoch (500ms after the trigger)\nraw.info['bads'] = ['MEG 2443', 'EEG 053']\npicks = mne.pick_types(raw.info, meg=True, eeg=False, eog=True,\n exclude='bads')\nbaseline = (None, 0) # means from the first instant to t = 0\nreject = dict(grad=4000e-13, mag=4e-12, eog=150e-6)\n\nepochs = mne.Epochs(raw, events, event_id, tmin, tmax, proj=True, picks=picks,\n baseline=baseline, reject=reject)\n\n###############################################################################\n# Compute regularized noise covariance\n# ------------------------------------\n#\n# For more details see :ref:`tut_compute_covariance`.\n\nnoise_cov = mne.compute_covariance(\n epochs, tmax=0., method=['shrunk', 'empirical'], verbose=True)\n\nfig_cov, fig_spectra = mne.viz.plot_cov(noise_cov, raw.info)\n\n###############################################################################\n# Compute the evoked response\n# ---------------------------\n# Let's just use MEG channels for simplicity.\n\nevoked = epochs.average().pick_types(meg=True)\nevoked.plot(time_unit='s')\nevoked.plot_topomap(times=np.linspace(0.05, 0.15, 5), ch_type='mag',\n time_unit='s')\n\n# Show whitening\nevoked.plot_white(noise_cov, time_unit='s')\n\ndel epochs # to save memory\n\n###############################################################################\n# Inverse modeling: MNE/dSPM on evoked and raw data\n# -------------------------------------------------\n\n# Read the forward solution and compute the inverse operator\nfname_fwd = data_path + 
'/MEG/sample/sample_audvis-meg-oct-6-fwd.fif'\nfwd = mne.read_forward_solution(fname_fwd)\n\n# make an MEG inverse operator\ninfo = evoked.info\ninverse_operator = make_inverse_operator(info, fwd, noise_cov,\n loose=0.2, depth=0.8)\ndel fwd\n\n# You can write it to disk with::\n#\n# >>> from mne.minimum_norm import write_inverse_operator\n# >>> write_inverse_operator('sample_audvis-meg-oct-6-inv.fif',\n# inverse_operator)\n\n###############################################################################\n# Compute inverse solution\n# ------------------------\n\nmethod = \"dSPM\"\nsnr = 3.\nlambda2 = 1. / snr ** 2\nstc = apply_inverse(evoked, inverse_operator, lambda2,\n method=method, pick_ori=None)\n\n###############################################################################\n# Visualization\n# -------------\n# View activation time-series\n\nplt.figure()\nplt.plot(1e3 * stc.times, stc.data[::100, :].T)\nplt.xlabel('time (ms)')\nplt.ylabel('%s value' % method)\nplt.show()\n\n###############################################################################\n# Here we use peak getter to move visualization to the time point of the peak\n# and draw a marker at the maximum peak vertex.\n\nvertno_max, time_max = stc.get_peak(hemi='rh')\n\nsubjects_dir = data_path + '/subjects'\nsurfer_kwargs = dict(\n hemi='rh', subjects_dir=subjects_dir,\n clim=dict(kind='value', lims=[8, 12, 15]), views='lateral',\n initial_time=time_max, time_unit='s', size=(800, 800), smoothing_steps=5)\nbrain = stc.plot(**surfer_kwargs)\nbrain.add_foci(vertno_max, coords_as_verts=True, hemi='rh', color='blue',\n scale_factor=0.6, alpha=0.5)\nbrain.add_text(0.1, 0.9, 'dSPM (plus location of maximal activation)', 'title',\n font_size=14)\n\n###############################################################################\n# Morph data to average brain\n# ---------------------------\n\nfs_vertices = [np.arange(10242)] * 2 # fsaverage is special this way\nmorph_mat = mne.compute_morph_matrix(\n 'sample', 'fsaverage', stc.vertices, fs_vertices, smooth=None,\n subjects_dir=subjects_dir)\nstc_fsaverage = stc.morph_precomputed('fsaverage', fs_vertices, morph_mat)\nbrain = stc_fsaverage.plot(**surfer_kwargs)\nbrain.add_text(0.1, 0.9, 'Morphed to fsaverage', 'title', font_size=20)\ndel stc_fsaverage\n\n###############################################################################\n# Dipole orientations\n# -------------------\n# The ``pick_ori`` parameter of the\n# :func:`mne.minimum_norm.apply_inverse` function controls\n# the orientation of the dipoles. One useful setting is ``pick_ori='vector'``,\n# which will return an estimate that does not only contain the source power at\n# each dipole, but also the orientation of the dipoles.\n\nstc_vec = apply_inverse(evoked, inverse_operator, lambda2,\n method=method, pick_ori='vector')\nbrain = stc_vec.plot(**surfer_kwargs)\nbrain.add_text(0.1, 0.9, 'Vector solution', 'title', font_size=20)\ndel stc_vec\n\n###############################################################################\n# Note that there is a relationship between the orientation of the dipoles and\n# the surface of the cortex. 
For this reason, we do not use an inflated\n# cortical surface for visualization, but the original surface used to define\n# the source space.\n#\n# For more information about dipole orientations, see\n# :ref:`sphx_glr_auto_tutorials_plot_dipole_orientations.py`.\n\n###############################################################################\n# Now let's look at each solver:\n\nfor mi, (method, lims) in enumerate((('dSPM', [8, 12, 15]),\n ('sLORETA', [3, 5, 7]),\n ('eLORETA', [0.75, 1.25, 1.75]),)):\n surfer_kwargs['clim']['lims'] = lims\n stc = apply_inverse(evoked, inverse_operator, lambda2,\n method=method, pick_ori=None)\n brain = stc.plot(figure=mi, **surfer_kwargs)\n brain.add_text(0.1, 0.9, method, 'title', font_size=20)\n del stc\n", "# Author: Eric Larson <[email protected]>\n#\n# License: BSD (3-clause)\n\nimport os.path as op\nimport warnings\nimport matplotlib\n\nimport numpy as np\nfrom numpy.testing import (assert_array_almost_equal, assert_allclose,\n assert_equal)\nimport pytest\n\nfrom mne import find_events, Epochs, pick_types, channels\nfrom mne.io import read_raw_fif\nfrom mne.io.array import RawArray\nfrom mne.io.tests.test_raw import _test_raw_reader\nfrom mne.io.meas_info import create_info, _kind_dict\nfrom mne.utils import requires_version, run_tests_if_main\n\nmatplotlib.use('Agg') # for testing don't use X server\n\nwarnings.simplefilter('always') # enable b/c these tests might throw warnings\n\nbase_dir = op.join(op.dirname(__file__), '..', '..', 'tests', 'data')\nfif_fname = op.join(base_dir, 'test_raw.fif')\n\n\ndef test_long_names():\n \"\"\"Test long name support.\"\"\"\n info = create_info(['a' * 15 + 'b', 'a' * 16], 1000., verbose='error')\n data = np.empty((2, 1000))\n raw = RawArray(data, info)\n assert raw.ch_names == ['a' * 13 + '-0', 'a' * 13 + '-1']\n info = create_info(['a' * 16] * 11, 1000., verbose='error')\n data = np.empty((11, 1000))\n raw = RawArray(data, info)\n assert raw.ch_names == ['a' * 12 + '-%s' % ii for ii in range(11)]\n\n\[email protected]\n@requires_version('scipy', '0.12')\ndef test_array_raw():\n \"\"\"Test creating raw from array.\"\"\"\n import matplotlib.pyplot as plt\n # creating\n raw = read_raw_fif(fif_fname).crop(2, 5)\n data, times = raw[:, :]\n sfreq = raw.info['sfreq']\n ch_names = [(ch[4:] if 'STI' not in ch else ch)\n for ch in raw.info['ch_names']] # change them, why not\n # del raw\n types = list()\n for ci in range(101):\n types.extend(('grad', 'grad', 'mag'))\n types.extend(['ecog', 'seeg', 'hbo']) # really 3 meg channels\n types.extend(['stim'] * 9)\n types.extend(['eeg'] * 60)\n # wrong length\n pytest.raises(ValueError, create_info, ch_names, sfreq, types)\n # bad entry\n types.append('foo')\n pytest.raises(KeyError, create_info, ch_names, sfreq, types)\n types[-1] = 'eog'\n # default type\n info = create_info(ch_names, sfreq)\n assert_equal(info['chs'][0]['kind'], _kind_dict['misc'][0])\n # use real types\n info = create_info(ch_names, sfreq, types)\n raw2 = _test_raw_reader(RawArray, test_preloading=False,\n data=data, info=info, first_samp=2 * data.shape[1])\n data2, times2 = raw2[:, :]\n assert_allclose(data, data2)\n assert_allclose(times, times2)\n assert ('RawArray' in repr(raw2))\n pytest.raises(TypeError, RawArray, info, data)\n\n # filtering\n picks = pick_types(raw2.info, misc=True, exclude='bads')[:4]\n assert_equal(len(picks), 4)\n raw_lp = raw2.copy()\n kwargs = dict(fir_design='firwin', picks=picks)\n raw_lp.filter(None, 4.0, h_trans_bandwidth=4., n_jobs=2, **kwargs)\n raw_hp = raw2.copy()\n 
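# The band edges and 4 Hz transition widths used for these filtered copies\n    # make the low-, band- and high-pass versions spectrally complementary\n    # (and the band-stop version the complement of the band-pass), so the\n    # assert_array_almost_equal checks further down can rely on the filtered\n    # pieces summing back to the original data.\n    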
raw_hp.filter(16.0, None, l_trans_bandwidth=4., n_jobs=2, **kwargs)\n raw_bp = raw2.copy()\n raw_bp.filter(8.0, 12.0, l_trans_bandwidth=4., h_trans_bandwidth=4.,\n **kwargs)\n raw_bs = raw2.copy()\n raw_bs.filter(16.0, 4.0, l_trans_bandwidth=4., h_trans_bandwidth=4.,\n n_jobs=2, **kwargs)\n data, _ = raw2[picks, :]\n lp_data, _ = raw_lp[picks, :]\n hp_data, _ = raw_hp[picks, :]\n bp_data, _ = raw_bp[picks, :]\n bs_data, _ = raw_bs[picks, :]\n sig_dec = 15\n assert_array_almost_equal(data, lp_data + bp_data + hp_data, sig_dec)\n assert_array_almost_equal(data, bp_data + bs_data, sig_dec)\n\n # plotting\n raw2.plot()\n raw2.plot_psd(tmax=np.inf, average=True, n_fft=1024, spatial_colors=False)\n plt.close('all')\n\n # epoching\n events = find_events(raw2, stim_channel='STI 014')\n events[:, 2] = 1\n assert (len(events) > 2)\n epochs = Epochs(raw2, events, 1, -0.2, 0.4, preload=True)\n epochs.plot_drop_log()\n epochs.plot()\n evoked = epochs.average()\n evoked.plot(time_unit='s')\n assert_equal(evoked.nave, len(events) - 1)\n plt.close('all')\n\n # complex data\n rng = np.random.RandomState(0)\n data = rng.randn(1, 100) + 1j * rng.randn(1, 100)\n raw = RawArray(data, create_info(1, 1000., 'eeg'))\n assert_allclose(raw._data, data)\n\n # Using digital montage to give MNI electrode coordinates\n n_elec = 10\n ts_size = 10000\n Fs = 512.\n elec_labels = [str(i) for i in range(n_elec)]\n elec_coords = np.random.randint(60, size=(n_elec, 3)).tolist()\n\n electrode = np.random.rand(n_elec, ts_size)\n dig_ch_pos = dict(zip(elec_labels, elec_coords))\n mon = channels.DigMontage(dig_ch_pos=dig_ch_pos)\n info = create_info(elec_labels, Fs, 'ecog', montage=mon)\n\n raw = RawArray(electrode, info)\n raw.plot_psd(average=False) # looking for inexistent layout\n raw.plot_psd_topo()\n\n\nrun_tests_if_main()\n", "import numpy as np\nimport os.path as op\nimport warnings\n\nfrom numpy.testing import (assert_array_almost_equal, assert_array_equal,\n assert_equal)\nimport pytest\n\nimport mne\nfrom mne import Epochs, read_events, pick_types, create_info, EpochsArray\nfrom mne.io import read_raw_fif\nfrom mne.utils import _TempDir, run_tests_if_main, requires_h5py, grand_average\nfrom mne.time_frequency.tfr import (morlet, tfr_morlet, _make_dpss,\n tfr_multitaper, AverageTFR, read_tfrs,\n write_tfrs, combine_tfr, cwt, _compute_tfr,\n EpochsTFR)\nfrom mne.time_frequency import tfr_array_multitaper, tfr_array_morlet\nfrom mne.viz.utils import _fake_click\nfrom itertools import product\nimport matplotlib\nmatplotlib.use('Agg') # for testing don't use X server\n\ndata_path = op.join(op.dirname(__file__), '..', '..', 'io', 'tests', 'data')\nraw_fname = op.join(data_path, 'test_raw.fif')\nevent_fname = op.join(data_path, 'test-eve.fif')\nraw_ctf_fname = op.join(data_path, 'test_ctf_raw.fif')\n\n\ndef test_tfr_ctf():\n \"\"\"Test that TFRs can be calculated on CTF data.\"\"\"\n raw = read_raw_fif(raw_ctf_fname).crop(0, 1)\n raw.apply_gradient_compensation(3)\n events = mne.make_fixed_length_events(raw, duration=0.5)\n epochs = mne.Epochs(raw, events)\n for method in (tfr_multitaper, tfr_morlet):\n method(epochs, [10], 1) # smoke test\n\n\ndef test_morlet():\n \"\"\"Test morlet with and without zero mean.\"\"\"\n Wz = morlet(1000, [10], 2., zero_mean=True)\n W = morlet(1000, [10], 2., zero_mean=False)\n\n assert (np.abs(np.mean(np.real(Wz[0]))) < 1e-5)\n assert (np.abs(np.mean(np.real(W[0]))) > 1e-3)\n\n\ndef test_time_frequency():\n \"\"\"Test time-frequency transform (PSD and ITC).\"\"\"\n # Set parameters\n event_id = 
1\n tmin = -0.2\n tmax = 0.498 # Allows exhaustive decimation testing\n\n # Setup for reading the raw data\n raw = read_raw_fif(raw_fname)\n events = read_events(event_fname)\n\n include = []\n exclude = raw.info['bads'] + ['MEG 2443', 'EEG 053'] # bads + 2 more\n\n # picks MEG gradiometers\n picks = pick_types(raw.info, meg='grad', eeg=False,\n stim=False, include=include, exclude=exclude)\n\n picks = picks[:2]\n epochs = Epochs(raw, events, event_id, tmin, tmax, picks=picks)\n data = epochs.get_data()\n times = epochs.times\n nave = len(data)\n\n epochs_nopicks = Epochs(raw, events, event_id, tmin, tmax)\n\n freqs = np.arange(6, 20, 5) # define frequencies of interest\n n_cycles = freqs / 4.\n\n # Test first with a single epoch\n power, itc = tfr_morlet(epochs[0], freqs=freqs, n_cycles=n_cycles,\n use_fft=True, return_itc=True)\n # Now compute evoked\n evoked = epochs.average()\n power_evoked = tfr_morlet(evoked, freqs, n_cycles, use_fft=True,\n return_itc=False)\n pytest.raises(ValueError, tfr_morlet, evoked, freqs, 1., return_itc=True)\n power, itc = tfr_morlet(epochs, freqs=freqs, n_cycles=n_cycles,\n use_fft=True, return_itc=True)\n power_, itc_ = tfr_morlet(epochs, freqs=freqs, n_cycles=n_cycles,\n use_fft=True, return_itc=True, decim=slice(0, 2))\n # Test picks argument and average parameter\n pytest.raises(ValueError, tfr_morlet, epochs, freqs=freqs,\n n_cycles=n_cycles, return_itc=True, average=False)\n\n power_picks, itc_picks = \\\n tfr_morlet(epochs_nopicks,\n freqs=freqs, n_cycles=n_cycles, use_fft=True,\n return_itc=True, picks=picks, average=True)\n\n epochs_power_picks = \\\n tfr_morlet(epochs_nopicks,\n freqs=freqs, n_cycles=n_cycles, use_fft=True,\n return_itc=False, picks=picks, average=False)\n power_picks_avg = epochs_power_picks.average()\n # the actual data arrays here are equivalent, too...\n assert_array_almost_equal(power.data, power_picks.data)\n assert_array_almost_equal(power.data, power_picks_avg.data)\n assert_array_almost_equal(itc.data, itc_picks.data)\n assert_array_almost_equal(power.data, power_evoked.data)\n # complex output\n pytest.raises(ValueError, tfr_morlet, epochs, freqs, n_cycles,\n return_itc=False, average=True, output=\"complex\")\n pytest.raises(ValueError, tfr_morlet, epochs, freqs, n_cycles,\n output=\"complex\", average=False, return_itc=True)\n epochs_power_complex = tfr_morlet(epochs, freqs, n_cycles,\n output=\"complex\", average=False,\n return_itc=False)\n epochs_power_2 = abs(epochs_power_complex)\n epochs_power_3 = epochs_power_2.copy()\n epochs_power_3.data[:] = np.inf # test that it's actually copied\n assert_array_almost_equal(epochs_power_2.data, epochs_power_picks.data)\n power_2 = epochs_power_2.average()\n assert_array_almost_equal(power_2.data, power.data)\n\n print(itc) # test repr\n print(itc.ch_names) # test property\n itc += power # test add\n itc -= power # test sub\n\n power = power.apply_baseline(baseline=(-0.1, 0), mode='logratio')\n\n assert 'meg' in power\n assert 'grad' in power\n assert 'mag' not in power\n assert 'eeg' not in power\n\n assert_equal(power.nave, nave)\n assert_equal(itc.nave, nave)\n assert (power.data.shape == (len(picks), len(freqs), len(times)))\n assert (power.data.shape == itc.data.shape)\n assert (power_.data.shape == (len(picks), len(freqs), 2))\n assert (power_.data.shape == itc_.data.shape)\n assert (np.sum(itc.data >= 1) == 0)\n assert (np.sum(itc.data <= 0) == 0)\n\n # grand average\n itc2 = itc.copy()\n itc2.info['bads'] = [itc2.ch_names[0]] # test channel drop\n gave = 
grand_average([itc2, itc])\n assert_equal(gave.data.shape, (itc2.data.shape[0] - 1,\n itc2.data.shape[1],\n itc2.data.shape[2]))\n assert_equal(itc2.ch_names[1:], gave.ch_names)\n assert_equal(gave.nave, 2)\n itc2.drop_channels(itc2.info[\"bads\"])\n assert_array_almost_equal(gave.data, itc2.data)\n itc2.data = np.ones(itc2.data.shape)\n itc.data = np.zeros(itc.data.shape)\n itc2.nave = 2\n itc.nave = 1\n itc.drop_channels([itc.ch_names[0]])\n combined_itc = combine_tfr([itc2, itc])\n assert_array_almost_equal(combined_itc.data,\n np.ones(combined_itc.data.shape) * 2 / 3)\n\n # more tests\n power, itc = tfr_morlet(epochs, freqs=freqs, n_cycles=2, use_fft=False,\n return_itc=True)\n\n assert (power.data.shape == (len(picks), len(freqs), len(times)))\n assert (power.data.shape == itc.data.shape)\n assert (np.sum(itc.data >= 1) == 0)\n assert (np.sum(itc.data <= 0) == 0)\n\n tfr = tfr_morlet(epochs[0], freqs, use_fft=True, n_cycles=2, average=False,\n return_itc=False).data[0]\n assert (tfr.shape == (len(picks), len(freqs), len(times)))\n tfr2 = tfr_morlet(epochs[0], freqs, use_fft=True, n_cycles=2,\n decim=slice(0, 2), average=False,\n return_itc=False).data[0]\n assert (tfr2.shape == (len(picks), len(freqs), 2))\n\n single_power = tfr_morlet(epochs, freqs, 2, average=False,\n return_itc=False).data\n single_power2 = tfr_morlet(epochs, freqs, 2, decim=slice(0, 2),\n average=False, return_itc=False).data\n single_power3 = tfr_morlet(epochs, freqs, 2, decim=slice(1, 3),\n average=False, return_itc=False).data\n single_power4 = tfr_morlet(epochs, freqs, 2, decim=slice(2, 4),\n average=False, return_itc=False).data\n\n assert_array_almost_equal(np.mean(single_power, axis=0), power.data)\n assert_array_almost_equal(np.mean(single_power2, axis=0),\n power.data[:, :, :2])\n assert_array_almost_equal(np.mean(single_power3, axis=0),\n power.data[:, :, 1:3])\n assert_array_almost_equal(np.mean(single_power4, axis=0),\n power.data[:, :, 2:4])\n\n power_pick = power.pick_channels(power.ch_names[:10:2])\n assert_equal(len(power_pick.ch_names), len(power.ch_names[:10:2]))\n assert_equal(power_pick.data.shape[0], len(power.ch_names[:10:2]))\n power_drop = power.drop_channels(power.ch_names[1:10:2])\n assert_equal(power_drop.ch_names, power_pick.ch_names)\n assert_equal(power_pick.data.shape[0], len(power_drop.ch_names))\n\n mne.equalize_channels([power_pick, power_drop])\n assert_equal(power_pick.ch_names, power_drop.ch_names)\n assert_equal(power_pick.data.shape, power_drop.data.shape)\n\n # Test decimation:\n # 2: multiple of len(times) even\n # 3: multiple odd\n # 8: not multiple, even\n # 9: not multiple, odd\n for decim in [2, 3, 8, 9]:\n for use_fft in [True, False]:\n power, itc = tfr_morlet(epochs, freqs=freqs, n_cycles=2,\n use_fft=use_fft, return_itc=True,\n decim=decim)\n assert_equal(power.data.shape[2],\n np.ceil(float(len(times)) / decim))\n freqs = list(range(50, 55))\n decim = 2\n _, n_chan, n_time = data.shape\n tfr = tfr_morlet(epochs[0], freqs, 2., decim=decim, average=False,\n return_itc=False).data[0]\n assert_equal(tfr.shape, (n_chan, len(freqs), n_time // decim))\n\n # Test cwt modes\n Ws = morlet(512, [10, 20], n_cycles=2)\n pytest.raises(ValueError, cwt, data[0, :, :], Ws, mode='foo')\n for use_fft in [True, False]:\n for mode in ['same', 'valid', 'full']:\n cwt(data[0], Ws, use_fft=use_fft, mode=mode)\n\n # Test decim parameter checks\n pytest.raises(TypeError, tfr_morlet, epochs, freqs=freqs,\n n_cycles=n_cycles, use_fft=True, return_itc=True,\n decim='decim')\n\n # When 
convolving in time, wavelets must not be longer than the data\n pytest.raises(ValueError, cwt, data[0, :, :Ws[0].size - 1], Ws,\n use_fft=False)\n with warnings.catch_warnings(record=True) as w:\n cwt(data[0, :, :Ws[0].size - 1], Ws, use_fft=True)\n assert_equal(len(w), 1)\n\n # Check for off-by-one errors when using wavelets with an even number of\n # samples\n psd = cwt(data[0], [Ws[0][:-1]], use_fft=False, mode='full')\n assert_equal(psd.shape, (2, 1, 420))\n\n\ndef test_dpsswavelet():\n \"\"\"Test DPSS tapers.\"\"\"\n freqs = np.arange(5, 25, 3)\n Ws = _make_dpss(1000, freqs=freqs, n_cycles=freqs / 2., time_bandwidth=4.0,\n zero_mean=True)\n\n assert (len(Ws) == 3) # 3 tapers expected\n\n # Check that zero mean is true\n assert (np.abs(np.mean(np.real(Ws[0][0]))) < 1e-5)\n\n assert (len(Ws[0]) == len(freqs)) # As many wavelets as asked for\n\n\[email protected]\ndef test_tfr_multitaper():\n \"\"\"Test tfr_multitaper.\"\"\"\n sfreq = 200.0\n ch_names = ['SIM0001', 'SIM0002']\n ch_types = ['grad', 'grad']\n info = create_info(ch_names=ch_names, sfreq=sfreq, ch_types=ch_types)\n\n n_times = int(sfreq) # Second long epochs\n n_epochs = 3\n seed = 42\n rng = np.random.RandomState(seed)\n noise = 0.1 * rng.randn(n_epochs, len(ch_names), n_times)\n t = np.arange(n_times, dtype=np.float) / sfreq\n signal = np.sin(np.pi * 2. * 50. * t) # 50 Hz sinusoid signal\n signal[np.logical_or(t < 0.45, t > 0.55)] = 0. # Hard windowing\n on_time = np.logical_and(t >= 0.45, t <= 0.55)\n signal[on_time] *= np.hanning(on_time.sum()) # Ramping\n dat = noise + signal\n\n reject = dict(grad=4000.)\n events = np.empty((n_epochs, 3), int)\n first_event_sample = 100\n event_id = dict(sin50hz=1)\n for k in range(n_epochs):\n events[k, :] = first_event_sample + k * n_times, 0, event_id['sin50hz']\n\n epochs = EpochsArray(data=dat, info=info, events=events, event_id=event_id,\n reject=reject)\n\n freqs = np.arange(35, 70, 5, dtype=np.float)\n\n power, itc = tfr_multitaper(epochs, freqs=freqs, n_cycles=freqs / 2.,\n time_bandwidth=4.0)\n power2, itc2 = tfr_multitaper(epochs, freqs=freqs, n_cycles=freqs / 2.,\n time_bandwidth=4.0, decim=slice(0, 2))\n picks = np.arange(len(ch_names))\n power_picks, itc_picks = tfr_multitaper(epochs, freqs=freqs,\n n_cycles=freqs / 2.,\n time_bandwidth=4.0, picks=picks)\n power_epochs = tfr_multitaper(epochs, freqs=freqs,\n n_cycles=freqs / 2., time_bandwidth=4.0,\n return_itc=False, average=False)\n power_averaged = power_epochs.average()\n power_evoked = tfr_multitaper(epochs.average(), freqs=freqs,\n n_cycles=freqs / 2., time_bandwidth=4.0,\n return_itc=False, average=False).average()\n\n print(power_evoked) # test repr for EpochsTFR\n\n # Test channel picking\n power_epochs_picked = power_epochs.copy().drop_channels(['SIM0002'])\n assert_equal(power_epochs_picked.data.shape, (3, 1, 7, 200))\n assert_equal(power_epochs_picked.ch_names, ['SIM0001'])\n\n pytest.raises(ValueError, tfr_multitaper, epochs,\n freqs=freqs, n_cycles=freqs / 2.,\n return_itc=True, average=False)\n\n # test picks argument\n assert_array_almost_equal(power.data, power_picks.data)\n assert_array_almost_equal(power.data, power_averaged.data)\n assert_array_almost_equal(power.times, power_epochs.times)\n assert_array_almost_equal(power.times, power_averaged.times)\n assert_equal(power.nave, power_averaged.nave)\n assert_equal(power_epochs.data.shape, (3, 2, 7, 200))\n assert_array_almost_equal(itc.data, itc_picks.data)\n # one is squared magnitude of the average (evoked) and\n # the other is average of the squared 
magnitudes (epochs PSD)\n # so values shouldn't match, but shapes should\n assert_array_equal(power.data.shape, power_evoked.data.shape)\n pytest.raises(AssertionError, assert_array_almost_equal,\n power.data, power_evoked.data)\n\n tmax = t[np.argmax(itc.data[0, freqs == 50, :])]\n fmax = freqs[np.argmax(power.data[1, :, t == 0.5])]\n assert (tmax > 0.3 and tmax < 0.7)\n assert not np.any(itc.data < 0.)\n assert (fmax > 40 and fmax < 60)\n assert (power2.data.shape == (len(picks), len(freqs), 2))\n assert (power2.data.shape == itc2.data.shape)\n\n # Test decim parameter checks and compatibility between wavelets length\n # and instance length in the time dimension.\n pytest.raises(TypeError, tfr_multitaper, epochs, freqs=freqs,\n n_cycles=freqs / 2., time_bandwidth=4.0, decim=(1,))\n pytest.raises(ValueError, tfr_multitaper, epochs, freqs=freqs,\n n_cycles=1000, time_bandwidth=4.0)\n\n\ndef test_crop():\n \"\"\"Test TFR cropping.\"\"\"\n data = np.zeros((3, 2, 3))\n times = np.array([.1, .2, .3])\n freqs = np.array([.10, .20])\n info = mne.create_info(['MEG 001', 'MEG 002', 'MEG 003'], 1000.,\n ['mag', 'mag', 'mag'])\n tfr = AverageTFR(info, data=data, times=times, freqs=freqs,\n nave=20, comment='test', method='crazy-tfr')\n tfr.crop(0.2, 0.3)\n assert_array_equal(tfr.times, [0.2, 0.3])\n assert_equal(tfr.data.shape[-1], 2)\n\n\n@requires_h5py\ndef test_io():\n \"\"\"Test TFR IO capacities.\"\"\"\n tempdir = _TempDir()\n fname = op.join(tempdir, 'test-tfr.h5')\n data = np.zeros((3, 2, 3))\n times = np.array([.1, .2, .3])\n freqs = np.array([.10, .20])\n\n info = mne.create_info(['MEG 001', 'MEG 002', 'MEG 003'], 1000.,\n ['mag', 'mag', 'mag'])\n tfr = AverageTFR(info, data=data, times=times, freqs=freqs,\n nave=20, comment='test', method='crazy-tfr')\n tfr.save(fname)\n tfr2 = read_tfrs(fname, condition='test')\n\n assert_array_equal(tfr.data, tfr2.data)\n assert_array_equal(tfr.times, tfr2.times)\n assert_array_equal(tfr.freqs, tfr2.freqs)\n assert_equal(tfr.comment, tfr2.comment)\n assert_equal(tfr.nave, tfr2.nave)\n\n pytest.raises(IOError, tfr.save, fname)\n\n tfr.comment = None\n tfr.save(fname, overwrite=True)\n assert_equal(read_tfrs(fname, condition=0).comment, tfr.comment)\n tfr.comment = 'test-A'\n tfr2.comment = 'test-B'\n\n fname = op.join(tempdir, 'test2-tfr.h5')\n write_tfrs(fname, [tfr, tfr2])\n tfr3 = read_tfrs(fname, condition='test-A')\n assert_equal(tfr.comment, tfr3.comment)\n\n assert (isinstance(tfr.info, mne.Info))\n\n tfrs = read_tfrs(fname, condition=None)\n assert_equal(len(tfrs), 2)\n tfr4 = tfrs[1]\n assert_equal(tfr2.comment, tfr4.comment)\n\n pytest.raises(ValueError, read_tfrs, fname, condition='nonono')\n\n # Test save of EpochsTFR.\n data = np.zeros((5, 3, 2, 3))\n tfr = EpochsTFR(info, data=data, times=times, freqs=freqs,\n comment='test', method='crazy-tfr')\n tfr.save(fname, True)\n read_tfr = read_tfrs(fname)[0]\n assert_array_equal(tfr.data, read_tfr.data)\n\n\ndef test_plot():\n \"\"\"Test TFR plotting.\"\"\"\n import matplotlib.pyplot as plt\n\n data = np.zeros((3, 2, 3))\n times = np.array([.1, .2, .3])\n freqs = np.array([.10, .20])\n info = mne.create_info(['MEG 001', 'MEG 002', 'MEG 003'], 1000.,\n ['mag', 'mag', 'mag'])\n tfr = AverageTFR(info, data=data, times=times, freqs=freqs,\n nave=20, comment='test', method='crazy-tfr')\n tfr.plot([1, 2], title='title', colorbar=False,\n mask=np.ones(tfr.data.shape[1:], bool))\n plt.close('all')\n ax = plt.subplot2grid((2, 2), (0, 0))\n ax2 = plt.subplot2grid((2, 2), (1, 1))\n ax3 = plt.subplot2grid((2, 
2), (0, 1))\n tfr.plot(picks=[0, 1, 2], axes=[ax, ax2, ax3])\n plt.close('all')\n\n tfr.plot([1, 2], title='title', colorbar=False, exclude='bads')\n plt.close('all')\n\n tfr.plot_topo(picks=[1, 2])\n plt.close('all')\n\n fig = tfr.plot(picks=[1], cmap='RdBu_r') # interactive mode on by default\n fig.canvas.key_press_event('up')\n fig.canvas.key_press_event(' ')\n fig.canvas.key_press_event('down')\n\n cbar = fig.get_axes()[0].CB # Fake dragging with mouse.\n ax = cbar.cbar.ax\n _fake_click(fig, ax, (0.1, 0.1))\n _fake_click(fig, ax, (0.1, 0.2), kind='motion')\n _fake_click(fig, ax, (0.1, 0.3), kind='release')\n\n _fake_click(fig, ax, (0.1, 0.1), button=3)\n _fake_click(fig, ax, (0.1, 0.2), button=3, kind='motion')\n _fake_click(fig, ax, (0.1, 0.3), kind='release')\n\n fig.canvas.scroll_event(0.5, 0.5, -0.5) # scroll down\n fig.canvas.scroll_event(0.5, 0.5, 0.5) # scroll up\n\n plt.close('all')\n\n\ndef test_plot_joint():\n \"\"\"Test TFR joint plotting.\"\"\"\n import matplotlib.pyplot as plt\n\n raw = read_raw_fif(raw_fname)\n times = np.linspace(-0.1, 0.1, 200)\n n_freqs = 3\n nave = 1\n rng = np.random.RandomState(42)\n data = rng.randn(len(raw.ch_names), n_freqs, len(times))\n tfr = AverageTFR(raw.info, data, times, np.arange(n_freqs), nave)\n\n topomap_args = {'res': 8, 'contours': 0, 'sensors': False}\n\n for combine in ('mean', 'rms', None):\n tfr.plot_joint(title='auto', colorbar=True,\n combine=combine, topomap_args=topomap_args)\n plt.close('all')\n\n # check various timefreqs\n for timefreqs in (\n {(tfr.times[0], tfr.freqs[1]): (0.1, 0.5),\n (tfr.times[-1], tfr.freqs[-1]): (0.2, 0.6)},\n [(tfr.times[1], tfr.freqs[1])]):\n tfr.plot_joint(timefreqs=timefreqs, topomap_args=topomap_args)\n plt.close('all')\n\n # test bad timefreqs\n timefreqs = ([(-100, 1)], tfr.times[1], [1],\n [(tfr.times[1], tfr.freqs[1], tfr.freqs[1])])\n for these_timefreqs in timefreqs:\n pytest.raises(ValueError, tfr.plot_joint, these_timefreqs)\n\n # test that the object is not internally modified\n tfr_orig = tfr.copy()\n tfr.plot_joint(baseline=(0, None), exclude=[tfr.ch_names[0]],\n topomap_args=topomap_args)\n plt.close('all')\n assert_array_equal(tfr.data, tfr_orig.data)\n assert (set(tfr.ch_names) == set(tfr_orig.ch_names))\n assert (set(tfr.times) == set(tfr_orig.times))\n\n\ndef test_add_channels():\n \"\"\"Test tfr splitting / re-appending channel types.\"\"\"\n data = np.zeros((6, 2, 3))\n times = np.array([.1, .2, .3])\n freqs = np.array([.10, .20])\n info = mne.create_info(\n ['MEG 001', 'MEG 002', 'MEG 003', 'EEG 001', 'EEG 002', 'STIM 001'],\n 1000., ['mag', 'mag', 'mag', 'eeg', 'eeg', 'stim'])\n tfr = AverageTFR(info, data=data, times=times, freqs=freqs,\n nave=20, comment='test', method='crazy-tfr')\n tfr_eeg = tfr.copy().pick_types(meg=False, eeg=True)\n tfr_meg = tfr.copy().pick_types(meg=True)\n tfr_stim = tfr.copy().pick_types(meg=False, stim=True)\n tfr_eeg_meg = tfr.copy().pick_types(meg=True, eeg=True)\n tfr_new = tfr_meg.copy().add_channels([tfr_eeg, tfr_stim])\n assert all(ch in tfr_new.ch_names\n for ch in tfr_stim.ch_names + tfr_meg.ch_names)\n tfr_new = tfr_meg.copy().add_channels([tfr_eeg])\n\n assert all(ch in tfr_new.ch_names\n for ch in tfr.ch_names if ch != 'STIM 001')\n assert_array_equal(tfr_new.data, tfr_eeg_meg.data)\n assert all(ch not in tfr_new.ch_names for ch in tfr_stim.ch_names)\n\n # Now test errors\n tfr_badsf = tfr_eeg.copy()\n tfr_badsf.info['sfreq'] = 3.1415927\n tfr_eeg = tfr_eeg.crop(-.1, .1)\n\n pytest.raises(RuntimeError, tfr_meg.add_channels, 
[tfr_badsf])\n pytest.raises(AssertionError, tfr_meg.add_channels, [tfr_eeg])\n pytest.raises(ValueError, tfr_meg.add_channels, [tfr_meg])\n pytest.raises(TypeError, tfr_meg.add_channels, tfr_badsf)\n\n\ndef test_compute_tfr():\n \"\"\"Test _compute_tfr function.\"\"\"\n # Set parameters\n event_id = 1\n tmin = -0.2\n tmax = 0.498 # Allows exhaustive decimation testing\n\n # Setup for reading the raw data\n raw = read_raw_fif(raw_fname)\n events = read_events(event_fname)\n\n exclude = raw.info['bads'] + ['MEG 2443', 'EEG 053'] # bads + 2 more\n\n # picks MEG gradiometers\n picks = pick_types(raw.info, meg='grad', eeg=False,\n stim=False, include=[], exclude=exclude)\n\n picks = picks[:2]\n epochs = Epochs(raw, events, event_id, tmin, tmax, picks=picks)\n data = epochs.get_data()\n sfreq = epochs.info['sfreq']\n freqs = np.arange(10, 20, 3).astype(float)\n\n # Check all combination of options\n for func, use_fft, zero_mean, output in product(\n (tfr_array_multitaper, tfr_array_morlet), (False, True), (False, True),\n ('complex', 'power', 'phase',\n 'avg_power_itc', 'avg_power', 'itc')):\n # Check exception\n if (func == tfr_array_multitaper) and (output == 'phase'):\n pytest.raises(NotImplementedError, func, data, sfreq=sfreq,\n freqs=freqs, output=output)\n continue\n\n # Check runs\n out = func(data, sfreq=sfreq, freqs=freqs, use_fft=use_fft,\n zero_mean=zero_mean, n_cycles=2., output=output)\n # Check shapes\n shape = np.r_[data.shape[:2], len(freqs), data.shape[2]]\n if ('avg' in output) or ('itc' in output):\n assert_array_equal(shape[1:], out.shape)\n else:\n assert_array_equal(shape, out.shape)\n\n # Check types\n if output in ('complex', 'avg_power_itc'):\n assert_equal(np.complex, out.dtype)\n else:\n assert_equal(np.float, out.dtype)\n assert (np.all(np.isfinite(out)))\n\n # Check errors params\n for _data in (None, 'foo', data[0]):\n pytest.raises(ValueError, _compute_tfr, _data, freqs, sfreq)\n for _freqs in (None, 'foo', [[0]]):\n pytest.raises(ValueError, _compute_tfr, data, _freqs, sfreq)\n for _sfreq in (None, 'foo'):\n pytest.raises(ValueError, _compute_tfr, data, freqs, _sfreq)\n for key in ('output', 'method', 'use_fft', 'decim', 'n_jobs'):\n for value in (None, 'foo'):\n kwargs = {key: value} # FIXME pep8\n pytest.raises(ValueError, _compute_tfr, data, freqs, sfreq,\n **kwargs)\n\n # No time_bandwidth param in morlet\n pytest.raises(ValueError, _compute_tfr, data, freqs, sfreq,\n method='morlet', time_bandwidth=1)\n # No phase in multitaper XXX Check ?\n pytest.raises(NotImplementedError, _compute_tfr, data, freqs, sfreq,\n method='multitaper', output='phase')\n\n # Inter-trial coherence tests\n out = _compute_tfr(data, freqs, sfreq, output='itc', n_cycles=2.)\n assert (np.sum(out >= 1) == 0)\n assert (np.sum(out <= 0) == 0)\n\n # Check decim shapes\n # 2: multiple of len(times) even\n # 3: multiple odd\n # 8: not multiple, even\n # 9: not multiple, odd\n for decim in (2, 3, 8, 9, slice(0, 2), slice(1, 3), slice(2, 4)):\n _decim = slice(None, None, decim) if isinstance(decim, int) else decim\n n_time = len(np.arange(data.shape[2])[_decim])\n shape = np.r_[data.shape[:2], len(freqs), n_time]\n for method in ('multitaper', 'morlet'):\n # Single trials\n out = _compute_tfr(data, freqs, sfreq, method=method, decim=decim,\n n_cycles=2.)\n assert_array_equal(shape, out.shape)\n # Averages\n out = _compute_tfr(data, freqs, sfreq, method=method, decim=decim,\n output='avg_power', n_cycles=2.)\n assert_array_equal(shape[1:], out.shape)\n\n\nrun_tests_if_main()\n", "# Author: 
Denis A. Engemann <[email protected]>\n#\n# License: BSD (3-clause)\n\nimport os.path as op\nimport numpy as np\nfrom numpy.testing import assert_array_almost_equal, assert_equal\nimport pytest\n\nfrom mne import io, Epochs, read_events, pick_types\nfrom mne.utils import requires_version, check_version, run_tests_if_main\nfrom mne.decoding import compute_ems, EMS\n\ndata_dir = op.join(op.dirname(__file__), '..', '..', 'io', 'tests', 'data')\ncurdir = op.join(op.dirname(__file__))\n\nraw_fname = op.join(data_dir, 'test_raw.fif')\nevent_name = op.join(data_dir, 'test-eve.fif')\n\ntmin, tmax = -0.2, 0.5\nevent_id = dict(aud_l=1, vis_l=3)\n\n\n@requires_version('sklearn', '0.15')\ndef test_ems():\n \"\"\"Test event-matched spatial filters.\"\"\"\n raw = io.read_raw_fif(raw_fname, preload=False)\n\n # create unequal number of events\n events = read_events(event_name)\n events[-2, 2] = 3\n picks = pick_types(raw.info, meg=True, stim=False, ecg=False,\n eog=False, exclude='bads')\n picks = picks[1:13:3]\n epochs = Epochs(raw, events, event_id, tmin, tmax, picks=picks,\n baseline=(None, 0), preload=True)\n pytest.raises(ValueError, compute_ems, epochs, ['aud_l', 'vis_l'])\n epochs.equalize_event_counts(epochs.event_id)\n\n pytest.raises(KeyError, compute_ems, epochs, ['blah', 'hahah'])\n surrogates, filters, conditions = compute_ems(epochs)\n assert_equal(list(set(conditions)), [1, 3])\n\n events = read_events(event_name)\n event_id2 = dict(aud_l=1, aud_r=2, vis_l=3)\n epochs = Epochs(raw, events, event_id2, tmin, tmax, picks=picks,\n baseline=(None, 0), preload=True)\n epochs.equalize_event_counts(epochs.event_id)\n\n n_expected = sum([len(epochs[k]) for k in ['aud_l', 'vis_l']])\n\n pytest.raises(ValueError, compute_ems, epochs)\n surrogates, filters, conditions = compute_ems(epochs, ['aud_r', 'vis_l'])\n assert_equal(n_expected, len(surrogates))\n assert_equal(n_expected, len(conditions))\n assert_equal(list(set(conditions)), [2, 3])\n\n # test compute_ems cv\n epochs = epochs['aud_r', 'vis_l']\n epochs.equalize_event_counts(epochs.event_id)\n if check_version('sklearn', '0.18'):\n from sklearn.model_selection import StratifiedKFold\n cv = StratifiedKFold()\n else:\n from sklearn.cross_validation import StratifiedKFold\n cv = StratifiedKFold(epochs.events[:, 2])\n compute_ems(epochs, cv=cv)\n compute_ems(epochs, cv=2)\n pytest.raises(ValueError, compute_ems, epochs, cv='foo')\n pytest.raises(ValueError, compute_ems, epochs, cv=len(epochs) + 1)\n raw.close()\n\n # EMS transformer, check that identical to compute_ems\n X = epochs.get_data()\n y = epochs.events[:, 2]\n X = X / np.std(X) # X scaled outside cv in compute_ems\n Xt, coefs = list(), list()\n ems = EMS()\n assert_equal(ems.__repr__(), '<EMS: not fitted.>')\n # manual leave-one-out to avoid sklearn version problem\n for test in range(len(y)):\n train = np.setdiff1d(range(len(y)), np.atleast_1d(test))\n ems.fit(X[train], y[train])\n coefs.append(ems.filters_)\n Xt.append(ems.transform(X[[test]]))\n assert_equal(ems.__repr__(), '<EMS: fitted with 4 filters on 2 classes.>')\n assert_array_almost_equal(filters, np.mean(coefs, axis=0))\n assert_array_almost_equal(surrogates, np.vstack(Xt))\n\n\nrun_tests_if_main()\n", "# Authors: Eric Larson <[email protected]>\n# Alexandre Gramfort <[email protected]>\n#\n# License: BSD (3-clause)\n\nfrom functools import partial\nimport os\nimport warnings\n\nimport numpy as np\nfrom scipy import sparse, linalg, stats\nfrom numpy.testing import (assert_equal, assert_array_equal,\n 
assert_array_almost_equal)\nimport pytest\n\nfrom mne.parallel import _force_serial\nfrom mne.stats.cluster_level import (permutation_cluster_test,\n permutation_cluster_1samp_test,\n spatio_temporal_cluster_test,\n spatio_temporal_cluster_1samp_test,\n ttest_1samp_no_p, summarize_clusters_stc)\nfrom mne.utils import run_tests_if_main, _TempDir, catch_logging\n\nwarnings.simplefilter('always') # enable b/c these tests throw warnings\n\n\nn_space = 50\n\n\ndef _get_conditions():\n noise_level = 20\n n_time_1 = 20\n n_time_2 = 13\n normfactor = np.hanning(20).sum()\n rng = np.random.RandomState(42)\n condition1_1d = rng.randn(n_time_1, n_space) * noise_level\n for c in condition1_1d:\n c[:] = np.convolve(c, np.hanning(20), mode=\"same\") / normfactor\n\n condition2_1d = rng.randn(n_time_2, n_space) * noise_level\n for c in condition2_1d:\n c[:] = np.convolve(c, np.hanning(20), mode=\"same\") / normfactor\n\n pseudoekp = 10 * np.hanning(25)[None, :]\n condition1_1d[:, 25:] += pseudoekp\n condition2_1d[:, 25:] -= pseudoekp\n\n condition1_2d = condition1_1d[:, :, np.newaxis]\n condition2_2d = condition2_1d[:, :, np.newaxis]\n return condition1_1d, condition2_1d, condition1_2d, condition2_2d\n\n\ndef test_cache_dir():\n \"\"\"Test use of cache dir.\"\"\"\n tempdir = _TempDir()\n orig_dir = os.getenv('MNE_CACHE_DIR', None)\n orig_size = os.getenv('MNE_MEMMAP_MIN_SIZE', None)\n rng = np.random.RandomState(0)\n X = rng.randn(9, 2, 10)\n try:\n os.environ['MNE_MEMMAP_MIN_SIZE'] = '1K'\n os.environ['MNE_CACHE_DIR'] = tempdir\n # Fix error for #1507: in-place when memmapping\n with catch_logging() as log_file:\n permutation_cluster_1samp_test(\n X, buffer_size=None, n_jobs=2, n_permutations=1,\n seed=0, stat_fun=ttest_1samp_no_p, verbose=False)\n # ensure that non-independence yields warning\n stat_fun = partial(ttest_1samp_no_p, sigma=1e-3)\n assert 'independently' not in log_file.getvalue()\n with warnings.catch_warnings(record=True): # independently\n permutation_cluster_1samp_test(\n X, buffer_size=10, n_jobs=2, n_permutations=1,\n seed=0, stat_fun=stat_fun, verbose=False)\n assert 'independently' in log_file.getvalue()\n finally:\n if orig_dir is not None:\n os.environ['MNE_CACHE_DIR'] = orig_dir\n else:\n del os.environ['MNE_CACHE_DIR']\n if orig_size is not None:\n os.environ['MNE_MEMMAP_MIN_SIZE'] = orig_size\n else:\n del os.environ['MNE_MEMMAP_MIN_SIZE']\n\n\ndef test_permutation_large_n_samples():\n \"\"\"Test that non-replacement works with large N.\"\"\"\n X = np.random.RandomState(0).randn(72, 1) + 1\n for n_samples in (11, 72):\n tails = (0, 1) if n_samples <= 20 else (0,)\n for tail in tails:\n H0 = permutation_cluster_1samp_test(\n X[:n_samples], threshold=1e-4, tail=tail)[-1]\n assert H0.shape == (1024,)\n assert len(np.unique(H0)) >= 1024 - (H0 == 0).sum()\n\n\ndef test_permutation_step_down_p():\n \"\"\"Test cluster level permutations with step_down_p.\"\"\"\n try:\n try:\n from sklearn.feature_extraction.image import grid_to_graph\n except ImportError:\n from scikits.learn.feature_extraction.image import grid_to_graph # noqa: F401,E501 analysis:ignore\n except ImportError:\n return\n rng = np.random.RandomState(0)\n # subjects, time points, spatial points\n X = rng.randn(9, 2, 10)\n # add some significant points\n X[:, 0:2, 0:2] += 2 # span two time points and two spatial points\n X[:, 1, 5:9] += 0.5 # span four time points with 4x smaller amplitude\n thresh = 2\n # make sure it works when we use ALL points in step-down\n t, clusters, p, H0 = \\\n permutation_cluster_1samp_test(X, 
threshold=thresh,\n step_down_p=1.0)\n # make sure using step-down will actually yield improvements sometimes\n t, clusters, p_old, H0 = \\\n permutation_cluster_1samp_test(X, threshold=thresh,\n step_down_p=0.0)\n assert_equal(np.sum(p_old < 0.05), 1) # just spatial cluster\n t, clusters, p_new, H0 = \\\n permutation_cluster_1samp_test(X, threshold=thresh,\n step_down_p=0.05)\n assert_equal(np.sum(p_new < 0.05), 2) # time one rescued\n assert np.all(p_old >= p_new)\n\n\ndef test_cluster_permutation_test():\n \"\"\"Test cluster level permutations tests.\"\"\"\n condition1_1d, condition2_1d, condition1_2d, condition2_2d = \\\n _get_conditions()\n for condition1, condition2 in zip((condition1_1d, condition1_2d),\n (condition2_1d, condition2_2d)):\n T_obs, clusters, cluster_p_values, hist = permutation_cluster_test(\n [condition1, condition2], n_permutations=100, tail=1, seed=1,\n buffer_size=None)\n assert_equal(np.sum(cluster_p_values < 0.05), 1)\n\n T_obs, clusters, cluster_p_values, hist = permutation_cluster_test(\n [condition1, condition2], n_permutations=100, tail=0, seed=1,\n buffer_size=None)\n assert_equal(np.sum(cluster_p_values < 0.05), 1)\n\n # test with 2 jobs and buffer_size enabled\n buffer_size = condition1.shape[1] // 10\n T_obs, clusters, cluster_p_values_buff, hist =\\\n permutation_cluster_test([condition1, condition2],\n n_permutations=100, tail=0, seed=1,\n n_jobs=2, buffer_size=buffer_size)\n assert_array_equal(cluster_p_values, cluster_p_values_buff)\n\n def stat_fun(X, Y):\n return stats.f_oneway(X, Y)[0]\n\n with warnings.catch_warnings(record=True) as w:\n permutation_cluster_test([condition1, condition2], n_permutations=1,\n stat_fun=stat_fun)\n assert_equal(len(w), 1)\n assert 'is only valid' in str(w[0].message)\n\n\ndef test_cluster_permutation_t_test():\n \"\"\"Test cluster level permutations T-test.\"\"\"\n condition1_1d, condition2_1d, condition1_2d, condition2_2d = \\\n _get_conditions()\n\n # use a very large sigma to make sure Ts are not independent\n stat_funs = [ttest_1samp_no_p,\n partial(ttest_1samp_no_p, sigma=1e-1)]\n\n for stat_fun in stat_funs:\n for condition1 in (condition1_1d, condition1_2d):\n # these are so significant we can get away with fewer perms\n T_obs, clusters, cluster_p_values, hist =\\\n permutation_cluster_1samp_test(condition1, n_permutations=100,\n tail=0, seed=1,\n buffer_size=None)\n assert_equal(np.sum(cluster_p_values < 0.05), 1)\n\n T_obs_pos, c_1, cluster_p_values_pos, _ =\\\n permutation_cluster_1samp_test(condition1, n_permutations=100,\n tail=1, threshold=1.67, seed=1,\n stat_fun=stat_fun,\n buffer_size=None)\n\n T_obs_neg, _, cluster_p_values_neg, _ =\\\n permutation_cluster_1samp_test(-condition1, n_permutations=100,\n tail=-1, threshold=-1.67,\n seed=1, stat_fun=stat_fun,\n buffer_size=None)\n assert_array_equal(T_obs_pos, -T_obs_neg)\n assert_array_equal(cluster_p_values_pos < 0.05,\n cluster_p_values_neg < 0.05)\n\n # test with 2 jobs and buffer_size enabled\n buffer_size = condition1.shape[1] // 10\n with warnings.catch_warnings(record=True): # independently\n T_obs_neg_buff, _, cluster_p_values_neg_buff, _ = \\\n permutation_cluster_1samp_test(\n -condition1, n_permutations=100, tail=-1,\n threshold=-1.67, seed=1, n_jobs=2, stat_fun=stat_fun,\n buffer_size=buffer_size)\n\n assert_array_equal(T_obs_neg, T_obs_neg_buff)\n assert_array_equal(cluster_p_values_neg, cluster_p_values_neg_buff)\n\n\ndef test_cluster_permutation_with_connectivity():\n \"\"\"Test cluster level permutations with connectivity matrix.\"\"\"\n 
try:\n try:\n from sklearn.feature_extraction.image import grid_to_graph\n except ImportError:\n from scikits.learn.feature_extraction.image import grid_to_graph\n except ImportError:\n return\n condition1_1d, condition2_1d, condition1_2d, condition2_2d = \\\n _get_conditions()\n\n n_pts = condition1_1d.shape[1]\n # we don't care about p-values in any of these, so do fewer permutations\n args = dict(seed=None, max_step=1, exclude=None,\n step_down_p=0, t_power=1, threshold=1.67,\n check_disjoint=False, n_permutations=50)\n\n did_warn = False\n for X1d, X2d, func, spatio_temporal_func in \\\n [(condition1_1d, condition1_2d,\n permutation_cluster_1samp_test,\n spatio_temporal_cluster_1samp_test),\n ([condition1_1d, condition2_1d],\n [condition1_2d, condition2_2d],\n permutation_cluster_test,\n spatio_temporal_cluster_test)]:\n out = func(X1d, **args)\n connectivity = grid_to_graph(1, n_pts)\n out_connectivity = func(X1d, connectivity=connectivity, **args)\n assert_array_equal(out[0], out_connectivity[0])\n for a, b in zip(out_connectivity[1], out[1]):\n assert_array_equal(out[0][a], out[0][b])\n assert np.all(a[b])\n\n # test spatio-temporal w/o time connectivity (repeat spatial pattern)\n connectivity_2 = sparse.coo_matrix(\n linalg.block_diag(connectivity.asfptype().todense(),\n connectivity.asfptype().todense()))\n\n if isinstance(X1d, list):\n X1d_2 = [np.concatenate((x, x), axis=1) for x in X1d]\n else:\n X1d_2 = np.concatenate((X1d, X1d), axis=1)\n\n out_connectivity_2 = func(X1d_2, connectivity=connectivity_2, **args)\n # make sure we were operating on the same values\n split = len(out[0])\n assert_array_equal(out[0], out_connectivity_2[0][:split])\n assert_array_equal(out[0], out_connectivity_2[0][split:])\n\n # make sure we really got 2x the number of original clusters\n n_clust_orig = len(out[1])\n assert len(out_connectivity_2[1]) == 2 * n_clust_orig\n\n # Make sure that we got the old ones back\n data_1 = set([np.sum(out[0][b[:n_pts]]) for b in out[1]])\n data_2 = set([np.sum(out_connectivity_2[0][a]) for a in\n out_connectivity_2[1][:]])\n assert len(data_1.intersection(data_2)) == len(data_1)\n\n # now use the other algorithm\n if isinstance(X1d, list):\n X1d_3 = [np.reshape(x, (-1, 2, n_space)) for x in X1d_2]\n else:\n X1d_3 = np.reshape(X1d_2, (-1, 2, n_space))\n\n out_connectivity_3 = spatio_temporal_func(X1d_3, n_permutations=50,\n connectivity=connectivity,\n max_step=0, threshold=1.67,\n check_disjoint=True)\n # make sure we were operating on the same values\n split = len(out[0])\n assert_array_equal(out[0], out_connectivity_3[0][0])\n assert_array_equal(out[0], out_connectivity_3[0][1])\n\n # make sure we really got 2x the number of original clusters\n assert len(out_connectivity_3[1]) == 2 * n_clust_orig\n\n # Make sure that we got the old ones back\n data_1 = set([np.sum(out[0][b[:n_pts]]) for b in out[1]])\n data_2 = set([np.sum(out_connectivity_3[0][a[0], a[1]]) for a in\n out_connectivity_3[1]])\n assert len(data_1.intersection(data_2)) == len(data_1)\n\n # test new versus old method\n out_connectivity_4 = spatio_temporal_func(X1d_3, n_permutations=50,\n connectivity=connectivity,\n max_step=2, threshold=1.67)\n out_connectivity_5 = spatio_temporal_func(X1d_3, n_permutations=50,\n connectivity=connectivity,\n max_step=1, threshold=1.67)\n\n # clusters could be in a different order\n sums_4 = [np.sum(out_connectivity_4[0][a])\n for a in out_connectivity_4[1]]\n sums_5 = [np.sum(out_connectivity_4[0][a])\n for a in out_connectivity_5[1]]\n sums_4 = np.sort(sums_4)\n 
sums_5 = np.sort(sums_5)\n assert_array_almost_equal(sums_4, sums_5)\n\n if not _force_serial:\n pytest.raises(ValueError, spatio_temporal_func, X1d_3,\n n_permutations=1, connectivity=connectivity,\n max_step=1, threshold=1.67, n_jobs=-1000)\n\n # not enough TFCE params\n pytest.raises(KeyError, spatio_temporal_func, X1d_3,\n connectivity=connectivity, threshold=dict(me='hello'))\n\n # too extreme a start threshold\n with warnings.catch_warnings(record=True) as w:\n spatio_temporal_func(X1d_3, connectivity=connectivity,\n threshold=dict(start=10, step=1))\n if not did_warn:\n assert len(w) == 1\n did_warn = True\n\n # too extreme a start threshold\n pytest.raises(ValueError, spatio_temporal_func, X1d_3,\n connectivity=connectivity, tail=-1,\n threshold=dict(start=1, step=-1))\n pytest.raises(ValueError, spatio_temporal_func, X1d_3,\n connectivity=connectivity, tail=-1,\n threshold=dict(start=-1, step=1))\n # Make sure connectivity has to be sparse\n pytest.raises(ValueError, spatio_temporal_func, X1d_3,\n n_permutations=50, connectivity=connectivity.todense(),\n max_step=1, threshold=1.67)\n\n # wrong type for threshold\n pytest.raises(TypeError, spatio_temporal_func, X1d_3,\n connectivity=connectivity, threshold=[])\n\n # wrong value for tail\n pytest.raises(ValueError, spatio_temporal_func, X1d_3,\n connectivity=connectivity, tail=2)\n\n # make sure it actually found a significant point\n out_connectivity_6 = spatio_temporal_func(X1d_3, n_permutations=50,\n connectivity=connectivity,\n max_step=1,\n threshold=dict(start=1,\n step=1))\n assert np.min(out_connectivity_6[2]) < 0.05\n\n\ndef test_permutation_connectivity_equiv():\n \"\"\"Test cluster level permutations with and without connectivity.\"\"\"\n try:\n try:\n from sklearn.feature_extraction.image import grid_to_graph\n except ImportError:\n from scikits.learn.feature_extraction.image import grid_to_graph\n except ImportError:\n return\n rng = np.random.RandomState(0)\n # subjects, time points, spatial points\n n_time = 2\n n_space = 4\n X = rng.randn(6, n_time, n_space)\n # add some significant points\n X[:, :, 0:2] += 10 # span two time points and two spatial points\n X[:, 1, 3] += 20 # span one time point\n max_steps = [1, 1, 1, 2, 1]\n # This will run full algorithm in two ways, then the ST-algorithm in 2 ways\n # All of these should give the same results\n conns = [None,\n grid_to_graph(n_time, n_space),\n grid_to_graph(1, n_space),\n grid_to_graph(1, n_space),\n None]\n stat_map = None\n thresholds = [2, 2, 2, 2, dict(start=0.01, step=1.0)]\n sig_counts = [2, 2, 2, 2, 5]\n stat_fun = partial(ttest_1samp_no_p, sigma=1e-3)\n\n cs = None\n ps = None\n for thresh, count, max_step, conn in zip(thresholds, sig_counts,\n max_steps, conns):\n t, clusters, p, H0 = \\\n permutation_cluster_1samp_test(\n X, threshold=thresh, connectivity=conn, n_jobs=2,\n max_step=max_step, stat_fun=stat_fun)\n # make sure our output datatype is correct\n assert isinstance(clusters[0], np.ndarray)\n assert clusters[0].dtype == bool\n assert_array_equal(clusters[0].shape, X.shape[1:])\n\n # make sure all comparisons were done; for TFCE, no perm\n # should come up empty\n inds = np.where(p < 0.05)[0]\n assert_equal(len(inds), count)\n if isinstance(thresh, dict):\n assert_equal(len(clusters), n_time * n_space)\n assert np.all(H0 != 0)\n continue\n this_cs = [clusters[ii] for ii in inds]\n this_ps = p[inds]\n this_stat_map = np.zeros((n_time, n_space), dtype=bool)\n for ci, c in enumerate(this_cs):\n if isinstance(c, tuple):\n this_c = np.zeros((n_time, 
n_space), bool)\n for x, y in zip(c[0], c[1]):\n this_stat_map[x, y] = True\n this_c[x, y] = True\n this_cs[ci] = this_c\n c = this_c\n this_stat_map[c] = True\n if cs is None:\n ps = this_ps\n cs = this_cs\n if stat_map is None:\n stat_map = this_stat_map\n assert_array_equal(ps, this_ps)\n assert len(cs) == len(this_cs)\n for c1, c2 in zip(cs, this_cs):\n assert_array_equal(c1, c2)\n assert_array_equal(stat_map, this_stat_map)\n\n\ndef test_spatio_temporal_cluster_connectivity():\n \"\"\"Test spatio-temporal cluster permutations.\"\"\"\n try:\n try:\n from sklearn.feature_extraction.image import grid_to_graph\n except ImportError:\n from scikits.learn.feature_extraction.image import grid_to_graph\n except ImportError:\n return\n condition1_1d, condition2_1d, condition1_2d, condition2_2d = \\\n _get_conditions()\n\n rng = np.random.RandomState(0)\n noise1_2d = rng.randn(condition1_2d.shape[0], condition1_2d.shape[1], 10)\n data1_2d = np.transpose(np.dstack((condition1_2d, noise1_2d)), [0, 2, 1])\n\n noise2_d2 = rng.randn(condition2_2d.shape[0], condition2_2d.shape[1], 10)\n data2_2d = np.transpose(np.dstack((condition2_2d, noise2_d2)), [0, 2, 1])\n\n conn = grid_to_graph(data1_2d.shape[-1], 1)\n\n threshold = dict(start=4.0, step=2)\n T_obs, clusters, p_values_conn, hist = \\\n spatio_temporal_cluster_test([data1_2d, data2_2d], connectivity=conn,\n n_permutations=50, tail=1, seed=1,\n threshold=threshold, buffer_size=None)\n\n buffer_size = data1_2d.size // 10\n T_obs, clusters, p_values_no_conn, hist = \\\n spatio_temporal_cluster_test([data1_2d, data2_2d],\n n_permutations=50, tail=1, seed=1,\n threshold=threshold, n_jobs=2,\n buffer_size=buffer_size)\n\n assert_equal(np.sum(p_values_conn < 0.05), np.sum(p_values_no_conn < 0.05))\n\n # make sure results are the same without buffer_size\n T_obs, clusters, p_values2, hist2 = \\\n spatio_temporal_cluster_test([data1_2d, data2_2d],\n n_permutations=50, tail=1, seed=1,\n threshold=threshold, n_jobs=2,\n buffer_size=None)\n assert_array_equal(p_values_no_conn, p_values2)\n pytest.raises(ValueError, spatio_temporal_cluster_test,\n [data1_2d, data2_2d], tail=1, threshold=-2.)\n pytest.raises(ValueError, spatio_temporal_cluster_test,\n [data1_2d, data2_2d], tail=-1, threshold=2.)\n pytest.raises(ValueError, spatio_temporal_cluster_test,\n [data1_2d, data2_2d], tail=0, threshold=-1)\n\n\ndef ttest_1samp(X):\n \"\"\"Return T-values.\"\"\"\n return stats.ttest_1samp(X, 0)[0]\n\n\ndef test_summarize_clusters():\n \"\"\"Test cluster summary stcs.\"\"\"\n clu = (np.random.random([1, 20484]),\n [(np.array([0]), np.array([0, 2, 4]))],\n np.array([0.02, 0.1]),\n np.array([12, -14, 30]))\n stc_sum = summarize_clusters_stc(clu)\n assert stc_sum.data.shape[1] == 2\n clu[2][0] = 0.3\n pytest.raises(RuntimeError, summarize_clusters_stc, clu)\n\n\ndef test_permutation_test_H0():\n \"\"\"Test that H0 is populated properly during testing.\"\"\"\n rng = np.random.RandomState(0)\n data = rng.rand(7, 10, 1) - 0.5\n with warnings.catch_warnings(record=True) as w:\n t, clust, p, h0 = spatio_temporal_cluster_1samp_test(\n data, threshold=100, n_permutations=1024, seed=rng)\n assert_equal(len(w), 1)\n assert 'No clusters found' in str(w[0].message)\n assert_equal(len(h0), 0)\n\n for n_permutations in (1024, 65, 64, 63):\n t, clust, p, h0 = spatio_temporal_cluster_1samp_test(\n data, threshold=0.1, n_permutations=n_permutations, seed=rng)\n assert_equal(len(h0), min(n_permutations, 64))\n assert isinstance(clust[0], tuple) # sets of indices\n for tail, thresh in 
zip((-1, 0, 1), (-0.1, 0.1, 0.1)):\n with warnings.catch_warnings(record=True) as w:\n t, clust, p, h0 = spatio_temporal_cluster_1samp_test(\n data, threshold=thresh, seed=rng, tail=tail, out_type='mask')\n assert_equal(len(w), 0)\n assert isinstance(clust[0], np.ndarray) # bool mask\n # same as \"128 if tail else 64\"\n assert_equal(len(h0), 2 ** (7 - (tail == 0))) # exact test\n\n\ndef test_tfce_thresholds():\n \"\"\"Test TFCE thresholds.\"\"\"\n rng = np.random.RandomState(0)\n data = rng.randn(7, 10, 1) - 0.5\n\n # if tail==-1, step must also be negative\n pytest.raises(ValueError, permutation_cluster_1samp_test, data, tail=-1,\n threshold=dict(start=0, step=0.1))\n # this works (smoke test)\n permutation_cluster_1samp_test(data, tail=-1,\n threshold=dict(start=0, step=-0.1))\n\n # thresholds must be monotonically increasing\n pytest.raises(ValueError, permutation_cluster_1samp_test, data, tail=1,\n threshold=dict(start=1, step=-0.5))\n\n\nrun_tests_if_main()\n" ]
[ [ "numpy.dot", "scipy.linalg.svd", "numpy.unique", "numpy.eye", "numpy.ones", "numpy.all", "numpy.array", "numpy.zeros", "numpy.sum" ], [ "numpy.abs", "numpy.unique", "numpy.asarray", "scipy.spatial.Delaunay", "scipy.io.loadmat", "scipy.sparse.csr_matrix", "numpy.concatenate", "numpy.argmax", "numpy.repeat", "numpy.array", "numpy.sum", "scipy.sparse.lil_matrix" ], [ "numpy.linspace", "matplotlib.pyplot.figure", "numpy.arange", "matplotlib.pyplot.plot", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.show", "matplotlib.pyplot.ylabel" ], [ "numpy.testing.assert_equal", "matplotlib.use", "numpy.random.randint", "numpy.random.rand", "matplotlib.pyplot.close", "numpy.testing.assert_allclose", "numpy.random.RandomState", "numpy.empty", "numpy.testing.assert_array_almost_equal" ], [ "numpy.linspace", "numpy.mean", "numpy.any", "matplotlib.pyplot.subplot2grid", "numpy.testing.assert_equal", "numpy.arange", "numpy.sin", "numpy.real", "numpy.argmax", "matplotlib.pyplot.close", "numpy.zeros", "numpy.testing.assert_array_almost_equal", "numpy.logical_or", "numpy.logical_and", "numpy.array", "numpy.sum", "numpy.random.RandomState", "numpy.isfinite", "matplotlib.use", "numpy.ones", "numpy.testing.assert_array_equal", "numpy.empty" ], [ "sklearn.cross_validation.StratifiedKFold", "numpy.atleast_1d", "numpy.std", "numpy.mean", "numpy.vstack" ], [ "scipy.stats.f_oneway", "scipy.stats.ttest_1samp", "numpy.random.random", "numpy.min", "numpy.reshape", "numpy.unique", "numpy.sort", "numpy.dstack", "numpy.all", "numpy.testing.assert_array_equal", "numpy.concatenate", "numpy.where", "numpy.hanning", "numpy.array", "numpy.random.RandomState", "numpy.zeros", "numpy.sum", "numpy.testing.assert_array_almost_equal" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "0.13", "0.14", "0.15", "0.12", "0.10" ], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "1.7", "1.0", "0.10", "1.2", "0.14", "0.19", "1.5", "0.12", "0.17", "0.13", "1.6", "1.4", "1.9", "1.3", "1.10", "0.15", "0.18", "0.16", "1.8" ], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "0.13", "1.6", "0.14", "1.10", "0.15", "1.4", "1.3", "1.9", "0.19", "1.5", "0.18", "1.2", "1.7", "0.12", "1.0", "0.17", "0.16", "1.8" ], "tensorflow": [] } ]
Mootjee/IMM
[ "9bd9c9c12118a3e0d67ce5afa954dda8a5814e00" ]
[ "generate.py" ]
[ "import keras\nfrom keras.applications.inception_v3 import InceptionV3\nfrom keras.applications.mobilenetv2 import MobileNetV2\nfrom keras import backend as K\n\nfrom keras.preprocessing.image import ImageDataGenerator\n\nfrom keras.preprocessing import image\nimport numpy as np\n\nimport magenta.music as mm\nfrom magenta.models.music_vae import configs\nfrom magenta.models.music_vae.trained_model import TrainedModel\n\nimport skvideo.io\nimport wget, tqdm, os, tfutil, sys\n\nimport pandas as pd\n\nfrom scipy.io import wavfile\n\nfrom argparse import ArgumentParser\n\nfrom subprocess import call\n\n# suppress warnings\nimport warnings\nif not sys.warnoptions:\n warnings.simplefilter(\"ignore\")\n\nfrom sklearn.decomposition import PCA\n\nfrom skimage.transform import resize\n\nimport random\n\n\"\"\"\nGenerate a MIDI track from a movie file.\n\nThe pipeline used by this script works as follows.\n\n1) A pretrained model (InceptionV3) is used to map the each frame in the video to a vector of _image features_.\n2) A _mapper_ is used to convert each cluster of n frames to a latent vectors representing 16 bars of music. The mapper\nensures that the resulting latent variables fit the standard normal shape of the latent space. Two mappers are available:\n * PCA: This mapper only looks at the frames of the current video, and fits them to a standard normal distribution\nusing a Principal Component Analysis. This is the default mapper.\n * vae.???: This mapper is the encoder part of a VAE trained on image features extracted from the the whole openbeelden archive.\n Choose this mapper by adding the argument the argument \"-m vae.??\".\n3) The resulting latent vectors are fed to the MusicVAE decoder to generate 16 bars of music.\n4) The generated music is saved three ways: as a midi file, as a synthesized .wav file, as a .mkv file combining the\noriginal video with the new music (only works if ffmpeg is installed). If the input video had audio, this is discarded.\n\n## Notes\n\n* In principle, any input video should work, but video loading can be tricky. For best results, use videos from the sound\n and vision \"openbeelden\" archive. The file openbeelden.clean.csv contains URLs for over 3000 videos that should work.\n* The model is entirely unsupervised. The music will change in response to high level semantic features in the frames of the\nvideo, but which frames correspond to which music features is entirely random.\n* The PCA mapper will maximize the response to variance _within_ the same video. The pretrained mapper instead may\ngenerate more homogeneous music for a single video, but is more likely to assign separate videos their own characteristic\nmusic track.\n** The model is trained on 2 bar chunks for the melody and drum models and 16 bar chunks for the poly model, so those\n so generating chunks of that length should provide the most natural results. However, shorter values provide a more\n direct response to what is happening in the video.\n* (TODO) Any tensorflow model that maps a (b*???, ???)-tensor to a (b, 512) tensor can be used as a custom mapper. Just\nsave the model and load it with the \"-m\" switch. 
For good results, the model should map to points that are likely under\nthe multivariate standard normal distribution.\n\"\"\"\n\nFPS = 25 # We assume PAL for now\nSECONDS_PER_BAR = 2 # Default MIDI timing (120BPM, 480 BPQ)\nMVAE_URL_DRUMS = 'https://storage.googleapis.com/magentadata/models/music_vae/checkpoints/cat-drums_2bar_small.lokl.tar'\nMVAE_URL_MEL = 'https://storage.googleapis.com/magentadata/models/music_vae/checkpoints/cat-mel_2bar_big.tar'\nMVAE_URL_POLY = 'https://storage.googleapis.com/magentadata/models/music_vae/checkpoints/hierdec-trio_16bar.tar'\nSAMPLE_RATE = 44100\nFRAMECHUNK = 100 # Set as big as memory allows\n\ndef go(arg):\n\n # Check if GPU available\n print('GPUS available: ', K.tensorflow_backend._get_available_gpus())\n\n # Load pretrained models\n ## Load the Music VAE model\n if arg.decoder == 'melody':\n mfile = arg.model_dir + os.sep + 'musicmodel.melody.tar'\n if not os.path.isfile(mfile):\n print('Downloading MusicVAE (melody model).')\n wget.download(MVAE_URL_MEL, mfile)\n\n decoder_config = configs.CONFIG_MAP['cat-mel_2bar_big']\n decoder = TrainedModel(decoder_config, batch_size=4, checkpoint_dir_or_path=mfile)\n latent_size = 256\n chunk_length = 2\n\n elif arg.decoder == 'drums':\n mfile = arg.model_dir + os.sep + 'musicmodel.drums.tar'\n if not os.path.isfile(mfile):\n print('Downloading MusicVAE (drums model).')\n wget.download(MVAE_URL_DRUMS, mfile)\n\n decoder_config = configs.CONFIG_MAP['cat-drums_2bar_small']\n decoder = TrainedModel(decoder_config, batch_size=4, checkpoint_dir_or_path=mfile)\n latent_size = 128\n chunk_length = 2\n\n elif arg.decoder == 'poly':\n mfile = arg.model_dir + os.sep + 'musicmodel.poly.tar'\n if not os.path.isfile(mfile):\n print('Downloading MusicVAE (polyphonic model).')\n wget.download(MVAE_URL_POLY, mfile)\n\n decoder_config = configs.CONFIG_MAP['hierdec-trio_16bar']\n decoder = TrainedModel(decoder_config, batch_size=4, checkpoint_dir_or_path=mfile)\n latent_size = 256\n chunk_length = 16\n\n else:\n raise Exception('Decoder model {} not recognized. 
Use \"poly\", \"melody\" or \"drums\"'.format(arg.decoder))\n\n shape = None\n\n if arg.input not in ['none', 'slerp']:\n if arg.encoder == 'inceptionv3':\n encoder = InceptionV3(weights='imagenet', include_top=False)\n prep = keras.applications.inception_v3.preprocess_input\n flat = 6 * 8 * 2048\n elif arg.encoder == 'mobilenetv2':\n encoder = MobileNetV2(weights='imagenet', include_top=False)\n prep = keras.applications.mobilenetv2.preprocess_input\n flat = 7 * 7 * 1280\n shape = (224, 244)\n else:\n raise Exception('Encoder model {} not recognized'.format(arg.encoder))\n\n frames_per_chunk = chunk_length * SECONDS_PER_BAR * FPS\n\n has_video = True\n\n if arg.input == 'none':\n ## Generate 32 bars of random music\n z = np.random.randn(6, latent_size)\n has_video = False\n\n elif arg.input == 'slerp':\n ## Generate 6 bars of random music, interpolating betwene two points\n z0 = np.random.randn(latent_size) * 2\n z1 = np.random.randn(latent_size) * 2\n\n z = tfutil.slerp(z0, z1, steps=10)\n\n has_video = False\n else:\n # Load a random video from the openbeelden data\n if arg.input == 'random':\n # - data urls\n df = pd.read_csv(tfutil.DIR + os.sep + 'openbeelden.clean.csv', header=None)\n l = len(df)\n\n index = random.randint(0, l)\n url = df.iloc[index, 2]\n\n print('Downloading video', url)\n try:\n dir = './downloaded/'\n tfutil.ensure(dir)\n\n arg.input = wget.download(url, out=dir)\n except Exception as e:\n print('*** Could not download', url)\n raise e\n\n ## Load a video to 'inspire' the random music\n\n # Loop through the chunks\n length = tfutil.get_length(arg.input) # read through the video to get the nr of frames.\n gen = skvideo.io.vreader(arg.input, num_frames=length if arg.limit is None else arg.limit) # movie frame generator\n\n features = []\n\n print('Computing features')\n for i, frames in tqdm.tqdm(enumerate(tfutil.chunks(gen, size=FRAMECHUNK)), total=(length//FRAMECHUNK)+1):\n\n frames = np.concatenate([f[None, :, :, :] for f in frames], axis=0)\n # print('Loaded frame-chunk {}, with shape {}'.format(i, frames.shape))\n\n if shape is not None: # Resize the frame batch for the encoder model\n inshape = frames.shape\n\n frames = frames.transpose((1, 2, 3, 0))\n frames = frames.reshape(inshape[1], inshape[2], -1)\n frames = resize(frames, shape)\n frames = frames.reshape(shape[0], shape[1], 3, -1)\n frames = frames.transpose((3, 0, 1, 2))\n\n # print(' after resize:', frames.shape)\n frames = prep(frames)\n\n # Map to image features (1)\n features.append(encoder.predict(frames))\n\n features = np.concatenate(features, axis=0).squeeze()\n features = features.reshape(-1, flat)\n\n print('Computed features (shape {})'.format(features.shape))\n\n print(features[:, :10].var(axis=1))\n\n b, fdim = features.shape\n\n # Apply PCA\n pca = PCA(n_components=latent_size, whiten=True)\n z = pca.fit_transform(features)\n\n print(z.shape)\n print('per dimension variance (first 10)', z[:, :10].var(axis=1))\n print('per z norm', np.linalg.norm(z, axis=1))\n\n # Average over chunks of 50 frames so that each vector in the sequence\n # correponds to 2 bars\n chunks = []\n for f in range(0, b, frames_per_chunk):\n t = min(f + frames_per_chunk, b)\n chunks.append(z[f:t, :].mean(axis=0, keepdims=True))\n\n z = np.concatenate(chunks, axis=0)\n\n print('Averaged z vectors', z.shape)\n print(z[:, :10].var(axis=1))\n\n # Whiten (averaging will have decreased the variance, so we adjust the spread)\n z -= z.mean(axis=0, keepdims=True)\n z /= z.var(axis=0, keepdims=True)\n\n print('Whitened. 
per z norm', np.linalg.norm(z, axis=1))\n\n if arg.normalize:\n z = z / np.linalg.norm(z, axis=1, keepdims=True)\n\n print('Normalized. per z norm', np.linalg.norm(z, axis=1))\n\n z = z * arg.zmult\n\n # Generate MIDI (3)\n b, zdim = z.shape\n\n noise = np.repeat(np.random.randn(1, zdim), b, axis=0)\n z = np.concatenate([z, noise], axis=1)\n # -- We use the same epsilon noise vector throughout the video. That way, if subsequent chunks are similar, the\n # resulting bars of music will also be similar. Resampling the noise for each chunk would lead to a change in\n # style every 2 bars.\n\n ## Sample the music\n clength = chunk_length * 16\n note_sequences = decoder.decode(z=z, length=clength, temperature=arg.temp)\n\n print(len(note_sequences), ' note sequences produced')\n\n # for i, ns in enumerate(note_sequences):\n # print('chunk ', i, ns.total_time)\n # ## Output the MIDI file\n # mm.sequence_proto_to_midi_file(ns, '{}.{:03}.mid'.format(arg.name, i))\n\n # note_sequence = mm.concatenate_sequences(note_sequences, [0.75] * len(note_sequences))\n note_sequence = mm.sequences_lib.concatenate_sequences(note_sequences, [chunk_length * SECONDS_PER_BAR] * len(note_sequences))\n\n print('total time', note_sequence.total_time)\n\n ## Output the MIDI file\n mm.sequence_proto_to_midi_file(note_sequence, '{}.mid'.format(arg.name))\n\n ## Output the sequenced MIDI as a WAV file\n # Crappy synthesizer (probably to do with missing sound font)\n # pmidi = mm.midi_io.note_sequence_to_pretty_midi(note_sequence)\n # sequenced = pmidi.synthesize(fs=SAMPLE_RATE)\n #\n # with open(arg.name + '.wav', 'wb') as file:\n # wavfile.write(file, SAMPLE_RATE, sequenced)\n\n ## Use timidity to convert the MIDI to WAV\n\n # Run the following command: timidity output.mid -Ow\n call(['timidity', arg.name+'.mid', '-Ow'])\n\n ## Output the combined audio/video\n if has_video:\n # Run the following command: ffmpeg -i input.mp4 -i output.wav -c copy -map 0:v:0 -map 1:a:0 output.mkv\n call(['ffmpeg', '-i', arg.input, '-i', arg.name+'.wav', '-c', 'copy', '-map', '0:v:0', '-map', '1:a:0', arg.name+'.mkv'])\n\n print('Finished')\n\nif __name__ == \"__main__\":\n\n ## Parse the command line options\n parser = ArgumentParser()\n\n parser.add_argument(\"-i\", \"--input\",\n dest=\"input\",\n help=\"Input movie file. Most common formats should work. The following keywords activate a special mode: 'none' generate music only, 'slerp' generate randomly interpolated music, 'random' download a random video file from the openbeelden archive.\",\n default=None, type=str)\n\n parser.add_argument(\"-m\", \"--mapper\",\n dest=\"latent-map\",\n help=\"Model to map the image features to the latent space. If none, PCA mapping is used.\",\n default=None, type=str)\n\n parser.add_argument(\"-t\", \"--temperature\",\n dest=\"temp\",\n help=\"Decoding temperature. (Higher temperature results in more variation, \",\n default=0.5, type=float)\n\n parser.add_argument(\"-M\", \"--mult\",\n dest=\"zmult\",\n help=\"Multiplier for the latent vectors. 
Set higher than 1 to create more extreme variation.\",\n default=1.0, type=float)\n\n # parser.add_argument(\"-c\", \"--chunk-length\",\n # dest=\"chunk_length\",\n # help=\"The length (in bars, lasting 2 seconds) of a chunk of frames for which the model generates a sequence of music.\",\n # default=2, type=int)\n\n parser.add_argument(\"-n\", \"--name\",\n dest=\"name\",\n help=\"Name of the output files (without extension).\",\n default='output', type=str)\n\n parser.add_argument(\"-F\", \"--final\", dest=\"final\",\n help=\"Use the canonical test set instead of a validation split.\",\n action=\"store_true\")\n\n parser.add_argument(\"-N\", \"--normalize\", dest=\"normalize\",\n help=\"Project the z vectors onto the hypersphere.\",\n action=\"store_true\")\n\n parser.add_argument(\"--limit\",\n dest=\"limit\",\n help=\"Limit the number of frames loaded (useful for debugging).\",\n default=None, type=int)\n\n parser.add_argument(\"--encoder-model\",\n dest=\"encoder\",\n help=\"Which model to use for feature extraction (inceptionv3, mobilenetv2)\",\n default='mobilenetv2', type=str)\n\n parser.add_argument(\"--decoder-model\",\n dest=\"decoder\",\n help=\"Which model to use for decoding (melody, drums, poly)\",\n default='melody', type=str)\n\n parser.add_argument(\"--model-dir\",\n dest=\"model_dir\",\n help=\"Directory to keep the downloaded models.\",\n default='.', type=str)\n\n options = parser.parse_args()\n\n print('OPTIONS ', options)\n\n go(options)" ]
[ [ "pandas.read_csv", "numpy.linalg.norm", "numpy.concatenate", "numpy.random.randn", "sklearn.decomposition.PCA" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.1", "1.5", "1.2", "1.3" ], "scipy": [], "tensorflow": [] } ]
hmaschke/pandapower-1
[ "2e93969050d3d468ce57f73d358e97fabc6e5141", "2e93969050d3d468ce57f73d358e97fabc6e5141", "2e93969050d3d468ce57f73d358e97fabc6e5141", "2e93969050d3d468ce57f73d358e97fabc6e5141", "2e93969050d3d468ce57f73d358e97fabc6e5141" ]
[ "pandapower/plotting/plotly/get_colors.py", "pandapower/create.py", "pandapower/test/consistency_checks.py", "pandapower/converter/pypower/from_ppc.py", "pandapower/estimation/state_estimation.py" ]
[ "# -*- coding: utf-8 -*-\n\n# Copyright (c) 2016-2022 by University of Kassel and Fraunhofer Institute for Energy Economics\n# and Energy System Technology (IEE), Kassel. All rights reserved.\n\n\nimport sys\nimport numpy as np\ntry:\n import matplotlib.pyplot as plt\n import matplotlib.cm as cm\n import matplotlib.colors as mplc\n MATPLOTLIB_INSTALLED = True\nexcept ImportError:\n MATPLOTLIB_INSTALLED = False\n\ntry:\n import seaborn\nexcept ImportError:\n pass\n\nfrom pandapower.auxiliary import soft_dependency_error\n\n\ndef get_plotly_color(color_string):\n if not MATPLOTLIB_INSTALLED:\n soft_dependency_error(str(sys._getframe().f_code.co_name)+\"()\", \"matplotlib\")\n try:\n converted = _to_plotly_color(mplc.to_rgba(color_string))\n return converted\n except ValueError:\n return color_string\n\n\ndef get_plotly_color_palette(n):\n if not MATPLOTLIB_INSTALLED:\n soft_dependency_error(str(sys._getframe().f_code.co_name)+\"()\", \"matplotlib\")\n if 'seaborn' in sys.modules:\n return _to_plotly_palette(seaborn.color_palette(\"hls\", n))\n else:\n hsv = plt.get_cmap('hsv')\n return _to_plotly_palette(hsv(np.linspace(0, 1.0, n)))\n\n\ndef _to_plotly_palette(scl, transparence=None):\n \"\"\"\n converts a rgb color palette in format (0-1,0-1,0-1) to a plotly color palette\n 'rgb(0-255,0-255,0-255)'\n \"\"\"\n if not MATPLOTLIB_INSTALLED:\n soft_dependency_error(str(sys._getframe().f_code.co_name)+\"()\", \"matplotlib\")\n _out = []\n for color in scl:\n plotly_col = [255 * _c for _c in mplc.to_rgba(color)]\n if transparence:\n assert 0. <= transparence <= 1.0\n plotly_col[3] = transparence\n plotly_col = \"rgba({:.0f}, {:.0f}, {:.0f}, {:.4f})\".format(*plotly_col)\n else:\n plotly_col = \"rgb({:.0f}, {:.0f}, {:.0f})\".format(*plotly_col[:3])\n _out.append(plotly_col)\n return _out\n\n\ndef _to_plotly_color(scl, transparence=None):\n \"\"\"\n converts a rgb color in format (0-1,0-1,0-1) to a plotly color 'rgb(0-255,0-255,0-255)'\n \"\"\"\n if not MATPLOTLIB_INSTALLED:\n soft_dependency_error(str(sys._getframe().f_code.co_name)+\"()\", \"matplotlib\")\n plotly_col = [255 * _c for _c in mplc.to_rgba(scl)] if len(scl) == 3 else [255 * _c for _c in\n mplc.to_rgb(scl)]\n if transparence is not None:\n assert 0. <= transparence <= 1.0\n plotly_col[3] = transparence\n return \"rgba({:.0f}, {:.0f}, {:.0f}, {:.4f})\".format(*plotly_col)\n else:\n return \"rgb({:.0f}, {:.0f}, {:.0f})\".format(*plotly_col[:3])\n\n\ndef get_plotly_cmap(values, cmap_name='jet', cmin=None, cmax=None):\n if not MATPLOTLIB_INSTALLED:\n soft_dependency_error(str(sys._getframe().f_code.co_name)+\"()\", \"matplotlib\")\n cmap = cm.get_cmap(cmap_name)\n if cmin is None:\n cmin = values.min()\n if cmax is None:\n cmax = values.max()\n norm = mplc.Normalize(vmin=cmin, vmax=cmax)\n bus_fill_colors_rgba = cmap(norm(values).data)[:, 0:3] * 255.\n return ['rgb({0},{1},{2})'.format(r, g, b) for r, g, b in bus_fill_colors_rgba]\n", "# -*- coding: utf-8 -*-\n\n# Copyright (c) 2016-2022 by University of Kassel and Fraunhofer Institute for Energy Economics\n# and Energy System Technology (IEE), Kassel. 
All rights reserved.\n\n\nfrom operator import itemgetter\n\nimport pandas as pd\nfrom numpy import nan, isnan, arange, dtype, isin, any as np_any, zeros, array, bool_, \\\n all as np_all, float64, intersect1d\nfrom packaging import version\n\nfrom pandapower import __version__\nfrom pandapower.auxiliary import pandapowerNet, get_free_id, _preserve_dtypes\nfrom pandapower.results import reset_results\nfrom pandapower.std_types import add_basic_std_types, load_std_type, check_entry_in_std_type\nimport numpy as np\n\ntry:\n import pandaplan.core.pplog as logging\nexcept ImportError:\n import logging\n\nlogger = logging.getLogger(__name__)\n\n\ndef create_empty_network(name=\"\", f_hz=50., sn_mva=1, add_stdtypes=True):\n \"\"\"\n This function initializes the pandapower datastructure.\n\n OPTIONAL:\n **f_hz** (float, 50.) - power system frequency in hertz\n\n **name** (string, None) - name for the network\n\n **sn_mva** (float, 1e3) - reference apparent power for per unit system\n\n **add_stdtypes** (boolean, True) - Includes standard types to net\n\n OUTPUT:\n **net** (attrdict) - PANDAPOWER attrdict with empty tables:\n\n EXAMPLE:\n net = create_empty_network()\n\n \"\"\"\n net = pandapowerNet({\n # structure data\n \"bus\": [('name', dtype(object)),\n ('vn_kv', 'f8'),\n ('type', dtype(object)),\n ('zone', dtype(object)),\n ('in_service', 'bool'), ],\n \"load\": [(\"name\", dtype(object)),\n (\"bus\", \"u4\"),\n (\"p_mw\", \"f8\"),\n (\"q_mvar\", \"f8\"),\n (\"const_z_percent\", \"f8\"),\n (\"const_i_percent\", \"f8\"),\n (\"sn_mva\", \"f8\"),\n (\"scaling\", \"f8\"),\n (\"in_service\", 'bool'),\n (\"type\", dtype(object))],\n \"sgen\": [(\"name\", dtype(object)),\n (\"bus\", \"i8\"),\n (\"p_mw\", \"f8\"),\n (\"q_mvar\", \"f8\"),\n (\"sn_mva\", \"f8\"),\n (\"scaling\", \"f8\"),\n (\"in_service\", 'bool'),\n (\"type\", dtype(object)),\n (\"current_source\", \"bool\")],\n \"motor\": [(\"name\", dtype(object)),\n (\"bus\", \"i8\"),\n (\"pn_mech_mw\", \"f8\"),\n (\"loading_percent\", \"f8\"),\n (\"cos_phi\", \"f8\"),\n (\"cos_phi_n\", \"f8\"),\n (\"efficiency_percent\", \"f8\"),\n (\"efficiency_n_percent\", \"f8\"),\n (\"lrc_pu\", \"f8\"),\n (\"vn_kv\", \"f8\"),\n (\"scaling\", \"f8\"),\n (\"in_service\", 'bool'),\n (\"rx\", 'f8')\n ],\n \"asymmetric_load\": [(\"name\", dtype(object)),\n (\"bus\", \"u4\"),\n (\"p_a_mw\", \"f8\"),\n (\"q_a_mvar\", \"f8\"),\n (\"p_b_mw\", \"f8\"),\n (\"q_b_mvar\", \"f8\"),\n (\"p_c_mw\", \"f8\"),\n (\"q_c_mvar\", \"f8\"),\n (\"sn_mva\", \"f8\"),\n (\"scaling\", \"f8\"),\n (\"in_service\", 'bool'),\n (\"type\", dtype(object))],\n\n \"asymmetric_sgen\": [(\"name\", dtype(object)),\n (\"bus\", \"i8\"),\n (\"p_a_mw\", \"f8\"),\n (\"q_a_mvar\", \"f8\"),\n (\"p_b_mw\", \"f8\"),\n (\"q_b_mvar\", \"f8\"),\n (\"p_c_mw\", \"f8\"),\n (\"q_c_mvar\", \"f8\"),\n (\"sn_mva\", \"f8\"),\n (\"scaling\", \"f8\"),\n (\"in_service\", 'bool'),\n (\"type\", dtype(object)),\n (\"current_source\", \"bool\")],\n \"storage\": [(\"name\", dtype(object)),\n (\"bus\", \"i8\"),\n (\"p_mw\", \"f8\"),\n (\"q_mvar\", \"f8\"),\n (\"sn_mva\", \"f8\"),\n (\"soc_percent\", \"f8\"),\n (\"min_e_mwh\", \"f8\"),\n (\"max_e_mwh\", \"f8\"),\n (\"scaling\", \"f8\"),\n (\"in_service\", 'bool'),\n (\"type\", dtype(object))],\n \"gen\": [(\"name\", dtype(object)),\n (\"bus\", \"u4\"),\n (\"p_mw\", \"f8\"),\n (\"vm_pu\", \"f8\"),\n (\"sn_mva\", \"f8\"),\n (\"min_q_mvar\", \"f8\"),\n (\"max_q_mvar\", \"f8\"),\n (\"scaling\", \"f8\"),\n (\"slack\", \"bool\"),\n (\"in_service\", 'bool'),\n (\"slack_weight\", 
'f8'),\n (\"type\", dtype(object))],\n \"switch\": [(\"bus\", \"i8\"),\n (\"element\", \"i8\"),\n (\"et\", dtype(object)),\n (\"type\", dtype(object)),\n (\"closed\", \"bool\"),\n (\"name\", dtype(object)),\n (\"z_ohm\", \"f8\")],\n \"shunt\": [(\"bus\", \"u4\"),\n (\"name\", dtype(object)),\n (\"q_mvar\", \"f8\"),\n (\"p_mw\", \"f8\"),\n (\"vn_kv\", \"f8\"),\n (\"step\", \"u4\"),\n (\"max_step\", \"u4\"),\n (\"in_service\", \"bool\")],\n \"ext_grid\": [(\"name\", dtype(object)),\n (\"bus\", \"u4\"),\n (\"vm_pu\", \"f8\"),\n (\"va_degree\", \"f8\"),\n (\"slack_weight\", 'f8'),\n (\"in_service\", 'bool')],\n \"line\": [(\"name\", dtype(object)),\n (\"std_type\", dtype(object)),\n (\"from_bus\", \"u4\"),\n (\"to_bus\", \"u4\"),\n (\"length_km\", \"f8\"),\n (\"r_ohm_per_km\", \"f8\"),\n (\"x_ohm_per_km\", \"f8\"),\n (\"c_nf_per_km\", \"f8\"),\n (\"g_us_per_km\", \"f8\"),\n (\"max_i_ka\", \"f8\"),\n (\"df\", \"f8\"),\n (\"parallel\", \"u4\"),\n (\"type\", dtype(object)),\n (\"in_service\", 'bool')],\n \"trafo\": [(\"name\", dtype(object)),\n (\"std_type\", dtype(object)),\n (\"hv_bus\", \"u4\"),\n (\"lv_bus\", \"u4\"),\n (\"sn_mva\", \"f8\"),\n (\"vn_hv_kv\", \"f8\"),\n (\"vn_lv_kv\", \"f8\"),\n (\"vk_percent\", \"f8\"),\n (\"vkr_percent\", \"f8\"),\n (\"pfe_kw\", \"f8\"),\n (\"i0_percent\", \"f8\"),\n (\"shift_degree\", \"f8\"),\n (\"tap_side\", dtype(object)),\n (\"tap_neutral\", \"i4\"),\n (\"tap_min\", \"i4\"),\n (\"tap_max\", \"i4\"),\n (\"tap_step_percent\", \"f8\"),\n (\"tap_step_degree\", \"f8\"),\n (\"tap_pos\", \"i4\"),\n (\"tap_phase_shifter\", 'bool'),\n (\"parallel\", \"u4\"),\n (\"df\", \"f8\"),\n (\"in_service\", 'bool')],\n \"trafo3w\": [(\"name\", dtype(object)),\n (\"std_type\", dtype(object)),\n (\"hv_bus\", \"u4\"),\n (\"mv_bus\", \"u4\"),\n (\"lv_bus\", \"u4\"),\n (\"sn_hv_mva\", \"f8\"),\n (\"sn_mv_mva\", \"f8\"),\n (\"sn_lv_mva\", \"f8\"),\n (\"vn_hv_kv\", \"f8\"),\n (\"vn_mv_kv\", \"f8\"),\n (\"vn_lv_kv\", \"f8\"),\n (\"vk_hv_percent\", \"f8\"),\n (\"vk_mv_percent\", \"f8\"),\n (\"vk_lv_percent\", \"f8\"),\n (\"vkr_hv_percent\", \"f8\"),\n (\"vkr_mv_percent\", \"f8\"),\n (\"vkr_lv_percent\", \"f8\"),\n (\"pfe_kw\", \"f8\"),\n (\"i0_percent\", \"f8\"),\n (\"shift_mv_degree\", \"f8\"),\n (\"shift_lv_degree\", \"f8\"),\n (\"tap_side\", dtype(object)),\n (\"tap_neutral\", \"i4\"),\n (\"tap_min\", \"i4\"),\n (\"tap_max\", \"i4\"),\n (\"tap_step_percent\", \"f8\"),\n (\"tap_step_degree\", \"f8\"),\n (\"tap_pos\", \"i4\"),\n (\"tap_at_star_point\", 'bool'),\n (\"in_service\", 'bool')],\n \"impedance\": [(\"name\", dtype(object)),\n (\"from_bus\", \"u4\"),\n (\"to_bus\", \"u4\"),\n (\"rft_pu\", \"f8\"),\n (\"xft_pu\", \"f8\"),\n (\"rtf_pu\", \"f8\"),\n (\"xtf_pu\", \"f8\"),\n (\"sn_mva\", \"f8\"),\n (\"in_service\", 'bool')],\n \"dcline\": [(\"name\", dtype(object)),\n (\"from_bus\", \"u4\"),\n (\"to_bus\", \"u4\"),\n (\"p_mw\", \"f8\"),\n (\"loss_percent\", 'f8'),\n (\"loss_mw\", 'f8'),\n (\"vm_from_pu\", \"f8\"),\n (\"vm_to_pu\", \"f8\"),\n (\"max_p_mw\", \"f8\"),\n (\"min_q_from_mvar\", \"f8\"),\n (\"min_q_to_mvar\", \"f8\"),\n (\"max_q_from_mvar\", \"f8\"),\n (\"max_q_to_mvar\", \"f8\"),\n (\"in_service\", 'bool')],\n \"ward\": [(\"name\", dtype(object)),\n (\"bus\", \"u4\"),\n (\"ps_mw\", \"f8\"),\n (\"qs_mvar\", \"f8\"),\n (\"qz_mvar\", \"f8\"),\n (\"pz_mw\", \"f8\"),\n (\"in_service\", \"bool\")],\n \"xward\": [(\"name\", dtype(object)),\n (\"bus\", \"u4\"),\n (\"ps_mw\", \"f8\"),\n (\"qs_mvar\", \"f8\"),\n (\"qz_mvar\", \"f8\"),\n (\"pz_mw\", \"f8\"),\n (\"r_ohm\", 
\"f8\"),\n (\"x_ohm\", \"f8\"),\n (\"vm_pu\", \"f8\"),\n (\"slack_weight\", 'f8'),\n (\"in_service\", \"bool\")],\n \"measurement\": [(\"name\", dtype(object)),\n (\"measurement_type\", dtype(object)),\n (\"element_type\", dtype(object)),\n (\"element\", \"uint32\"),\n (\"value\", \"float64\"),\n (\"std_dev\", \"float64\"),\n (\"side\", dtype(object))],\n \"pwl_cost\": [(\"power_type\", dtype(object)),\n (\"element\", \"u4\"),\n (\"et\", dtype(object)),\n (\"points\", dtype(object))],\n \"poly_cost\": [(\"element\", \"u4\"),\n (\"et\", dtype(object)),\n (\"cp0_eur\", dtype(\"f8\")),\n (\"cp1_eur_per_mw\", dtype(\"f8\")),\n (\"cp2_eur_per_mw2\", dtype(\"f8\")),\n (\"cq0_eur\", dtype(\"f8\")),\n (\"cq1_eur_per_mvar\", dtype(\"f8\")),\n (\"cq2_eur_per_mvar2\", dtype(\"f8\"))\n ],\n 'characteristic': [\n ('object', dtype(object))\n ],\n 'controller': [\n ('object', dtype(object)),\n ('in_service', \"bool\"),\n ('order', \"float64\"),\n ('level', dtype(object)),\n ('initial_run', \"bool\"),\n (\"recycle\", dtype(object))\n ],\n # geodata\n \"line_geodata\": [(\"coords\", dtype(object))],\n \"bus_geodata\": [(\"x\", \"f8\"), (\"y\", \"f8\"), (\"coords\", dtype(object))],\n\n # result tables\n \"_empty_res_bus\": [(\"vm_pu\", \"f8\"),\n (\"va_degree\", \"f8\"),\n (\"p_mw\", \"f8\"),\n (\"q_mvar\", \"f8\")],\n \"_empty_res_ext_grid\": [(\"p_mw\", \"f8\"),\n (\"q_mvar\", \"f8\")],\n \"_empty_res_line\": [(\"p_from_mw\", \"f8\"),\n (\"q_from_mvar\", \"f8\"),\n (\"p_to_mw\", \"f8\"),\n (\"q_to_mvar\", \"f8\"),\n (\"pl_mw\", \"f8\"),\n (\"ql_mvar\", \"f8\"),\n (\"i_from_ka\", \"f8\"),\n (\"i_to_ka\", \"f8\"),\n (\"i_ka\", \"f8\"),\n (\"vm_from_pu\", \"f8\"),\n (\"va_from_degree\", \"f8\"),\n (\"vm_to_pu\", \"f8\"),\n (\"va_to_degree\", \"f8\"),\n (\"loading_percent\", \"f8\")],\n \"_empty_res_trafo\": [(\"p_hv_mw\", \"f8\"),\n (\"q_hv_mvar\", \"f8\"),\n (\"p_lv_mw\", \"f8\"),\n (\"q_lv_mvar\", \"f8\"),\n (\"pl_mw\", \"f8\"),\n (\"ql_mvar\", \"f8\"),\n (\"i_hv_ka\", \"f8\"),\n (\"i_lv_ka\", \"f8\"),\n (\"vm_hv_pu\", \"f8\"),\n (\"va_hv_degree\", \"f8\"),\n (\"vm_lv_pu\", \"f8\"),\n (\"va_lv_degree\", \"f8\"),\n (\"loading_percent\", \"f8\")],\n \"_empty_res_load\": [(\"p_mw\", \"f8\"),\n (\"q_mvar\", \"f8\")],\n \"_empty_res_asymmetric_load\": [(\"p_mw\", \"f8\"),\n (\"q_mvar\", \"f8\")],\n \"_empty_res_asymmetric_sgen\": [(\"p_mw\", \"f8\"),\n (\"q_mvar\", \"f8\")],\n \"_empty_res_motor\": [(\"p_mw\", \"f8\"),\n (\"q_mvar\", \"f8\")],\n \"_empty_res_sgen\": [(\"p_mw\", \"f8\"),\n (\"q_mvar\", \"f8\")],\n \"_empty_res_shunt\": [(\"p_mw\", \"f8\"),\n (\"q_mvar\", \"f8\"),\n (\"vm_pu\", \"f8\")],\n \"_empty_res_impedance\": [(\"p_from_mw\", \"f8\"),\n (\"q_from_mvar\", \"f8\"),\n (\"p_to_mw\", \"f8\"),\n (\"q_to_mvar\", \"f8\"),\n (\"pl_mw\", \"f8\"),\n (\"ql_mvar\", \"f8\"),\n (\"i_from_ka\", \"f8\"),\n (\"i_to_ka\", \"f8\")],\n \"_empty_res_dcline\": [(\"p_from_mw\", \"f8\"),\n (\"q_from_mvar\", \"f8\"),\n (\"p_to_mw\", \"f8\"),\n (\"q_to_mvar\", \"f8\"),\n (\"pl_mw\", \"f8\"),\n (\"vm_from_pu\", \"f8\"),\n (\"va_from_degree\", \"f8\"),\n (\"vm_to_pu\", \"f8\"),\n (\"va_to_degree\", \"f8\")],\n \"_empty_res_ward\": [(\"p_mw\", \"f8\"),\n (\"q_mvar\", \"f8\"),\n (\"vm_pu\", \"f8\")],\n \"_empty_res_xward\": [(\"p_mw\", \"f8\"),\n (\"q_mvar\", \"f8\"),\n (\"vm_pu\", \"f8\"),\n (\"va_internal_degree\", \"f8\"),\n (\"vm_internal_pu\", \"f8\")],\n\n \"_empty_res_trafo_3ph\": [(\"p_a_hv_mw\", \"f8\"),\n (\"q_a_hv_mvar\", \"f8\"),\n (\"p_b_hv_mw\", \"f8\"),\n (\"q_b_hv_mvar\", \"f8\"),\n (\"p_c_hv_mw\", 
\"f8\"),\n (\"q_c_hv_mvar\", \"f8\"),\n (\"p_a_lv_mw\", \"f8\"),\n (\"q_a_lv_mvar\", \"f8\"),\n (\"p_b_lv_mw\", \"f8\"),\n (\"q_b_lv_mvar\", \"f8\"),\n (\"p_c_lv_mw\", \"f8\"),\n (\"q_c_lv_mvar\", \"f8\"),\n (\"p_a_l_mw\", \"f8\"),\n (\"q_a_l_mvar\", \"f8\"),\n (\"p_b_l_mw\", \"f8\"),\n (\"q_b_l_mvar\", \"f8\"),\n (\"p_c_l_mw\", \"f8\"),\n (\"q_c_l_mvar\", \"f8\"),\n (\"i_a_hv_ka\", \"f8\"),\n (\"i_a_lv_ka\", \"f8\"),\n (\"i_b_hv_ka\", \"f8\"),\n (\"i_b_lv_ka\", \"f8\"),\n (\"i_c_hv_ka\", \"f8\"),\n (\"i_c_lv_ka\", \"f8\"),\n # (\"i_n_hv_ka\", \"f8\"),\n # (\"i_n_lv_ka\", \"f8\"),\n (\"loading_a_percent\", \"f8\"),\n (\"loading_b_percent\", \"f8\"),\n (\"loading_c_percent\", \"f8\"),\n (\"loading_percent\", \"f8\")],\n \"_empty_res_trafo3w\": [(\"p_hv_mw\", \"f8\"),\n (\"q_hv_mvar\", \"f8\"),\n (\"p_mv_mw\", \"f8\"),\n (\"q_mv_mvar\", \"f8\"),\n (\"p_lv_mw\", \"f8\"),\n (\"q_lv_mvar\", \"f8\"),\n (\"pl_mw\", \"f8\"),\n (\"ql_mvar\", \"f8\"),\n (\"i_hv_ka\", \"f8\"),\n (\"i_mv_ka\", \"f8\"),\n (\"i_lv_ka\", \"f8\"),\n (\"vm_hv_pu\", \"f8\"),\n (\"va_hv_degree\", \"f8\"),\n (\"vm_mv_pu\", \"f8\"),\n (\"va_mv_degree\", \"f8\"),\n (\"vm_lv_pu\", \"f8\"),\n (\"va_lv_degree\", \"f8\"),\n (\"va_internal_degree\", \"f8\"),\n (\"vm_internal_pu\", \"f8\"),\n (\"loading_percent\", \"f8\")],\n \"_empty_res_bus_3ph\": [(\"vm_a_pu\", \"f8\"),\n (\"va_a_degree\", \"f8\"),\n (\"vm_b_pu\", \"f8\"),\n (\"va_b_degree\", \"f8\"),\n (\"vm_c_pu\", \"f8\"),\n (\"va_c_degree\", \"f8\"),\n (\"p_a_mw\", \"f8\"),\n (\"q_a_mvar\", \"f8\"),\n (\"p_b_mw\", \"f8\"),\n (\"q_b_mvar\", \"f8\"),\n (\"p_c_mw\", \"f8\"),\n (\"q_c_mvar\", \"f8\")],\n \"_empty_res_ext_grid_3ph\": [(\"p_a_mw\", \"f8\"),\n (\"q_a_mvar\", \"f8\"),\n (\"p_b_mw\", \"f8\"),\n (\"q_b_mvar\", \"f8\"),\n (\"p_c_mw\", \"f8\"),\n (\"q_c_mvar\", \"f8\")],\n \"_empty_res_line_3ph\": [(\"p_a_from_mw\", \"f8\"),\n (\"q_a_from_mvar\", \"f8\"),\n (\"p_b_from_mw\", \"f8\"),\n (\"q_b_from_mvar\", \"f8\"),\n (\"q_c_from_mvar\", \"f8\"),\n (\"p_a_to_mw\", \"f8\"),\n (\"q_a_to_mvar\", \"f8\"),\n (\"p_b_to_mw\", \"f8\"),\n (\"q_b_to_mvar\", \"f8\"),\n (\"p_c_to_mw\", \"f8\"),\n (\"q_c_to_mvar\", \"f8\"),\n (\"p_a_l_mw\", \"f8\"),\n (\"q_a_l_mvar\", \"f8\"),\n (\"p_b_l_mw\", \"f8\"),\n (\"q_b_l_mvar\", \"f8\"),\n (\"p_c_l_mw\", \"f8\"),\n (\"q_c_l_mvar\", \"f8\"),\n (\"i_a_from_ka\", \"f8\"),\n (\"i_a_to_ka\", \"f8\"),\n (\"i_b_from_ka\", \"f8\"),\n (\"i_b_to_ka\", \"f8\"),\n (\"i_c_from_ka\", \"f8\"),\n (\"i_c_to_ka\", \"f8\"),\n (\"i_a_ka\", \"f8\"),\n (\"i_b_ka\", \"f8\"),\n (\"i_c_ka\", \"f8\"),\n (\"i_n_from_ka\", \"f8\"),\n (\"i_n_to_ka\", \"f8\"),\n (\"i_n_ka\", \"f8\"),\n (\"loading_a_percent\", \"f8\"),\n (\"loading_b_percent\", \"f8\"),\n (\"loading_c_percent\", \"f8\")],\n \"_empty_res_asymmetric_load_3ph\": [(\"p_a_mw\", \"f8\"),\n (\"q_a_mvar\", \"f8\"),\n (\"p_b_mw\", \"f8\"),\n (\"q_b_mvar\", \"f8\"),\n (\"p_c_mw\", \"f8\"),\n (\"q_c_mvar\", \"f8\")],\n \"_empty_res_asymmetric_sgen_3ph\": [(\"p_a_mw\", \"f8\"),\n (\"q_a_mvar\", \"f8\"),\n (\"p_b_mw\", \"f8\"),\n (\"q_b_mvar\", \"f8\"),\n (\"p_c_mw\", \"f8\"),\n (\"q_c_mvar\", \"f8\")],\n \"_empty_res_storage\": [(\"p_mw\", \"f8\"),\n (\"q_mvar\", \"f8\")],\n \"_empty_res_storage_3ph\": [(\"p_a_mw\", \"f8\"), (\"p_b_mw\", \"f8\"), (\"p_c_mw\", \"f8\"),\n (\"q_a_mvar\", \"f8\"), (\"q_b_mvar\", \"f8\"), (\"q_c_mvar\", \"f8\")],\n \"_empty_res_gen\": [(\"p_mw\", \"f8\"),\n (\"q_mvar\", \"f8\"),\n (\"va_degree\", \"f8\"),\n (\"vm_pu\", \"f8\")],\n\n # internal\n \"_ppc\": None,\n \"_ppc0\": None,\n 
\"_ppc1\": None,\n \"_ppc2\": None,\n \"_is_elements\": None,\n \"_pd2ppc_lookups\": {\"bus\": None,\n \"ext_grid\": None,\n \"gen\": None,\n \"branch\": None},\n \"version\": __version__,\n \"converged\": False,\n \"name\": name,\n \"f_hz\": f_hz,\n \"sn_mva\": sn_mva\n })\n\n net._empty_res_load_3ph = net._empty_res_load\n net._empty_res_sgen_3ph = net._empty_res_sgen\n net._empty_res_storage_3ph = net._empty_res_storage\n\n for s in net:\n if isinstance(net[s], list):\n net[s] = pd.DataFrame(zeros(0, dtype=net[s]), index=pd.Index([], dtype=np.int64))\n if add_stdtypes:\n add_basic_std_types(net)\n else:\n net.std_types = {\"line\": {}, \"trafo\": {}, \"trafo3w\": {}}\n for mode in [\"pf\", \"se\", \"sc\", \"pf_3ph\"]:\n reset_results(net, mode)\n net['user_pf_options'] = dict()\n return net\n\n\ndef create_bus(net, vn_kv, name=None, index=None, geodata=None, type=\"b\", zone=None,\n in_service=True, max_vm_pu=nan, min_vm_pu=nan, coords=None, **kwargs):\n \"\"\"\n Adds one bus in table net[\"bus\"].\n\n Busses are the nodes of the network that all other elements connect to.\n\n INPUT:\n **net** (pandapowerNet) - The pandapower network in which the element is created\n\n OPTIONAL:\n **name** (string, default None) - the name for this bus\n\n **index** (int, default None) - Force a specified ID if it is available. If None, the \\\n index one higher than the highest already existing index is selected.\n\n **vn_kv** (float) - The grid voltage level.\n\n **geodata** ((x,y)-tuple, default None) - coordinates used for plotting\n\n **type** (string, default \"b\") - Type of the bus. \"n\" - node,\n \"b\" - busbar, \"m\" - muff\n\n **zone** (string, None) - grid region\n\n **in_service** (boolean) - True for in_service or False for out of service\n\n **max_vm_pu** (float, NAN) - Maximum bus voltage in p.u. - necessary for OPF\n\n **min_vm_pu** (float, NAN) - Minimum bus voltage in p.u. - necessary for OPF\n\n **coords** (list (len=2) of tuples (len=2), default None) - busbar coordinates to plot\n the bus with multiple points. coords is typically a list of tuples (start and endpoint of\n the busbar) - Example: [(x1, y1), (x2, y2)]\n\n OUTPUT:\n **index** (int) - The unique ID of the created element\n\n EXAMPLE:\n create_bus(net, name = \"bus1\")\n \"\"\"\n index = _get_index_with_check(net, \"bus\", index)\n\n entries = dict(zip([\"name\", \"vn_kv\", \"type\", \"zone\", \"in_service\"],\n [name, vn_kv, type, zone, bool(in_service)]))\n\n _set_entries(net, \"bus\", index, True, **entries, **kwargs)\n\n if geodata is not None:\n if len(geodata) != 2:\n raise UserWarning(\"geodata must be given as (x, y) tuple\")\n net[\"bus_geodata\"].loc[index, [\"x\", \"y\"]] = geodata\n\n if coords is not None:\n net[\"bus_geodata\"].at[index, \"coords\"] = coords\n\n # column needed by OPF. 0. and 2. 
are the default maximum / minimum voltages\n _create_column_and_set_value(net, index, min_vm_pu, \"min_vm_pu\", \"bus\", default_val=0.)\n _create_column_and_set_value(net, index, max_vm_pu, \"max_vm_pu\", \"bus\", default_val=2.)\n\n return index\n\n\ndef create_buses(net, nr_buses, vn_kv, index=None, name=None, type=\"b\", geodata=None,\n zone=None, in_service=True, max_vm_pu=None, min_vm_pu=None, coords=None, **kwargs):\n \"\"\"\n Adds several buses in table net[\"bus\"] at once.\n\n Busses are the nodal points of the network that all other elements connect to.\n\n Input:\n **net** (pandapowerNet) - The pandapower network in which the element is created\n\n **nr_buses** (int) - The number of buses that is created\n\n OPTIONAL:\n **name** (string, default None) - the name for this bus\n\n **index** (int, default None) - Force specified IDs if available. If None, the indices \\\n higher than the highest already existing index are selected.\n\n **vn_kv** (float) - The grid voltage level.\n\n **geodata** ((x,y)-tuple or list of tuples with length == nr_buses, default None) -\n coordinates used for plotting\n\n **type** (string, default \"b\") - Type of the bus. \"n\" - auxilary node,\n \"b\" - busbar, \"m\" - muff\n\n **zone** (string, None) - grid region\n\n **in_service** (boolean) - True for in_service or False for out of service\n\n **max_vm_pu** (float, NAN) - Maximum bus voltage in p.u. - necessary for OPF\n\n **min_vm_pu** (float, NAN) - Minimum bus voltage in p.u. - necessary for OPF\n\n **coords** (list (len=nr_buses) of list (len=2) of tuples (len=2), default None) - busbar\n coordinates to plot the bus with multiple points. coords is typically a list of tuples\n (start and endpoint of the busbar) - Example for 3 buses:\n [[(x11, y11), (x12, y12)], [(x21, y21), (x22, y22)], [(x31, y31), (x32, y32)]]\n\n\n OUTPUT:\n **index** (int) - The unique indices ID of the created elements\n\n EXAMPLE:\n create_bus(net, name = \"bus1\")\n \"\"\"\n index = _get_multiple_index_with_check(net, \"bus\", index, nr_buses)\n\n entries = {\"vn_kv\": vn_kv, \"type\": type, \"zone\": zone, \"in_service\": in_service, \"name\": name}\n _add_series_to_entries(entries, index, \"min_vm_pu\", min_vm_pu)\n _add_series_to_entries(entries, index, \"max_vm_pu\", max_vm_pu)\n _set_multiple_entries(net, \"bus\", index, **entries, **kwargs)\n\n if geodata is not None:\n # works with a 2-tuple or a matching array\n net.bus_geodata = pd.concat([net.bus_geodata,\n pd.DataFrame(zeros((len(index), len(net.bus_geodata.columns)), dtype=int),\n index=index, columns=net.bus_geodata.columns)])\n net.bus_geodata.loc[index, :] = nan\n net.bus_geodata.loc[index, [\"x\", \"y\"]] = geodata\n if coords is not None:\n net.bus_geodata = pd.concat([net.bus_geodata, pd.DataFrame(index=index, columns=net.bus_geodata.columns)])\n net[\"bus_geodata\"].loc[index, \"coords\"] = coords\n return index\n\n\ndef create_load(net, bus, p_mw, q_mvar=0, const_z_percent=0, const_i_percent=0, sn_mva=nan,\n name=None, scaling=1., index=None, in_service=True, type='wye', max_p_mw=nan,\n min_p_mw=nan, max_q_mvar=nan, min_q_mvar=nan, controllable=nan):\n \"\"\"\n Adds one load in table net[\"load\"].\n\n All loads are modelled in the consumer system, meaning load is positive and generation is\n negative active power. 
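# Usage sketch (illustrative only): creating buses with create_bus / create_buses.
# Assumes pandapower is importable as ``pp``; all parameter values below are
# made up for illustration and only use the signatures documented above.
import pandapower as pp

net = pp.create_empty_network(name="bus_example", f_hz=50., sn_mva=1.)

# one 110 kV busbar with geodata and OPF voltage limits
hv_bus = pp.create_bus(net, vn_kv=110., name="HV busbar", type="b",
                       geodata=(0., 0.), min_vm_pu=0.95, max_vm_pu=1.05)

# three 20 kV nodes in a single call; the returned index is array-like
mv_buses = pp.create_buses(net, nr_buses=3, vn_kv=20.,
                           name=["MV %d" % i for i in range(3)])

# every bus becomes one row of net.bus with the dtypes declared further above
print(net.bus[["name", "vn_kv", "type", "in_service"]])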
Please pay attention to the correct signing of the reactive power as\n well.\n\n INPUT:\n **net** - The net within this load should be created\n\n **bus** (int) - The bus id to which the load is connected\n\n OPTIONAL:\n **p_mw** (float, default 0) - The active power of the load\n\n - postive value -> load\n - negative value -> generation\n\n **q_mvar** (float, default 0) - The reactive power of the load\n\n **const_z_percent** (float, default 0) - percentage of p_mw and q_mvar that will be \\\n associated to constant impedance load at rated voltage\n\n **const_i_percent** (float, default 0) - percentage of p_mw and q_mvar that will be \\\n associated to constant current load at rated voltage\n\n **sn_mva** (float, default None) - Nominal power of the load\n\n **name** (string, default None) - The name for this load\n\n **scaling** (float, default 1.) - An OPTIONAL scaling factor to be set customly.\n Multiplys with p_mw and q_mvar.\n\n **type** (string, 'wye') - type variable to classify the load: wye/delta\n\n **index** (int, None) - Force a specified ID if it is available. If None, the index one \\\n higher than the highest already existing index is selected.\n\n **in_service** (boolean) - True for in_service or False for out of service\n\n **max_p_mw** (float, default NaN) - Maximum active power load - necessary for controllable \\\n loads in for OPF\n\n **min_p_mw** (float, default NaN) - Minimum active power load - necessary for controllable \\\n loads in for OPF\n\n **max_q_mvar** (float, default NaN) - Maximum reactive power load - necessary for \\\n controllable loads in for OPF\n\n **min_q_mvar** (float, default NaN) - Minimum reactive power load - necessary for \\\n controllable loads in OPF\n\n **controllable** (boolean, default NaN) - States, whether a load is controllable or not. \\\n Only respected for OPF; defaults to False if \"controllable\" column exists in DataFrame\n\n OUTPUT:\n **index** (int) - The unique ID of the created element\n\n EXAMPLE:\n create_load(net, bus=0, p_mw=10., q_mvar=2.)\n\n \"\"\"\n _check_node_element(net, bus)\n\n index = _get_index_with_check(net, \"load\", index)\n\n entries = dict(zip([\"name\", \"bus\", \"p_mw\", \"const_z_percent\", \"const_i_percent\", \"scaling\",\n \"q_mvar\", \"sn_mva\", \"in_service\", \"type\"],\n [name, bus, p_mw, const_z_percent, const_i_percent, scaling, q_mvar, sn_mva,\n bool(in_service), type]))\n\n _set_entries(net, \"load\", index, True, **entries)\n\n _create_column_and_set_value(net, index, min_p_mw, \"min_p_mw\", \"load\")\n _create_column_and_set_value(net, index, max_p_mw, \"max_p_mw\", \"load\")\n _create_column_and_set_value(net, index, min_q_mvar, \"min_q_mvar\", \"load\")\n _create_column_and_set_value(net, index, max_q_mvar, \"max_q_mvar\", \"load\")\n _create_column_and_set_value(net, index, controllable, \"controllable\", \"load\", dtyp=bool_,\n default_val=False, default_for_nan=True)\n\n return index\n\n\ndef create_loads(net, buses, p_mw, q_mvar=0, const_z_percent=0, const_i_percent=0, sn_mva=nan,\n name=None, scaling=1., index=None, in_service=True, type=None, max_p_mw=None,\n min_p_mw=None, max_q_mvar=None, min_q_mvar=None, controllable=None, **kwargs):\n \"\"\"\n Adds a number of loads in table net[\"load\"].\n\n All loads are modelled in the consumer system, meaning load is positive and generation is\n negative active power. 
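# Usage sketch (illustrative only): a single ZIP-type load created with create_load.
# Assumes pandapower as ``pp``; the bus is created first so the node check above
# does not raise. Values are made up for illustration.
import pandapower as pp

net = pp.create_empty_network()
b = pp.create_bus(net, vn_kv=0.4)

# 20 % constant impedance, 10 % constant current, remainder constant power; the
# OPF columns (min_p_mw, max_p_mw, controllable) are only added because they are passed
ld = pp.create_load(net, bus=b, p_mw=0.03, q_mvar=0.01,
                    const_z_percent=20., const_i_percent=10.,
                    min_p_mw=0., max_p_mw=0.05, controllable=True,
                    name="household feeder")
print(net.load.loc[ld])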
Please pay attention to the correct signing of the reactive power as\n well.\n\n INPUT:\n **net** - The net within this load should be created\n\n **buses** (list of int) - A list of bus ids to which the loads are connected\n\n OPTIONAL:\n **p_mw** (list of floats) - The active power of the loads\n\n - postive value -> load\n - negative value -> generation\n\n **q_mvar** (list of floats, default 0) - The reactive power of the loads\n\n **const_z_percent** (list of floats, default 0) - percentage of p_mw and q_mvar that will \\\n be associated to constant impedance loads at rated voltage\n\n **const_i_percent** (list of floats, default 0) - percentage of p_mw and q_mvar that will \\\n be associated to constant current load at rated voltage\n\n **sn_mva** (list of floats, default None) - Nominal power of the loads\n\n **name** (list of strings, default None) - The name for this load\n\n **scaling** (list of floats, default 1.) - An OPTIONAL scaling factor to be set customly.\n Multiplys with p_mw and q_mvar.\n\n **type** (string, None) - type variable to classify the load\n\n **index** (list of int, None) - Force a specified ID if it is available. If None, the index\\\n is set to a range between one higher than the highest already existing index and the \\\n length of loads that shall be created.\n\n **in_service** (list of boolean) - True for in_service or False for out of service\n\n **max_p_mw** (list of floats, default NaN) - Maximum active power load - necessary for \\\n controllable loads in for OPF\n\n **min_p_mw** (list of floats, default NaN) - Minimum active power load - necessary for \\\n controllable loads in for OPF\n\n **max_q_mvar** (list of floats, default NaN) - Maximum reactive power load - necessary for \\\n controllable loads in for OPF\n\n **min_q_mvar** (list of floats, default NaN) - Minimum reactive power load - necessary for \\\n controllable loads in OPF\n\n **controllable** (list of boolean, default NaN) - States, whether a load is controllable \\\n or not. Only respected for OPF\n Defaults to False if \"controllable\" column exists in DataFrame\n\n OUTPUT:\n **index** (int) - The unique IDs of the created elements\n\n EXAMPLE:\n create_loads(net, buses=[0, 2], p_mw=[10., 5.], q_mvar=[2., 0.])\n\n \"\"\"\n _check_multiple_node_elements(net, buses)\n\n index = _get_multiple_index_with_check(net, \"load\", index, len(buses))\n\n entries = {\"bus\": buses, \"p_mw\": p_mw, \"q_mvar\": q_mvar, \"sn_mva\": sn_mva,\n \"const_z_percent\": const_z_percent, \"const_i_percent\": const_i_percent,\n \"scaling\": scaling, \"in_service\": in_service, \"name\": name, \"type\": type}\n\n _add_series_to_entries(entries, index, \"min_p_mw\", min_p_mw)\n _add_series_to_entries(entries, index, \"max_p_mw\", max_p_mw)\n _add_series_to_entries(entries, index, \"min_q_mvar\", min_q_mvar)\n _add_series_to_entries(entries, index, \"max_q_mvar\", max_q_mvar)\n _add_series_to_entries(entries, index, \"controllable\", controllable, dtyp=bool_,\n default_val=False)\n\n _set_multiple_entries(net, \"load\", index, **entries, **kwargs)\n\n return index\n\n\ndef create_asymmetric_load(net, bus, p_a_mw=0, p_b_mw=0, p_c_mw=0, q_a_mvar=0, q_b_mvar=0,\n q_c_mvar=0, sn_mva=nan, name=None, scaling=1., index=None,\n in_service=True, type=\"wye\"):\n \"\"\"\n Adds one 3 phase load in table net[\"asymmetric_load\"].\n\n All loads are modelled in the consumer system, meaning load is positive and generation is\n negative active power. 
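# Usage sketch (illustrative only): several loads at once with create_loads.
# Scalars are broadcast, lists must have one entry per bus; values are made up.
import pandapower as pp

net = pp.create_empty_network()
buses = pp.create_buses(net, nr_buses=3, vn_kv=0.4)

idx = pp.create_loads(net, buses=buses, p_mw=[0.010, 0.020, 0.015],
                      q_mvar=0., name=["L1", "L2", "L3"])
print(net.load.loc[idx, ["bus", "p_mw", "q_mvar", "in_service"]])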
Please pay attention to the correct signing of the reactive power as\n well.\n\n INPUT:\n **net** - The net within this load should be created\n\n **bus** (int) - The bus id to which the load is connected\n\n OPTIONAL:\n **p_a_mw** (float, default 0) - The active power for Phase A load\n\n **p_b_mw** (float, default 0) - The active power for Phase B load\n\n **p_c_mw** (float, default 0) - The active power for Phase C load\n\n **q_a_mvar** float, default 0) - The reactive power for Phase A load\n\n **q_b_mvar** float, default 0) - The reactive power for Phase B load\n\n **q_c_mvar** (float, default 0) - The reactive power for Phase C load\n\n **sn_kva** (float, default: None) - Nominal power of the load\n\n **name** (string, default: None) - The name for this load\n\n **scaling** (float, default: 1.) - An OPTIONAL scaling factor to be set customly\n Multiplys with p_mw and q_mvar of all phases.\n\n **type** (string,default: wye) - type variable to classify three ph load: delta/wye\n\n **index** (int,default: None) - Force a specified ID if it is available. If None, the index\\\n one higher than the highest already existing index is selected.\n\n **in_service** (boolean) - True for in_service or False for out of service\n\n OUTPUT:\n **index** (int) - The unique ID of the created element\n\n EXAMPLE:\n **create_asymmetric_load(net, bus=0, p_c_mw = 9., q_c_mvar = 1.8)**\n\n \"\"\"\n _check_node_element(net, bus)\n\n index = _get_index_with_check(net, \"asymmetric_load\", index, name=\"3 phase asymmetric_load\")\n\n entries = dict(zip([\"name\", \"bus\", \"p_a_mw\", \"p_b_mw\", \"p_c_mw\", \"scaling\", \"q_a_mvar\",\n \"q_b_mvar\", \"q_c_mvar\", \"sn_mva\", \"in_service\", \"type\"],\n [name, bus, p_a_mw, p_b_mw, p_c_mw, scaling, q_a_mvar, q_b_mvar, q_c_mvar,\n sn_mva, bool(in_service), type]))\n\n _set_entries(net, \"asymmetric_load\", index, True, **entries)\n\n return index\n\n\n# =============================================================================\n# def create_impedance_load(net, bus, r_A , r_B , r_C, x_A=0, x_B=0, x_C=0,\n# sn_mva=nan, name=None, scaling=1.,\n# index=None, in_service=True, type=None,\n# ):\n# \"\"\"\n# Creates a constant impedance load element ABC.\n#\n# INPUT:\n# **net** - The net within this constant impedance load should be created\n#\n# **bus** (int) - The bus id to which the load is connected\n#\n# **sn_mva** (float) - rated power of the load\n#\n# **r_A** (float) - Resistance in Phase A\n# **r_B** (float) - Resistance in Phase B\n# **r_C** (float) - Resistance in Phase C\n# **x_A** (float) - Reactance in Phase A\n# **x_B** (float) - Reactance in Phase B\n# **x_C** (float) - Reactance in Phase C\n#\n#\n# **kwargs are passed on to the create_load function\n#\n# OUTPUT:\n# **index** (int) - The unique ID of the created load\n#\n# Load elements are modeled from a consumer point of view. 
Active power will therefore always be\n# positive, reactive power will be positive for under-excited behavior (Q absorption, decreases voltage) and negative for over-excited behavior (Q injection, increases voltage)\n# \"\"\"\n# if bus not in net[\"bus\"].index.values:\n# raise UserWarning(\"Cannot attach to bus %s, bus does not exist\" % bus)\n#\n# if index is None:\n# index = get_free_id(net[\"asymmetric_load\"])\n# if index in net[\"impedance_load\"].index:\n# raise UserWarning(\"A 3 phase asymmetric_load with the id %s already exists\" % index)\n#\n# # store dtypes\n# dtypes = net.impedance_load.dtypes\n#\n# net.impedance_load.loc[index, [\"name\", \"bus\", \"r_A\",\"r_B\",\"r_C\", \"scaling\",\n# \"x_A\",\"x_B\",\"x_C\",\"sn_mva\", \"in_service\", \"type\"]] = \\\n# [name, bus, r_A,r_B,r_C, scaling,\n# x_A,x_B,x_C,sn_mva, bool(in_service), type]\n#\n# # and preserve dtypes\n# _preserve_dtypes(net.impedance_load, dtypes)\n#\n# return index\n#\n# =============================================================================\n\n\ndef create_load_from_cosphi(net, bus, sn_mva, cos_phi, mode, **kwargs):\n \"\"\"\n Creates a load element from rated power and power factor cos(phi).\n\n INPUT:\n **net** - The net within this static generator should be created\n\n **bus** (int) - The bus id to which the load is connected\n\n **sn_mva** (float) - rated power of the load\n\n **cos_phi** (float) - power factor cos_phi\n\n **mode** (str) - \"underexcited\" (Q absorption, decreases voltage) or \"overexcited\" (Q injection, increases voltage)\n\n OPTIONAL:\n same as in create_load, keyword arguments are passed to the create_load function\n\n OUTPUT:\n **index** (int) - The unique ID of the created load\n\n Load elements are modeled from a consumer point of view. Active power will therefore always be\n positive, reactive power will be positive for underexcited behavior (Q absorption, decreases voltage) and negative for\n overexcited behavior (Q injection, increases voltage).\n \"\"\"\n from pandapower.toolbox import pq_from_cosphi\n p_mw, q_mvar = pq_from_cosphi(sn_mva, cos_phi, qmode=mode, pmode=\"load\")\n return create_load(net, bus, sn_mva=sn_mva, p_mw=p_mw, q_mvar=q_mvar, **kwargs)\n\n\ndef create_sgen(net, bus, p_mw, q_mvar=0, sn_mva=nan, name=None, index=None,\n scaling=1., type='wye', in_service=True, max_p_mw=nan, min_p_mw=nan,\n max_q_mvar=nan, min_q_mvar=nan, controllable=nan, k=nan, rx=nan,\n current_source=True):\n \"\"\"\n Adds one static generator in table net[\"sgen\"].\n\n Static generators are modelled as positive and constant PQ power. This element is used to model\n generators with a constant active and reactive power feed-in. If you want to model a voltage\n controlled generator, use the generator element instead.\n\n gen, sgen and ext_grid in the grid are modelled in the generator system!\n If you want to model the generation of power, you have to assign a positive active power\n to the generator. 
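# Usage sketch (illustrative only): create_load_from_cosphi.
# The function delegates to pandapower.toolbox.pq_from_cosphi, i.e. essentially
# p = sn_mva * cos_phi and |q| = sn_mva * sin(arccos(cos_phi)), with the sign of
# q fixed by ``mode`` (positive for an underexcited load). Values are made up.
import math
import pandapower as pp

net = pp.create_empty_network()
b = pp.create_bus(net, vn_kv=20.)
ld = pp.create_load_from_cosphi(net, bus=b, sn_mva=2.5, cos_phi=0.95,
                                mode="underexcited", name="cosphi load")

# cross-check against the trigonometric relation stated above
print(net.load.loc[ld, ["p_mw", "q_mvar"]])
print(2.5 * 0.95, 2.5 * math.sin(math.acos(0.95)))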
Please pay attention to the correct signing of the\n reactive power as well (positive for injection and negative for consumption).\n\n INPUT:\n **net** - The net within this static generator should be created\n\n **bus** (int) - The bus id to which the static generator is connected\n\n **p_mw** (float) - The active power of the static generator (positive for generation!)\n\n OPTIONAL:\n **q_mvar** (float, 0) - The reactive power of the sgen\n\n **sn_mva** (float, None) - Nominal power of the sgen\n\n **name** (string, None) - The name for this sgen\n\n **index** (int, None) - Force a specified ID if it is available. If None, the index one \\\n higher than the highest already existing index is selected.\n\n **scaling** (float, 1.) - An OPTIONAL scaling factor to be set customly.\n Multiplys with p_mw and q_mvar.\n\n **type** (string, None) - Three phase Connection type of the static generator: wye/delta\n\n **in_service** (boolean) - True for in_service or False for out of service\n\n **max_p_mw** (float, NaN) - Maximum active power injection - necessary for \\\n controllable sgens in OPF\n\n **min_p_mw** (float, NaN) - Minimum active power injection - necessary for \\\n controllable sgens in OPF\n\n **max_q_mvar** (float, NaN) - Maximum reactive power injection - necessary for \\\n controllable sgens in OPF\n\n **min_q_mvar** (float, NaN) - Minimum reactive power injection - necessary for \\\n controllable sgens in OPF\n\n **controllable** (bool, NaN) - Whether this generator is controllable by the optimal \\\n powerflow; defaults to False if \"controllable\" column exists in DataFrame\n\n **k** (float, NaN) - Ratio of nominal current to short circuit current\n\n **rx** (float, NaN) - R/X ratio for short circuit impedance. Only relevant if type is \\\n specified as motor so that sgen is treated as asynchronous motor\n\n **current_source** (bool, True) - Model this sgen as a current source during short-\\\n circuit calculations; useful in some cases, for example the simulation of full-\\\n size converters per IEC 60909-0:2016.\n\n OUTPUT:\n **index** (int) - The unique ID of the created sgen\n\n EXAMPLE:\n create_sgen(net, 1, p_mw = -120)\n\n \"\"\"\n _check_node_element(net, bus)\n\n index = _get_index_with_check(net, \"sgen\", index, name=\"static generator\")\n\n entries = dict(zip([\"name\", \"bus\", \"p_mw\", \"scaling\", \"q_mvar\", \"sn_mva\", \"in_service\", \"type\",\n \"current_source\"], [name, bus, p_mw, scaling, q_mvar, sn_mva,\n bool(in_service), type, current_source]))\n\n _set_entries(net, \"sgen\", index, True, **entries)\n\n _create_column_and_set_value(net, index, min_p_mw, \"min_p_mw\", \"sgen\")\n _create_column_and_set_value(net, index, max_p_mw, \"max_p_mw\", \"sgen\")\n _create_column_and_set_value(net, index, min_q_mvar, \"min_q_mvar\", \"sgen\")\n _create_column_and_set_value(net, index, max_q_mvar, \"max_q_mvar\", \"sgen\")\n _create_column_and_set_value(net, index, k, \"k\", \"sgen\")\n _create_column_and_set_value(net, index, rx, \"rx\", \"sgen\")\n _create_column_and_set_value(net, index, controllable, \"controllable\", \"sgen\", dtyp=bool_,\n default_val=False, default_for_nan=True)\n\n return index\n\n\ndef create_sgens(net, buses, p_mw, q_mvar=0, sn_mva=nan, name=None, index=None,\n scaling=1., type='wye', in_service=True, max_p_mw=None, min_p_mw=None,\n max_q_mvar=None, min_q_mvar=None, controllable=None, k=None, rx=None,\n current_source=True, **kwargs):\n \"\"\"\n Adds a number of sgens in table net[\"sgen\"].\n\n Static generators are modelled as 
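# Usage sketch (illustrative only): a PV park as static generator via create_sgen.
# k, rx and current_source are only evaluated by the short-circuit calculation
# (full-size converter model per IEC 60909); values are made up for illustration.
import pandapower as pp

net = pp.create_empty_network()
b = pp.create_bus(net, vn_kv=20.)
sg = pp.create_sgen(net, bus=b, p_mw=2.0, q_mvar=0.3, sn_mva=2.5,
                    name="PV park", k=1.2, rx=0.1, current_source=True)
print(net.sgen.loc[sg, ["p_mw", "q_mvar", "k", "rx", "current_source"]])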
positive and constant PQ power. This element is used to model\n generators with a constant active and reactive power feed-in. If you want to model a voltage\n controlled generator, use the generator element instead.\n\n INPUT:\n **net** - The net within this load should be created\n\n **buses** (list of int) - A list of bus ids to which the loads are connected\n\n OPTIONAL:\n\n **p_mw** (list of floats) - The active power of the sgens\n\n - postive value -> generation\n - negative value -> load\n\n **q_mvar** (list of floats, default 0) - The reactive power of the sgens\n\n **sn_mva** (list of floats, default None) - Nominal power of the sgens\n\n **name** (list of strings, default None) - The name for this sgen\n\n **scaling** (list of floats, default 1.) - An OPTIONAL scaling factor to be set customly.\n Multiplys with p_mw and q_mvar.\n\n **type** (string, None) - type variable to classify the sgen\n\n **index** (list of int, None) - Force a specified ID if it is available. If None, the index\\\n is set to a range between one higher than the highest already existing index and the \\\n length of sgens that shall be created.\n\n **in_service** (list of boolean) - True for in_service or False for out of service\n\n **max_p_mw** (list of floats, default NaN) - Maximum active power sgen - necessary for \\\n controllable sgens in for OPF\n\n **min_p_mw** (list of floats, default NaN) - Minimum active power sgen - necessary for \\\n controllable sgens in for OPF\n\n **max_q_mvar** (list of floats, default NaN) - Maximum reactive power sgen - necessary for \\\n controllable sgens in for OPF\n\n **min_q_mvar** (list of floats, default NaN) - Minimum reactive power sgen - necessary for \\\n controllable sgens in OPF\n\n **controllable** (list of boolean, default NaN) - States, whether a sgen is controllable \\\n or not. Only respected for OPF\n Defaults to False if \"controllable\" column exists in DataFrame\n\n **k** (list of floats, None) - Ratio of nominal current to short circuit current\n\n **rx** (list of floats, NaN) - R/X ratio for short circuit impedance. 
Only relevant if type\\\n is specified as motor so that sgen is treated as asynchronous motor\n\n **current_source** (list of bool, True) - Model this sgen as a current source during short-\\\n circuit calculations; useful in some cases, for example the simulation of full-\\\n size converters per IEC 60909-0:2016.\n\n OUTPUT:\n **index** (int) - The unique IDs of the created elements\n\n EXAMPLE:\n create_sgens(net, buses=[0, 2], p_mw=[10., 5.], q_mvar=[2., 0.])\n\n \"\"\"\n _check_multiple_node_elements(net, buses)\n\n index = _get_multiple_index_with_check(net, \"sgen\", index, len(buses))\n\n entries = {\"bus\": buses, \"p_mw\": p_mw, \"q_mvar\": q_mvar, \"sn_mva\": sn_mva, \"scaling\": scaling,\n \"in_service\": in_service, \"name\": name, \"type\": type,\n 'current_source': current_source}\n\n _add_series_to_entries(entries, index, \"min_p_mw\", min_p_mw)\n _add_series_to_entries(entries, index, \"max_p_mw\", max_p_mw)\n _add_series_to_entries(entries, index, \"min_q_mvar\", min_q_mvar)\n _add_series_to_entries(entries, index, \"max_q_mvar\", max_q_mvar)\n _add_series_to_entries(entries, index, \"k\", k)\n _add_series_to_entries(entries, index, \"rx\", rx)\n _add_series_to_entries(entries, index, \"controllable\", controllable, dtyp=bool_,\n default_val=False)\n\n _set_multiple_entries(net, \"sgen\", index, **entries, **kwargs)\n\n return index\n\n\n# =============================================================================\n# Create 3ph Sgen\n# =============================================================================\n\ndef create_asymmetric_sgen(net, bus, p_a_mw=0, p_b_mw=0, p_c_mw=0, q_a_mvar=0, q_b_mvar=0,\n q_c_mvar=0, sn_mva=nan,\n name=None, index=None, scaling=1., type='wye', in_service=True):\n \"\"\"\n\n Adds one static generator in table net[\"asymmetric_sgen\"].\n\n Static generators are modelled as negative PQ loads. This element is used to model generators\n with a constant active and reactive power feed-in. Positive active power means generation.\n\n INPUT:\n **net** - The net within this static generator should be created\n\n **bus** (int) - The bus id to which the static generator is connected\n\n OPTIONAL:\n\n **p_a_mw** (float, default 0) - The active power of the static generator : Phase A\n\n **p_b_mw** (float, default 0) - The active power of the static generator : Phase B\n\n **p_c_mw** (float, default 0) - The active power of the static generator : Phase C\n\n **q_a_mvar** (float, default 0) - The reactive power of the sgen : Phase A\n\n **q_b_mvar** (float, default 0) - The reactive power of the sgen : Phase B\n\n **q_c_mvar** (float, default 0) - The reactive power of the sgen : Phase C\n\n **sn_mva** (float, default None) - Nominal power of the sgen\n\n **name** (string, default None) - The name for this sgen\n\n **index** (int, None) - Force a specified ID if it is available. If None, the index one \\\n higher than the highest already existing index is selected.\n\n **scaling** (float, 1.) 
- An OPTIONAL scaling factor to be set customly.\n Multiplys with p_mw and q_mvar of all phases.\n\n **type** (string, 'wye') - Three phase Connection type of the static generator: wye/delta\n\n **in_service** (boolean) - True for in_service or False for out of service\n\n OUTPUT:\n **index** (int) - The unique ID of the created sgen\n\n EXAMPLE:\n create_asymmetric_sgen(net, 1, p_b_mw=0.12)\n\n \"\"\"\n _check_node_element(net, bus)\n\n index = _get_index_with_check(net, \"asymmetric_sgen\", index,\n name=\"3 phase asymmetric static generator\")\n\n entries = dict(zip([\"name\", \"bus\", \"p_a_mw\", \"p_b_mw\", \"p_c_mw\", \"scaling\", \"q_a_mvar\",\n \"q_b_mvar\", \"q_c_mvar\", \"sn_mva\", \"in_service\", \"type\"],\n [name, bus, p_a_mw, p_b_mw, p_c_mw, scaling, q_a_mvar, q_b_mvar, q_c_mvar,\n sn_mva, bool(in_service), type]))\n\n _set_entries(net, \"asymmetric_sgen\", index, True, **entries)\n\n return index\n\n\ndef create_sgen_from_cosphi(net, bus, sn_mva, cos_phi, mode, **kwargs):\n \"\"\"\n Creates an sgen element from rated power and power factor cos(phi).\n\n INPUT:\n **net** - The net within this static generator should be created\n\n **bus** (int) - The bus id to which the static generator is connected\n\n **sn_mva** (float) - rated power of the generator\n\n **cos_phi** (float) - power factor cos_phi\n\n **mode** (str) - \"underexcited\" (Q absorption, decreases voltage) or \"overexcited\" (Q injection, increases voltage)\n\n OUTPUT:\n **index** (int) - The unique ID of the created sgen\n\n gen, sgen, and ext_grid are modelled in the generator point of view. Active power\n will therefore be postive for generation, and reactive power will be negative for\n underexcited behavior (Q absorption, decreases voltage) and\n positive for for overexcited behavior (Q injection, increases voltage).\n \"\"\"\n from pandapower.toolbox import pq_from_cosphi\n p_mw, q_mvar = pq_from_cosphi(sn_mva, cos_phi, qmode=mode, pmode=\"gen\")\n return create_sgen(net, bus, sn_mva=sn_mva, p_mw=p_mw, q_mvar=q_mvar, **kwargs)\n\n\ndef create_storage(net, bus, p_mw, max_e_mwh, q_mvar=0, sn_mva=nan, soc_percent=nan, min_e_mwh=0.0,\n name=None, index=None, scaling=1., type=None, in_service=True, max_p_mw=nan,\n min_p_mw=nan, max_q_mvar=nan, min_q_mvar=nan, controllable=nan):\n \"\"\"\n Adds a storage to the network.\n\n In order to simulate a storage system it is possible to use sgens or loads to model the\n discharging or charging state. 
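# Usage sketch (illustrative only): unbalanced and cos(phi)-based generation.
# create_asymmetric_sgen feeds a single phase; create_sgen_from_cosphi derives
# p/q from rated power and power factor in the generator sign convention
# described above. Values are made up for illustration.
import pandapower as pp

net = pp.create_empty_network()
b = pp.create_bus(net, vn_kv=0.4)
pp.create_asymmetric_sgen(net, bus=b, p_b_mw=0.12, q_b_mvar=0.02, name="1-phase PV")
pp.create_sgen_from_cosphi(net, bus=b, sn_mva=0.5, cos_phi=0.9, mode="overexcited")

print(net.asymmetric_sgen[["bus", "p_b_mw", "q_b_mvar"]])
print(net.sgen[["bus", "p_mw", "q_mvar"]])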
The power of a storage can be positive or negative, so the use\n of either a sgen or a load is (per definition of the elements) not correct.\n To overcome this issue, a storage element can be created.\n\n As pandapower is not a time dependend simulation tool and there is no time domain parameter in\n default power flow calculations, the state of charge (SOC) is not updated during any power flow\n calculation.\n The implementation of energy content related parameters in the storage element allows to create\n customized, time dependend simulations by running several power flow calculations and updating\n variables manually.\n\n INPUT:\n **net** - The net within this storage should be created\n\n **bus** (int) - The bus id to which the storage is connected\n\n **p_mw** (float) - The momentary active power of the storage \\\n (positive for charging, negative for discharging)\n\n **max_e_mwh** (float) - The maximum energy content of the storage \\\n (maximum charge level)\n\n OPTIONAL:\n **q_mvar** (float, default 0) - The reactive power of the storage\n\n **sn_mva** (float, default None) - Nominal power of the storage\n\n **soc_percent** (float, NaN) - The state of charge of the storage\n\n **min_e_mwh** (float, 0) - The minimum energy content of the storage \\\n (minimum charge level)\n\n **name** (string, default None) - The name for this storage\n\n **index** (int, None) - Force a specified ID if it is available. If None, the index one \\\n higher than the highest already existing index is selected.\n\n **scaling** (float, 1.) - An OPTIONAL scaling factor to be set customly.\n Multiplys with p_mw and q_mvar.\n\n **type** (string, None) - type variable to classify the storage\n\n **in_service** (boolean) - True for in_service or False for out of service\n\n **max_p_mw** (float, NaN) - Maximum active power injection - necessary for a \\\n controllable storage in OPF\n\n **min_p_mw** (float, NaN) - Minimum active power injection - necessary for a \\\n controllable storage in OPF\n\n **max_q_mvar** (float, NaN) - Maximum reactive power injection - necessary for a \\\n controllable storage in OPF\n\n **min_q_mvar** (float, NaN) - Minimum reactive power injection - necessary for a \\\n controllable storage in OPF\n\n **controllable** (bool, NaN) - Whether this storage is controllable by the optimal \\\n powerflow; defaults to False if \"controllable\" column exists in DataFrame\n\n OUTPUT:\n **index** (int) - The unique ID of the created storage\n\n EXAMPLE:\n create_storage(net, 1, p_mw = -30, max_e_mwh = 60, soc_percent = 1.0, min_e_mwh = 5)\n\n \"\"\"\n _check_node_element(net, bus)\n\n index = _get_index_with_check(net, \"storage\", index)\n\n entries = dict(zip([\"name\", \"bus\", \"p_mw\", \"q_mvar\", \"sn_mva\", \"scaling\", \"soc_percent\",\n \"min_e_mwh\", \"max_e_mwh\", \"in_service\", \"type\"],\n [name, bus, p_mw, q_mvar, sn_mva, scaling, soc_percent, min_e_mwh, max_e_mwh,\n bool(in_service), type]))\n\n _set_entries(net, \"storage\", index, True, **entries)\n\n # check for OPF parameters and add columns to network table\n _create_column_and_set_value(net, index, min_p_mw, \"min_p_mw\", \"storage\")\n _create_column_and_set_value(net, index, max_p_mw, \"max_p_mw\", \"storage\")\n _create_column_and_set_value(net, index, min_q_mvar, \"min_q_mvar\", \"storage\")\n _create_column_and_set_value(net, index, max_q_mvar, \"max_q_mvar\", \"storage\")\n _create_column_and_set_value(net, index, controllable, \"controllable\", \"storage\",\n dtyp=bool_, default_val=False, 
default_for_nan=True)\n\n return index\n\n\ndef create_gen(net, bus, p_mw, vm_pu=1., sn_mva=nan, name=None, index=None, max_q_mvar=nan,\n min_q_mvar=nan, min_p_mw=nan, max_p_mw=nan, min_vm_pu=nan, max_vm_pu=nan,\n scaling=1., type=None, slack=False, controllable=nan, vn_kv=nan,\n xdss_pu=nan, rdss_ohm=nan, cos_phi=nan, pg_percent=nan, power_station_trafo=None,\n in_service=True, slack_weight=0.0):\n \"\"\"\n Adds a generator to the network.\n\n Generators are always modelled as voltage controlled PV nodes, which is why the input parameter\n is active power and a voltage set point. If you want to model a generator as PQ load with fixed\n reactive power and variable voltage, please use a static generator instead.\n\n INPUT:\n **net** - The net within this generator should be created\n\n **bus** (int) - The bus id to which the generator is connected\n\n OPTIONAL:\n **p_mw** (float, default 0) - The active power of the generator (positive for generation!)\n\n **vm_pu** (float, default 0) - The voltage set point of the generator.\n\n **sn_mva** (float, None) - Nominal power of the generator\n\n **name** (string, None) - The name for this generator\n\n **index** (int, None) - Force a specified ID if it is available. If None, the index one \\\n higher than the highest already existing index is selected.\n\n **scaling** (float, 1.0) - scaling factor which for the active power of the generator\n\n **type** (string, None) - type variable to classify generators\n\n **controllable** (bool, NaN) - True: p_mw, q_mvar and vm_pu limits are enforced for this \\\n generator in OPF\n False: p_mw and vm_pu setpoints are enforced and *limits are ignored*.\n defaults to True if \"controllable\" column exists in DataFrame\n\n **slack_weight** (float, default 0.0) - Contribution factor for distributed slack power\n flow calculation (active power balancing)\n\n powerflow\n\n **vn_kv** (float, NaN) - Rated voltage of the generator for short-circuit calculation\n\n **xdss_pu** (float, NaN) - Subtransient generator reactance for short-circuit calculation\n\n **rdss_ohm** (float, NaN) - Subtransient generator resistance for short-circuit calculation\n\n **cos_phi** (float, NaN) - Rated cosine phi of the generator for short-circuit calculation\n\n **pg_percent** (float, NaN) - Rated pg (voltage control range) of the generator for\n short-circuit calculation\n\n **power_station_trafo** (int, None) - Index of the power station transformer for\n short-circuit calculation\n\n **in_service** (bool, True) - True for in_service or False for out of service\n\n **max_p_mw** (float, default NaN) - Maximum active power injection - necessary for OPF\n\n **min_p_mw** (float, default NaN) - Minimum active power injection - necessary for OPF\n\n **max_q_mvar** (float, default NaN) - Maximum reactive power injection - necessary for OPF\n\n **min_q_mvar** (float, default NaN) - Minimum reactive power injection - necessary for OPF\n\n **min_vm_pu** (float, default NaN) - Minimum voltage magnitude. If not set the bus voltage \\\n limit is taken.\n - necessary for OPF.\n\n **max_vm_pu** (float, default NaN) - Maximum voltage magnitude. 
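# Usage sketch (illustrative only): quasi-static storage simulation.
# As stated in the docstring above, the state of charge is not updated by the
# power flow itself, so a time series loop has to track the energy content
# manually. Network data and the 15 min step are made up for illustration.
import pandapower as pp

net = pp.create_empty_network()
b0 = pp.create_bus(net, vn_kv=0.4)
b1 = pp.create_bus(net, vn_kv=0.4)
pp.create_ext_grid(net, bus=b0)
pp.create_line(net, from_bus=b0, to_bus=b1, length_km=0.2, std_type="NAYY 4x50 SE")
st = pp.create_storage(net, bus=b1, p_mw=0.05, max_e_mwh=0.5, soc_percent=20.)

dt_h = 0.25  # hypothetical 15 min schedule step
for _ in range(4):
    pp.runpp(net)
    e_mwh = net.storage.at[st, "soc_percent"] / 100. * net.storage.at[st, "max_e_mwh"]
    e_mwh += net.res_storage.at[st, "p_mw"] * dt_h   # positive p_mw = charging
    net.storage.at[st, "soc_percent"] = 100. * e_mwh / net.storage.at[st, "max_e_mwh"]
print(net.storage.at[st, "soc_percent"])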
If not set the bus voltage\\\n limit is taken.\n - necessary for OPF\n\n OUTPUT:\n **index** (int) - The unique ID of the created generator\n\n EXAMPLE:\n create_gen(net, 1, p_mw = 120, vm_pu = 1.02)\n\n \"\"\"\n _check_node_element(net, bus)\n\n index = _get_index_with_check(net, \"gen\", index, name=\"generator\")\n\n columns = [\"name\", \"bus\", \"p_mw\", \"vm_pu\", \"sn_mva\", \"type\", \"slack\", \"in_service\",\n \"scaling\", \"slack_weight\"]\n variables = [name, bus, p_mw, vm_pu, sn_mva, type, slack, bool(in_service), scaling,\n slack_weight]\n\n _set_entries(net, \"gen\", index, True, **dict(zip(columns, variables)))\n\n # OPF limits\n if not isnan(controllable):\n if \"controllable\" not in net.gen.columns:\n net.gen.loc[:, \"controllable\"] = pd.Series(dtype=bool, data=True)\n net.gen.at[index, \"controllable\"] = bool(controllable)\n elif \"controllable\" in net.gen.columns:\n net.gen.at[index, \"controllable\"] = True\n # P limits for OPF if controllable == True\n _create_column_and_set_value(net, index, min_p_mw, \"min_p_mw\", \"gen\")\n _create_column_and_set_value(net, index, max_p_mw, \"max_p_mw\", \"gen\")\n # Q limits for OPF if controllable == True\n _create_column_and_set_value(net, index, min_q_mvar, \"min_q_mvar\", \"gen\")\n _create_column_and_set_value(net, index, max_q_mvar, \"max_q_mvar\", \"gen\")\n # V limits for OPF if controllable == True\n _create_column_and_set_value(net, index, max_vm_pu, \"max_vm_pu\", \"gen\", default_val=2.)\n _create_column_and_set_value(net, index, min_vm_pu, \"min_vm_pu\", \"gen\", default_val=0.)\n\n # Short circuit calculation variables\n _create_column_and_set_value(net, index, vn_kv, \"vn_kv\", \"gen\")\n _create_column_and_set_value(net, index, cos_phi, \"cos_phi\", \"gen\")\n _create_column_and_set_value(net, index, xdss_pu, \"xdss_pu\", \"gen\")\n _create_column_and_set_value(net, index, rdss_ohm, \"rdss_ohm\", \"gen\")\n _create_column_and_set_value(net, index, pg_percent, \"pg_percent\", \"gen\")\n _create_column_and_set_value(net, index, power_station_trafo,\n \"power_station_trafo\", \"gen\")\n\n return index\n\n\ndef create_gens(net, buses, p_mw, vm_pu=1., sn_mva=nan, name=None, index=None, max_q_mvar=None,\n min_q_mvar=None, min_p_mw=None, max_p_mw=None, min_vm_pu=None, max_vm_pu=None,\n scaling=1., type=None, slack=False, controllable=None, vn_kv=None,\n xdss_pu=None, rdss_ohm=None, cos_phi=None, pg_percent=None, power_station_trafo=None,\n in_service=True, slack_weight=0.0, **kwargs):\n \"\"\"\n Adds generators to the specified buses network.\n\n Generators are always modelled as voltage controlled PV nodes, which is why the input parameter\n is active power and a voltage set point. If you want to model a generator as PQ load with fixed\n reactive power and variable voltage, please use a static generator instead.\n\n INPUT:\n **net** - The net within this generator should be created\n\n **buses** (list of int) - The bus ids to which the generators are connected\n\n OPTIONAL:\n **p_mw** (list of float, default 0) - The active power of the generator (positive for \\\n generation!)\n\n **vm_pu** (list of float, default 0) - The voltage set point of the generator.\n\n **sn_mva** (list of float, None) - Nominal power of the generator\n\n **name** (list of string, None) - The name for this generator\n\n **index** (list of int, None) - Force a specified ID if it is available. 
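# Usage sketch (illustrative only): a voltage-controlled generator (PV node).
# runpp holds vm_pu at the generator bus and reports the resulting q_mvar in
# net.res_gen; the q limits are enforced by OPF (or runpp with enforce_q_lims).
# Line parameters and setpoints are made up for illustration.
import pandapower as pp

net = pp.create_empty_network()
b0 = pp.create_bus(net, vn_kv=110.)
b1 = pp.create_bus(net, vn_kv=110.)
pp.create_ext_grid(net, bus=b0, vm_pu=1.0)
pp.create_line_from_parameters(net, from_bus=b0, to_bus=b1, length_km=10.,
                               r_ohm_per_km=0.06, x_ohm_per_km=0.35,
                               c_nf_per_km=10., max_i_ka=0.6)
g = pp.create_gen(net, bus=b1, p_mw=50., vm_pu=1.02,
                  min_q_mvar=-30., max_q_mvar=30., sn_mva=60.)
pp.runpp(net)
print(net.res_gen.loc[g])   # p_mw, q_mvar, va_degree, vm_pu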
If None, the index\\\n one higher than the highest already existing index is selected.\n\n **scaling** (list of float, 1.0) - scaling factor which for the active power of the\\\n generator\n\n **type** (list of string, None) - type variable to classify generators\n\n **controllable** (bool, NaN) - True: p_mw, q_mvar and vm_pu limits are enforced for this \\\n generator in OPF\n False: p_mw and vm_pu setpoints are enforced and \\\n *limits are ignored*.\n defaults to True if \"controllable\" column exists in DataFrame\n powerflow\n\n **vn_kv** (list of float, NaN) - Rated voltage of the generator for short-circuit \\\n calculation\n\n **xdss_pu** (list of float, NaN) - Subtransient generator reactance for short-circuit \\\n calculation\n\n **rdss_ohm** (list of float, NaN) - Subtransient generator resistance for short-circuit \\\n calculation\n\n **cos_phi** (list of float, NaN) - Rated cosine phi of the generator for short-circuit \\\n calculation\n\n **pg_percent** (float, NaN) - Rated pg (voltage control range) of the generator for \\\n short-circuit calculation\n\n **power_station_trafo** (int, None) - Index of the power station transformer for short-circuit calculation\n\n **in_service** (bool, True) - True for in_service or False for out of service\n\n **slack_weight** (float, default 0.0) - Contribution factor for distributed slack power flow calculation (active power balancing)\n\n **max_p_mw** (list of float, default NaN) - Maximum active power injection - necessary for\\\n OPF\n\n **min_p_mw** (list of float, default NaN) - Minimum active power injection - necessary for \\\n OPF\n\n **max_q_mvar** (list of float, default NaN) - Maximum reactive power injection - necessary\\\n for OPF\n\n **min_q_mvar** (list of float, default NaN) - Minimum reactive power injection - necessary \\\n for OPF\n\n **min_vm_pu** (list of float, default NaN) - Minimum voltage magnitude. If not set the \\\n bus voltage limit is taken.\n - necessary for OPF.\n\n **max_vm_pu** (list of float, default NaN) - Maximum voltage magnitude. 
If not set the bus\\\n voltage limit is taken.\n - necessary for OPF\n\n OUTPUT:\n **index** (int) - The unique ID of the created generator\n\n EXAMPLE:\n create_gen(net, 1, p_mw = 120, vm_pu = 1.02)\n\n \"\"\"\n _check_multiple_node_elements(net, buses)\n\n index = _get_multiple_index_with_check(net, \"gen\", index, len(buses))\n\n entries = {\"bus\": buses, \"p_mw\": p_mw, \"vm_pu\": vm_pu, \"sn_mva\": sn_mva, \"scaling\": scaling,\n \"in_service\": in_service, \"slack_weight\": slack_weight, \"name\": name, \"type\": type, \"slack\": slack}\n\n _add_series_to_entries(entries, index, \"min_p_mw\", min_p_mw)\n _add_series_to_entries(entries, index, \"max_p_mw\", max_p_mw)\n _add_series_to_entries(entries, index, \"min_q_mvar\", min_q_mvar)\n _add_series_to_entries(entries, index, \"max_q_mvar\", max_q_mvar)\n _add_series_to_entries(entries, index, \"min_vm_pu\", min_vm_pu)\n _add_series_to_entries(entries, index, \"max_vm_pu\", max_vm_pu)\n _add_series_to_entries(entries, index, \"vn_kv\", vn_kv)\n _add_series_to_entries(entries, index, \"cos_phi\", cos_phi)\n _add_series_to_entries(entries, index, \"xdss_pu\", xdss_pu)\n _add_series_to_entries(entries, index, \"rdss_ohm\", rdss_ohm)\n _add_series_to_entries(entries, index, \"pg_percent\", pg_percent)\n _add_series_to_entries(entries, index, \"power_station_trafo\", power_station_trafo)\n _add_series_to_entries(entries, index, \"controllable\", controllable, dtyp=bool_,\n default_val=False)\n\n _set_multiple_entries(net, \"gen\", index, **entries, **kwargs)\n\n return index\n\n\ndef create_motor(net, bus, pn_mech_mw, cos_phi, efficiency_percent=100., loading_percent=100.,\n name=None, lrc_pu=nan, scaling=1.0, vn_kv=nan, rx=nan, index=None, in_service=True,\n cos_phi_n=nan, efficiency_n_percent=nan):\n \"\"\"\n Adds a motor to the network.\n\n\n INPUT:\n **net** - The net within this motor should be created\n\n **bus** (int) - The bus id to which the motor is connected\n\n **pn_mech_mw** (float) - Mechanical rated power of the motor\n\n **cos_phi** (float, nan) - cosine phi at current operating point\n\n OPTIONAL:\n\n **name** (string, None) - The name for this motor\n\n **efficiency_percent** (float, 100) - Efficiency in percent at current operating point\n\n **loading_percent** (float, 100) - The mechanical loading in percentage of the rated \\\n mechanical power\n\n **scaling** (float, 1.0) - scaling factor which for the active power of the motor\n\n **cos_phi_n** (float, nan) - cosine phi at rated power of the motor for short-circuit \\\n calculation\n\n **efficiency_n_percent** (float, 100) - Efficiency in percent at rated power for \\\n short-circuit calculation\n\n **lrc_pu** (float, nan) - locked rotor current in relation to the rated motor current\n\n **rx** (float, nan) - R/X ratio of the motor for short-circuit calculation.\n\n **vn_kv** (float, NaN) - Rated voltage of the motor for short-circuit calculation\n\n **in_service** (bool, True) - True for in_service or False for out of service\n\n **index** (int, None) - Force a specified ID if it is available. 
If None, the index one \\\n higher than the highest already existing index is selected.\n\n OUTPUT:\n **index** (int) - The unique ID of the created motor\n\n EXAMPLE:\n create_motor(net, 1, pn_mech_mw = 0.120, cos_ph=0.9, vn_kv=0.6, efficiency_percent=90, \\\n loading_percent=40, lrc_pu=6.0)\n\n \"\"\"\n _check_node_element(net, bus)\n\n index = _get_index_with_check(net, \"motor\", index)\n\n columns = [\"name\", \"bus\", \"pn_mech_mw\", \"cos_phi\", \"cos_phi_n\", \"vn_kv\", \"rx\",\n \"efficiency_n_percent\", \"efficiency_percent\", \"loading_percent\",\n \"lrc_pu\", \"scaling\", \"in_service\"]\n variables = [name, bus, pn_mech_mw, cos_phi, cos_phi_n, vn_kv, rx, efficiency_n_percent,\n efficiency_percent, loading_percent, lrc_pu, scaling, bool(in_service)]\n _set_entries(net, \"motor\", index, **dict(zip(columns, variables)))\n\n return index\n\n\ndef create_ext_grid(net, bus, vm_pu=1.0, va_degree=0., name=None, in_service=True,\n s_sc_max_mva=nan, s_sc_min_mva=nan, rx_max=nan, rx_min=nan,\n max_p_mw=nan, min_p_mw=nan, max_q_mvar=nan, min_q_mvar=nan,\n index=None, r0x0_max=nan, x0x_max=nan, controllable=nan,\n slack_weight=1.0, **kwargs):\n \"\"\"\n Creates an external grid connection.\n\n External grids represent the higher level power grid connection and are modelled as the slack\n bus in the power flow calculation.\n\n INPUT:\n **net** - pandapower network\n\n **bus** (int) - bus where the slack is connected\n\n OPTIONAL:\n **vm_pu** (float, default 1.0) - voltage at the slack node in per unit\n\n **va_degree** (float, default 0.) - voltage angle at the slack node in degrees*\n\n **name** (string, default None) - name of of the external grid\n\n **in_service** (boolean) - True for in_service or False for out of service\n\n **s_sc_max_mva** (float, NaN) - maximal short circuit apparent power to calculate internal \\\n impedance of ext_grid for short circuit calculations\n\n **s_sc_min_mva** (float, NaN) - minimal short circuit apparent power to calculate internal \\\n impedance of ext_grid for short circuit calculations\n\n **rx_max** (float, NaN) - maximal R/X-ratio to calculate internal impedance of ext_grid \\\n for short circuit calculations\n\n **rx_min** (float, NaN) - minimal R/X-ratio to calculate internal impedance of ext_grid \\\n for short circuit calculations\n\n **max_p_mw** (float, NaN) - Maximum active power injection. Only respected for OPF\n\n **min_p_mw** (float, NaN) - Minimum active power injection. Only respected for OPF\n\n **max_q_mvar** (float, NaN) - Maximum reactive power injection. Only respected for OPF\n\n **min_q_mvar** (float, NaN) - Minimum reactive power injection. Only respected for OPF\n\n **r0x0_max** (float, NaN) - maximal R/X-ratio to calculate Zero sequence\n internal impedance of ext_grid\n\n **x0x_max** (float, NaN) - maximal X0/X-ratio to calculate Zero sequence\n internal impedance of ext_grid\n\n **slack_weight** (float, default 1.0) - Contribution factor for distributed slack power flow calculation (active power balancing)\n\n ** only considered in loadflow if calculate_voltage_angles = True\n\n **controllable** (bool, NaN) - True: p_mw, q_mvar and vm_pu limits are enforced for the \\\n ext_grid in OPF. The voltage limits set in the \\\n ext_grid bus are enforced.\n False: p_mw and vm_pu setpoints are enforced and *limits are\\\n ignored*. 
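# Usage sketch (illustrative only): an asynchronous motor via create_motor.
# pn_mech_mw, efficiency_percent and loading_percent define the operating point;
# lrc_pu, rx and vn_kv only matter for short-circuit studies. Values are made up.
import pandapower as pp

net = pp.create_empty_network()
b = pp.create_bus(net, vn_kv=0.69)
m = pp.create_motor(net, bus=b, pn_mech_mw=0.3, cos_phi=0.88, cos_phi_n=0.88,
                    efficiency_percent=94., loading_percent=75.,
                    lrc_pu=6.5, rx=0.42, vn_kv=0.69, name="pump drive")
print(net.motor.loc[m])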
The vm_pu setpoint is enforced and limits \\\n of the bus table are ignored.\n defaults to False if \"controllable\" column exists in\\\n DataFrame\n\n EXAMPLE:\n create_ext_grid(net, 1, voltage = 1.03)\n\n For three phase load flow\n\n create_ext_grid(net, 1, voltage=1.03, s_sc_max_mva=1000, rx_max=0.1, r0x0_max=0.1,\\\n x0x_max=1.0)\n \"\"\"\n _check_node_element(net, bus)\n\n index = _get_index_with_check(net, \"ext_grid\", index, name=\"external grid\")\n\n entries = dict(zip([\"bus\", \"name\", \"vm_pu\", \"va_degree\", \"in_service\", \"slack_weight\"],\n [bus, name, vm_pu, va_degree, bool(in_service), slack_weight]))\n _set_entries(net, \"ext_grid\", index, **entries, **kwargs)\n\n # OPF limits\n _create_column_and_set_value(net, index, s_sc_max_mva, \"s_sc_max_mva\", \"ext_grid\")\n _create_column_and_set_value(net, index, s_sc_min_mva, \"s_sc_min_mva\", \"ext_grid\")\n _create_column_and_set_value(net, index, rx_min, \"rx_min\", \"ext_grid\")\n _create_column_and_set_value(net, index, rx_max, \"rx_max\", \"ext_grid\")\n _create_column_and_set_value(net, index, min_p_mw, \"min_p_mw\", \"ext_grid\")\n _create_column_and_set_value(net, index, max_p_mw, \"max_p_mw\", \"ext_grid\")\n _create_column_and_set_value(net, index, min_q_mvar, \"min_q_mvar\", \"ext_grid\")\n _create_column_and_set_value(net, index, max_q_mvar, \"max_q_mvar\", \"ext_grid\")\n _create_column_and_set_value(net, index, x0x_max, \"x0x_max\", \"ext_grid\")\n _create_column_and_set_value(net, index, r0x0_max, \"r0x0_max\", \"ext_grid\")\n _create_column_and_set_value(net, index, controllable, \"controllable\", \"ext_grid\",\n dtyp=bool_, default_val=False, default_for_nan=True)\n\n return index\n\n\ndef create_line(net, from_bus, to_bus, length_km, std_type, name=None, index=None, geodata=None,\n df=1., parallel=1, in_service=True, max_loading_percent=nan, alpha=nan,\n temperature_degree_celsius=nan):\n \"\"\"\n Creates a line element in net[\"line\"]\n The line parameters are defined through the standard type library.\n\n\n INPUT:\n **net** - The net within this line should be created\n\n **from_bus** (int) - ID of the bus on one side which the line will be connected with\n\n **to_bus** (int) - ID of the bus on the other side which the line will be connected with\n\n **length_km** (float) - The line length in km\n\n **std_type** (string) - Name of a standard linetype :\n\n - Pre-defined in standard_linetypes\n\n **or**\n\n - Customized std_type made using **create_std_type()**\n\n OPTIONAL:\n **name** (string, None) - A custom name for this line\n\n **index** (int, None) - Force a specified ID if it is available. If None, the index one \\\n higher than the highest already existing index is selected.\n\n **geodata**\n (array, default None, shape= (,2L)) -\n The linegeodata of the line. The first row should be the coordinates\n of bus a and the last should be the coordinates of bus b. 
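# Usage sketch (illustrative only): slack connection via create_ext_grid.
# The short-circuit quantities (s_sc_max_mva, rx_max, x0x_max, r0x0_max) are
# optional and only used by the short-circuit / unbalanced modules, not by the
# balanced power flow. Values are made up for illustration.
import pandapower as pp

net = pp.create_empty_network()
b = pp.create_bus(net, vn_kv=110.)
eg = pp.create_ext_grid(net, bus=b, vm_pu=1.02, va_degree=0., name="feeding grid",
                        s_sc_max_mva=1000., rx_max=0.1, x0x_max=1.0, r0x0_max=0.1)
pp.runpp(net)
print(net.res_ext_grid.loc[eg])   # p_mw / q_mvar exchanged with the slack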
The points\n in the middle represent the bending points of the line\n\n **in_service** (boolean, True) - True for in_service or False for out of service\n\n **df** (float, 1) - derating factor: maximal current of line in relation to nominal current\\\n of line (from 0 to 1)\n\n **parallel** (integer, 1) - number of parallel line systems\n\n **max_loading_percent (float)** - maximum current loading (only needed for OPF)\n\n OUTPUT:\n **index** (int) - The unique ID of the created line\n\n EXAMPLE:\n create_line(net, \"line1\", from_bus = 0, to_bus = 1, length_km=0.1, std_type=\"NAYY 4x50 SE\")\n\n \"\"\"\n\n # check if bus exist to attach the line to\n _check_branch_element(net, \"Line\", index, from_bus, to_bus)\n\n index = _get_index_with_check(net, \"line\", index)\n\n v = {\n \"name\": name, \"length_km\": length_km, \"from_bus\": from_bus,\n \"to_bus\": to_bus, \"in_service\": bool(in_service), \"std_type\": std_type,\n \"df\": df, \"parallel\": parallel\n }\n\n lineparam = load_std_type(net, std_type, \"line\")\n\n v.update({param: lineparam[param] for param in [\"r_ohm_per_km\", \"x_ohm_per_km\", \"c_nf_per_km\",\n \"max_i_ka\"]})\n if \"r0_ohm_per_km\" in lineparam:\n v.update({param: lineparam[param] for param in [\"r0_ohm_per_km\", \"x0_ohm_per_km\", \"c0_nf_per_km\"]})\n\n v[\"g_us_per_km\"] = lineparam[\"g_us_per_km\"] if \"g_us_per_km\" in lineparam else 0.\n\n if \"type\" in lineparam:\n v[\"type\"] = lineparam[\"type\"]\n\n # if net.line column already has alpha, add it from std_type\n if \"alpha\" in net.line.columns and \"alpha\" in lineparam:\n v[\"alpha\"] = lineparam[\"alpha\"]\n\n _set_entries(net, \"line\", index, **v)\n\n if geodata is not None:\n net[\"line_geodata\"].loc[index, \"coords\"] = None\n net[\"line_geodata\"].at[index, \"coords\"] = geodata\n\n _create_column_and_set_value(net, index, max_loading_percent, \"max_loading_percent\", \"line\")\n _create_column_and_set_value(net, index, alpha, \"alpha\", \"line\")\n _create_column_and_set_value(net, index, temperature_degree_celsius,\n \"temperature_degree_celsius\", \"line\")\n\n return index\n\n\ndef create_lines(net, from_buses, to_buses, length_km, std_type, name=None, index=None,\n geodata=None, df=1., parallel=1, in_service=True, max_loading_percent=None):\n \"\"\" Convenience function for creating many lines at once. Parameters 'from_buses' and 'to_buses'\n must be arrays of equal length. Other parameters may be either arrays of the same length or\n single or values. In any case the line parameters are defined through a single standard\n type, so all lines have the same standard type.\n\n\n INPUT:\n **net** - The net within this line should be created\n\n **from_buses** (list of int) - ID of the bus on one side which the line will be \\\n connected with\n\n **to_buses** (list of int) - ID of the bus on the other side which the line will be \\\n connected with\n\n **length_km** (list of float) - The line length in km\n\n **std_type** (string) - The linetype of the lines.\n\n OPTIONAL:\n **name** (list of string, None) - A custom name for this line\n\n **index** (list of int, None) - Force a specified ID if it is available. If None, the\\\n index one higher than the highest already existing index is selected.\n\n **geodata**\n (list of arrays, default None, shape of arrays (,2L)) -\n The linegeodata of the line. The first row should be the coordinates\n of bus a and the last should be the coordinates of bus b. 
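# Usage sketch (illustrative only): a line from the standard type library.
# The electrical parameters are copied from the std type at creation time and
# geodata is stored per line in net.line_geodata["coords"]. Values are made up.
import pandapower as pp

net = pp.create_empty_network()
b0 = pp.create_bus(net, vn_kv=0.4, geodata=(0., 0.))
b1 = pp.create_bus(net, vn_kv=0.4, geodata=(0.1, 0.))
pp.create_ext_grid(net, bus=b0)
ln = pp.create_line(net, from_bus=b0, to_bus=b1, length_km=0.1,
                    std_type="NAYY 4x50 SE", name="feeder",
                    geodata=[(0., 0.), (0.05, 0.02), (0.1, 0.)],
                    max_loading_percent=80.)
print(net.line.loc[ln, ["r_ohm_per_km", "x_ohm_per_km", "c_nf_per_km", "max_i_ka"]])
print(net.line_geodata.at[ln, "coords"])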
The points\n in the middle represent the bending points of the line\n\n **in_service** (list of boolean, True) - True for in_service or False for out of service\n\n **df** (list of float, 1) - derating factor: maximal current of line in relation to \\\n nominal current of line (from 0 to 1)\n\n **parallel** (list of integer, 1) - number of parallel line systems\n\n **max_loading_percent (list of float)** - maximum current loading (only needed for OPF)\n\n OUTPUT:\n **index** (list of int) - The unique IDs of the created lines\n\n EXAMPLE:\n create_lines(net, from_buses=[0, 1], to_buses=[2, 3], length_km=0.1, std_type=\"NAYY 4x50 SE\")\n\n \"\"\"\n _check_multiple_branch_elements(net, from_buses, to_buses, \"Lines\")\n\n index = _get_multiple_index_with_check(net, \"line\", index, len(from_buses))\n\n entries = {\"from_bus\": from_buses, \"to_bus\": to_buses, \"length_km\": length_km,\n \"std_type\": std_type, \"name\": name, \"df\": df, \"parallel\": parallel,\n \"in_service\": in_service}\n\n # add std type data\n if isinstance(std_type, str):\n lineparam = load_std_type(net, std_type, \"line\")\n entries[\"r_ohm_per_km\"] = lineparam[\"r_ohm_per_km\"]\n entries[\"x_ohm_per_km\"] = lineparam[\"x_ohm_per_km\"]\n entries[\"c_nf_per_km\"] = lineparam[\"c_nf_per_km\"]\n entries[\"max_i_ka\"] = lineparam[\"max_i_ka\"]\n entries[\"g_us_per_km\"] = lineparam[\"g_us_per_km\"] if \"g_us_per_km\" in lineparam else 0.\n if \"type\" in lineparam:\n entries[\"type\"] = lineparam[\"type\"]\n else:\n lineparam = list(map(load_std_type, [net] * len(std_type), std_type, ['line'] * len(std_type)))\n entries[\"r_ohm_per_km\"] = list(map(itemgetter(\"r_ohm_per_km\"), lineparam))\n entries[\"x_ohm_per_km\"] = list(map(itemgetter(\"x_ohm_per_km\"), lineparam))\n entries[\"c_nf_per_km\"] = list(map(itemgetter(\"c_nf_per_km\"), lineparam))\n entries[\"max_i_ka\"] = list(map(itemgetter(\"max_i_ka\"), lineparam))\n entries[\"g_us_per_km\"] = list(map(check_entry_in_std_type, lineparam, [\"g_us_per_km\"] * len(lineparam),\n [0.] 
* len(lineparam)))\n entries[\"type\"] = list(map(check_entry_in_std_type, lineparam, [\"type\"] * len(lineparam),\n [None] * len(lineparam)))\n\n _add_series_to_entries(entries, index, \"max_loading_percent\", max_loading_percent)\n\n _set_multiple_entries(net, \"line\", index, **entries)\n\n if geodata is not None:\n _add_multiple_branch_geodata(net, \"line\", geodata, index)\n\n return index\n\n\ndef create_line_from_parameters(net, from_bus, to_bus, length_km, r_ohm_per_km, x_ohm_per_km,\n c_nf_per_km, max_i_ka, name=None, index=None, type=None,\n geodata=None, in_service=True, df=1., parallel=1, g_us_per_km=0.,\n max_loading_percent=nan, alpha=nan,\n temperature_degree_celsius=nan, r0_ohm_per_km=nan,\n x0_ohm_per_km=nan, c0_nf_per_km=nan, g0_us_per_km=0,\n endtemp_degree=nan, **kwargs):\n \"\"\"\n Creates a line element in net[\"line\"] from line parameters.\n\n INPUT:\n **net** - The net within this line should be created\n\n **from_bus** (int) - ID of the bus on one side which the line will be connected with\n\n **to_bus** (int) - ID of the bus on the other side which the line will be connected with\n\n **length_km** (float) - The line length in km\n\n **r_ohm_per_km** (float) - line resistance in ohm per km\n\n **x_ohm_per_km** (float) - line reactance in ohm per km\n\n **c_nf_per_km** (float) - line capacitance (line-to-earth) in nano Farad per km\n\n **r0_ohm_per_km** (float) - zero sequence line resistance in ohm per km\n\n **x0_ohm_per_km** (float) - zero sequence line reactance in ohm per km\n\n **c0_nf_per_km** (float) - zero sequence line capacitance in nano Farad per km\n\n **max_i_ka** (float) - maximum thermal current in kilo Ampere\n\n OPTIONAL:\n **name** (string, None) - A custom name for this line\n\n **index** (int, None) - Force a specified ID if it is available. If None, the index one \\\n higher than the highest already existing index is selected.\n\n **in_service** (boolean, True) - True for in_service or False for out of service\n\n **type** (str, None) - type of line (\"ol\" for overhead line or \"cs\" for cable system)\n\n **df** (float, 1) - derating factor: maximal current of line in relation to nominal current\\\n of line (from 0 to 1)\n\n **g_us_per_km** (float, 0) - dielectric conductance in micro Siemens per km\n\n **g0_us_per_km** (float, 0) - zero sequence dielectric conductance in micro Siemens per km\n\n **parallel** (integer, 1) - number of parallel line systems\n\n **geodata**\n (array, default None, shape= (,2L)) -\n The linegeodata of the line. The first row should be the coordinates\n of bus a and the last should be the coordinates of bus b. 
The points\n in the middle represent the bending points of the line\n\n **max_loading_percent (float)** - maximum current loading (only needed for OPF)\n\n OUTPUT:\n **index** (int) - The unique ID of the created line\n\n EXAMPLE:\n create_line_from_parameters(net, from_bus=0, to_bus=1, length_km=0.1,\n r_ohm_per_km=0.01, x_ohm_per_km=0.05, c_nf_per_km=10,\n max_i_ka=0.4, name=\"line1\")\n\n \"\"\"\n\n # check if the buses exist to attach the line to\n _check_branch_element(net, \"Line\", index, from_bus, to_bus)\n\n index = _get_index_with_check(net, \"line\", index)\n\n v = {\n \"name\": name, \"length_km\": length_km, \"from_bus\": from_bus,\n \"to_bus\": to_bus, \"in_service\": bool(in_service), \"std_type\": None,\n \"df\": df, \"r_ohm_per_km\": r_ohm_per_km, \"x_ohm_per_km\": x_ohm_per_km,\n \"c_nf_per_km\": c_nf_per_km, \"max_i_ka\": max_i_ka, \"parallel\": parallel, \"type\": type,\n \"g_us_per_km\": g_us_per_km\n }\n\n _set_entries(net, \"line\", index, **v, **kwargs)\n\n nan_0_values = [isnan(r0_ohm_per_km), isnan(x0_ohm_per_km), isnan(c0_nf_per_km)]\n if not np_any(nan_0_values):\n _create_column_and_set_value(net, index, r0_ohm_per_km, \"r0_ohm_per_km\", \"line\")\n _create_column_and_set_value(net, index, x0_ohm_per_km, \"x0_ohm_per_km\", \"line\")\n _create_column_and_set_value(net, index, c0_nf_per_km, \"c0_nf_per_km\", \"line\")\n _create_column_and_set_value(net, index, g0_us_per_km, \"g0_us_per_km\", \"line\",\n default_val=0.)\n elif not np_all(nan_0_values):\n logger.warning(\"Zero sequence values are given for only some parameters. Please specify \"\n \"them for all parameters, otherwise they are not set!\")\n\n if geodata is not None:\n net[\"line_geodata\"].loc[index, \"coords\"] = None\n net[\"line_geodata\"].at[index, \"coords\"] = geodata\n\n _create_column_and_set_value(net, index, max_loading_percent, \"max_loading_percent\", \"line\")\n _create_column_and_set_value(net, index, alpha, \"alpha\", \"line\")\n _create_column_and_set_value(net, index, temperature_degree_celsius,\n \"temperature_degree_celsius\", \"line\")\n _create_column_and_set_value(net, index, endtemp_degree, \"endtemp_degree\", \"line\")\n\n return index\n\n\ndef create_lines_from_parameters(net, from_buses, to_buses, length_km, r_ohm_per_km, x_ohm_per_km,\n c_nf_per_km, max_i_ka, name=None, index=None, type=None,\n geodata=None, in_service=True, df=1., parallel=1, g_us_per_km=0.,\n max_loading_percent=None, alpha=None,\n temperature_degree_celsius=None, r0_ohm_per_km=None,\n x0_ohm_per_km=None, c0_nf_per_km=None, g0_us_per_km=None,\n **kwargs):\n \"\"\"\n Convenience function for creating many lines at once. Parameters 'from_buses' and 'to_buses'\n must be arrays of equal length. 
Other parameters may be either arrays of the same length or\n single or values.\n\n INPUT:\n **net** - The net within this line should be created\n\n **from_bus** (list of int) - ID of the bus on one side which the line will be connected with\n\n **to_bus** (list of int) - ID of the bus on the other side which the line will be connected\\\n with\n\n **length_km** (list of float) - The line length in km\n\n **r_ohm_per_km** (list of float) - line resistance in ohm per km\n\n **x_ohm_per_km** (list of float) - line reactance in ohm per km\n\n **c_nf_per_km** (list of float) - line capacitance in nano Farad per km\n\n **r0_ohm_per_km** (list of float) - zero sequence line resistance in ohm per km\n\n **x0_ohm_per_km** (list of float) - zero sequence line reactance in ohm per km\n\n **c0_nf_per_km** (list of float) - zero sequence line capacitance in nano Farad per km\n\n **max_i_ka** (list of float) - maximum thermal current in kilo Ampere\n\n OPTIONAL:\n **name** (string, None) - A custom name for this line\n\n **index** (int, None) - Force a specified ID if it is available. If None, the index one \\\n higher than the highest already existing index is selected.\n\n **in_service** (boolean, True) - True for in_service or False for out of service\n\n **type** (str, None) - type of line (\"ol\" for overhead line or \"cs\" for cable system)\n\n **df** (float, 1) - derating factor: maximal current of line in relation to nominal current\\\n of line (from 0 to 1)\n\n **g_us_per_km** (float, 0) - dielectric conductance in micro Siemens per km\n\n **g0_us_per_km** (float, 0) - zero sequence dielectric conductance in micro Siemens per km\n\n **parallel** (integer, 1) - number of parallel line systems\n\n **geodata**\n (array, default None, shape= (,2L)) -\n The linegeodata of the line. The first row should be the coordinates\n of bus a and the last should be the coordinates of bus b. 
The points\n in the middle represent the bending points of the line\n\n **max_loading_percent (float)** - maximum current loading (only needed for OPF)\n\n OUTPUT:\n **index** (int) - The unique ID of the created line\n\n EXAMPLE:\n create_line_from_parameters(net, \"line1\", from_bus = 0, to_bus = 1, lenght_km=0.1,\n r_ohm_per_km = .01, x_ohm_per_km = 0.05, c_nf_per_km = 10,\n max_i_ka = 0.4)\n\n \"\"\"\n _check_multiple_branch_elements(net, from_buses, to_buses, \"Lines\")\n\n index = _get_multiple_index_with_check(net, \"line\", index, len(from_buses))\n\n entries = {\"from_bus\": from_buses, \"to_bus\": to_buses, \"length_km\": length_km, \"type\": type,\n \"r_ohm_per_km\": r_ohm_per_km, \"x_ohm_per_km\": x_ohm_per_km,\n \"c_nf_per_km\": c_nf_per_km, \"max_i_ka\": max_i_ka, \"g_us_per_km\": g_us_per_km,\n \"name\": name, \"df\": df, \"parallel\": parallel, \"in_service\": in_service}\n\n _add_series_to_entries(entries, index, \"max_loading_percent\", max_loading_percent)\n _add_series_to_entries(entries, index, \"r0_ohm_per_km\", r0_ohm_per_km)\n _add_series_to_entries(entries, index, \"x0_ohm_per_km\", x0_ohm_per_km)\n _add_series_to_entries(entries, index, \"c0_nf_per_km\", c0_nf_per_km)\n _add_series_to_entries(entries, index, \"g0_us_per_km\", g0_us_per_km)\n _add_series_to_entries(entries, index, \"temperature_degree_celsius\", temperature_degree_celsius)\n _add_series_to_entries(entries, index, \"alpha\", alpha)\n\n _set_multiple_entries(net, \"line\", index, **entries, **kwargs)\n\n if geodata is not None:\n _add_multiple_branch_geodata(net, \"line\", geodata, index)\n\n return index\n\n\ndef create_transformer(net, hv_bus, lv_bus, std_type, name=None, tap_pos=nan, in_service=True,\n index=None, max_loading_percent=nan, parallel=1, df=1.,\n tap_dependent_impedance=None, vk_percent_characteristic=None,\n vkr_percent_characteristic=None):\n \"\"\"\n Creates a two-winding transformer in table net[\"trafo\"].\n The trafo parameters are defined through the standard type library.\n\n INPUT:\n **net** - The net within this transformer should be created\n\n **hv_bus** (int) - The bus on the high-voltage side on which the transformer will be \\\n connected to\n\n **lv_bus** (int) - The bus on the low-voltage side on which the transformer will be \\\n connected to\n\n **std_type** - The used standard type from the standard type library\n\n **Zero sequence parameters** (Added through std_type For Three phase load flow) :\n\n **vk0_percent** - zero sequence relative short-circuit voltage\n\n **vkr0_percent** - real part of zero sequence relative short-circuit voltage\n\n **mag0_percent** - ratio between magnetizing and short circuit impedance (zero sequence)\n\n z_mag0 / z0\n\n **mag0_rx** - zero sequence magnetizing r/x ratio\n\n **si0_hv_partial** - zero sequence short circuit impedance distribution in hv side\n\n OPTIONAL:\n **name** (string, None) - A custom name for this transformer\n\n **tap_pos** (int, nan) - current tap position of the transformer. Defaults to the medium \\\n position (tap_neutral)\n\n **in_service** (boolean, True) - True for in_service or False for out of service\n\n **index** (int, None) - Force a specified ID if it is available. 
If None, the index one \\\n higher than the highest already existing index is selected.\n\n **max_loading_percent (float)** - maximum current loading (only needed for OPF)\n\n **parallel** (integer) - number of parallel transformers\n\n **df** (float) - derating factor: maximal current of transformer in relation to nominal \\\n current of transformer (from 0 to 1)\n\n **tap_dependent_impedance** (boolean) - True if transformer impedance must be adjusted dependent \\\n on the tap position of the trabnsformer. Requires the additional columns \\\n \"vk_percent_characteristic\" and \"vkr_percent_characteristic\" that reference the index of the \\\n characteristic from the table net.characteristic. A convenience function \\\n pandapower.control.create_trafo_characteristics can be used to create the SplineCharacteristic \\\n objects, add the relevant columns and set up the references to the characteristics. \\\n The function pandapower.control.trafo_characteristics_diagnostic can be used for sanity checks.\n\n **vk_percent_characteristic** (int) - index of the characteristic from net.characteristic for \\\n the adjustment of the parameter \"vk_percent\" for the calculation of tap dependent impedance.\n\n **vkr_percent_characteristic** (int) - index of the characteristic from net.characteristic for \\\n the adjustment of the parameter \"vk_percent\" for the calculation of tap dependent impedance.\n\n OUTPUT:\n **index** (int) - The unique ID of the created transformer\n\n EXAMPLE:\n create_transformer(net, hv_bus = 0, lv_bus = 1, name = \"trafo1\", std_type = \\\n \"0.4 MVA 10/0.4 kV\")\n \"\"\"\n\n # Check if bus exist to attach the trafo to\n _check_branch_element(net, \"Trafo\", index, hv_bus, lv_bus)\n\n index = _get_index_with_check(net, \"trafo\", index, name=\"transformer\")\n\n if df <= 0:\n raise UserWarning(\"derating factor df must be positive: df = %.3f\" % df)\n\n v = {\n \"name\": name, \"hv_bus\": hv_bus, \"lv_bus\": lv_bus,\n \"in_service\": bool(in_service), \"std_type\": std_type\n }\n ti = load_std_type(net, std_type, \"trafo\")\n\n updates = {\n \"sn_mva\": ti[\"sn_mva\"],\n \"vn_hv_kv\": ti[\"vn_hv_kv\"],\n \"vn_lv_kv\": ti[\"vn_lv_kv\"],\n \"vk_percent\": ti[\"vk_percent\"],\n \"vkr_percent\": ti[\"vkr_percent\"],\n \"pfe_kw\": ti[\"pfe_kw\"],\n \"i0_percent\": ti[\"i0_percent\"],\n \"parallel\": parallel,\n \"df\": df,\n \"shift_degree\": ti[\"shift_degree\"] if \"shift_degree\" in ti else 0,\n \"tap_phase_shifter\": ti[\"tap_phase_shifter\"] if \"tap_phase_shifter\" in ti\n and pd.notnull(\n ti[\"tap_phase_shifter\"]) else False\n }\n for zero_param in ['vk0_percent', 'vkr0_percent', 'mag0_percent', 'mag0_rx', 'si0_hv_partial']:\n if zero_param in ti:\n updates[zero_param] = ti[zero_param]\n v.update(updates)\n for tp in (\"tap_neutral\", \"tap_max\", \"tap_min\", \"tap_side\", \"tap_step_percent\",\n \"tap_step_degree\"):\n if tp in ti:\n v[tp] = ti[tp]\n if (\"tap_neutral\" in v) and (tap_pos is nan):\n v[\"tap_pos\"] = v[\"tap_neutral\"]\n else:\n v[\"tap_pos\"] = tap_pos\n if isinstance(tap_pos, float):\n net.trafo.tap_pos = net.trafo.tap_pos.astype(float)\n\n _set_entries(net, \"trafo\", index, **v)\n\n _create_column_and_set_value(net, index, max_loading_percent, \"max_loading_percent\", \"trafo\")\n\n if tap_dependent_impedance is not None:\n _create_column_and_set_value(net, index, tap_dependent_impedance, \"tap_dependent_impedance\", \"trafo\", bool_, False, True)\n if vk_percent_characteristic is not None:\n _create_column_and_set_value(net, index, 
vk_percent_characteristic, \"vk_percent_characteristic\", \"trafo\", \"Int64\") # Int64Dtype\n if vkr_percent_characteristic is not None:\n _create_column_and_set_value(net, index, vkr_percent_characteristic, \"vkr_percent_characteristic\", \"trafo\", \"Int64\")\n\n # tap_phase_shifter default False\n net.trafo.tap_phase_shifter.fillna(False, inplace=True)\n\n return index\n\n\ndef create_transformer_from_parameters(net, hv_bus, lv_bus, sn_mva, vn_hv_kv, vn_lv_kv,\n vkr_percent, vk_percent, pfe_kw, i0_percent,\n shift_degree=0,\n tap_side=None, tap_neutral=nan, tap_max=nan,\n tap_min=nan, tap_step_percent=nan, tap_step_degree=nan,\n tap_pos=nan, tap_phase_shifter=False, in_service=True,\n name=None, vector_group=None, index=None,\n max_loading_percent=nan, parallel=1,\n df=1., vk0_percent=nan, vkr0_percent=nan,\n mag0_percent=nan, mag0_rx=nan,\n si0_hv_partial=nan,\n pt_percent=nan, oltc=False, tap_dependent_impedance=None,\n vk_percent_characteristic=None,\n vkr_percent_characteristic=None, **kwargs):\n \"\"\"\n Creates a two-winding transformer in table net[\"trafo\"].\n The trafo parameters are defined through the standard type library.\n\n INPUT:\n **net** - The net within this transformer should be created\n\n **hv_bus** (int) - The bus on the high-voltage side on which the transformer will be \\\n connected to\n\n **lv_bus** (int) - The bus on the low-voltage side on which the transformer will be \\\n connected to\n\n **sn_mva** (float) - rated apparent power\n\n **vn_hv_kv** (float) - rated voltage on high voltage side\n\n **vn_lv_kv** (float) - rated voltage on low voltage side\n\n **vkr_percent** (float) - real part of relative short-circuit voltage\n\n **vk_percent** (float) - relative short-circuit voltage\n\n **pfe_kw** (float) - iron losses in kW\n\n **i0_percent** (float) - open loop losses in percent of rated current\n\n **vector_group** (String) - Vector group of the transformer\n\n HV side is Uppercase letters\n and LV side is lower case\n\n **vk0_percent** (float) - zero sequence relative short-circuit voltage\n\n **vkr0_percent** - real part of zero sequence relative short-circuit voltage\n\n **mag0_percent** - zero sequence magnetizing impedance/ vk0\n\n **mag0_rx** - zero sequence magnitizing R/X ratio\n\n **si0_hv_partial** - Distribution of zero sequence leakage impedances for HV side\n\n\n OPTIONAL:\n\n **in_service** (boolean) - True for in_service or False for out of service\n\n **parallel** (integer) - number of parallel transformers\n\n **name** (string) - A custom name for this transformer\n\n **shift_degree** (float) - Angle shift over the transformer*\n\n **tap_side** (string) - position of tap changer (\"hv\", \"lv\")\n\n **tap_pos** (int, nan) - current tap position of the transformer. Defaults to the medium \\\n position (tap_neutral)\n\n **tap_neutral** (int, nan) - tap position where the transformer ratio is equal to the \\\n ratio of the rated voltages\n\n **tap_max** (int, nan) - maximal allowed tap position\n\n **tap_min** (int, nan): minimal allowed tap position\n\n **tap_step_percent** (float) - tap step size for voltage magnitude in percent\n\n **tap_step_degree** (float) - tap step size for voltage angle in degree*\n\n **tap_phase_shifter** (bool) - whether the transformer is an ideal phase shifter*\n\n **index** (int, None) - Force a specified ID if it is available. 
If None, the index one \\\n higher than the highest already existing index is selected.\n\n **max_loading_percent (float)** - maximum current loading (only needed for OPF)\n\n **df** (float) - derating factor: maximal current of transformer in relation to nominal \\\n current of transformer (from 0 to 1)\n\n **tap_dependent_impedance** (boolean) - True if transformer impedance must be adjusted dependent \\\n on the tap position of the trabnsformer. Requires the additional columns \\\n \"vk_percent_characteristic\" and \"vkr_percent_characteristic\" that reference the index of the \\\n characteristic from the table net.characteristic. A convenience function \\\n pandapower.control.create_trafo_characteristics can be used to create the SplineCharacteristic \\\n objects, add the relevant columns and set up the references to the characteristics. \\\n The function pandapower.control.trafo_characteristics_diagnostic can be used for sanity checks.\n\n **vk_percent_characteristic** (int) - index of the characteristic from net.characteristic for \\\n the adjustment of the parameter \"vk_percent\" for the calculation of tap dependent impedance.\n\n **vkr_percent_characteristic** (int) - index of the characteristic from net.characteristic for \\\n the adjustment of the parameter \"vk_percent\" for the calculation of tap dependent impedance.\n\n **pt_percent** (float, nan) - (short circuit only)\n\n **oltc** (bool, False) - (short circuit only)\n\n ** only considered in loadflow if calculate_voltage_angles = True\n\n OUTPUT:\n **index** (int) - The unique ID of the created transformer\n\n EXAMPLE:\n create_transformer_from_parameters(net, hv_bus=0, lv_bus=1, name=\"trafo1\", sn_mva=40, \\\n vn_hv_kv=110, vn_lv_kv=10, vk_percent=10, vkr_percent=0.3, pfe_kw=30, \\\n i0_percent=0.1, shift_degree=30)\n \"\"\"\n\n # Check if bus exist to attach the trafo to\n _check_branch_element(net, \"Trafo\", index, hv_bus, lv_bus)\n\n index = _get_index_with_check(net, \"trafo\", index, name=\"transformer\")\n\n if df <= 0:\n raise UserWarning(\"derating factor df must be positive: df = %.3f\" % df)\n\n if tap_pos is nan:\n tap_pos = tap_neutral\n # store dtypes\n\n v = {\n \"name\": name, \"hv_bus\": hv_bus, \"lv_bus\": lv_bus,\n \"in_service\": bool(in_service), \"std_type\": None, \"sn_mva\": sn_mva, \"vn_hv_kv\": vn_hv_kv,\n \"vn_lv_kv\": vn_lv_kv, \"vk_percent\": vk_percent, \"vkr_percent\": vkr_percent,\n \"pfe_kw\": pfe_kw, \"i0_percent\": i0_percent, \"tap_neutral\": tap_neutral,\n \"tap_max\": tap_max, \"tap_min\": tap_min, \"shift_degree\": shift_degree,\n \"tap_side\": tap_side, \"tap_step_percent\": tap_step_percent, \"tap_step_degree\": tap_step_degree,\n \"tap_phase_shifter\": tap_phase_shifter, \"parallel\": parallel, \"df\": df,\n \"pt_percent\": pt_percent, \"oltc\": oltc\n }\n\n if (\"tap_neutral\" in v) and (tap_pos is nan):\n v[\"tap_pos\"] = v[\"tap_neutral\"]\n else:\n v[\"tap_pos\"] = tap_pos\n if type(tap_pos) == float:\n net.trafo.tap_pos = net.trafo.tap_pos.astype(float)\n\n _set_entries(net, \"trafo\", index, **v, **kwargs)\n\n if tap_dependent_impedance is not None:\n _create_column_and_set_value(net, index, tap_dependent_impedance, \"tap_dependent_impedance\", \"trafo\", bool_, False, True)\n if vk_percent_characteristic is not None:\n _create_column_and_set_value(net, index, vk_percent_characteristic, \"vk_percent_characteristic\", \"trafo\", \"Int64\")\n if vkr_percent_characteristic is not None:\n _create_column_and_set_value(net, index, vkr_percent_characteristic, 
\"vkr_percent_characteristic\", \"trafo\", \"Int64\")\n\n if not (isnan(vk0_percent) and isnan(vkr0_percent) and isnan(mag0_percent)\n and isnan(mag0_rx) and isnan(si0_hv_partial) and vector_group is None):\n _create_column_and_set_value(net, index, vk0_percent, \"vk0_percent\", \"trafo\")\n _create_column_and_set_value(net, index, vkr0_percent, \"vkr0_percent\", \"trafo\")\n _create_column_and_set_value(net, index, mag0_percent, \"mag0_percent\", \"trafo\")\n _create_column_and_set_value(net, index, mag0_rx, \"mag0_rx\", \"trafo\")\n _create_column_and_set_value(net, index, si0_hv_partial, \"si0_hv_partial\", \"trafo\")\n _create_column_and_set_value(net, index, vector_group, \"vector_group\", \"trafo\", dtyp=str,\n default_val=None)\n _create_column_and_set_value(net, index, pt_percent, \"pt_percent\", \"trafo\")\n _create_column_and_set_value(net, index, max_loading_percent, \"max_loading_percent\", \"trafo\")\n\n return index\n\n\ndef create_transformers_from_parameters(net, hv_buses, lv_buses, sn_mva, vn_hv_kv, vn_lv_kv,\n vkr_percent, vk_percent, pfe_kw, i0_percent, shift_degree=0,\n tap_side=None, tap_neutral=nan, tap_max=nan, tap_min=nan,\n tap_step_percent=nan, tap_step_degree=nan, tap_pos=nan,\n tap_phase_shifter=False, in_service=True, name=None,\n vector_group=None, index=None, max_loading_percent=None,\n parallel=1, df=1., vk0_percent=None, vkr0_percent=None,\n mag0_percent=None, mag0_rx=None, si0_hv_partial=None,\n pt_percent=nan, oltc=False, tap_dependent_impedance=None,\n vk_percent_characteristic=None,\n vkr_percent_characteristic=None, **kwargs):\n \"\"\"\n Creates several two-winding transformers in table net[\"trafo\"].\n The trafo parameters are defined through the standard type library.\n\n INPUT:\n **net** - The net within this transformer should be created\n\n **hv_bus** (list of int) - The bus on the high-voltage side on which the transformer will \\\n be connected to\n\n **lv_bus** (list of int) - The bus on the low-voltage side on which the transformer will \\\n be connected to\n\n **sn_mva** (list of float) - rated apparent power\n\n **vn_hv_kv** (list of float) - rated voltage on high voltage side\n\n **vn_lv_kv** (list of float) - rated voltage on low voltage side\n\n **vkr_percent** (list of float) - real part of relative short-circuit voltage\n\n **vk_percent** (list of float) - relative short-circuit voltage\n\n **pfe_kw** (list of float) - iron losses in kW\n\n **i0_percent** (list of float) - open loop losses in percent of rated current\n\n **vector_group** (list of String) - Vector group of the transformer\n\n HV side is Uppercase letters\n and LV side is lower case\n\n **vk0_percent** (list of float) - zero sequence relative short-circuit voltage\n\n **vkr0_percent** - (list of float) real part of zero sequence relative short-circuit voltage\n\n **mag0_percent** - (list of float) zero sequence magnetizing impedance/ vk0\n\n **mag0_rx** - (list of float) zero sequence magnitizing R/X ratio\n\n **si0_hv_partial** - (list of float) Distribution of zero sequence leakage impedances for \\\n HV side\n\n\n OPTIONAL:\n\n **in_service** (boolean) - True for in_service or False for out of service\n\n **parallel** (integer) - number of parallel transformers\n\n **name** (string) - A custom name for this transformer\n\n **shift_degree** (float) - Angle shift over the transformer*\n\n **tap_side** (string) - position of tap changer (\"hv\", \"lv\")\n\n **tap_pos** (int, nan) - current tap position of the transformer. 
Defaults to the medium \\\n position (tap_neutral)\n\n **tap_neutral** (int, nan) - tap position where the transformer ratio is equal to the ratio\\\n of the rated voltages\n\n **tap_max** (int, nan) - maximal allowed tap position\n\n **tap_min** (int, nan): minimal allowed tap position\n\n **tap_step_percent** (float) - tap step size for voltage magnitude in percent\n\n **tap_step_degree** (float) - tap step size for voltage angle in degree*\n\n **tap_phase_shifter** (bool) - whether the transformer is an ideal phase shifter*\n\n **index** (int, None) - Force a specified ID if it is available. If None, the index one \\\n higher than the highest already existing index is selected.\n\n **max_loading_percent (float)** - maximum current loading (only needed for OPF)\n\n **df** (float) - derating factor: maximal current of transformer in relation to nominal \\\n current of transformer (from 0 to 1)\n\n **tap_dependent_impedance** (boolean) - True if transformer impedance must be adjusted dependent \\\n on the tap position of the trabnsformer. Requires the additional columns \\\n \"vk_percent_characteristic\" and \"vkr_percent_characteristic\" that reference the index of the \\\n characteristic from the table net.characteristic. A convenience function \\\n pandapower.control.create_trafo_characteristics can be used to create the SplineCharacteristic \\\n objects, add the relevant columns and set up the references to the characteristics. \\\n The function pandapower.control.trafo_characteristics_diagnostic can be used for sanity checks.\n\n **vk_percent_characteristic** (int) - index of the characteristic from net.characteristic for \\\n the adjustment of the parameter \"vk_percent\" for the calculation of tap dependent impedance.\n\n **vkr_percent_characteristic** (int) - index of the characteristic from net.characteristic for \\\n the adjustment of the parameter \"vk_percent\" for the calculation of tap dependent impedance.\n\n **pt_percent** (float, nan) - (short circuit only)\n\n **oltc** (bool, False) - (short circuit only)\n\n ** only considered in loadflow if calculate_voltage_angles = True\n\n OUTPUT:\n **index** (int) - The unique ID of the created transformer\n\n EXAMPLE:\n create_transformer_from_parameters(net, hv_bus=0, lv_bus=1, name=\"trafo1\", sn_mva=40, \\\n vn_hv_kv=110, vn_lv_kv=10, vk_percent=10, vkr_percent=0.3, pfe_kw=30, \\\n i0_percent=0.1, shift_degree=30)\n \"\"\"\n _check_multiple_branch_elements(net, hv_buses, lv_buses, \"Transformers\")\n\n index = _get_multiple_index_with_check(net, \"trafo\", index, len(hv_buses))\n\n tp_neutral = pd.Series(tap_neutral, index=index, dtype=float64)\n tp_pos = pd.Series(tap_pos, index=index, dtype=float64).fillna(tp_neutral)\n entries = {\"name\": name, \"hv_bus\": hv_buses, \"lv_bus\": lv_buses,\n \"in_service\": array(in_service).astype(bool_), \"std_type\": None, \"sn_mva\": sn_mva,\n \"vn_hv_kv\": vn_hv_kv, \"vn_lv_kv\": vn_lv_kv, \"vk_percent\": vk_percent,\n \"vkr_percent\": vkr_percent, \"pfe_kw\": pfe_kw, \"i0_percent\": i0_percent,\n \"tap_neutral\": tp_neutral, \"tap_max\": tap_max, \"tap_min\": tap_min,\n \"shift_degree\": shift_degree, \"tap_pos\": tp_pos, \"tap_side\": tap_side,\n \"tap_step_percent\": tap_step_percent, \"tap_step_degree\": tap_step_degree,\n \"tap_phase_shifter\": tap_phase_shifter, \"parallel\": parallel, \"df\": df,\n \"pt_percent\": pt_percent, \"oltc\": oltc}\n\n if tap_dependent_impedance is not None:\n _add_series_to_entries(entries, index, \"tap_dependent_impedance\", tap_dependent_impedance, 
dtype=bool_, default_val=False)\n if vk_percent_characteristic is not None:\n _add_series_to_entries(entries, index, \"vk_percent_characteristic\", vk_percent_characteristic, \"Int64\")\n if vkr_percent_characteristic is not None:\n _add_series_to_entries(entries, index, \"vkr_percent_characteristic\", vkr_percent_characteristic, \"Int64\")\n\n _add_series_to_entries(entries, index, \"vk0_percent\", vk0_percent)\n _add_series_to_entries(entries, index, \"vkr0_percent\", vkr0_percent)\n _add_series_to_entries(entries, index, \"mag0_percent\", mag0_percent)\n _add_series_to_entries(entries, index, \"mag0_rx\", mag0_rx)\n _add_series_to_entries(entries, index, \"si0_hv_partial\", si0_hv_partial)\n _add_series_to_entries(entries, index, \"max_loading_percent\", max_loading_percent)\n _add_series_to_entries(entries, index, \"vector_group\", vector_group, dtyp=str)\n _add_series_to_entries(entries, index, \"pt_percent\", pt_percent)\n\n _set_multiple_entries(net, \"trafo\", index, **entries, **kwargs)\n\n return index\n\n\ndef create_transformer3w(net, hv_bus, mv_bus, lv_bus, std_type, name=None, tap_pos=nan,\n in_service=True, index=None, max_loading_percent=nan,\n tap_at_star_point=False, tap_dependent_impedance=None,\n vk_hv_percent_characteristic=None, vkr_hv_percent_characteristic=None,\n vk_mv_percent_characteristic=None, vkr_mv_percent_characteristic=None,\n vk_lv_percent_characteristic=None, vkr_lv_percent_characteristic=None):\n \"\"\"\n Creates a three-winding transformer in table net[\"trafo3w\"].\n The trafo parameters are defined through the standard type library.\n\n INPUT:\n **net** - The net within this transformer should be created\n\n **hv_bus** (int) - The bus on the high-voltage side on which the transformer will be \\\n connected to\n\n **mv_bus** (int) - The medium voltage bus on which the transformer will be connected to\n\n **lv_bus** (int) - The bus on the low-voltage side on which the transformer will be \\\n connected to\n\n **std_type** - The used standard type from the standard type library\n\n OPTIONAL:\n **name** (string) - A custom name for this transformer\n\n **tap_pos** (int, nan) - current tap position of the transformer. Defaults to the medium \\\n position (tap_neutral)\n\n **tap_at_star_point** (boolean) - Whether tap changer is located at the star point of the \\\n 3W-transformer or at the bus\n\n **in_service** (boolean) - True for in_service or False for out of service\n\n **index** (int, None) - Force a specified ID if it is available. If None, the index one \\\n higher than the highest already existing index is selected.\n\n **max_loading_percent (float)** - maximum current loading (only needed for OPF)\n\n **tap_at_star_point (bool)** - whether tap changer is modelled at star point or at the bus\n\n **tap_dependent_impedance** (boolean) - True if transformer impedance must be adjusted dependent \\\n on the tap position of the trabnsformer. Requires the additional columns \\\n \"vk_percent_characteristic\" and \"vkr_percent_characteristic\" that reference the index of the \\\n characteristic from the table net.characteristic. A convenience function \\\n pandapower.control.create_trafo_characteristics can be used to create the SplineCharacteristic \\\n objects, add the relevant columns and set up the references to the characteristics. 
\\\n The function pandapower.control.trafo_characteristics_diagnostic can be used for sanity checks.\n\n **vk_hv_percent_characteristic** (int) - index of the characteristic from net.characteristic for \\\n the adjustment of the parameter \"vk_percent\" for the calculation of tap dependent impedance.\n\n **vkr_hv_percent_characteristic** (int) - index of the characteristic from net.characteristic for \\\n the adjustment of the parameter \"vk_percent\" for the calculation of tap dependent impedance.\n\n **vk_mv_percent_characteristic** (int) - index of the characteristic from net.characteristic for \\\n the adjustment of the parameter \"vk_percent\" for the calculation of tap dependent impedance.\n\n **vkr_mv_percent_characteristic** (int) - index of the characteristic from net.characteristic for \\\n the adjustment of the parameter \"vk_percent\" for the calculation of tap dependent impedance.\n\n **vk_lv_percent_characteristic** (int) - index of the characteristic from net.characteristic for \\\n the adjustment of the parameter \"vk_percent\" for the calculation of tap dependent impedance.\n\n **vkr_lv_percent_characteristic** (int) - index of the characteristic from net.characteristic for \\\n the adjustment of the parameter \"vk_percent\" for the calculation of tap dependent impedance.\n\n OUTPUT:\n **index** (int) - The unique ID of the created transformer\n\n EXAMPLE:\n create_transformer3w(net, hv_bus = 0, mv_bus = 1, lv_bus = 2, name = \"trafo1\", std_type = \\\n \"63/25/38 MVA 110/20/10 kV\")\n \"\"\"\n\n # Check if bus exist to attach the trafo to\n for b in [hv_bus, mv_bus, lv_bus]:\n if b not in net[\"bus\"].index.values:\n raise UserWarning(\"Trafo tries to attach to bus %s\" % b)\n\n v = {\n \"name\": name, \"hv_bus\": hv_bus, \"mv_bus\": mv_bus, \"lv_bus\": lv_bus,\n \"in_service\": bool(in_service), \"std_type\": std_type\n }\n ti = load_std_type(net, std_type, \"trafo3w\")\n\n index = _get_index_with_check(net, \"trafo3w\", index, \"three winding transformer\")\n\n v.update({\n \"sn_hv_mva\": ti[\"sn_hv_mva\"],\n \"sn_mv_mva\": ti[\"sn_mv_mva\"],\n \"sn_lv_mva\": ti[\"sn_lv_mva\"],\n \"vn_hv_kv\": ti[\"vn_hv_kv\"],\n \"vn_mv_kv\": ti[\"vn_mv_kv\"],\n \"vn_lv_kv\": ti[\"vn_lv_kv\"],\n \"vk_hv_percent\": ti[\"vk_hv_percent\"],\n \"vk_mv_percent\": ti[\"vk_mv_percent\"],\n \"vk_lv_percent\": ti[\"vk_lv_percent\"],\n \"vkr_hv_percent\": ti[\"vkr_hv_percent\"],\n \"vkr_mv_percent\": ti[\"vkr_mv_percent\"],\n \"vkr_lv_percent\": ti[\"vkr_lv_percent\"],\n \"pfe_kw\": ti[\"pfe_kw\"],\n \"i0_percent\": ti[\"i0_percent\"],\n \"shift_mv_degree\": ti[\"shift_mv_degree\"] if \"shift_mv_degree\" in ti else 0,\n \"shift_lv_degree\": ti[\"shift_lv_degree\"] if \"shift_lv_degree\" in ti else 0,\n \"tap_at_star_point\": tap_at_star_point\n })\n for tp in (\n \"tap_neutral\", \"tap_max\", \"tap_min\", \"tap_side\", \"tap_step_percent\", \"tap_step_degree\"):\n if tp in ti:\n v.update({tp: ti[tp]})\n\n if (\"tap_neutral\" in v) and (tap_pos is nan):\n v[\"tap_pos\"] = v[\"tap_neutral\"]\n else:\n v[\"tap_pos\"] = tap_pos\n if type(tap_pos) == float:\n net.trafo3w.tap_pos = net.trafo3w.tap_pos.astype(float)\n\n dd = pd.DataFrame(v, index=[index])\n # todo: drop __version__ checks\n if version.parse(pd.__version__) < version.parse(\"0.21\"):\n net[\"trafo3w\"] = net[\"trafo3w\"].append(dd).reindex_axis(net[\"trafo3w\"].columns, axis=1)\n elif version.parse(pd.__version__) < version.parse(\"0.23\"):\n net[\"trafo3w\"] = net[\"trafo3w\"].append(dd).reindex(net[\"trafo3w\"].columns, axis=1)\n 
else:\n net[\"trafo3w\"] = net[\"trafo3w\"].append(dd, sort=True).reindex(net[\"trafo3w\"].columns,\n axis=1)\n # todo: append -> concat:\n # net[\"trafo3w\"] = pd.concat([net[\"trafo3w\"], dd], sort=True).reindex(net[\"trafo3w\"].columns, axis=1)\n\n _create_column_and_set_value(net, index, max_loading_percent, \"max_loading_percent\", \"trafo3w\")\n\n if tap_dependent_impedance is not None:\n _create_column_and_set_value(net, index, tap_dependent_impedance, \"tap_dependent_impedance\", \"trafo\", bool_, False, True)\n if vk_hv_percent_characteristic is not None:\n _create_column_and_set_value(net, index, vk_hv_percent_characteristic, \"vk_hv_percent_characteristic\", \"trafo\", \"Int64\")\n if vkr_hv_percent_characteristic is not None:\n _create_column_and_set_value(net, index, vkr_hv_percent_characteristic, \"vkr_hv_percent_characteristic\", \"trafo\", \"Int64\")\n if vk_mv_percent_characteristic is not None:\n _create_column_and_set_value(net, index, vk_mv_percent_characteristic, \"vk_mv_percent_characteristic\", \"trafo\", \"Int64\")\n if vkr_mv_percent_characteristic is not None:\n _create_column_and_set_value(net, index, vkr_mv_percent_characteristic, \"vkr_mv_percent_characteristic\", \"trafo\", \"Int64\")\n if vk_lv_percent_characteristic is not None:\n _create_column_and_set_value(net, index, vk_lv_percent_characteristic, \"vk_lv_percent_characteristic\", \"trafo\", \"Int64\")\n if vkr_lv_percent_characteristic is not None:\n _create_column_and_set_value(net, index, vkr_lv_percent_characteristic, \"vkr_lv_percent_characteristic\", \"trafo\", \"Int64\")\n\n return index\n\n\ndef create_transformer3w_from_parameters(net, hv_bus, mv_bus, lv_bus, vn_hv_kv, vn_mv_kv, vn_lv_kv,\n sn_hv_mva, sn_mv_mva, sn_lv_mva, vk_hv_percent,\n vk_mv_percent, vk_lv_percent, vkr_hv_percent,\n vkr_mv_percent, vkr_lv_percent, pfe_kw, i0_percent,\n shift_mv_degree=0., shift_lv_degree=0., tap_side=None,\n tap_step_percent=nan, tap_step_degree=nan, tap_pos=nan,\n tap_neutral=nan, tap_max=nan,\n tap_min=nan, name=None, in_service=True, index=None,\n max_loading_percent=nan, tap_at_star_point=False,\n vk0_hv_percent=nan, vk0_mv_percent=nan, vk0_lv_percent=nan,\n vkr0_hv_percent=nan, vkr0_mv_percent=nan, vkr0_lv_percent=nan,\n vector_group=None, tap_dependent_impedance=None,\n vk_hv_percent_characteristic=None, vkr_hv_percent_characteristic=None,\n vk_mv_percent_characteristic=None, vkr_mv_percent_characteristic=None,\n vk_lv_percent_characteristic=None, vkr_lv_percent_characteristic=None):\n \"\"\"\n Adds a three-winding transformer in table net[\"trafo3w\"].\n The model currently only supports one tap-changer per 3W Transformer.\n\n Input:\n **net** (pandapowerNet) - The net within this transformer should be created\n\n **hv_bus** (int) - The bus on the high-voltage side on which the transformer will be \\\n connected to\n\n **mv_bus** (int) - The bus on the middle-voltage side on which the transformer will be \\\n connected to\n\n **lv_bus** (int) - The bus on the low-voltage side on which the transformer will be \\\n connected to\n\n **vn_hv_kv** (float) rated voltage on high voltage side\n\n **vn_mv_kv** (float) rated voltage on medium voltage side\n\n **vn_lv_kv** (float) rated voltage on low voltage side\n\n **sn_hv_mva** (float) - rated apparent power on high voltage side\n\n **sn_mv_mva** (float) - rated apparent power on medium voltage side\n\n **sn_lv_mva** (float) - rated apparent power on low voltage side\n\n **vk_hv_percent** (float) - short circuit voltage from high to medium voltage\n\n 
**vk_mv_percent** (float) - short circuit voltage from medium to low voltage\n\n **vk_lv_percent** (float) - short circuit voltage from high to low voltage\n\n **vkr_hv_percent** (float) - real part of short circuit voltage from high to medium voltage\n\n **vkr_mv_percent** (float) - real part of short circuit voltage from medium to low voltage\n\n **vkr_lv_percent** (float) - real part of short circuit voltage from high to low voltage\n\n **pfe_kw** (float) - iron losses in kW\n\n **i0_percent** (float) - open loop losses\n\n OPTIONAL:\n **shift_mv_degree** (float, 0) - angle shift to medium voltage side*\n\n **shift_lv_degree** (float, 0) - angle shift to low voltage side*\n\n **tap_step_percent** (float) - Tap step in percent\n\n **tap_step_degree** (float) - Tap phase shift angle in degrees\n\n **tap_side** (string, None) - \"hv\", \"mv\", \"lv\"\n\n **tap_neutral** (int, nan) - default tap position\n\n **tap_min** (int, nan) - Minimum tap position\n\n **tap_max** (int, nan) - Maximum tap position\n\n **tap_pos** (int, nan) - current tap position of the transformer. Defaults to the \\\n medium position (tap_neutral)\n\n **tap_at_star_point** (boolean) - Whether tap changer is located at the star point of the \\\n 3W-transformer or at the bus\n\n **name** (string, None) - Name of the 3-winding transformer\n\n **in_service** (boolean, True) - True for in_service or False for out of service\n\n **max_loading_percent (float)** - maximum current loading (only needed for OPF)\n\n **tap_dependent_impedance** (boolean) - True if transformer impedance must be adjusted dependent \\\n on the tap position of the trabnsformer. Requires the additional columns \\\n \"vk_percent_characteristic\" and \"vkr_percent_characteristic\" that reference the index of the \\\n characteristic from the table net.characteristic. A convenience function \\\n pandapower.control.create_trafo_characteristics can be used to create the SplineCharacteristic \\\n objects, add the relevant columns and set up the references to the characteristics. 
\\\n The function pandapower.control.trafo_characteristics_diagnostic can be used for sanity checks.\n\n **vk_hv_percent_characteristic** (int) - index of the characteristic from net.characteristic for \\\n the adjustment of the parameter \"vk_percent\" for the calculation of tap dependent impedance.\n\n **vkr_hv_percent_characteristic** (int) - index of the characteristic from net.characteristic for \\\n the adjustment of the parameter \"vk_percent\" for the calculation of tap dependent impedance.\n\n **vk_mv_percent_characteristic** (int) - index of the characteristic from net.characteristic for \\\n the adjustment of the parameter \"vk_percent\" for the calculation of tap dependent impedance.\n\n **vkr_mv_percent_characteristic** (int) - index of the characteristic from net.characteristic for \\\n the adjustment of the parameter \"vk_percent\" for the calculation of tap dependent impedance.\n\n **vk_lv_percent_characteristic** (int) - index of the characteristic from net.characteristic for \\\n the adjustment of the parameter \"vk_percent\" for the calculation of tap dependent impedance.\n\n **vkr_lv_percent_characteristic** (int) - index of the characteristic from net.characteristic for \\\n the adjustment of the parameter \"vk_percent\" for the calculation of tap dependent impedance.\n\n **vk0_hv_percent** (float) - zero sequence short circuit voltage from high to medium voltage\n\n **vk0_mv_percent** (float) - zero sequence short circuit voltage from medium to low voltage\n\n **vk0_lv_percent** (float) - zero sequence short circuit voltage from high to low voltage\n\n **vkr0_hv_percent** (float) - zero sequence real part of short circuit voltage from high to medium voltage\n\n **vkr0_mv_percent** (float) - zero sequence real part of short circuit voltage from medium to low voltage\n\n **vkr0_lv_percent** (float) - zero sequence real part of short circuit voltage from high to low voltage\n\n **vector_group** (list of String) - Vector group of the transformer3w\n\n OUTPUT:\n **trafo_id** - The unique trafo_id of the created 3W transformer\n\n Example:\n create_transformer3w_from_parameters(net, hv_bus=0, mv_bus=1, lv_bus=2, name=\"trafo1\",\n sn_hv_mva=40, sn_mv_mva=20, sn_lv_mva=20, vn_hv_kv=110, vn_mv_kv=20, vn_lv_kv=10,\n vk_hv_percent=10,vk_mv_percent=11, vk_lv_percent=12, vkr_hv_percent=0.3,\n vkr_mv_percent=0.31, vkr_lv_percent=0.32, pfe_kw=30, i0_percent=0.1, shift_mv_degree=30,\n shift_lv_degree=30)\n\n \"\"\"\n\n # Check if bus exist to attach the trafo to\n for b in [hv_bus, mv_bus, lv_bus]:\n if b not in net[\"bus\"].index.values:\n raise UserWarning(\"Trafo tries to attach to non-existent bus %s\" % b)\n\n index = _get_index_with_check(net, \"trafo3w\", index, \"three winding transformer\")\n\n if tap_pos is nan:\n tap_pos = tap_neutral\n\n columns = [\"lv_bus\", \"mv_bus\", \"hv_bus\", \"vn_hv_kv\", \"vn_mv_kv\", \"vn_lv_kv\", \"sn_hv_mva\",\n \"sn_mv_mva\", \"sn_lv_mva\", \"vk_hv_percent\", \"vk_mv_percent\", \"vk_lv_percent\",\n \"vkr_hv_percent\", \"vkr_mv_percent\", \"vkr_lv_percent\", \"pfe_kw\", \"i0_percent\",\n \"shift_mv_degree\", \"shift_lv_degree\", \"tap_side\", \"tap_step_percent\",\n \"tap_step_degree\", \"tap_pos\", \"tap_neutral\", \"tap_max\", \"tap_min\", \"in_service\",\n \"name\", \"std_type\", \"tap_at_star_point\", \"vk0_hv_percent\", \"vk0_mv_percent\", \"vk0_lv_percent\",\n \"vkr0_hv_percent\", \"vkr0_mv_percent\", \"vkr0_lv_percent\", \"vector_group\"]\n values = [lv_bus, mv_bus, hv_bus, vn_hv_kv, vn_mv_kv, vn_lv_kv, sn_hv_mva, sn_mv_mva, 
sn_lv_mva,\n vk_hv_percent, vk_mv_percent, vk_lv_percent, vkr_hv_percent, vkr_mv_percent,\n vkr_lv_percent, pfe_kw, i0_percent, shift_mv_degree, shift_lv_degree, tap_side,\n tap_step_percent, tap_step_degree, tap_pos, tap_neutral, tap_max, tap_min,\n bool(in_service), name, None, tap_at_star_point,\n vk0_hv_percent, vk0_mv_percent, vk0_lv_percent,\n vkr0_hv_percent, vkr0_mv_percent, vkr0_lv_percent, vector_group]\n\n _set_entries(net, \"trafo3w\", index, **dict(zip(columns, values)))\n\n _create_column_and_set_value(net, index, max_loading_percent, \"max_loading_percent\", \"trafo3w\")\n\n if tap_dependent_impedance is not None:\n _create_column_and_set_value(net, index, tap_dependent_impedance, \"tap_dependent_impedance\", \"trafo\", bool_, False, True)\n if vk_hv_percent_characteristic is not None:\n _create_column_and_set_value(net, index, vk_hv_percent_characteristic, \"vk_hv_percent_characteristic\", \"trafo\", \"Int64\")\n if vkr_hv_percent_characteristic is not None:\n _create_column_and_set_value(net, index, vkr_hv_percent_characteristic, \"vkr_hv_percent_characteristic\", \"trafo\", \"Int64\")\n if vk_mv_percent_characteristic is not None:\n _create_column_and_set_value(net, index, vk_mv_percent_characteristic, \"vk_mv_percent_characteristic\", \"trafo\", \"Int64\")\n if vkr_mv_percent_characteristic is not None:\n _create_column_and_set_value(net, index, vkr_mv_percent_characteristic, \"vkr_mv_percent_characteristic\", \"trafo\", \"Int64\")\n if vk_lv_percent_characteristic is not None:\n _create_column_and_set_value(net, index, vk_lv_percent_characteristic, \"vk_lv_percent_characteristic\", \"trafo\", \"Int64\")\n if vkr_lv_percent_characteristic is not None:\n _create_column_and_set_value(net, index, vkr_lv_percent_characteristic, \"vkr_lv_percent_characteristic\", \"trafo\", \"Int64\")\n\n return index\n\n\ndef create_transformers3w_from_parameters(net, hv_buses, mv_buses, lv_buses, vn_hv_kv, vn_mv_kv,\n vn_lv_kv, sn_hv_mva, sn_mv_mva, sn_lv_mva, vk_hv_percent,\n vk_mv_percent, vk_lv_percent, vkr_hv_percent,\n vkr_mv_percent, vkr_lv_percent, pfe_kw, i0_percent,\n shift_mv_degree=0., shift_lv_degree=0., tap_side=None,\n tap_step_percent=nan, tap_step_degree=nan, tap_pos=nan,\n tap_neutral=nan, tap_max=nan, tap_min=nan, name=None,\n in_service=True, index=None, max_loading_percent=None,\n tap_at_star_point=False,\n vk0_hv_percent=nan, vk0_mv_percent=nan, vk0_lv_percent=nan,\n vkr0_hv_percent=nan, vkr0_mv_percent=nan, vkr0_lv_percent=nan,\n vector_group=None, tap_dependent_impedance=None,\n vk_hv_percent_characteristic=None, vkr_hv_percent_characteristic=None,\n vk_mv_percent_characteristic=None, vkr_mv_percent_characteristic=None,\n vk_lv_percent_characteristic=None, vkr_lv_percent_characteristic=None, **kwargs):\n \"\"\"\n Adds a three-winding transformer in table net[\"trafo3w\"].\n\n Input:\n **net** (pandapowerNet) - The net within this transformer should be created\n\n **hv_bus** (list) - The bus on the high-voltage side on which the transformer will be \\\n connected to\n\n **mv_bus** (list) - The bus on the middle-voltage side on which the transformer will be \\\n connected to\n\n **lv_bus** (list) - The bus on the low-voltage side on which the transformer will be \\\n connected to\n\n **vn_hv_kv** (float or list) rated voltage on high voltage side\n\n **vn_mv_kv** (float or list) rated voltage on medium voltage side\n\n **vn_lv_kv** (float or list) rated voltage on low voltage side\n\n **sn_hv_mva** (float or list) - rated apparent power on high voltage side\n\n 
**sn_mv_mva** (float or list) - rated apparent power on medium voltage side\n\n **sn_lv_mva** (float or list) - rated apparent power on low voltage side\n\n **vk_hv_percent** (float or list) - short circuit voltage from high to medium voltage\n\n **vk_mv_percent** (float or list) - short circuit voltage from medium to low voltage\n\n **vk_lv_percent** (float or list) - short circuit voltage from high to low voltage\n\n **vkr_hv_percent** (float or list) - real part of short circuit voltage from high to medium\\\n voltage\n\n **vkr_mv_percent** (float or list) - real part of short circuit voltage from medium to low\\\n voltage\n\n **vkr_lv_percent** (float or list) - real part of short circuit voltage from high to low\\\n voltage\n\n **pfe_kw** (float or list) - iron losses in kW\n\n **i0_percent** (float or list) - open loop losses\n\n OPTIONAL:\n **shift_mv_degree** (float or list, 0) - angle shift to medium voltage side*\n\n **shift_lv_degree** (float or list, 0) - angle shift to low voltage side*\n\n **tap_step_percent** (float or list) - Tap step in percent\n\n **tap_step_degree** (float or list) - Tap phase shift angle in degrees\n\n **tap_side** (string, None) - \"hv\", \"mv\", \"lv\"\n\n **tap_neutral** (int, nan) - default tap position\n\n **tap_min** (int, nan) - Minimum tap position\n\n **tap_max** (int, nan) - Maximum tap position\n\n **tap_pos** (int, nan) - current tap position of the transformer. Defaults to the \\\n medium position (tap_neutral)\n\n **tap_at_star_point** (boolean) - Whether tap changer is located at the star point of the \\\n 3W-transformer or at the bus\n\n **name** (string, None) - Name of the 3-winding transformer\n\n **in_service** (boolean, True) - True for in_service or False for out of service\n\n ** only considered in loadflow if calculate_voltage_angles = True\n **The model currently only supports one tap-changer per 3W Transformer.\n\n **max_loading_percent (float)** - maximum current loading (only needed for OPF)\n\n **tap_dependent_impedance** (boolean) - True if transformer impedance must be adjusted dependent \\\n on the tap position of the trabnsformer. Requires the additional columns \\\n \"vk_percent_characteristic\" and \"vkr_percent_characteristic\" that reference the index of the \\\n characteristic from the table net.characteristic. A convenience function \\\n pandapower.control.create_trafo_characteristics can be used to create the SplineCharacteristic \\\n objects, add the relevant columns and set up the references to the characteristics. 
\\\n The function pandapower.control.trafo_characteristics_diagnostic can be used for sanity checks.\n\n **vk_hv_percent_characteristic** (int) - index of the characteristic from net.characteristic for \\\n the adjustment of the parameter \"vk_percent\" for the calculation of tap dependent impedance.\n\n **vkr_hv_percent_characteristic** (int) - index of the characteristic from net.characteristic for \\\n the adjustment of the parameter \"vk_percent\" for the calculation of tap dependent impedance.\n\n **vk_mv_percent_characteristic** (int) - index of the characteristic from net.characteristic for \\\n the adjustment of the parameter \"vk_percent\" for the calculation of tap dependent impedance.\n\n **vkr_mv_percent_characteristic** (int) - index of the characteristic from net.characteristic for \\\n the adjustment of the parameter \"vk_percent\" for the calculation of tap dependent impedance.\n\n **vk_lv_percent_characteristic** (int) - index of the characteristic from net.characteristic for \\\n the adjustment of the parameter \"vk_percent\" for the calculation of tap dependent impedance.\n\n **vkr_lv_percent_characteristic** (int) - index of the characteristic from net.characteristic for \\\n the adjustment of the parameter \"vk_percent\" for the calculation of tap dependent impedance.\n\n **vk0_hv_percent** (float) - zero sequence short circuit voltage from high to medium voltage\n\n **vk0_mv_percent** (float) - zero sequence short circuit voltage from medium to low voltage\n\n **vk0_lv_percent** (float) - zero sequence short circuit voltage from high to low voltage\n\n **vkr0_hv_percent** (float) - zero sequence real part of short circuit voltage from high to medium voltage\n\n **vkr0_mv_percent** (float) - zero sequence real part of short circuit voltage from medium to low voltage\n\n **vkr0_lv_percent** (float) - zero sequence real part of short circuit voltage from high to low voltage\n\n **vector_group** (list of String) - Vector group of the transformer3w\n\n OUTPUT:\n **trafo_id** - List of trafo_ids of the created 3W transformers\n\n Example:\n create_transformer3w_from_parameters(net, hv_bus=0, mv_bus=1, lv_bus=2, name=\"trafo1\",\n sn_hv_mva=40, sn_mv_mva=20, sn_lv_mva=20, vn_hv_kv=110, vn_mv_kv=20, vn_lv_kv=10,\n vk_hv_percent=10,vk_mv_percent=11, vk_lv_percent=12, vkr_hv_percent=0.3,\n vkr_mv_percent=0.31, vkr_lv_percent=0.32, pfe_kw=30, i0_percent=0.1, shift_mv_degree=30,\n shift_lv_degree=30)\n\n \"\"\"\n index = _get_multiple_index_with_check(net, \"trafo3w\", index, len(hv_buses),\n name=\"Three winding transformers\")\n\n if not np_all(isin(hv_buses, net.bus.index)):\n bus_not_exist = set(hv_buses) - set(net.bus.index)\n raise UserWarning(\"Transformers trying to attach to non existing buses %s\" % bus_not_exist)\n if not np_all(isin(mv_buses, net.bus.index)):\n bus_not_exist = set(mv_buses) - set(net.bus.index)\n raise UserWarning(\"Transformers trying to attach to non existing buses %s\" % bus_not_exist)\n if not np_all(isin(lv_buses, net.bus.index)):\n bus_not_exist = set(lv_buses) - set(net.bus.index)\n raise UserWarning(\"Transformers trying to attach to non existing buses %s\" % bus_not_exist)\n\n tp_neutral = pd.Series(tap_neutral, index=index, dtype=float64)\n tp_pos = pd.Series(tap_pos, index=index, dtype=float64).fillna(tp_neutral)\n entries = {\"lv_bus\": lv_buses, \"mv_bus\": mv_buses, \"hv_bus\": hv_buses, \"vn_hv_kv\": vn_hv_kv,\n \"vn_mv_kv\": vn_mv_kv, \"vn_lv_kv\": vn_lv_kv, \"sn_hv_mva\": sn_hv_mva,\n \"sn_mv_mva\": sn_mv_mva, \"sn_lv_mva\": 
sn_lv_mva, \"vk_hv_percent\": vk_hv_percent,\n \"vk_mv_percent\": vk_mv_percent, \"vk_lv_percent\": vk_lv_percent,\n \"vkr_hv_percent\": vkr_hv_percent, \"vkr_mv_percent\": vkr_mv_percent,\n \"vkr_lv_percent\": vkr_lv_percent, \"pfe_kw\": pfe_kw, \"i0_percent\": i0_percent,\n \"shift_mv_degree\": shift_mv_degree, \"shift_lv_degree\": shift_lv_degree,\n \"tap_side\": tap_side, \"tap_step_percent\": tap_step_percent,\n \"tap_step_degree\": tap_step_degree, \"tap_pos\": tp_pos, \"tap_neutral\": tp_neutral,\n \"tap_max\": tap_max, \"tap_min\": tap_min,\n \"in_service\": array(in_service).astype(bool_), \"name\": name,\n \"tap_at_star_point\": array(tap_at_star_point).astype(bool_), \"std_type\": None,\n \"vk0_hv_percent\": vk0_hv_percent, \"vk0_mv_percent\": vk0_mv_percent,\n \"vk0_lv_percent\": vk0_lv_percent, \"vkr0_hv_percent\": vkr0_hv_percent,\n \"vkr0_mv_percent\": vkr0_mv_percent, \"vkr0_lv_percent\": vkr0_lv_percent,\n \"vector_group\": vector_group}\n\n _add_series_to_entries(entries, index, \"max_loading_percent\", max_loading_percent)\n\n if tap_dependent_impedance is not None:\n _add_series_to_entries(entries, index, \"tap_dependent_impedance\", tap_dependent_impedance, dtype=bool_, default_val=False)\n if vk_hv_percent_characteristic is not None:\n _add_series_to_entries(entries, index, \"vk_hv_percent_characteristic\", vk_hv_percent_characteristic, \"Int64\")\n if vkr_hv_percent_characteristic is not None:\n _add_series_to_entries(entries, index, \"vkr_hv_percent_characteristic\", vkr_hv_percent_characteristic, \"Int64\")\n if vk_mv_percent_characteristic is not None:\n _add_series_to_entries(entries, index, \"vk_mv_percent_characteristic\", vk_mv_percent_characteristic, \"Int64\")\n if vkr_mv_percent_characteristic is not None:\n _add_series_to_entries(entries, index, \"vkr_mv_percent_characteristic\", vkr_mv_percent_characteristic, \"Int64\")\n if vk_lv_percent_characteristic is not None:\n _add_series_to_entries(entries, index, \"vk_lv_percent_characteristic\", vk_lv_percent_characteristic, \"Int64\")\n if vkr_lv_percent_characteristic is not None:\n _add_series_to_entries(entries, index, \"vkr_lv_percent_characteristic\", vkr_lv_percent_characteristic, \"Int64\")\n\n _set_multiple_entries(net, \"trafo3w\", index, **entries, **kwargs)\n\n return index\n\n\ndef create_switch(net, bus, element, et, closed=True, type=None, name=None, index=None, z_ohm=0):\n \"\"\"\n Adds a switch in the net[\"switch\"] table.\n\n Switches can be either between two buses (bus-bus switch) or at the end of a line or transformer\n element (bus-element switch).\n\n Two buses that are connected through a closed bus-bus switches are fused in the power flow if\n the switch is closed or separated if the switch is open.\n\n An element that is connected to a bus through a bus-element switch is connected to the bus\n if the switch is closed or disconnected if the switch is open.\n\n INPUT:\n **net** (pandapowerNet) - The net within which this switch should be created\n\n **bus** - The bus that the switch is connected to\n\n **element** - index of the element: bus id if et == \"b\", line id if et == \"l\", trafo id if \\\n et == \"t\"\n\n **et** - (string) element type: \"l\" = switch between bus and line, \"t\" = switch between\n bus and transformer, \"t3\" = switch between bus and transformer3w, \"b\" = switch between\n two buses\n\n OPTIONAL:\n **closed** (boolean, True) - switch position: False = open, True = closed\n\n **type** (int, None) - indicates the type of switch: \"LS\" = Load Switch, \"CB\" = 
\\\n Circuit Breaker, \"LBS\" = Load Break Switch or \"DS\" = Disconnecting Switch\n\n **z_ohm** (float, 0) - indicates the resistance of the switch, which has effect only on\n bus-bus switches, if sets to 0, the buses will be fused like before, if larger than\n 0 a branch will be created for the switch which has also effects on the bus mapping\n\n **name** (string, default None) - The name for this switch\n\n OUTPUT:\n **sid** - The unique switch_id of the created switch\n\n EXAMPLE:\n create_switch(net, bus = 0, element = 1, et = 'b', type =\"LS\", z_ohm = 0.1)\n\n create_switch(net, bus = 0, element = 1, et = 'l')\n\n \"\"\"\n _check_node_element(net, bus)\n if et == \"l\":\n elm_tab = 'line'\n if element not in net[elm_tab].index:\n raise UserWarning(\"Unknown line index\")\n if (not net[elm_tab][\"from_bus\"].loc[element] == bus and\n not net[elm_tab][\"to_bus\"].loc[element] == bus):\n raise UserWarning(\"Line %s not connected to bus %s\" % (element, bus))\n elif et == \"t\":\n elm_tab = 'trafo'\n if element not in net[elm_tab].index:\n raise UserWarning(\"Unknown bus index\")\n if (not net[elm_tab][\"hv_bus\"].loc[element] == bus and\n not net[elm_tab][\"lv_bus\"].loc[element] == bus):\n raise UserWarning(\"Trafo %s not connected to bus %s\" % (element, bus))\n elif et == \"t3\":\n elm_tab = 'trafo3w'\n if element not in net[elm_tab].index:\n raise UserWarning(\"Unknown trafo3w index\")\n if (not net[elm_tab][\"hv_bus\"].loc[element] == bus and\n not net[elm_tab][\"mv_bus\"].loc[element] == bus and\n not net[elm_tab][\"lv_bus\"].loc[element] == bus):\n raise UserWarning(\"Trafo3w %s not connected to bus %s\" % (element, bus))\n elif et == \"b\":\n _check_node_element(net, element)\n else:\n raise UserWarning(\"Unknown element type\")\n\n index = _get_index_with_check(net, \"switch\", index)\n\n entries = dict(zip([\"bus\", \"element\", \"et\", \"closed\", \"type\", \"name\", \"z_ohm\"],\n [bus, element, et, closed, type, name, z_ohm]))\n _set_entries(net, \"switch\", index, **entries)\n\n return index\n\n\ndef create_switches(net, buses, elements, et, closed=True, type=None, name=None, index=None,\n z_ohm=0, **kwargs):\n \"\"\"\n Adds a switch in the net[\"switch\"] table.\n\n Switches can be either between two buses (bus-bus switch) or at the end of a line or transformer\n element (bus-element switch).\n\n Two buses that are connected through a closed bus-bus switches are fused in the power flow if\n the switch is closed or separated if the switch is open.\n\n An element that is connected to a bus through a bus-element switch is connected to the bus\n if the switch is closed or disconnected if the switch is open.\n\n INPUT:\n **net** (pandapowerNet) - The net within which this switch should be created\n\n **buses** (list)- The bus that the switch is connected to\n\n **element** (list)- index of the element: bus id if et == \"b\", line id if et == \"l\", \\\n trafo id if et == \"t\"\n\n **et** - (list) element type: \"l\" = switch between bus and line, \"t\" = switch between\n bus and transformer, \"t3\" = switch between bus and transformer3w, \"b\" = switch between\n two buses\n\n OPTIONAL:\n **closed** (boolean, True) - switch position: False = open, True = closed\n\n **type** (int, None) - indicates the type of switch: \"LS\" = Load Switch, \"CB\" = \\\n Circuit Breaker, \"LBS\" = Load Break Switch or \"DS\" = Disconnecting Switch\n\n **z_ohm** (float, 0) - indicates the resistance of the switch, which has effect only on\n bus-bus switches, if sets to 0, the buses will be fused 
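# Illustrative usage sketch (added for clarity, not part of the original module):
# a bus-bus switch and a bus-line switch in a minimal three-bus network. Assumes
# pandapower is importable as "pp"; line parameters are arbitrary example data.
def _example_create_switch():
    import pandapower as pp
    net = pp.create_empty_network()
    b0 = pp.create_bus(net, vn_kv=20.)
    b1 = pp.create_bus(net, vn_kv=20.)
    b2 = pp.create_bus(net, vn_kv=20.)
    line = pp.create_line_from_parameters(net, from_bus=b1, to_bus=b2, length_km=1.,
                                          r_ohm_per_km=0.1, x_ohm_per_km=0.1,
                                          c_nf_per_km=10., max_i_ka=0.4)
    # bus-bus switch: while closed, b0 and b1 are fused in the power flow
    pp.create_switch(net, bus=b0, element=b1, et="b", type="CB")
    # bus-line switch: opening it disconnects the line end at b1
    pp.create_switch(net, bus=b1, element=line, et="l", closed=True, type="LBS")
    return net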
like before, if larger than\n 0 a branch will be created for the switch which has also effects on the bus mapping\n\n **name** (string, default None) - The name for this switch\n\n OUTPUT:\n **sid** - The unique switch_id of the created switch\n\n EXAMPLE:\n create_switch(net, bus = 0, element = 1, et = 'b', type =\"LS\", z_ohm = 0.1)\n\n create_switch(net, bus = 0, element = 1, et = 'l')\n\n \"\"\"\n index = _get_multiple_index_with_check(net, \"switch\", index, len(buses), name=\"Switches\")\n _check_multiple_node_elements(net, buses)\n\n for element, elm_type, bus in zip(elements, et, buses):\n if elm_type == \"l\":\n elm_tab = 'line'\n if element not in net[elm_tab].index:\n raise UserWarning(\"Line %s does not exist\" % element)\n if (not net[elm_tab][\"from_bus\"].loc[element] == bus and\n not net[elm_tab][\"to_bus\"].loc[element] == bus):\n raise UserWarning(\"Line %s not connected to bus %s\" % (element, bus))\n elif elm_type == \"t\":\n elm_tab = 'trafo'\n if element not in net[elm_tab].index:\n raise UserWarning(\"Trafo %s does not exist\" % element)\n if (not net[elm_tab][\"hv_bus\"].loc[element] == bus and\n not net[elm_tab][\"lv_bus\"].loc[element] == bus):\n raise UserWarning(\"Trafo %s not connected to bus %s\" % (element, bus))\n elif elm_type == \"t3\":\n elm_tab = 'trafo3w'\n if element not in net[elm_tab].index:\n raise UserWarning(\"Trafo3w %s does not exist\" % element)\n if (not net[elm_tab][\"hv_bus\"].loc[element] == bus and\n not net[elm_tab][\"mv_bus\"].loc[element] == bus and\n not net[elm_tab][\"lv_bus\"].loc[element] == bus):\n raise UserWarning(\"Trafo3w %s not connected to bus %s\" % (element, bus))\n elif elm_type == \"b\":\n _check_node_element(net, element)\n else:\n raise UserWarning(\"Unknown element type\")\n\n entries = {\"bus\": buses, \"element\": elements, \"et\": et, \"closed\": closed, \"type\": type,\n \"name\": name, \"z_ohm\": z_ohm}\n\n _set_multiple_entries(net, \"switch\", index, **entries, **kwargs)\n\n return index\n\n\ndef create_shunt(net, bus, q_mvar, p_mw=0., vn_kv=None, step=1, max_step=1, name=None,\n in_service=True, index=None):\n \"\"\"\n Creates a shunt element\n\n INPUT:\n **net** (pandapowerNet) - The pandapower network in which the element is created\n\n **bus** - bus number of bus to whom the shunt is connected to\n\n **p_mw** - shunt active power in MW at v= 1.0 p.u.\n\n **q_mvar** - shunt susceptance in MVAr at v= 1.0 p.u.\n\n OPTIONAL:\n **vn_kv** (float, None) - rated voltage of the shunt. Defaults to rated voltage of \\\n connected bus\n\n **step** (int, 1) - step of shunt with which power values are multiplied\n\n **max_step** (boolean, True) - True for in_service or False for out of service\n\n **name** (str, None) - element name\n\n **in_service** (boolean, True) - True for in_service or False for out of service\n\n **index** (int, None) - Force a specified ID if it is available. 
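# Illustrative usage sketch (added for clarity, not part of the original module):
# creating several switches in one call by passing per-switch lists for buses,
# elements and element types. Assumes pandapower is importable as "pp".
def _example_create_switches():
    import pandapower as pp
    net = pp.create_empty_network()
    b0, b1, b2 = [pp.create_bus(net, vn_kv=20.) for _ in range(3)]
    line = pp.create_line_from_parameters(net, from_bus=b1, to_bus=b2, length_km=1.,
                                          r_ohm_per_km=0.1, x_ohm_per_km=0.1,
                                          c_nf_per_km=10., max_i_ka=0.4)
    # one bus-bus switch between b0 and b1 plus one bus-line switch at b1
    pp.create_switches(net, buses=[b0, b1], elements=[b1, line], et=["b", "l"],
                       closed=True, type="LS")
    return net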
If None, the index one \\\n higher than the highest already existing index is selected.\n\n OUTPUT:\n **index** (int) - The unique ID of the created shunt\n\n EXAMPLE:\n create_shunt(net, 0, 20)\n \"\"\"\n _check_node_element(net, bus)\n\n index = _get_index_with_check(net, \"shunt\", index)\n\n if vn_kv is None:\n vn_kv = net.bus.vn_kv.at[bus]\n\n entries = dict(zip([\"bus\", \"name\", \"p_mw\", \"q_mvar\", \"vn_kv\", \"step\", \"max_step\", \"in_service\"],\n [bus, name, p_mw, q_mvar, vn_kv, step, max_step, in_service]))\n _set_entries(net, \"shunt\", index, **entries)\n\n return index\n\n\ndef create_shunt_as_capacitor(net, bus, q_mvar, loss_factor, **kwargs):\n \"\"\"\n Creates a shunt element representing a capacitor bank.\n\n INPUT:\n\n **net** (pandapowerNet) - The pandapower network in which the element is created\n\n **bus** - bus number of bus to whom the shunt is connected to\n\n **q_mvar** (float) - reactive power of the capacitor bank at rated voltage\n\n **loss_factor** (float) - loss factor tan(delta) of the capacitor bank\n\n OPTIONAL:\n same as in create_shunt, keyword arguments are passed to the create_shunt function\n\n\n OUTPUT:\n **index** (int) - The unique ID of the created shunt\n \"\"\"\n q_mvar = -abs(q_mvar) # q is always negative for capacitor\n p_mw = abs(q_mvar * loss_factor) # p is always positive for active power losses\n return create_shunt(net, bus, q_mvar=q_mvar, p_mw=p_mw, **kwargs)\n\n\ndef create_impedance(net, from_bus, to_bus, rft_pu, xft_pu, sn_mva, rtf_pu=None, xtf_pu=None,\n name=None, in_service=True, index=None):\n \"\"\"\n Creates an per unit impedance element\n\n INPUT:\n **net** (pandapowerNet) - The pandapower network in which the element is created\n\n **from_bus** (int) - starting bus of the impedance\n\n **to_bus** (int) - ending bus of the impedance\n\n **r_pu** (float) - real part of the impedance in per unit\n\n **x_pu** (float) - imaginary part of the impedance in per unit\n\n **sn_mva** (float) - rated power of the impedance in MVA\n\n OUTPUT:\n\n impedance id\n \"\"\"\n index = _get_index_with_check(net, \"impedance\", index)\n\n _check_branch_element(net, \"Impedance\", index, from_bus, to_bus)\n\n if rtf_pu is None:\n rtf_pu = rft_pu\n if xtf_pu is None:\n xtf_pu = xft_pu\n\n columns = [\"from_bus\", \"to_bus\", \"rft_pu\", \"xft_pu\", \"rtf_pu\", \"xtf_pu\", \"name\", \"sn_mva\",\n \"in_service\"]\n values = [from_bus, to_bus, rft_pu, xft_pu, rtf_pu, xtf_pu, name, sn_mva, in_service]\n _set_entries(net, \"impedance\", index, **dict(zip(columns, values)))\n\n return index\n\n\ndef create_series_reactor_as_impedance(net, from_bus, to_bus, r_ohm, x_ohm, sn_mva,\n name=None, in_service=True, index=None):\n \"\"\"\n Creates a series reactor as per-unit impedance\n :param net: (pandapowerNet) - The pandapower network in which the element is created\n :param from_bus: (int) - starting bus of the series reactor\n :param to_bus: (int) - ending bus of the series reactor\n :param r_ohm: (float) - real part of the impedance in Ohm\n :param x_ohm: (float) - imaginary part of the impedance in Ohm\n :param sn_mva: (float) - rated power of the series reactor in MVA\n :param name:\n :type name:\n :param in_service:\n :type in_service:\n :param index:\n :type index:\n :return: index of the created element\n \"\"\"\n if net.bus.at[from_bus, 'vn_kv'] == net.bus.at[to_bus, 'vn_kv']:\n vn_kv = net.bus.at[from_bus, 'vn_kv']\n else:\n raise UserWarning('Unable to infer rated voltage vn_kv for series reactor %s due to '\n 'different rated voltages of 
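# Illustrative usage sketch (added for clarity, not part of the original module):
# a plain shunt and the capacitor-bank convenience wrapper defined above. For the
# capacitor bank, q_mvar is stored negative and p_mw = |q_mvar| * loss_factor.
# Assumes pandapower is importable as "pp"; values are arbitrary example data.
def _example_create_shunts():
    import pandapower as pp
    net = pp.create_empty_network()
    b0 = pp.create_bus(net, vn_kv=20.)
    b1 = pp.create_bus(net, vn_kv=20.)
    # inductive shunt reactor: positive q_mvar (consumption) at 1.0 p.u. voltage
    pp.create_shunt(net, bus=b0, q_mvar=2.0, p_mw=0.01, name="shunt reactor")
    # 0.96 Mvar capacitor bank with tan(delta) = 0.001
    idx = pp.create_shunt_as_capacitor(net, b1, q_mvar=0.96, loss_factor=0.001)
    assert net.shunt.at[idx, "q_mvar"] == -0.96
    assert abs(net.shunt.at[idx, "p_mw"] - 0.96e-3) < 1e-12
    return net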
from_bus %d (%.3f p.u.) and '\n 'to_bus %d (%.3f p.u.)' % (name, from_bus, net.bus.at[from_bus, 'vn_kv'],\n to_bus, net.bus.at[to_bus, 'vn_kv']))\n\n base_z_ohm = vn_kv ** 2 / sn_mva\n rft_pu = r_ohm / base_z_ohm\n xft_pu = x_ohm / base_z_ohm\n\n index = create_impedance(net, from_bus=from_bus, to_bus=to_bus, rft_pu=rft_pu, xft_pu=xft_pu,\n sn_mva=sn_mva, name=name, in_service=in_service, index=index)\n return index\n\n\ndef create_ward(net, bus, ps_mw, qs_mvar, pz_mw, qz_mvar, name=None, in_service=True, index=None):\n \"\"\"\n Creates a ward equivalent.\n\n A ward equivalent is a combination of an impedance load and a PQ load.\n\n INPUT:\n **net** (pandapowernet) - The pandapower net within the element should be created\n\n **bus** (int) - bus of the ward equivalent\n\n **ps_mw** (float) - active power of the PQ load\n\n **qs_mvar** (float) - reactive power of the PQ load\n\n **pz_mw** (float) - active power of the impedance load in MW at 1.pu voltage\n\n **qz_mvar** (float) - reactive power of the impedance load in MVar at 1.pu voltage\n\n OUTPUT:\n ward id\n \"\"\"\n _check_node_element(net, bus)\n\n index = _get_index_with_check(net, \"ward\", index, \"ward equivalent\")\n\n entries = dict(zip([\"bus\", \"ps_mw\", \"qs_mvar\", \"pz_mw\", \"qz_mvar\", \"name\", \"in_service\"],\n [bus, ps_mw, qs_mvar, pz_mw, qz_mvar, name, in_service]))\n _set_entries(net, \"ward\", index, **entries)\n\n return index\n\n\ndef create_xward(net, bus, ps_mw, qs_mvar, pz_mw, qz_mvar, r_ohm, x_ohm, vm_pu, in_service=True,\n name=None, index=None, slack_weight=0.0):\n \"\"\"\n Creates an extended ward equivalent.\n\n A ward equivalent is a combination of an impedance load, a PQ load and as voltage source with\n an internal impedance.\n\n INPUT:\n **net** - The pandapower net within the impedance should be created\n\n **bus** (int) - bus of the ward equivalent\n\n **ps_mw** (float) - active power of the PQ load\n\n **qs_mvar** (float) - reactive power of the PQ load\n\n **pz_mw** (float) - active power of the impedance load in MW at 1.pu voltage\n\n **qz_mvar** (float) - reactive power of the impedance load in MVar at 1.pu voltage\n\n **r_ohm** (float) - internal resistance of the voltage source\n\n **x_ohm** (float) - internal reactance of the voltage source\n\n **vm_pu** (float) - voltage magnitude at the additional PV-node\n\n **slack_weight** (float, default 1.0) - Contribution factor for distributed slack power flow calculation (active power balancing)\n\n OUTPUT:\n xward id\n \"\"\"\n _check_node_element(net, bus)\n\n index = _get_index_with_check(net, \"xward\", index, \"extended ward equivalent\")\n\n columns = [\"bus\", \"ps_mw\", \"qs_mvar\", \"pz_mw\", \"qz_mvar\", \"r_ohm\", \"x_ohm\", \"vm_pu\", \"name\",\n \"slack_weight\", \"in_service\"]\n values = [bus, ps_mw, qs_mvar, pz_mw, qz_mvar, r_ohm, x_ohm, vm_pu, name, slack_weight, in_service]\n _set_entries(net, \"xward\", index, **dict(zip(columns, values)))\n\n return index\n\n\ndef create_dcline(net, from_bus, to_bus, p_mw, loss_percent, loss_mw, vm_from_pu, vm_to_pu,\n index=None, name=None, max_p_mw=nan, min_q_from_mvar=nan, min_q_to_mvar=nan,\n max_q_from_mvar=nan, max_q_to_mvar=nan, in_service=True):\n \"\"\"\n Creates a dc line.\n\n INPUT:\n **from_bus** (int) - ID of the bus on one side which the line will be connected with\n\n **to_bus** (int) - ID of the bus on the other side which the line will be connected with\n\n **p_mw** - (float) Active power transmitted from 'from_bus' to 'to_bus'\n\n **loss_percent** - (float) Relative 
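# Illustrative sketch (added for clarity, not part of the original module): the ohmic
# series-reactor values are converted to per unit with base_z = vn_kv**2 / sn_mva, as
# done in create_series_reactor_as_impedance above. Assumes pandapower is importable
# as "pp"; values are arbitrary example data.
def _example_series_reactor_per_unit():
    import pandapower as pp
    net = pp.create_empty_network()
    b0 = pp.create_bus(net, vn_kv=110.)
    b1 = pp.create_bus(net, vn_kv=110.)
    idx = pp.create_series_reactor_as_impedance(net, from_bus=b0, to_bus=b1,
                                                r_ohm=0.5, x_ohm=5.0, sn_mva=100.)
    base_z_ohm = 110. ** 2 / 100.   # = 121 ohm
    assert abs(net.impedance.at[idx, "rft_pu"] - 0.5 / base_z_ohm) < 1e-12
    assert abs(net.impedance.at[idx, "xft_pu"] - 5.0 / base_z_ohm) < 1e-12
    return net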
transmission loss in percent of active power\n transmission\n\n **loss_mw** - (float) Total transmission loss in MW\n\n **vm_from_pu** - (float) Voltage setpoint at from bus\n\n **vm_to_pu** - (float) Voltage setpoint at to bus\n\n OPTIONAL:\n **index** (int, None) - Force a specified ID if it is available. If None, the index one \\\n higher than the highest already existing index is selected.\n\n **name** (str, None) - A custom name for this dc line\n\n **in_service** (boolean) - True for in_service or False for out of service\n\n **max_p_mw** - Maximum active power flow. Only respected for OPF\n\n **min_q_from_mvar** - Minimum reactive power at from bus. Necessary for OPF\n\n **min_q_to_mvar** - Minimum reactive power at to bus. Necessary for OPF\n\n **max_q_from_mvar** - Maximum reactive power at from bus. Necessary for OPF\n\n **max_q_to_mvar** - Maximum reactive power at to bus. Necessary for OPF\n\n OUTPUT:\n **index** (int) - The unique ID of the created element\n\n EXAMPLE:\n create_dcline(net, from_bus=0, to_bus=1, p_mw=1e4, loss_percent=1.2, loss_mw=25, \\\n vm_from_pu=1.01, vm_to_pu=1.02)\n \"\"\"\n index = _get_index_with_check(net, \"dcline\", index)\n\n _check_branch_element(net, \"DCLine\", index, from_bus, to_bus)\n\n columns = [\"name\", \"from_bus\", \"to_bus\", \"p_mw\", \"loss_percent\", \"loss_mw\", \"vm_from_pu\",\n \"vm_to_pu\", \"max_p_mw\", \"min_q_from_mvar\", \"min_q_to_mvar\", \"max_q_from_mvar\",\n \"max_q_to_mvar\", \"in_service\"]\n values = [name, from_bus, to_bus, p_mw, loss_percent, loss_mw, vm_from_pu, vm_to_pu, max_p_mw,\n min_q_from_mvar, min_q_to_mvar, max_q_from_mvar, max_q_to_mvar, in_service]\n _set_entries(net, \"dcline\", index, **dict(zip(columns, values)))\n\n return index\n\n\ndef create_measurement(net, meas_type, element_type, value, std_dev, element, side=None,\n check_existing=True, index=None, name=None):\n \"\"\"\n Creates a measurement, which is used by the estimation module. Possible types of measurements\n are: v, p, q, i, va, ia\n\n INPUT:\n **meas_type** (string) - Type of measurement. \"v\", \"p\", \"q\", \"i\", \"va\", \"ia\" are possible\n\n **element_type** (string) - Clarifies which element is measured. \"bus\", \"line\",\n \"trafo\", and \"trafo3w\" are possible\n\n **value** (float) - Measurement value. Units are \"MW\" for P, \"MVar\" for Q, \"p.u.\" for V,\n \"kA\" for I. Bus power measurement is in load reference system, which is consistent to\n the rest of pandapower.\n\n **std_dev** (float) - Standard deviation in the same unit as the measurement\n\n **element** (int) - Index of the measured element (either bus index, line index,\\\n trafo index, trafo3w index)\n\n **side** (int, string, default: None) - Only used for measured lines or transformers. Side \\\n defines at which end of the branch the measurement is gathered. For lines this may be \\\n \"from\", \"to\" to denote the side with the from_bus or to_bus. It can also the be index \\\n of the from_bus or to_bus. For transformers, it can be \"hv\", \"mv\" or \"lv\" or the \\\n corresponding bus index, respectively\n\n OPTIONAL:\n **check_existing** (bool, default: None) - Check for and replace existing measurements for\\\n this bus, type and element_type. Set it to false for performance improvements which can\\\n cause unsafe behavior\n\n **index** (int, default: None) - Index of the measurement in the measurement table. 
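# Illustrative usage sketch (added for clarity, not part of the original module):
# a DC line coupling two AC buses, following the docstring example above. Assumes
# pandapower is importable as "pp"; values are arbitrary example data.
def _example_create_dcline():
    import pandapower as pp
    net = pp.create_empty_network()
    b0 = pp.create_bus(net, vn_kv=380.)
    b1 = pp.create_bus(net, vn_kv=380.)
    # fixed transfer of 100 MW from b0 towards b1 with relative (loss_percent) and
    # absolute (loss_mw) losses; both terminal voltages are controlled set points
    pp.create_dcline(net, from_bus=b0, to_bus=b1, p_mw=100., loss_percent=1.2,
                     loss_mw=0.5, vm_from_pu=1.01, vm_to_pu=1.02)
    return net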
Should\\\n not exist already.\n\n **name** (str, default: None) - Name of measurement\n\n OUTPUT:\n (int) Index of measurement\n\n EXAMPLES:\n 2 MW load measurement with 0.05 MW standard deviation on bus 0:\n create_measurement(net, \"p\", \"bus\", 0, 2., 0.05.)\n\n 4.5 MVar line measurement with 0.1 MVar standard deviation on the \"to_bus\" side of line 2\n create_measurement(net, \"q\", \"line\", 2, 4.5, 0.1, \"to\")\n \"\"\"\n if meas_type not in (\"v\", \"p\", \"q\", \"i\", \"va\", \"ia\"):\n raise UserWarning(\"Invalid measurement type ({})\".format(meas_type))\n\n if side is None and element_type in (\"line\", \"trafo\"):\n raise UserWarning(\"The element type '{element_type}' requires a value in 'side'\")\n\n if meas_type in (\"v\", \"va\"):\n element_type = \"bus\"\n\n if element_type not in (\"bus\", \"line\", \"trafo\", \"trafo3w\"):\n raise UserWarning(\"Invalid element type ({})\".format(element_type))\n\n if element is not None and element not in net[element_type].index.values:\n raise UserWarning(\"{} with index={} does not exist\".format(element_type.capitalize(),\n element))\n\n index = _get_index_with_check(net, \"measurement\", index)\n\n if meas_type in (\"i\", \"ia\") and element_type == \"bus\":\n raise UserWarning(\"Line current measurements cannot be placed at buses\")\n\n if meas_type in (\"v\", \"va\") and element_type in (\"line\", \"trafo\", \"trafo3w\"):\n raise UserWarning(\n \"Voltage measurements can only be placed at buses, not at {}\".format(element_type))\n\n if check_existing:\n if side is None:\n existing = net.measurement[(net.measurement.measurement_type == meas_type) &\n (net.measurement.element_type == element_type) &\n (net.measurement.element == element) &\n (pd.isnull(net.measurement.side))].index\n else:\n existing = net.measurement[(net.measurement.measurement_type == meas_type) &\n (net.measurement.element_type == element_type) &\n (net.measurement.element == element) &\n (net.measurement.side == side)].index\n if len(existing) == 1:\n index = existing[0]\n elif len(existing) > 1:\n raise UserWarning(\"More than one measurement of this type exists\")\n\n columns = [\"name\", \"measurement_type\", \"element_type\", \"element\", \"value\", \"std_dev\", \"side\"]\n values = [name, meas_type.lower(), element_type, element, value, std_dev, side]\n _set_entries(net, \"measurement\", index, **dict(zip(columns, values)))\n return index\n\n\ndef create_pwl_cost(net, element, et, points, power_type=\"p\", index=None, check=True):\n \"\"\"\n Creates an entry for piecewise linear costs for an element. The currently supported elements are\n - Generator\n - External Grid\n - Static Generator\n - Load\n - Dcline\n - Storage\n\n INPUT:\n **element** (int) - ID of the element in the respective element table\n\n **et** (string) - element type, one of \"gen\", \"sgen\", \"ext_grid\", \"load\",\n \"dcline\", \"storage\"]\n\n **points** - (list) list of lists with [[p1, p2, c1], [p2, p3, c2], ...] where c(n) \\\n defines the costs between p(n) and p(n+1)\n\n OPTIONAL:\n **type** - (string) - Type of cost [\"p\", \"q\"] are allowed for active or reactive power\n\n **index** (int, index) - Force a specified ID if it is available. 
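# Illustrative usage sketch (added for clarity, not part of the original module):
# placing a voltage and two power measurements as input for the state estimation
# module. Keyword arguments are used to make the (value, std_dev, element) order
# explicit. Assumes pandapower is importable as "pp"; values are arbitrary.
def _example_create_measurements():
    import pandapower as pp
    net = pp.create_empty_network()
    b0 = pp.create_bus(net, vn_kv=20.)
    b1 = pp.create_bus(net, vn_kv=20.)
    line = pp.create_line_from_parameters(net, from_bus=b0, to_bus=b1, length_km=1.,
                                          r_ohm_per_km=0.1, x_ohm_per_km=0.1,
                                          c_nf_per_km=10., max_i_ka=0.4)
    # bus voltage magnitude measurement in p.u.
    pp.create_measurement(net, meas_type="v", element_type="bus", value=1.006,
                          std_dev=0.004, element=b0)
    # active and reactive power measurements at the "from" side of the line
    pp.create_measurement(net, meas_type="p", element_type="line", value=2.0,
                          std_dev=0.05, element=line, side="from")
    pp.create_measurement(net, meas_type="q", element_type="line", value=0.4,
                          std_dev=0.05, element=line, side="from")
    return net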
If None, the index one \\\n higher than the highest already existing index is selected.\n\n **check** (bool, True) - raises UserWarning if costs already exist to this element.\n\n OUTPUT:\n **index** (int) - The unique ID of created cost entry\n\n EXAMPLE:\n The cost function is given by the x-values p1 and p2 with the slope m between those points.\\\n The constant part b of a linear function y = m*x + b can be neglected for OPF purposes. \\\n The intervals have to be continuous (the starting point of an interval has to be equal to \\\n the end point of the previous interval).\n\n To create a gen with costs of 1€/MW between 0 and 20 MW and 2€/MW between 20 and 30:\n\n create_pwl_cost(net, 0, \"gen\", [[0, 20, 1], [20, 30, 2]])\n \"\"\"\n element = element if not hasattr(element, \"__iter__\") else element[0]\n if check and _cost_existance_check(net, element, et, power_type=power_type):\n raise UserWarning(\"There already exist costs for %s %i\" % (et, element))\n\n index = _get_index_with_check(net, \"pwl_cost\", index, \"piecewise_linear_cost\")\n\n entries = dict(zip([\"power_type\", \"element\", \"et\", \"points\"],\n [power_type, element, et, points]))\n _set_entries(net, \"pwl_cost\", index, **entries)\n return index\n\n\ndef create_poly_cost(net, element, et, cp1_eur_per_mw, cp0_eur=0, cq1_eur_per_mvar=0,\n cq0_eur=0, cp2_eur_per_mw2=0, cq2_eur_per_mvar2=0, index=None, check=True):\n \"\"\"\n Creates an entry for polynimoal costs for an element. The currently supported elements are:\n - Generator (\"gen\")\n - External Grid (\"ext_grid\")\n - Static Generator (\"sgen\")\n - Load (\"load\")\n - Dcline (\"dcline\")\n - Storage (\"storage\")\n\n INPUT:\n **element** (int) - ID of the element in the respective element table\n\n **et** (string) - Type of element [\"gen\", \"sgen\", \"ext_grid\", \"load\", \"dcline\", \"storage\"] \\\n are possible\n\n **cp1_eur_per_mw** (float) - Linear costs per MW\n\n **cp0_eur=0** (float) - Offset active power costs in euro\n\n **cq1_eur_per_mvar=0** (float) - Linear costs per Mvar\n\n **cq0_eur=0** (float) - Offset reactive power costs in euro\n\n **cp2_eur_per_mw2=0** (float) - Quadratic costs per MW\n\n **cq2_eur_per_mvar2=0** (float) - Quadratic costs per Mvar\n\n OPTIONAL:\n\n **index** (int, index) - Force a specified ID if it is available. 
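# Illustrative usage sketch (added for clarity, not part of the original module):
# the piecewise linear cost from the docstring example above attached to a
# controllable generator, i.e. 1 EUR/MW between 0 and 20 MW and 2 EUR/MW between
# 20 and 30 MW. Assumes pandapower is importable as "pp".
def _example_create_pwl_cost():
    import pandapower as pp
    net = pp.create_empty_network()
    b0 = pp.create_bus(net, vn_kv=110.)
    gen = pp.create_gen(net, bus=b0, p_mw=10., vm_pu=1.0, min_p_mw=0., max_p_mw=30.,
                        controllable=True)
    pp.create_pwl_cost(net, gen, "gen", [[0, 20, 1], [20, 30, 2]])
    return net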
If None, the index one \\\n higher than the highest already existing index is selected.\n\n **check** (bool, True) - raises UserWarning if costs already exist to this element.\n\n OUTPUT:\n **index** (int) - The unique ID of created cost entry\n\n EXAMPLE:\n The polynomial cost function is given by the linear and quadratic cost coefficients.\n\n create_poly_cost(net, 0, \"load\", cp1_eur_per_mw = 0.1)\n \"\"\"\n element = element if not hasattr(element, \"__iter__\") else element[0]\n if check and _cost_existance_check(net, element, et):\n raise UserWarning(\"There already exist costs for %s %i\" % (et, element))\n\n index = _get_index_with_check(net, \"poly_cost\", index)\n columns = [\"element\", \"et\", \"cp0_eur\", \"cp1_eur_per_mw\", \"cq0_eur\", \"cq1_eur_per_mvar\",\n \"cp2_eur_per_mw2\", \"cq2_eur_per_mvar2\"]\n variables = [element, et, cp0_eur, cp1_eur_per_mw, cq0_eur, cq1_eur_per_mvar,\n cp2_eur_per_mw2, cq2_eur_per_mvar2]\n _set_entries(net, \"poly_cost\", index, **dict(zip(columns, variables)))\n return index\n\n\ndef _get_index_with_check(net, table, index, name=None):\n if name is None:\n name = table\n if index is None:\n index = get_free_id(net[table])\n if index in net[table].index:\n raise UserWarning(\"A %s with the id %s already exists\" % (name, index))\n return index\n\n\ndef _cost_existance_check(net, element, et, power_type=None):\n if power_type is None:\n return (bool(net.poly_cost.shape[0]) and\n np_any((net.poly_cost.element == element).values &\n (net.poly_cost.et == et).values)) \\\n or (bool(net.pwl_cost.shape[0]) and\n np_any((net.pwl_cost.element == element).values &\n (net.pwl_cost.et == et).values))\n else:\n return (bool(net.poly_cost.shape[0]) and\n np_any((net.poly_cost.element == element).values &\n (net.poly_cost.et == et).values)) \\\n or (bool(net.pwl_cost.shape[0]) and\n np_any((net.pwl_cost.element == element).values &\n (net.pwl_cost.et == et).values &\n (net.pwl_cost.power_type == power_type).values))\n\n\ndef _get_multiple_index_with_check(net, table, index, number, name=None):\n if name is None:\n name = table.capitalize() + \"s\"\n if index is None:\n bid = get_free_id(net[table])\n return arange(bid, bid + number, 1)\n contained = isin(net[table].index.values, index)\n if np_any(contained):\n raise UserWarning(\"%s with indexes %s already exist.\"\n % (name, net[table].index.values[contained]))\n return index\n\n\ndef _check_node_element(net, node, node_table=\"bus\"):\n if node not in net[node_table].index.values:\n raise UserWarning(\"Cannot attach to %s %s, %s does not exist\"\n % (node_table, node, node_table))\n\n\ndef _check_multiple_node_elements(net, nodes, node_table=\"bus\", name=\"buses\"):\n if np_any(~isin(nodes, net[node_table].index.values)):\n node_not_exist = set(nodes) - set(net[node_table].index.values)\n raise UserWarning(\"Cannot attach to %s %s, they do not exist\" % (name, node_not_exist))\n\n\ndef _check_branch_element(net, element_name, index, from_node, to_node, node_name=\"bus\",\n plural=\"es\"):\n missing_nodes = {from_node, to_node} - set(net[node_name].index.values)\n if missing_nodes:\n raise UserWarning(\"%s %d tries to attach to non-existing %s(%s) %s\"\n % (element_name.capitalize(), index, node_name, plural, missing_nodes))\n\n\ndef _check_multiple_branch_elements(net, from_nodes, to_nodes, element_name, node_name=\"bus\",\n plural=\"es\"):\n all_nodes = array(list(from_nodes) + list(to_nodes))\n if np_any(~isin(all_nodes, net[node_name].index.values)):\n node_not_exist = set(all_nodes) - 
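# Illustrative usage sketch (added for clarity, not part of the original module):
# polynomial generation costs of the form cp0 + cp1 * p + cp2 * p**2 for a generator
# and a simple linear load cost as in the docstring example. Assumes pandapower is
# importable as "pp"; coefficients are arbitrary example data.
def _example_create_poly_cost():
    import pandapower as pp
    net = pp.create_empty_network()
    b0 = pp.create_bus(net, vn_kv=110.)
    gen = pp.create_gen(net, bus=b0, p_mw=10., vm_pu=1.0, min_p_mw=0., max_p_mw=30.,
                        controllable=True)
    load = pp.create_load(net, bus=b0, p_mw=5., controllable=False)
    pp.create_poly_cost(net, gen, "gen", cp1_eur_per_mw=20., cp2_eur_per_mw2=0.05,
                        cp0_eur=100.)
    pp.create_poly_cost(net, load, "load", cp1_eur_per_mw=0.1)
    return net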
set(net[node_name].index)\n raise UserWarning(\"%s trying to attach to non existing %s%s %s\"\n % (element_name, node_name, plural, node_not_exist))\n\n\ndef _create_column_and_set_value(net, index, variable, column, element, dtyp=float64,\n default_val=nan, default_for_nan=False):\n # if variable (e.g. p_mw) is not None and column (e.g. \"p_mw\") doesn't exist in element\n # (e.g. \"gen\") table\n # create this column and write the value of variable to the index of this element\n try:\n set_value = not (isnan(variable) or variable is None)\n except TypeError:\n set_value = True\n if set_value:\n if column not in net[element].columns:\n # this part is for compatibility with pandas < 1.0, can be removed if pandas >= 1.0 is required in setup.py\n if isinstance(default_val, str) \\\n and version.parse(pd.__version__) < version.parse(\"1.0\"):\n net[element].loc[:, column] = pd.Series([default_val] * len(net[element]),\n dtype=dtyp)\n else:\n net[element].loc[:, column] = pd.Series(data=default_val, index=net[element].index, dtype=dtyp)\n net[element].at[index, column] = variable\n elif default_for_nan and column in net[element].columns:\n net[element].at[index, column] = default_val\n return net\n\n\ndef _add_series_to_entries(entries, index, column, values, dtyp=float64, default_val=nan):\n if values is not None:\n try:\n fill_default = not isnan(default_val)\n except TypeError:\n fill_default = True\n if isinstance(values, str) and version.parse(pd.__version__) < version.parse(\"1.0\"):\n s = pd.Series([values] * len(index), index=index, dtype=dtyp)\n else:\n s = pd.Series(values, index=index, dtype=dtyp)\n if fill_default:\n s = s.fillna(default_val)\n entries[column] = s\n\n\ndef _add_multiple_branch_geodata(net, table, geodata, index):\n geo_table = \"%s_geodata\" % table\n dtypes = net[geo_table].dtypes\n df = pd.DataFrame(index=index, columns=net[geo_table].columns)\n # works with single or multiple lists of coordinates\n if len(geodata[0]) == 2 and not hasattr(geodata[0][0], \"__iter__\"):\n # geodata is a single list of coordinates\n df[\"coords\"] = [geodata] * len(index)\n else:\n # geodata is multiple lists of coordinates\n df[\"coords\"] = geodata\n\n # todo: drop version checks\n if version.parse(pd.__version__) >= version.parse(\"0.23\"):\n net[geo_table] = pd.concat([net[geo_table],df], sort=False)\n else:\n # prior to pandas 0.23 there was no explicit parameter (instead it was standard behavior)\n net[geo_table] = net[geo_table].append(df)\n\n _preserve_dtypes(net[geo_table], dtypes)\n\n\ndef _set_entries(net, table, index, preserve_dtypes=True, **entries):\n dtypes = None\n if preserve_dtypes:\n # only get dtypes of columns that are set and that are already present in the table\n dtypes = net[table][intersect1d(net[table].columns, list(entries.keys()))].dtypes\n\n for col, val in entries.items():\n net[table].at[index, col] = val\n\n # and preserve dtypes\n if preserve_dtypes:\n _preserve_dtypes(net[table], dtypes)\n\n\ndef _set_multiple_entries(net, table, index, preserve_dtypes=True, **entries):\n dtypes = None\n if preserve_dtypes:\n # store dtypes\n dtypes = net[table].dtypes\n\n def check_entry(val):\n if isinstance(val, pd.Series) and not np_all(isin(val.index, index)):\n return val.values\n elif isinstance(val, set) and len(val) == len(index):\n return list(val)\n return val\n\n entries = {k: check_entry(v) for k, v in entries.items()}\n\n dd = pd.DataFrame(index=index, columns=net[table].columns)\n dd = dd.assign(**entries)\n\n # extend the table by the frame we just 
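# Illustrative sketch (added for clarity, not part of the original module) of why the
# entry helpers above restore column dtypes: concatenating a freshly created, untyped
# (object/NaN) frame onto a typed element table upcasts columns, which
# _preserve_dtypes reverses afterwards. Only pandas is needed for this demonstration.
def _example_dtype_preservation():
    import pandas as pd
    table = pd.DataFrame({"bus": pd.Series([0, 1], dtype="int64")})
    new_rows = pd.DataFrame(index=[2], columns=table.columns)   # object dtype, NaN
    new_rows.at[2, "bus"] = 2
    merged = pd.concat([table, new_rows])
    assert merged["bus"].dtype == object            # int64 was lost in the concat
    merged["bus"] = merged["bus"].astype("int64")   # what _preserve_dtypes restores
    assert merged["bus"].dtype == "int64"
    return merged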
created\n if version.parse(pd.__version__) >= version.parse(\"0.23\"):\n net[table] = pd.concat([net[table],dd], sort=False)\n else:\n # prior to pandas 0.23 there was no explicit parameter (instead it was standard behavior)\n net[table] = net[table].append(dd)\n\n # and preserve dtypes\n if preserve_dtypes:\n _preserve_dtypes(net[table], dtypes)\n", "# -*- coding: utf-8 -*-\r\n\r\n# Copyright (c) 2016-2022 by University of Kassel and Fraunhofer Institute for Energy Economics\r\n# and Energy System Technology (IEE), Kassel. All rights reserved.\r\n\r\n\r\nimport pandas as pd\r\nfrom numpy import allclose, isclose\r\nfrom pandapower.pf.runpp_3ph import runpp_3ph\r\nfrom pandapower.results import get_relevant_elements\r\nimport pandapower as pp\r\n\r\n\r\ndef runpp_with_consistency_checks(net, **kwargs):\r\n pp.runpp(net, **kwargs)\r\n consistency_checks(net)\r\n return True\r\n\r\ndef runpp_3ph_with_consistency_checks(net, **kwargs):\r\n runpp_3ph(net, **kwargs)\r\n consistency_checks_3ph(net)\r\n return True\r\n\r\ndef rundcpp_with_consistency_checks(net, **kwargs):\r\n pp.rundcpp(net, **kwargs)\r\n consistency_checks(net, test_q=False)\r\n return True\r\n\r\ndef consistency_checks(net, rtol=1e-3, test_q=True):\r\n indices_consistent(net)\r\n branch_loss_consistent_with_bus_feed_in(net, rtol)\r\n element_power_consistent_with_bus_power(net, rtol, test_q)\r\n\r\ndef indices_consistent(net):\r\n elements = get_relevant_elements()\r\n for element in elements:\r\n e_idx = net[element].index\r\n res_idx = net[\"res_\" + element].index\r\n assert len(e_idx) == len(res_idx), \"length of %s bus and res_%s indices do not match\"%(element, element)\r\n assert all(e_idx == res_idx), \"%s bus and res_%s indices do not match\"%(element, element)\r\n\r\n\r\ndef branch_loss_consistent_with_bus_feed_in(net, atol=1e-2):\r\n \"\"\"\r\n The surpluss of bus feed summed over all buses always has to be equal to the sum of losses in\r\n all branches.\r\n \"\"\"\r\n # Active Power\r\n bus_surplus_p = -net.res_bus.p_mw.sum()\r\n bus_surplus_q = -net.res_bus.q_mvar.sum()\r\n\r\n branch_loss_p = net.res_line.pl_mw.values.sum() + net.res_trafo.pl_mw.values.sum() + \\\r\n net.res_trafo3w.pl_mw.values.sum() + net.res_impedance.pl_mw.values.sum() + \\\r\n net.res_dcline.pl_mw.values.sum()\r\n branch_loss_q = net.res_line.ql_mvar.values.sum() + net.res_trafo.ql_mvar.values.sum() + \\\r\n net.res_trafo3w.ql_mvar.values.sum() + net.res_impedance.ql_mvar.values.sum() + \\\r\n net.res_dcline.q_to_mvar.values.sum() + net.res_dcline.q_from_mvar.values.sum()\r\n\r\n try:\r\n assert isclose(bus_surplus_p, branch_loss_p, atol=atol)\r\n except AssertionError:\r\n raise AssertionError(\"Branch losses are %.4f MW, but power generation at the buses exceeds the feedin by %.4f MW\"%(branch_loss_p, bus_surplus_p))\r\n try:\r\n assert isclose(bus_surplus_q, branch_loss_q, atol=atol)\r\n except AssertionError:\r\n raise AssertionError(\"Branch losses are %.4f MVar, but power generation at the buses exceeds the feedin by %.4f MVar\"%(branch_loss_q, bus_surplus_q))\r\n\r\n\r\ndef element_power_consistent_with_bus_power(net, rtol=1e-2, test_q=True):\r\n \"\"\"\r\n The bus feed-in at each node has to be equal to the sum of the element feed ins at each node.\r\n \"\"\"\r\n bus_p = pd.Series(data=0., index=net.bus.index)\r\n bus_q = pd.Series(data=0., index=net.bus.index)\r\n\r\n for idx, tab in net.ext_grid.iterrows():\r\n if tab.in_service:\r\n bus_p.at[tab.bus] -= net.res_ext_grid.p_mw.at[idx]\r\n bus_q.at[tab.bus] -= 
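# Illustrative sketch (added for clarity, not part of the original module): the power
# balance that branch_loss_consistent_with_bus_feed_in asserts, reproduced on a
# minimal network -- the net feed-in surplus over all buses equals the sum of the
# branch losses. Assumes pandapower is importable as "pp"; values are arbitrary.
def _example_bus_feed_in_vs_branch_losses():
    import pandapower as pp
    from numpy import isclose
    net = pp.create_empty_network()
    b0 = pp.create_bus(net, vn_kv=20.)
    b1 = pp.create_bus(net, vn_kv=20.)
    pp.create_ext_grid(net, bus=b0)
    pp.create_load(net, bus=b1, p_mw=2., q_mvar=0.5)
    pp.create_line_from_parameters(net, from_bus=b0, to_bus=b1, length_km=5.,
                                   r_ohm_per_km=0.2, x_ohm_per_km=0.3,
                                   c_nf_per_km=10., max_i_ka=0.4)
    pp.runpp(net)
    bus_surplus_p = -net.res_bus.p_mw.sum()
    assert isclose(bus_surplus_p, net.res_line.pl_mw.sum(), atol=1e-2)
    return net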
net.res_ext_grid.q_mvar.at[idx]\r\n\r\n for idx, tab in net.gen.iterrows():\r\n if tab.in_service:\r\n bus_p.at[tab.bus] -= net.res_gen.p_mw.at[idx]\r\n bus_q.at[tab.bus] -= net.res_gen.q_mvar.at[idx]\r\n\r\n for idx, tab in net.load.iterrows():\r\n bus_p.at[tab.bus] += net.res_load.p_mw.at[idx]\r\n bus_q.at[tab.bus] += net.res_load.q_mvar.at[idx]\r\n\r\n for idx, tab in net.sgen.iterrows():\r\n bus_p.at[tab.bus] -= net.res_sgen.p_mw.at[idx]\r\n bus_q.at[tab.bus] -= net.res_sgen.q_mvar.at[idx]\r\n\r\n for idx, tab in net.asymmetric_load.iterrows():\r\n bus_p.at[tab.bus] += net.res_asymmetric_load.p_mw.at[idx]\r\n bus_q.at[tab.bus] += net.res_asymmetric_load.q_mvar.at[idx]\r\n\r\n for idx, tab in net.asymmetric_sgen.iterrows():\r\n bus_p.at[tab.bus] -= net.res_asymmetric_sgen.p_mw.at[idx]\r\n bus_q.at[tab.bus] -= net.res_asymmetric_sgen.q_mvar.at[idx]\r\n\r\n for idx, tab in net.storage.iterrows():\r\n bus_p.at[tab.bus] += net.res_storage.p_mw.at[idx]\r\n bus_q.at[tab.bus] += net.res_storage.q_mvar.at[idx]\r\n\r\n for idx, tab in net.shunt.iterrows():\r\n bus_p.at[tab.bus] += net.res_shunt.p_mw.at[idx]\r\n bus_q.at[tab.bus] += net.res_shunt.q_mvar.at[idx]\r\n\r\n for idx, tab in net.ward.iterrows():\r\n bus_p.at[tab.bus] += net.res_ward.p_mw.at[idx]\r\n bus_q.at[tab.bus] += net.res_ward.q_mvar.at[idx]\r\n\r\n for idx, tab in net.xward.iterrows():\r\n bus_p.at[tab.bus] += net.res_xward.p_mw.at[idx]\r\n bus_q.at[tab.bus] += net.res_xward.q_mvar.at[idx]\r\n\r\n\r\n assert allclose(net.res_bus.p_mw.values, bus_p.values, equal_nan=True, rtol=rtol)\r\n if test_q:\r\n assert allclose(net.res_bus.q_mvar.values, bus_q.values, equal_nan=True, rtol=rtol)\r\n\r\n\r\ndef consistency_checks_3ph(net, rtol=2e-3):\r\n indices_consistent_3ph(net)\r\n branch_loss_consistent_with_bus_feed_in_3ph(net, rtol)\r\n element_power_consistent_with_bus_power_3ph(net, rtol)\r\n\r\ndef indices_consistent_3ph(net):\r\n elements = get_relevant_elements(\"pf_3ph\")\r\n for element in elements:\r\n e_idx = net[element].index\r\n res_idx = net[\"res_\" + element+\"_3ph\"].index\r\n assert len(e_idx) == len(res_idx), \"length of %s bus and res_%s indices do not match\"%(element, element)\r\n assert all(e_idx == res_idx), \"%s bus and res_%s indices do not match\"%(element, element)\r\n\r\n\r\ndef branch_loss_consistent_with_bus_feed_in_3ph(net, atol=1e-2):\r\n \"\"\"\r\n The surpluss of bus feed summed over all buses always has to be equal to the sum of losses in\r\n all branches.\r\n \"\"\"\r\n bus_surplus_p = -net.res_bus_3ph[[\"p_a_mw\", \"p_b_mw\", \"p_c_mw\"]].sum().sum()\r\n bus_surplus_q = -net.res_bus_3ph[[\"q_a_mvar\", \"q_b_mvar\", \"q_c_mvar\"]].sum().sum()\r\n\r\n\r\n branch_loss_p = net.res_line_3ph.p_a_l_mw.sum() + net.res_trafo_3ph.p_a_l_mw.sum() + \\\r\n net.res_line_3ph.p_b_l_mw.sum() + net.res_trafo_3ph.p_b_l_mw.sum() + \\\r\n net.res_line_3ph.p_c_l_mw.sum() + net.res_trafo_3ph.p_c_l_mw.sum()\r\n\r\n branch_loss_q = net.res_line_3ph.q_a_l_mvar.sum() + net.res_trafo_3ph.q_a_l_mvar.sum() + \\\r\n net.res_line_3ph.q_b_l_mvar.sum() + net.res_trafo_3ph.q_b_l_mvar.sum() + \\\r\n net.res_line_3ph.q_c_l_mvar.sum() + net.res_trafo_3ph.q_c_l_mvar.sum()\r\n\r\n try:\r\n assert isclose(bus_surplus_p, branch_loss_p, atol=atol)\r\n except AssertionError:\r\n raise AssertionError(\"Branch losses are %.4f MW, but power generation at the buses exceeds the feedin by %.4f MW\"%(branch_loss_p, bus_surplus_p))\r\n try:\r\n assert isclose(bus_surplus_q, branch_loss_q, atol=atol)\r\n except AssertionError:\r\n raise 
AssertionError(\"Branch losses are %.4f MVar, but power generation at the buses exceeds the feedin by %.4f MVar\"%(branch_loss_q, bus_surplus_q))\r\n\r\n\r\ndef element_power_consistent_with_bus_power_3ph(net, rtol=1e-2):\r\n \"\"\"\r\n The bus feed-in at each node has to be equal to the sum of the element feed ins at each node.\r\n \"\"\"\r\n bus_p_a = pd.Series(data=0., index=net.bus.index)\r\n bus_q_a = pd.Series(data=0., index=net.bus.index)\r\n bus_p_b = pd.Series(data=0., index=net.bus.index)\r\n bus_q_b = pd.Series(data=0., index=net.bus.index)\r\n bus_p_c = pd.Series(data=0., index=net.bus.index)\r\n bus_q_c = pd.Series(data=0., index=net.bus.index)\r\n\r\n for idx, tab in net.ext_grid.iterrows():\r\n bus_p_a.at[tab.bus] -= net.res_ext_grid_3ph.p_a_mw.at[idx]\r\n bus_q_a.at[tab.bus] -= net.res_ext_grid_3ph.q_a_mvar.at[idx]\r\n bus_p_b.at[tab.bus] -= net.res_ext_grid_3ph.p_b_mw.at[idx]\r\n bus_q_b.at[tab.bus] -= net.res_ext_grid_3ph.q_b_mvar.at[idx]\r\n bus_p_c.at[tab.bus] -= net.res_ext_grid_3ph.p_c_mw.at[idx]\r\n bus_q_c.at[tab.bus] -= net.res_ext_grid_3ph.q_c_mvar.at[idx]\r\n\r\n for idx, tab in net.load.iterrows():\r\n bus_p_a.at[tab.bus] += net.res_load_3ph.p_mw.at[idx]/3\r\n bus_q_a.at[tab.bus] += net.res_load_3ph.q_mvar.at[idx] /3\r\n bus_p_b.at[tab.bus] += net.res_load_3ph.p_mw.at[idx]/3\r\n bus_q_b.at[tab.bus] += net.res_load_3ph.q_mvar.at[idx] /3\r\n bus_p_c.at[tab.bus] += net.res_load_3ph.p_mw.at[idx]/3\r\n bus_q_c.at[tab.bus] += net.res_load_3ph.q_mvar.at[idx] /3\r\n\r\n for idx, tab in net.asymmetric_load.iterrows():\r\n bus_p_a.at[tab.bus] += net.res_asymmetric_load_3ph.p_a_mw.at[idx]\r\n bus_q_a.at[tab.bus] += net.res_asymmetric_load_3ph.q_a_mvar.at[idx]\r\n bus_p_b.at[tab.bus] += net.res_asymmetric_load_3ph.p_b_mw.at[idx]\r\n bus_q_b.at[tab.bus] += net.res_asymmetric_load_3ph.q_b_mvar.at[idx]\r\n bus_p_c.at[tab.bus] += net.res_asymmetric_load_3ph.p_c_mw.at[idx]\r\n bus_q_c.at[tab.bus] += net.res_asymmetric_load_3ph.q_c_mvar.at[idx]\r\n\r\n for idx, tab in net.asymmetric_sgen.iterrows():\r\n bus_p_a.at[tab.bus] -= net.res_asymmetric_sgen_3ph.p_a_mw.at[idx]\r\n bus_q_a.at[tab.bus] -= net.res_asymmetric_sgen_3ph.q_a_mvar.at[idx]\r\n bus_p_b.at[tab.bus] -= net.res_asymmetric_sgen_3ph.p_b_mw.at[idx]\r\n bus_q_b.at[tab.bus] -= net.res_asymmetric_sgen_3ph.q_b_mvar.at[idx]\r\n bus_p_c.at[tab.bus] -= net.res_asymmetric_sgen_3ph.p_c_mw.at[idx]\r\n bus_q_c.at[tab.bus] -= net.res_asymmetric_sgen_3ph.q_c_mvar.at[idx]\r\n\r\n for idx, tab in net.sgen.iterrows():\r\n bus_p_a.at[tab.bus] -= net.res_sgen_3ph.p_mw.at[idx] / 3\r\n bus_q_a.at[tab.bus] -= net.res_sgen_3ph.q_mvar.at[idx] / 3\r\n bus_p_b.at[tab.bus] -= net.res_sgen_3ph.p_mw.at[idx] / 3\r\n bus_q_b.at[tab.bus] -= net.res_sgen_3ph.q_mvar.at[idx] / 3\r\n bus_p_c.at[tab.bus] -= net.res_sgen_3ph.p_mw.at[idx] / 3\r\n bus_q_c.at[tab.bus] -= net.res_sgen_3ph.q_mvar.at[idx] / 3\r\n\r\n assert allclose(net.res_bus_3ph.p_a_mw.values, bus_p_a.values, equal_nan=True, rtol=rtol)\r\n assert allclose(net.res_bus_3ph.q_a_mvar.values, bus_q_a.values, equal_nan=True, rtol=rtol)\r\n assert allclose(net.res_bus_3ph.p_b_mw.values, bus_p_b.values, equal_nan=True, rtol=rtol)\r\n assert allclose(net.res_bus_3ph.q_b_mvar.values, bus_q_b.values, equal_nan=True, rtol=rtol)\r\n assert allclose(net.res_bus_3ph.p_c_mw.values, bus_p_c.values, equal_nan=True, rtol=rtol)\r\n assert allclose(net.res_bus_3ph.q_c_mvar.values, bus_q_c.values, equal_nan=True, rtol=rtol)\r\n", "# -*- coding: utf-8 -*-\n\n# Copyright (c) 2016-2022 by University of Kassel and 
Fraunhofer Institute for Energy Economics\n# and Energy System Technology (IEE), Kassel. All rights reserved.\n\n\nfrom math import pi\nfrom numpy import sign, nan, append, zeros, array, sqrt, where\nfrom numpy import max as max_\nfrom pandas import Series, DataFrame, concat\nfrom pandapower.pypower.idx_gen import GEN_BUS, PMIN, PMAX, QMIN, QMAX, GEN_STATUS\nfrom pandapower.pypower.idx_cost import COST, NCOST\nfrom pandapower.pypower.idx_bus import BUS_I, BASE_KV\nimport pandapower as pp\n\ntry:\n import pandaplan.core.pplog as logging\nexcept ImportError:\n import logging\nlogger = logging.getLogger(__name__)\n\ntry:\n from pypower import ppoption, runpf, runopf, rundcpf, rundcopf\n ppopt = ppoption.ppoption(VERBOSE=0, OUT_ALL=0)\n pypower_import = True\nexcept ImportError:\n pypower_import = False\n\nppc_elms = [\"bus\", \"branch\", \"gen\"]\n\n\ndef _create_costs(net, ppc, gen_lookup, type, idx):\n if ppc['gencost'][idx, 0] == 1:\n if not len(ppc['gencost'][idx, COST:]) == 2*ppc['gencost'][idx, NCOST]:\n logger.error(\"In gencost line %s, the number n does not fit to the number of values\" %\n idx)\n raise NotImplementedError\n pp.create_pwl_cost(net, gen_lookup.element.at[idx],\n gen_lookup.element_type.at[idx],\n ppc['gencost'][idx, 4:], type)\n elif ppc['gencost'][idx, 0] == 2:\n ncost = ppc['gencost'][idx, NCOST]\n if ncost == 1:\n cp2 = 0\n cp1 = 0\n cp0 = ppc['gencost'][idx, COST]\n elif ncost == 2:\n cp2 = 0\n cp1 = ppc['gencost'][idx, COST]\n cp0 = ppc['gencost'][idx, COST + 1]\n elif ncost == 3:\n cp2 = ppc['gencost'][idx, COST]\n cp1 = ppc['gencost'][idx, COST + 1]\n cp0 = ppc['gencost'][idx, COST + 2]\n elif ncost > 3:\n logger.warning(\"The pandapower poly_cost table only supports up to 2nd order \" +\n \"polynomials. The ppc higher order polynomials cannot be converted.\")\n cp2 = ppc['gencost'][idx, COST + ncost - 3]\n cp1 = ppc['gencost'][idx, COST + ncost - 2]\n cp0 = ppc['gencost'][idx, COST + ncost - 1]\n else:\n raise ValueError(\"'ncost' must be an positve integer but is \" + str(ncost))\n pp.create_poly_cost(net, gen_lookup.element.at[idx], gen_lookup.element_type.at[idx],\n cp1_eur_per_mw=cp1, cp2_eur_per_mw2=cp2, cp0_eur=cp0)\n else:\n logger.info(\"Cost mode of gencost line %s is unknown.\" % idx)\n\n\ndef _gen_bus_info(ppc, idx_gen):\n bus_name = int(ppc[\"gen\"][idx_gen, GEN_BUS])\n # assumption: there is only one bus with this bus_name:\n idx_bus = int(where(ppc[\"bus\"][:, BUS_I] == bus_name)[0][0])\n current_bus_type = int(ppc[\"bus\"][idx_bus, 1])\n\n same_bus_gen_idx = where(ppc[\"gen\"][:, GEN_BUS] == ppc[\"gen\"][idx_gen, GEN_BUS])[0].astype(int)\n same_bus_in_service_gen_idx = same_bus_gen_idx[where(ppc[\"gen\"][same_bus_gen_idx, GEN_STATUS] > 0)]\n first_same_bus_in_service_gen_idx = same_bus_in_service_gen_idx[0] if len(\n same_bus_in_service_gen_idx) else None\n last_same_bus_in_service_gen_idx = same_bus_in_service_gen_idx[-1] if len(\n same_bus_in_service_gen_idx) else None\n\n return current_bus_type, idx_bus, same_bus_gen_idx, first_same_bus_in_service_gen_idx, \\\n last_same_bus_in_service_gen_idx\n\n\ndef from_ppc(ppc, f_hz=50, validate_conversion=False, **kwargs):\n \"\"\"\n This function converts pypower case files to pandapower net structure.\n\n INPUT:\n\n **ppc** : The pypower case file.\n\n OPTIONAL:\n\n **f_hz** (float, 50) - The frequency of the network.\n\n **validate_conversion** (bool, False) - If True, validate_from_ppc is run after conversion.\n For running the validation, the ppc must already contain the pypower\n powerflow 
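# Illustrative sketch (added for clarity, not part of the original converter): how a
# polynomial pypower gencost row is mapped onto pandapower poly_cost coefficients in
# _create_costs above -- for model 2 with ncost = 3 the entries after the NCOST field
# are read as (cp2, cp1, cp0). Only numpy is needed; the row layout follows the
# pypower idx_cost convention (MODEL, STARTUP, SHUTDOWN, NCOST, COST, ...).
def _example_gencost_polynomial_mapping():
    from numpy import array
    # 0.01 * p**2 + 40 * p + 100 in EUR (model 2, three coefficients)
    gencost_row = array([2., 0., 0., 3., 0.01, 40., 100.])
    ncost = int(gencost_row[3])
    assert gencost_row[0] == 2 and ncost == 3
    cp2, cp1, cp0 = gencost_row[4], gencost_row[5], gencost_row[6]
    # these would be passed to create_poly_cost as cp2_eur_per_mw2, cp1_eur_per_mw, cp0_eur
    return cp2, cp1, cp0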
results or pypower must be importable.\n\n ****kwargs** keyword arguments for validate_from_ppc if validate_conversion is True\n\n OUTPUT:\n\n **net** : pandapower net.\n\n EXAMPLE:\n\n import pandapower.converter as pc\n\n from pypower import case4gs\n\n ppc_net = case4gs.case4gs()\n\n net = pc.from_ppc(ppc_net, f_hz=60)\n\n \"\"\"\n # --- catch common failures\n if Series(ppc['bus'][:, BASE_KV] <= 0).any():\n logger.info('There are false baseKV given in the pypower case file.')\n\n # --- general_parameters\n baseMVA = ppc['baseMVA'] # MVA\n omega = pi * f_hz # 1/s\n MAX_VAL = 99999.\n\n net = pp.create_empty_network(f_hz=f_hz, sn_mva=baseMVA)\n\n # --- bus data -> create buses, sgen, load, shunt\n for i in range(len(ppc['bus'])):\n # create buses\n pp.create_bus(net, name=int(ppc['bus'][i, 0]), vn_kv=ppc['bus'][i, 9], type=\"b\",\n zone=ppc['bus'][i, 10], in_service=bool(ppc['bus'][i, 1] != 4),\n max_vm_pu=ppc['bus'][i, 11], min_vm_pu=ppc['bus'][i, 12])\n # create sgen, load\n if ppc['bus'][i, 2] > 0:\n pp.create_load(net, i, p_mw=ppc['bus'][i, 2], q_mvar=ppc['bus'][i, 3],\n controllable=False)\n elif ppc['bus'][i, 2] < 0:\n pp.create_sgen(net, i, p_mw=-ppc['bus'][i, 2], q_mvar=-ppc['bus'][i, 3],\n type=\"\", controllable=False)\n elif ppc['bus'][i, 3] != 0:\n pp.create_load(net, i, p_mw=ppc['bus'][i, 2], q_mvar=ppc['bus'][i, 3],\n controllable=False)\n # create shunt\n if ppc['bus'][i, 4] != 0 or ppc['bus'][i, 5] != 0:\n pp.create_shunt(net, i, p_mw=ppc['bus'][i, 4],\n q_mvar=-ppc['bus'][i, 5])\n # unused data of ppc: Vm, Va (partwise: in ext_grid), zone\n\n # --- gen data -> create ext_grid, gen, sgen\n gen_lookup = DataFrame(nan, columns=['element', 'element_type'],\n index=range(len(ppc['gen'][:, 0])))\n # if in ppc is only one gen -> numpy initially uses one dim array -> change to two dim array\n if len(ppc[\"gen\"].shape) == 1:\n ppc[\"gen\"] = array(ppc[\"gen\"], ndmin=2)\n for i in range(len(ppc['gen'][:, 0])):\n current_bus_type, current_bus_idx, same_bus_gen_idx, first_same_bus_in_service_gen_idx, \\\n last_same_bus_in_service_gen_idx = _gen_bus_info(ppc, i)\n # create ext_grid\n if current_bus_type == 3:\n if i == first_same_bus_in_service_gen_idx:\n gen_lookup.element.loc[i] = pp.create_ext_grid(\n net, bus=current_bus_idx, vm_pu=ppc['gen'][last_same_bus_in_service_gen_idx, 5],\n va_degree=ppc['bus'][current_bus_idx, 8], in_service=bool(ppc['gen'][i, 7] > 0),\n max_p_mw=ppc['gen'][i, PMAX], min_p_mw=ppc['gen'][i, PMIN],\n max_q_mvar=ppc['gen'][i, QMAX], min_q_mvar=ppc['gen'][i, QMIN])\n gen_lookup.element_type.loc[i] = 'ext_grid'\n if ppc['gen'][i, 4] > ppc['gen'][i, 3]:\n logger.info('min_q_mvar of gen %d must be less than max_q_mvar but is not.' % i)\n if -ppc['gen'][i, 9] < -ppc['gen'][i, 8]:\n logger.info('max_p_mw of gen %d must be less than min_p_mw but is not.' % i)\n else:\n current_bus_type = 1\n # create gen\n elif current_bus_type == 2:\n if i == first_same_bus_in_service_gen_idx:\n gen_lookup.element.loc[i] = pp.create_gen(\n net, bus=current_bus_idx, vm_pu=ppc['gen'][last_same_bus_in_service_gen_idx, 5],\n p_mw=ppc['gen'][i, 1],\n in_service=bool(ppc['gen'][i, 7] > 0), controllable=True,\n max_p_mw=ppc['gen'][i, PMAX], min_p_mw=ppc['gen'][i, PMIN],\n max_q_mvar=ppc['gen'][i, QMAX], min_q_mvar=ppc['gen'][i, QMIN])\n gen_lookup.element_type.loc[i] = 'gen'\n if ppc['gen'][i, 1] < 0:\n logger.info('p_mw of gen %d must be less than zero but is not.' 
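# Illustrative sketch (added for clarity, not part of the original converter): the
# sign conventions used in the bus loop above when translating the ppc bus columns
# Pd, Qd, Gs, Bs into pandapower elements. Plain Python, no network is needed.
def _example_ppc_bus_sign_convention(pd_mw, qd_mvar, gs_mw, bs_mvar):
    created = []
    if pd_mw > 0:
        created.append(("load", pd_mw, qd_mvar))      # consumption as given
    elif pd_mw < 0:
        created.append(("sgen", -pd_mw, -qd_mvar))    # negative demand -> generation
    elif qd_mvar != 0:
        created.append(("load", pd_mw, qd_mvar))
    if gs_mw != 0 or bs_mvar != 0:
        created.append(("shunt", gs_mw, -bs_mvar))    # ppc susceptance is positive-capacitive
    return created

# e.g. _example_ppc_bus_sign_convention(-10., -2., 0., 0.3)
# -> [("sgen", 10.0, 2.0), ("shunt", 0.0, -0.3)]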
% i)\n if ppc['gen'][i, 4] > ppc['gen'][i, 3]:\n logger.info('min_q_mvar of gen %d must be less than max_q_mvar but is not.' % i)\n if -ppc['gen'][i, 9] < -ppc['gen'][i, 8]:\n logger.info('max_p_mw of gen %d must be less than min_p_mw but is not.' % i)\n else:\n current_bus_type = 1\n # create sgen\n if current_bus_type == 1:\n gen_lookup.element.loc[i] = pp.create_sgen(\n net, bus=current_bus_idx, p_mw=ppc['gen'][i, 1],\n q_mvar=ppc['gen'][i, 2], type=\"\", in_service=bool(ppc['gen'][i, 7] > 0),\n max_p_mw=ppc['gen'][i, PMAX], min_p_mw=ppc['gen'][i, PMIN],\n max_q_mvar=ppc['gen'][i, QMAX], min_q_mvar=ppc['gen'][i, QMIN],\n controllable=True)\n gen_lookup.element_type.loc[i] = 'sgen'\n if ppc['gen'][i, 1] < 0:\n logger.info('p_mw of sgen %d must be less than zero but is not.' % i)\n if ppc['gen'][i, 4] > ppc['gen'][i, 3]:\n logger.info('min_q_mvar of gen %d must be less than max_q_mvar but is not.' % i)\n if -ppc['gen'][i, 9] < -ppc['gen'][i, 8]:\n logger.info('max_p_mw of gen %d must be less than min_p_mw but is not.' % i)\n # unused data of ppc: Vg (partwise: in ext_grid and gen), mBase, Pc1, Pc2, Qc1min, Qc1max,\n # Qc2min, Qc2max, ramp_agc, ramp_10, ramp_30,ramp_q, apf\n\n # --- branch data -> create line, trafo\n for i in range(len(ppc['branch'])):\n from_bus = pp.get_element_index(net, 'bus', name=int(ppc['branch'][i, 0]))\n to_bus = pp.get_element_index(net, 'bus', name=int(ppc['branch'][i, 1]))\n\n from_vn_kv = ppc['bus'][from_bus, 9]\n to_vn_kv = ppc['bus'][to_bus, 9]\n if (from_vn_kv == to_vn_kv) & ((ppc['branch'][i, 8] == 0) | (ppc['branch'][i, 8] == 1)) & \\\n (ppc['branch'][i, 9] == 0): # create line\n Zni = ppc['bus'][to_bus, 9]**2/baseMVA # ohm\n max_i_ka = ppc['branch'][i, 5]/ppc['bus'][to_bus, 9]/sqrt(3)\n if max_i_ka == 0.0:\n max_i_ka = MAX_VAL\n logger.debug(\"ppc branch rateA is zero -> Using MAX_VAL instead to calculate \" +\n \"maximum branch flow\")\n pp.create_line_from_parameters(\n net, from_bus=from_bus, to_bus=to_bus, length_km=1,\n r_ohm_per_km=ppc['branch'][i, 2]*Zni, x_ohm_per_km=ppc['branch'][i, 3]*Zni,\n c_nf_per_km=ppc['branch'][i, 4]/Zni/omega*1e9/2,\n max_i_ka=max_i_ka, type='ol', max_loading_percent=100,\n in_service=bool(ppc['branch'][i, 10]))\n\n else: # create transformer\n if from_vn_kv >= to_vn_kv:\n hv_bus = from_bus\n vn_hv_kv = from_vn_kv\n lv_bus = to_bus\n vn_lv_kv = to_vn_kv\n tap_side = 'hv'\n else:\n hv_bus = to_bus\n vn_hv_kv = to_vn_kv\n lv_bus = from_bus\n vn_lv_kv = from_vn_kv\n tap_side = 'lv'\n if from_vn_kv == to_vn_kv:\n logger.warning('The pypower branch %d (from_bus, to_bus)=(%d, %d) is considered'\n ' as a transformer because of a ratio != 0 | 1 but it connects '\n 'the same voltage level', i, ppc['branch'][i, 0],\n ppc['branch'][i, 1])\n rk = ppc['branch'][i, 2]\n xk = ppc['branch'][i, 3]\n zk = (rk ** 2 + xk ** 2) ** 0.5\n sn = ppc['branch'][i, 5]\n if sn == 0.0:\n sn = MAX_VAL\n logger.debug(\"ppc branch rateA is zero -> Using MAX_VAL instead to calculate \" +\n \"apparent power\")\n ratio_1 = 0 if ppc['branch'][i, 8] == 0 else (ppc['branch'][i, 8] - 1) * 100\n i0_percent = -ppc['branch'][i, 4] * 100 * baseMVA / sn\n if i0_percent < 0:\n logger.info('A transformer always behaves inductive consumpting but the '\n 'susceptance of pypower branch %d (from_bus, to_bus)=(%d, %d) is '\n 'positive.', i, ppc['branch'][i, 0], ppc['branch'][i, 1])\n\n pp.create_transformer_from_parameters(\n net, hv_bus=hv_bus, lv_bus=lv_bus, sn_mva=sn, vn_hv_kv=vn_hv_kv,\n vn_lv_kv=vn_lv_kv, vk_percent=sign(xk) * zk * sn * 100 / baseMVA,\n 
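# Illustrative sketch (added for clarity, not part of the original converter): the
# per-unit to ohmic conversion applied to ppc branches that become lines above. Only
# the math module is needed; the example numbers are arbitrary.
def _example_ppc_branch_to_line_parameters(r_pu, x_pu, b_pu, rate_a_mva,
                                           vn_kv, base_mva, f_hz=50):
    from math import pi, sqrt
    omega = pi * f_hz                  # same convention as used in from_ppc above
    zni = vn_kv ** 2 / base_mva        # ohm, impedance base of the branch
    r_ohm_per_km = r_pu * zni          # length_km is fixed to 1 in the conversion
    x_ohm_per_km = x_pu * zni
    c_nf_per_km = b_pu / zni / omega * 1e9 / 2
    max_i_ka = rate_a_mva / vn_kv / sqrt(3)
    return r_ohm_per_km, x_ohm_per_km, c_nf_per_km, max_i_ka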
vkr_percent=rk * sn * 100 / baseMVA, max_loading_percent=100,\n pfe_kw=0, i0_percent=i0_percent, shift_degree=ppc['branch'][i, 9],\n tap_step_percent=abs(ratio_1), tap_pos=sign(ratio_1),\n tap_side=tap_side, tap_neutral=0)\n # unused data of ppc: rateB, rateC\n\n # --- gencost -> create polynomial_cost, piecewise_cost\n if 'gencost' in ppc:\n if len(ppc['gencost'].shape) == 1:\n # reshape gencost if only one gencost is given -> no indexError\n ppc['gencost'] = ppc['gencost'].reshape((1, -1))\n if ppc['gencost'].shape[0] <= gen_lookup.shape[0]:\n idx_p = range(ppc['gencost'].shape[0])\n idx_q = []\n elif ppc['gencost'].shape[0] > gen_lookup.shape[0]:\n idx_p = range(gen_lookup.shape[0])\n idx_q = range(gen_lookup.shape[0], ppc['gencost'].shape[0])\n if ppc['gencost'].shape[0] >= 2*gen_lookup.shape[0]:\n idx_p = range(gen_lookup.shape[0])\n idx_q = range(gen_lookup.shape[0], 2*gen_lookup.shape[0])\n for idx in idx_p:\n _create_costs(net, ppc, gen_lookup, 'p', idx)\n for idx in idx_q:\n _create_costs(net, ppc, gen_lookup, 'q', idx)\n\n # areas are unconverted\n\n if validate_conversion:\n logger.setLevel(logging.DEBUG)\n if not validate_from_ppc(ppc, net, **kwargs):\n logger.error(\"Validation failed.\")\n\n net._options = {}\n net._options[\"gen_lookup\"] = gen_lookup\n\n return net\n\n\ndef _validate_diff_res(diff_res, max_diff_values):\n to_iterate = set(max_diff_values.keys()) & {'gen_q_mvar', 'branch_p_mw', 'branch_q_mvar',\n 'gen_p_mw', 'bus_va_degree', 'bus_vm_pu'}\n if not len(to_iterate):\n logger.warning(\"There are no keys to validate.\")\n val = True\n for i in to_iterate:\n elm = i.split(\"_\")[0]\n sought = [\"p\", \"q\"] if elm != \"bus\" else [\"vm\", \"va\"]\n col = int(array([0, 1])[[j in i for j in sought]][0]) if elm != \"branch\" else \\\n list(array([[0, 2], [1, 3]])[[j in i for j in sought]][0])\n val &= bool(max_(abs(diff_res[elm][:, col])) < max_diff_values[i])\n return val\n\n\ndef validate_from_ppc(ppc_net, net, pf_type=\"runpp\", max_diff_values={\n \"bus_vm_pu\": 1e-6, \"bus_va_degree\": 1e-5, \"branch_p_mw\": 1e-6, \"branch_q_mvar\": 1e-6,\n \"gen_p_mw\": 1e-6, \"gen_q_mvar\": 1e-6}, run=True):\n \"\"\"\n This function validates the pypower case files to pandapower net structure conversion via a \\\n comparison of loadflow calculation results. (Hence the opf cost conversion is not validated.)\n\n INPUT:\n\n **ppc_net** - The pypower case file, which must already contain the pypower powerflow\n results or pypower must be importable.\n\n **net** - The pandapower network.\n\n OPTIONAL:\n\n **pf_type** (\"runpp\", string) - Type of validated power flow. Possible are (\"runpp\",\n \"rundcpp\", \"runopp\", \"rundcopp\")\n\n **max_diff_values** - Dict of maximal allowed difference values. The keys must be\n 'vm_pu', 'va_degree', 'p_branch_mw', 'q_branch_mvar', 'p_gen_mw' and 'q_gen_mvar' and\n the values floats.\n\n **run** (True, bool or list of two bools) - changing the value to False avoids trying to run\n (optimal) loadflows. 
Giving a list of two bools addresses first pypower and second\n pandapower.\n\n OUTPUT:\n\n **conversion_success** - conversion_success is returned as False if pypower or pandapower\n cannot calculate a powerflow or if the maximum difference values (max_diff_values )\n cannot be hold.\n\n EXAMPLE:\n\n import pandapower.converter as pc\n\n net = cv.from_ppc(ppc_net, f_hz=50)\n\n conversion_success = cv.validate_from_ppc(ppc_net, net)\n\n NOTE:\n\n The user has to take care that the loadflow results already are included in the provided \\\n ppc_net or pypower is importable.\n \"\"\"\n # check in case of optimal powerflow comparison whether cost information exist\n if \"opp\" in pf_type:\n if not (len(net.polynomial_cost) | len(net.piecewise_linear_cost)):\n if \"gencost\" in ppc_net:\n if not len(ppc_net[\"gencost\"]):\n logger.debug('ppc and pandapower net do not include cost information.')\n return True\n else:\n logger.error('The pandapower net does not include cost information.')\n return False\n else:\n logger.debug('ppc and pandapower net do not include cost information.')\n return True\n\n # guarantee run parameter as list, for pypower and pandapower (optimal) powerflow run\n run = [run, run] if isinstance(run, bool) else run\n\n # --- check pypower powerflow success, if possible\n if pypower_import and run[0]:\n try:\n if pf_type == \"runpp\":\n ppc_net = runpf.runpf(ppc_net, ppopt)[0]\n elif pf_type == \"rundcpp\":\n ppc_net = rundcpf.rundcpf(ppc_net, ppopt)[0]\n elif pf_type == \"runopp\":\n ppc_net = runopf.runopf(ppc_net, ppopt)\n elif pf_type == \"rundcopp\":\n ppc_net = rundcopf.rundcopf(ppc_net, ppopt)\n else:\n raise ValueError(\"The pf_type %s is unknown\" % pf_type)\n except:\n logger.debug(\"The pypower run did not work.\")\n ppc_success = True\n if 'success' in ppc_net.keys():\n if ppc_net['success'] != 1:\n ppc_success = False\n logger.error(\"The given ppc data indicates an unsuccessful pypower powerflow: \" +\n \"'ppc_net['success'] != 1'\")\n if (ppc_net['branch'].shape[1] < 17):\n ppc_success = False\n logger.error(\"The shape of given ppc data indicates missing pypower powerflow results.\")\n\n # --- try to run a pandapower powerflow\n if run[1]:\n if pf_type == \"runpp\":\n try:\n pp.runpp(net, init=\"dc\", calculate_voltage_angles=True, trafo_model=\"pi\")\n except pp.LoadflowNotConverged:\n try:\n pp.runpp(net, calculate_voltage_angles=True, init=\"flat\", trafo_model=\"pi\")\n except pp.LoadflowNotConverged:\n try:\n pp.runpp(net, trafo_model=\"pi\", calculate_voltage_angles=False)\n if \"bus_va_degree\" in max_diff_values.keys():\n max_diff_values[\"bus_va_degree\"] = 1e2 if max_diff_values[\n \"bus_va_degree\"] < 1e2 else max_diff_values[\"bus_va_degree\"]\n logger.info(\"voltage_angles could be calculated.\")\n except pp.LoadflowNotConverged:\n logger.error('The pandapower powerflow does not converge.')\n elif pf_type == \"rundcpp\":\n try:\n pp.rundcpp(net, trafo_model=\"pi\")\n except pp.LoadflowNotConverged:\n logger.error('The pandapower dc powerflow does not converge.')\n elif pf_type == \"runopp\":\n try:\n pp.runopp(net, init=\"flat\", calculate_voltage_angles=True)\n except pp.OPFNotConverged:\n try:\n pp.runopp(net, init=\"pf\", calculate_voltage_angles=True)\n except (pp.OPFNotConverged, pp.LoadflowNotConverged, KeyError):\n try:\n pp.runopp(net, init=\"flat\", calculate_voltage_angles=False)\n logger.info(\"voltage_angles could be calculated.\")\n if \"bus_va_degree\" in max_diff_values.keys():\n max_diff_values[\"bus_va_degree\"] = 1e2 if 
max_diff_values[\n \"bus_va_degree\"] < 1e2 else max_diff_values[\"bus_va_degree\"]\n except pp.OPFNotConverged:\n try:\n pp.runopp(net, init=\"pf\", calculate_voltage_angles=False)\n if \"bus_va_degree\" in max_diff_values.keys():\n max_diff_values[\"bus_va_degree\"] = 1e2 if max_diff_values[\n \"bus_va_degree\"] < 1e2 else max_diff_values[\"bus_va_degree\"]\n logger.info(\"voltage_angles could be calculated.\")\n except (pp.OPFNotConverged, pp.LoadflowNotConverged, KeyError):\n logger.error('The pandapower optimal powerflow does not converge.')\n elif pf_type == \"rundcopp\":\n try:\n pp.rundcopp(net)\n except pp.LoadflowNotConverged:\n logger.error('The pandapower dc optimal powerflow does not converge.')\n else:\n raise ValueError(\"The pf_type %s is unknown\" % pf_type)\n\n # --- prepare powerflow result comparison by reordering pp results as they are in ppc results\n if not ppc_success:\n return False\n if \"opp\" in pf_type:\n if not net.OPF_converged:\n return\n elif not net.converged:\n return False\n\n # --- store pypower powerflow results\n ppc_res = dict.fromkeys(ppc_elms)\n ppc_res[\"branch\"] = ppc_net['branch'][:, 13:17]\n ppc_res[\"bus\"] = ppc_net['bus'][:, 7:9]\n ppc_res[\"gen\"] = ppc_net['gen'][:, 1:3]\n\n # --- pandapower bus result table\n pp_res = dict.fromkeys(ppc_elms)\n pp_res[\"bus\"] = array(net.res_bus.sort_index()[['vm_pu', 'va_degree']])\n\n # --- pandapower gen result table\n pp_res[\"gen\"] = zeros([1, 2])\n # consideration of parallel generators via storing how much generators have been considered\n # each node\n # if in ppc is only one gen -> numpy initially uses one dim array -> change to two dim array\n if len(ppc_net[\"gen\"].shape) == 1:\n ppc_net[\"gen\"] = array(ppc_net[\"gen\"], ndmin=2)\n GENS = DataFrame(ppc_net['gen'][:, [0]].astype(int))\n GEN_uniq = GENS.drop_duplicates()\n already_used_gen = Series(zeros(GEN_uniq.shape[0]).astype(int),\n index=[int(v) for v in GEN_uniq.values])\n change_q_compare = []\n for i, j in GENS.iterrows():\n current_bus_type, current_bus_idx, same_bus_gen_idx, first_same_bus_in_service_gen_idx, \\\n last_same_bus_in_service_gen_idx = _gen_bus_info(ppc_net, i)\n if current_bus_type == 3 and i == first_same_bus_in_service_gen_idx:\n pp_res[\"gen\"] = append(pp_res[\"gen\"], array(net.res_ext_grid[\n net.ext_grid.bus == current_bus_idx][['p_mw', 'q_mvar']]).reshape((1, 2)), 0)\n elif current_bus_type == 2 and i == first_same_bus_in_service_gen_idx:\n pp_res[\"gen\"] = append(pp_res[\"gen\"], array(net.res_gen[\n net.gen.bus == current_bus_idx][['p_mw', 'q_mvar']]).reshape((1, 2)), 0)\n else:\n pp_res[\"gen\"] = append(pp_res[\"gen\"], array(net.res_sgen[\n net.sgen.bus == current_bus_idx][['p_mw', 'q_mvar']])[\n already_used_gen.at[int(j)]].reshape((1, 2)), 0)\n already_used_gen.at[int(j)] += 1\n change_q_compare += [int(j)]\n pp_res[\"gen\"] = pp_res[\"gen\"][1:, :] # delete initial zero row\n\n # --- pandapower branch result table\n pp_res[\"branch\"] = zeros([1, 4])\n # consideration of parallel branches via storing how often branches were considered\n # each node-to-node-connection\n try:\n init1 = concat([net.line.from_bus, net.line.to_bus], axis=1,\n sort=True).drop_duplicates()\n init2 = concat([net.trafo.hv_bus, net.trafo.lv_bus], axis=1,\n sort=True).drop_duplicates()\n except TypeError:\n # legacy pandas < 0.21\n init1 = concat([net.line.from_bus, net.line.to_bus], axis=1).drop_duplicates()\n init2 = concat([net.trafo.hv_bus, net.trafo.lv_bus], axis=1).drop_duplicates()\n init1['hv_bus'] = nan\n 
init1['lv_bus'] = nan\n init2['from_bus'] = nan\n init2['to_bus'] = nan\n try:\n already_used_branches = concat([init1, init2], axis=0, sort=True)\n except TypeError:\n # pandas < 0.21 legacy\n already_used_branches = concat([init1, init2], axis=0)\n already_used_branches['number'] = zeros([already_used_branches.shape[0], 1]).astype(int)\n BRANCHES = DataFrame(ppc_net['branch'][:, [0, 1, 8, 9]])\n for i in BRANCHES.index:\n from_bus = pp.get_element_index(net, 'bus', name=int(ppc_net['branch'][i, 0]))\n to_bus = pp.get_element_index(net, 'bus', name=int(ppc_net['branch'][i, 1]))\n from_vn_kv = ppc_net['bus'][from_bus, 9]\n to_vn_kv = ppc_net['bus'][to_bus, 9]\n ratio = BRANCHES[2].at[i]\n angle = BRANCHES[3].at[i]\n # from line results\n if (from_vn_kv == to_vn_kv) & ((ratio == 0) | (ratio == 1)) & (angle == 0):\n pp_res[\"branch\"] = append(pp_res[\"branch\"], array(net.res_line[\n (net.line.from_bus == from_bus) &\n (net.line.to_bus == to_bus)]\n [['p_from_mw', 'q_from_mvar', 'p_to_mw', 'q_to_mvar']])[\n int(already_used_branches.number.loc[\n (already_used_branches.from_bus == from_bus) &\n (already_used_branches.to_bus == to_bus)].values)].reshape(1, 4), 0)\n already_used_branches.number.loc[(already_used_branches.from_bus == from_bus) &\n (already_used_branches.to_bus == to_bus)] += 1\n # from trafo results\n else:\n if from_vn_kv >= to_vn_kv:\n pp_res[\"branch\"] = append(pp_res[\"branch\"], array(net.res_trafo[\n (net.trafo.hv_bus == from_bus) &\n (net.trafo.lv_bus == to_bus)]\n [['p_hv_mw', 'q_hv_mvar', 'p_lv_mw', 'q_lv_mvar']])[\n int(already_used_branches.number.loc[\n (already_used_branches.hv_bus == from_bus) &\n (already_used_branches.lv_bus == to_bus)].values)].reshape(1, 4), 0)\n already_used_branches.number.loc[(already_used_branches.hv_bus == from_bus) &\n (already_used_branches.lv_bus == to_bus)] += 1\n else: # switch hv-lv-connection of pypower connection buses\n pp_res[\"branch\"] = append(pp_res[\"branch\"], array(net.res_trafo[\n (net.trafo.hv_bus == to_bus) &\n (net.trafo.lv_bus == from_bus)]\n [['p_lv_mw', 'q_lv_mvar', 'p_hv_mw', 'q_hv_mvar']])[\n int(already_used_branches.number.loc[\n (already_used_branches.hv_bus == to_bus) &\n (already_used_branches.lv_bus == from_bus)].values)].reshape(1, 4), 0)\n already_used_branches.number.loc[\n (already_used_branches.hv_bus == to_bus) &\n (already_used_branches.lv_bus == from_bus)] += 1\n pp_res[\"branch\"] = pp_res[\"branch\"][1:, :] # delete initial zero row\n\n # --- do the powerflow result comparison\n diff_res = dict.fromkeys(ppc_elms)\n diff_res[\"bus\"] = ppc_res[\"bus\"] - pp_res[\"bus\"]\n diff_res[\"bus\"][:, 1] -= diff_res[\"bus\"][0, 1] # remove va_degree offset\n diff_res[\"branch\"] = ppc_res[\"branch\"] - pp_res[\"branch\"]\n diff_res[\"gen\"] = ppc_res[\"gen\"] - pp_res[\"gen\"]\n # comparison of buses with several generator units only as q sum\n for i in GEN_uniq.loc[GEN_uniq[0].isin(change_q_compare)].index:\n next_is = GEN_uniq.index[GEN_uniq.index > i]\n if len(next_is) > 0:\n next_i = next_is[0]\n else:\n next_i = GENS.index[-1] + 1\n if (next_i - i) > 1:\n diff_res[\"gen\"][i:next_i, 1] = sum(diff_res[\"gen\"][i:next_i, 1])\n # logger info\n logger.debug(\"Maximum voltage magnitude difference between pypower and pandapower: \"\n \"%.2e pu\" % max_(abs(diff_res[\"bus\"][:, 0])))\n logger.debug(\"Maximum voltage angle difference between pypower and pandapower: \"\n \"%.2e degree\" % max_(abs(diff_res[\"bus\"][:, 1])))\n logger.debug(\"Maximum branch flow active power difference between pypower and 
pandapower: \"\n \"%.2e MW\" % max_(abs(diff_res[\"branch\"][:, [0, 2]])))\n logger.debug(\"Maximum branch flow reactive power difference between pypower and \"\n \"pandapower: %.2e MVAr\" % max_(abs(diff_res[\"branch\"][:, [1, 3]])))\n logger.debug(\"Maximum active power generation difference between pypower and pandapower: \"\n \"%.2e MW\" % max_(abs(diff_res[\"gen\"][:, 0])))\n logger.debug(\"Maximum reactive power generation difference between pypower and pandapower: \"\n \"%.2e MVAr\" % max_(abs(diff_res[\"gen\"][:, 1])))\n if _validate_diff_res(diff_res, {\"bus_vm_pu\": 1e-3, \"bus_va_degree\": 1e-3, \"branch_p_mw\": 1e-6,\n \"branch_q_mvar\": 1e-6}) and \\\n (max_(abs(diff_res[\"gen\"])) > 1e-1).any():\n logger.debug(\"The active/reactive power generation difference possibly results \"\n \"because of a pypower error. Please validate \"\n \"the results via pypower loadflow.\") # this occurs e.g. at ppc case9\n # give a return\n if isinstance(max_diff_values, dict):\n return _validate_diff_res(diff_res, max_diff_values)\n else:\n logger.debug(\"'max_diff_values' must be a dict.\")\n", "# -*- coding: utf-8 -*-\n\n# Copyright (c) 2016-2022 by University of Kassel and Fraunhofer Institute for Energy Economics\n# and Energy System Technology (IEE), Kassel. All rights reserved.\n\nimport numpy as np\nfrom scipy.stats import chi2\n\nfrom pandapower.estimation.algorithm.base import (WLSAlgorithm,\n WLSZeroInjectionConstraintsAlgorithm,\n IRWLSAlgorithm)\nfrom pandapower.estimation.algorithm.lp import LPAlgorithm\nfrom pandapower.estimation.algorithm.optimization import OptAlgorithm\nfrom pandapower.estimation.ppc_conversion import pp2eppci, _initialize_voltage\nfrom pandapower.estimation.results import eppci2pp\nfrom pandapower.estimation.util import set_bb_switch_impedance, reset_bb_switch_impedance\n\ntry:\n import pandaplan.core.pplog as logging\nexcept ImportError:\n import logging\nstd_logger = logging.getLogger(__name__)\n\nALGORITHM_MAPPING = {'wls': WLSAlgorithm,\n 'wls_with_zero_constraint': WLSZeroInjectionConstraintsAlgorithm,\n 'opt': OptAlgorithm,\n 'irwls': IRWLSAlgorithm,\n 'lp': LPAlgorithm}\nALLOWED_OPT_VAR = {\"a\", \"opt_method\", \"estimator\"}\n\n\ndef estimate(net, algorithm='wls',\n init='flat', tolerance=1e-6, maximum_iterations=10,\n calculate_voltage_angles=True,\n zero_injection='aux_bus', fuse_buses_with_bb_switch='all',\n **opt_vars):\n \"\"\"\n Wrapper function for WLS state estimation.\n\n INPUT:\n **net** (pandapowerNet) - The net within this line should be created\n\n **init** (string) - Initial voltage for the estimation. 'flat' sets 1.0 p.u. / 0° for all \\\n buses, 'results' uses the values from *res_bus* if available and 'slack' considers the \\\n slack bus voltage (and optionally, angle) as the initial values. Default is 'flat'\n\n OPTIONAL:\n **tolerance** (float) - When the maximum state change between iterations is less than \\\n tolerance, the process stops. Default is 1e-6\n\n **maximum_iterations** (integer) - Maximum number of iterations. Default is 10\n\n **calculate_voltage_angles** (boolean) - Take into account absolute voltage angles and phase \\\n shifts in transformers, if init is 'slack'. 
Default is True\n\n **zero_injection** (str, iterable, None) - Defines which buses are zero injection bus or the method \\\n to identify zero injection bus, with 'wls_estimator' virtual measurements will be added, with \\\n 'wls_estimator with zero constraints' the buses will be handled as constraints\n\n - \"auto\": all bus without p,q measurement, without p, q value (load, sgen...) and aux buses will be \\\n identified as zero injection bus\n - \"aux_bus\": only aux bus will be identified as zero injection bus\n - None: no bus will be identified as zero injection bus\n - iterable: the iterable should contain index of the zero injection bus and also aux bus will be identified \\\n as zero-injection bus\n\n **fuse_buses_with_bb_switch** (str, iterable, None) - Defines how buses with closed bb switches should \\\n be handled, if fuse buses will only fused to one for calculation, if not fuse, an auxiliary bus and \\\n auxiliary line will be automatically added to the network to make the buses with different p,q injection \\\n measurements identifieble\n\n - \"all\": all buses with bb-switches will be fused, the same as the default behaviour in load flow\n - None: buses with bb-switches and individual p,q measurements will be reconfigurated \\\n by auxiliary elements\n - iterable: the iterable should contain the index of buses to be fused, the behaviour is contigous e.g. \\\n if one of the bus among the buses connected through bb switch is given, then all of them will still \\\n be fused\n\n OUTPUT:\n **successful** (boolean) - Was the state estimation successful?\n \"\"\"\n if algorithm not in ALGORITHM_MAPPING:\n raise UserWarning(\"Algorithm {} is not a valid estimator\".format(algorithm))\n\n se = StateEstimation(net, tolerance, maximum_iterations, algorithm=algorithm)\n v_start, delta_start = _initialize_voltage(net, init, calculate_voltage_angles)\n return se.estimate(v_start=v_start, delta_start=delta_start,\n calculate_voltage_angles=calculate_voltage_angles,\n zero_injection=zero_injection,\n fuse_buses_with_bb_switch=fuse_buses_with_bb_switch, **opt_vars)\n\n\ndef remove_bad_data(net, init='flat', tolerance=1e-6, maximum_iterations=10,\n calculate_voltage_angles=True, rn_max_threshold=3.0):\n \"\"\"\n Wrapper function for bad data removal.\n\n INPUT:\n **net** - The net within this line should be created\n\n **init** - (string) Initial voltage for the estimation. 'flat' sets 1.0 p.u. / 0° for all\n buses, 'results' uses the values from *res_bus_est* if available and 'slack' considers the\n slack bus voltage (and optionally, angle) as the initial values. Default is 'flat'\n\n OPTIONAL:\n **tolerance** - (float) - When the maximum state change between iterations is less than\n tolerance, the process stops. Default is 1e-6\n\n **maximum_iterations** - (integer) - Maximum number of iterations. Default is 10\n\n **calculate_voltage_angles** - (boolean) - Take into account absolute voltage angles and phase\n shifts in transformers, if init is 'slack'. 
Default is True\n\n **rn_max_threshold** (float) - Identification threshold to determine\n if the largest normalized residual reflects a bad measurement\n (default value of 3.0)\n\n OUTPUT:\n **successful** (boolean) - Was the state estimation successful?\n \"\"\"\n wls_se = StateEstimation(net, tolerance, maximum_iterations, algorithm=\"wls\")\n v_start, delta_start = _initialize_voltage(net, init, calculate_voltage_angles)\n return wls_se.perform_rn_max_test(v_start, delta_start, calculate_voltage_angles,\n rn_max_threshold)\n\n\ndef chi2_analysis(net, init='flat', tolerance=1e-6, maximum_iterations=10,\n calculate_voltage_angles=True, chi2_prob_false=0.05):\n \"\"\"\n Wrapper function for the chi-squared test.\n\n INPUT:\n **net** - The net within this line should be created.\n\n **init** - (string) Initial voltage for the estimation. 'flat' sets 1.0 p.u. / 0° for all\n buses, 'results' uses the values from *res_bus_est* if available and 'slack' considers the\n slack bus voltage (and optionally, angle) as the initial values. Default is 'flat'\n\n OPTIONAL:\n **tolerance** - (float) - When the maximum state change between iterations is less than\n tolerance, the process stops. Default is 1e-6\n\n **maximum_iterations** - (integer) - Maximum number of iterations. Default is 10\n\n **calculate_voltage_angles** - (boolean) - Take into account absolute voltage angles and phase\n shifts in transformers, if init is 'slack'. Default is True\n\n **chi2_prob_false** (float) - probability of error / false alarms\n (default value: 0.05)\n\n OUTPUT:\n **bad_data_detected** (boolean) - Returns true if bad data has been detected\n \"\"\"\n wls_se = StateEstimation(net, tolerance, maximum_iterations, algorithm=\"wls\")\n v_start, delta_start = _initialize_voltage(net, init, calculate_voltage_angles)\n return wls_se.perform_chi2_test(v_start, delta_start, calculate_voltage_angles,\n chi2_prob_false)\n\n\nclass StateEstimation:\n \"\"\"\n Any user of the estimation module only needs to use the class state_estimation. It contains all\n relevant functions to control and operator the module. Two functions are used to configure the\n system according to the users needs while one function is used for the actual estimation\n process.\n \"\"\"\n\n def __init__(self, net, tolerance=1e-6, maximum_iterations=10, algorithm='wls', logger=None, recycle=False):\n self.logger = logger\n if self.logger is None:\n self.logger = std_logger\n # self.logger.setLevel(logging.DEBUG)\n self.net = net\n self.solver = ALGORITHM_MAPPING[algorithm](tolerance,\n maximum_iterations, self.logger)\n self.ppc = None\n self.eppci = None\n self.recycle = recycle\n\n # variables for chi^2 / rn_max tests\n self.delta = None\n self.bad_data_present = None\n\n def estimate(self, v_start='flat', delta_start='flat', calculate_voltage_angles=True,\n zero_injection=None, fuse_buses_with_bb_switch='all', **opt_vars):\n \"\"\"\n The function estimate is the main function of the module. It takes up to three input\n arguments: v_start, delta_start and calculate_voltage_angles. The first two are the initial\n state variables for the estimation process. Usually they can be initialized in a\n \"flat-start\" condition: All voltages being 1.0 pu and all voltage angles being 0 degrees.\n In this case, the parameters can be left at their default values (None). 
If the estimation\n is applied continuously, using the results from the last estimation as the starting\n condition for the current estimation can decrease the amount of iterations needed to\n estimate the current state. The third parameter defines whether all voltage angles are\n calculated absolutely, including phase shifts from transformers. If only the relative\n differences between buses are required, this parameter can be set to False. Returned is a\n boolean value, which is true after a successful estimation and false otherwise.\n The resulting complex voltage will be written into the pandapower network. The result\n fields are found res_bus_est of the pandapower network.\n INPUT:\n **net** - The net within this line should be created\n **v_start** (np.array, shape=(1,), optional) - Vector with initial values for all\n voltage magnitudes in p.u. (sorted by bus index)\n **delta_start** (np.array, shape=(1,), optional) - Vector with initial values for all\n voltage angles in degrees (sorted by bus index)\n OPTIONAL:\n **tolerance** - (float) - When the maximum state change between iterations is less than\n tolerance, the process stops. Default is 1e-6\n\n **maximum_iterations** - (integer) - Maximum number of iterations. Default is 10\n\n **calculate_voltage_angles** - (boolean) - Take into account absolute voltage angles and phase\n shifts in transformers, if init is 'slack'. Default is True\n \n **zero_injection** - (str, iterable, None) - Defines which buses are zero injection bus or the method\n to identify zero injection bus, with 'wls_estimator' virtual measurements will be added, with \n 'wls_estimator with zero constraints' the buses will be handled as constraints\n \"auto\": all bus without p,q measurement, without p, q value (load, sgen...) and aux buses will be\n identified as zero injection bus \n \"aux_bus\": only aux bus will be identified as zero injection bus\n None: no bus will be identified as zero injection bus\n iterable: the iterable should contain index of the zero injection bus and also aux bus will be identified\n as zero-injection bus\n\n **fuse_buses_with_bb_switch** - (str, iterable, None) - Defines how buses with closed bb switches should \n be handled, if fuse buses will only fused to one for calculation, if not fuse, an auxiliary bus and \n auxiliary line will be automatically added to the network to make the buses with different p,q injection\n measurements identifieble\n \"all\": all buses with bb-switches will be fused, the same as the default behaviour in load flow\n None: buses with bb-switches and individual p,q measurements will be reconfigurated\n by auxiliary elements\n iterable: the iterable should contain the index of buses to be fused, the behaviour is contigous e.g.\n if one of the bus among the buses connected through bb switch is given, then all of them will still\n be fused\n OUTPUT:\n **successful** (boolean) - True if the estimation process was successful\n Optional estimation variables:\n The bus power injections can be accessed with *se.s_node_powers* and the estimated\n values corresponding to the (noisy) measurement values with *se.hx*. (*hx* denotes h(x))\n EXAMPLE:\n success = estimate(np.array([1.0, 1.0, 1.0]), np.array([0.0, 0.0, 0.0]))\n \"\"\"\n # check if all parameter are allowed\n for var_name in opt_vars.keys():\n if var_name not in ALLOWED_OPT_VAR:\n self.logger.warning(\"Caution! 
%s is not allowed as parameter\" % var_name \\\n + \" for estimate and will be ignored!\")\n\n if self.net is None:\n raise UserWarning(\"SE Component was not initialized with a network.\")\n\n # change the configuration of the pp net to avoid auto fusing of buses connected\n # through bb switch with elements on each bus if this feature enabled\n bus_to_be_fused = None\n if fuse_buses_with_bb_switch != 'all' and not self.net.switch.empty:\n if isinstance(fuse_buses_with_bb_switch, str):\n raise UserWarning(\"fuse_buses_with_bb_switch parameter is not correctly initialized\")\n elif hasattr(fuse_buses_with_bb_switch, '__iter__'):\n bus_to_be_fused = fuse_buses_with_bb_switch\n set_bb_switch_impedance(self.net, bus_to_be_fused)\n\n self.net, self.ppc, self.eppci = pp2eppci(self.net, v_start=v_start, delta_start=delta_start,\n calculate_voltage_angles=calculate_voltage_angles,\n zero_injection=zero_injection, ppc=self.ppc, eppci=self.eppci)\n\n # Estimate voltage magnitude and angle with the given estimator\n self.eppci = self.solver.estimate(self.eppci, **opt_vars)\n\n if self.solver.successful:\n self.net = eppci2pp(self.net, self.ppc, self.eppci)\n else:\n self.logger.warning(\"Estimation failed! Pandapower network failed to update!\")\n\n # clear the aux elements and calculation results created for the substitution of bb switches\n if fuse_buses_with_bb_switch != 'all' and not self.net.switch.empty:\n reset_bb_switch_impedance(self.net)\n\n # if recycle is not wished, reset ppc, ppci\n if not self.recycle:\n self.ppc, self.eppci = None, None\n return self.solver.successful\n\n def perform_chi2_test(self, v_in_out=None, delta_in_out=None,\n calculate_voltage_angles=True, chi2_prob_false=0.05):\n \"\"\"\n The function perform_chi2_test performs a Chi^2 test for bad data and topology error\n detection. The function can be called with the optional input arguments v_in_out and\n delta_in_out. Then, the Chi^2 test is performed after calling the function estimate using\n them as input arguments. It can also be called without these arguments if it is called\n from the same object with which estimate had been called beforehand. Then, the Chi^2 test is\n performed for the states estimated by the funtion estimate and the result, the existence of bad data,\n is given back as a boolean. As a optional argument the probability\n of a false measurement can be provided additionally. For bad data detection, the function\n perform_rn_max_test is more powerful and should be the function of choice. For topology\n error detection, however, perform_chi2_test should be used.\n\n INPUT:\n **v_in_out** (np.array, shape=(1,), optional) - Vector with initial values for all\n voltage magnitudes in p.u. (sorted by bus index)\n\n **delta_in_out** (np.array, shape=(1,), optional) - Vector with initial values for all\n voltage angles in degrees (sorted by bus index)\n\n OPTIONAL:\n **calculate_voltage_angles** - (boolean) - Take into account absolute voltage angles and phase\n shifts in transformers, if init is 'slack'. 
Default is True\n\n **chi2_prob_false** (float) - probability of error / false alarms (standard value: 0.05)\n\n OUTPUT:\n **successful** (boolean) - True if bad data has been detected\n\n EXAMPLE:\n perform_chi2_test(np.array([1.0, 1.0, 1.0]), np.array([0.0, 0.0, 0.0]), 0.97)\n\n \"\"\"\n # perform SE\n self.estimate(v_in_out, delta_in_out, calculate_voltage_angles)\n\n # Performance index J(hx)\n J = np.dot(self.solver.r.T, np.dot(self.solver.R_inv, self.solver.r))\n\n # Number of measurements\n m = len(self.net.measurement)\n\n # Number of state variables (the -1 is due to the reference bus)\n n = len(self.solver.eppci.v) + len(self.solver.eppci.delta) - 1\n\n # Chi^2 test threshold\n test_thresh = chi2.ppf(1 - chi2_prob_false, m - n)\n\n # Print results\n self.logger.debug(\"Result of Chi^2 test:\")\n self.logger.debug(\"Number of measurements: %d\" % m)\n self.logger.debug(\"Number of state variables: %d\" % n)\n self.logger.debug(\"Performance index: %.2f\" % J)\n self.logger.debug(\"Chi^2 test threshold: %.2f\" % test_thresh)\n\n if J <= test_thresh:\n self.bad_data_present = False\n self.logger.debug(\"Chi^2 test passed. No bad data or topology error detected.\")\n else:\n self.bad_data_present = True\n self.logger.debug(\"Chi^2 test failed. Bad data or topology error detected.\")\n\n if self.solver.successful:\n return self.bad_data_present\n\n def perform_rn_max_test(self, v_in_out=None, delta_in_out=None,\n calculate_voltage_angles=True, rn_max_threshold=3.0):\n \"\"\"\n The function perform_rn_max_test performs a largest normalized residual test for bad data\n identification and removal. It takes two input arguments: v_in_out and delta_in_out.\n These are the initial state variables for the combined estimation and bad data\n identification and removal process. They can be initialized as described above, e.g.,\n using a \"flat\" start. In an iterative process, the function performs a state estimation,\n identifies a bad data measurement, removes it from the set of measurements\n (only if the rn_max threshold is violated by the largest residual of all measurements,\n which can be modified), performs the state estimation again,\n and so on and so forth until no further bad data measurements are detected.\n\n INPUT:\n **v_in_out** (np.array, shape=(1,), optional) - Vector with initial values for all\n voltage magnitudes in p.u. (sorted by bus index)\n\n **delta_in_out** (np.array, shape=(1,), optional) - Vector with initial values for all\n voltage angles in degrees (sorted by bus index)\n\n OPTIONAL:\n **calculate_voltage_angles** - (boolean) - Take into account absolute voltage angles and phase\n shifts in transformers, if init is 'slack'. 
Default is True\n\n **rn_max_threshold** (float) - Identification threshold to determine\n if the largest normalized residual reflects a bad measurement\n (standard value of 3.0)\n\n OUTPUT:\n **successful** (boolean) - True if all bad data could be removed\n\n EXAMPLE:\n perform_rn_max_test(np.array([1.0, 1.0, 1.0]), np.array([0.0, 0.0, 0.0]), 5.0, 0.05)\n\n \"\"\"\n num_iterations = 0\n\n while num_iterations <= 10:\n # Estimate the state with bad data identified in previous iteration\n # removed from set of measurements:\n self.estimate(v_in_out, delta_in_out, calculate_voltage_angles)\n\n # Try to remove the bad data\n try:\n # Error covariance matrix:\n R = np.linalg.inv(self.solver.R_inv)\n\n # for future debugging: this line's results have changed with the ppc\n # overhaul in April 2017 after commit 9ae5b8f42f69ae39f8c8cf (which still works)\n # there are differences of < 1e-10 for the Omega entries which cause\n # the function to work far worse. As of now it is unclear if it's just numerical\n # accuracy to blame or an error in the code. a sort in the ppc creation function\n # was removed which caused this issue\n # Covariance matrix of the residuals: \\Omega = S*R = R - H*G^(-1)*H^T\n # (S is the sensitivity matrix: r = S*e):\n Omega = R - np.dot(self.solver.H, np.dot(np.linalg.inv(self.solver.Gm), self.solver.H.T))\n\n # Diagonalize \\Omega:\n Omega = np.diag(np.diag(Omega))\n\n # Compute squareroot (|.| since some -0.0 produced nans):\n Omega = np.sqrt(np.absolute(Omega))\n\n OmegaInv = np.linalg.inv(Omega)\n\n # Compute normalized residuals (r^N_i = |r_i|/sqrt{Omega_ii}):\n rN = np.dot(OmegaInv, np.absolute(self.solver.r))\n\n if max(rN) <= rn_max_threshold:\n self.logger.debug(\"Largest normalized residual test passed. \"\n \"No bad data detected.\")\n return True\n else:\n self.logger.debug(\n \"Largest normalized residual test failed (%.1f > %.1f).\"\n % (max(rN), rn_max_threshold))\n\n # Identify bad data: Determine index corresponding to max(rN):\n idx_rN = np.argsort(rN, axis=0)[-1]\n\n # Determine pandapower index of measurement to be removed:\n meas_idx = self.solver.pp_meas_indices[idx_rN]\n\n # Remove bad measurement:\n self.logger.debug(\"Removing measurement: %s\"\n % self.net.measurement.loc[meas_idx].values[0])\n self.net.measurement.drop(meas_idx, inplace=True)\n self.logger.debug(\"Bad data removed from the set of measurements.\")\n\n except np.linalg.linalg.LinAlgError:\n self.logger.error(\"A problem appeared while using the linear algebra methods.\"\n \"Check and change the measurement set.\")\n return False\n\n self.logger.debug(\"rN_max identification threshold: %.2f\" % rn_max_threshold)\n num_iterations += 1\n\n return False\n" ]
[ [ "matplotlib.colors.to_rgb", "numpy.linspace", "matplotlib.colors.to_rgba", "matplotlib.pyplot.get_cmap", "matplotlib.colors.Normalize", "matplotlib.cm.get_cmap" ], [ "pandas.concat", "pandas.notnull", "pandas.Series", "pandas.isnull", "numpy.isnan", "numpy.arange", "pandas.Index", "pandas.DataFrame", "numpy.dtype", "numpy.all", "numpy.any", "numpy.array", "numpy.zeros", "numpy.isin" ], [ "numpy.isclose", "pandas.Series", "numpy.allclose" ], [ "pandas.concat", "pandas.Series", "numpy.sqrt", "pandas.DataFrame", "numpy.sign", "numpy.array", "numpy.zeros", "numpy.where" ], [ "scipy.stats.chi2.ppf", "numpy.dot", "numpy.diag", "numpy.absolute", "numpy.linalg.inv", "numpy.argsort" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "1.3", "0.19", "1.1", "1.5", "0.24", "0.20", "1.0", "0.25", "1.2" ], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "1.3", "0.19", "1.1", "1.5", "0.24", "0.20", "1.0", "0.25", "1.2" ], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
kkorhone/Infinite_Borehole_Field
[ "2a08aa48a9943ea824759006ccb30701ac143d75" ]
[ "temp.py" ]
[ "import matplotlib.pyplot as plt\nimport numpy as np\n\nTg = 16.0\nq = 0.090\n\nh = np.array([10, 50, 100, 500])\nk = np.array([1, 3, 0.5, 4])\nI = np.array([0, 1, 2, 3])\n\ndef T(l, z):\n return Tg + sum(q/k[:l]*h[:l]) - sum(q/k[l]*h[:l]) - q/k[l]*z\n\nz1 = 0\nfor l in range(len(h)):\n z2 = z1 - h[l]\n z = np.linspace(z1, z2, 100)\n plt.plot(T(l,z),z)\n plt.axhline(z1, ls=\":\")\n plt.axhline(z2, ls=\":\")\n z1 = z2\n\nplt.show()\n" ]
[ [ "numpy.array", "matplotlib.pyplot.axhline", "matplotlib.pyplot.show", "numpy.linspace" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
Clement-W/Neural.NET
[ "78e5206c541b63ae0aba764d87c2910a351f3efb" ]
[ "dataset/generate_dataset.py" ]
[ "import sklearn.datasets as dt\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nseed = 1\n# Create dataset\n\"\"\"\nx_data,y_data = dt.make_classification(n_samples=1000,\n n_features=2,\n n_repeated=0,\n class_sep=2,\n n_redundant=0,\n random_state=seed)\n\"\"\"\n\nx_data,y_data = dt.make_circles(n_samples=700,\n noise=0.2,\n factor=0.3)\n\n# Plot dataset\nmy_scatter_plot = plt.scatter(x_data[:,0],\n x_data[:,1],\n c=y_data,\n vmin=min(y_data),\n vmax=max(y_data),\n s=35)\nplt.savefig(\"data.png\")\nplt.show()\n\n\n\n# Format y_data\ny_data = np.array([[1,0] if y==0 else [0,1] for y in y_data])\n\n# Save data into csv files\nnp.savetxt(\"x_data.csv\", x_data,delimiter=',',fmt='%f')\nnp.savetxt(\"y_data.csv\", y_data,delimiter=',',fmt='%f')\n\n\n" ]
[ [ "matplotlib.pyplot.savefig", "sklearn.datasets.make_circles", "numpy.savetxt", "numpy.array", "matplotlib.pyplot.show" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
dmitry-vorobiev/kaggle-global-wheat-detection
[ "adf75b73f5955848488477c361c66f1b0510b2bb" ]
[ "src/nms/cluster_nms.py" ]
[ "import torch\nfrom torch import Tensor\n\nEPS = torch.tensor(1e-8)\n\n\[email protected]\ndef dist_iou_ab(box_a: Tensor, box_b: Tensor, eps=EPS):\n \"\"\"\n Args:\n box_a: tensor of shape [batch_size, boxes_a, 4]\n box_b: tensor of shape [batch_size, boxes_b, 4]\n gamma: float\n eps: float\n\n Original:\n https://github.com/Zzh-tju/CIoU/blob/8995056b1e93b86d03c384f042514391b70e58e0/layers/functions/detection.py#L162\n https://github.com/Zzh-tju/CIoU/blob/8995056b1e93b86d03c384f042514391b70e58e0/layers/box_utils.py#L82\n \"\"\"\n assert box_a.dim() == 3\n assert box_b.dim() == 3\n assert box_a.size(0) == box_b.size(0)\n\n A, B = box_a.size(1), box_b.size(1)\n box_a = box_a.unsqueeze(2).expand(-1, -1, A, -1)\n box_b = box_b.unsqueeze(1).expand(-1, B, -1, -1)\n\n inter_yx0 = torch.max(box_a[..., :2], box_b[..., :2])\n inter_yx1 = torch.min(box_a[..., 2:4], box_b[..., 2:4])\n\n inter_hw = torch.clamp_min_(inter_yx1 - inter_yx0, 0)\n inter_area = torch.prod(inter_hw, dim=-1)\n # del inter_hw, inter_yx0, inter_yx1\n\n hw_a = box_a[..., 2:4] - box_a[..., :2]\n hw_b = box_b[..., 2:4] - box_b[..., :2]\n\n area_a = torch.prod(hw_a, dim=-1)\n area_b = torch.prod(hw_b, dim=-1)\n\n union_area = area_a + area_b - inter_area\n iou = inter_area / (union_area + eps)\n # del inter_area, union_area, area_a, area_b, hw_a, hw_b\n\n center_a = (box_a[..., :2] + box_a[..., 2:4]) / 2\n center_b = (box_b[..., :2] + box_b[..., 2:4]) / 2\n inter_diag = torch.pow(center_b - center_a, 2).sum(dim=-1)\n\n clos_yx0 = torch.min(box_a[..., :2], box_b[..., :2])\n clos_yx1 = torch.max(box_a[..., 2:4], box_b[..., 2:4])\n clos_hw = torch.clamp_min_(clos_yx1 - clos_yx0, 0)\n clos_diag = torch.pow(clos_hw, 2).sum(dim=-1)\n # del clos_yx0, clos_yx1, clos_hw, center_a, center_b\n\n dist = inter_diag / (clos_diag + eps)\n return iou - dist ** 0.9\n\n\ndef cluster_nms_dist_iou(boxes: Tensor, scores: Tensor, iou_threshold=0.5, top_k=200):\n assert boxes.dim() == 2\n assert scores.dim() == 2\n assert boxes.size(0) == scores.size(0)\n\n scores, classes = torch.max(scores, dim=1)\n # scores: [detections]\n _, idx = scores.sort(descending=True)\n idx = idx[:top_k]\n # add batch dim\n top_k_boxes = boxes[idx][None, ...]\n\n # [1, top_k, top_k] -> [top_k, top_k]\n iou = dist_iou_ab(top_k_boxes, top_k_boxes)[0]\n iou = iou.triu_(diagonal=1)\n best_iou = torch.zeros_like(idx)\n\n iou_b = iou\n for i in range(top_k):\n iou_a = iou_b\n best_iou, _ = torch.max(iou_b, dim=0)\n # keep far away boxes\n keep = (best_iou <= iou_threshold)[:, None].expand_as(iou_b)\n iou_b = torch.where(keep, iou, torch.zeros_like(iou_b))\n if iou_b.eq(iou_a).all():\n break\n\n idx = idx[best_iou <= iou_threshold]\n return boxes[idx], scores[idx], classes[idx]\n" ]
[ [ "torch.max", "torch.min", "torch.zeros_like", "torch.tensor", "torch.clamp_min_", "torch.prod", "torch.pow" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
Vedant-S/AI_Project
[ "e5fba10990901ee29b72db9e4932e6d1c72d3673" ]
[ "Hand_Skeleton/handPoseDetector.py" ]
[ "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Apr 2 15:06:04 2020\n\n@author: KIIT\n\"\"\"\n\n\nimport cv2\nimport time\nimport numpy as np\n\n\nprotoFile = \"pose_deploy.prototxt\"\nweightsFile = \"pose_iter_102000.caffemodel\"\nnPoints = 22\nPOSE_PAIRS = [ [0,1],[1,2],[2,3],[3,4],[0,5],[5,6],[6,7],[7,8],[0,9],[9,10],[10,11],[11,12],[0,13],[13,14],[14,15],[15,16],[0,17],[17,18],[18,19],[19,20] ]\n\nthreshold = 0.2\n\n\n#input_source = \"asl.mp4\"\ncap = cv2.VideoCapture(0)\nhasFrame, frame = cap.read()\n\nframeWidth = frame.shape[1]\nframeHeight = frame.shape[0]\n\naspect_ratio = frameWidth/frameHeight\n\ninHeight = 368\ninWidth = int(((aspect_ratio*inHeight)*8)//8)\n\nvid_writer = cv2.VideoWriter('output.avi',cv2.VideoWriter_fourcc('M','J','P','G'), 15, (frame.shape[1],frame.shape[0]))\n\nnet = cv2.dnn.readNetFromCaffe(protoFile, weightsFile)\nk = 0\nwhile 1:\n k+=1\n t = time.time()\n hasFrame, frame = cap.read()\n frameCopy = np.copy(frame)\n if not hasFrame:\n cv2.waitKey()\n break\n\n inpBlob = cv2.dnn.blobFromImage(frame, 1.0 / 255, (inWidth, inHeight),\n (0, 0, 0), swapRB=False, crop=False)\n\n net.setInput(inpBlob)\n\n output = net.forward()\n\n print(\"forward = {}\".format(time.time() - t))\n\n # Empty list to store the detected keypoints\n points = []\n\n for i in range(nPoints):\n # confidence map of corresponding body's part.\n probMap = output[0, i, :, :]\n probMap = cv2.resize(probMap, (frameWidth, frameHeight))\n\n # Find global maxima of the probMap.\n minVal, prob, minLoc, point = cv2.minMaxLoc(probMap)\n\n if prob > threshold :\n cv2.circle(frameCopy, (int(point[0]), int(point[1])), 6, (0, 255, 255), thickness=-1, lineType=cv2.FILLED)\n cv2.putText(frameCopy, \"{}\".format(i), (int(point[0]), int(point[1])), cv2.FONT_HERSHEY_SIMPLEX, .8, (0, 0, 255), 2, lineType=cv2.LINE_AA)\n\n # Add the point to the list if the probability is greater than the threshold\n points.append((int(point[0]), int(point[1])))\n else :\n points.append(None)\n\n # Draw Skeleton\n for pair in POSE_PAIRS:\n partA = pair[0]\n partB = pair[1]\n\n if points[partA] and points[partB]:\n cv2.line(frame, points[partA], points[partB], (0, 255, 255), 2, lineType=cv2.LINE_AA)\n cv2.circle(frame, points[partA], 5, (0, 0, 255), thickness=-1, lineType=cv2.FILLED)\n cv2.circle(frame, points[partB], 5, (0, 0, 255), thickness=-1, lineType=cv2.FILLED)\n\n print(\"Time Taken for frame = {}\".format(time.time() - t))\n\n # cv2.putText(frame, \"time taken = {:.2f} sec\".format(time.time() - t), (50, 50), cv2.FONT_HERSHEY_COMPLEX, .8, (255, 50, 0), 2, lineType=cv2.LINE_AA)\n # cv2.putText(frame, \"Hand Pose using OpenCV\", (50, 50), cv2.FONT_HERSHEY_COMPLEX, 1, (255, 50, 0), 2, lineType=cv2.LINE_AA)\n cv2.imshow('Output-Skeleton', frame)\n # cv2.imwrite(\"video_output/{:03d}.jpg\".format(k), frame)\n key = cv2.waitKey(1)\n if key == 27:\n break\n\n print(\"total = {}\".format(time.time() - t))\n\n vid_writer.write(frame)\ncap.release()\nvid_writer.release()" ]
[ [ "numpy.copy" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
pascal-vecsei/netket
[ "fc96a828e1ac71a7ecd16b4b178ca689e1871958", "fc96a828e1ac71a7ecd16b4b178ca689e1871958", "3985355234183e3f5a5a97d542a6240d490dcda2" ]
[ "test/operator/test_fermions.py", "Examples/Fermions/fermi_hubbard.py", "netket/operator/_local_operator_helpers.py" ]
[ "import netket as nk\nimport numpy as np\nimport netket.experimental as nkx\nfrom netket.experimental.operator._fermions_2nd import _convert_terms_to_spin_blocks\nfrom netket.experimental.operator.fermion import destroy, create, number\n\nimport pytest\n\nop_ferm = {}\nhi = nkx.hilbert.SpinOrbitalFermions(3)\nop_ferm[\"FermionOperator2nd_hermitian\"] = (\n nkx.operator.FermionOperator2nd(\n hi, terms=(((0, 0), (1, 1)), ((1, 0), (0, 1))), weights=(1.0 + 1j, 1 - 1j)\n ),\n True,\n)\nop_ferm[\"FermionOperator2nd_not_hermitian\"] = (\n nkx.operator.FermionOperator2nd(\n hi, terms=(((0, 0), (2, 1)), ((1, 0), (0, 1))), weights=(1.0 + 1j, 1 - 1j)\n ),\n False,\n)\n\nop_ferm[\"FermionOperator2nd_hermitian_3term\"] = (\n nkx.operator.FermionOperator2nd(\n hi,\n (((0, 0), (1, 1), (2, 1)), ((2, 0), (1, 0), (0, 1))),\n weights=(1.0 - 1j, 1 + 1j),\n ),\n True,\n)\nop_ferm[\"FermionOperator2nd_not_hermitian_3term\"] = (\n nkx.operator.FermionOperator2nd(\n hi,\n (((0, 0), (1, 1), (2, 1)), ((3, 0), (1, 0), (0, 1))),\n weights=(1.0 - 1j, 2 + 2j),\n ),\n False,\n)\n\nop_ferm[\"fermihubbard_int\"] = (\n nkx.operator.FermionOperator2nd(\n hi,\n terms=(\n ((0, 1), (0, 0), (1, 1), (1, 0)),\n ((0, 1), (0, 0), (1, 1), (1, 0)),\n ((0, 1), (0, 0), (1, 1), (1, 0)),\n ((0, 1), (0, 0), (1, 1), (1, 0)),\n ),\n weights=(1.0, 1.0, 1.0, 1.0),\n ),\n True,\n)\n\nop_ferm[\"ordering\"] = (\n nkx.operator.FermionOperator2nd(\n hi,\n terms=(((0, 1), (0, 0), (1, 1), (1, 0)), ((1, 1), (1, 0), (0, 1), (0, 0))),\n weights=(1.0 - 1j, 1 + 1j),\n ),\n True,\n)\n\n\[email protected](\n \"op_ferm, is_hermitian\",\n [pytest.param(op, is_herm, id=name) for name, (op, is_herm) in op_ferm.items()],\n)\ndef test_is_hermitian_fermion2nd(op_ferm, is_hermitian):\n print(\"OPERATOR\", op_ferm._operators)\n assert op_ferm.is_hermitian == is_hermitian\n\n\ndef test_fermion_operator_with_strings():\n hi = nkx.hilbert.SpinOrbitalFermions(3)\n terms = (((0, 1), (2, 0)),)\n op1 = nkx.operator.FermionOperator2nd(hi, terms)\n op2 = nkx.operator.FermionOperator2nd(hi, (\"0^ 2\",))\n assert np.allclose(op1.to_dense(), op2.to_dense())\n\n terms = (((0, 1), (1, 0)), ((2, 1), (1, 0)))\n weights = (0.5 - 0.5j, 0.5 + 0.5j)\n op1 = nkx.operator.FermionOperator2nd(hi, terms, weights)\n op2 = nkx.operator.FermionOperator2nd(hi, (\"0^ 1\", \"2^ 1\"), weights)\n assert np.allclose(op1.to_dense(), op2.to_dense())\n\n terms = (((0, 1), (1, 0), (2, 1)), ((2, 1), (1, 0), (0, 1)))\n weights = (0.5 - 0.5j, 0.5 + 0.5j)\n op1 = nkx.operator.FermionOperator2nd(hi, terms, weights)\n op2 = nkx.operator.FermionOperator2nd(hi, (\"0^ 1 2^\", \"2^ 1 0^\"), weights)\n assert np.allclose(op1.to_dense(), op2.to_dense())\n\n\ndef compare_openfermion_fermions():\n # skip test if openfermion not installed\n pytest.importorskip(\"openfermion\")\n from openfermion import FermionOperator, get_sparse_operator\n\n # openfermion\n of = FermionOperator(\"0^ 1\", 1.0) + FermionOperator(\"1^ 0\", 1.0)\n of_dense = get_sparse_operator(of).todense()\n # from_openfermion\n fo = nkx.operator.FermionOperator2nd.from_openfermion(of)\n fo_dense = fo.to_dense()\n # FermionOperator2nd\n hi = nkx.hilbert.SpinOrbitalFermions(2) # two sites\n fermop = nkx.operator.FermionOperator2nd(\n hi, terms=(((0, 1), (1, 0)), ((1, 1), (0, 0))), weights=(1.0, 1.0)\n )\n fermop_dense = fermop.to_dense()\n # compare openfermion vs from_openfermion\n assert np.array_equal(of_dense, fo_dense)\n # compare openfermion vs FermionOperator2nd\n assert np.array_equal(of_dense, fermop_dense)\n # compare from_openfermion vs 
FermionOperator 2nd\n assert np.array_equal(fo_dense, fermop_dense)\n\n\ndef test_add_fermions():\n hi = nkx.hilbert.SpinOrbitalFermions(5)\n op1 = nkx.operator.FermionOperator2nd(hi, terms=(\"1^ 2\"), weights=(1,), constant=2)\n op2 = nkx.operator.FermionOperator2nd(\n hi, terms=(\"3^ 4\"), weights=(1.3,), constant=5.7\n )\n op3 = nkx.operator.FermionOperator2nd(\n hi, terms=(\"3^ 4\", \"1^ 2\"), weights=(1.3, 1), constant=7.7\n )\n op4 = op3 * 2\n op5 = nkx.operator.FermionOperator2nd(\n hi, terms=(\"3^ 4\", \"1^ 2\"), weights=(2 * 1.3, 2 * 1), constant=2 * 7.7\n )\n assert np.allclose((op1 + op2).to_dense(), op3.to_dense())\n assert np.allclose(op4.to_dense(), op5.to_dense())\n\n\ndef test_create_annihil_number():\n hi = nkx.hilbert.SpinOrbitalFermions(5)\n op1 = nkx.operator.FermionOperator2nd(hi, terms=(\"0^ 0\", \"1^ 2\"), weights=(0.3, 2))\n\n # without spin\n def c(site):\n return destroy(hi, site)\n\n def cdag(site):\n return create(hi, site)\n\n def cn(site):\n return number(hi, site)\n\n op2 = 0.3 * cn(0) + 2 * cdag(1) * c(2)\n assert np.allclose(op1.to_dense(), op2.to_dense())\n op3 = nkx.operator.FermionOperator2nd(\n hi, terms=(\"0^ 1\", \"1^ 2\"), weights=(1 + 1j, 2 - 2j), constant=2\n )\n op4 = (1 + 1j) * cdag(0) * c(1) + (2 - 2j) * cdag(1) * c(2) + 2\n assert np.allclose(op3.to_dense(), op4.to_dense())\n\n # same, but with spin\n hi = nkx.hilbert.SpinOrbitalFermions(4, s=1 / 2)\n op1 = nkx.operator.FermionOperator2nd(hi, terms=(\"0^ 0\", \"1^ 6\"), weights=(0.3, 2))\n\n op2 = 0.3 * number(hi, 0, -0.5) + 2 * create(hi, 1, -0.5) * destroy(hi, 2, +0.5)\n assert np.allclose(op1.to_dense(), op2.to_dense())\n op3 = nkx.operator.FermionOperator2nd(\n hi, terms=(\"4^ 1\", \"1^ 2\"), weights=(1 + 1j, 2 - 2j), constant=2\n )\n op4 = (\n (1 + 1j) * create(hi, 0, +0.5) * destroy(hi, 1, -0.5)\n + (2 - 2j) * create(hi, 1, -0.5) * destroy(hi, 2, -0.5)\n + 2\n )\n assert np.allclose(op3.to_dense(), op4.to_dense())\n\n\ndef test_operations_fermions():\n hi = nkx.hilbert.SpinOrbitalFermions(5)\n op1 = nkx.operator.FermionOperator2nd(\n hi, terms=(\"1^ 2\",), weights=(1,), constant=2, dtype=complex\n )\n op2 = nkx.operator.FermionOperator2nd(\n hi, terms=(\"3^ 4\"), weights=(1.3,), constant=5.7\n )\n op2copy = op2.copy()\n assert op2copy.hilbert == op2.hilbert\n assert np.allclose(list(op2._operators.keys()), list(op2copy._operators.keys()))\n assert np.allclose(list(op2._operators.values()), list(op2copy._operators.values()))\n assert op2.is_hermitian == op2copy.is_hermitian\n assert np.allclose(op2.to_dense(), op2copy.to_dense())\n\n op3 = nkx.operator.FermionOperator2nd(\n hi, terms=(\"3^ 4\", \"1^ 2\"), weights=(1.3, 1), constant=7.7\n )\n op12 = op1.copy()\n op12 += op2\n assert np.allclose((op1 + op2).to_dense(), op3.to_dense())\n assert np.allclose(op12.to_dense(), op3.to_dense())\n\n op4 = op3 * 2\n op5 = nkx.operator.FermionOperator2nd(\n hi, terms=(\"3^ 4\", \"1^ 2\"), weights=(2 * 1.3, 2 * 1), constant=2 * 7.7\n )\n op4b = op3.copy()\n op4b *= 2\n assert np.allclose(op4.to_dense(), op5.to_dense())\n assert np.allclose(op4b.to_dense(), op5.to_dense())\n\n op6 = nkx.operator.FermionOperator2nd(\n hi, terms=(\"1^ 2\", \"0^ 1\"), weights=(1j, -1.0j), constant=7.7\n )\n op7 = nkx.operator.FermionOperator2nd(\n hi, terms=(\"1^ 2\", \"0^ 1\"), weights=(1, 1), constant=7.7\n )\n op8 = nkx.operator.FermionOperator2nd(\n hi, terms=(\"1^ 2\", \"0^ 1\"), weights=(1.0 + 1j, 1 - 1j), constant=2 * 7.7\n )\n op67 = op6.copy()\n op67 += op7\n assert np.allclose((op6 + op7).to_dense(), 
op8.to_dense())\n assert np.allclose(op67.to_dense(), op8.to_dense())\n\n op8 = nkx.operator.FermionOperator2nd(\n hi, terms=(\"0^ 1\", \"2^ 3\"), weights=(1 + 1j, 2 - 0.5j), constant=1.0 + 3j\n )\n op8_trueconj = nkx.operator.FermionOperator2nd(\n hi, terms=(\"1^ 0\", \"3^ 2\"), weights=(1 - 1j, 2 + 0.5j), constant=1.0 - 3j\n )\n assert np.allclose(op8.conjugate().to_dense(), op8_trueconj.to_dense())\n\n\ndef test_fermion_op_matmul():\n hi = nkx.hilbert.SpinOrbitalFermions(3)\n op1 = nkx.operator.FermionOperator2nd(hi, terms=(\"0^ 0\", \"1^ 2\"), weights=(0.3, 2))\n\n # multiply with a real constant\n op_real = nkx.operator.FermionOperator2nd(hi, [], [], constant=2.0)\n assert np.allclose((op1 @ op_real).to_dense(), (op1 * 2).to_dense())\n assert np.allclose((op1 * op_real).to_dense(), (op1 * 2).to_dense())\n\n # multiply with a real+complex constant\n op_complex = nkx.operator.FermionOperator2nd(hi, [], [], constant=2.0 + 2j)\n assert np.allclose((op1 @ op_complex).to_dense(), (op1 * (2 + 2j)).to_dense())\n assert np.allclose((op1 * op_complex).to_dense(), (op1 * (2 + 2j)).to_dense())\n\n # multiply with another operator\n op2 = nkx.operator.FermionOperator2nd(\n hi, terms=(\"1^ 1\", \"0^ 2\"), weights=(1 + 1j, 0.5)\n )\n op2b = nkx.operator.FermionOperator2nd(\n hi,\n terms=(\"0^ 0 1^ 1\", \"0^ 0 0^ 2\", \"1^ 2 1^ 1\", \"1^ 2 0^ 2\"),\n weights=(0.3 * (1 + 1j), 0.3 * 0.5, 2 * (1 + 1j), 2 * 0.5),\n )\n assert np.allclose(\n (op1 @ op2).to_dense(),\n op2b.to_dense(),\n )\n assert np.allclose(\n (op1 * op2).to_dense(),\n op2b.to_dense(),\n )\n\n # multiply with another operator + constant\n op3 = nkx.operator.FermionOperator2nd(\n hi, terms=(\"1^ 1\",), weights=(1 + 1j,), constant=5\n )\n op3b = nkx.operator.FermionOperator2nd(\n hi,\n terms=(\"0^ 0 1^ 1\", \"0^ 0\", \"1^ 2 1^ 1\", \"1^ 2\"),\n weights=(0.3 * (1 + 1j), 5 * 0.3, 2 * (1 + 1j), 10),\n constant=0,\n )\n assert np.allclose(\n (op1 @ op3).to_dense(),\n op3b.to_dense(),\n )\n assert np.allclose(\n (op1 * op3).to_dense(),\n op3b.to_dense(),\n )\n\n\ndef test_fermion_add_sub_mul():\n # check addition\n hi = nkx.hilbert.SpinOrbitalFermions(3)\n op1 = nkx.operator.FermionOperator2nd(\n hi, terms=(\"0^ 0\", \"1^ 2\"), weights=(0.3, 2), constant=2\n )\n assert np.allclose((op1 + op1).to_dense(), 2 * op1.to_dense())\n\n op2 = nkx.operator.FermionOperator2nd(\n hi, terms=(\"0^ 0\", \"0^ 1\"), weights=(0.5, 4j), constant=1\n )\n op2b = nkx.operator.FermionOperator2nd(\n hi, terms=(\"0^ 0\", \"1^ 2\", \"0^ 1\"), weights=(0.3 + 0.5, 2, 4j), constant=3\n )\n assert np.allclose((op1 + op2).to_dense(), op2b.to_dense())\n op2c = op2.copy()\n op2c += op1\n assert np.allclose(op2c.to_dense(), op2b.to_dense())\n\n # check substraction\n op2d = nkx.operator.FermionOperator2nd(\n hi, terms=(\"0^ 0\", \"1^ 2\", \"0^ 1\"), weights=(0.3 - 0.5, 2, -4j), constant=1\n )\n assert np.allclose((op1 - op2).to_dense(), op2d.to_dense())\n op1c = op1.copy()\n op1c -= op2\n assert np.allclose(op1c.to_dense(), op2d.to_dense())\n\n # check multiplication with scalar\n op1f = nkx.operator.FermionOperator2nd(\n hi,\n terms=(\"0^ 0\", \"1^ 2\"),\n weights=(\n 3,\n 20,\n ),\n constant=20,\n )\n op1c = op1.copy()\n op1c *= 10\n assert np.allclose((op1 * 10).to_dense(), op1f.to_dense())\n assert np.allclose(op1c.to_dense(), op1f.to_dense())\n\n\[email protected](\"dtype1\", [np.float32, np.float64])\[email protected](\"dtype2\", [np.float32, np.float64])\ndef test_dtype_promotion(dtype1, dtype2):\n hi = nkx.hilbert.SpinOrbitalFermions(3)\n op1 = 
nkx.operator.FermionOperator2nd(\n hi, terms=(\"0^ 0\", \"1^ 2\"), weights=(0.3, 2), constant=2, dtype=dtype1\n )\n op2 = nkx.operator.FermionOperator2nd(\n hi, terms=(\"0^ 1\"), weights=(0.1,), constant=2, dtype=dtype2\n )\n\n assert op1.dtype == dtype1\n assert op2.dtype == dtype2\n assert op1.to_dense().dtype == dtype1\n assert op2.to_dense().dtype == dtype2\n\n assert (-op1).dtype == dtype1\n assert (-op2).dtype == dtype2\n\n assert (op1 + op2).dtype == np.promote_types(op1.dtype, op2.dtype)\n assert (op1 - op2).dtype == np.promote_types(op1.dtype, op2.dtype)\n assert (op1 @ op2).dtype == np.promote_types(op1.dtype, op2.dtype)\n\n a = np.array(0.5, dtype=dtype1)\n assert (op2 + a + op2).dtype == np.promote_types(a.dtype, op2.dtype)\n assert (op2 - a).dtype == np.promote_types(a.dtype, op2.dtype)\n assert (op2 * a).dtype == np.promote_types(a.dtype, op2.dtype)\n\n a = np.array(0.5, dtype=dtype2)\n assert (op1 + a).dtype == np.promote_types(op1.dtype, a.dtype)\n assert (op1 - a).dtype == np.promote_types(op1.dtype, a.dtype)\n assert (op1 * a).dtype == np.promote_types(op1.dtype, a.dtype)\n\n\ndef test_convert_to_spin_blocks():\n # skip test if openfermion not installed\n pytest.importorskip(\"openfermion\")\n import openfermion\n\n hi = nkx.hilbert.SpinOrbitalFermions(3, s=1 / 2)\n term1 = (((0, 1), (1, 0)),)\n term1_conv = _convert_terms_to_spin_blocks(term1, 3, 2)\n assert term1_conv == (((0, 1), (3, 0)),)\n\n term2 = (((2, 1), (3, 0)), ((4, 1), (5, 0)))\n term2_conv = _convert_terms_to_spin_blocks(term2, 3, 2)\n assert term2_conv == (((1, 1), (4, 0)), ((2, 1), (5, 0)))\n\n term3 = (((0, 1), (0, 0), (1, 1), (1, 0)),)\n term3_conv = _convert_terms_to_spin_blocks(term3, 3, 2)\n assert term3_conv == (((0, 1), (0, 0), (3, 1), (3, 0)),)\n\n # check fermi-hubbard - netket\n L = 2\n D = 2\n t = 1 # tunneling/hopping\n U = 0.01 # coulomb\n # create the graph where fermions can hop on\n g = nk.graph.Hypercube(length=L, n_dim=D, pbc=True)\n Nsites = g.n_nodes\n hi = nkx.hilbert.SpinOrbitalFermions(Nsites, s=1 / 2)\n # create an operator representing fermi hubbard interactions\n up = +1 / 2\n down = -1 / 2\n terms = []\n weights = []\n for sz in (up, down):\n for u, v in g.edges():\n c_u = hi._get_index(u, sz)\n c_v = hi._get_index(v, sz)\n\n terms.append([(c_u, 1), (c_v, 0)])\n terms.append([(c_v, 1), (c_u, 0)])\n\n weights.append(-t)\n weights.append(-t)\n\n for u in g.nodes():\n nc_up = hi._get_index(u, up)\n nc_down = hi._get_index(u, down)\n\n terms.append([(nc_up, 1), (nc_up, 0), (nc_down, 1), (nc_down, 0)])\n weights.append(U)\n op = nkx.operator.FermionOperator2nd(hi, terms, weights)\n\n # eigenspectrum\n eig = np.linalg.eigvalsh(op.to_dense())\n\n # check fermi-hubbard - openfermion\n op_of = openfermion.fermi_hubbard(\n L, D, tunneling=t, coulomb=U, periodic=True, spinless=False\n )\n terms_conv = _convert_terms_to_spin_blocks(op_of.terms, Nsites, 2)\n op_conv = nkx.operator.FermionOperator2nd(\n hi, terms_conv, list(op_of.terms.values())\n )\n\n # eigenspectrum\n eig_conv = np.linalg.eigvalsh(op_conv.to_dense())\n\n assert np.allclose(eig_conv, eig)\n assert np.allclose(op.to_dense(), op_conv.to_dense())\n\n\ndef test_identity_zero():\n hi = nkx.hilbert.SpinOrbitalFermions(3)\n op0 = nkx.operator.fermion.zero(hi)\n op1 = nkx.operator.fermion.identity(hi)\n\n assert np.allclose(op0.to_dense(), np.zeros((2**3, 2**3)))\n assert np.allclose(op1.to_dense(), np.identity(2**3))\n hi = nkx.hilbert.SpinOrbitalFermions(3, s=1 / 2)\n op0 = nkx.operator.fermion.zero(hi)\n op1 = 
nkx.operator.fermion.identity(hi)\n\n assert np.allclose(op0.to_dense(), np.zeros(((8**2, 8**2))))\n assert np.allclose(op1.to_dense(), np.identity(8**2))\n", "import netket as nk\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport json\nimport jax\n\nfrom netket import experimental as nkx\n\nL = 2 # take a 2x2 lattice\nD = 2\nt = 1 # tunneling/hopping\nU = 0.01 # coulomb\n\n# create the graph our fermions can hop on\ng = nk.graph.Hypercube(length=L, n_dim=D, pbc=True)\nn_sites = g.n_nodes\n\n# create a hilbert space with 2 up and 2 down spins\nhi = nkx.hilbert.SpinOrbitalFermions(n_sites, s=1 / 2, n_fermions=(2, 2))\n\n# create an operator representing fermi hubbard interactions\n# -t (i^ j + h.c.) + U (i^ i j^ j)\n# we will create a helper function to abbreviate the creation, destruction and number operators\n# each operator has a site and spin projection (sz) in order to find the right position in the hilbert space samples\ndef c(site, sz):\n return nkx.operator.fermion.create(hi, site, sz=sz)\n\n\ndef cdag(site, sz):\n return nkx.operator.fermion.destroy(hi, site, sz=sz)\n\n\ndef nc(site, sz):\n return nkx.operator.fermion.number(hi, site, sz=sz)\n\n\nup = +1 / 2\ndown = -1 / 2\nham = 0.0\nfor sz in (up, down):\n for u, v in g.edges():\n ham += -t * cdag(u, sz) * c(v, sz) - t * cdag(v, sz) * c(u, sz)\nfor u in g.nodes():\n ham += U * nc(u, up) * nc(u, down)\n\nprint(\"Hamiltonian =\", ham.operator_string())\n\n# metropolis exchange moves fermions around according to a graph\n# the physical graph has LxL vertices, but the computational basis defined by the\n# hilbert space contains (2s+1)*L*L occupation numbers\n# by taking a disjoint copy of the lattice, we can\n# move the fermions around independently for both spins\n# and therefore conserve the number of fermions with up and down spin\n\n# g.n_nodes == L*L --> disj_graph == 2*L*L\ndisj_graph = nk.graph.disjoint_union(g, g)\nsa = nk.sampler.MetropolisExchange(hi, graph=disj_graph, n_chains=16)\n\n# since the hilbert basis is a set of occupation numbers, we can take a general RBM\n# we take complex parameters, since it learns sign structures more easily, and for even fermion number, the wave function might be complex\nma = nk.models.RBM(alpha=1, dtype=complex, use_visible_bias=False)\nvs = nk.vqs.MCState(sa, ma, n_discard_per_chain=100, n_samples=512)\n\n# we will use sgd with Stochastic Reconfiguration\nopt = nk.optimizer.Sgd(learning_rate=0.01)\nsr = nk.optimizer.SR(diag_shift=0.1)\n\ngs = nk.driver.VMC(ham, opt, variational_state=vs, preconditioner=sr)\n\n# now run the optimization\n# first step will take longer in order to compile\nexp_name = \"fermions_test\"\ngs.run(500, out=exp_name)\n\n############## plot #################\n\ned_energies = np.linalg.eigvalsh(ham.to_dense())\n\nwith open(f\"{exp_name}.log\", \"r\") as f:\n data = json.load(f)\n\nx = data[\"Energy\"][\"iters\"]\ny = data[\"Energy\"][\"Mean\"][\"real\"]\n\n# plot the energy levels\nplt.axhline(ed_energies[0], color=\"red\", label=\"E0\")\nfor e in ed_energies[1:]:\n plt.axhline(e, color=\"black\")\nplt.plot(x, y, color=\"red\", label=\"VMC\")\nplt.xlabel(\"step\")\nplt.ylabel(\"E\")\nplt.show()\n", "# Copyright 2022 The NetKet Authors - All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, 
software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom typing import Tuple, Union\n\nimport functools\nimport numbers\n\nimport numpy as np\n\nfrom scipy.sparse import spmatrix\n\nfrom netket.hilbert import AbstractHilbert, Fock\nfrom netket.utils.types import DType, Array\n\nfrom ._abstract_operator import AbstractOperator\nfrom ._discrete_operator import DiscreteOperator\n\n\ndef _dtype(obj: Union[numbers.Number, Array, AbstractOperator]) -> DType:\n \"\"\"\n Returns the dtype of the input object\n \"\"\"\n if isinstance(obj, numbers.Number):\n return type(obj)\n elif isinstance(obj, DiscreteOperator):\n return obj.dtype\n elif isinstance(obj, np.ndarray):\n return obj.dtype\n else:\n raise TypeError(f\"cannot deduce dtype of object type {type(obj)}: {obj}\")\n\n\ndef cast_operator_matrix_dtype(matrix, dtype):\n \"\"\"\n Changes the dtype of a matrix, without changing the structural type of the object.\n\n This makes sure that if you pass sparse arrays to a LocalOperator, they remain\n sparse even if you change the dtype\n \"\"\"\n # must copy\n # return np.asarray(matrix, dtype=dtype)\n return matrix.astype(dtype)\n\n\ndef _standardize_matrix_input_type(op):\n \"\"\"\n Standardize the structural type of operators stored in LocalOperator.\n\n Eventually, we could also support spmatrices (but some work will be needed.)\n \"\"\"\n if isinstance(op, list):\n return np.asarray(op)\n elif isinstance(op, spmatrix):\n return op.todense()\n else:\n return op\n\n\ndef canonicalize_input(\n hilbert: AbstractHilbert, operators, acting_on, constant, *, dtype=None\n):\n \"\"\"\n Takes as inputs the inputs to the constructor of LocalOperator and canonicalizes\n them by ensuring the following holds:\n - acting_on is a list of list\n - acting_on[i] are sorted\n - operators is list of matrices\n - all dtypes match\n\n Args:\n hilbert: The hilbert space\n\n Returns:\n List of operators, acting ons and dtypes.\n \"\"\"\n # check if passing a single operator or a list of operators\n if isinstance(acting_on, numbers.Number):\n acting_on = [acting_on]\n\n is_nested = any(hasattr(i, \"__len__\") for i in acting_on)\n if not is_nested:\n operators = [operators]\n acting_on = [acting_on]\n\n if all(len(aon) == 0 for aon in acting_on):\n operators = []\n acting_on = []\n else:\n if max(map(max, acting_on)) >= hilbert.size or min(map(min, acting_on)) < 0:\n raise ValueError(\"An operator acts on an invalid set of sites.\")\n\n acting_on = [tuple(aon) for aon in acting_on]\n # operators = [np.asarray(operator) for operator in operators]\n operators = [_standardize_matrix_input_type(op) for op in operators]\n\n # If we asked for a specific dtype, enforce it.\n if dtype is None:\n dtype = np.promote_types(np.float32, _dtype(constant))\n dtype = functools.reduce(\n lambda dt, op: np.promote_types(dt, op.dtype), operators, dtype\n )\n dtype = np.empty((), dtype=dtype).dtype\n\n canonicalized_operators = []\n canonicalized_acting_on = []\n for (operator, acting_on) in zip(operators, acting_on):\n check_valid_opmatrix(hilbert, operator, acting_on)\n\n if operator.dtype is not dtype:\n operator = cast_operator_matrix_dtype(operator, dtype=dtype)\n\n # re-sort the operator\n operator, acting_on = _reorder_kronecker_product(hilbert, operator, acting_on)\n canonicalized_operators.append(operator)\n 
canonicalized_acting_on.append(acting_on)\n\n return canonicalized_operators, canonicalized_acting_on, dtype\n\n\ndef check_valid_opmatrix(hi, mat, acting_on):\n \"\"\" \"\"\"\n expected_size = np.prod([hi.shape[aon] for aon in acting_on])\n\n if mat.shape != (expected_size, expected_size):\n raise ValueError(\n f\"The matrix of the sub-operator acting on sites {acting_on} \"\n f\"must have shape {expected_size, expected_size}, \"\n f\"but it has shape {mat.shape}.\"\n )\n\n\n# TODO: support sparse arrays without returning dense arrays\ndef _reorder_kronecker_product(hi, mat, acting_on) -> Tuple[Array, Tuple]:\n \"\"\"\n Reorders the matrix resulting from a kronecker product of several\n operators in such a way to sort acting_on.\n\n A conceptual example is the following:\n if `mat = Â ⊗ B̂ ⊗ Ĉ` and `acting_on = [[2],[1],[3]`\n you will get `result = B̂ ⊗ Â ⊗ Ĉ, [[1], [2], [3]].\n\n However, essentially, A,B,C represent some operators acting on\n thei sub-space acting_on[1], [2] and [3] of the hilbert space.\n\n This function also handles any possible set of values in acting_on.\n\n The inner logic uses the Fock.all_states(), number_to_state and\n state_to_number to perform the re-ordering.\n \"\"\"\n acting_on_sorted = np.sort(acting_on)\n if np.array_equal(acting_on_sorted, acting_on):\n return mat, acting_on\n\n # could write custom binary <-> int logic instead of using Fock...\n # Since i need to work with bit-strings (where instead of bits i\n # have integers, in order to support arbitrary size spaces) this\n # is exactly what hilbert.to_number() and viceversa do.\n\n # target ordering binary representation\n hi_subspace = Fock(hi.shape[acting_on_sorted[0]] - 1)\n for site in acting_on_sorted[1:]:\n hi_subspace = hi_subspace * Fock(hi.shape[site] - 1)\n\n hi_unsorted_subspace = Fock(hi.shape[acting_on[0]] - 1)\n for site in acting_on[1:]:\n hi_unsorted_subspace = hi_unsorted_subspace * Fock(hi.shape[site] - 1)\n\n # find how to map target ordering back to unordered\n acting_on_unsorted_ids = np.zeros(len(acting_on), dtype=np.intp)\n for (i, site) in enumerate(acting_on):\n acting_on_unsorted_ids[i] = np.argmax(site == acting_on_sorted)\n\n # now it is valid that\n # acting_on_sorted == acting_on[acting_on_unsorted_ids]\n\n # generate n-bit strings in the target ordering\n v = hi_subspace.all_states()\n\n # convert them to origin (unordered) ordering\n v_unsorted = v[:, acting_on_unsorted_ids]\n # convert the unordered bit-strings to numbers in the target space.\n n_unsorted = hi_unsorted_subspace.states_to_numbers(v_unsorted)\n\n # reorder the matrix\n mat_sorted = mat[n_unsorted, :][:, n_unsorted]\n\n return mat_sorted, tuple(acting_on_sorted)\n\n\n# TODO: support sparse arrays without returning dense arrays\ndef _multiply_operators(\n hilbert, support_A: Tuple, A: Array, support_B: Tuple, B: Array, *, dtype\n) -> Tuple[Tuple, Array]:\n \"\"\"\n Returns the `Tuple[acting_on, Matrix]` representing the operator obtained by\n multiplying the two input operators A and B.\n \"\"\"\n support_A = np.asarray(support_A)\n support_B = np.asarray(support_B)\n\n inters = np.intersect1d(support_A, support_B, return_indices=False)\n\n if support_A.size == support_B.size and np.array_equal(support_A, support_B):\n return tuple(support_A), A @ B\n elif inters.size == 0:\n # disjoint supports\n support = tuple(np.concatenate([support_A, support_B]))\n operator = np.kron(A, B)\n operator, support = _reorder_kronecker_product(hilbert, operator, support)\n return tuple(support), operator\n else:\n 
_support_A = list(support_A)\n _support_B = list(support_B)\n _A = A.copy()\n _B = B.copy()\n\n # expand _act to match _act_i\n supp_B_min = min(support_B)\n for site in support_A:\n if site not in support_B:\n I = np.eye(hilbert.shape[site], dtype=dtype)\n if site < supp_B_min:\n _support_B = [site] + _support_B\n _B = np.kron(I, _B)\n else: # site > actmax\n _support_B = _support_B + [site]\n _B = np.kron(_B, I)\n\n supp_A_min = min(support_A)\n for site in support_B:\n if site not in support_A:\n I = np.eye(hilbert.shape[site], dtype=dtype)\n if site < supp_A_min:\n _support_A = [site] + _support_A\n _A = np.kron(I, _A)\n else: #  site > actmax\n _support_A = _support_A + [site]\n _A = np.kron(_A, I)\n\n # reorder\n _A, _support_A = _reorder_kronecker_product(hilbert, _A, _support_A)\n _B, _support_B = _reorder_kronecker_product(hilbert, _B, _support_B)\n\n if len(_support_A) == len(_support_B) and np.array_equal(\n _support_A, _support_B\n ):\n # back to the case of non-interesecting with same support\n return tuple(_support_A), _A @ _B\n else:\n raise ValueError(\"Something failed\")\n" ]
[ [ "numpy.allclose", "numpy.array_equal", "numpy.promote_types", "numpy.identity", "numpy.array", "numpy.zeros" ], [ "matplotlib.pyplot.axhline", "matplotlib.pyplot.plot", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.show", "matplotlib.pyplot.ylabel" ], [ "numpy.array_equal", "numpy.asarray", "numpy.eye", "numpy.kron", "numpy.promote_types", "numpy.sort", "numpy.concatenate", "numpy.intersect1d", "numpy.argmax", "numpy.prod", "numpy.empty" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
SultanOrazbayev/networkx
[ "5be9755636fa4da71da2e28f8467336d3c0164a7", "5be9755636fa4da71da2e28f8467336d3c0164a7" ]
[ "networkx/algorithms/node_classification/utils.py", "examples/algorithms/plot_subgraphs.py" ]
[ "def _get_label_info(G, label_name):\n \"\"\"Get and return information of labels from the input graph\n\n Parameters\n ----------\n G : Network X graph\n label_name : string\n Name of the target label\n\n Returns\n ----------\n labels : numpy array, shape = [n_labeled_samples, 2]\n Array of pairs of labeled node ID and label ID\n label_dict : numpy array, shape = [n_classes]\n Array of labels\n i-th element contains the label corresponding label ID `i`\n \"\"\"\n import numpy as np\n\n labels = []\n label_to_id = {}\n lid = 0\n for i, n in enumerate(G.nodes(data=True)):\n if label_name in n[1]:\n label = n[1][label_name]\n if label not in label_to_id:\n label_to_id[label] = lid\n lid += 1\n labels.append([i, label_to_id[label]])\n labels = np.array(labels)\n label_dict = np.array(\n [label for label, _ in sorted(label_to_id.items(), key=lambda x: x[1])]\n )\n return (labels, label_dict)\n", "\"\"\"\n=========\nSubgraphs\n=========\nExample of partitioning a directed graph with nodes labeled as\nsupported and unsupported nodes into a list of subgraphs\nthat contain only entirely supported or entirely unsupported nodes.\nAdopted from \nhttps://github.com/lobpcg/python_examples/blob/master/networkx_example.py\n\"\"\"\n\nimport networkx as nx\nimport matplotlib.pyplot as plt\n\n\ndef graph_partitioning(G, plotting=True):\n \"\"\"Partition a directed graph into a list of subgraphs that contain\n only entirely supported or entirely unsupported nodes.\n \"\"\"\n # Categorize nodes by their node_type attribute\n supported_nodes = {n for n, d in G.nodes(data=\"node_type\") if d == \"supported\"}\n unsupported_nodes = {n for n, d in G.nodes(data=\"node_type\") if d == \"unsupported\"}\n\n # Make a copy of the graph.\n H = G.copy()\n # Remove all edges connecting supported and unsupported nodes.\n H.remove_edges_from(\n (n, nbr, d)\n for n, nbrs in G.adj.items()\n if n in supported_nodes\n for nbr, d in nbrs.items()\n if nbr in unsupported_nodes\n )\n H.remove_edges_from(\n (n, nbr, d)\n for n, nbrs in G.adj.items()\n if n in unsupported_nodes\n for nbr, d in nbrs.items()\n if nbr in supported_nodes\n )\n\n # Collect all removed edges for reconstruction.\n G_minus_H = nx.DiGraph()\n G_minus_H.add_edges_from(set(G.edges) - set(H.edges))\n\n if plotting:\n # Plot the stripped graph with the edges removed.\n _node_colors = [c for _, c in H.nodes(data=\"node_color\")]\n _pos = nx.spring_layout(H)\n plt.figure(figsize=(8, 8))\n nx.draw_networkx_edges(H, _pos, alpha=0.3, edge_color=\"k\")\n nx.draw_networkx_nodes(H, _pos, node_color=_node_colors)\n nx.draw_networkx_labels(H, _pos, font_size=14)\n plt.axis(\"off\")\n plt.title(\"The stripped graph with the edges removed.\")\n plt.show()\n # Plot the the edges removed.\n _pos = nx.spring_layout(G_minus_H)\n plt.figure(figsize=(8, 8))\n ncl = [G.nodes[n][\"node_color\"] for n in G_minus_H.nodes]\n nx.draw_networkx_edges(G_minus_H, _pos, alpha=0.3, edge_color=\"k\")\n nx.draw_networkx_nodes(G_minus_H, _pos, node_color=ncl)\n nx.draw_networkx_labels(G_minus_H, _pos, font_size=14)\n plt.axis(\"off\")\n plt.title(\"The removed edges.\")\n plt.show()\n\n # Find the connected components in the stripped undirected graph.\n # And use the sets, specifying the components, to partition\n # the original directed graph into a list of directed subgraphs\n # that contain only entirely supported or entirely unsupported nodes.\n subgraphs = [\n H.subgraph(c).copy() for c in nx.connected_components(H.to_undirected())\n ]\n\n return subgraphs, 
G_minus_H\n\n\n###############################################################################\n# Create an example directed graph.\n# ---------------------------------\n#\n# This directed graph has one input node labeled `in` and plotted in blue color\n# and one output node labeled `out` and plotted in magenta color.\n# The other six nodes are classified as four `supported` plotted in green color\n# and two `unsupported` plotted in red color. The goal is computing a list\n# of subgraphs that contain only entirely `supported` or `unsupported` nodes.\nG_ex = nx.DiGraph()\nG_ex.add_nodes_from([\"In\"], node_type=\"input\", node_color=\"b\")\nG_ex.add_nodes_from([\"A\", \"C\", \"E\", \"F\"], node_type=\"supported\", node_color=\"g\")\nG_ex.add_nodes_from([\"B\", \"D\"], node_type=\"unsupported\", node_color=\"r\")\nG_ex.add_nodes_from([\"Out\"], node_type=\"output\", node_color=\"m\")\nG_ex.add_edges_from(\n [\n (\"In\", \"A\"),\n (\"A\", \"B\"),\n (\"B\", \"C\"),\n (\"B\", \"D\"),\n (\"D\", \"E\"),\n (\"C\", \"F\"),\n (\"E\", \"F\"),\n (\"F\", \"Out\"),\n ]\n)\n\n###############################################################################\n# Plot the original graph.\n# ------------------------\n#\nnode_color_list = [nc for _, nc in G_ex.nodes(data=\"node_color\")]\npos = nx.spectral_layout(G_ex)\nplt.figure(figsize=(8, 8))\nnx.draw_networkx_edges(G_ex, pos, alpha=0.3, edge_color=\"k\")\nnx.draw_networkx_nodes(G_ex, pos, alpha=0.8, node_color=node_color_list)\nnx.draw_networkx_labels(G_ex, pos, font_size=14)\nplt.axis(\"off\")\nplt.title(\"The original graph.\")\nplt.show()\n\n###############################################################################\n# Calculate the subgraphs with plotting all results of intemediate steps.\n# -----------------------------------------------------------------------\n#\nsubgraphs_of_G_ex, removed_edges = graph_partitioning(G_ex, plotting=True)\n\n###############################################################################\n# Plot the results: every subgraph in the list.\n# ---------------------------------------------\n#\nfor subgraph in subgraphs_of_G_ex:\n _pos = nx.spring_layout(subgraph)\n plt.figure(figsize=(8, 8))\n nx.draw_networkx_edges(subgraph, _pos, alpha=0.3, edge_color=\"k\")\n node_color_list_c = [nc for _, nc in subgraph.nodes(data=\"node_color\")]\n nx.draw_networkx_nodes(subgraph, _pos, node_color=node_color_list_c)\n nx.draw_networkx_labels(subgraph, _pos, font_size=14)\n plt.axis(\"off\")\n plt.title(\"One of the subgraphs.\")\n plt.show()\n\n###############################################################################\n# Put the graph back from the list of subgraphs\n# ---------------------------------------------\n#\nG_ex_r = nx.DiGraph()\n# Composing all subgraphs.\nfor subgraph in subgraphs_of_G_ex:\n G_ex_r = nx.compose(G_ex_r, subgraph)\n# Adding the previously stored edges.\nG_ex_r.add_edges_from(removed_edges.edges())\n\n###############################################################################\n# Check that the original graph and the reconstructed graphs are isomorphic.\n# --------------------------------------------------------------------------\n#\nassert nx.is_isomorphic(G_ex, G_ex_r)\n\n###############################################################################\n# Plot the reconstructed graph.\n# -----------------------------\n#\nnode_color_list = [nc for _, nc in G_ex_r.nodes(data=\"node_color\")]\npos = nx.spectral_layout(G_ex_r)\nplt.figure(figsize=(8, 8))\nnx.draw_networkx_edges(G_ex_r, pos, alpha=0.3, 
edge_color=\"k\")\nnx.draw_networkx_nodes(G_ex_r, pos, alpha=0.8, node_color=node_color_list)\nnx.draw_networkx_labels(G_ex_r, pos, font_size=14)\nplt.axis(\"off\")\nplt.title(\"The reconstructed graph.\")\nplt.show()\n" ]
[ [ "numpy.array" ], [ "matplotlib.pyplot.show", "matplotlib.pyplot.axis", "matplotlib.pyplot.title", "matplotlib.pyplot.figure" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
mremilien/ICP
[ "671c600acc75a4126216cfc11c67d44ccfac037f" ]
[ "src/part5.py" ]
[ "# -*- coding: utf-8 -*-\r\n\r\n\"\"\"this is for the part 5 of CourseWork 1.\"\"\"\r\n\r\n__author__ = 'Chengkun Li'\r\n\r\nimport sys\r\nimport os\r\n\r\nimport open3d as o3d\r\nimport numpy as np\r\nimport trimesh\r\n\r\nimport matplotlib\r\nimport matplotlib.pyplot as plt\r\nimport tools.baseICP\r\nimport tools.tools\r\n\r\n\r\n# check whether the data folder exists\r\nFILE_PATH = os.path.dirname(os.path.abspath(__file__))\r\nRES_PATH = os.path.join(FILE_PATH, '../data/bunny_v2')\r\nif not os.path.exists(RES_PATH):\r\n print('cannot find resources folder, please update RES_PATH')\r\n exit(1)\r\n\r\n\r\nif __name__ == '__main__':\r\n # Load data file\r\n files = os.listdir(RES_PATH)\r\n files.sort()\r\n\r\n # start scan plane\r\n start_index = 4\r\n dst_DataFile = files[start_index]\r\n scan_angle = int(dst_DataFile[3:6])\r\n\r\n mesh_fp = os.path.join(RES_PATH, dst_DataFile)\r\n assert os.path.exists(mesh_fp), 'cannot found:' + mesh_fp\r\n dst_tm = trimesh.load(mesh_fp)\r\n tools.tools.trans_trimesh(dst_tm, scan_angle)\r\n\r\n for i in range(len(files) - 1):\r\n next_index = i + 1 + start_index\r\n next_index = (next_index - len(files) if next_index >= len(files) else next_index)\r\n src_DataFile = files[next_index]\r\n scan_angle = int(src_DataFile[3:6])\r\n\r\n mesh_fp = os.path.join(RES_PATH, src_DataFile)\r\n assert os.path.exists(mesh_fp), 'cannot found:' + mesh_fp\r\n src_tm = trimesh.load(mesh_fp)\r\n tools.tools.trans_trimesh(src_tm, scan_angle)\r\n\r\n # ICP\r\n H, _ = tools.baseICP.icp(src_tm, dst_tm, max_iterations=30)\r\n res_tm = src_tm.copy()\r\n res_tm.apply_transform(H)\r\n\r\n # get new dst_tm\r\n dst_vertices_array = np.array(dst_tm.vertices)\r\n res_vertices_array = np.array(res_tm.vertices)\r\n dst_faces_array = np.array(dst_tm.faces)\r\n res_faces_array = np.array(res_tm.faces)\r\n new_vertices = np.vstack((dst_vertices_array, res_vertices_array))\r\n new_faces = np.vstack((dst_faces_array, res_faces_array + dst_vertices_array.shape[0]))\r\n syn_dst_tm = trimesh.Trimesh(vertices=new_vertices, faces=new_faces)\r\n\r\n # update\r\n dst_tm = syn_dst_tm\r\n\r\n # show\r\n fig_mesh = plt.figure(figsize=(16, 8))\r\n for i in range(6):\r\n ax = fig_mesh.add_subplot(2, 3, i + 1, projection='3d')\r\n ax.scatter3D(syn_dst_tm.vertices[:, 2], syn_dst_tm.vertices[:, 0], syn_dst_tm.vertices[:, 1],\r\n c=(abs(syn_dst_tm.vertex_normals)), marker='.')\r\n ax.view_init(elev=30, azim=-40 + 60 * i)\r\n ax.axis('off')\r\n plt.show()\r\n\r\n\r\n\r\n" ]
[ [ "numpy.array", "matplotlib.pyplot.show", "numpy.vstack", "matplotlib.pyplot.figure" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
WongLynn/vnpy_Amerlin-1.1.20
[ "d701d8f12c29cc33f58ea025920b0c7240f74f82" ]
[ "vnpy/trader/utils/NVAnalysis.py" ]
[ "# encoding=utf-8\n'''\n净值分析工具\n提供:净值分析、净值合并分析、相关性分析\n'''\nimport copy\nfrom functools import reduce\n\nimport pandas as pd\nimport numpy as np\n\n\ndef getWeight(nvDf_dict, weightMethod=\"equal\"):\n result = {}\n if weightMethod == \"equal\":\n result = {name: 1 for name in nvDf_dict.keys()}\n elif weightMethod == \"equal_vol\":\n for name in nvDf_dict.keys():\n result[name] = 1 / max(0.01, (nvDf_dict[name][\"return\"].std() * 100))\n elif weightMethod == \"equal_maxdd\":\n for name in nvDf_dict.keys():\n maxDdPercent = abs(nvDf_dict[name]['ddPercent'].min())\n result[name] = 1 / max(0.01, maxDdPercent)\n elif weightMethod == \"sharpe\":\n for name in nvDf_dict.keys():\n dailyReturn = nvDf_dict[name]['return'].mean() * 100\n returnStd = nvDf_dict[name]['return'].std() * 100\n sharpeRatio = dailyReturn / returnStd * np.sqrt(240)\n result[name] = max(0, sharpeRatio)\n elif weightMethod == \"calmar\":\n for name in nvDf_dict.keys():\n df = nvDf_dict[name]\n totalDays = len(df)\n endBalance = df['balance'].iloc[-1]\n totalReturn = (endBalance - 1) * 100\n annualizedReturn = totalReturn / totalDays * 240\n maxDdPercent = abs(df['ddPercent'].min())\n calmarRatio = annualizedReturn / max(0.01, maxDdPercent)\n result[name] = max(0, calmarRatio)\n else:\n raise ValueError(\"weightMethod can only choose equal:等权 equal_vol:波动性标准化 equal_maxdd:最大回撤标准化 sharpe:夏普比率加权 calmar:卡玛比率加权\")\n\n # 权重值之和调整为0\n _sum = 0\n for name in result.keys():\n _sum += result[name]\n for name in result.keys():\n result[name] = result[name] / _sum\n return result\n\n\ndef combineNV(nvDf_dict, weightMethod=\"equal\", weight=None, normalized=True):\n '''\n :param nvDf_dict:各子策略净值表\n :param weightMethod: 内置加权方法 equal:等权 equal_vol:波动性标准化 equal_maxdd:最大回撤标准化 sharpe:夏普比率加权 calmar:卡玛比率加权\n :param weight:自定义权重。要求传入一个dict,key和nvDf_dict相同,值为权重值\n :return:合并净值表, 权重\n '''\n nvDf_dict = copy.deepcopy(nvDf_dict)\n # 对齐数据\n _index = set(nvDf_dict[list(nvDf_dict.keys())[0]].index)\n for name in nvDf_dict.keys():\n _index = _index & set(nvDf_dict[name].index)\n _index = sorted(list(_index))\n for name in nvDf_dict.keys():\n nvDf_dict[name] = nvDf_dict[name].reindex(_index).replace([np.inf, -np.inf], np.nan)\n nvDf_dict[name][\n [\"netPnl\", \"slippage\", \"commission\", \"turnover\", \"tradeCount\", \"tradingPnl\", \"positionPnl\", \"totalPnl\",\n \"return\", \"retWithoutFee\"]] = \\\n nvDf_dict[name][\n [\"netPnl\", \"slippage\", \"commission\", \"turnover\", \"tradeCount\", \"tradingPnl\", \"positionPnl\", \"totalPnl\",\n \"return\", \"retWithoutFee\"]].fillna(0)\n nvDf_dict[name] = nvDf_dict[name].fillna(method=\"ffill\")\n\n # 计算权重\n if weight is None:\n weight = getWeight(nvDf_dict, weightMethod)\n else:\n weight = weight.copy()\n if normalized:\n _sum = 0\n for name in weight.keys():\n _sum += weight[name]\n for name in weight.keys():\n weight[name] = weight[name] / _sum\n\n # 净值归一化\n for name in nvDf_dict.keys():\n df = nvDf_dict[name]\n capital = df['balance'].iloc[0] + df['netPnl'].iloc[0]\n df[\"netPnl\"] = df[\"netPnl\"] / capital\n df[\"slippage\"] = df[\"slippage\"] / capital\n df[\"commission\"] = df[\"commission\"] / capital\n df[\"turnover\"] = df[\"turnover\"] / capital\n df[\"tradingPnl\"] = df[\"tradingPnl\"] / capital\n df[\"positionPnl\"] = df[\"positionPnl\"] / capital\n df[\"totalPnl\"] = df[\"totalPnl\"] / capital\n df[\"balance\"] = df[\"balance\"] / capital\n tradeCount = df[\"tradeCount\"].copy()\n nvDf_dict[name] = df * weight[name]\n if weight[name] > 0:\n nvDf_dict[name][\"tradeCount\"] = tradeCount\n\n # 
计算合并净值表\n def _sum_table(x, y):\n return x + y\n\n combined_NV_table = reduce(_sum_table, nvDf_dict.values())\n combined_NV_table['return'] = combined_NV_table[\"netPnl\"]\n combined_NV_table['retWithoutFee'] = combined_NV_table[\"totalPnl\"]\n combined_NV_table['highlevel'] = combined_NV_table['balance'].rolling(min_periods=1, window=len(combined_NV_table),\n center=False).max()\n combined_NV_table['drawdown'] = combined_NV_table['balance'] - combined_NV_table['highlevel']\n combined_NV_table['ddPercent'] = combined_NV_table['drawdown'] / combined_NV_table['highlevel'] * 100\n\n return combined_NV_table, weight\n\n\ndef getPearsonrMatrix(nvDf_dict):\n nvDf_dict = copy.deepcopy(nvDf_dict)\n # 对齐数据\n _index = set(nvDf_dict[list(nvDf_dict.keys())[0]].index)\n for name in nvDf_dict.keys():\n _index = _index & set(nvDf_dict[name].index)\n _index = sorted(list(_index))\n for name in nvDf_dict.keys():\n nvDf_dict[name] = nvDf_dict[name].reindex(_index).replace([np.inf, -np.inf], np.nan)\n x1 = np.vstack([df[\"return\"].fillna(0) for df in nvDf_dict.values()])\n x2 = np.vstack([df[\"retWithoutFee\"].fillna(0) for df in nvDf_dict.values()])\n r1 = pd.DataFrame(np.corrcoef(x1), columns=nvDf_dict.keys(), index=nvDf_dict.keys())\n r2 = pd.DataFrame(np.corrcoef(x2), columns=nvDf_dict.keys(), index=nvDf_dict.keys())\n return {\"return\": r1, \"retWithoutFee\": r2}\n\n\n# 净值分析\ndef calculateDailyStatistics(df):\n \"\"\"计算按日统计的结果\"\"\"\n if not isinstance(df, pd.DataFrame) or df.size <= 0:\n return None, {}\n\n # 计算统计结果\n df.index = pd.to_datetime(df.index)\n startDate = df.index[0]\n endDate = df.index[-1]\n\n totalDays = len(df)\n profitDays = len(df[df['netPnl'] > 0])\n lossDays = len(df[df['netPnl'] < 0])\n\n capital = df['balance'].iloc[0] + df['netPnl'].iloc[0]\n endBalance = df['balance'].iloc[-1]\n maxDrawdown = df['drawdown'].min()\n maxDdPercent = df['ddPercent'].min()\n\n totalNetPnl = df['netPnl'].sum()\n dailyNetPnl = totalNetPnl / totalDays\n\n totalCommission = df['commission'].sum()\n dailyCommission = totalCommission / totalDays\n\n totalSlippage = df['slippage'].sum()\n dailySlippage = totalSlippage / totalDays\n\n totalTurnover = df['turnover'].sum()\n dailyTurnover = totalTurnover / totalDays\n\n totalTradeCount = df['tradeCount'].sum()\n dailyTradeCount = totalTradeCount / totalDays\n\n totalReturn = (endBalance / capital - 1) * 100\n annualizedReturn = totalReturn / totalDays * 240\n dailyReturn = df['return'].mean() * 100\n returnStd = df['return'].std() * 100\n dailyReturnWithoutFee = df['retWithoutFee'].mean() * 100\n returnWithoutFeeStd = df['retWithoutFee'].std() * 100\n\n if returnStd:\n sharpeRatio = dailyReturn / returnStd * np.sqrt(240)\n else:\n sharpeRatio = 0\n if returnWithoutFeeStd:\n SRWithoutFee = dailyReturnWithoutFee / returnWithoutFeeStd * np.sqrt(240)\n else:\n SRWithoutFee = 0\n theoreticalSRWithoutFee = 0.1155 * np.sqrt(dailyTradeCount * 240)\n calmarRatio = annualizedReturn / abs(maxDdPercent)\n\n # 返回结果\n result = {\n 'startDate': startDate.strftime(\"%Y-%m-%d\"),\n 'endDate': endDate.strftime(\"%Y-%m-%d\"),\n 'totalDays': int(totalDays),\n 'profitDays': int(profitDays),\n 'lossDays': int(lossDays),\n 'endBalance': float(endBalance),\n 'maxDrawdown': float(maxDrawdown),\n 'maxDdPercent': float(maxDdPercent),\n 'totalNetPnl': float(totalNetPnl),\n 'dailyNetPnl': float(dailyNetPnl),\n 'totalCommission': float(totalCommission),\n 'dailyCommission': float(dailyCommission),\n 'totalSlippage': float(totalSlippage),\n 'dailySlippage': float(dailySlippage),\n 
'totalTurnover': float(totalTurnover),\n 'dailyTurnover': float(dailyTurnover),\n 'totalTradeCount': int(totalTradeCount),\n 'dailyTradeCount': float(dailyTradeCount),\n 'totalReturn': float(totalReturn),\n 'annualizedReturn': float(annualizedReturn),\n 'calmarRatio': float(calmarRatio),\n 'dailyReturn': float(dailyReturn),\n 'returnStd': float(returnStd),\n 'sharpeRatio': float(sharpeRatio),\n 'dailyReturnWithoutFee': float(dailyReturnWithoutFee),\n 'returnWithoutFeeStd': float(returnWithoutFeeStd),\n 'SRWithoutFee': float(SRWithoutFee),\n 'theoreticalSRWithoutFee': float(theoreticalSRWithoutFee)\n }\n\n return result\n" ]
[ [ "numpy.corrcoef", "pandas.to_datetime", "numpy.sqrt" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] } ]
yiming1012/koalas
[ "326a11c43bb30cb07063e5baf4dab21b4ec90b9d" ]
[ "databricks/koalas/namespace.py" ]
[ "#\n# Copyright (C) 2019 Databricks, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\n\"\"\"\nWrappers around spark that correspond to common pandas functions.\n\"\"\"\nfrom typing import Optional, Union\nfrom collections import OrderedDict\nfrom collections.abc import Iterable\nimport itertools\n\nimport numpy as np\nimport pandas as pd\n\nfrom pyspark.sql import functions as F\nfrom pyspark.sql.types import ByteType, ShortType, IntegerType, LongType, FloatType, \\\n DoubleType, BooleanType, TimestampType, DecimalType, StringType, DateType, StructType\n\nfrom databricks import koalas as ks # For running doctests and reference resolution in PyCharm.\nfrom databricks.koalas.utils import default_session\nfrom databricks.koalas.frame import DataFrame, _reduce_spark_multi\nfrom databricks.koalas.typedef import pandas_wraps\nfrom databricks.koalas.series import Series, _col\n\n\n__all__ = [\"from_pandas\", \"range\", \"read_csv\", \"read_delta\", \"read_table\", \"read_spark_io\",\n \"read_parquet\", \"read_clipboard\", \"read_excel\", \"read_html\", \"to_datetime\",\n \"get_dummies\", \"concat\", \"melt\", \"isna\", \"isnull\", \"notna\", \"notnull\"]\n\n\ndef from_pandas(pobj: Union['pd.DataFrame', 'pd.Series']) -> Union['Series', 'DataFrame']:\n \"\"\"Create a Koalas DataFrame or Series from a pandas DataFrame or Series.\n\n This is similar to Spark's `SparkSession.createDataFrame()` with pandas DataFrame,\n but this also works with pandas Series and picks the index.\n\n Parameters\n ----------\n pobj : pandas.DataFrame or pandas.Series\n pandas DataFrame or Series to read.\n\n Returns\n -------\n Series or DataFrame\n If a pandas Series is passed in, this function returns a Koalas Series.\n If a pandas DataFrame is passed in, this function returns a Koalas DataFrame.\n \"\"\"\n if isinstance(pobj, pd.Series):\n return Series(pobj)\n elif isinstance(pobj, pd.DataFrame):\n return DataFrame(pobj)\n else:\n raise ValueError(\"Unknown data type: {}\".format(type(pobj)))\n\n\ndef range(start: int,\n end: Optional[int] = None,\n step: int = 1,\n num_partitions: Optional[int] = None) -> DataFrame:\n \"\"\"\n Create a DataFrame with some range of numbers.\n\n The resulting DataFrame has a single int64 column named `id`, containing elements in a range\n from ``start`` to ``end`` (exclusive) with step value ``step``. If only the first parameter\n (i.e. 
start) is specified, we treat it as the end value with the start value being 0.\n\n This is similar to the range function in SparkSession and is used primarily for testing.\n\n Parameters\n ----------\n start : int\n the start value (inclusive)\n end : int, optional\n the end value (exclusive)\n step : int, optional, default 1\n the incremental step\n num_partitions : int, optional\n the number of partitions of the DataFrame\n\n Returns\n -------\n DataFrame\n\n Examples\n --------\n When the first parameter is specified, we generate a range of values up till that number.\n\n >>> ks.range(5)\n id\n 0 0\n 1 1\n 2 2\n 3 3\n 4 4\n\n When start, end, and step are specified:\n\n >>> ks.range(start = 100, end = 200, step = 20)\n id\n 0 100\n 1 120\n 2 140\n 3 160\n 4 180\n \"\"\"\n sdf = default_session().range(start=start, end=end, step=step, numPartitions=num_partitions)\n return DataFrame(sdf)\n\n\ndef read_csv(path, header='infer', names=None, usecols=None,\n mangle_dupe_cols=True, parse_dates=False, comment=None):\n \"\"\"Read CSV (comma-separated) file into DataFrame.\n\n Parameters\n ----------\n path : str\n The path string storing the CSV file to be read.\n header : int, list of int, default ‘infer’\n Whether to to use as the column names, and the start of the data.\n Default behavior is to infer the column names: if no names are passed\n the behavior is identical to `header=0` and column names are inferred from\n the first line of the file, if column names are passed explicitly then\n the behavior is identical to `header=None`. Explicitly pass `header=0` to be\n able to replace existing names\n names : array-like, optional\n List of column names to use. If file contains no header row, then you should\n explicitly pass `header=None`. Duplicates in this list will cause an error to be issued.\n usecols : list-like or callable, optional\n Return a subset of the columns. If list-like, all elements must either be\n positional (i.e. integer indices into the document columns) or strings that\n correspond to column names provided either by the user in names or inferred\n from the document header row(s).\n If callable, the callable function will be evaluated against the column names,\n returning names where the callable function evaluates to `True`.\n mangle_dupe_cols : bool, default True\n Duplicate columns will be specified as 'X0', 'X1', ... 'XN', rather\n than 'X' ... 'X'. 
Passing in False will cause data to be overwritten if\n there are duplicate names in the columns.\n Currently only `True` is allowed.\n parse_dates : boolean or list of ints or names or list of lists or dict, default `False`.\n Currently only `False` is allowed.\n comment: str, optional\n Indicates the line should not be parsed.\n\n Returns\n -------\n DataFrame\n\n See Also\n --------\n DataFrame.to_csv : Write DataFrame to a comma-separated values (csv) file.\n\n Examples\n --------\n >>> ks.read_csv('data.csv') # doctest: +SKIP\n \"\"\"\n if mangle_dupe_cols is not True:\n raise ValueError(\"mangle_dupe_cols can only be `True`: %s\" % mangle_dupe_cols)\n if parse_dates is not False:\n raise ValueError(\"parse_dates can only be `False`: %s\" % parse_dates)\n\n if usecols is not None and not callable(usecols):\n usecols = list(usecols)\n if usecols is None or callable(usecols) or len(usecols) > 0:\n reader = default_session().read.option(\"inferSchema\", \"true\")\n\n if header == 'infer':\n header = 0 if names is None else None\n if header == 0:\n reader.option(\"header\", True)\n elif header is None:\n reader.option(\"header\", False)\n else:\n raise ValueError(\"Unknown header argument {}\".format(header))\n\n if comment is not None:\n if not isinstance(comment, str) or len(comment) != 1:\n raise ValueError(\"Only length-1 comment characters supported\")\n reader.option(\"comment\", comment)\n\n sdf = reader.csv(path)\n\n if header is None:\n sdf = sdf.selectExpr(*[\"`%s` as `%s`\" % (field.name, i)\n for i, field in enumerate(sdf.schema)])\n if names is not None:\n names = list(names)\n if len(set(names)) != len(names):\n raise ValueError('Found non-unique column index')\n if len(names) != len(sdf.schema):\n raise ValueError('Names do not match the number of columns: %d' % len(names))\n sdf = sdf.selectExpr(*[\"`%s` as `%s`\" % (field.name, name)\n for field, name in zip(sdf.schema, names)])\n\n if usecols is not None:\n if callable(usecols):\n cols = [field.name for field in sdf.schema if usecols(field.name)]\n missing = []\n elif all(isinstance(col, int) for col in usecols):\n cols = [field.name for i, field in enumerate(sdf.schema) if i in usecols]\n missing = [col for col in usecols\n if col >= len(sdf.schema) or sdf.schema[col].name not in cols]\n elif all(isinstance(col, str) for col in usecols):\n cols = [field.name for field in sdf.schema if field.name in usecols]\n missing = [col for col in usecols if col not in cols]\n else:\n raise ValueError(\"'usecols' must either be list-like of all strings, \"\n \"all unicode, all integers or a callable.\")\n if len(missing) > 0:\n raise ValueError('Usecols do not match columns, columns expected but not '\n 'found: %s' % missing)\n\n if len(cols) > 0:\n sdf = sdf.select(cols)\n else:\n sdf = default_session().createDataFrame([], schema=StructType())\n else:\n sdf = default_session().createDataFrame([], schema=StructType())\n return DataFrame(sdf)\n\n\ndef read_delta(path: str, version: Optional[str] = None, timestamp: Optional[str] = None,\n **options) -> DataFrame:\n \"\"\"\n Read a Delta Lake table on some file system and return a DataFrame.\n\n If the Delta Lake table is already stored in the catalog (aka the metastore), use 'read_table'.\n\n Parameters\n ----------\n path : string\n Path to the Delta Lake table.\n version : string, optional\n Specifies the table version (based on Delta's internal transaction version) to read from,\n using Delta's time travel feature. 
This sets Delta's 'versionAsOf' option.\n timestamp : string, optional\n Specifies the table version (based on timestamp) to read from,\n using Delta's time travel feature. This must be a valid date or timestamp string in Spark,\n and sets Delta's 'timestampAsOf' option.\n options\n Additional options that can be passed onto Delta.\n\n Returns\n -------\n DataFrame\n\n See Also\n --------\n DataFrame.to_delta\n read_table\n read_spark_io\n read_parquet\n\n Examples\n --------\n >>> ks.range(1).to_delta('%s/read_delta/foo' % path)\n >>> ks.read_delta('%s/read_delta/foo' % path)\n id\n 0 0\n \"\"\"\n if version is not None:\n options['versionAsOf'] = version\n if timestamp is not None:\n options['timestampAsOf'] = timestamp\n return read_spark_io(path, format='delta', options=options)\n\n\ndef read_table(name: str) -> DataFrame:\n \"\"\"\n Read a Spark table and return a DataFrame.\n\n Parameters\n ----------\n name : string\n Table name in Spark.\n\n Returns\n -------\n DataFrame\n\n See Also\n --------\n DataFrame.to_table\n read_delta\n read_parquet\n read_spark_io\n\n Examples\n --------\n >>> ks.range(1).to_table('%s.my_table' % db)\n >>> ks.read_table('%s.my_table' % db)\n id\n 0 0\n \"\"\"\n sdf = default_session().read.table(name)\n return DataFrame(sdf)\n\n\ndef read_spark_io(path: Optional[str] = None, format: Optional[str] = None,\n schema: Union[str, 'StructType'] = None, **options) -> DataFrame:\n \"\"\"Load a DataFrame from a Spark data source.\n\n Parameters\n ----------\n path : string, optional\n Path to the data source.\n format : string, optional\n Specifies the output data source format. Some common ones are:\n\n - 'delta'\n - 'parquet'\n - 'orc'\n - 'json'\n - 'csv'\n schema : string or StructType, optional\n Input schema. If none, Spark tries to infer the schema automatically.\n The schema can either be a Spark StructType, or a DDL-formatted string like\n `col0 INT, col1 DOUBLE`.\n options : dict\n All other options passed directly into Spark's data source.\n\n See Also\n --------\n DataFrame.to_spark_io\n DataFrame.read_table\n DataFrame.read_delta\n DataFrame.read_parquet\n\n Examples\n --------\n >>> ks.range(1).to_spark_io('%s/read_spark_io/data.parquet' % path)\n >>> ks.read_spark_io(\n ... 
'%s/read_spark_io/data.parquet' % path, format='parquet', schema='id long')\n id\n 0 0\n \"\"\"\n sdf = default_session().read.load(path=path, format=format, schema=schema, options=options)\n return DataFrame(sdf)\n\n\ndef read_parquet(path, columns=None) -> DataFrame:\n \"\"\"Load a parquet object from the file path, returning a DataFrame.\n\n Parameters\n ----------\n path : string\n File path\n columns : list, default=None\n If not None, only these columns will be read from the file.\n\n Returns\n -------\n DataFrame\n\n See Also\n --------\n DataFrame.to_parquet\n DataFrame.read_table\n DataFrame.read_delta\n DataFrame.read_spark_io\n\n Examples\n --------\n >>> ks.range(1).to_parquet('%s/read_spark_io/data.parquet' % path)\n >>> ks.read_parquet('%s/read_spark_io/data.parquet' % path, columns=['id'])\n id\n 0 0\n \"\"\"\n if columns is not None:\n columns = list(columns)\n if columns is None or len(columns) > 0:\n sdf = default_session().read.parquet(path)\n if columns is not None:\n fields = [field.name for field in sdf.schema]\n cols = [col for col in columns if col in fields]\n if len(cols) > 0:\n sdf = sdf.select(cols)\n else:\n sdf = default_session().createDataFrame([], schema=StructType())\n else:\n sdf = default_session().createDataFrame([], schema=StructType())\n return DataFrame(sdf)\n\n\ndef read_clipboard(sep=r'\\s+', **kwargs):\n r\"\"\"\n Read text from clipboard and pass to read_csv. See read_csv for the\n full argument list\n\n Parameters\n ----------\n sep : str, default '\\s+'\n A string or regex delimiter. The default of '\\s+' denotes\n one or more whitespace characters.\n\n See Also\n --------\n DataFrame.to_clipboard : Write text out to clipboard.\n\n Returns\n -------\n parsed : DataFrame\n \"\"\"\n return from_pandas(pd.read_clipboard(sep, **kwargs))\n\n\ndef read_excel(io, sheet_name=0, header=0, names=None, index_col=None, usecols=None, squeeze=False,\n dtype=None, engine=None, converters=None, true_values=None, false_values=None,\n skiprows=None, nrows=None, na_values=None, keep_default_na=True, verbose=False,\n parse_dates=False, date_parser=None, thousands=None, comment=None, skipfooter=0,\n convert_float=True, mangle_dupe_cols=True, **kwds):\n \"\"\"\n Read an Excel file into a Koalas DataFrame.\n\n Support both `xls` and `xlsx` file extensions from a local filesystem or URL.\n Support an option to read a single sheet or a list of sheets.\n\n Parameters\n ----------\n io : str, file descriptor, pathlib.Path, ExcelFile or xlrd.Book\n The string could be a URL. Valid URL schemes include http, ftp, s3,\n gcs, and file. For file URLs, a host is expected. For instance, a local\n file could be /path/to/workbook.xlsx.\n sheet_name : str, int, list, or None, default 0\n Strings are used for sheet names. Integers are used in zero-indexed\n sheet positions. Lists of strings/integers are used to request\n multiple sheets. Specify None to get all sheets.\n\n Available cases:\n\n * Defaults to ``0``: 1st sheet as a `DataFrame`\n * ``1``: 2nd sheet as a `DataFrame`\n * ``\"Sheet1\"``: Load sheet with name \"Sheet1\"\n * ``[0, 1, \"Sheet5\"]``: Load first, second and sheet named \"Sheet5\"\n as a dict of `DataFrame`\n * None: All sheets.\n\n header : int, list of int, default 0\n Row (0-indexed) to use for the column labels of the parsed\n DataFrame. If a list of integers is passed those row positions will\n be combined into a ``MultiIndex``. Use None if there is no header.\n names : array-like, default None\n List of column names to use. 
If file contains no header row,\n then you should explicitly pass header=None.\n index_col : int, list of int, default None\n Column (0-indexed) to use as the row labels of the DataFrame.\n Pass None if there is no such column. If a list is passed,\n those columns will be combined into a ``MultiIndex``. If a\n subset of data is selected with ``usecols``, index_col\n is based on the subset.\n usecols : int, str, list-like, or callable default None\n Return a subset of the columns.\n\n * If None, then parse all columns.\n * If str, then indicates comma separated list of Excel column letters\n and column ranges (e.g. \"A:E\" or \"A,C,E:F\"). Ranges are inclusive of\n both sides.\n * If list of int, then indicates list of column numbers to be parsed.\n * If list of string, then indicates list of column names to be parsed.\n * If callable, then evaluate each column name against it and parse the\n column if the callable returns ``True``.\n squeeze : bool, default False\n If the parsed data only contains one column then return a Series.\n dtype : Type name or dict of column -> type, default None\n Data type for data or columns. E.g. {'a': np.float64, 'b': np.int32}\n Use `object` to preserve data as stored in Excel and not interpret dtype.\n If converters are specified, they will be applied INSTEAD\n of dtype conversion.\n engine : str, default None\n If io is not a buffer or path, this must be set to identify io.\n Acceptable values are None or xlrd.\n converters : dict, default None\n Dict of functions for converting values in certain columns. Keys can\n either be integers or column labels, values are functions that take one\n input argument, the Excel cell content, and return the transformed\n content.\n true_values : list, default None\n Values to consider as True.\n false_values : list, default None\n Values to consider as False.\n skiprows : list-like\n Rows to skip at the beginning (0-indexed).\n nrows : int, default None\n Number of rows to parse.\n na_values : scalar, str, list-like, or dict, default None\n Additional strings to recognize as NA/NaN. If dict passed, specific\n per-column NA values. By default the following values are interpreted\n as NaN.\n keep_default_na : bool, default True\n If na_values are specified and keep_default_na is False the default NaN\n values are overridden, otherwise they're appended to.\n verbose : bool, default False\n Indicate number of NA values placed in non-numeric columns.\n parse_dates : bool, list-like, or dict, default False\n The behavior is as follows:\n\n * bool. If True -> try parsing the index.\n * list of int or names. e.g. If [1, 2, 3] -> try parsing columns 1, 2, 3\n each as a separate date column.\n * list of lists. e.g. If [[1, 3]] -> combine columns 1 and 3 and parse as\n a single date column.\n * dict, e.g. {{'foo' : [1, 3]}} -> parse columns 1, 3 as date and call\n result 'foo'\n\n If a column or index contains an unparseable date, the entire column or\n index will be returned unaltered as an object data type. For non-standard\n datetime parsing, use ``pd.to_datetime`` after ``pd.read_csv``\n\n Note: A fast-path exists for iso8601-formatted dates.\n date_parser : function, optional\n Function to use for converting a sequence of string columns to an array of\n datetime instances. The default uses ``dateutil.parser.parser`` to do the\n conversion. 
Koalas will try to call `date_parser` in three different ways,\n advancing to the next if an exception occurs: 1) Pass one or more arrays\n (as defined by `parse_dates`) as arguments; 2) concatenate (row-wise) the\n string values from the columns defined by `parse_dates` into a single array\n and pass that; and 3) call `date_parser` once for each row using one or\n more strings (corresponding to the columns defined by `parse_dates`) as\n arguments.\n thousands : str, default None\n Thousands separator for parsing string columns to numeric. Note that\n this parameter is only necessary for columns stored as TEXT in Excel,\n any numeric columns will automatically be parsed, regardless of display\n format.\n comment : str, default None\n Comments out remainder of line. Pass a character or characters to this\n argument to indicate comments in the input file. Any data between the\n comment string and the end of the current line is ignored.\n skipfooter : int, default 0\n Rows at the end to skip (0-indexed).\n convert_float : bool, default True\n Convert integral floats to int (i.e., 1.0 --> 1). If False, all numeric\n data will be read in as floats: Excel stores all numbers as floats\n internally.\n mangle_dupe_cols : bool, default True\n Duplicate columns will be specified as 'X', 'X.1', ...'X.N', rather than\n 'X'...'X'. Passing in False will cause data to be overwritten if there\n are duplicate names in the columns.\n **kwds : optional\n Optional keyword arguments can be passed to ``TextFileReader``.\n\n Returns\n -------\n DataFrame or dict of DataFrames\n DataFrame from the passed in Excel file. See notes in sheet_name\n argument for more information on when a dict of DataFrames is returned.\n\n See Also\n --------\n DataFrame.to_excel : Write DataFrame to an Excel file.\n DataFrame.to_csv : Write DataFrame to a comma-separated values (csv) file.\n read_csv : Read a comma-separated values (csv) file into DataFrame.\n\n Examples\n --------\n The file can be read using the file name as string or an open file object:\n\n >>> ks.read_excel('tmp.xlsx', index_col=0) # doctest: +SKIP\n Name Value\n 0 string1 1\n 1 string2 2\n 2 #Comment 3\n\n >>> ks.read_excel(open('tmp.xlsx', 'rb'),\n ... sheet_name='Sheet3') # doctest: +SKIP\n Unnamed: 0 Name Value\n 0 0 string1 1\n 1 1 string2 2\n 2 2 #Comment 3\n\n Index and header can be specified via the `index_col` and `header` arguments\n\n >>> ks.read_excel('tmp.xlsx', index_col=None, header=None) # doctest: +SKIP\n 0 1 2\n 0 NaN Name Value\n 1 0.0 string1 1\n 2 1.0 string2 2\n 3 2.0 #Comment 3\n\n Column types are inferred but can be explicitly specified\n\n >>> ks.read_excel('tmp.xlsx', index_col=0,\n ... dtype={'Name': str, 'Value': float}) # doctest: +SKIP\n Name Value\n 0 string1 1.0\n 1 string2 2.0\n 2 #Comment 3.0\n\n True, False, and NA values, and thousands separators have defaults,\n but can be explicitly specified, too. Supply the values you would like\n as strings or lists of strings!\n\n >>> ks.read_excel('tmp.xlsx', index_col=0,\n ... 
na_values=['string1', 'string2']) # doctest: +SKIP\n Name Value\n 0 None 1\n 1 None 2\n 2 #Comment 3\n\n Comment lines in the excel input file can be skipped using the `comment` kwarg\n\n >>> ks.read_excel('tmp.xlsx', index_col=0, comment='#') # doctest: +SKIP\n Name Value\n 0 string1 1.0\n 1 string2 2.0\n 2 None NaN\n \"\"\"\n pdfs = pd.read_excel(\n io=io, sheet_name=sheet_name, header=header, names=names, index_col=index_col,\n usecols=usecols, squeeze=squeeze, dtype=dtype, engine=engine, converters=converters,\n true_values=true_values, false_values=false_values, skiprows=skiprows, nrows=nrows,\n na_values=na_values, keep_default_na=keep_default_na, verbose=verbose,\n parse_dates=parse_dates, date_parser=date_parser, thousands=thousands, comment=comment,\n skipfooter=skipfooter, convert_float=convert_float, mangle_dupe_cols=mangle_dupe_cols,\n kwds=kwds)\n if isinstance(pdfs, dict):\n return OrderedDict([(key, from_pandas(value)) for key, value in pdfs.items()])\n else:\n return from_pandas(pdfs)\n\n\ndef read_html(io, match='.+', flavor=None, header=None, index_col=None,\n skiprows=None, attrs=None, parse_dates=False,\n thousands=',', encoding=None,\n decimal='.', converters=None, na_values=None,\n keep_default_na=True, displayed_only=True):\n r\"\"\"Read HTML tables into a ``list`` of ``DataFrame`` objects.\n\n Parameters\n ----------\n io : str or file-like\n A URL, a file-like object, or a raw string containing HTML. Note that\n lxml only accepts the http, ftp and file url protocols. If you have a\n URL that starts with ``'https'`` you might try removing the ``'s'``.\n\n match : str or compiled regular expression, optional\n The set of tables containing text matching this regex or string will be\n returned. Unless the HTML is extremely simple you will probably need to\n pass a non-empty string here. Defaults to '.+' (match any non-empty\n string). The default value will return all tables contained on a page.\n This value is converted to a regular expression so that there is\n consistent behavior between Beautiful Soup and lxml.\n\n flavor : str or None, container of strings\n The parsing engine to use. 'bs4' and 'html5lib' are synonymous with\n each other, they are both there for backwards compatibility. The\n default of ``None`` tries to use ``lxml`` to parse and if that fails it\n falls back on ``bs4`` + ``html5lib``.\n\n header : int or list-like or None, optional\n The row (or list of rows for a :class:`~ks.MultiIndex`) to use to\n make the columns headers.\n\n index_col : int or list-like or None, optional\n The column (or list of columns) to use to create the index.\n\n skiprows : int or list-like or slice or None, optional\n 0-based. Number of rows to skip after parsing the column integer. If a\n sequence of integers or a slice is given, will skip the rows indexed by\n that sequence. Note that a single element sequence means 'skip the nth\n row' whereas an integer means 'skip n rows'.\n\n attrs : dict or None, optional\n This is a dictionary of attributes that you can pass to use to identify\n the table in the HTML. These are not checked for validity before being\n passed to lxml or Beautiful Soup. However, these attributes must be\n valid HTML table attributes to work correctly. For example, ::\n\n attrs = {'id': 'table'}\n\n is a valid attribute dictionary because the 'id' HTML tag attribute is\n a valid HTML attribute for *any* HTML tag as per `this document\n <http://www.w3.org/TR/html-markup/global-attributes.html>`__. 
::\n\n attrs = {'asdf': 'table'}\n\n is *not* a valid attribute dictionary because 'asdf' is not a valid\n HTML attribute even if it is a valid XML attribute. Valid HTML 4.01\n table attributes can be found `here\n <http://www.w3.org/TR/REC-html40/struct/tables.html#h-11.2>`__. A\n working draft of the HTML 5 spec can be found `here\n <http://www.w3.org/TR/html-markup/table.html>`__. It contains the\n latest information on table attributes for the modern web.\n\n parse_dates : bool, optional\n See :func:`~ks.read_csv` for more details.\n\n thousands : str, optional\n Separator to use to parse thousands. Defaults to ``','``.\n\n encoding : str or None, optional\n The encoding used to decode the web page. Defaults to ``None``.``None``\n preserves the previous encoding behavior, which depends on the\n underlying parser library (e.g., the parser library will try to use\n the encoding provided by the document).\n\n decimal : str, default '.'\n Character to recognize as decimal point (e.g. use ',' for European\n data).\n\n converters : dict, default None\n Dict of functions for converting values in certain columns. Keys can\n either be integers or column labels, values are functions that take one\n input argument, the cell (not column) content, and return the\n transformed content.\n\n na_values : iterable, default None\n Custom NA values\n\n keep_default_na : bool, default True\n If na_values are specified and keep_default_na is False the default NaN\n values are overridden, otherwise they're appended to\n\n displayed_only : bool, default True\n Whether elements with \"display: none\" should be parsed\n\n Returns\n -------\n dfs : list of DataFrames\n\n See Also\n --------\n read_csv\n DataFrame.to_html\n \"\"\"\n pdfs = pd.read_html(\n io=io, match=match, flavor=flavor, header=header, index_col=index_col, skiprows=skiprows,\n attrs=attrs, parse_dates=parse_dates, thousands=thousands, encoding=encoding,\n decimal=decimal, converters=converters, na_values=na_values,\n keep_default_na=keep_default_na, displayed_only=displayed_only)\n return [from_pandas(pdf) for pdf in pdfs]\n\n\ndef to_datetime(arg, errors='raise', format=None, infer_datetime_format=False):\n \"\"\"\n Convert argument to datetime.\n\n Parameters\n ----------\n arg : integer, float, string, datetime, list, tuple, 1-d array, Series\n or DataFrame/dict-like\n\n errors : {'ignore', 'raise', 'coerce'}, default 'raise'\n\n - If 'raise', then invalid parsing will raise an exception\n - If 'coerce', then invalid parsing will be set as NaT\n - If 'ignore', then invalid parsing will return the input\n format : string, default None\n strftime to parse time, eg \"%d/%m/%Y\", note that \"%f\" will parse\n all the way up to nanoseconds.\n infer_datetime_format : boolean, default False\n If True and no `format` is given, attempt to infer the format of the\n datetime strings, and if it can be inferred, switch to a faster\n method of parsing them. In some cases this can increase the parsing\n speed by ~5-10x.\n\n Returns\n -------\n ret : datetime if parsing succeeded.\n Return type depends on input:\n\n - list-like: DatetimeIndex\n - Series: Series of datetime64 dtype\n - scalar: Timestamp\n\n In case when it is not possible to return designated types (e.g. when\n any element of input is before Timestamp.min or after Timestamp.max)\n return will have datetime.datetime type (or corresponding\n array/Series).\n\n Examples\n --------\n Assembling a datetime from multiple columns of a DataFrame. 
The keys can be\n common abbreviations like ['year', 'month', 'day', 'minute', 'second',\n 'ms', 'us', 'ns']) or plurals of the same\n\n >>> df = ks.DataFrame({'year': [2015, 2016],\n ... 'month': [2, 3],\n ... 'day': [4, 5]})\n >>> ks.to_datetime(df)\n 0 2015-02-04\n 1 2016-03-05\n Name: _to_datetime2(arg_day=day, arg_month=month, arg_year=year), dtype: datetime64[ns]\n\n If a date does not meet the `timestamp limitations\n <http://pandas.pydata.org/pandas-docs/stable/timeseries.html\n #timeseries-timestamp-limits>`_, passing errors='ignore'\n will return the original input instead of raising any exception.\n\n Passing errors='coerce' will force an out-of-bounds date to NaT,\n in addition to forcing non-dates (or non-parseable dates) to NaT.\n\n >>> ks.to_datetime('13000101', format='%Y%m%d', errors='ignore')\n datetime.datetime(1300, 1, 1, 0, 0)\n >>> ks.to_datetime('13000101', format='%Y%m%d', errors='coerce')\n NaT\n\n Passing infer_datetime_format=True can often-times speedup a parsing\n if its not an ISO8601 format exactly, but in a regular format.\n\n >>> s = ks.Series(['3/11/2000', '3/12/2000', '3/13/2000'] * 1000)\n >>> s.head()\n 0 3/11/2000\n 1 3/12/2000\n 2 3/13/2000\n 3 3/11/2000\n 4 3/12/2000\n Name: 0, dtype: object\n\n >>> import timeit\n >>> timeit.timeit(\n ... lambda: repr(ks.to_datetime(s, infer_datetime_format=True)),\n ... number = 1) # doctest: +SKIP\n 0.35832712500000063\n\n >>> timeit.timeit(\n ... lambda: repr(ks.to_datetime(s, infer_datetime_format=False)),\n ... number = 1) # doctest: +SKIP\n 0.8895321660000004\n \"\"\"\n if isinstance(arg, Series):\n return _to_datetime1(\n arg,\n errors=errors,\n format=format,\n infer_datetime_format=infer_datetime_format)\n if isinstance(arg, DataFrame):\n return _to_datetime2(\n arg_year=arg['year'],\n arg_month=arg['month'],\n arg_day=arg['day'],\n errors=errors,\n format=format,\n infer_datetime_format=infer_datetime_format)\n if isinstance(arg, dict):\n return _to_datetime2(\n arg_year=arg['year'],\n arg_month=arg['month'],\n arg_day=arg['day'],\n errors=errors,\n format=format,\n infer_datetime_format=infer_datetime_format)\n return pd.to_datetime(\n arg, errors=errors, format=format, infer_datetime_format=infer_datetime_format)\n\n\ndef get_dummies(data, prefix=None, prefix_sep='_', dummy_na=False, columns=None, sparse=False,\n drop_first=False, dtype=None):\n \"\"\"\n Convert categorical variable into dummy/indicator variables, also\n known as one hot encoding.\n\n Parameters\n ----------\n data : array-like, Series, or DataFrame\n prefix : string, list of strings, or dict of strings, default None\n String to append DataFrame column names.\n Pass a list with length equal to the number of columns\n when calling get_dummies on a DataFrame. Alternatively, `prefix`\n can be a dictionary mapping column names to prefixes.\n prefix_sep : string, default '_'\n If appending prefix, separator/delimiter to use. 
Or pass a\n list or dictionary as with `prefix.`\n dummy_na : bool, default False\n Add a column to indicate NaNs, if False NaNs are ignored.\n columns : list-like, default None\n Column names in the DataFrame to be encoded.\n If `columns` is None then all the columns with\n `object` or `category` dtype will be converted.\n sparse : bool, default False\n Whether the dummy-encoded columns should be be backed by\n a :class:`SparseArray` (True) or a regular NumPy array (False).\n In Koalas, this value must be \"False\".\n drop_first : bool, default False\n Whether to get k-1 dummies out of k categorical levels by removing the\n first level.\n dtype : dtype, default np.uint8\n Data type for new columns. Only a single dtype is allowed.\n\n Returns\n -------\n dummies : DataFrame\n\n See Also\n --------\n Series.str.get_dummies\n\n Examples\n --------\n >>> s = ks.Series(list('abca'))\n\n >>> ks.get_dummies(s)\n a b c\n 0 1 0 0\n 1 0 1 0\n 2 0 0 1\n 3 1 0 0\n\n >>> df = ks.DataFrame({'A': ['a', 'b', 'a'], 'B': ['b', 'a', 'c'],\n ... 'C': [1, 2, 3]},\n ... columns=['A', 'B', 'C'])\n\n >>> ks.get_dummies(df, prefix=['col1', 'col2'])\n C col1_a col1_b col2_a col2_b col2_c\n 0 1 1 0 0 1 0\n 1 2 0 1 1 0 0\n 2 3 1 0 0 0 1\n\n >>> ks.get_dummies(ks.Series(list('abcaa')))\n a b c\n 0 1 0 0\n 1 0 1 0\n 2 0 0 1\n 3 1 0 0\n 4 1 0 0\n\n >>> ks.get_dummies(ks.Series(list('abcaa')), drop_first=True)\n b c\n 0 0 0\n 1 1 0\n 2 0 1\n 3 0 0\n 4 0 0\n\n >>> ks.get_dummies(ks.Series(list('abc')), dtype=float)\n a b c\n 0 1.0 0.0 0.0\n 1 0.0 1.0 0.0\n 2 0.0 0.0 1.0\n \"\"\"\n if sparse is not False:\n raise NotImplementedError(\"get_dummies currently does not support sparse\")\n\n if isinstance(columns, str):\n columns = [columns]\n if dtype is None:\n dtype = 'byte'\n\n if isinstance(data, Series):\n if prefix is not None:\n prefix = [str(prefix)]\n columns = [data.name]\n kdf = data.to_dataframe()\n remaining_columns = []\n else:\n if isinstance(prefix, str):\n raise ValueError(\"get_dummies currently does not support prefix as string types\")\n kdf = data.copy()\n if columns is None:\n columns = [column for column in kdf.columns\n if isinstance(data._sdf.schema[column].dataType,\n _get_dummies_default_accept_types)]\n if len(columns) == 0:\n return kdf\n\n if prefix is None:\n prefix = columns\n\n column_set = set(columns)\n remaining_columns = [kdf[column] for column in kdf.columns if column not in column_set]\n\n if any(not isinstance(kdf._sdf.schema[column].dataType, _get_dummies_acceptable_types)\n for column in columns):\n raise ValueError(\"get_dummies currently only accept {} values\"\n .format(', '.join([t.typeName() for t in _get_dummies_acceptable_types])))\n\n if prefix is not None and len(columns) != len(prefix):\n raise ValueError(\n \"Length of 'prefix' ({}) did not match the length of the columns being encoded ({}).\"\n .format(len(prefix), len(columns)))\n\n all_values = _reduce_spark_multi(kdf._sdf, [F.collect_set(F.col(column)).alias(column)\n for column in columns])\n for i, column in enumerate(columns):\n values = sorted(all_values[i])\n if drop_first:\n values = values[1:]\n\n def column_name(value):\n if prefix is None:\n return str(value)\n else:\n return '{}{}{}'.format(prefix[i], prefix_sep, value)\n\n for value in values:\n remaining_columns.append((kdf[column].notnull() & (kdf[column] == value))\n .astype(dtype)\n .rename(column_name(value)))\n if dummy_na:\n remaining_columns.append(kdf[column].isnull().astype(dtype).rename(column_name('nan')))\n\n return kdf[remaining_columns]\n\n\n# 
TODO: there are many parameters to implement and support. See Pandas's pd.concat.\ndef concat(objs, axis=0, join='outer', ignore_index=False):\n \"\"\"\n Concatenate pandas objects along a particular axis with optional set logic\n along the other axes.\n\n Parameters\n ----------\n objs : a sequence of Series or DataFrame\n Any None objects will be dropped silently unless\n they are all None in which case a ValueError will be raised\n axis : {0/'index'}, default 0\n The axis to concatenate along.\n join : {'inner', 'outer'}, default 'outer'\n How to handle indexes on other axis(es)\n ignore_index : boolean, default False\n If True, do not use the index values along the concatenation axis. The\n resulting axis will be labeled 0, ..., n - 1. This is useful if you are\n concatenating objects where the concatenation axis does not have\n meaningful indexing information. Note the index values on the other\n axes are still respected in the join.\n\n Returns\n -------\n concatenated : object, type of objs\n When concatenating all ``Series`` along the index (axis=0), a\n ``Series`` is returned. When ``objs`` contains at least one\n ``DataFrame``, a ``DataFrame`` is returned.\n\n See Also\n --------\n DataFrame.merge\n\n Examples\n --------\n Combine two ``Series``.\n\n >>> s1 = ks.Series(['a', 'b'])\n >>> s2 = ks.Series(['c', 'd'])\n >>> ks.concat([s1, s2])\n 0 a\n 1 b\n 0 c\n 1 d\n Name: 0, dtype: object\n\n Clear the existing index and reset it in the result\n by setting the ``ignore_index`` option to ``True``.\n\n >>> ks.concat([s1, s2], ignore_index=True)\n 0 a\n 1 b\n 2 c\n 3 d\n Name: 0, dtype: object\n\n Combine two ``DataFrame`` objects with identical columns.\n\n >>> df1 = ks.DataFrame([['a', 1], ['b', 2]],\n ... columns=['letter', 'number'])\n >>> df1\n letter number\n 0 a 1\n 1 b 2\n >>> df2 = ks.DataFrame([['c', 3], ['d', 4]],\n ... columns=['letter', 'number'])\n >>> df2\n letter number\n 0 c 3\n 1 d 4\n\n >>> ks.concat([df1, df2])\n letter number\n 0 a 1\n 1 b 2\n 0 c 3\n 1 d 4\n\n Combine ``DataFrame`` and ``Series`` objects with different columns.\n\n >>> ks.concat([df2, s1, s2])\n 0 letter number\n 0 None c 3.0\n 1 None d 4.0\n 0 a None NaN\n 1 b None NaN\n 0 c None NaN\n 1 d None NaN\n\n Combine ``DataFrame`` objects with overlapping columns\n and return everything. Columns outside the intersection will\n be filled with ``None`` values.\n\n >>> df3 = ks.DataFrame([['c', 3, 'cat'], ['d', 4, 'dog']],\n ... 
columns=['letter', 'number', 'animal'])\n >>> df3\n letter number animal\n 0 c 3 cat\n 1 d 4 dog\n\n >>> ks.concat([df1, df3])\n animal letter number\n 0 None a 1\n 1 None b 2\n 0 cat c 3\n 1 dog d 4\n\n Combine ``DataFrame`` objects with overlapping columns\n and return only those that are shared by passing ``inner`` to\n the ``join`` keyword argument.\n\n >>> ks.concat([df1, df3], join=\"inner\")\n letter number\n 0 a 1\n 1 b 2\n 0 c 3\n 1 d 4\n \"\"\"\n if not isinstance(objs, (dict, Iterable)):\n raise TypeError('first argument must be an iterable of koalas '\n 'objects, you passed an object of type '\n '\"{name}\"'.format(name=type(objs).__name__))\n\n if axis not in [0, 'index']:\n raise ValueError('axis should be either 0 or \"index\" currently.')\n\n if all(map(lambda obj: obj is None, objs)):\n raise ValueError(\"All objects passed were None\")\n objs = list(filter(lambda obj: obj is not None, objs))\n\n for obj in objs:\n if not isinstance(obj, (Series, DataFrame)):\n raise TypeError('cannot concatenate object of type '\"'{name}\"'; only ks.Series '\n 'and ks.DataFrame are valid'.format(name=type(objs).__name__))\n\n # Series, Series ...\n # We should return Series if objects are all Series.\n should_return_series = all(map(lambda obj: isinstance(obj, Series), objs))\n\n # DataFrame, Series ... & Series, Series ...\n # In this case, we should return DataFrame.\n new_objs = []\n for obj in objs:\n if isinstance(obj, Series):\n obj = obj.to_dataframe()\n new_objs.append(obj)\n objs = new_objs\n\n # DataFrame, DataFrame, ...\n # All Series are converted into DataFrame and then compute concat.\n if not ignore_index:\n indices_of_kdfs = [kdf._internal.index_map for kdf in objs]\n index_of_first_kdf = indices_of_kdfs[0]\n for index_of_kdf in indices_of_kdfs:\n if index_of_first_kdf != index_of_kdf:\n raise ValueError(\n 'Index type and names should be same in the objects to concatenate. '\n 'You passed different indices '\n '{index_of_first_kdf} and {index_of_kdf}'.format(\n index_of_first_kdf=index_of_first_kdf, index_of_kdf=index_of_kdf))\n\n columns_of_kdfs = [kdf._internal.columns for kdf in objs]\n first_kdf = objs[0]\n if ignore_index:\n columns_of_first_kdf = first_kdf._internal.data_columns\n else:\n columns_of_first_kdf = first_kdf._internal.columns\n if all(current_kdf == columns_of_first_kdf for current_kdf in columns_of_kdfs):\n # If all columns are in the same order and values, use it.\n kdfs = objs\n else:\n if ignore_index:\n columns_to_apply = [kdf._internal.data_columns for kdf in objs]\n else:\n columns_to_apply = [kdf._internal.columns for kdf in objs]\n\n if join == \"inner\":\n interested_columns = set.intersection(*map(set, columns_to_apply))\n # Keep the column order with its firsts DataFrame.\n interested_columns = list(map(\n lambda c: columns_of_first_kdf[columns_of_first_kdf.index(c)],\n interested_columns))\n\n kdfs = []\n for kdf in objs:\n sdf = kdf._sdf.select(interested_columns)\n if ignore_index:\n kdfs.append(DataFrame(sdf))\n else:\n kdfs.append(DataFrame(first_kdf._internal.copy(sdf=sdf)))\n elif join == \"outer\":\n # If there are columns unmatched, just sort the column names.\n merged_columns = set(\n itertools.chain.from_iterable(columns_to_apply))\n\n kdfs = []\n for kdf in objs:\n if ignore_index:\n columns_to_add = merged_columns - set(kdf._internal.data_columns)\n else:\n columns_to_add = merged_columns - set(kdf._internal.columns)\n\n # TODO: NaN and None difference for missing values. 
pandas seems filling NaN.\n kdf = kdf.assign(**dict(zip(columns_to_add, [None] * len(columns_to_add))))\n\n if ignore_index:\n sdf = kdf._sdf.select(sorted(kdf._internal.data_columns))\n else:\n sdf = kdf._sdf.select(\n kdf._internal.index_columns + sorted(kdf._internal.data_columns))\n\n kdf = DataFrame(kdf._internal.copy(sdf=sdf,\n data_columns=sorted(kdf._internal.data_columns)))\n kdfs.append(kdf)\n else:\n raise ValueError(\n \"Only can inner (intersect) or outer (union) join the other axis.\")\n\n concatenated = kdfs[0]._sdf\n for kdf in kdfs[1:]:\n concatenated = concatenated.unionByName(kdf._sdf)\n\n if ignore_index:\n result_kdf = DataFrame(concatenated.select(kdfs[0]._internal.data_columns))\n else:\n result_kdf = DataFrame(kdfs[0]._internal.copy(sdf=concatenated))\n\n if should_return_series:\n # If all input were Series, we should return Series.\n return _col(result_kdf)\n else:\n return result_kdf\n\n\ndef melt(frame, id_vars=None, value_vars=None, var_name='variable',\n value_name='value'):\n return DataFrame.melt(frame, id_vars, value_vars, var_name, value_name)\n\n\nmelt.__doc__ = DataFrame.melt.__doc__\n\n\ndef isna(obj):\n \"\"\"\n Detect missing values for an array-like object.\n\n This function takes a scalar or array-like object and indicates\n whether values are missing (``NaN`` in numeric arrays, ``None`` or ``NaN``\n in object arrays).\n\n Parameters\n ----------\n obj : scalar or array-like\n Object to check for null or missing values.\n\n Returns\n -------\n bool or array-like of bool\n For scalar input, returns a scalar boolean.\n For array input, returns an array of boolean indicating whether each\n corresponding element is missing.\n\n See Also\n --------\n notnull : Boolean inverse of pandas.isnull.\n Series.isna : Detect missing values in a Series.\n Series.isnull : Detect missing values in a Series.\n DataFrame.isna : Detect missing values in a DataFrame.\n DataFrame.isnull : Detect missing values in a DataFrame.\n Index.isna : Detect missing values in an Index.\n Index.isnull : Detect missing values in an Index.\n\n Examples\n --------\n Scalar arguments (including strings) result in a scalar boolean.\n\n >>> ks.isna('dog')\n False\n\n >>> ks.isna(np.nan)\n True\n\n ndarrays result in an ndarray of booleans.\n\n >>> array = np.array([[1, np.nan, 3], [4, 5, np.nan]])\n >>> array\n array([[ 1., nan, 3.],\n [ 4., 5., nan]])\n >>> ks.isna(array)\n array([[False, True, False],\n [False, False, True]])\n\n For Series and DataFrame, the same type is returned, containing booleans.\n\n >>> df = ks.DataFrame({'a': ['ant', 'bee', 'cat'], 'b': ['dog', None, 'fly']})\n >>> df\n a b\n 0 ant dog\n 1 bee None\n 2 cat fly\n\n >>> ks.isna(df)\n a b\n 0 False False\n 1 False True\n 2 False False\n\n >>> ks.isnull(df.b)\n 0 False\n 1 True\n 2 False\n Name: b, dtype: bool\n \"\"\"\n if isinstance(obj, (DataFrame, Series)):\n return obj.isnull()\n else:\n return pd.isnull(obj)\n\n\nisnull = isna\n\n\ndef notna(obj):\n \"\"\"\n Detect existing (non-missing) values.\n\n Return a boolean same-sized object indicating if the values are not NA.\n Non-missing values get mapped to True. 
NA values, such as None or\n :attr:`numpy.NaN`, get mapped to False values.\n\n Returns\n -------\n bool or array-like of bool\n Mask of bool values for each element that\n indicates whether an element is not an NA value.\n\n See Also\n --------\n isna : Detect missing values for an array-like object.\n Series.notna : Boolean inverse of Series.isna.\n Series.notnull :Boolean inverse of Series.isnull.\n DataFrame.notna :Boolean inverse of DataFrame.isna.\n DataFrame.notnull : Boolean inverse of DataFrame.isnull.\n Index.notna : Boolean inverse of Index.isna.\n Index.notnull : Boolean inverse of Index.isnull.\n\n Examples\n --------\n Show which entries in a DataFrame are not NA.\n\n >>> df = ks.DataFrame({'age': [5, 6, np.NaN],\n ... 'born': [pd.NaT, pd.Timestamp('1939-05-27'),\n ... pd.Timestamp('1940-04-25')],\n ... 'name': ['Alfred', 'Batman', ''],\n ... 'toy': [None, 'Batmobile', 'Joker']})\n >>> df\n age born name toy\n 0 5.0 NaT Alfred None\n 1 6.0 1939-05-27 Batman Batmobile\n 2 NaN 1940-04-25 Joker\n\n >>> df.notnull()\n age born name toy\n 0 True False True False\n 1 True True True True\n 2 False True True True\n\n Show which entries in a Series are not NA.\n\n >>> ser = ks.Series([5, 6, np.NaN])\n >>> ser\n 0 5.0\n 1 6.0\n 2 NaN\n Name: 0, dtype: float64\n\n >>> ser.notna()\n 0 True\n 1 True\n 2 False\n Name: 0, dtype: bool\n \"\"\"\n if isinstance(obj, (DataFrame, Series)):\n return obj.notna()\n else:\n return pd.notna(obj)\n\n\nnotnull = notna\n\n\n# @pandas_wraps(return_col=np.datetime64)\n@pandas_wraps\ndef _to_datetime1(arg, errors, format, infer_datetime_format) -> Series[np.datetime64]:\n return pd.to_datetime(\n arg,\n errors=errors,\n format=format,\n infer_datetime_format=infer_datetime_format)\n\n\n# @pandas_wraps(return_col=np.datetime64)\n@pandas_wraps\ndef _to_datetime2(arg_year, arg_month, arg_day,\n errors, format, infer_datetime_format) -> Series[np.datetime64]:\n arg = dict(year=arg_year, month=arg_month, day=arg_day)\n for key in arg:\n if arg[key] is None:\n del arg[key]\n return pd.to_datetime(\n arg,\n errors=errors,\n format=format,\n infer_datetime_format=infer_datetime_format)\n\n\n_get_dummies_default_accept_types = (\n DecimalType, StringType, DateType\n)\n_get_dummies_acceptable_types = _get_dummies_default_accept_types + (\n ByteType, ShortType, IntegerType, LongType, FloatType, DoubleType, BooleanType, TimestampType\n)\n" ]
[ [ "pandas.notna", "pandas.read_clipboard", "pandas.read_excel", "pandas.to_datetime", "pandas.isnull", "pandas.read_html" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "1.3", "1.1", "1.5", "0.24", "1.0", "0.25", "1.2" ], "scipy": [], "tensorflow": [] } ]
gatechzhu/wiggle
[ "664c1909ae768456e662e249fb51c5899334fc3b" ]
[ "wiggle/wiggle.py" ]
[ "import numpy as np\nimport matplotlib.pyplot as plt\n\n\n\ndef insert_zeros(trace, tt=None):\n \"\"\"Insert zero locations in data trace and tt vector based on linear fit\"\"\"\n\n if tt is None:\n tt = np.arange(len(trace))\n\n # Find zeros\n zc_idx = np.where(np.diff(np.signbit(trace)))[0]\n x1 = tt[zc_idx]\n x2 = tt[zc_idx + 1]\n y1 = trace[zc_idx]\n y2 = trace[zc_idx + 1]\n a = (y2 - y1) / (x2 - x1)\n tt_zero = x1 - y1 / a\n\n # split tt and trace\n tt_split = np.split(tt, zc_idx + 1)\n trace_split = np.split(trace, zc_idx + 1)\n tt_zi = tt_split[0]\n trace_zi = trace_split[0]\n\n # insert zeros in tt and trace\n for i in range(len(tt_zero)):\n tt_zi = np.hstack(\n (tt_zi, np.array([tt_zero[i]]), tt_split[i + 1]))\n trace_zi = np.hstack(\n (trace_zi, np.zeros(1), trace_split[i + 1]))\n\n return trace_zi, tt_zi\n\n\ndef wiggle_input_check(data, tt, xx, sf, verbose):\n ''' Helper function for wiggle() and traces() to check input\n\n '''\n\n # Input check for verbose\n if not isinstance(verbose, bool):\n raise TypeError(\"verbose must be a bool\")\n\n # Input check for data\n if type(data).__module__ != np.__name__:\n raise TypeError(\"data must be a numpy array\")\n\n if len(data.shape) != 2:\n raise ValueError(\"data must be a 2D array\")\n\n # Input check for tt\n if tt is None:\n tt = np.arange(data.shape[0])\n if verbose:\n print(\"tt is automatically generated.\")\n print(tt)\n else:\n if type(tt).__module__ != np.__name__:\n raise TypeError(\"tt must be a numpy array\")\n if len(tt.shape) != 1:\n raise ValueError(\"tt must be a 1D array\")\n if tt.shape[0] != data.shape[0]:\n raise ValueError(\"tt must have same as data's rows\")\n\n # Input check for xx\n if xx is None:\n xx = np.arange(data.shape[1])\n if verbose:\n print(\"xx is automatically generated.\")\n print(xx)\n else:\n if type(xx).__module__ != np.__name__:\n raise TypeError(\"tt must be a numpy array\")\n if len(xx.shape) != 1:\n raise ValueError(\"tt must be a 1D array\")\n if tt.shape[0] != data.shape[0]:\n raise ValueError(\"tt must have same as data's rows\")\n if verbose:\n print(xx)\n\n # Input check for streth factor (sf)\n if not isinstance(sf, (int, float)):\n raise TypeError(\"Strech factor(sf) must be a number\")\n\n # Compute trace horizontal spacing\n ts = np.min(np.diff(xx))\n\n # Rescale data by trace_spacing and strech_factor\n data_max_std = np.max(np.std(data, axis=0))\n data = data / data_max_std * ts * sf\n\n return data, tt, xx, ts\n\n\ndef wiggle(data, tt=None, xx=None, color='k', sf=0.15, verbose=False):\n '''Wiggle plot of a sesimic data section\n\n Syntax examples:\n wiggle(data)\n wiggle(data, tt)\n wiggle(data, tt, xx)\n wiggle(data, tt, xx, color)\n fi = wiggle(data, tt, xx, color, sf, verbose)\n\n Use the column major order for array as in Fortran to optimal performance.\n\n The following color abbreviations are supported:\n\n ========== ========\n character color\n ========== ========\n 'b' blue\n 'g' green\n 'r' red\n 'c' cyan\n 'm' magenta\n 'y' yellow\n 'k' black\n 'w' white\n ========== ========\n\n\n '''\n\n # Input check\n data, tt, xx, ts = wiggle_input_check(data, tt, xx, sf, verbose)\n\n # Plot data using matplotlib.pyplot\n Ntr = data.shape[1]\n\n ax = plt.gca()\n for ntr in range(Ntr):\n trace = data[:, ntr]\n offset = xx[ntr]\n\n if verbose:\n print(offset)\n\n trace_zi, tt_zi = insert_zeros(trace, tt)\n ax.fill_betweenx(tt_zi, offset, trace_zi + offset,\n where=trace_zi >= 0,\n facecolor=color)\n ax.plot(trace_zi + offset, tt_zi, color)\n\n ax.set_xlim(xx[0] - ts, xx[-1] + 
ts)\n ax.set_ylim(tt[0], tt[-1])\n ax.invert_yaxis()\n\n\nif __name__ == '__main__':\n data = np.random.randn(1000, 100)\n wiggle(data)\n plt.show()" ]
[ [ "matplotlib.pyplot.gca", "numpy.split", "numpy.signbit", "numpy.arange", "numpy.std", "numpy.random.randn", "numpy.diff", "numpy.array", "matplotlib.pyplot.show", "numpy.zeros" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
thejeshgn/data
[ "945af2e1269b914ff1e3169880169161f4137a58", "945af2e1269b914ff1e3169880169161f4137a58" ]
[ "scripts/biomedical/proteinAtlas/parse_protein_atlas.py", "scripts/us_bls/cpi/generate_csv_mcf.py" ]
[ "# Copyright 2020 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"This script will generate data mcf from Protein Tissue Atlas\nand two enum files: HumanTissueEnum.mcf, HumanCellTypeEnum.mcf\n\"\"\"\nimport re\nimport pandas as pd\n\nfrom absl import app\nfrom absl import flags\n\nFLAGS = flags.FLAGS\n\nflags.DEFINE_string('database', 'normal_tissue.tsv',\n 'input tissue atlas file path.')\n\nflags.DEFINE_string('gene_to_uniprot_list',\n 'gene_to_uniprot_list.txt',\n 'gene_to_uniprot_list file path.',\n short_name='g')\n\nflags.DEFINE_string('uniprot_to_dcid',\n 'uniprot_to_dcid.tsv',\n 'uniprot_to_dcid.tsv file path.',\n short_name='u')\n\nflags.DEFINE_string('data_mcf',\n 'ProteinAtlasData.mcf',\n 'The output data mcf file path.',\n short_name='m')\n\nflags.DEFINE_string('tissue_mcf', 'HumanTissueEnum.mcf',\n 'The output HumanTissueEnum.mcf file path.')\n\nflags.DEFINE_string('cell_mcf', 'HumanCellTypeEnum.mcf',\n 'The output HumanCellTypeEnum.mcf file path.')\nEXPRESSION_MAP = {\n 'Not detected': 'ProteinExpressionNotDetected',\n 'Low': 'ProteinExpressionLow',\n 'Medium': 'ProteinExpressionMedium',\n 'High': 'ProteinExpressionHigh'\n}\n\nRELIABILITY_MAP = {\n 'Enhanced': 'ProteinOccurrenceReliabilityEnhanced',\n 'Supported': 'ProteinOccurrenceReliabilitySupported',\n 'Approved': 'ProteinOccurrenceReliabilityApproved',\n 'Uncertain': 'ProteinOccurrenceReliabilityUncertain'\n}\n\n\ndef get_gene_to_uniprot_list(file_path):\n \"\"\"\n Args:\n file_path for the 'gene_to_uniprot_list.txt'.\n Returns:\n A dict mapping gene code to UniProt protein entry.\n example: {'TSPAN6': ['O43657']}\n \"\"\"\n with open(file_path, 'r') as file:\n lines = file.read().split('\\n')\n gene_to_uniprot_list = {}\n for line in lines:\n # line example: 'TSPAN6: O43657\\n'\n line_split = line.split(': ')\n gene = line_split[0]\n uniprot_list = line_split[1].split(' ')\n gene_to_uniprot_list[gene] = uniprot_list\n return gene_to_uniprot_list\n\n\ndef get_uniprot_to_dcid(file_path):\n \"\"\"\n Args:\n file_path for the 'uniprot_to_dcid.txt'.\n Returns:\n A dict mapping UniProt entry to protein DCID in Data Commons.\n example: {'O43657': 'TSN6_HUMAN'}\n \"\"\"\n with open(file_path, 'r') as file:\n lines = file.read().split('\\n')\n uniprot_to_dcid = {}\n for line in lines[1:-1]:\n line_split = line.split('\\t')\n uniprot = line_split[0]\n dcid = line_split[2]\n\n # multiple uniprot entry maps to one entry name\n if ',' in uniprot:\n uniprots = uniprot.split(',')\n for uni in uniprots:\n uniprot_to_dcid[uni] = dcid\n else:\n uniprot_to_dcid[uniprot] = dcid\n return uniprot_to_dcid\n\n\ndef get_gene_to_dcid_list(gene_to_uniprot_list, uniprot_to_dcid):\n \"\"\"\n Args:\n gene_to_uniprot_list: a dic mapping gene to a list of UniProt entry\n uniprot_to_dcid: a dic mapping UniProt entry to Data Commons DCID\n Returns:\n A dict mapping gene to a list of protein DCID in Data Commons.\n example: {'TSPAN6': ['TSN6_HUMAN']}\n \"\"\"\n gene_to_dcid_list = {}\n # This for loop generate the mapping\n for gene 
in gene_to_uniprot_list:\n # One gene can map to several UniProt entry\n uniprot_list = gene_to_uniprot_list[gene]\n dcid_list = []\n for uniprot in uniprot_list:\n dcid_list.append(uniprot_to_dcid[uniprot])\n gene_to_dcid_list[gene] = dcid_list\n return gene_to_dcid_list\n\n\ndef get_class_name(a_string):\n \"\"\"Convert a name string to format: ThisIsAnUnusualName.\n Take a space delimited string, return a class name such as ThisIsAnUnusualName\n Here we use this function for instance name. Thus it allows to start with a number\n \"\"\"\n joint_name = a_string.title().replace(' ', '')\n # substitute except for _, character, number\n non_legitimate = re.compile(r'[\\W]+')\n class_name = non_legitimate.sub('', joint_name)\n return class_name\n\n\ndef generate_mcf(protein_dcid, tissue, cell, expression, reliability):\n \"\"\"generate a data mcf\"\"\"\n name = '_'.join([protein_dcid, tissue, cell])\n mcf_list = []\n mcf_list.append('Node: dcid:bio/' + name + '\\n')\n mcf_list.append('typeOf: HumanProteinOccurrence' + '\\n')\n mcf_list.append('name: \"' + name + '\"' + '\\n')\n mcf_list.append('detectedProtein: dcs:bio/' + protein_dcid + '\\n')\n mcf_list.append('humanTissue: dcs:' + tissue + '\\n')\n mcf_list.append('humanCellType: dcs:' + cell + '\\n')\n mcf_list.append('proteinExpressionScore: dcs:' + expression + '\\n')\n mcf_list.append('humanProteinOccurrenceReliability: dcs:' + reliability)\n\n return ''.join(mcf_list)\n\n\ndef mcf_from_row(row, gene_to_dcid_list):\n \"\"\"Generate data mcf from each row of the dataframe\"\"\"\n gene = row['Gene name']\n tissue = get_class_name(row['Tissue'])\n cell = get_class_name(row['Cell type'])\n expression = EXPRESSION_MAP[row['Level']]\n reliability = RELIABILITY_MAP[row['Reliability']]\n if gene not in gene_to_dcid_list:\n # skip case when there is no gene to dcid mapping\n return None\n dcid_list = gene_to_dcid_list[gene]\n\n mcf_list = []\n for protein_dcid in dcid_list:\n mcf_list.append(\n generate_mcf(protein_dcid, tissue, cell, expression, reliability))\n return '\\n\\n'.join(mcf_list)\n\n\ndef get_tissue_enum(tissue):\n \"\"\"Generate a enum instance for a tissue\"\"\"\n name = get_class_name(tissue)\n mcf_list = []\n mcf_list.append('Node: dcid:' + name + '\\n')\n mcf_list.append('typeOf: dcs:HumanTissueEnum' + '\\n')\n mcf_list.append('name: \"' + name + '\"\\n')\n mcf_list.append('description: \"' + tissue[0].upper() + tissue[1:] + '\"\\n')\n mcf_list.append('domainIncludes: dcs:HumanTissueEnum\\n')\n\n return ''.join(mcf_list)\n\n\ndef get_cell_enum(cell):\n \"\"\"Generate a enum instance for a cell type\"\"\"\n name = get_class_name(cell)\n mcf_list = []\n mcf_list.append('Node: dcid:' + name + '\\n')\n mcf_list.append('typeOf: dcs:HumanCellTypeEnum' + '\\n')\n mcf_list.append('name: \"' + name + '\"\\n')\n mcf_list.append('description: \"' + cell[0].upper() + cell[1:] + '\"\\n')\n mcf_list.append('domainIncludes: dcs:HumanCellTypeEnum\\n')\n\n return ''.join(mcf_list)\n\n\ndef main(argv):\n \"Main function to read the database file and generate data mcf\"\n database_file = FLAGS.database\n gene_to_uniprot_list_path = FLAGS.gene_to_uniprot_list\n uniprot_to_dcid_path = FLAGS.uniprot_to_dcid\n\n gene_to_uniprot_list = get_gene_to_uniprot_list(gene_to_uniprot_list_path)\n uniprot_to_dcid = get_uniprot_to_dcid(uniprot_to_dcid_path)\n gene_to_dcid_list = get_gene_to_dcid_list(gene_to_uniprot_list,\n uniprot_to_dcid)\n tissue_atlas_path = database_file\n df = pd.read_csv(tissue_atlas_path, sep='\\t', header=[0], squeeze=True)\n\n df = 
df.dropna()\n df['mcf'] = df.apply(lambda row: mcf_from_row(row, gene_to_dcid_list),\n axis=1)\n data_mcf = '\\n\\n'.join(df['mcf'].dropna()) + '\\n'\n\n with open(FLAGS.data_mcf, 'w') as file:\n file.write(data_mcf)\n\n tissues = df['Tissue'].unique()\n tissue_enum_list = pd.Series(tissues).apply(get_tissue_enum)\n\n cells = df['Cell type'].unique()\n cell_enum_list = pd.Series(cells).apply(get_cell_enum)\n\n with open(FLAGS.tissue_mcf, 'w') as file:\n file.write('\\n'.join(tissue_enum_list) + '\\n')\n\n with open(FLAGS.cell_mcf, 'w') as file:\n file.write('\\n'.join(cell_enum_list) + '\\n')\n\n\nif __name__ == '__main__':\n app.run(main)\n", "# Copyright 2020 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n'''\nGenerates the CSVs, StatisticalVariable MCFs, and template MCFs for importing\nUS Burea of Labor Statistics CPI-U, CPI-W, and C-CPI-U series into Data Commons.\nOnly monthly series for the US as a whole and not for parts of the US are\ngenerated. The semi-annually series overlap with the monthly series so they\nare not generated.\n\nThe script replies heavily on the CSVs provided by BLS that contain information,\nsuch as whether the series are seasonally adjusted, about series of a\nparticular type, e.g., https://download.bls.gov/pub/time.series/cu/cu.series.\nThe DataFrames loaded from these CSVs are often referred to as \"info_df\"\nin the script.\n\nRunning the script generates these files:\n- CSVs\n - cpi_u.csv\n - cpi_w.csv\n - c_cpi_u.csv\n- Node MCFs\n - cpi_u.mcf\n - Contains StatisticalVariables for CPI-U series.\n - cpi_w.mcf\n - c_cpi_u.mcf\n - pop_type_enums.mcf\n - Contains populationType enums for all three types of series.\n - unit_enums.mcf\n - Contains unit enums for all three types of series.\n- Template MCFs\n - cpi_u.tmcf\n - Contains the template MCF for CPI-U series.\n - cpi_w.tmcf\n - c_cpi_u.tmcf\n\nThe CSVs have these columns:\n- value\n - Observation values for the series.\n- date\n - Dates of the observations. For monthly series, the dates are of the form\n \"YYYY-MM\" For semi-annually series, the format is the same and the dates\n are the last months of the half years, i.e., June and December.\n- duration\n - Observation periods of the series. 
The durations are \"P1M\" and \"P6M\" for\n monthly series and semi-annually series respectively.\n- statvar\n - DCIDs of the StatisticalVariables meausred by the series.\n- unit\n - DCIDs of the units of the observations.\n\nUsage: python3 generate_csv_mcf.py\n'''\n\nimport re\nimport io\nimport dataclasses\nfrom typing import Set, List, Tuple, Iterable\n\nimport requests\nimport frozendict\nimport pandas as pd\n\n_PREFIX = \"https://download.bls.gov/pub/time.series/\"\n\n# From series types to lists of CSV URLs containing series of those types\nSERIES_TYPES_TO_DATA_URLS = frozendict.frozendict({\n \"cpi_u\": (f\"{_PREFIX}/cu/cu.data.1.AllItems\",\n f\"{_PREFIX}/cu/cu.data.11.USFoodBeverage\",\n f\"{_PREFIX}/cu/cu.data.12.USHousing\",\n f\"{_PREFIX}/cu/cu.data.13.USApparel\",\n f\"{_PREFIX}/cu/cu.data.14.USTransportation\",\n f\"{_PREFIX}/cu/cu.data.15.USMedical\",\n f\"{_PREFIX}/cu/cu.data.16.USRecreation\",\n f\"{_PREFIX}/cu/cu.data.17.USEducationAndCommunication\",\n f\"{_PREFIX}/cu/cu.data.18.USOtherGoodsAndServices\",\n f\"{_PREFIX}/cu/cu.data.20.USCommoditiesServicesSpecial\"),\n \"cpi_w\": (f\"{_PREFIX}/cw/cw.data.1.AllItems\",\n f\"{_PREFIX}/cw/cw.data.11.USFoodBeverage\",\n f\"{_PREFIX}/cw/cw.data.12.USHousing\",\n f\"{_PREFIX}/cw/cw.data.13.USApparel\",\n f\"{_PREFIX}/cw/cw.data.14.USTransportation\",\n f\"{_PREFIX}/cw/cw.data.15.USMedical\",\n f\"{_PREFIX}/cw/cw.data.16.USRecreation\",\n f\"{_PREFIX}/cw/cw.data.17.USEducationAndCommunication\",\n f\"{_PREFIX}/cw/cw.data.18.USOtherGoodsAndServices\",\n f\"{_PREFIX}/cw/cw.data.20.USCommoditiesServicesSpecial\"),\n \"c_cpi_u\": (f\"{_PREFIX}/su/su.data.1.AllItems\",)\n})\n\n# From series types to URLs of CSVs describing the series\nSERIES_TYPES_TO_INFO_URLS = frozendict.frozendict({\n \"cpi_u\": f\"{_PREFIX}/cu/cu.series\",\n \"cpi_w\": f\"{_PREFIX}/cw/cw.series\",\n \"c_cpi_u\": f\"{_PREFIX}/su/su.series\"\n})\n\n# From series types to URLs of CSVs containing mappings from\n# item code to item name\nSERIES_TYPES_TO_EXPENDITURE_TYPES_URLS = frozendict.frozendict({\n \"cpi_u\": f\"{_PREFIX}/cu/cu.item\",\n \"cpi_w\": f\"{_PREFIX}/cw/cw.item\",\n \"c_cpi_u\": f\"{_PREFIX}/su/su.item\"\n})\n\n\[email protected](frozen=True)\nclass SeriesInfo:\n \"\"\"Information about a series. 
For descriptions of the fields, see\n Section 4 of {_PREFIX}/cu/cu.txt.\n \"\"\"\n survey_abbreviation: str\n seasonal_code: str\n periodicity_code: str\n area_code: str\n item_code: str\n series_id: str\n\n def __post_init__(self):\n \"\"\"Validates the fields after init.\"\"\"\n self._validate()\n\n def _validate(self) -> None:\n \"\"\"Validates the fields.\n\n Raises:\n ValueError: Some field(s) is invalid.\n \"\"\"\n if (not self.series_id or len(self.series_id) < 11 or\n len(self.series_id) > 17):\n self._raise_validation_error(\"invalid series_id\")\n if self.survey_abbreviation not in (\"SU\", \"CU\", \"CW\"):\n self._raise_validation_error(\n f\"nvalid survey_abbreviation: {self.survey_abbreviation}\")\n if self.seasonal_code not in (\"S\", \"U\"):\n self._raise_validation_error(\n f\"invalid survey_abbreviation: {self.survey_abbreviation}\")\n if self.periodicity_code not in (\"R\", \"S\"):\n self._raise_validation_error(\n f\"invalid periodicity_code: {self.periodicity_code}\")\n if (not self.area_code or len(self.area_code) != 4):\n self._raise_validation_error(f\"invalid area_code: {self.area_code}\")\n\n def _raise_validation_error(self, message: str) -> None:\n raise ValueError(f\"{self.series_id}: {message}\")\n\n def is_us(self) -> bool:\n \"\"\"Returns if the series is for US as a whole and\n not for parts of US.\"\"\"\n return self.area_code == \"0000\"\n\n def is_monthly(self) -> bool:\n \"\"\"Returns if the series is monthly.\"\"\"\n return self.periodicity_code == \"R\"\n\n def is_semiannually(self) -> bool:\n \"\"\"Returns if the series is semi-annually.\"\"\"\n return self.periodicity_code == \"S\"\n\n def get_mmethod(self) -> str:\n \"\"\"Returns the DCID of the measurement method for this series.\"\"\"\n if self.survey_abbreviation == \"SU\":\n return \"BLSChained\"\n return \"BLSUnchained\"\n\n def get_pop_type(self) -> str:\n \"\"\"Returns the DCID of the population type for this series.\"\"\"\n return f\"BLSItem/{self.item_code}\"\n\n def get_consumer(self) -> str:\n \"\"\"Returns the DCID of the consumer for this series.\"\"\"\n if self.survey_abbreviation == \"CW\":\n return \"UrbanWageEarnerAndClericalWorker\"\n return \"UrbanConsumer\"\n\n def get_mqual(self) -> str:\n \"\"\"Returns the DCID of the measurement qualifier for this series.\"\"\"\n if self.seasonal_code == \"S\":\n return \"BLSSeasonallyAdjusted\"\n return \"BLSSeasonallyUnadjusted\"\n\n def get_statvar(self) -> str:\n \"\"\"Returns the DCID of the statistical variable for this series.\"\"\"\n return (\"ConsumerPriceIndex_\"\n f\"{self.get_pop_type()}_\"\n f\"{self.get_consumer()}_\"\n f\"{self.get_mqual()}\")\n\n def get_unit(self, info_df: pd.DataFrame) -> Tuple[str, str]:\n \"\"\"Returns the DCID of the unit for this series and a description\n of the unit.\n\n Args:\n info_df: DataFrame containing information about the series.\n\n Raises:\n ValueError: The base period obtained from the dataframe is invalid.\n \"\"\"\n row = info_df[info_df[\"series_id\"] == self.series_id]\n num_rows = row.shape[0]\n if num_rows != 1:\n self._raise_validation_error(f\"found {num_rows} in info_df\")\n base = row[\"base_period\"].iloc[0]\n\n # base is described in one of three ways:\n # \"YYYY=100\", e.g., \"1967=100\",\n # \"YYYY-YY=100\", e.g., \"1982-84=100\", or\n # \"MONTH YYYY=100\", e.g., \"DECEMBER 2009=100\"\n if not re.fullmatch(r\"\\d{4}=100|\\d{4}-\\d{2}=100|[A-Z]+ \\d{4}=100\",\n base):\n self._raise_validation_error(f\"invalid base_period: {base}\")\n if \" \" in base:\n month, year, _ = 
re.split(r\"[ =]\", base)\n month = month.lower().title()\n return (f\"IndexPointBasePeriod{month}{year}Equals100\",\n f\"The reference base is {month} {year} equals 100.\")\n elif \"-\" in base:\n year_start, year_end, _ = re.split(r\"[-=]\", base)\n year_end = year_start[:2] + year_end\n return (\n f\"IndexPointBasePeriod{year_start}To{year_end}Equals100\",\n f\"The reference base is {year_start} to {year_end} equals 100.\")\n year, _ = base.split(\"=\")\n return (f\"IndexPointBasePeriod{year}Equals100\",\n f\"The reference base is {year} equals 100.\")\n\n\ndef parse_series_id(series_id: str) -> SeriesInfo:\n \"\"\"Parses a series ID to a SeriesInfo. See Section 4 of\n {_PREFIX}/cu/cu.txt\n for a breakdown of series IDs.\"\"\"\n return SeriesInfo(survey_abbreviation=series_id[:2],\n seasonal_code=series_id[2],\n periodicity_code=series_id[3],\n area_code=series_id[4:8],\n item_code=series_id[8:],\n series_id=series_id)\n\n\ndef generate_unit_enums(info_df: pd.DataFrame, targets: Set[str]) -> Set[str]:\n \"\"\"Returns a list of enum definitions for the units required by the series\n identified by their IDs in \"targets\".\n\n Args:\n info_df: DataFrame containing information about\n all the series in targets.\n targets: Set of series IDs to generate unit enums for.\n \"\"\"\n generated = set()\n for series_id in targets:\n unit, desc = parse_series_id(series_id).get_unit(info_df)\n generated.add((f\"Node: dcid:{unit}\\n\"\n \"typeOf: dcs:UnitOfMeasure\\n\"\n f\"description: \\\"{desc}\\\"\\n\"\n \"descriptionUrl: \\\"https://www.bls.gov/cpi/\"\n \"technical-notes/home.htm\\\"\\n\\n\"))\n return generated\n\n\ndef generate_pop_type_enums(url: str, targets: Set[str]) -> Set[str]:\n \"\"\"Returns a list of enum definitions for the population types required\n by the series identified by their IDs in \"targets\".\n\n Args:\n url: URL to the CSV containing the mappings from item codes to item\n names needed by the type of the series in \"targets\".\n targets: Set of series IDs to generate population\n type enums for.\n\n Raises:\n ValueError: Some series(s) does not have an item code mapping.\n \"\"\"\n df = _download_df(url, sep=\"\\t\", usecols=(\"item_code\", \"item_name\"))\n if \"item_code\" not in df.columns or \"item_name\" not in df.columns:\n raise ValueError(\"item_code or/and item_name columns missing\")\n\n # Make sure every series of interest has an item_code mapping, i.e., has\n # an enum defined for pop type\n df = df[[\"item_code\", \"item_name\"]]\n codes = set(df[\"item_code\"])\n for series_id in targets:\n series_info = parse_series_id(series_id)\n if series_info.item_code not in codes:\n raise ValueError(\n f\"{series_info} does not have an item_code mapping\")\n\n generated = set()\n for row in df.itertuples(index=False):\n generated.add((f\"Node: dcid:BLSItem/{row.item_code}\\n\"\n \"typeOf: dcs:EconomicProductEnum\\n\"\n f\"name: \\\"{row.item_name}\\\"\\n\\n\"))\n return generated\n\n\ndef write_csv(urls: Iterable[str], dest: str, info_df: pd.DataFrame,\n targets: Set[str]) -> None:\n \"\"\"Writes out the CSV containing series of a particular type, e.g., CPI-U.\n\n Args:\n urls: URLs to the CSVs containing the series.\n dest: Path to the output CSV.\n info_df: DataFrame containing information about the series.\n targets: Series to include in the output CSV.\n \"\"\"\n result = pd.DataFrame()\n for url in urls:\n result = result.append(_generate_csv(url, info_df, targets))\n result.to_csv(dest, index=False)\n return result\n\n\ndef _download_df(url: str,\n sep: str = 
\"\\t\",\n usecols: Tuple[str] = None) -> pd.DataFrame:\n \"\"\"Downloads a CSV from a URL and loads it into a DataFrame,\n\n Args:\n url: URL to the CSV.\n sep: Separators used by the CSV. Can be a regex pattern.\n usecols: Columns to keep.\n \"\"\"\n response = requests.get(url)\n response.raise_for_status()\n return pd.read_csv(io.StringIO(response.text),\n sep=sep,\n dtype=\"str\",\n usecols=usecols).rename(columns=lambda col: col.strip())\n\n\ndef _generate_csv(url: str, info_df: pd.DataFrame,\n targets: List[str]) -> pd.DataFrame:\n \"\"\"Returns a DataFrame containing series obtained from \"url\" and specified\n by \"targets\".\n\n Args:\n url: URL to a CSV containing some of the series in \"targets\".\n info_df: DataFrame containing informatino about the series.\n targets: Series to include in the return DataFrame.\n\n Returns:\n A DataFrame of five columns: \"value\", \"date\", \"duration\", \"statvar\",\n and \"unit\". See module docstring for what the columns are.\n \"\"\"\n df = _download_df(url, sep=r\"\\s+\")\n result = pd.DataFrame()\n for series_id, group_df in df.groupby(by=\"series_id\"):\n if series_id not in targets:\n continue\n series_info = parse_series_id(series_id)\n # \"period\" is the months of the observations and is of the form \"MM\"\n # preceded by char 'M', e.g. \"M05\".\n # \"M13\" and \"S03\" are annual averages.\n group_df = group_df[~group_df[\"period\"].isin((\"M13\", \"S03\"))]\n # \"year\" is of the form \"YYYY\".\n if series_info.is_monthly():\n group_df.loc[:, \"date\"] = (group_df[\"year\"] + \"-\" +\n group_df[\"period\"].str[-2:])\n group_df.loc[:, \"duration\"] = \"P1M\"\n else:\n group_df.loc[:, \"date\"] = group_df[\"year\"] + \"-\" + group_df[\n \"period\"].map(lambda period: \"06\" if period == \"S01\" else \"12\")\n group_df.loc[:, \"duration\"] = \"P6M\"\n group_df.loc[:, \"statvar\"] = f\"dcs:{series_info.get_statvar()}\"\n group_df.loc[:, \"unit\"] = f\"dcs:{series_info.get_unit(info_df)[0]}\"\n # \"value\" is the CPI values.\n result = result.append(\n group_df[[\"value\", \"date\", \"duration\", \"statvar\", \"unit\"]])\n return result\n\n\ndef _generate_statvar(series_id: str) -> str:\n \"\"\"Returns the statvar definition for a series.\"\"\"\n series_info = parse_series_id(series_id)\n return (f\"Node: dcid:{series_info.get_statvar()}\\n\"\n \"typeOf: dcs:StatisticalVariable\\n\"\n f\"populationType: dcs:ConsumerGoodsAndServices\\n\"\n f\"consumedThing: dcs:{series_info.get_pop_type()}\\n\"\n f\"measurementQualifier: dcs:{series_info.get_mqual()}\\n\"\n \"measuredProperty: dcs:consumerPriceIndex\\n\"\n \"statType: dcs:measuredValue\\n\"\n f\"consumer: dcs:{series_info.get_consumer()}\\n\"\n f\"description: \\\"The series ID is {series_id}.\\\"\\n\")\n\n\ndef write_statvars(dest: str, targets: Set[str]) -> None:\n \"\"\"Writes out the statistical variable definitions required by the\n series in \"targets\" after sorting for output determinism.\"\"\"\n with open(dest, \"w\") as out:\n for series_id in sorted(targets):\n out.write(_generate_statvar(series_id))\n out.write(\"\\n\")\n\n\ndef filter_series(info_df: pd.DataFrame) -> Set[str]:\n \"\"\"Filters all series provided by BLS and returns only monthly series for\n the US as a whole and not parts of US.\"\"\"\n targets = set()\n # Prioritize monthly series\n for row in info_df.itertuples(index=False):\n series_info = parse_series_id(row.series_id)\n if not series_info.is_us() or not series_info.is_monthly():\n continue\n targets.add(row.series_id)\n return targets\n\n\ndef 
write_set(dest: str, to_write: List[str]) -> None:\n \"\"\"Writes out a set of strings after sorting for output determinism.\"\"\"\n with open(dest, \"w\") as out:\n for elem in sorted(to_write):\n out.write(elem)\n\n\ndef main() -> None:\n \"\"\"Runs the script. See module docstring.\"\"\"\n unit_enums = set()\n pop_type_enums = set()\n for series_type, urls in SERIES_TYPES_TO_DATA_URLS.items():\n info_df = _download_df(SERIES_TYPES_TO_INFO_URLS[series_type],\n sep=r\"\\s*\\t\")\n targets = filter_series(info_df)\n pop_type_enums.update(\n generate_pop_type_enums(\n SERIES_TYPES_TO_EXPENDITURE_TYPES_URLS[series_type], targets))\n unit_enums.update(generate_unit_enums(info_df, targets))\n write_statvars(f\"{series_type}.mcf\", targets)\n write_csv(urls, f\"{series_type}.csv\", info_df, targets)\n write_set(\"unit_enums.mcf\", unit_enums)\n write_set(\"pop_type_enums.mcf\", pop_type_enums)\n\n\nif __name__ == \"__main__\":\n main()\n" ]
[ [ "pandas.read_csv", "pandas.Series" ], [ "pandas.DataFrame" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.3", "1.1", "1.5", "1.2" ], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] } ]
sharky5102/fbmatrix
[ "6558e1b249d94908d92a6475b07ebf9beae776a1" ]
[ "geometry/tree.py" ]
[ "import geometry\nimport math\nimport OpenGL.GL as gl\nimport json\nimport numpy as np\n\nclass tree(geometry.base):\n lampsize = 1/50\n\n vertex_code = \"\"\"\n uniform mat4 modelview;\n uniform mat4 projection;\n \n in highp vec3 position;\n in highp float id;\n\n out highp vec2 v_texcoor;\n out highp float v_id;\n \n void main()\n {\n gl_Position = projection * modelview * vec4(position,1.0);\n v_texcoor = position.xy / 4.0 + 0.5;\n v_id = id;\n } \"\"\"\n\n fragment_code = \"\"\"\n uniform sampler2D tex;\n uniform sampler2D lamptex;\n out highp vec4 f_color;\n in highp vec2 v_texcoor;\n in highp float v_id;\n \n void main()\n {\n highp vec2 lamppos = texelFetch(lamptex, ivec2(int(v_id), 0), 0).xy * vec2(0.5,0.5) + vec2(.5,.5);\n highp vec3 t = textureLod(tex, lamppos, 0.0).rgb;\n\t\t\t\n f_color = vec4(t, 1.0);\n } \"\"\"\n \n attributes = { 'position' : 3, 'id' : 1 }\n \n def __init__(self, jsondata):\n self.lamps = json.loads(jsondata)\n self.tex = 0\n \n for lamp in self.lamps:\n lamp[1] = -lamp[1]\n\n # Present the lamp locations as a 1d texture\n self.mapwidth = pow(2, math.ceil(math.log(len(self.lamps))/math.log(2)))\n\n data = np.zeros(self.mapwidth, (np.float32, 3))\n \n for i in range(0, len(self.lamps)):\n lamp = self.lamps[i]\n data[i][0] = lamp[0];\n data[i][1] = lamp[1];\n data[i][2] = lamp[2];\n \n self.lamptex = gl.glGenTextures(1)\n gl.glBindTexture(gl.GL_TEXTURE_2D, self.lamptex)\n gl.glTexParameterf(gl.GL_TEXTURE_2D, gl.GL_TEXTURE_MAG_FILTER, gl.GL_NEAREST)\n gl.glTexParameterf(gl.GL_TEXTURE_2D, gl.GL_TEXTURE_MIN_FILTER, gl.GL_NEAREST)\n gl.glTexImage2D(gl.GL_TEXTURE_2D, 0, gl.GL_RGB16F, self.mapwidth, 1, 0, gl.GL_RGB, gl.GL_FLOAT, data)\n\n super(tree, self).__init__()\n\n def getVertices(self):\n verts = []\n ids = []\n \n sqverts = [(0, 0, 0), (1, 0, 0), (1, 1, 0), (0, 1, 0), (0, 1, 1), (1, 1, 1), (1, 0, 1), (0, 0, 1)]\n faces = [\n 0, 2, 1, #face front\n 0, 3, 2,\n 2, 3, 4, #face top\n 2, 4, 5,\n 1, 2, 5, #face right\n 1, 5, 6,\n 0, 7, 4, #face left\n 0, 4, 3,\n 5, 4, 7, #face back\n 5, 7, 6,\n 0, 6, 7, #face bottom\n 0, 1, 6\n ]\n\n for i in range(0, len(self.lamps)):\n vert = self.lamps[i]\n for face in faces:\n lx, ly, lz = vert\n x, y, z = sqverts[face]\n \n verts.append((x*self.lampsize+lx, y*self.lampsize+ly, z*self.lampsize+lz))\n ids.append(i)\n \n return { 'position' : verts, 'id' : ids }\n \n def setColor(self, color):\n self.color = color\n\n def draw(self):\n loc = gl.glGetUniformLocation(self.program, \"tex\")\n gl.glUniform1i(loc, 0)\n gl.glActiveTexture(gl.GL_TEXTURE0)\n gl.glBindTexture(gl.GL_TEXTURE_2D, self.tex)\n gl.glGenerateMipmap(gl.GL_TEXTURE_2D)\n\n loc = gl.glGetUniformLocation(self.program, \"lamptex\")\n gl.glUniform1i(loc, 1)\n gl.glActiveTexture(gl.GL_TEXTURE1)\n gl.glBindTexture(gl.GL_TEXTURE_2D, self.lamptex)\n \n super(tree, self).draw()\n\n def setTexture(self, tex):\n self.tex = tex\n" ]
[ [ "numpy.zeros" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
universewill/pytorch-CycleGAN-and-pix2pix
[ "3a5db404d8d2e0112b63445c4d35fc70f32ce194" ]
[ "data/base_dataset.py" ]
[ "\"\"\"This module implements an abstract base class (ABC) 'BaseDataset' for datasets.\n\nIt also includes common transformation functions (e.g., get_transform, __scale_width), which can be later used in subclasses.\n\"\"\"\nimport random\nimport numpy as np\nimport torch.utils.data as data\nfrom PIL import Image\nimport torchvision.transforms as transforms\nfrom abc import ABC, abstractmethod\n\n\nclass BaseDataset(data.Dataset, ABC):\n \"\"\"This class is an abstract base class (ABC) for datasets.\n\n To create a subclass, you need to implement the following four functions:\n -- <__init__>: initialize the class, first call BaseDataset.__init__(self, opt).\n -- <__len__>: return the size of dataset.\n -- <__getitem__>: get a data point.\n -- <modify_commandline_options>: (optionally) add dataset-specific options and set default options.\n \"\"\"\n\n def __init__(self, opt):\n \"\"\"Initialize the class; save the options in the class\n\n Parameters:\n opt (Option class)-- stores all the experiment flags; needs to be a subclass of BaseOptions\n \"\"\"\n self.opt = opt\n self.root = opt.dataroot\n\n @staticmethod\n def modify_commandline_options(parser, is_train):\n \"\"\"Add new dataset-specific options, and rewrite default values for existing options.\n\n Parameters:\n parser -- original option parser\n is_train (bool) -- whether training phase or test phase. You can use this flag to add training-specific or test-specific options.\n\n Returns:\n the modified parser.\n \"\"\"\n return parser\n\n @abstractmethod\n def __len__(self):\n \"\"\"Return the total number of images in the dataset.\"\"\"\n return 0\n\n @abstractmethod\n def __getitem__(self, index):\n \"\"\"Return a data point and its metadata information.\n\n Parameters:\n index - - a random integer for data indexing\n\n Returns:\n a dictionary of data with their names. 
It ususally contains the data itself and its metadata information.\n \"\"\"\n pass\n\n\ndef get_params(opt, size):\n w, h = size\n new_h = h\n new_w = w\n if opt.preprocess == 'resize_and_crop':\n new_h = new_w = opt.load_size\n elif opt.preprocess == 'scale_width_and_crop':\n new_w = opt.load_size\n new_h = opt.load_size * h // w\n\n x = random.randint(0, np.maximum(0, new_w - opt.crop_size))\n y = random.randint(0, np.maximum(0, new_h - opt.crop_size))\n\n flip = random.random() > 0.5\n\n return {'crop_pos': (x, y), 'flip': flip}\n\n\ndef get_transform(opt, params=None, grayscale=False, method=Image.BICUBIC, convert=True):\n transform_list = []\n if grayscale:\n transform_list.append(transforms.Grayscale(1))\n if 'resize' in opt.preprocess:\n osize = [opt.load_size, opt.load_size]\n transform_list.append(transforms.Resize(osize, method))\n elif 'scale_width' in opt.preprocess:\n transform_list.append(transforms.Lambda(lambda img: __scale_width(img, opt.load_size, opt.crop_size, method)))\n elif 'rand_width' in opt.preprocess:\n transform_list.append(transforms.Lambda(lambda img: __rand_width(img, ratio=[0.4, 1.0], crop_size=opt.crop_size)))\n\n if 'crop' in opt.preprocess:\n if params is None:\n transform_list.append(transforms.RandomCrop(opt.crop_size))\n else:\n transform_list.append(transforms.Lambda(lambda img: __crop(img, params['crop_pos'], opt.crop_size)))\n\n if opt.preprocess == 'none':\n transform_list.append(transforms.Lambda(lambda img: __make_power_2(img, base=4, method=method)))\n\n if not opt.no_flip:\n if params is None:\n transform_list.append(transforms.RandomHorizontalFlip())\n elif params['flip']:\n transform_list.append(transforms.Lambda(lambda img: __flip(img, params['flip'])))\n\n if convert:\n transform_list += [transforms.ToTensor()]\n if grayscale:\n transform_list += [transforms.Normalize((0.5,), (0.5,))]\n else:\n transform_list += [transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))]\n return transforms.Compose(transform_list)\n\n\ndef __make_power_2(img, base, method=Image.BICUBIC):\n ow, oh = img.size\n h = int(round(oh / base) * base)\n w = int(round(ow / base) * base)\n if h == oh and w == ow:\n return img\n\n __print_size_warning(ow, oh, w, h)\n return img.resize((w, h), method)\n\n\ndef __scale_width(img, target_size, crop_size, method=Image.BICUBIC):\n ow, oh = img.size\n if ow == target_size and oh >= crop_size:\n return img\n w = target_size\n h = int(max(target_size * oh / ow, crop_size))\n return img.resize((w, h), method)\n\n\ndef __rand_width(img, ratio=[0.4, 1.0], crop_size=256.0, method=Image.BICUBIC):\n ow, oh = img.size\n ratio = random.uniform(ratio[0], ratio[1])\n ratio = max(ratio, float(crop_size)/ow, float(crop_size)/oh)\n new_w, new_h = int(ratio*ow), int(ratio*oh)\n return img.resize((new_w, new_h), method)\n\n\ndef __crop(img, pos, size):\n ow, oh = img.size\n x1, y1 = pos\n tw = th = size\n if (ow > tw or oh > th):\n return img.crop((x1, y1, x1 + tw, y1 + th))\n return img\n\n\ndef __flip(img, flip):\n if flip:\n return img.transpose(Image.FLIP_LEFT_RIGHT)\n return img\n\n\ndef __print_size_warning(ow, oh, w, h):\n \"\"\"Print warning information about image size(only print once)\"\"\"\n if not hasattr(__print_size_warning, 'has_printed'):\n print(\"The image size needs to be a multiple of 4. \"\n \"The loaded image size was (%d, %d), so it was adjusted to \"\n \"(%d, %d). This adjustment will be done to all images \"\n \"whose sizes are not multiples of 4\" % (ow, oh, w, h))\n __print_size_warning.has_printed = True\n" ]
[ [ "numpy.maximum" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
sebpuetz/ffp
[ "76649e5206a262afde3d7c1db41798cc5447ae89" ]
[ "tests/test_storage.py" ]
[ "import contextlib\nimport os\n\nimport ffp\nimport ffp.io\nimport numpy as np\nimport pytest\nimport tempfile\n\n\ndef test_read_array(tests_root, vocab_array_tuple):\n with pytest.raises(TypeError):\n ffp.storage.load_storage(None)\n with pytest.raises(ffp.io.FinalfusionFormatError):\n ffp.storage.load_storage(1)\n with pytest.raises(IOError):\n ffp.storage.load_storage(\"foo\")\n e = ffp.storage.load_storage(\n os.path.join(tests_root, \"data\", \"embeddings.fifu\"))\n matrix = vocab_array_tuple[1]\n matrix = matrix.squeeze() / np.linalg.norm(matrix, axis=1, keepdims=True)\n assert e.shape == (7, 10)\n assert np.allclose(e, matrix)\n\n\ndef test_mmap_array(tests_root, vocab_array_tuple):\n with pytest.raises(TypeError):\n ffp.storage.load_storage(None, mmap=True)\n with pytest.raises(ffp.io.FinalfusionFormatError):\n ffp.storage.load_storage(1, mmap=True)\n with pytest.raises(IOError):\n ffp.storage.load_storage(\"foo\", mmap=True)\n e = ffp.storage.load_storage(os.path.join(tests_root, \"data\",\n \"embeddings.fifu\"),\n mmap=True)\n matrix = vocab_array_tuple[1]\n matrix = matrix.squeeze() / np.linalg.norm(matrix, axis=1, keepdims=True)\n assert e.shape == (7, 10)\n assert np.allclose(e, matrix)\n\n\ndef test_array_roundtrip(tests_root):\n tmp_dir = tempfile.gettempdir()\n filename = os.path.join(tmp_dir, \"write_simple.fifu\")\n s = ffp.storage.load_storage(\n os.path.join(tests_root, \"data\", \"embeddings.fifu\"))\n zero = s[0]\n assert isinstance(zero, np.ndarray)\n assert not isinstance(zero, ffp.storage.Storage)\n assert not isinstance(zero, ffp.storage.NdArray)\n s.write(filename)\n s2 = ffp.storage.load_storage(filename)\n zero2 = s2[0]\n assert np.allclose(zero, zero2)\n assert s.shape == s2.shape\n assert np.allclose(s, s2)\n\n\ndef test_array_roundtrip_mmap(tests_root):\n tmp_dir = tempfile.gettempdir()\n filename = os.path.join(tmp_dir, \"write_simple.fifu\")\n s = ffp.storage.load_storage(os.path.join(tests_root, \"data\",\n \"embeddings.fifu\"),\n mmap=True)\n zero = s[0]\n s.write(filename)\n s2 = ffp.storage.load_storage(filename, True)\n zero2 = s2[0]\n assert np.allclose(zero, zero2)\n assert s.shape == s2.shape\n assert np.allclose(s, s2)\n\n\ndef test_quantized_array_read(tests_root, pq_check):\n s = ffp.storage.load_quantized_array(\n os.path.join(tests_root, \"data/pq.fifu\"))\n for i, (check, e) in enumerate(zip(pq_check.storage, s)):\n out = np.zeros_like(check)\n assert np.allclose(check, e, atol=0.05)\n out2 = s.embedding(i, out)\n assert out is out2\n assert np.allclose(e, out2)\n out = np.zeros_like(pq_check.storage)\n out2 = s.embedding(slice(None, None), out=out)\n assert out is out2\n assert np.allclose(s, pq_check.storage, atol=0.05)\n assert np.allclose(out, pq_check.storage, atol=0.05)\n # works with arrays\n out2 = s.embedding(np.arange(len(s)), out=out)\n assert out is out2\n # works with matrices\n out = np.zeros((2, *s.shape))\n key = np.vstack((np.arange(len(s)), np.arange(len(s)))).reshape((2, -1))\n out2 = s.embedding(key, out=out)\n assert out is out2\n assert np.allclose(out,\n np.vstack((pq_check.storage, pq_check.storage)).reshape(\n (2, *pq_check.storage.shape)),\n atol=0.05)\n # works with tensors\n out = np.zeros_like(pq_check.storage)[None, None]\n out2 = s.embedding(np.arange(len(s))[None, None], out=out)\n assert out is out2\n\n\ndef test_quantized_array_mmap(tests_root, pq_check):\n s = ffp.storage.load_quantized_array(os.path.join(tests_root,\n \"data/pq.fifu\"),\n mmap=True)\n for check, e in zip(pq_check.storage, s):\n assert 
np.allclose(check, e, atol=0.05)\n assert np.allclose(s, pq_check.storage, atol=0.05)\n\n\ndef test_quantized_array_roundtrip(tests_root, tmp_path, pq_check):\n s = ffp.storage.load_quantized_array(\n os.path.join(tests_root, \"data/pq.fifu\"))\n outfile = tmp_path / \"pq_storage.fifu\"\n s.write(outfile)\n s2 = ffp.storage.load_quantized_array(outfile)\n assert np.allclose(s, s2)\n assert np.allclose(s, pq_check.storage, atol=0.05)\n\n\ndef test_from_matrix():\n matrix = np.tile(np.arange(0, 10, dtype=np.float32), (10, 1))\n s = ffp.storage.NdArray(matrix)\n assert np.allclose(matrix, s)\n assert s.shape == matrix.shape\n with pytest.raises(AttributeError):\n _ = ffp.storage.NdArray(None)\n with pytest.raises(TypeError):\n _ = ffp.storage.NdArray(np.arange(0, 10, dtype=np.float32))\n with pytest.raises(TypeError):\n _ = ffp.storage.NdArray(np.tile(np.arange(0, 10), (10, 1)))\n with pytest.raises(TypeError):\n _ = ffp.storage.NdArray(\n np.tile(np.arange(0, 10, dtype=np.float), (10, 1)))\n assert np.allclose(matrix, s)\n\n\ndef test_indexing():\n matrix = np.float32(\n np.random.random_sample(sorted(np.random.randint(10, 100, 2))))\n s = ffp.storage.NdArray(matrix)\n assert np.allclose(matrix, s)\n for _ in range(1000):\n idx = np.random.randint(-len(s) * 2, len(s) * 2)\n if idx >= len(s) or idx < -len(s):\n ctx = pytest.raises(IndexError)\n else:\n ctx = contextlib.suppress()\n with ctx:\n val = s[idx]\n with ctx:\n assert np.allclose(val, matrix[idx])\n\n\ndef test_iter():\n matrix = np.tile(np.arange(0, 10, dtype=np.float32), (10, 1))\n s = ffp.storage.NdArray(matrix)\n for storage_row, matrix_row in zip(s, matrix):\n assert np.allclose(storage_row, matrix_row)\n\n\ndef test_slicing():\n matrix = np.float32(np.random.random_sample((10, 10)))\n s = ffp.storage.NdArray(matrix)\n assert np.allclose(matrix[:], s[:])\n assert np.allclose(matrix, s)\n\n for _ in range(250):\n upper = np.random.randint(-len(matrix) * 3, len(matrix) * 3)\n lower = np.random.randint(-len(matrix) * 3, len(matrix) * 3)\n step = np.random.randint(-len(matrix) * 3, len(matrix) * 3)\n ctx = pytest.raises(ValueError) if step == 0 else contextlib.suppress()\n\n assert np.allclose(matrix[:upper], s[:upper])\n assert np.allclose(matrix[lower:upper], s[lower:upper])\n with ctx:\n val = s[lower:upper:step]\n with ctx:\n assert np.allclose(matrix[lower:upper:step], val)\n with ctx:\n val = s[:upper:step]\n with ctx:\n assert np.allclose(matrix[:upper:step], val)\n with ctx:\n val = s[::step]\n with ctx:\n assert np.allclose(matrix[::step], val)\n\n\ndef test_quantized_array_slices(tests_root, pq_check):\n s = ffp.storage.load_quantized_array(\n os.path.join(tests_root, \"data/pq.fifu\"))\n assert np.allclose(s, pq_check.storage, atol=0.05)\n\n for _ in range(250):\n upper = np.random.randint(-len(s) * 3, len(s) * 3)\n lower = np.random.randint(-len(s) * 3, len(s) * 3)\n step = np.random.randint(-len(s) * 3, len(s) * 3)\n ctx = pytest.raises(ValueError) if step == 0 else contextlib.suppress()\n\n assert np.allclose(pq_check.storage[:upper], s[:upper], atol=0.05)\n assert np.allclose(pq_check.storage[:upper],\n s.embedding(slice(None, upper)),\n atol=0.05)\n assert np.allclose(pq_check.storage[lower:upper],\n s[lower:upper],\n atol=0.05)\n assert np.allclose(pq_check.storage[lower:upper],\n s.embedding(slice(lower, upper)),\n atol=0.05)\n with ctx:\n val = s[lower:upper:step]\n with ctx:\n assert np.allclose(pq_check.storage[lower:upper:step],\n val,\n atol=0.05)\n with ctx:\n val = s[:upper:step]\n with ctx:\n assert 
np.allclose(pq_check.storage[:upper:step], val, atol=0.05)\n with ctx:\n val = s[::step]\n with ctx:\n assert np.allclose(pq_check.storage[::step], val, atol=0.05)\n\n\ndef test_slice_slice():\n for _ in range(250):\n matrix = np.float32(np.random.random_sample((100, 10)))\n s = ffp.storage.NdArray(matrix)\n assert np.allclose(matrix[:], s[:])\n assert np.allclose(matrix, s)\n for _ in range(5):\n if len(matrix) == 0:\n break\n upper = np.random.randint(-len(matrix) * 2, len(matrix) * 2)\n lower = np.random.randint(-len(matrix) * 2, len(matrix) * 2)\n step = np.random.randint(-len(matrix) * 2, len(matrix) * 2)\n ctx = pytest.raises(\n ValueError) if step == 0 else contextlib.suppress()\n with ctx:\n matrix = matrix[lower:upper:step]\n with ctx:\n s = s[lower:upper:step]\n assert isinstance(s, np.ndarray)\n assert isinstance(s, ffp.storage.Storage)\n assert isinstance(s, ffp.storage.NdArray)\n assert np.allclose(matrix, s)\n\n\ndef test_write_sliced():\n tmp_dir = tempfile.gettempdir()\n filename = os.path.join(tmp_dir, \"write_sliced.fifu\")\n matrix = np.float32(np.random.random_sample((10, 10)))\n s = ffp.storage.NdArray(matrix)\n for _ in range(250):\n upper = np.random.randint(-len(matrix) * 3, len(matrix) * 3)\n lower = np.random.randint(-len(matrix) * 3, len(matrix) * 3)\n step = np.random.randint(-len(matrix) * 3, len(matrix) * 3)\n mmap = np.random.randint(0, 1)\n if step == 0:\n continue\n s[lower:upper:step].write(filename)\n s2 = ffp.storage.load_ndarray(filename, bool(mmap))\n assert np.allclose(matrix[lower:upper:step], s2)\n\n\ndef test_iter_sliced():\n matrix = np.float32(np.random.random_sample((10, 10)))\n s = ffp.storage.NdArray(matrix)\n for _ in range(250):\n upper = np.random.randint(-len(matrix) * 3, len(matrix) * 3)\n lower = np.random.randint(-len(matrix) * 3, len(matrix) * 3)\n step = np.random.randint(-len(matrix) * 3, len(matrix) * 3)\n if step == 0:\n continue\n for storage_row, matrix_row in zip(s[lower:upper:step],\n matrix[lower:upper:step]):\n assert np.allclose(storage_row, matrix_row)\n" ]
[ [ "numpy.allclose", "numpy.arange", "numpy.linalg.norm", "numpy.random.random_sample", "numpy.zeros_like", "numpy.zeros", "numpy.vstack", "numpy.random.randint" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
ddelange/ML-automator
[ "02b47af2498d4e736138a770f2129cba8b6dd3c8" ]
[ "mlautomator/objectives/regressor_objectives.py" ]
[ "# Standard Python Library imports\nimport time\nimport numpy as np\nimport pandas as pd\nfrom sklearn.neighbors import KNeighborsRegressor\nfrom sklearn.model_selection import (StratifiedKFold, RepeatedKFold, KFold, cross_val_score)\nfrom sklearn.linear_model import SGDRegressor, LogisticRegression\nfrom sklearn.ensemble import RandomForestRegressor, BaggingRegressor\nfrom sklearn.naive_bayes import GaussianNB\nfrom sklearn.decomposition import PCA\nfrom sklearn.preprocessing import StandardScaler, RobustScaler\nfrom sklearn.svm import SVR\nfrom sklearn.discriminant_analysis import LinearDiscriminantAnalysis\nfrom sklearn.pipeline import Pipeline, FeatureUnion\nfrom sklearn.feature_selection import SelectKBest, chi2,f_classif\nimport warnings\n\n#Local Imports\nfrom mlautomator.search_keys import get_keys\n\n#3rd party imports\nfrom xgboost import XGBRegressor\n\nwarnings.filterwarnings(\"ignore\")\n\nclass Regressors:\n '''\n A utility class for holding all the objective functions for each regressor algorithm in the search space.\n - You will see commonalities amongst the objective functions. \n - Each objective function takes an MLautomator object and \"parameter space\" as arguments. \n - During each pass through this objective function, Hyperopt selects a subset of the search space for \n the appropriate algorithm in search_spaces.py.\n - Note that Hyperopt is also calling permutations of data transforms and feature selection as well.\n - Each objective function returns the mean cross-validated score, and the name of the algorithm. \n - This gets anaylzed and packaged in the automator class itself.\n '''\n\n @staticmethod\n def objective01(automator, space):\n '''\n Objective function for XGBoost Regressor.\n '''\n algo = 'xgboost_regressor'\n X = automator.x_train\n Y = automator.y_train\n \n #Define the subset of dictionary keys that should get passed to the machine learning\n #algorithm.\n \n keys = get_keys(algo)\n subspace = {k: space[k] for k in set(space).intersection(keys)}\n \n #Extract the remaining keys that are pertinent to data preprocessing.\n \n model = XGBRegressor(n_jobs = -1, **subspace) \n scaler = space.get('scaler')\n num_features = space.get('k_best')\n \n #Assemble a data pipeline with the extracted data preprocessing keys.\n pipeline = []\n pipeline = Pipeline([\n ('scaler', scaler),\n ('select_best', SelectKBest(k = num_features)),\n ('classifier', model),\n ])\n \n #perform cross validation and return the mean score.\n kfold = RepeatedKFold(n_splits = automator.num_cv_folds, n_repeats = automator.repeats)\n scores = -cross_val_score(pipeline, X, Y, cv = kfold, scoring = automator.score_metric, verbose = False).mean() \n return scores, algo\n\n\n @staticmethod\n def objective02(automator, space):\n '''\n Objective function for SGD Regressor.\n '''\n algo = 'SGDRegressor'\n X = automator.x_train\n Y = automator.y_train\n #Define the subset of dictionary keys that should get passed to the machine learning\n #algorithm.\n \n keys = get_keys(algo) \n subspace = {k: space[k] for k in set(space).intersection(keys)}\n \n #Extract the remaining keys that are pertinent to data preprocessing.\n model = SGDRegressor(**subspace) \n scaler = space.get('scaler')\n num_features = space.get('k_best')\n \n #Assemble a data pipeline with the extracted data preprocessing keys.\n pipeline = []\n pipeline = Pipeline([\n ('scaler', scaler),\n ('select_best', SelectKBest(k = num_features)),\n ('classifier', model),\n ])\n \n #perform cross validation and return the mean score.\n 
kfold = RepeatedKFold(n_splits = automator.num_cv_folds, n_repeats = automator.repeats)\n scores = -cross_val_score(pipeline, X, Y, cv = kfold, scoring = automator.score_metric, verbose = False).mean() \n return scores, algo\n\n\n @staticmethod\n def objective03(automator, space):\n '''\n Objective function for Random Forest Regressor.\n '''\n algo = 'RandomForestRegressor'\n X = automator.x_train\n Y = automator.y_train\n #Define the subset of dictionary keys that should get passed to the machine learning\n #algorithm.\n \n keys = get_keys(algo) \n subspace = {k: space[k] for k in set(space).intersection(keys)}\n \n #Extract the remaining keys that are pertinent to data preprocessing.\n model = RandomForestRegressor(**subspace) \n scaler = space.get('scaler')\n num_features = space.get('k_best')\n \n #Assemble a data pipeline with the extracted data preprocessing keys.\n pipeline = []\n pipeline = Pipeline([\n ('scaler', scaler),\n ('select_best', SelectKBest(k = num_features)),\n ('classifier', model),\n ])\n \n #perform two passes of 10-fold cross validation and return the mean score.\n kfold = RepeatedKFold(n_splits=10, n_repeats=1)\n scores = -cross_val_score(pipeline, X, Y, cv=kfold, scoring=automator.score_metric,verbose=False).mean()\n return scores, algo\n\n\n @staticmethod\n def objective04(automator, space):\n '''\n Objective function for Support Vector Machines. Note that this method uses a Bagged Classifier \n as a wrapper for SVC. Support Vector Machine run time scales by O(N^3). Using bagged classifiers\n break up the dataset into smaller samples so that runtime is manageable.\n '''\n algo = 'SVR'\n X = automator.x_train\n Y = automator.y_train\n\n #Define the subset of dictionary keys that should get passed to the machine learning\n #algorithm.\n \n keys = get_keys(algo) \n subspace = {k: space[k] for k in set(space).intersection(keys)}\n \n #Build a model with the parameters from our Hyperopt search space.\n\n n_estimators = space.get('n_estimators')\n model = BaggingRegressor(\n SVR(**subspace),\n max_samples = automator.num_samples // n_estimators,\n n_estimators = n_estimators,\n ) \n\n scaler = space.get('scaler')\n num_features = space.get('k_best')\n \n #Assemble a data pipeline with the extracted data preprocessing keys.\n pipeline = []\n pipeline = Pipeline([\n ('scaler', scaler),\n ('select_best', SelectKBest(k = num_features)),\n ('classifier', model),\n ])\n \n #perform cross validation and return the mean score.\n kfold = RepeatedKFold(n_splits = automator.num_cv_folds, n_repeats = automator.repeats)\n scores = -cross_val_score(pipeline, X, Y, cv=kfold, scoring = automator.score_metric, verbose=False, n_jobs=-1).mean() \n return scores, algo \n\n\n @staticmethod\n def objective05(automator, space):\n '''\n Objective function for K-Nearest Neighbors Voting Regressor.\n '''\n algo = 'KNeighborRegressor'\n X = automator.x_train\n Y = automator.y_train\n\n #Define the subset of dictionary keys that should get passed to the machine learning\n #algorithm.\n keys = get_keys(algo) \n subspace = {k:space[k] for k in set(space).intersection(keys)} \n\n #Build a model with the parameters from our Hyperopt search space.\n model = KNeighborsRegressor(n_jobs=-1, **subspace)\n scaler = space.get('scaler')\n num_features = space.get('k_best')\n \n #Assemble a data pipeline with the extracted data preprocessing keys.\n pipeline = []\n pipeline = Pipeline([\n ('scaler', scaler),\n ('select_best', SelectKBest(k = num_features)),\n ('classifier', model),\n ])\n \n #perform cross 
validation and return the mean score.\n kfold = RepeatedKFold(n_splits = automator.num_cv_folds, n_repeats = automator.repeats)\n scores = -cross_val_score(pipeline, X, Y, cv=kfold, scoring=automator.score_metric, verbose=False).mean() \n return scores, algo " ]
[ [ "sklearn.ensemble.RandomForestRegressor", "sklearn.model_selection.RepeatedKFold", "sklearn.model_selection.cross_val_score", "sklearn.linear_model.SGDRegressor", "sklearn.neighbors.KNeighborsRegressor", "sklearn.svm.SVR", "sklearn.feature_selection.SelectKBest" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
NicolasHug/pygbm
[ "7891113aa074a0c33705b2d454b6609b3544eaf2" ]
[ "tests/test_binning.py" ]
[ "import numpy as np\nfrom numpy.testing import assert_array_equal, assert_allclose\nimport pytest\n\nfrom pygbm.binning import BinMapper, _find_binning_thresholds, _map_to_bins\n\n\nDATA = np.random.RandomState(42).normal(\n loc=[0, 10], scale=[1, 0.01], size=(int(1e6), 2)\n).astype(np.float32)\n\n\ndef test_find_binning_thresholds_regular_data():\n data = np.linspace(0, 10, 1001).reshape(-1, 1)\n bin_thresholds = _find_binning_thresholds(data, max_bins=10)\n assert_allclose(bin_thresholds[0], [1, 2, 3, 4, 5, 6, 7, 8, 9])\n\n bin_thresholds = _find_binning_thresholds(data, max_bins=5)\n assert_allclose(bin_thresholds[0], [2, 4, 6, 8])\n\n\ndef test_find_binning_thresholds_small_regular_data():\n data = np.linspace(0, 10, 11).reshape(-1, 1)\n\n bin_thresholds = _find_binning_thresholds(data, max_bins=5)\n assert_allclose(bin_thresholds[0], [2, 4, 6, 8])\n\n bin_thresholds = _find_binning_thresholds(data, max_bins=10)\n assert_allclose(bin_thresholds[0], [1, 2, 3, 4, 5, 6, 7, 8, 9])\n\n bin_thresholds = _find_binning_thresholds(data, max_bins=11)\n assert_allclose(bin_thresholds[0], np.arange(10) + .5)\n\n bin_thresholds = _find_binning_thresholds(data, max_bins=255)\n assert_allclose(bin_thresholds[0], np.arange(10) + .5)\n\n\ndef test_find_binning_thresholds_random_data():\n bin_thresholds = _find_binning_thresholds(DATA, random_state=0)\n assert len(bin_thresholds) == 2\n for i in range(len(bin_thresholds)):\n assert bin_thresholds[i].shape == (255,) # 256 - 1\n assert bin_thresholds[i].dtype == DATA.dtype\n\n assert_allclose(bin_thresholds[0][[64, 128, 192]],\n np.array([-0.7, 0.0, 0.7]), atol=1e-1)\n\n assert_allclose(bin_thresholds[1][[64, 128, 192]],\n np.array([9.99, 10.00, 10.01]), atol=1e-2)\n\n\ndef test_find_binning_thresholds_low_n_bins():\n bin_thresholds = _find_binning_thresholds(DATA, max_bins=128,\n random_state=0)\n assert len(bin_thresholds) == 2\n for i in range(len(bin_thresholds)):\n assert bin_thresholds[i].shape == (127,) # 128 - 1\n assert bin_thresholds[i].dtype == DATA.dtype\n\n\ndef test_find_binning_thresholds_invalid_n_bins():\n with pytest.raises(ValueError):\n _find_binning_thresholds(DATA, max_bins=1024)\n\n\[email protected]('n_bins', [16, 128, 256])\ndef test_map_to_bins(n_bins):\n bin_thresholds = _find_binning_thresholds(DATA, max_bins=n_bins,\n random_state=0)\n binned = _map_to_bins(DATA, bin_thresholds)\n assert binned.shape == DATA.shape\n assert binned.dtype == np.uint8\n assert binned.flags.f_contiguous\n\n min_indices = DATA.argmin(axis=0)\n max_indices = DATA.argmax(axis=0)\n\n for feature_idx, min_idx in enumerate(min_indices):\n assert binned[min_idx, feature_idx] == 0\n for feature_idx, max_idx in enumerate(max_indices):\n assert binned[max_idx, feature_idx] == n_bins - 1\n\n\[email protected](\"n_bins\", [5, 10, 42])\ndef test_bin_mapper_random_data(n_bins):\n n_samples, n_features = DATA.shape\n\n expected_count_per_bin = n_samples // n_bins\n tol = int(0.05 * expected_count_per_bin)\n\n mapper = BinMapper(max_bins=n_bins, random_state=42).fit(DATA)\n binned = mapper.transform(DATA)\n\n assert binned.shape == (n_samples, n_features)\n assert binned.dtype == np.uint8\n assert_array_equal(binned.min(axis=0), np.array([0, 0]))\n assert_array_equal(binned.max(axis=0), np.array([n_bins - 1, n_bins - 1]))\n assert len(mapper.bin_thresholds_) == n_features\n for i in range(len(mapper.bin_thresholds_)):\n assert mapper.bin_thresholds_[i].shape == (n_bins - 1,)\n assert mapper.bin_thresholds_[i].dtype == DATA.dtype\n assert 
np.all(mapper.n_bins_per_feature_ == n_bins)\n\n # Check that the binned data is approximately balanced across bins.\n for feature_idx in range(n_features):\n for bin_idx in range(n_bins):\n count = (binned[:, feature_idx] == bin_idx).sum()\n assert abs(count - expected_count_per_bin) < tol\n\n\[email protected](\"n_samples, n_bins\", [\n (5, 5),\n (5, 10),\n (5, 11),\n (42, 255)\n])\ndef test_bin_mapper_small_random_data(n_samples, n_bins):\n data = np.random.RandomState(42).normal(size=n_samples).reshape(-1, 1)\n assert len(np.unique(data)) == n_samples\n\n mapper = BinMapper(max_bins=n_bins, random_state=42)\n binned = mapper.fit_transform(data)\n\n assert binned.shape == data.shape\n assert binned.dtype == np.uint8\n assert_array_equal(binned.ravel()[np.argsort(data.ravel())],\n np.arange(n_samples))\n\n\[email protected](\"n_bins, n_distinct, multiplier\", [\n (5, 5, 1),\n (5, 5, 3),\n (255, 12, 42),\n])\ndef test_bin_mapper_identity_repeated_values(n_bins, n_distinct, multiplier):\n data = np.array(list(range(n_distinct)) * multiplier).reshape(-1, 1)\n binned = BinMapper(max_bins=n_bins).fit_transform(data)\n assert_array_equal(data, binned)\n\n\[email protected]('n_distinct', [2, 7, 42])\ndef test_bin_mapper_repeated_values_invariance(n_distinct):\n rng = np.random.RandomState(42)\n distinct_values = rng.normal(size=n_distinct)\n assert len(np.unique(distinct_values)) == n_distinct\n\n repeated_indices = rng.randint(low=0, high=n_distinct, size=1000)\n data = distinct_values[repeated_indices]\n rng.shuffle(data)\n assert_array_equal(np.unique(data), np.sort(distinct_values))\n\n data = data.reshape(-1, 1)\n\n mapper_1 = BinMapper(max_bins=n_distinct)\n binned_1 = mapper_1.fit_transform(data)\n assert_array_equal(np.unique(binned_1[:, 0]), np.arange(n_distinct))\n\n # Adding more bins to the mapper yields the same results (same thresholds)\n mapper_2 = BinMapper(max_bins=min(256, n_distinct * 3))\n binned_2 = mapper_2.fit_transform(data)\n\n assert_allclose(mapper_1.bin_thresholds_[0], mapper_2.bin_thresholds_[0])\n assert_array_equal(binned_1, binned_2)\n\n\[email protected](\"n_bins, scale, offset\", [\n (3, 2, -1),\n (42, 1, 0),\n (256, 0.3, 42),\n])\ndef test_bin_mapper_identity_small(n_bins, scale, offset):\n data = np.arange(n_bins).reshape(-1, 1) * scale + offset\n binned = BinMapper(max_bins=n_bins).fit_transform(data)\n assert_array_equal(binned, np.arange(n_bins).reshape(-1, 1))\n\n\[email protected]('n_bins_small, n_bins_large', [\n (2, 2),\n (3, 3),\n (4, 4),\n (42, 42),\n (256, 256),\n (5, 17),\n (42, 256),\n])\ndef test_bin_mapper_idempotence(n_bins_small, n_bins_large):\n assert n_bins_large >= n_bins_small\n data = np.random.RandomState(42).normal(size=30000).reshape(-1, 1)\n mapper_small = BinMapper(max_bins=n_bins_small)\n mapper_large = BinMapper(max_bins=n_bins_large)\n binned_small = mapper_small.fit_transform(data)\n binned_large = mapper_large.fit_transform(binned_small)\n assert_array_equal(binned_small, binned_large)\n\n\[email protected]('max_bins', [10, 100, 256])\[email protected]('diff', [-5, 0, 5])\ndef test_n_bins_per_feature(max_bins, diff):\n # Check that n_bins_per_feature is n_unique_values when\n # n_unique_values <= max_bins, else max_bins.\n\n n_unique_values = max_bins + diff\n X = list(range(n_unique_values)) * 2\n X = np.array(X).reshape(-1, 1)\n mapper = BinMapper(max_bins=max_bins).fit(X)\n assert np.all(mapper.n_bins_per_feature_ == min(max_bins, n_unique_values))\n\n\ndef test_subsample():\n # Make sure bin thresholds are different when 
applying subsampling\n mapper_no_subsample = BinMapper(subsample=None, random_state=0).fit(DATA)\n mapper_subsample = BinMapper(subsample=256, random_state=0).fit(DATA)\n\n for feature in range(DATA.shape[1]):\n with pytest.raises(AssertionError):\n np.testing.assert_array_almost_equal(\n mapper_no_subsample.bin_thresholds_[feature],\n mapper_subsample.bin_thresholds_[feature],\n decimal=3\n )\n" ]
[ [ "numpy.linspace", "numpy.unique", "numpy.arange", "numpy.sort", "numpy.all", "numpy.testing.assert_array_equal", "numpy.testing.assert_allclose", "numpy.array", "numpy.random.RandomState", "numpy.testing.assert_array_almost_equal" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
shibing624/rater
[ "8437dea8baf0137ab3c07dd19c5f2bb8c15b4435", "8437dea8baf0137ab3c07dd19c5f2bb8c15b4435", "8437dea8baf0137ab3c07dd19c5f2bb8c15b4435" ]
[ "examples/flen_demo.py", "examples/agnn_demo.py", "rater/models/ctr/pnn.py" ]
[ "# -*- coding: utf-8 -*-\n\"\"\"\n@author:XuMing([email protected])\n@description: \n\"\"\"\n\nimport os\nimport sys\n\nimport torch\nimport torch.nn as nn\nfrom torch.utils.data.dataset import TensorDataset\n\nsys.path.append(\"..\")\nfrom rater.datasets.criteo import Criteo\nfrom rater.models.ctr.flen import FLEN\nfrom rater.models.model import train_model\n\npwd_path = os.path.abspath(os.path.dirname(__file__))\n\n\ndef train(x_idx, x_value, label, features, out_type='binary'):\n device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n\n X_idx_tensor = torch.LongTensor(x_idx).to(device)\n X_value_tensor = torch.Tensor(x_value).to(device)\n y_tensor = torch.Tensor(label).to(device)\n y_tensor = y_tensor.reshape(-1, 1)\n\n X = TensorDataset(X_idx_tensor, y_tensor)\n model = FLEN(features.feature_size(), features.field_size(), features.feature_size(), field_ranges=features.field_range(),\n out_type=out_type).to(device)\n optimizer = torch.optim.Adam(model.parameters(), lr=1e-4)\n\n model_path = os.path.join(pwd_path, 'flen_model.pt')\n model, loss_history = train_model(model=model, model_path=model_path, dataset=X, loss_func=nn.BCELoss(),\n optimizer=optimizer, device=device, val_size=0.2, batch_size=32, epochs=10)\n print(loss_history)\n\n\nif __name__ == '__main__':\n # load criteo sample dataset\n dataset = Criteo(n_samples=100)\n features, X_idx, X_value, y, category_index, continuous_value = dataset.get_features(use_continuous_columns=True,\n use_category_columns=True)\n print(features.feature_size(), features.field_size())\n\n print(\"X_idx[0], X_value[0], y[0] :\\n\", X_idx[0], X_value[0], y[0])\n train(X_idx, X_value, y, features)\n", "# -*- coding: utf-8 -*-\n\"\"\"\n@author:XuMing([email protected])\n@description:\n\n@reference: https://github.com/tkipf/pygcn; https://github.com/dawnranger/pytorch-AGNN\n\"\"\"\n\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport argparse\nimport time\n\nimport numpy as np\nimport torch\nimport torch.nn.functional as F\nimport torch.optim as optim\nfrom torch.autograd import Variable\n\nfrom rater.models.graph.agnn import AGNN\nfrom rater.models.graph.reader import load_data, accuracy\n\n\ndef train():\n t_total = time.time()\n for epoch in range(args.epochs):\n t = time.time()\n model.train()\n optimizer.zero_grad()\n output = model(features, adj)\n\n loss_train = F.nll_loss(output[idx_train], labels[idx_train])\n acc_train = accuracy(output[idx_train], labels[idx_train])\n loss_train.backward()\n optimizer.step()\n\n if not args.fastmode:\n # Evaluate validation set performance separately,\n # deactivates dropout during validation run.\n model.eval()\n output = model(features, adj)\n\n loss_val = F.nll_loss(output[idx_val], labels[idx_val])\n acc_val = accuracy(output[idx_val], labels[idx_val])\n print('Epoch: {:04d}'.format(epoch + 1),\n 'loss_train: {:.4f}'.format(loss_train.item()),\n 'acc_train: {:.4f}'.format(acc_train.item()),\n 'loss_val: {:.4f}'.format(loss_val.item()),\n 'acc_val: {:.4f}'.format(acc_val.item()),\n 'time: {:.4f}s'.format(time.time() - t))\n\n print(\"Optimization Finished!\")\n print(\"Total time elapsed: {:.4f}s\".format(time.time() - t_total))\n\n\ndef test():\n model.eval()\n output = model(features, adj)\n loss_test = F.nll_loss(output[idx_test], labels[idx_test])\n acc_test = accuracy(output[idx_test], labels[idx_test])\n print(\"Test set results:\",\n \"loss= {:.4f}\".format(loss_test.item()),\n \"accuracy= {:.4f}\".format(acc_test.item()))\n\n\nif __name__ == 
\"__main__\":\n # Training settings\n parser = argparse.ArgumentParser()\n parser.add_argument('--no-cuda', action='store_true', default=False, help='Disables CUDA training.')\n parser.add_argument('--fastmode', action='store_true', default=True, help='Validate during training pass.')\n parser.add_argument('--seed', type=int, default=42, help='Random seed.')\n parser.add_argument('--epochs', type=int, default=500, help='Number of epochs to train.')\n parser.add_argument('--lr', type=float, default=0.01, help='Initial learning rate.')\n parser.add_argument('--weight_decay', type=float, default=5e-4, help='Weight decay (L2 loss on parameters).')\n parser.add_argument('--hidden', type=int, default=16, help='Number of hidden units.')\n parser.add_argument('--layers', type=int, default=3, help='Number of attention layers.')\n parser.add_argument('--dropout_rate', type=float, default=0.5, help='Dropout rate (1 - keep probability).')\n\n args = parser.parse_args()\n args.cuda = not args.no_cuda and torch.cuda.is_available()\n\n np.random.seed(args.seed)\n torch.manual_seed(args.seed)\n if args.cuda:\n torch.cuda.manual_seed(args.seed)\n\n # Load data\n adj, features, labels, idx_train, idx_val, idx_test = load_data()\n\n # Model and optimizer\n model = AGNN(nfeat=features.shape[1],\n nhid=args.hidden,\n nclass=labels.max() + 1,\n nlayers=args.layers,\n dropout_rate=args.dropout_rate)\n # print(model)\n\n optimizer = optim.Adam(model.parameters(),\n lr=args.lr, weight_decay=args.weight_decay)\n\n if args.cuda:\n model.cuda()\n features = features.cuda()\n adj = adj.cuda()\n labels = labels.cuda()\n idx_train = idx_train.cuda()\n idx_val = idx_val.cuda()\n idx_test = idx_test.cuda()\n\n features, adj, labels = Variable(features), Variable(adj), Variable(labels)\n\n train()\n test()\n", "# -*- coding: utf-8 -*-\n\"\"\"\n@author:XuMing([email protected]), jachin, Nie\n@description: A pytorch implementation of FNN\n\n\nReference:\n[1] Product-based Neural Networks for User Response Prediction (SJTU 2016)\n Yanru Qu, Han Cai, Kan Ren, Weinan Zhang, Yong Yu Shanghai Jiao Tong University\n {kevinqu, hcai, kren, wnzhang, yyu}@apex.sjtu.edu.cn Ying Wen, Jun Wang University College London {ying.wen, j.wang}@cs.ucl.ac.uk\n\n\"\"\"\n\nimport torch\nimport torch.nn as nn\n\nfrom ..basic.functional import build_cross\nfrom ..basic.mlp import MLP\nfrom ..basic.output_layer import OutputLayer\n\n\nclass PNN(nn.Module):\n \"\"\"\n PNN Network\n \"\"\"\n\n def __init__(self, feature_size, field_size, embedding_size=5, fc_dims=[32, 32], dropout=0.0, is_batch_norm=False,\n product_type='inner', out_type='binary'):\n \"\"\"\n Init model\n :param feature_size: int, size of the feature dictionary\n :param field_size: int, size of the feature fields\n :param embedding_size: int, size of the feature embedding\n :param fc_dims: range, sizes of fc dims\n :param dropout: float, dropout rate\n :param is_batch_norm: bool, use batch normalization\n :param product_type: str, product type layer, inner/outer\n :param out_type: str, output layer function, binary is Sigmoid\n \"\"\"\n super(PNN, self).__init__()\n self.feature_size = feature_size\n self.field_size = field_size\n # embedding layer\n self.embedding_size = embedding_size\n self.emb_layer = nn.Embedding(num_embeddings=self.feature_size,\n embedding_dim=self.embedding_size)\n nn.init.xavier_uniform_(self.emb_layer.weight)\n\n fc_dims = fc_dims if fc_dims else[32, 32]\n # linear signal layer, named l_z\n self.d1 = d1 = fc_dims[0]\n self.product_type = product_type\n if 
product_type == '*':\n d1 *= 2\n self.linear_signal_weights = nn.Linear(in_features=field_size * embedding_size, out_features=d1)\n nn.init.xavier_uniform_(self.linear_signal_weights.weight)\n\n # product layer, named l_p\n if product_type == 'inner':\n self.product_layer = InnerProductLayer(field_size, d1)\n elif product_type == 'outer':\n self.product_layer = OuterProductLayer(embedding_size, field_size, d1)\n else:\n self.product_layer = HybridProductLayer(embedding_size, field_size, d1)\n\n # fc layers\n # l_1=relu(l_z+l_p_b_1)\n self.l1_layer = nn.ReLU()\n self.l1_bias = nn.Parameter(torch.randn(d1), requires_grad=True)\n # l_2 to l_n\n self.fc_dims = fc_dims\n self.fc_layers = MLP(d1, fc_dims=fc_dims, dropout=dropout, is_batch_norm=is_batch_norm)\n\n # output layer\n self.output_layer = OutputLayer(fc_dims[-1], out_type)\n\n def forward(self, feat_index):\n \"\"\"\n Forward\n :param feat_index: index input tensor\n :return: predict y\n \"\"\"\n # feat_index: N * field_size\n feat_emb = self.emb_layer(feat_index) # N * field_size * embedding_size\n\n # compute linear signal l_z\n concat_z = feat_emb.reshape(-1, self.embedding_size * self.field_size)\n linear_signal = self.linear_signal_weights(concat_z)\n\n # product_layer\n product_out = self.product_layer(feat_emb)\n\n # fc layers from l_2 to l_n\n # l_1=relu(l_z+l_p_b_1)\n l1_in = torch.add(linear_signal, self.l1_bias)\n l1_in = torch.add(l1_in, product_out)\n l1_out = self.l1_layer(l1_in)\n y = self.fc_layers(l1_out)\n y = self.output_layer(y)\n return y\n\n\nclass InnerProductLayer(nn.Module):\n def __init__(self, field_size, d1):\n super(InnerProductLayer, self).__init__()\n self.field_size = field_size\n self.d1 = d1\n self.num_pairs = int(field_size * (field_size - 1) / 2)\n self.product_layer_weights = nn.Linear(in_features=self.num_pairs, out_features=d1)\n nn.init.xavier_uniform_(self.product_layer_weights.weight)\n\n def forward(self, feat_emb):\n # feat_emb: N * field_size * embedding_size\n\n # p_ij=<f_i,f_j>\n # p is symmetric matrix, so only upper triangular matrix needs calculation (without diagonal)\n p, q = build_cross(self.field_size, feat_emb)\n pij = p * q # N * num_pairs * embedding_size\n pij = torch.sum(pij, dim=2) # N * num_pairs\n\n # l_p\n lp = self.product_layer_weights(pij)\n return lp\n\n\nclass OuterProductLayer(nn.Module):\n def __init__(self, embedding_size, field_size, d1, kernel_type='mat'):\n super(OuterProductLayer, self).__init__()\n self.embedding_size = embedding_size\n self.field_size = field_size\n self.d1 = d1\n self.num_pairs = field_size * (field_size - 1) / 2\n self.kernel_type = kernel_type\n if kernel_type == 'vec':\n kernel_shape = (self.num_pairs, embedding_size)\n elif kernel_type == 'num':\n kernel_shape = (self.num_pairs, 1)\n else: # by default mat\n kernel_shape = (embedding_size, self.num_pairs, embedding_size)\n self.kernel_shape = kernel_shape\n self.kernel = nn.Parameter(torch.zeros(kernel_shape))\n nn.init.xavier_uniform_(self.kernel.data)\n self.num_pairs = field_size * (field_size - 1) / 2\n self.product_layer_weights = nn.Linear(in_features=field_size, out_features=d1)\n nn.init.xavier_uniform_(self.product_layer_weights.weight)\n\n def forward(self, feat_emb):\n p, q = build_cross(self.field_size, feat_emb) # p, q: N * num_pairs * embedding_size\n\n if self.kernel_type == 'mat':\n # self.kernel: embedding_size * num_pairs * embedding_size\n p = p.unsqueeze(1) # N * 1 * num_pairs * embedding_size\n p = p * self.kernel # N * embedding_size * num_pairs * embedding_size\n kp 
= torch.sum(p, dim=-1) # N * embedding_size * num_pairs\n kp = kp.permute(0, 2, 1) # N * num_pairs * embedding_size\n pij = torch.sum(kp * q, -1) # N * num_pairs\n else:\n # self.kernel: num_pairs * embedding_size/1\n kernel = self.kernel.unsqueeze(1) # 1 * num_pairs * embedding_size/1\n pij = p * q # N * num_pairs * embedding_size\n pij = pij * kernel # N * num_pairs * embedding_size\n pij = torch.sum(pij, -1) # N * num_pairs\n\n # l_p\n lp = self.product_layer_weights(pij)\n return lp\n\n\nclass HybridProductLayer(nn.Module):\n def __init__(self, embedding_size, field_size, d1):\n super(HybridProductLayer, self).__init__()\n self.field_size = field_size\n self.d1 = d1 / 2\n self.inner_product_layer = InnerProductLayer(field_size, d1)\n self.outer_product_layer = OuterProductLayer(embedding_size, field_size, d1)\n\n def forward(self, feat_emb):\n inner_product_out = self.inner_product_layer(feat_emb)\n outer_product_out = self.outer_product_layer(feat_emb)\n lp = torch.cat([inner_product_out, outer_product_out], dim=1)\n return lp\n" ]
[ [ "torch.LongTensor", "torch.Tensor", "torch.nn.BCELoss", "torch.utils.data.dataset.TensorDataset", "torch.cuda.is_available" ], [ "numpy.random.seed", "torch.cuda.manual_seed", "torch.nn.functional.nll_loss", "torch.manual_seed", "torch.cuda.is_available", "torch.autograd.Variable" ], [ "torch.add", "torch.cat", "torch.zeros", "torch.randn", "torch.sum", "torch.nn.Embedding", "torch.nn.Linear", "torch.nn.init.xavier_uniform_", "torch.nn.ReLU" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
IshanBaliyan/DEEP-TFM_with_cGAN
[ "8d711c025367031197e5b8c7c768fc9fbea406ce" ]
[ "Scripts_Python/Bead2_20200620/CGAN/train.py" ]
[ "from __future__ import print_function\nimport argparse\nimport os\nfrom math import log10\nimport numpy as np\nimport sys\nimport os\nimport random\nfrom glob import glob\nfrom PIL import Image\nimport random\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\nfrom torch.autograd import Variable\nfrom torch.utils.data import DataLoader\nfrom torch.utils.data import Dataset, DataLoader\nimport torchvision.transforms as standard_transforms\nfrom model import UNet\nfrom Discriminator import Discriminator\nimport h5py\nfrom skimage import io, exposure, img_as_uint, img_as_float\nimport imageio\n\nfrom skimage import img_as_ubyte\n\n#os.environ[\"CUDA_VISIBLE_DEVICES\"] = \"1,2\"\nargs = {\n 'num_class': 1,\n 'ignore_label': 255,\n 'num_gpus': 2,\n 'start_epoch': 1,\n 'num_epoch': 100,\n 'batch_size': 100,\n 'lr': 0.0002,\n 'lr_decay': 0.9,\n 'dice': 0,\n 'weight_decay': 1e-4,\n 'momentum': 0.9,\n 'snapshot': '',\n 'snapshot2': '',\n 'opt': 'adam',\n 'beta1': 0.5,\n 'input_nc': 32,\n 'output_nc': 1,\n 'dataset':'DEEP-TFM',\n 'ckpt':'checkpoint'\n}\nmax_im = 4200\nmax_gt = 666\n#max_im = 2000\n#max_gt = 200\nclass GANLoss(nn.Module):\n def __init__(self, use_lsgan=True, target_real_label=1.0, target_fake_label=0.0,\n tensor=torch.FloatTensor):\n super(GANLoss, self).__init__()\n self.real_label = target_real_label\n self.fake_label = target_fake_label\n self.real_label_var = None\n self.fake_label_var = None\n self.Tensor = tensor\n if use_lsgan:\n self.loss = nn.MSELoss()\n else:\n self.loss = nn.BCELoss()\n\n def get_target_tensor(self, input, target_is_real):\n target_tensor = None\n if target_is_real:\n create_label = ((self.real_label_var is None) or\n (self.real_label_var.numel() != input.numel()))\n if create_label:\n real_tensor = self.Tensor(input.size()).fill_(self.real_label)\n self.real_label_var = Variable(real_tensor, requires_grad=False)\n target_tensor = self.real_label_var\n else:\n create_label = ((self.fake_label_var is None) or\n (self.fake_label_var.numel() != input.numel()))\n if create_label:\n fake_tensor = self.Tensor(input.size()).fill_(self.fake_label)\n self.fake_label_var = Variable(fake_tensor, requires_grad=False)\n target_tensor = self.fake_label_var\n return target_tensor\n\n def __call__(self, input, target_is_real):\n target_tensor = self.get_target_tensor(input, target_is_real)\n return self.loss(input, target_tensor.cuda())\n\nclass HDF5Dataset(Dataset):\n def __init__(self,img_dir, isTrain=True):\n self.isTrain = isTrain\n \n #self.data_dict = pd.read_csv(data_dir) \n if isTrain: \n fold_dir = \"train.txt\" \n else: \n fold_dir = \"test.txt\"\n\n ids = open(fold_dir, 'r')\n\n self.index_list = []\n \n for line in ids:\n self.index_list.append(line[0:-1])\n self.img_dir = img_dir\n def __len__(self):\n return len(self.index_list)\n\n def __getitem__(self, index):\n _img = np.dtype('>u2') \n _target = np.dtype('>u2') \n id_ = int(self.index_list[index])\n with h5py.File(self.img_dir, 'r') as db:\n _img = db['input'][id_] \n _target = db['gt'][id_] \n if np.max(_target) == 0:\n with h5py.File(self.img_dir, 'r') as db:\n _img = db['input'][id_+1]\n _target = db['gt'][id_+1]\n _img = torch.from_numpy(np.divide(_img,max_im)) #.float()\n _target = torch.from_numpy(np.divide(_target,max_gt))\n return _img, _target\n\nclass XSigmoidLoss(nn.Module):\n def __init__(self):\n super().__init__()\n\n def forward(self, y_t, y_prime_t):\n ey_t = y_t - y_prime_t\n return torch.mean(2 * ey_t / (1 + torch.exp(-ey_t)) - ey_t)\n\nimg_dir = 
'/n/holyscratch01/wadduwage_lab/temp20200620/20-Jun-2020/beads_tr_data_5sls_20-Jun-2020.h5'\n\ndataset_ = HDF5Dataset(img_dir=img_dir, isTrain=True)\ntraining_data_loader = DataLoader(dataset=dataset_, batch_size=args['batch_size'], shuffle=True, num_workers=3, drop_last=True)\n\ndataset_test = HDF5Dataset(img_dir=img_dir, isTrain=False)\ntesting_data_loader = DataLoader(dataset=dataset_test, batch_size=args['batch_size'], shuffle=True, num_workers=3, drop_last=True)\n\n\nnetG = UNet(n_classes=args['output_nc']).cuda()\nnetG = torch.nn.parallel.DataParallel(netG, device_ids=range(args['num_gpus']))\nnetD = Discriminator().cuda()\n\nnetD = torch.nn.parallel.DataParallel(netD, device_ids=range(args['num_gpus']))\n\ncriterionGAN = GANLoss().cuda()\ncriterionL1 = nn.L1Loss().cuda()\ncriterionMSE = nn.MSELoss().cuda()\ncriterionxsig = XSigmoidLoss().cuda()\n# setup optimizer\noptimizerG = optim.Adam(netG.parameters(), lr=args['lr'], betas=(args['beta1'], 0.999))\noptimizerD = optim.Adam(netD.parameters(), lr=args['lr'], betas=(args['beta1'], 0.999))\n\nreal_a = torch.FloatTensor(args['batch_size'], args['input_nc'], 128, 128).cuda()\nreal_b = torch.FloatTensor(args['batch_size'], args['output_nc'], 128, 128).cuda()\n\n\nreal_a = Variable(real_a)\nreal_b = Variable(real_b)\n\nresume_epoch = 35\n\ndef test(args, model, device, test_loader, k_fold, class_weights):\n model.eval()\n test_loss = 0\n correct = 0\n with torch.no_grad():\n for data, target in test_loader:\n data, target = data.to(device), target.to(device)\n output = model(data)\n test_loss += criterionL1(output, target) #.item() # sum up batch loss\n pred = output.argmax(dim=1, keepdim=True) # get the index of the max log-probability\n correct += pred.eq(target.view_as(pred)).sum().item()\n\n test_loss /= len(test_loader.dataset)\n report = classification_report(target.cpu(), pred.cpu(), labels= np.unique(pred.cpu().numpy()), digits=4)\n return test_loss, 100. 
* correct / len(test_loader.dataset) , report\n\ndef train(epoch):\n for iteration, batch in enumerate(training_data_loader, 1):\n \n # forward\n\n real_a_cpu, real_b_cpu = batch[0], batch[1]\n\n \treal_a.resize_(real_a_cpu.size()).copy_(real_a_cpu)\n \treal_b.resize_(real_b_cpu.size()).copy_(real_b_cpu)\n\n\n fake_b = netG(real_a)\n ############################\n # (1) Update D network: maximize log(D(x,y)) + log(1 - D(x,G(x)))\n ###########################\n\n optimizerD.zero_grad()\n \n # train with fake\n fake_ab = torch.cat((real_a, fake_b), 1)\n pred_fake = netD.forward(fake_ab.detach())\n #print(pred_fake.size())\n loss_d_fake = criterionGAN(pred_fake, False)\n #print(fake_b.unique())\n # train with real\n real_ab = torch.cat((real_a, real_b), 1)\n pred_real = netD.forward(real_ab)\n loss_d_real = criterionGAN(pred_real, True)\n \n # Combined loss\n loss_d = (loss_d_fake + loss_d_real) * 0.5\n \n loss_d.backward() \n optimizerD.step()\n\n ############################\n # (2) Update G network: maximize log(D(x,G(x))) + L1(y,G(x))\n ##########################\n optimizerG.zero_grad()\n # First, G(A) should fake the discriminator\n fake_ab = torch.cat((real_a, fake_b), 1)\n pred_fake = netD.forward(fake_ab)\n loss_g_gan = criterionGAN(pred_fake, True)\n\n # Second, G(A) = B\n loss_g_l1 = criterionL1(fake_b, real_b) * 10\n print('loss_g_l1',loss_g_l1) \n\n loss_g = loss_g_gan + loss_g_l1\n\n loss_g.backward()\n\n optimizerG.step()\n \n if iteration % 10 == 0: \n print(\"===> Epoch[{}]({}/{}): Loss_D: {:.4f} Loss_G: {:.4f}\".format(\n epoch, iteration, len(training_data_loader), loss_d.item(), loss_g.item()))\n netG.eval()\n test_loss = 0\n for iteration, batch in enumerate(testing_data_loader, 1):\n real_a_cpu, real_b_cpu = batch[0], batch[1]\n \treal_a.resize_(real_a_cpu.size()).copy_(real_a_cpu)\n \treal_b.resize_(real_b_cpu.size()).copy_(real_b_cpu)\n fake_b = netG(real_a)\n test_loss += criterionMSE(fake_b, real_b).item()\n\n print(len(testing_data_loader.dataset))\n test_loss /= len(testing_data_loader.dataset)\n print('epoch[{}]: Loss_test: {:.4f}'.format(epoch,test_loss))\n \ndef checkpoint(epoch):\n if not os.path.exists(\"checkpoint\"):\n os.mkdir(\"checkpoint\")\n if not os.path.exists(os.path.join(\"checkpoint\", args['dataset'])):\n os.mkdir(os.path.join(\"checkpoint\", args['dataset']))\n net_g_model_out_path = \"checkpoint/{}/netG_model_epoch_{}.pth.tar\".format(args['dataset'], epoch)\n net_d_model_out_path = \"checkpoint/{}/netD_model_epoch_{}.pth.tar\".format(args['dataset'], epoch)\n torch.save(netG, net_g_model_out_path)\n torch.save(netD, net_d_model_out_path)\n print(\"Checkpoint saved to {}\".format(\"checkpoint\" + args['dataset']))\n\nfor epoch in range(1, args['num_epoch'] + 1):\n train(epoch)\n checkpoint(epoch)" ]
[ [ "torch.nn.MSELoss", "torch.cat", "torch.utils.data.DataLoader", "numpy.dtype", "torch.nn.BCELoss", "torch.autograd.Variable", "numpy.max", "torch.exp", "torch.FloatTensor", "torch.no_grad", "torch.nn.L1Loss", "numpy.divide", "torch.save" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
inyukwo1/qgm_decoder
[ "70e60afec140ec3e2ee04f980a384e1cf28d761c" ]
[ "commons/embeddings/word_embedding.py" ]
[ "import os\nimport json\nimport pickle\nimport torch\nimport torch.nn as nn\nfrom torch.autograd import Variable\nimport numpy as np\nfrom pytorch_pretrained_bert import BertTokenizer\nfrom commons.embeddings.graph_utils import *\nfrom datasets.schema import Schema\nfrom typing import List\n\n\n\nclass WordEmbedding(nn.Module):\n def __init__(self, glove_path, N_word, gpu, SQL_TOK, use_bert=False,\n trainable=False, use_small=False):\n super(WordEmbedding, self).__init__()\n self.trainable = trainable\n self.N_word = N_word\n self.gpu = gpu\n self.SQL_TOK = SQL_TOK\n self.use_bert = use_bert\n self.bert_tokenizer = BertTokenizer.from_pretrained('bert-large-cased', do_lower_case=False)\n\n word_emb = self._load_glove(glove_path, trainable, use_small)\n\n if trainable:\n print(\"Using trainable embedding\")\n self.w2i, word_emb_val = word_emb\n # trainable when using pretrained model, init embedding weights using prev embedding\n self.embedding = nn.Embedding(len(self.w2i), N_word)\n self.embedding.weight = nn.Parameter(torch.from_numpy(word_emb_val.astype(np.float32)))\n else:\n # else use word2vec or glove\n self.word_emb = word_emb\n print(\"Using fixed embedding for words but trainable embedding for types\")\n\n def _load_glove(self, file_name, load_used, use_small):\n if not load_used:\n cached_file_path = file_name + \".cache\"\n if os.path.isfile(cached_file_path):\n with open(cached_file_path, 'rb') as f:\n return pickle.load(f)\n else:\n print(('Loading word embedding from %s' % file_name))\n ret = {}\n with open(file_name) as inf:\n for idx, line in enumerate(inf):\n if (use_small and idx >= 500):\n break\n info = line.strip().split(' ')\n if info[0].lower() not in ret:\n ret[info[0]] = np.array([float(x) for x in info[1:]])\n with open(cached_file_path, 'wb') as f:\n pickle.dump(ret, f)\n return ret\n else:\n print ('Load used word embedding')\n with open('../alt/glove/word2idx.json') as inf:\n w2i = json.load(inf)\n with open('../alt/glove/usedwordemb.npy') as inf:\n word_emb_val = np.load(inf)\n return w2i, word_emb_val\n\n def word_find(self, word):\n # word = ''.join([i for i in word if i.isalpha()])\n # word = word.lower()\n return self.word_emb.get(word, np.zeros(self.N_word, dtype=np.float32))\n\n def gen_xc_type_batch(self, xc_type, is_col=False, is_list=False):\n B = len(xc_type)\n val_embs = []\n val_len = np.zeros(B, dtype=np.int64)\n for i, one_q in enumerate(xc_type):\n if is_list:\n q_val = [*map(lambda x:self.w2i.get(\" \".join(sorted(x)), 0), one_q)]\n else:\n q_val = [*map(lambda x:self.w2i.get(x, 0), one_q)]\n if is_col:\n val_embs.append(q_val) #<BEG> and <END>\n val_len[i] = len(q_val)\n else:\n val_embs.append([1] + q_val + [2]) #<BEG> and <END>\n val_len[i] = 1 + len(q_val) + 1\n max_len = max(val_len)\n val_tok_array = np.zeros((B, max_len), dtype=np.int64)\n for i in range(B):\n for t in range(len(val_embs[i])):\n val_tok_array[i,t] = val_embs[i][t]\n val_tok = torch.from_numpy(val_tok_array)\n if self.gpu:\n val_tok = val_tok.cuda()\n val_tok_var = Variable(val_tok)\n val_inp_var = self.embedding(val_tok_var)\n\n return val_inp_var, val_len\n\n\n def gen_x_batch(self, q, is_list=False, is_q=False):\n B = len(q)\n val_embs = []\n val_len = np.zeros(B, dtype=np.int64)\n for i, one_q in enumerate(q):\n if self.trainable:\n q_val = [*map(lambda x:self.w2i.get(x, 0), one_q)]\n elif not is_list:\n q_val = [*map(lambda x:self.word_emb.get(x, np.zeros(self.N_word, dtype=np.float32)), one_q)]\n else:\n q_val = []\n for ws in one_q:\n emb_list = []\n ws_len = 
len(ws)\n for w in ws:\n tmp = self.word_emb.get(w, np.zeros(self.N_word, dtype=np.float32))\n tmp = list(tmp.tolist())\n tmp = np.array(tmp) if tmp else np.zeros(self.N_word, dtype=np.float32)\n emb_list.append(tmp)\n #emb_list.append(self.word_emb.get(w, np.zeros(self.N_word, dtype=np.float32))) \n if ws_len == 0:\n raise Exception(\"word list should not be empty!\")\n elif ws_len == 1:\n q_val.append(emb_list[0])\n else:\n q_val.append(sum(emb_list) / float(ws_len))\n if self.trainable:\n val_embs.append([1] + q_val + [2]) #<BEG> and <END>\n val_len[i] = 1 + len(q_val) + 1\n elif not is_list or is_q:\n val_embs.append([np.zeros(self.N_word, dtype=np.float32)] + q_val + [np.zeros(self.N_word, dtype=np.float32)]) #<BEG> and <END>\n val_len[i] = 1 + len(q_val) + 1\n else:\n val_embs.append(q_val)\n val_len[i] = len(q_val)\n max_len = max(val_len)\n\n if self.trainable:\n val_tok_array = np.zeros((B, max_len), dtype=np.int64)\n for i in range(B):\n for t in range(len(val_embs[i])):\n val_tok_array[i,t] = val_embs[i][t]\n val_tok = torch.from_numpy(val_tok_array)\n if self.gpu:\n val_tok = val_tok.cuda()\n val_tok_var = Variable(val_tok)\n val_inp_var = self.embedding(val_tok_var)\n else:\n val_emb_array = np.zeros((B, max_len, self.N_word), dtype=np.float32)\n for i in range(B):\n for t in range(len(val_embs[i])):\n val_emb_array[i,t,:] = val_embs[i][t]\n val_inp = torch.from_numpy(val_emb_array)\n if self.gpu:\n val_inp = val_inp.cuda()\n val_inp_var = Variable(val_inp)\n return val_inp_var, val_len\n\n def gen_x_q_bert_batch(self, q):\n tokenized_q = []\n q_len = np.zeros(len(q), dtype=np.int64)\n for idx, one_q in enumerate(q):\n tokenized_one_q = self.bert_tokenizer.tokenize(\" \".join(one_q))\n indexed_one_q = self.bert_tokenizer.convert_tokens_to_ids(tokenized_one_q)\n tokenized_q.append(indexed_one_q)\n q_len[idx] = len(indexed_one_q)\n max_len = max(q_len)\n for tokenized_one_q in tokenized_q:\n tokenized_one_q += [0] * (max_len - len(tokenized_one_q))\n tokenized_q = torch.LongTensor(tokenized_q)\n if self.gpu:\n tokenized_q = tokenized_q.cuda()\n return tokenized_q, q_len\n\n def encode_one_q_with_bert(self, one_q, schema: Schema, table_graph):\n input_q = \"[CLS] \" + \" \".join(one_q)\n one_q_q_len = len(self.bert_tokenizer.tokenize(input_q))\n for table_num in table_graph:\n input_q += \" [SEP] \" + schema.get_table_name(table_num)\n # table_names = [tables[idx][table_num] for table_num in generated_graph]\n # input_q += \" \".join(table_names)\n\n sep_embeddings = list(range(len(table_graph)))\n for k_idx, k in enumerate(table_graph):\n for col_id in schema.get_child_col_ids(k):\n col_name = schema.get_col_name(col_id)\n input_q += \" [SEP] \" + col_name\n sep_embeddings.append(k_idx)\n\n tokenozed_one_q = self.bert_tokenizer.tokenize(input_q)\n indexed_one_q = self.bert_tokenizer.convert_tokens_to_ids(tokenozed_one_q)\n\n sep_embeddings_per_loc = []\n cur_sep_cnt = -1\n for token_idx, token in enumerate(tokenozed_one_q):\n if token == '[SEP]':\n cur_sep_cnt += 1\n sep_embeddings_per_loc.append(sep_embeddings[cur_sep_cnt])\n else:\n sep_embeddings_per_loc.append(-1)\n return one_q_q_len, indexed_one_q, sep_embeddings_per_loc\n\n def gen_col_batch(self, cols):\n ret = []\n col_len = np.zeros(len(cols), dtype=np.int64)\n\n names = []\n for b, one_cols in enumerate(cols):\n names = names + one_cols\n col_len[b] = len(one_cols)\n #TODO: what is the diff bw name_len and col_len?\n name_inp_var, name_len = self.str_list_to_batch(names)\n\n return name_inp_var, name_len, 
col_len\n\n\n def gen_agg_batch(self, q):\n B = len(q)\n ret = []\n agg_ops = ['none', 'maximum', 'minimum', 'count', 'total', 'average']\n for b in range(B):\n if self.trainable:\n ct_val = map(lambda x:self.w2i.get(x, 0), agg_ops)\n else:\n ct_val = map(lambda x:self.word_emb.get(x, np.zeros(self.N_word, dtype=np.float32)), agg_ops)\n ret.append(ct_val)\n\n agg_emb_array = np.zeros((B, 6, self.N_word), dtype=np.float32)\n for i in range(B):\n for t in range(len(ret[i])):\n agg_emb_array[i,t,:] = ret[i][t]\n agg_inp = torch.from_numpy(agg_emb_array)\n if self.gpu:\n agg_inp = agg_inp.cuda()\n agg_inp_var = Variable(agg_inp)\n\n return agg_inp_var\n\n\n def str_list_to_batch(self, str_list):\n \"\"\"get a list var of wemb of words in each column name in current bactch\"\"\"\n B = len(str_list)\n\n val_embs = []\n val_len = np.zeros(B, dtype=np.int64)\n for i, one_str in enumerate(str_list):\n if self.trainable:\n val = [self.w2i.get(x, 0) for x in one_str]\n else:\n val = [self.word_emb.get(x, np.zeros(\n self.N_word, dtype=np.float32)) for x in one_str]\n val_embs.append(val)\n val_len[i] = len(val)\n max_len = max(val_len)\n\n if self.trainable:\n val_tok_array = np.zeros((B, max_len), dtype=np.int64)\n for i in range(B):\n for t in range(len(val_embs[i])):\n val_tok_array[i,t] = val_embs[i][t]\n val_tok = torch.from_numpy(val_tok_array)\n if self.gpu:\n val_tok = val_tok.cuda()\n val_tok_var = Variable(val_tok)\n val_inp_var = self.embedding(val_tok_var)\n else:\n val_emb_array = np.zeros(\n (B, max_len, self.N_word), dtype=np.float32)\n for i in range(B):\n for t in range(len(val_embs[i])):\n val_emb_array[i,t,:] = val_embs[i][t]\n val_inp = torch.from_numpy(val_emb_array)\n if self.gpu:\n val_inp = val_inp.cuda()\n val_inp_var = Variable(val_inp)\n\n return val_inp_var, val_len\n\n\n def gen_x_history_batch(self, history):\n B = len(history)\n val_embs = []\n val_len = np.zeros(B, dtype=np.int64)\n for i, one_history in enumerate(history):\n history_val = []\n for item in one_history:\n #col\n if isinstance(item, list) or isinstance(item, tuple):\n emb_list = []\n ws = item[0].split() + item[1].split()\n ws_len = len(ws)\n for w in ws:\n emb_list.append(self.word_find(w))\n if ws_len == 0:\n raise Exception(\"word list should not be empty!\")\n elif ws_len == 1:\n history_val.append(emb_list[0])\n else:\n history_val.append(sum(emb_list) / float(ws_len))\n #ROOT\n elif isinstance(item,str):\n if item == \"ROOT\":\n item = \"root\"\n elif item == \"asc\":\n item = \"ascending\"\n elif item == \"desc\":\n item = \"descending\"\n if item in (\n \"none\", \"select\", \"from\", \"where\", \"having\", \"limit\", \"intersect\", \"except\", \"union\", 'not',\n 'between', '=', '>', '<', 'in', 'like', 'is', 'exists', 'root', 'ascending', 'descending'):\n history_val.append(self.word_find(item))\n elif item == \"orderBy\":\n history_val.append((self.word_find(\"order\") +\n self.word_find(\"by\")) / 2)\n elif item == \"groupBy\":\n history_val.append((self.word_find(\"group\") +\n self.word_find(\"by\")) / 2)\n elif item in ('>=', '<=', '!='):\n history_val.append((self.word_find(item[0]) +\n self.word_find(item[1])) / 2)\n elif isinstance(item,int):\n history_val.append(self.word_find(AGG_OPS[item]))\n else:\n print((\"Warning: unsupported data type in history! 
{}\".format(item)))\n\n val_embs.append(history_val)\n val_len[i] = len(history_val)\n max_len = max(val_len)\n\n val_emb_array = np.zeros((B, max_len, self.N_word), dtype=np.float32)\n for i in range(B):\n for t in range(len(val_embs[i])):\n val_emb_array[i, t, :] = val_embs[i][t]\n val_inp = torch.from_numpy(val_emb_array)\n if self.gpu:\n val_inp = val_inp.cuda()\n val_inp_var = Variable(val_inp)\n\n return val_inp_var, val_len\n\n def gen_bert_batch_with_table(self, q, schemas: List[Schema], labels):\n tokenized_q = []\n q_len = []\n q_q_len = []\n anses = []\n sep_embeddings = []\n for idx, one_q in enumerate(q):\n if random.randint(0, 100) < 7:\n true_graph = 1.\n generated_graph = str_graph_to_num_graph(labels[idx])\n else:\n true_graph = 0.\n generated_graph = generate_random_graph_generate(schemas[idx])\n if graph_checker(generated_graph, labels[idx], schemas[idx]):\n true_graph = 1.\n anses.append(true_graph)\n\n one_q_q_len, indexed_one_q, one_sep_embeddings \\\n = self.encode_one_q_with_bert(one_q, schemas[idx], generated_graph)\n q_q_len.append(one_q_q_len)\n tokenized_q.append(indexed_one_q)\n q_len.append(len(indexed_one_q))\n sep_embeddings.append(one_sep_embeddings)\n\n max_len = max(q_len)\n for tokenized_one_q in tokenized_q:\n tokenized_one_q += [0] * (max_len - len(tokenized_one_q))\n tokenized_q = torch.LongTensor(tokenized_q)\n anses = torch.tensor(anses)\n if self.gpu:\n tokenized_q = tokenized_q.cuda()\n anses = anses.cuda()\n return tokenized_q, q_len, q_q_len, anses, sep_embeddings\n\n def gen_bert_for_eval(self, one_q, schema: Schema):\n tokenized_q = []\n sep_embeddings = []\n table_graph_lists = []\n\n for tab in schema.get_all_table_ids():\n table_graph_lists += list(generate_four_hop_path_from_seed(tab, schema))\n\n simple_graph_lists = []\n for graph in table_graph_lists:\n new_graph = deepcopy(graph)\n for k in new_graph:\n for idx, l in enumerate(new_graph[k]):\n new_graph[k][idx] = l[0]\n simple_graph_lists.append(new_graph)\n B = len(table_graph_lists)\n q_len = []\n q_q_len = []\n for b in range(B):\n\n one_q_q_len, indexed_one_q, one_sep_embeddings \\\n = self.encode_one_q_with_bert(one_q, schema, simple_graph_lists[b])\n q_q_len.append(one_q_q_len)\n tokenized_q.append(indexed_one_q)\n q_len.append(len(indexed_one_q))\n sep_embeddings.append(one_sep_embeddings)\n\n max_len = max(q_len)\n for tokenized_one_q in tokenized_q:\n tokenized_one_q += [0] * (max_len - len(tokenized_one_q))\n tokenized_q = torch.LongTensor(tokenized_q)\n if self.gpu:\n tokenized_q = tokenized_q.cuda()\n return tokenized_q, q_len, q_q_len, simple_graph_lists, table_graph_lists, sep_embeddings\n\n def gen_word_list_embedding(self,words,B):\n val_emb_array = np.zeros((B,len(words), self.N_word), dtype=np.float32)\n for i,word in enumerate(words):\n if len(word.split()) == 1:\n emb = self.word_find(word)\n else:\n word = word.split()\n emb = (self.word_find(word[0]) + self.word_find(word[1]))/2\n for b in range(B):\n val_emb_array[b,i,:] = emb\n val_inp = torch.from_numpy(val_emb_array)\n if self.gpu:\n val_inp = val_inp.cuda()\n val_inp_var = Variable(val_inp)\n return val_inp_var\n\n def gen_x_q_batch(self, q):\n B = len(q)\n val_embs = []\n val_len = np.zeros(B, dtype=np.int64)\n for i, one_q in enumerate(q):\n q_val = []\n for ws in one_q:\n q_val.append(self.word_find(ws))\n\n val_embs.append([np.zeros(self.N_word, dtype=np.float32)] + q_val + [np.zeros(self.N_word, dtype=np.float32)]) #<BEG> and <END>\n val_len[i] = 1 + len(q_val) + 1\n max_len = max(val_len)\n\n 
val_emb_array = np.zeros((B, max_len, self.N_word), dtype=np.float32)\n for i in range(B):\n for t in range(len(val_embs[i])):\n val_emb_array[i, t, :] = val_embs[i][t]\n val_inp = torch.from_numpy(val_emb_array)\n if self.gpu:\n val_inp = val_inp.cuda()\n val_inp_var = Variable(val_inp)\n\n return val_inp_var, val_len" ]
[ [ "torch.LongTensor", "torch.from_numpy", "torch.tensor", "numpy.load", "numpy.array", "numpy.zeros", "torch.autograd.Variable" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
kylebarron/dask
[ "8663c6b7813fbdcaaa85d4fdde04ff42b1bb6ed0" ]
[ "dask/array/utils.py" ]
[ "import difflib\nimport functools\nimport math\nimport numbers\nimport os\nimport warnings\n\nimport numpy as np\nfrom tlz import frequencies, concat\n\nfrom .core import Array\nfrom ..highlevelgraph import HighLevelGraph\nfrom ..utils import has_keyword, ignoring, is_arraylike\n\ntry:\n AxisError = np.AxisError\nexcept AttributeError:\n try:\n np.array([0]).sum(axis=5)\n except Exception as e:\n AxisError = type(e)\n\n\ndef _is_cupy_type(x):\n # TODO: avoid explicit reference to CuPy\n return \"cupy\" in str(type(x))\n\n\ndef normalize_to_array(x):\n if _is_cupy_type(x):\n return x.get()\n else:\n return x\n\n\ndef meta_from_array(x, ndim=None, dtype=None):\n \"\"\"Normalize an array to appropriate meta object\n\n Parameters\n ----------\n x: array-like, callable\n Either an object that looks sufficiently like a Numpy array,\n or a callable that accepts shape and dtype keywords\n ndim: int\n Number of dimensions of the array\n dtype: Numpy dtype\n A valid input for ``np.dtype``\n\n Returns\n -------\n array-like with zero elements of the correct dtype\n \"\"\"\n # If using x._meta, x must be a Dask Array, some libraries (e.g. zarr)\n # implement a _meta attribute that are incompatible with Dask Array._meta\n if hasattr(x, \"_meta\") and isinstance(x, Array):\n x = x._meta\n\n if dtype is None and x is None:\n raise ValueError(\"You must specify the meta or dtype of the array\")\n\n if np.isscalar(x):\n x = np.array(x)\n\n if x is None:\n x = np.ndarray\n elif dtype is None and hasattr(x, \"dtype\"):\n dtype = x.dtype\n\n if isinstance(x, type):\n x = x(shape=(0,) * (ndim or 0), dtype=dtype)\n\n if (\n not hasattr(x, \"shape\")\n or not hasattr(x, \"dtype\")\n or not isinstance(x.shape, tuple)\n ):\n return x\n\n if isinstance(x, list) or isinstance(x, tuple):\n ndims = [\n 0\n if isinstance(a, numbers.Number)\n else a.ndim\n if hasattr(a, \"ndim\")\n else len(a)\n for a in x\n ]\n a = [a if nd == 0 else meta_from_array(a, nd) for a, nd in zip(x, ndims)]\n return a if isinstance(x, list) else tuple(x)\n\n if ndim is None:\n ndim = x.ndim\n\n try:\n meta = x[tuple(slice(0, 0, None) for _ in range(x.ndim))]\n if meta.ndim != ndim:\n if ndim > x.ndim:\n meta = meta[(Ellipsis,) + tuple(None for _ in range(ndim - meta.ndim))]\n meta = meta[tuple(slice(0, 0, None) for _ in range(meta.ndim))]\n elif ndim == 0:\n meta = meta.sum()\n else:\n meta = meta.reshape((0,) * ndim)\n except Exception:\n meta = np.empty((0,) * ndim, dtype=dtype or x.dtype)\n\n if np.isscalar(meta):\n meta = np.array(meta)\n\n if dtype and meta.dtype != dtype:\n try:\n meta = meta.astype(dtype)\n except ValueError as e:\n if (\n any(\n s in str(e)\n for s in [\n \"invalid literal\",\n \"could not convert string to float\",\n ]\n )\n and meta.dtype.kind in \"SU\"\n ):\n meta = np.array([]).astype(dtype)\n else:\n raise e\n\n return meta\n\n\ndef compute_meta(func, _dtype, *args, **kwargs):\n with np.errstate(all=\"ignore\"), warnings.catch_warnings():\n warnings.simplefilter(\"ignore\", category=RuntimeWarning)\n\n args_meta = [meta_from_array(x) if is_arraylike(x) else x for x in args]\n kwargs_meta = {\n k: meta_from_array(v) if is_arraylike(v) else v for k, v in kwargs.items()\n }\n\n # todo: look for alternative to this, causes issues when using map_blocks()\n # with np.vectorize, such as dask.array.routines._isnonzero_vec().\n if isinstance(func, np.vectorize):\n meta = func(*args_meta)\n else:\n try:\n # some reduction functions need to know they are computing meta\n if has_keyword(func, \"computing_meta\"):\n 
kwargs_meta[\"computing_meta\"] = True\n meta = func(*args_meta, **kwargs_meta)\n except TypeError as e:\n if any(\n s in str(e)\n for s in [\n \"unexpected keyword argument\",\n \"is an invalid keyword for\",\n \"Did not understand the following kwargs\",\n ]\n ):\n raise\n else:\n return None\n except ValueError as e:\n # min/max functions have no identity, attempt to use the first meta\n if \"zero-size array to reduction operation\" in str(e):\n meta = args_meta[0]\n else:\n return None\n except Exception:\n return None\n\n if _dtype and getattr(meta, \"dtype\", None) != _dtype:\n with ignoring(AttributeError):\n meta = meta.astype(_dtype)\n\n if np.isscalar(meta):\n meta = np.array(meta)\n\n return meta\n\n\ndef allclose(a, b, equal_nan=False, **kwargs):\n a = normalize_to_array(a)\n b = normalize_to_array(b)\n if getattr(a, \"dtype\", None) != \"O\":\n return np.allclose(a, b, equal_nan=equal_nan, **kwargs)\n if equal_nan:\n return a.shape == b.shape and all(\n np.isnan(b) if np.isnan(a) else a == b for (a, b) in zip(a.flat, b.flat)\n )\n return (a == b).all()\n\n\ndef same_keys(a, b):\n def key(k):\n if isinstance(k, str):\n return (k, -1, -1, -1)\n else:\n return k\n\n return sorted(a.dask, key=key) == sorted(b.dask, key=key)\n\n\ndef _not_empty(x):\n return x.shape and 0 not in x.shape\n\n\ndef _check_dsk(dsk):\n \"\"\" Check that graph is well named and non-overlapping \"\"\"\n if not isinstance(dsk, HighLevelGraph):\n return\n\n dsk.validate()\n assert all(isinstance(k, (tuple, str)) for k in dsk.layers)\n freqs = frequencies(concat(dsk.layers.values()))\n non_one = {k: v for k, v in freqs.items() if v != 1}\n assert not non_one, non_one\n\n\ndef assert_eq_shape(a, b, check_nan=True):\n for aa, bb in zip(a, b):\n if math.isnan(aa) or math.isnan(bb):\n if check_nan:\n assert math.isnan(aa) == math.isnan(bb)\n else:\n assert aa == bb\n\n\ndef _get_dt_meta_computed(x, check_shape=True, check_graph=True):\n x_original = x\n x_meta = None\n x_computed = None\n\n if isinstance(x, Array):\n assert x.dtype is not None\n adt = x.dtype\n if check_graph:\n _check_dsk(x.dask)\n x_meta = getattr(x, \"_meta\", None)\n x = x.compute(scheduler=\"sync\")\n x_computed = x\n if hasattr(x, \"todense\"):\n x = x.todense()\n if not hasattr(x, \"dtype\"):\n x = np.array(x, dtype=\"O\")\n if _not_empty(x):\n assert x.dtype == x_original.dtype\n if check_shape:\n assert_eq_shape(x_original.shape, x.shape, check_nan=False)\n else:\n if not hasattr(x, \"dtype\"):\n x = np.array(x, dtype=\"O\")\n adt = getattr(x, \"dtype\", None)\n\n return x, adt, x_meta, x_computed\n\n\ndef assert_eq(a, b, check_shape=True, check_graph=True, check_meta=True, **kwargs):\n a_original = a\n b_original = b\n\n a, adt, a_meta, a_computed = _get_dt_meta_computed(\n a, check_shape=check_shape, check_graph=check_graph\n )\n b, bdt, b_meta, b_computed = _get_dt_meta_computed(\n b, check_shape=check_shape, check_graph=check_graph\n )\n\n if str(adt) != str(bdt):\n # Ignore check for matching length of flexible dtypes, since Array._meta\n # can't encode that information\n if adt.type == bdt.type and not (adt.type == np.bytes_ or adt.type == np.str_):\n diff = difflib.ndiff(str(adt).splitlines(), str(bdt).splitlines())\n raise AssertionError(\n \"string repr are different\" + os.linesep + os.linesep.join(diff)\n )\n\n try:\n assert (\n a.shape == b.shape\n ), f\"a and b have different shapes (a: {a.shape}, b: {b.shape})\"\n if check_meta:\n if hasattr(a, \"_meta\") and hasattr(b, \"_meta\"):\n assert_eq(a._meta, b._meta)\n if 
hasattr(a_original, \"_meta\"):\n msg = (\n f\"compute()-ing 'a' changes its number of dimensions \"\n f\"(before: {a_original._meta.ndim}, after: {a.ndim})\"\n )\n assert a_original._meta.ndim == a.ndim, msg\n if a_meta is not None:\n msg = (\n f\"compute()-ing 'a' changes its type \"\n f\"(before: {type(a_original._meta)}, after: {type(a_meta)})\"\n )\n assert type(a_original._meta) == type(a_meta), msg\n if not (np.isscalar(a_meta) or np.isscalar(a_computed)):\n msg = (\n f\"compute()-ing 'a' results in a different type than implied by its metadata \"\n f\"(meta: {type(a_meta)}, computed: {type(a_computed)})\"\n )\n assert type(a_meta) == type(a_computed), msg\n if hasattr(b_original, \"_meta\"):\n msg = (\n f\"compute()-ing 'b' changes its number of dimensions \"\n f\"(before: {b_original._meta.ndim}, after: {b.ndim})\"\n )\n assert b_original._meta.ndim == b.ndim, msg\n if b_meta is not None:\n msg = (\n f\"compute()-ing 'b' changes its type \"\n f\"(before: {type(b_original._meta)}, after: {type(b_meta)})\"\n )\n assert type(b_original._meta) == type(b_meta), msg\n if not (np.isscalar(b_meta) or np.isscalar(b_computed)):\n msg = (\n f\"compute()-ing 'b' results in a different type than implied by its metadata \"\n f\"(meta: {type(b_meta)}, computed: {type(b_computed)})\"\n )\n assert type(b_meta) == type(b_computed), msg\n msg = \"found values in 'a' and 'b' which differ by more than the allowed amount\"\n assert allclose(a, b, **kwargs), msg\n return True\n except TypeError:\n pass\n\n c = a == b\n\n if isinstance(c, np.ndarray):\n assert c.all()\n else:\n assert c\n\n return True\n\n\ndef safe_wraps(wrapped, assigned=functools.WRAPPER_ASSIGNMENTS):\n \"\"\"Like functools.wraps, but safe to use even if wrapped is not a function.\n\n Only needed on Python 2.\n \"\"\"\n if all(hasattr(wrapped, attr) for attr in assigned):\n return functools.wraps(wrapped, assigned=assigned)\n else:\n return lambda x: x\n\n\ndef empty_like_safe(a, shape, **kwargs):\n \"\"\"\n Return np.empty_like(a, shape=shape, **kwargs) if the shape argument\n is supported (requires NumPy >= 1.17), otherwise falls back to\n using the old behavior, returning np.empty(shape, **kwargs).\n \"\"\"\n try:\n return np.empty_like(a, shape=shape, **kwargs)\n except TypeError:\n return np.empty(shape, **kwargs)\n\n\ndef full_like_safe(a, fill_value, shape, **kwargs):\n \"\"\"\n Return np.full_like(a, fill_value, shape=shape, **kwargs) if the\n shape argument is supported (requires NumPy >= 1.17), otherwise\n falls back to using the old behavior, returning\n np.full(shape, fill_value, **kwargs).\n \"\"\"\n try:\n return np.full_like(a, fill_value, shape=shape, **kwargs)\n except TypeError:\n return np.full(shape, fill_value, **kwargs)\n\n\ndef ones_like_safe(a, shape, **kwargs):\n \"\"\"\n Return np.ones_like(a, shape=shape, **kwargs) if the shape argument\n is supported (requires NumPy >= 1.17), otherwise falls back to\n using the old behavior, returning np.ones(shape, **kwargs).\n \"\"\"\n try:\n return np.ones_like(a, shape=shape, **kwargs)\n except TypeError:\n return np.ones(shape, **kwargs)\n\n\ndef zeros_like_safe(a, shape, **kwargs):\n \"\"\"\n Return np.zeros_like(a, shape=shape, **kwargs) if the shape argument\n is supported (requires NumPy >= 1.17), otherwise falls back to\n using the old behavior, returning np.zeros(shape, **kwargs).\n \"\"\"\n try:\n return np.zeros_like(a, shape=shape, **kwargs)\n except TypeError:\n return np.zeros(shape, **kwargs)\n\n\ndef _array_like_safe(np_func, da_func, a, like, **kwargs):\n 
if like is a and hasattr(a, \"__array_function__\"):\n return a\n\n if isinstance(like, Array):\n return da_func(a, **kwargs)\n elif isinstance(a, Array):\n if _is_cupy_type(a._meta):\n a = a.compute(scheduler=\"sync\")\n\n try:\n return np_func(a, like=meta_from_array(like), **kwargs)\n except TypeError:\n return np_func(a, **kwargs)\n\n\ndef array_safe(a, like, **kwargs):\n \"\"\"\n If `a` is `dask.array`, return `dask.array.asarray(a, **kwargs)`,\n otherwise return `np.asarray(a, like=like, **kwargs)`, dispatching\n the call to the library that implements the like array. Note that\n when `a` is a `dask.Array` backed by `cupy.ndarray` but `like`\n isn't, this function will call `a.compute(scheduler=\"sync\")`\n before `np.array`, as downstream libraries are unlikely to know how\n to convert a `dask.Array` and CuPy doesn't implement `__array__` to\n prevent implicit copies to host.\n \"\"\"\n from .routines import array\n\n return _array_like_safe(np.array, array, a, like, **kwargs)\n\n\ndef asarray_safe(a, like, **kwargs):\n \"\"\"\n If a is dask.array, return dask.array.asarray(a, **kwargs),\n otherwise return np.asarray(a, like=like, **kwargs), dispatching\n the call to the library that implements the like array. Note that\n when a is a dask.Array but like isn't, this function will call\n a.compute(scheduler=\"sync\") before np.asarray, as downstream\n libraries are unlikely to know how to convert a dask.Array.\n \"\"\"\n from .core import asarray\n\n return _array_like_safe(np.asarray, asarray, a, like, **kwargs)\n\n\ndef asanyarray_safe(a, like, **kwargs):\n \"\"\"\n If a is dask.array, return dask.array.asanyarray(a, **kwargs),\n otherwise return np.asanyarray(a, like=like, **kwargs), dispatching\n the call to the library that implements the like array. Note that\n when a is a dask.Array but like isn't, this function will call\n a.compute(scheduler=\"sync\") before np.asanyarray, as downstream\n libraries are unlikely to know how to convert a dask.Array.\n \"\"\"\n from .core import asanyarray\n\n return _array_like_safe(np.asanyarray, asanyarray, a, like, **kwargs)\n\n\ndef validate_axis(axis, ndim):\n \"\"\" Validate an input to axis= keywords \"\"\"\n if isinstance(axis, (tuple, list)):\n return tuple(validate_axis(ax, ndim) for ax in axis)\n if not isinstance(axis, numbers.Integral):\n raise TypeError(\"Axis value must be an integer, got %s\" % axis)\n if axis < -ndim or axis >= ndim:\n raise AxisError(\n \"Axis %d is out of bounds for array of dimension %d\" % (axis, ndim)\n )\n if axis < 0:\n axis += ndim\n return axis\n\n\ndef svd_flip(u, v, u_based_decision=False):\n \"\"\"Sign correction to ensure deterministic output from SVD.\n\n This function is useful for orienting eigenvectors such that\n they all lie in a shared but arbitrary half-space. 
This makes\n it possible to ensure that results are equivalent across SVD\n implementations and random number generator states.\n\n Parameters\n ----------\n\n u : (M, K) array_like\n Left singular vectors (in columns)\n v : (K, N) array_like\n Right singular vectors (in rows)\n u_based_decision: bool\n Whether or not to choose signs based\n on `u` rather than `v`, by default False\n\n Returns\n -------\n\n u : (M, K) array_like\n Left singular vectors with corrected sign\n v: (K, N) array_like\n Right singular vectors with corrected sign\n \"\"\"\n # Determine half-space in which all singular vectors\n # lie relative to an arbitrary vector; summation\n # equivalent to dot product with row vector of ones\n if u_based_decision:\n dtype = u.dtype\n signs = np.sum(u, axis=0, keepdims=True)\n else:\n dtype = v.dtype\n signs = np.sum(v, axis=1, keepdims=True).T\n signs = 2.0 * ((signs >= 0) - 0.5).astype(dtype)\n # Force all singular vectors into same half-space\n u, v = u * signs, v * signs.T\n return u, v\n\n\ndef _is_nep18_active():\n class A:\n def __array_function__(self, *args, **kwargs):\n return True\n\n try:\n return np.concatenate([A()])\n except ValueError:\n return False\n\n\nIS_NEP18_ACTIVE = _is_nep18_active()\n" ]
[ [ "numpy.ones_like", "numpy.allclose", "numpy.isnan", "numpy.empty_like", "numpy.full", "numpy.full_like", "numpy.ones", "numpy.zeros_like", "numpy.isscalar", "numpy.errstate", "numpy.array", "numpy.zeros", "numpy.sum", "numpy.empty" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
epfl-lts2/spikexplore
[ "05c5ff1aa1cca3f77126c0de9a1b6b9360813afd" ]
[ "spikexplore/graph.py" ]
[ "import pandas as pd\nimport networkx as nx\nimport numpy as np\nimport json\nimport logging\nfrom .helpers import combine_dicts\nfrom datetime import datetime, timedelta\nimport community\nfrom tqdm import tqdm\n\n\nlogger = logging.getLogger(__name__)\n\n\ndef convert_to_json(edge_df):\n \"\"\"\n Check if column type is list or dict and convert it to json\n list or dict can not be saved using gexf or graphml format.\n \"\"\"\n edge_df_str = edge_df.copy()\n for idx, col in enumerate(edge_df.columns):\n first_row_element = edge_df.iloc[0, idx]\n if isinstance(first_row_element, list) or isinstance(first_row_element, dict):\n edge_df_str[col] = edge_df[col].apply(json.dumps)\n logger.debug('Field \"{}\" of class {} converted to json string'.format(col, type(first_row_element)))\n return edge_df_str\n\n\ndef graph_from_edgeslist(edge_df, min_weight=0):\n logger.debug('Creating the graph from the edge list')\n # The indices in the dataframe are source and target for the edges\n G = nx.from_pandas_edgelist(edge_df[edge_df['weight'] >= min_weight],\n source='source', target='target', create_using=nx.DiGraph)\n logger.info('Nb of nodes: {}'.format(G.number_of_nodes()))\n return G\n\n\ndef attributes_tojson(data_dic):\n for propname, propdic in data_dic.items():\n for key, value in propdic.items():\n if isinstance(value, list):\n data_dic[propname][key] = json.dumps(value)\n else:\n data_dic[propname][key] = value\n return data_dic\n\n\ndef add_node_attributes(graph, node_df, attr_dic=None, attr_name=''):\n node_dic = node_df.to_dict()\n\n node_dic = attributes_tojson(node_dic)\n for propname, propdic in node_dic.items():\n nx.set_node_attributes(graph, propdic, name=propname)\n if attr_dic:\n nx.set_node_attributes(graph, attr_dic, name=attr_name)\n return graph\n\n\ndef add_edges_attributes(g, edges_df, drop_cols=None):\n if edges_df.empty:\n return g\n if drop_cols:\n edge_attr_df = edges_df.drop(columns=drop_cols)\n else:\n edge_attr_df = edges_df\n edge_attr_df['ii'] = edge_attr_df[['source', 'target']].apply(tuple, axis=1)\n edge_dic = edge_attr_df.set_index('ii').drop(columns=['source', 'target']).to_dict()\n\n for propname, propdic in edge_dic.items():\n nx.set_edge_attributes(g, propdic, name=propname)\n return g\n\n\ndef reduce_graph(g, degree_min):\n # Drop node with small degree\n remove = [node for node, degree in dict(g.degree()).items() if degree < degree_min]\n g.remove_nodes_from(remove)\n logger.info('Nb of nodes after removing nodes with degree strictly smaller than {}: {}'.format(degree_min,\n g.number_of_nodes()))\n isolates = list(nx.isolates(g))\n g.remove_nodes_from(isolates)\n logger.info('removed {} isolated nodes.'.format(len(isolates)))\n return g\n\n\ndef detect_communities(G):\n # first compute the best partition\n if isinstance(G, nx.DiGraph):\n Gu = G.to_undirected()\n else:\n Gu = G\n partition = community.best_partition(Gu, weight='weight')\n if not partition.values():\n logger.warning('No communities found in graph')\n return G, {}\n nx.set_node_attributes(G, partition, name='community')\n logger.debug('Communities saved on the graph as node attributes.')\n nb_partitions = max(partition.values()) + 1\n logger.info('Nb of partitions: {}'.format(nb_partitions))\n # Create a dictionary of subgraphs, one per community\n community_dic = {}\n for idx in range(nb_partitions):\n subgraph = G.subgraph([key for (key, value) in partition.items() if value == idx])\n community_dic[idx] = subgraph\n # clusters_modularity = community.modularity(partition, Gu)\n return G, 
community_dic\n\n\ndef remove_small_communities(G, community_dic, min_size):\n community_tmp = {k: v.copy() for k, v in community_dic.items()}\n nb_removed = 0\n for key in community_tmp:\n graph = community_tmp[key]\n if graph.number_of_nodes() <= min_size:\n G.remove_nodes_from(graph.nodes())\n nb_removed += 1\n logger.info('removed {} community(ies) smaller than {} nodes.'.format(nb_removed, min_size))\n return G\n\n\ndef process_hop(graph_handle, node_list, nodes_info_acc):\n \"\"\" collect the tweets and tweet info of the users in the list username_list\n \"\"\"\n new_node_dic = {}\n total_edges_df = pd.DataFrame()\n total_nodes_df = pd.DataFrame()\n\n # Display progress bar if needed\n disable_tqdm = logging.root.level >= logging.INFO\n logger.info('processing next hop with {} nodes'.format(len(node_list)))\n for node in tqdm(node_list, disable=disable_tqdm):\n # Collect neighbors for the next hop\n node_info, edges_df = graph_handle.get_neighbors(node)\n node_info, edges_df = graph_handle.filter(node_info, edges_df)\n\n total_nodes_df = total_nodes_df.append(node_info.get_nodes())\n nodes_info_acc.update(node_info) # add new info\n\n total_edges_df = total_edges_df.append(edges_df)\n neighbors_dic = graph_handle.neighbors_with_weights(edges_df)\n new_node_dic = combine_dicts(new_node_dic, neighbors_dic)\n\n return new_node_dic, total_edges_df, total_nodes_df, nodes_info_acc\n\n\ndef handle_spikyball_neighbors(graph, backend, remove=True, node_acc=None):\n # Complete the info of the nodes not collected\n sp_neighbors = [node for node, data in graph.nodes(data=True) if 'spikyball_hop' not in data]\n logger.info('Number of neighbors of the spiky ball: {}'.format(len(sp_neighbors)))\n\n # 2 options: 1) remove the neighbors or 2) rerun the collection to collect the missing node info\n if remove:\n # Option 1:\n logger.info('Removing spiky ball neighbors...')\n graph.remove_nodes_from(sp_neighbors)\n logger.info('Number of nodes after removal: {}'.format(graph.number_of_nodes()))\n else:\n # TODO this needs checking\n # Option 2: collect the missing node data\n logger.info('Collecting info for neighbors...')\n new_nodes_founds, edges_df, nodes_df, node_acc = process_hop(backend, sp_neighbors, node_acc)\n graph = add_node_attributes(graph, nodes_df)\n sp_nodes_dic = {node: -1 for node in sp_neighbors}\n nx.set_node_attributes(graph, sp_nodes_dic, name='spikyball_hop')\n logger.info('Node info added to the graph.')\n # Check integrity\n for node, data in graph.nodes(data=True):\n if 'spikyball_hop' not in data:\n logger.error('Missing information for node ', node)\n return graph\n\n\ndef compute_meantime(date_list):\n # return mean time and standard deviation of a list of dates in days\n # import numpy as np\n d_list = [datetime.strptime(dt, '%Y-%m-%d %H:%M:%S') for dt in date_list]\n second_list = [x.timestamp() for x in d_list]\n meand = np.mean(second_list)\n stdd = np.std(second_list)\n return datetime.fromtimestamp(meand), timedelta(seconds=stdd)\n\n\ndef save_graph(graph, graphfilename):\n nx.write_gexf(graph, graphfilename)\n logger.debug('Graph saved to', graphfilename)\n" ]
[ [ "numpy.std", "numpy.mean", "pandas.DataFrame" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] } ]
yyj2013/RIR-Generator
[ "cbb219582585db1c17c72a913fc97c0ba9087cca" ]
[ "example_1.py" ]
[ "import numpy as np\nimport rirgenerator as RG\nimport matplotlib.pyplot as plt\n\nc = 340\t\t\t\t\t# Sound velocity (m/s)\nfs = 16000\t\t\t\t# Sample frequency (samples/s)\nr = [2,1.5,2]\t\t\t# Receiver position [x y z] (m)\ns = [2,3.5,2]\t\t\t# Source position [x y z] (m)\nL = [5,4,6]\t\t\t\t# Room dimensions [x y z] (m)\nbeta = 0.4\t\t\t\t# Reverberation time (s)\nn = 4096\t\t\t\t# Number of samples\n\nh = RG.rir_generator(c, fs, r, s, L, beta=beta, nsample=n)\n\nplt.plot(h[0,:])\nplt.show()\n" ]
[ [ "matplotlib.pyplot.plot", "matplotlib.pyplot.show" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
duartecosta1/first_repository
[ "6e31c8499c9d723ac876c9796c990de4a962174b", "6e31c8499c9d723ac876c9796c990de4a962174b" ]
[ "histogram.py", "find_days_above_percentile.py" ]
[ "import xarray\nfrom scipy.stats import norm\nimport numpy as np\nimport matplotlib.mlab as mlab\nimport matplotlib.pyplot as plt\n\nlist_indices = ['tasmax']\nindices = list_indices[0]\n\nexp = ['CTL_E0', '121GPsc_E0']\nexp1 = '121GPsc_E0' \n\nfor i,ind in enumerate(exp): \n#print ind\n\n#files = sorted(glob.glob('/g/data3/w97/dc8106/AMZ_def_EXPs/'+exp+'/tasmax_sc-only_1978-2011_'+exp+'.nc', chunks={'time':1000})\n#analysis/ensmean/'+indices+'/'+indices+'**_*diff_to_CTL.nc'))\n data = xarray.open_dataset('/g/data3/w97/dc8106/AMZ_def_EXPs/'+exp[i]+'/tasmax_sc-only_1978-2011_'+exp[i]+'.nc', chunks={'time':1000})\n#data1 = xarray.open_dataset('/g/data3/w97/dc8106/AMZ_def_EXPs/'+exp1+'/tasmax_sc-only_1978-2011_'+exp1+'.nc', chunks={'time':1000})\n tasmax = data.tasmax - 272.15\n#tasmax1 = data1.tasmax - 272.15\n#tasmin = data.tasmin\n\n lat = data.lat\n lon = data.lon\n lons,lats = np.meshgrid(lon,lat)\n\n ind_label = indices\n\n print(tasmax)\n print(\"tasmax\")\n print(tasmax.stack(dim=[\"lat\",\"lon\",\"time\"]))\n\n mu, sigma = tasmax.mean().values, tasmax.std().values\n\n# Print the values of mu and sigma which forces them to be evaluated so I can see how long it takes to do this, then I can tune the time chunking\n print(mu,sigma)\n\n# the histogram of the data \n n, bins, patches = plt.hist(tasmax.stack(dim=[\"lat\",\"lon\",\"time\"]), bins = 1000, normed=1, facecolor='green', alpha=0.75)\n plt.xticks(np.arange(20, 50, 2.0))\n print(n)\n print(bins)\n print(patches)\n\n# add a 'best fit' line \n y = mlab.normpdf( bins, mu, sigma)\n print(y)\n\n l = plt.plot(bins, y, 'r--', label=exp[0], linewidth=1)\n #l_legend = plt.legend(handles=l, loc=1)\nl1 = plt.plot(bins, y, 'b--', label=exp[1], linewidth=1) \n#l1_legend = plt.legend(handles=l1, loc=1)\n\nplt.legend(bbox_to_anchor=(1, 1), loc=1, borderaxespad=0.)\nunits = 'Celsius'\n\nplt.axis([20, 50, 0, 0.18])\nplt.xlabel(indices+' in '+units)\n\nplt.suptitle(ind_label+ ' in ' +ind, fontsize=16) \n \nplt.savefig('/g/data3/w97/dc8106/images/'+ind_label+'_'+exp[0]+'_'+exp[1], format='png') \n\n# plt.ylabel('Probability')\n# plt.title(r'$\\mathrm{Histogram of '+indices+'\\ \\mu='+mu+',\\ \\sigma='+sigma+')')\n#plt.axis([25, 45, 0, 0.03]) \n#plt.grid(True) \n\nplt.show()\n", "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri May 6 14:54:23 2016\n\n@author: annaukkola\n\"\"\"\n\n\ndef find_days_above_percentile(mod_vec, no_days, percentile=100, other_vars={},\n lags=float('nan'), miss_val=float('nan')):\n\n\n # source packages\n import sys\n import os\n import numpy as np\n\n\n ########################\n ### Find tasmax days ###\n ########################\n\n #Convert mod_vec into np.array in case not already, indexing might fail otherwise\n mod_vec = np.asarray(mod_vec)\n \n\n ### Calculate percentile threshold ###\n\n\n ### Find tasmax days above or equal to threshold ###\n \n #Weird problem using percentile = 100.0, hacky fix\n #Percentile 100, i.e. 
max value\n if percentile == 100.0:\n \n #if found several, pick first one\n hot_days = np.where(mod_vec == np.max(mod_vec))[0]\n \n #If found more than one Txx, pick first instance\n if len(hot_days) > 1:\n hot_days = np.asarray(hot_days[0])\n \n #All other percentiles \n else:\n threshold = np.percentile(mod_vec, percentile)\n hot_days = np.where(mod_vec >= threshold)[0]\n \n\n #Initialise as the correct size\n tasmax = np.zeros(no_days) + miss_val\n\n\n #Then find temperatures of hot days\n tasmax[0:len(hot_days)] = mod_vec[hot_days]\n\n\n #Collate outputs (add 1 to hot day indices for normal 1-based indexing)\n #Similarly make hot days the correct size for outputting\n hot_days_out = np.zeros(no_days) + miss_val \n hot_days_out[0:len(hot_days)] = hot_days + 1 \n\n outs={'tasmax': tasmax, 'hot_day_ind': hot_days_out}\n\n\n #######################################################\n ### Find additional variables on the day and lagged ###\n #######################################################\n\n\n #If other variables to process\n #if len(other_vars) > 0 :\n \n #Get variable names\n #keys = other_vars.keys()\n \n #Initialise dictionary for outputs\n #vars_on_the_day = {}\n \n \n #Loop through variables\n #for k in other_vars:\n \n #Convert dict to np.array or indexing might fail\n #data = np.array(other_vars[k])\n \n #Get hot day values\n #vars_on_the_day[k] = data[hot_days]\n \n #Make data the correct size (no_days) for outputting\n #if(len(vars_on_the_day[k]) < no_days):\n \n #vars_on_the_day[k] = np.append(vars_on_the_day[k], \n #np.zeros(no_days - len(vars_on_the_day[k])) + \n #miss_val)\n \n \n \n #Also calculate lagged variables\n #if all(np.isnan(lags)) == False:\n \n \n #Then loop through lags\n #for l in range(len(lags)):\n \n #lag_varname = k + '_lag' + str(lags[l])\n\n #Initialise as correct length\n #lag_data = np.zeros(len(hot_days)) + miss_val\n \n \n \n #Loop through hot days\n #for h in range(len(hot_days)): \n \n #If can't count back, leave as missing\n #if (hot_days[h] - (lags[l] - 1)) < 0:\n #continue\n \n #else:\n #ind = np.arange(hot_days[h] - (lags[l]-1), hot_days[h]+1)\n #lag_data[h] = np.mean(data[ind])\n \n \n #Add lag data variable to output dictionary\n #vars_on_the_day[lag_varname] = lag_data\n \n #if(len(vars_on_the_day[lag_varname]) < no_days):\n \n #vars_on_the_day[lag_varname] = np.append(vars_on_the_day[lag_varname], \n #np.zeros(no_days - len(vars_on_the_day[lag_varname])) + \n #miss_val)\n \n\n \n #Add additional variables to outputs\n #outs.update(vars_on_the_day)\n\n \n\n #Return outputs\n" ]
[ [ "matplotlib.pyplot.legend", "numpy.meshgrid", "numpy.arange", "matplotlib.pyplot.savefig", "matplotlib.pyplot.plot", "matplotlib.pyplot.axis", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.suptitle", "matplotlib.mlab.normpdf", "matplotlib.pyplot.show" ], [ "numpy.asarray", "numpy.percentile", "numpy.max", "numpy.zeros", "numpy.where" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
LightForm-group/matflow-defdap
[ "563dd75c11b2eeba6fa76c9bf5b33ec549837d2f" ]
[ "matflow_defdap/snippets/get_EBSD_image.py" ]
[ "import numpy as np\nfrom defdap.quat import Quat\nfrom scipy.stats import mode\nfrom scipy.ndimage import zoom\n\nfrom matflow_defdap import main_func\n\n\n@main_func\ndef get_EBSD_image(EbsdMap, scaling_factor):\n\n # Construct an array of Euler angles\n grain_quats = np.empty((len(EbsdMap), 4))\n\n # Transformation orientations from EBSD orientation reference frame\n # to EBSD spatial reference frame\n frame_transform = Quat.fromAxisAngle(np.array((1, 0, 0)), np.pi)\n\n if EbsdMap.crystalSym == 'hexagonal':\n # Convert hex convention from y // a2 of EBSD map to x // a1 for DAMASK\n hex_transform = Quat.fromAxisAngle(np.array([0, 0, 1]), -np.pi/6)\n for i, grain in enumerate(EbsdMap):\n grain_quats[i] = (hex_transform * grain.refOri * frame_transform).quatCoef\n\n else:\n for i, grain in enumerate(EbsdMap):\n grain_quats[i] = (grain.refOri * frame_transform).quatCoef\n\n # Filter out -2 (too small grains) values in the grain image\n grain_image = EbsdMap.grains\n remove_small_grain_points(grain_image)\n\n # scale down image if needed\n if scaling_factor != 1:\n grain_image = zoom(grain_image, scaling_factor, order=0,\n prefilter=False, mode='nearest')\n\n # downstream expects grain numbering to start at 0 not 1\n grain_image -= 1\n\n EBSD_image = {\n 'orientations': {\n 'type': 'quat',\n 'unit_cell_alignment': {'x': 'a'},\n 'quaternions': grain_quats,\n 'P': 1, # DefDAP uses P=+1 (e.g see `defdap.quat.Quat.__mul__`)\n 'quat_component_ordering': 'scalar-vector',\n },\n 'grains': grain_image,\n 'scale': EbsdMap.scale,\n }\n\n return EBSD_image\n\n\ndef select_area(i, j, grain_image):\n i_min, i_max = 1, 1\n j_min, j_max = 1, 1\n\n on_edge = 0\n\n if i == 0:\n i_min = 0\n on_edge += 1\n elif i == grain_image.shape[0]-1:\n i_max = 0\n on_edge += 1\n\n if j == 0:\n j_min = 0\n on_edge += 1\n elif j == grain_image.shape[1]-1:\n j_max = 0\n on_edge += 1\n\n # select 3x3 region around point\n area = grain_image[i-i_min:i+i_max+1, j-j_min:j+j_max+1]\n\n return area, on_edge\n\n\ndef remove_small_grain_points(grain_image, max_iterations=200):\n # num_neighbours - must have at least this many pixels surrounding\n # start checking for 8 neighbours, then 7 until 2\n all_done = False\n for num_neighbours in list(range(8, 1, -1)):\n print(f\"Starting iterations with at least {num_neighbours} equal neighbours\")\n\n num_bad_prev = 0\n iteration = 0\n while True:\n num_bad = np.count_nonzero(grain_image == -2)\n if num_bad == 0:\n # No bad values left, done\n print(\"All bad points removed.\")\n all_done = True\n break\n elif num_bad == num_bad_prev:\n # Not removing any more\n print(\"Number of bad points is not decreasing!\")\n break\n if iteration > max_iterations:\n print(\"Max iterations.\")\n break\n\n iteration += 1\n print(\"Starting iteration {}, num bad: {}\".format(iteration, num_bad))\n\n grain_image_new = np.copy(grain_image)\n\n for i, j in zip(*np.where(grain_image == -2)):\n\n area, on_edge = select_area(i, j, grain_image)\n area = area.flatten()\n area = area[np.where(area > 0)] # remove -1 and -2\n\n mode_vals, mode_counts = mode(area)\n for mode_val, mode_count in zip(mode_vals, mode_counts):\n if mode_count >= num_neighbours:\n grain_image_new[i, j] = mode_val\n break\n\n num_bad_prev = num_bad\n # [:, :] required to update the array passed in\n grain_image[:, :] = grain_image_new\n\n if all_done:\n break\n" ]
[ [ "scipy.ndimage.zoom", "numpy.copy", "numpy.count_nonzero", "scipy.stats.mode", "numpy.array", "numpy.where" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "0.13", "1.6", "0.14", "1.10", "0.15", "1.4", "1.3", "1.9", "0.19", "1.5", "0.18", "1.2", "1.7", "0.12", "1.0", "0.17", "0.16", "1.8" ], "tensorflow": [] } ]
JankaSvK/thesis
[ "c440ab8242b058f580fdf9d5a1d00708a1696561" ]
[ "program/program/GUI.py" ]
[ "import functools\nimport random\nimport threading\nimport tkinter as tk\nimport tkinter.scrolledtext as tkst\n\nimport cv2\nimport numpy as np\nfrom PIL import Image, ImageTk\nfrom matplotlib.backends.backend_tkagg import FigureCanvasTkAgg\nfrom matplotlib.figure import Figure\nfrom mpl_toolkits.mplot3d import Axes3D # this import is important, even if the IDE is telling you something else\n\nfrom . import Config\nfrom .QueuesProvider import QueuesProvider\nfrom .TrackersProvider import get_tracker_uid, get_tracker_by_uid\n\n\nclass GUI(object):\n def __init__(self, stop_event, objects_count, tracked_points, trackers_initialization_events, image_streams,\n localization_data, console_output=None):\n self.camera_count = Config.camera_count\n\n self.initialized = threading.Event()\n self.stop_event = stop_event\n self.trackers_initialization_events = trackers_initialization_events\n\n self.image_streams = image_streams\n self.video_views = []\n\n self.objects_count = objects_count\n self.tracked_points = tracked_points\n self.initialization_buttons = []\n\n self.displayed_image_size_factor = 1/2\n self.size_to_display = None\n\n self.localization_data = localization_data\n self.last_drawn_points = [(None, None) for _ in range(self.objects_count)]\n self.minimal_distance = 20 # between points to be scattered, in millimeters\n\n if console_output is None:\n console_output = []\n self.console_output = console_output\n self.outputted_messages = 0\n\n self.buttons_per_line = 5\n\n self.rgb_colors_for_objects = [(1, 0, 0), (0, 0, 1), (0, 1, 0), (0.5, 0.5, 0.5), (0.1, 0.2, 0.5)]\n if len(self.rgb_colors_for_objects) < self.objects_count:\n for _ in range(self.objects_count - len(self.rgb_colors_for_objects)):\n self.rgb_colors_for_objects.append(tuple(random.random() for _ in range(3)))\n\n\n def set_scale_factor(self, window_size):\n if (window_size[0] > 1900):\n self.displayed_image_size_factor = 1\n else:\n self.displayed_image_size_factor = 0.60\n\n self.size_to_display = (int(Config.image_width * self.displayed_image_size_factor),\n int(Config.image_height * self.displayed_image_size_factor))\n\n def create_gui_objects(self):\n self.root = tk.Tk()\n self.root.protocol(\"WM_DELETE_WINDOW\", self.quit)\n self.root.title(\"Thesis\")\n self.set_scale_factor((self.root.winfo_screenwidth(), self.root.winfo_screenheight()))\n\n for cam_ind in range(self.camera_count):\n\n # Create camera view windows\n video_view = tk.Label(self.root)\n click_bind = functools.partial(self.click_callback, view_id=cam_ind)\n mouse_left_button = \"<Button-1>\"\n video_view.bind(mouse_left_button, click_bind)\n self.video_views.append(video_view)\n\n # Create buttons for tracker initialization\n buttons_frame = tk.Frame(self.root)\n buttons = []\n for obj_ind in range(self.objects_count):\n tracker_bind = functools.partial(self.tracker_callback, cam_ind=cam_ind, obj_ind=obj_ind)\n button = tk.Button(buttons_frame, text=\"Initialize object {}\".format(obj_ind + 1), command=tracker_bind)\n buttons.append(button)\n self.initialization_buttons.append({'frame': buttons_frame, 'buttons': buttons})\n\n # Create graph\n graph_new_size = (5 * self.displayed_image_size_factor, 4 * self.displayed_image_size_factor)\n self.graph_figure = Figure(figsize=graph_new_size, dpi=100)\n self.graph = FigureCanvasTkAgg(self.graph_figure, master=self.root)\n\n self.subplot = self.graph_figure.add_subplot(111, projection='3d')\n self.subplot.mouse_init()\n self.subplot.set_xlabel(\"X\")\n self.subplot.set_ylabel(\"Y\")\n 
self.subplot.set_zlabel(\"Z\")\n\n # Create logging windows\n self.console = tkst.ScrolledText(self.root, height=10)\n\n def set_gui_layout(self):\n # Place the camera vies\n for i, view in enumerate(self.video_views):\n view.grid(row=0, column=i)\n\n # Place graph\n self.graph.get_tk_widget().grid(row=0, column=self.camera_count, stick=\"nsew\")\n\n # Place the buttons\n for cam_ind, buttons_pack in enumerate(self.initialization_buttons):\n frame = buttons_pack['frame']\n buttons = buttons_pack['buttons']\n frame.grid(row=1, column=cam_ind, stick=\"w\")\n for obj_ind, button in enumerate(buttons):\n button.grid(row=obj_ind // self.buttons_per_line, column=obj_ind % self.buttons_per_line)\n\n # Place Console output\n self.console.grid(column=0, row=2, columnspan=3, sticky=\"nsew\")\n\n def click_callback(self, event, view_id):\n QueuesProvider.add_mouse_click(window_index=view_id, x=event.x, y=event.y)\n\n def tracker_callback(self, cam_ind, obj_ind):\n uid = get_tracker_uid(cam_ind, obj_ind)\n self.initialization_buttons[cam_ind]['buttons'][obj_ind].config(relief='sunken')\n QueuesProvider.MouseClicks[cam_ind] = []\n self.trackers_initialization_events[uid].set()\n self.console_output.append(\n \"To select an object, draw a rectangle by clicking on its top left and bottom right corner.\")\n\n def quit(self):\n self.stop_event.set()\n\n def start(self):\n self.create_gui_objects()\n self.initialized.set() # initialization is finished, so the program may correctly start\n self.set_gui_layout()\n self.start_streaming()\n\n def start_streaming(self):\n self.last_scattered = None\n while not self.stop_event.is_set():\n self.update_cameras_views()\n\n for object_id in range(self.objects_count):\n self.draw_located_point(object_id)\n\n # Raise buttons\n for tracker_id, initialize_tracker in enumerate(self.trackers_initialization_events):\n if not initialize_tracker.is_set():\n cam_ind, obj_id = get_tracker_by_uid(tracker_id)\n self.initialization_buttons[cam_ind]['buttons'][obj_id].config(relief='raised')\n\n # Add messages to output\n if self.console_output and len(self.console_output) > self.outputted_messages:\n self.print_messages()\n self.root.update_idletasks()\n self.root.update()\n\n def print_messages(self):\n self.console.config(state='normal')\n while self.outputted_messages < len(self.console_output):\n self.console.insert(tk.END, self.console_output[self.outputted_messages] + '\\n')\n self.outputted_messages += 1\n self.console.see(tk.END)\n self.console.config(state='disabled')\n\n def draw_located_point(self, object_id):\n if not QueuesProvider.LocalizatedPoints3D[object_id]:\n return\n\n point = QueuesProvider.LocalizatedPoints3D[object_id][-1].coordinates\n\n last_drawn, last_scattered = self.last_drawn_points[object_id]\n if last_drawn is None or np.linalg.norm(point - last_drawn) > self.minimal_distance:\n if last_drawn is not None:\n zipped = list(zip(last_drawn, point))\n self.subplot.plot(*zipped, color=self.rgb_colors_for_objects[object_id])\n\n if last_scattered is not None:\n last_scattered.set_visible(False)\n\n scattered = self.subplot.scatter(*point, c=self.rgb_colors_for_objects[object_id])\n self.last_drawn_points[object_id] = (point, scattered)\n self.graph.show()\n\n def update_cameras_views(self):\n for i, stream in enumerate(self.image_streams):\n if stream:\n image = stream[-1].image\n self.add_tracker_information(i, image)\n image = self.process_image_for_displaying(image)\n else:\n image = self.create_empty_image()\n\n 
self.video_views[i].configure(image=image)\n self.video_views[i].image = image\n\n def add_tracker_information(self, cam_ind, image):\n for obj_id in range(self.objects_count):\n uid = get_tracker_uid(cam_ind, obj_id)\n tracked_points = self.tracked_points[uid]\n\n if len(tracked_points) == 0:\n continue\n\n color = self.bgr_to_rgb_color_and_scale(self.rgb_colors_for_objects[obj_id])\n\n time, coords = tracked_points[-1].timestamp, tracked_points[-1].coordinates\n if coords is None:\n cv2.putText(image,\n \"Object {} was not found\".format(obj_id + 1),\n (10, (obj_id + 1) * 30), cv2.FONT_HERSHEY_COMPLEX, 1, color)\n else:\n cv2.circle(image, coords, 5, color, -1)\n\n def bgr_to_rgb_color_and_scale(self, color):\n return [c * 255 for c in reversed(color)]\n\n def process_image_for_displaying(self, image):\n image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n if self.displayed_image_size_factor != 1:\n image = cv2.resize(image, self.size_to_display)\n image = Image.fromarray(image)\n return ImageTk.PhotoImage(image)\n\n def create_empty_image(self): # TODO fix resolution\n img = Image.new(\"RGB\", self.size_to_display, \"white\")\n return ImageTk.PhotoImage(image=img)\n" ]
[ [ "matplotlib.backends.backend_tkagg.FigureCanvasTkAgg", "numpy.linalg.norm", "matplotlib.figure.Figure" ] ]
[ { "matplotlib": [], "numpy": [ "1.10", "1.12", "1.11", "1.19", "1.24", "1.13", "1.16", "1.9", "1.18", "1.23", "1.21", "1.22", "1.20", "1.7", "1.15", "1.14", "1.17", "1.8" ], "pandas": [], "scipy": [], "tensorflow": [] } ]
maitreygram/DiNO
[ "468f5a474b20cc8ffd6299d1e8a8652ba2212ef0", "468f5a474b20cc8ffd6299d1e8a8652ba2212ef0" ]
[ "full_test.py", "affinity_helper.py" ]
[ "# Copyright (c) Facebook, Inc. and its affiliates.\n# \n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n# \n# http://www.apache.org/licenses/LICENSE-2.0\n# \n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"\nSome parts are taken from https://github.com/Liusifei/UVC\n\"\"\"\nimport os\nimport copy\nimport glob\nimport queue\nfrom urllib.request import urlopen\nimport argparse\nimport numpy as np\nfrom tqdm import tqdm\nfrom pdb import set_trace\n\nimport cv2\nimport torch\nimport torch.nn as nn\nfrom torch.nn import functional as F\nfrom PIL import Image\nfrom torchvision import transforms\n\nimport utils\nimport vision_transformer as vits\nimport helper\nos.environ[\"CUDA_VISIBLE_DEVICES\"] = \"1\"\n\[email protected]_grad()\ndef eval_video_tracking_davis(args, model, frame_list, video_dir, first_seg, seg_ori, color_palette, diffuse, take_current=0):\n \"\"\"\n Evaluate tracking on a video given first frame & segmentation\n \"\"\"\n video_folder = os.path.join(args.output_dir+'video_'+str(diffuse), video_dir.split('/')[-1])\n os.makedirs(video_folder, exist_ok=True)\n\n # The queue stores the n preceeding frames\n que = queue.Queue(args.n_last_frames)\n\n # first frame\n frame1, ori_h, ori_w = read_frame(frame_list[0])\n # extract first frame feature\n frame1_feat = extract_feature(model, frame1).T # dim x h*w\n\n # saving first segmentation\n out_path = os.path.join(video_folder, \"00000.png\")\n imwrite_indexed(out_path, seg_ori, color_palette)\n mask_neighborhood = None\n for cnt in tqdm(range(1, len(frame_list))):\n frame_tar = read_frame(frame_list[cnt])[0]\n\n # we use the first segmentation and the n previous ones\n used_frame_feats = [frame1_feat] + [pair[0] for pair in list(que.queue)]\n used_segs = [first_seg] + [pair[1] for pair in list(que.queue)]\n frame_tar_avg, feat_tar, mask_neighborhood = label_propagation(args, model, frame_tar, used_frame_feats, used_segs, diffuse, mask_neighborhood, take_current=take_current)\n\n # pop out oldest frame if neccessary\n if que.qsize() == args.n_last_frames:\n que.get()\n # push current results into queue\n seg = copy.deepcopy(frame_tar_avg)\n que.put([feat_tar, seg])\n\n # upsampling & argmax\n frame_tar_avg = F.interpolate(frame_tar_avg, scale_factor=args.patch_size, mode='bilinear', align_corners=False, recompute_scale_factor=False)[0]\n frame_tar_avg = norm_mask(frame_tar_avg)\n _, frame_tar_seg = torch.max(frame_tar_avg, dim=0)\n\n # saving to disk\n frame_tar_seg = np.array(frame_tar_seg.squeeze().cpu(), dtype=np.uint8)\n frame_tar_seg = np.array(Image.fromarray(frame_tar_seg).resize((ori_w, ori_h), 0))\n frame_nm = frame_list[cnt].split('/')[-1].replace(\".jpg\", \".png\")\n imwrite_indexed(os.path.join(video_folder, frame_nm), frame_tar_seg, color_palette)\n\n\ndef restrict_neighborhood(h, w):\n # We restrict the set of source nodes considered to a spatial neighborhood of the query node (i.e. 
``local attention'')\n mask = torch.zeros(h, w, h, w)\n for i in range(h):\n for j in range(w):\n for p in range(2 * args.size_mask_neighborhood + 1):\n for q in range(2 * args.size_mask_neighborhood + 1):\n if i - args.size_mask_neighborhood + p < 0 or i - args.size_mask_neighborhood + p >= h:\n continue\n if j - args.size_mask_neighborhood + q < 0 or j - args.size_mask_neighborhood + q >= w:\n continue\n mask[i, j, i - args.size_mask_neighborhood + p, j - args.size_mask_neighborhood + q] = 1\n\n mask = mask.reshape(h * w, h * w)\n return mask.cuda(non_blocking=True)\n\n\ndef norm_mask(mask):\n c, h, w = mask.size()\n for cnt in range(c):\n mask_cnt = mask[cnt,:,:]\n if(mask_cnt.max() > 0):\n mask_cnt = (mask_cnt - mask_cnt.min())\n mask_cnt = mask_cnt/mask_cnt.max()\n mask[cnt,:,:] = mask_cnt\n return mask\n\n\ndef label_propagation(args, model, frame_tar, list_frame_feats, list_segs, multiscale_rate, mask_neighborhood=None, take_current=0):\n \"\"\"\n propagate segs of frames in list_frames to frame_tar\n \"\"\"\n ## we only need to extract feature of the target frame\n feat_tar, h, w = extract_feature(model, frame_tar, return_h_w=True)\n\n return_feat_tar = feat_tar.T # dim x h*w\n\n ncontext = len(list_frame_feats)\n feat_sources = torch.stack(list_frame_feats) # nmb_context x dim x h*w\n feat_sources = feat_sources.transpose(2,1)\n\n '''\n #feat_tar = F.normalize(feat_tar, dim=1, p=2)\n #feat_sources = F.normalize(feat_sources, dim=1, p=2)\n overall_feat = F.normalize(overall_feat,dim=2,p=2)\n overall_feat = overall_feat.reshape((ncontext+1)*h*w,f_dim)\n # Normalizing\n #aff = torch.softmax(torch.matmul(overall_feat,overall_feat.T)/0.1,1)\n aff = torch.softmax(torch.matmul(overall_feat,overall_feat.T)/0.1,1)\n inf_aff = torch.inverse(torch.eye(aff.shape[0]).cuda()-aff)\n aff = inf_aff[:h*w,h*w:(ncontext+1)*h*w]\n #aff = aff.transpose(1,0)\n aff = aff.transpose(1,0).reshape(ncontext,h*w,h*w).transpose(2,1)\n #aff = aff.reshape(ncontext,h*w,h*w)\n\n\n #feat_tar = feat_tar.unsqueeze(0).repeat(ncontext, 1, 1)\n #aff = torch.exp(torch.bmm(feat_tar, feat_sources) / 0.1) # nmb_context x h*w (tar: query) x h*w (source: keys)\n\n if args.size_mask_neighborhood > 0:\n if mask_neighborhood is None:\n mask_neighborhood = restrict_neighborhood(h, w)\n mask_neighborhood = mask_neighborhood.unsqueeze(0).repeat(ncontext, 1, 1)\n aff *= mask_neighborhood\n\n aff = aff.transpose(2, 1).reshape(-1, h * w) # nmb_context*h*w (source: keys) x h*w (tar: queries)\n #tk_val, _ = torch.topk(aff, dim=0, k=args.topk)\n #tk_val_min, _ = torch.min(tk_val, dim=0)\n #aff[aff < tk_val_min] = 0\n\n aff = aff / torch.sum(aff, keepdim=True, axis=0)\n \n size_mask_neighborhood = args.size_mask_neighborhood\n topk = args.topk\n att_alpha = 0.1\n infi_alpha = 1\n diffusion_num = 1\n\n f_dim = feat_tar.shape[1]\n if diffusion_num == \"infinite\":\n overall_feat = torch.cat((feat_tar.unsqueeze(0),feat_sources))\n overall_feat = F.normalize(overall_feat,dim=2,p=2)\n overall_feat = overall_feat.reshape((ncontext+1)*h*w,f_dim)\n # Normalizing\n aff = torch.softmax(torch.matmul(overall_feat,overall_feat.T)/att_alpha,1)\n # Implementing S = (I-\\alpha*A)^{-1}\n inf_aff = torch.inverse(torch.eye(aff.shape[0]).cuda()-infi_alpha*aff)\n aff = inf_aff[:h*w,h*w:(ncontext+1)*h*w]\n aff = aff.transpose(1,0).reshape(ncontext,h*w,h*w).transpose(2,1)\n else:\n assert type(diffusion_num)==int\n overall_feat = torch.cat((feat_tar.unsqueeze(0),feat_sources))\n overall_feat = F.normalize(overall_feat,dim=2,p=2)\n overall_feat = 
overall_feat.reshape((ncontext+1)*h*w,f_dim)\n aff = torch.matmul(overall_feat,overall_feat.T)/att_alpha\n # Normalizing\n if diffusion_num>1:\n aff = torch.softmax(aff,1)\n for i in range(diffusion_num-1):\n aff = torch.matmul(aff,aff)\n else:\n aff = torch.exp(aff) # nmb_context x h*w (tar: query) x h*w (source: keys)\n aff = aff[:h*w,h*w:(ncontext+1)*h*w]\n aff = aff.transpose(1,0).reshape(ncontext,h*w,h*w).transpose(2,1)\n\n if size_mask_neighborhood > 0:\n if mask_neighborhood is None:\n mask_neighborhood = restrict_neighborhood(h, w)\n mask_neighborhood = mask_neighborhood.unsqueeze(0).repeat(ncontext, 1, 1)\n aff *= mask_neighborhood\n\n aff = aff.transpose(2, 1).reshape(-1, h * w) # nmb_context*h*w (source: keys) x h*w (tar: queries)\n tk_val, _ = torch.topk(aff, dim=0, k=topk)\n tk_val_min, _ = torch.min(tk_val, dim=0)\n aff[aff < tk_val_min] = 0\n aff = aff / torch.sum(aff, keepdim=True, axis=0)\n '''\n aff, mask_neighborhood, tr_aff = helper.diffusion(feat_tar, feat_sources, \"infinite\", args.size_mask_neighborhood, args.topk, \n h, w, ncontext, mask_neighborhood, multiscale_rate, multiscale=True, take_current=take_current)\n\n list_segs = [s.cuda() for s in list_segs]\n segs = torch.cat(list_segs)\n nmb_context, C, h, w = segs.shape\n segs = segs.reshape(nmb_context, C, -1).transpose(2, 1).reshape(-1, C).T\n # set_trace()\n if take_current:\n list_segs = [torch.mm(segs, tr_aff).reshape(1, C, h, w)] + list_segs\n segs = torch.cat(list_segs)\n nmb_context, C, h, w = segs.shape\n segs = segs.reshape(nmb_context, C, -1).transpose(2, 1).reshape(-1, C).T # C x nmb_context*h*w\n seg_tar = torch.mm(segs, aff)\n seg_tar = seg_tar.reshape(1, C, h, w)\n return seg_tar, return_feat_tar, mask_neighborhood\n \n\ndef extract_feature(model, frame, return_h_w=False):\n \"\"\"Extract one frame feature everytime.\"\"\"\n out = model.get_intermediate_layers(frame.unsqueeze(0).cuda(), n=1)[0]\n out = out[:, 1:, :] # we discard the [CLS] token\n h, w = int(frame.shape[1] / model.patch_embed.patch_size), int(frame.shape[2] / model.patch_embed.patch_size)\n dim = out.shape[-1]\n out = out[0].reshape(h, w, dim)\n out = out.reshape(-1, dim)\n if return_h_w:\n return out, h, w\n return out\n\n\ndef imwrite_indexed(filename, array, color_palette):\n \"\"\" Save indexed png for DAVIS.\"\"\"\n if np.atleast_3d(array).shape[2] != 1:\n raise Exception(\"Saving indexed PNGs requires 2D array.\")\n\n im = Image.fromarray(array)\n im.putpalette(color_palette.ravel())\n im.save(filename, format='PNG')\n\n\ndef to_one_hot(y_tensor, n_dims=None):\n \"\"\"\n Take integer y (tensor or variable) with n dims &\n convert it to 1-hot representation with n+1 dims.\n \"\"\"\n if(n_dims is None):\n n_dims = int(y_tensor.max()+ 1)\n _,h,w = y_tensor.size()\n y_tensor = y_tensor.type(torch.LongTensor).view(-1, 1)\n n_dims = n_dims if n_dims is not None else int(torch.max(y_tensor)) + 1\n y_one_hot = torch.zeros(y_tensor.size()[0], n_dims).scatter_(1, y_tensor, 1)\n y_one_hot = y_one_hot.view(h,w,n_dims)\n return y_one_hot.permute(2, 0, 1).unsqueeze(0)\n\n\ndef read_frame_list(video_dir):\n frame_list = [img for img in glob.glob(os.path.join(video_dir,\"*.jpg\"))]\n frame_list = sorted(frame_list)\n return frame_list\n\n\ndef read_frame(frame_dir, scale_size=[480]):\n \"\"\"\n read a single frame & preprocess\n \"\"\"\n img = cv2.imread(frame_dir)\n ori_h, ori_w, _ = img.shape\n if len(scale_size) == 1:\n if(ori_h > ori_w):\n tw = scale_size[0]\n th = (tw * ori_h) / ori_w\n th = int((th // 64) * 64)\n else:\n th = 
scale_size[0]\n tw = (th * ori_w) / ori_h\n tw = int((tw // 64) * 64)\n else:\n th, tw = scale_size\n img = cv2.resize(img, (tw, th))\n img = img.astype(np.float32)\n img = img / 255.0\n img = img[:, :, ::-1]\n img = np.transpose(img.copy(), (2, 0, 1))\n img = torch.from_numpy(img).float()\n img = color_normalize(img)\n return img, ori_h, ori_w\n\n\ndef read_seg(seg_dir, factor, scale_size=[480]):\n seg = Image.open(seg_dir)\n _w, _h = seg.size # note PIL.Image.Image's size is (w, h)\n if len(scale_size) == 1:\n if(_w > _h):\n _th = scale_size[0]\n _tw = (_th * _w) / _h\n _tw = int((_tw // 64) * 64)\n else:\n _tw = scale_size[0]\n _th = (_tw * _h) / _w\n _th = int((_th // 64) * 64)\n else:\n _th = scale_size[1]\n _tw = scale_size[0]\n small_seg = np.array(seg.resize((_tw // factor, _th // factor), 0))\n small_seg = torch.from_numpy(small_seg.copy()).contiguous().float().unsqueeze(0)\n return to_one_hot(small_seg), np.asarray(seg)\n\n\ndef color_normalize(x, mean=[0.485, 0.456, 0.406], std=[0.228, 0.224, 0.225]):\n for t, m, s in zip(x, mean, std):\n t.sub_(m)\n t.div_(s)\n return x\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser('Evaluation with video object segmentation on DAVIS 2017')\n parser.add_argument('--pretrained_weights', default='', type=str, help=\"Path to pretrained weights to evaluate.\")\n parser.add_argument('--arch', default='vit_small', type=str,\n choices=['vit_tiny', 'vit_small', 'vit_base'], help='Architecture (support only ViT atm).')\n parser.add_argument('--patch_size', default=16, type=int, help='Patch resolution of the model.')\n parser.add_argument(\"--checkpoint_key\", default=\"teacher\", type=str, help='Key to use in the checkpoint (example: \"teacher\")')\n parser.add_argument('--output_dir', default=\".\", help='Path where to save segmentations')\n parser.add_argument('--data_path', default='/path/to/davis/', type=str)\n parser.add_argument(\"--n_last_frames\", type=int, default=3, help=\"number of preceeding frames\")\n parser.add_argument(\"--size_mask_neighborhood\", default=12, type=int,\n help=\"We restrict the set of source nodes considered to a spatial neighborhood of the query node\")\n parser.add_argument(\"--topk\", type=int, default=5, help=\"accumulate label from top k neighbors\")\n parser.add_argument(\"--bs\", type=int, default=6, help=\"Batch size, try to reduce if OOM\")\n parser.add_argument(\"--tc\", type=int, default=0, help=\"Transform mask to current frame and use it as base mask for current frame\")\n args = parser.parse_args()\n\n print(\"git:\\n {}\\n\".format(utils.get_sha()))\n print(\"\\n\".join(\"%s: %s\" % (k, str(v)) for k, v in sorted(dict(vars(args)).items())))\n\n '''\n # building network\n model = vits.__dict__[args.arch](patch_size=args.patch_size, num_classes=0)\n print(f\"Model {args.arch} {args.patch_size}x{args.patch_size} built.\")\n model.cuda()\n utils.load_pretrained_weights(model, args.pretrained_weights, args.checkpoint_key, args.arch, args.patch_size)\n for param in model.parameters():\n param.requires_grad = False\n model.eval()\n '''\n with torch.no_grad():\n model = torch.hub.load('facebookresearch/dino:main', 'dino_vits16')\n model.cuda()\n model.eval()\n\n color_palette = []\n for line in urlopen(\"https://raw.githubusercontent.com/Liusifei/UVC/master/libs/data/palette.txt\"):\n color_palette.append([int(i) for i in line.decode(\"utf-8\").split('\\n')[0].split(\" \")])\n color_palette = np.asarray(color_palette, dtype=np.uint8).reshape(-1,3)\n\n # rate_list = 
list(np.arange(0.55,1.05,0.05))\n video_list = open(os.path.join(args.data_path, \"ImageSets/2017/val.txt\")).readlines()\n diffuse_list = list(np.arange(0.00, 1.02,0.05))\n for diffuse in diffuse_list:\n print(\"Alpha: \", diffuse)\n for i, video_name in enumerate(video_list):\n video_name = video_name.strip()\n print(f'[{i}/{len(video_list)}] Begin to segmentate video {video_name}.')\n video_dir = os.path.join(args.data_path, \"JPEGImages/480p/\", video_name)\n frame_list = read_frame_list(video_dir)\n seg_path = frame_list[0].replace(\"JPEGImages\", \"Annotations\").replace(\"jpg\", \"png\")\n first_seg, seg_ori = read_seg(seg_path, args.patch_size)\n eval_video_tracking_davis(args, model, frame_list, video_dir, first_seg, seg_ori, color_palette, diffuse, args.tc)\n\n\n# python full_test.py --data_path ./train --output_dir ./full_test_output\n# python evaluation_method.py --task semi-supervised --results_path ./full_test_output --davis_path ./train\n\n\n\n\n\n\n\n\n\n", "import os\nimport copy\nimport glob\nimport queue\nfrom urllib.request import urlopen\nimport argparse\nimport numpy as np\nfrom tqdm import tqdm\nfrom pdb import set_trace\n\nimport torch\nimport torch.nn as nn\nfrom torch.nn import functional as F\nfrom PIL import Image\nfrom torchvision import transforms\n\nimport utils\nimport vision_transformer as vits\nfrom pdb import set_trace\n\n\ndef restrict_neighborhood(h, w, args):\n # We restrict the set of source nodes considered to a spatial neighborhood of the query node (i.e. ``local attention'')\n mask = torch.zeros(h, w, h, w)\n for i in range(h):\n for j in range(w):\n for p in range(2 * args.size_mask_neighborhood + 1):\n for q in range(2 * args.size_mask_neighborhood + 1):\n if i - args.size_mask_neighborhood + p < 0 or i - args.size_mask_neighborhood + p >= h:\n continue\n if j - args.size_mask_neighborhood + q < 0 or j - args.size_mask_neighborhood + q >= w:\n continue\n mask[i, j, i - args.size_mask_neighborhood + p, j - args.size_mask_neighborhood + q] = 1\n\n mask = mask.reshape(h * w, h * w)\n return mask.cuda(non_blocking=True)\n\n\n# N = h*w, i.e. 
the number of patches in an image\n# new_feature: N x d\n# feature_stack: num_context x d x N\n# diffusion_num: ---\"k = number of diffusions wanted\", \"infinite\"\ndef diffusion(feat_tar, feat_sources, diffusion_num, size_mask_neighborhood, topk, h, w, \n\t\t\t ncontext, mask_neighborhood, multiscale_rate, att_alpha = 0.1, infi_alpha = 1, \n\t\t\t sparse = True, multiscale = False):\n\tf_dim = feat_tar.shape[1]\n\tif diffusion_num == \"infinite\":\n\t\toverall_feat = torch.cat((feat_tar.unsqueeze(0),feat_sources))\n\t\toverall_feat = F.normalize(overall_feat,dim=2,p=2)\n\t\toverall_feat = overall_feat.reshape((ncontext+1)*h*w,f_dim)\n\t\t# Normalizing\n\t\taff = torch.softmax(torch.matmul(overall_feat,overall_feat.T)/att_alpha,1)\n\t\t# Implementing S = (I-\\alpha*A)^{-1}\n\t\t# inf_aff = torch.inverse(torch.eye(aff.shape[0]).cuda()-multiscale_rate*aff)\n\t\taff = aff[:h*w,h*w:(ncontext+1)*h*w]\n\t\taff = aff.transpose(1,0).reshape(ncontext,h*w,h*w).transpose(2,1)\n\telse:\n\t\tassert type(diffusion_num)==int\n\t\toverall_feat = torch.cat((feat_tar.unsqueeze(0),feat_sources))\n\t\toverall_feat = F.normalize(overall_feat,dim=2,p=2)\n\t\toverall_feat = overall_feat.reshape((ncontext+1)*h*w,f_dim)\n\t\taff = torch.matmul(overall_feat,overall_feat.T)/att_alpha\n\t\t# Normalizing\n\t\tif diffusion_num>1:\n\t\t\taff = torch.softmax(aff,1)\n\t\t\tif multiscale:\n\t\t\t\taff_bank = []\n\t\t\t\taff_bank.append(aff)\n\t\t\tfor i in range(diffusion_num-1):\n\t\t\t\taff = torch.matmul(aff,aff)\n\t\t\t\tif multiscale:\n\t\t\t\t\taff_bank.append(aff)\n\t\t\tif multiscale:\n\t\t\t\taff_bank = torch.stack(aff_bank)\n\t\t\t\taff = (1-multiscale_rate)*aff_bank[0]+multiscale_rate*aff_bank[1]\n\t\t\t\t#torch.mean(aff_bank,0)\n\t\telse:\n\t\t\taff = torch.exp(aff) # nmb_context x h*w (tar: query) x h*w (source: keys)\n\t\taff = inf_aff[:h*w,h*w:(ncontext+1)*h*w]\n\t\taff = aff.transpose(1,0).reshape(ncontext,h*w,h*w).transpose(2,1)\n\n\tif size_mask_neighborhood > 0:\n\t\tif mask_neighborhood is None:\n\t\t\tmask_neighborhood = restrict_neighborhood(h, w, size_mask_neighborhood)\n\t\t\tmask_neighborhood = mask_neighborhood.unsqueeze(0).repeat(ncontext, 1, 1)\n\t\taff *= mask_neighborhood\n\t\t\n\taff = aff.transpose(2, 1).reshape(-1, h * w) # nmb_context*h*w (source: keys) x h*w (tar: queries)\n\tif sparse:\n\t\ttk_val, _ = torch.topk(aff, dim=0, k=topk)\n\t\ttk_val_min, _ = torch.min(tk_val, dim=0)\n\t\taff[aff < tk_val_min] = 0\n\n\taff = aff / torch.sum(aff, keepdim=True, axis=0)\n\treturn aff, mask_neighborhood\n\n\n# N = h*w, i.e. 
the number of patches in an image\n# new_feature: N x d\ndef diffusion_image(feat_tar, size_mask_neighborhood, topk, h, w, \n\t\t\t mask_neighborhood, att_alpha = 0.1, infi_alpha = 1, \n\t\t\t sparse = True):\n\tf_dim = feat_tar.shape[1]\n\t# overall_feat = feat_tar.unsqueeze(0)\n\toverall_feat = F.normalize(feat_tar,dim=1,p=2)\n\t# overall_feat = overall_feat.reshape(h*w,f_dim)\n\t\t\n\t# Normalizing\n\taff = torch.softmax(torch.matmul(overall_feat,overall_feat.T)/att_alpha,1)\n\t# Implementing S = (I-\\alpha*A)^{-1}\n\t# aff1 = copy.deepcopy(aff)\n\t# aff = torch.inverse(torch.eye(aff.shape[0]).cuda()-infi_alpha*aff)\n\t# aff = aff.transpose(1,0).reshape(ncontext,h*w,h*w).transpose(2,1)\n\t\n\tif size_mask_neighborhood > 0:\n\t\tif mask_neighborhood is None:\n\t\t\tmask_neighborhood = restrict_neighborhood(h, w, size_mask_neighborhood)\n\t\t\t# mask_neighborhood = mask_neighborhood.unsqueeze(0).repeat(ncontext, 1, 1)\n\t\taff *= mask_neighborhood\n\t\t# aff1 *= mask_neighborhood\n\n\taff = aff.transpose(1, 0).reshape(-1, h * w) # nmb_context*h*w (source: keys) x h*w (tar: queries)\n\t# aff1 = aff1.transpose(1, 0).reshape(-1, h * w)\n\tif sparse:\n\t\ttk_val, _ = torch.topk(aff, dim=0, k=topk)\n\t\ttk_val_min, _ = torch.min(tk_val, dim=0)\n\t\taff[aff < tk_val_min] = 0\n\n\t# if sparse:\n\t# \ttk_val, _ = torch.topk(aff1, dim=0, k=topk)\n\t# \ttk_val_min, _ = torch.min(tk_val, dim=0)\n\t# \taff1[aff1 < tk_val_min] = 0\n\n\taff = aff / torch.sum(aff, keepdim=True, axis=0)\n\t# aff1 = copy.deepcopy(aff)\n\tset_trace()\n\tpass\n\taff = torch.inverse(torch.eye(aff.shape[0]).cuda()-infi_alpha*aff)\n\taff = aff / torch.sum(aff, keepdim=True, axis=0)\n\t# aff1 = aff1 / torch.sum(aff1, keepdim=True, axis=0)\n\treturn aff, mask_neighborhood\n\n" ]
[ [ "torch.mm", "torch.max", "torch.cat", "torch.zeros", "numpy.asarray", "numpy.arange", "torch.from_numpy", "numpy.atleast_3d", "torch.no_grad", "torch.nn.functional.interpolate", "torch.stack", "torch.hub.load" ], [ "torch.nn.functional.normalize", "torch.softmax", "torch.zeros", "torch.min", "torch.sum", "torch.eye", "torch.exp", "torch.matmul", "torch.topk", "torch.stack" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
rwiuff/QuantumTransport
[ "5367ca2130b7cf82fefd4e2e7c1565e25ba68093", "5367ca2130b7cf82fefd4e2e7c1565e25ba68093" ]
[ "Listings/BuckyBall.py", "scripts/Tanos_libs/lib_bc.py" ]
[ "# -------------------------------------------------------------------- #\n# #\n# Python script for calculating the bandstructure of C60 #\n# #\n# This scgript calcutes the electron transport #\n# of the Bucky Ball (C60) #\n# #\n# Written by Rasmus Wiuff ([email protected]) #\n# #\n# -------------------------------------------------------------------- #\n\n# --------------------------Import Libraries-------------------------- #\nfrom matplotlib import pyplot as plt # Pyplot for nice graphs\nfrom mpl_toolkits.mplot3d import Axes3D # Used for 3D plots\nfrom matplotlib.widgets import Slider, Button\nimport numpy as np # NumPy\nfrom numpy import linalg as LA\nfrom collections import Counter\nVppi = -1\n\n# np.set_printoptions(threshold=np.inf)\n\n# BB = molecule('C60')\n# xyz = BB.get_positions()\n\nxyz = np.array([[1.3624, 1.5632, 2.8359], [2.0435, 0.36748, 2.7818],\n [1.6002, 2.5246, 1.8519], [0.0036388, 1.2996, 3.3319],\n [1.2172, -0.64172, 3.2237], [2.9886, 0.13386, 1.8164],\n [0.50174, 3.3131, 1.2672], [2.5073, 2.2423, 0.85514],\n [-1.1397, 2.0362, 2.6753], [-0.086852, -0.055936, 3.5613],\n [1.3122, -1.9012, 2.6354], [3.0831, -1.0979, 1.2391],\n [3.2202, 1.0708, 0.8538], [-0.90772, 2.9856, 1.7068],\n [0.78701, 3.4713, -0.071127], [2.0706, 2.8055, -0.32213],\n [-2.2925, 1.2502, 2.225], [-1.3338, -0.83053, 3.1472],\n [2.2289, -2.0986, 1.6273], [0.10933, -2.6948, 2.338],\n [3.3729, -0.9212, -0.082145], [3.4595, 0.4197, -0.32075],\n [-1.9189, 2.7734, 0.66243], [-0.30423, 3.3175, -1.1239],\n [2.3151, 2.1454, -1.5248], [-2.718, 1.7289, 1.0219],\n [-2.4072, -0.1101, 2.4492], [-1.2414, -2.0783, 2.5771],\n [1.6915, -2.9709, 0.70985], [0.34387, -3.3471, 1.1603],\n [2.7975, -1.7395, -1.0186], [2.9824, 0.94083, -1.4955],\n [-1.6529, 2.9328, -0.68622], [-0.061038, 2.6748, -2.3153],\n [1.2982, 2.0899, -2.5875], [-3.3109, 0.91875, 0.095886],\n [-3.0017, -0.92892, 1.5037], [-2.3116, -2.2045, 1.5437],\n [1.9754, -2.7766, -0.63964], [-0.75087, -3.4335, 0.13085],\n [2.3593, -1.2416, -2.2239], [2.4601, 0.1258, -2.4726],\n [-2.2474, 2.1044, -1.6233], [-1.2886, 1.912, -2.6947],\n [1.3859, 0.85338, -3.1625], [-3.5067, -0.40969, 0.32408],\n [-3.1274, 1.1072, -1.2394], [-2.0814, -2.8689, 0.37769],\n [0.92735, -2.9321, -1.6567], [-0.48135, -3.2351, -1.1932],\n [1.1636, -1.9938, -2.6284], [-1.1972, 0.6892, -3.2868],\n [0.12809, 0.10609, -3.5141], [-3.4109, -1.1172, -0.94606],\n [-3.1772, -0.1844, -1.9062], [-2.6065, -2.3553, -0.91036],\n [-1.6415, -2.5559, -1.8293], [0.018087, -1.2314, -3.2618],\n [-2.1215, -0.40907, -2.9139], [-1.3879, -1.5381, -2.8789]])\n\nHam = np.zeros((xyz.shape[0], xyz.shape[0]))\nfor i in range(xyz.shape[0]):\n for j in range(xyz.shape[0]):\n Ham[i, j] = LA.norm(np.subtract(xyz[i], xyz[j]))\nHam = np.where(Ham < 1.6, Vppi, 0)\nHam = np.subtract(Ham, Vppi * np.identity(xyz.shape[0]))\n\nprint(Ham.shape)\nprint(np.sum(Ham))\nplt.imshow(Ham)\nplt.colorbar()\nplt.show()\ne, v = LA.eig(Ham)\ne = np.round(e, decimals=3)\nw = e.real\nc = Counter(w)\ny = np.array([p for k, p in sorted(c.items())])\nx = np.asarray(sorted([*c]))\nfig, ax = plt.subplots(figsize=(16, 10), dpi=80)\nax.vlines(x=x, ymin=0, ymax=y,\n color='firebrick', alpha=0.7, linewidth=2)\nax.scatter(x=x, y=y, s=75, color='firebrick', alpha=0.7)\n\nax.set_title('Energy degeneracy', fontdict={'size': 22})\nax.set_ylabel('Degeneracy')\nax.set_xlabel('Energy')\nax.set_ylim(0, 10)\nax.tick_params(axis='both', which='both')\nax.spines['left'].set_position('center')\nplt.grid(which='both')\nfor i in range(x.size):\n ax.text(x[i], y[i] + 
.5, s=x[i], horizontalalignment='center',\n verticalalignment='bottom', fontsize=14)\nplt.show()\n\nxlin = np.array([[0, 0]])\nylin = np.array([[0, 0]])\nzlin = np.array([[0, 0]])\n\nfor i in range(xyz.shape[0]):\n for j in range(xyz.shape[0]):\n if LA.norm(np.subtract(xyz[i], xyz[j])) < 1.6:\n TmpArr = np.array([[xyz[i, 0], xyz[j, 0]]])\n xlin = np.append(xlin, TmpArr, axis=0)\n TmpArr = np.array([[xyz[i, 1], xyz[j, 1]]])\n ylin = np.append(ylin, TmpArr, axis=0)\n TmpArr = np.array([[xyz[i, 2], xyz[j, 2]]])\n zlin = np.append(zlin, TmpArr, axis=0)\nfig = plt.figure()\nax = fig.add_subplot(111, projection='3d')\n\nfor i in range(xlin.shape[0]):\n ax.plot(xlin[i], ylin[i], zlin[i])\n\nax.scatter(xyz[:, 0], xyz[:, 1], xyz[:, 2], zdir='z', s=300)\nplt.gca().set_aspect('equal', adjustable='box')\nplt.show()\n\nval = 1\nfig = plt.figure()\nax = fig.add_subplot(111, projection='3d')\nfor i in range(xlin.shape[0]):\n ax.plot(xlin[i], ylin[i], zlin[i])\ns = np.zeros(v.shape[0])\nc = np.zeros(v.shape[0])\nval = 1\ns = np.absolute(v[:, val - 1])\ns = s * 900\nc = np.where(v[:, val - 1] > 0, 0, 1)\nStateplot = ax.scatter(xyz[:, 0], xyz[:, 1], xyz[:, 2], zdir='z', s=s)\nStateplot.set_cmap(\"bwr\")\nplt.subplots_adjust(bottom=0.25)\naxcolor = 'lightgoldenrodyellow'\naxfreq = plt.axes([0.25, 0.1, 0.65, 0.03], facecolor=axcolor)\nstate = Slider(axfreq, 'State', 1, 30, valinit=1, valstep=1)\n\n\ndef update(val):\n val = state.val\n val = int(val)\n s = np.absolute(v[:, val - 1])\n s = s * 900\n print(s)\n c = np.where(v[:, val - 1] > 0, 0, 1)\n print(c)\n Stateplot._sizes = s\n Stateplot.set_array(c)\n fig.canvas.draw_idle()\n\n\nresetax = plt.axes([0.8, 0.025, 0.1, 0.04])\nbutton = Button(resetax, 'Reset', hovercolor='0.975')\n\n\ndef reset(event):\n state.reset()\n\n\nbutton.on_clicked(reset)\nstate.on_changed(update)\nplt.gca().set_aspect('equal', adjustable='box')\nplt.show()\n", "from __future__ import print_function, division\nimport numpy as np\nimport time\nimport matplotlib.pyplot as plt\nimport matplotlib.cm as cm\nimport sisl as si\n\ndef read_bondcurrents(f, idx_elec, only='+', E=0.0, k='avg'):#, atoms=None):\n \"\"\"Read bond currents from tbtrans output\n\n Parameters\n ----------\n f : string\n TBT.nc file\n idx_elec : int\n the electrode of originating electrons\n only : {''+', '-', 'all'}\n If \"+\" is supplied only the positive orbital currents are used, for \"-\", \n only the negative orbital currents are used, else return the sum of both. 
\n E : float or int, \n A float for energy in eV, int for explicit energy index \n k : bool, int or array_like\n whether the returned bond current is k-averaged, \n an explicit k-point or a selection of k-points\n\n Returns\n -------\n bc, nc.E[idx_E], geom\n bc : bond currents\n nc.E[idx_E] : energy\n geom : geometry\n\n \"\"\"\n\n print('Reading: {}'.format(f))\n nc = si.get_sile(f)\n na, na_dev = nc.na, nc.na_dev\n print('Total number of atoms: {}'.format(na))\n print('Number of atoms in the device region: {}'.format(na_dev))\n geom = nc.geom\n\n elec = nc.elecs[idx_elec]\n print('Bond-currents from electrode: {}'.format(elec))\n\n # Check 'k' argument\n if k == 'avg':\n avg = True\n elif k == 'Gamma':\n kpts = nc.kpt\n idx_gamma = np.where(np.sum(np.abs(kpts), axis=1) == 0.)[0]\n if (kpts[idx_gamma] != np.zeros((1, 3))).any(axis=1):\n print('\\nThe selected k-point is not Gamma!\\n')\n exit(0)\n else:\n print('You have selected the Gamma point!')\n avg = idx_gamma # Index of Gamma point in nc.kpt\n else:\n print('\\nInvalid `k` argument: please keep the default `avg` or use `Gamma`!\\n')\n exit(0)\n\n idx_E = nc.Eindex(E)\n print('Extracting bond-currents at energy: {} eV'.format(nc.E[idx_E]))\n bc = nc.bond_current(elec, kavg=avg, isc=[0,0,0], only=only, E=idx_E, uc=True)\n\n return bc, nc.E[idx_E], geom\n\n\n# Adapted from KWANT\ndef mask_interpolate(coords, values, a=None, method='nearest', oversampling=300):\n \"\"\"Interpolate a scalar function in vicinity of given points.\n\n Create a masked array corresponding to interpolated values of the function\n at points lying not further than a certain distance from the original\n data points provided.\n\n Parameters\n ----------\n coords : np.ndarray\n An array with site coordinates.\n values : np.ndarray\n An array with the values from which the interpolation should be built.\n a : float, optional\n Reference length. If not given, it is determined as a typical\n nearest neighbor distance.\n method : string, optional\n Passed to ``scipy.interpolate.griddata``: \"nearest\" (default), \"linear\",\n or \"cubic\"\n oversampling : integer, optional\n Number of pixels per reference length. Defaults to 3.\n\n Returns\n -------\n array : 2d NumPy array\n The interpolated values.\n min, max : vectors\n The real-space coordinates of the two extreme ([0, 0] and [-1, -1])\n points of ``array``.\n\n Notes\n -----\n - `min` and `max` are chosen such that when plotting a system on a square\n lattice and `oversampling` is set to an odd integer, each site will lie\n exactly at the center of a pixel of the output array.\n\n - When plotting a system on a square lattice and `method` is \"nearest\", it\n makes sense to set `oversampling` to ``1``. 
Then, each site will\n correspond to exactly one pixel in the resulting array.\n \"\"\"\n\n from scipy import spatial, interpolate\n import warnings\n\n # Build the bounding box.\n cmin, cmax = coords.min(0), coords.max(0)\n\n tree = spatial.cKDTree(coords)\n\n points = coords[np.random.randint(len(coords), size=10)]\n min_dist = np.min(tree.query(points, 2)[0][:, 1])\n if min_dist < 1e-6 * np.linalg.norm(cmax - cmin):\n warnings.warn(\"Some sites have nearly coinciding positions, \"\n \"interpolation may be confusing.\",\n RuntimeWarning)\n\n if coords.shape[1] != 2:\n print('Only 2D systems can be plotted this way.')\n exit()\n\n if a is None:\n a = min_dist\n\n if a < 1e-6 * np.linalg.norm(cmax - cmin):\n print(\"The reference distance a is too small.\")\n exit()\n\n if len(coords) != len(values):\n print(\"The number of sites doesn't match the number of\"\n \"provided values.\")\n exit()\n\n shape = (((cmax - cmin) / a + 1) * oversampling).round()\n delta = 0.5 * (oversampling - 1) * a / oversampling\n cmin -= delta\n cmax += delta\n dims = tuple(slice(cmin[i], cmax[i], 1j * shape[i]) for i in range(len(cmin)))\n grid = tuple(np.ogrid[dims])\n img = interpolate.griddata(coords, values, grid, method)\n mask = np.mgrid[dims].reshape(len(cmin), -1).T\n # The numerical values in the following line are optimized for the common\n # case of a square lattice:\n # * 0.99 makes sure that non-masked pixels and sites correspond 1-by-1 to\n # each other when oversampling == 1.\n # * 0.4 (which is just below sqrt(2) - 1) makes tree.query() exact.\n mask = tree.query(mask, eps=0.4)[0] > 0.99 * a\n\n return np.ma.masked_array(img, mask), cmin, cmax\n\n\n\ndef plot_bondcurrents(f, idx_elec, only='+', E=0.0, k='avg', zaxis=2, avg=True, scale='raw', xyz_origin=None,\n vmin=None, vmax=None, lw=5, log=False, adosmap=False, ADOSmin=None, ADOSmax=None, arrows=False, \n lattice=False, ps=20, ados=False, atoms=None, out=None, ymin=None, ymax=None, xmin=None, xmax=None, \n spsite=None, dpi=180, units='angstrom'): \n \"\"\" Read bond currents from tbtrans output and plot them \n \n Parameters\n ----------\n f : string\n TBT.nc file\n idx_elec : int\n the electrode of originating electrons\n only : {'+', '-', 'all'}\n If \"+\" is supplied only the positive orbital currents are used, for \"-\", \n only the negative orbital currents are used, else return the sum of both. \n E : float or int, \n A float for energy in eV, int for explicit energy index \n k : bool, int or array_like\n whether the returned bond current is k-averaged, \n an explicit k-point or a selection of k-points\n zaxis : int\n index of out-of plane direction\n avg : bool\n if \"True\", then it averages all currents coming from each atom and plots \n them in a homogeneous map\n if \"False\" it plots ALL bond currents as lines originating from each atom\n scale : {'%' or 'raw'}\n wheter values are percent. Change vmin and vmax accordingly between 0% and 100%\n vmin : float\n min value in colormap. All data greater than this will be blue \n vmax : float\n max value in colormap. 
All data greater than this will be yellow \n lattice : bool\n whether you want xy coord of atoms plotted as black dots in the figure \n ps : float\n size of these dots\n spsite : list of int\n special atoms in the lattice that you want to plot as red dots instead\n atoms : np.array or list\n list of atoms for which reading and plotting bondcurrents\n out : string\n name of final png figure \n\n .....\n\n\n Returns\n -------\n bc, nc.E[idx_E], geom\n bc : bond currents\n nc.E[idx_E] : energy\n geom : geometry\n\n Notes\n -----\n - atoms must be 0-based\n - Be sure that atoms belong to a single plane (say, only graphene, no tip)\n \"\"\"\n t = time.time()\n print('\\n***** BOND-CURRENTS (2D map) *****\\n') \n nc = si.get_sile(f)\n elec = nc.elecs[idx_elec]\n \n # Read bond currents from TBT.nc file\n bc, energy, geom = read_bondcurrents(f, idx_elec, only, E, k)\n\n # If needed, select only selected atoms from bc_bg.\n bc_coo = bc.tocoo()\n i_list, j_list, bc_list = bc_coo.row, bc_coo.col, bc_coo.data\n if atoms is None:\n print('Reading bond-currents among all atoms in device region')\n atoms = nc.a_dev\n del bc_coo\n else:\n # Only choose atoms with positive indices\n atoms = atoms[atoms >= 0]\n select = np.logical_and(np.in1d(i_list, atoms), np.in1d(j_list, atoms))\n i_list, j_list, bc_list = i_list[select], j_list[select], bc_list[select]\n del bc_coo, select\n\n print('Number of bond-current entries: {}'.format(np.shape(bc_list)))\n print('MIN bc among selected atoms (from file) = {}'.format(np.min(bc_list)))\n print('MAX bc among selected atoms (from file) = {}'.format(np.max(bc_list)))\n #print('i\\tj\\tBond-current')\n #for i, j, bc in zip(i_list, j_list, bc_list):\n # print('{}\\t{}\\t{}'.format(i, j, bc))\n\n # Plot\n import matplotlib.collections as collections\n from matplotlib.colors import LogNorm\n from mpl_toolkits.axes_grid1 import make_axes_locatable\n cmap = cm.viridis\n\n if out is None:\n figname = 'BondCurrents_{}_E{}.png'.format(elec, energy)\n else:\n figname = '{}_{}_E{}.png'.format(out, elec, energy)\n\n fig, ax = plt.subplots()\n ax.set_aspect('equal')\n\n if log:\n bc_list = np.log(bc_list+1)\n norm = LogNorm()\n else:\n norm=None\n\n if zaxis == 2:\n xaxis, yaxis = 0, 1\n elif zaxis == 0:\n xaxis, yaxis = 1, 2\n elif zaxis == 1:\n xaxis, yaxis = 0, 2\n\n if avg:\n # Plot bond currents as avg 2D map\n atoms_sort = np.sort(atoms)\n bc_avg = bc.sum(1).A.ravel()[atoms_sort]\n\n if scale is 'radial':\n _, r = geom.close_sc(xyz_origin, R=np.inf, idx=atoms_sort, ret_rij=True)\n bc_avg = np.multiply(bc_avg, r)\n\n if units == 'angstrom':\n unitstr = '$\\AA$'\n x, y = geom.xyz[atoms_sort, xaxis], geom.xyz[atoms_sort, yaxis]\n a_mask = 1.54\n elif units == 'nm':\n unitstr = 'nm'\n x, y = .1*geom.xyz[atoms_sort, xaxis], .1*geom.xyz[atoms_sort, yaxis]\n a_mask = .1*1.54\n\n if scale is '%':\n if vmin is None:\n vmin = np.amin(bc_avg)*100/np.amax(bc_avg) \n if vmax is None:\n vmax = 100\n vmin = vmin*np.amax(bc_avg)/100\n vmax = vmax*np.amax(bc_avg)/100\n else:\n if vmin is None:\n vmin = np.amin(bc_avg) \n if vmax is None:\n vmax = np.amax(bc_avg)\n\n coords = np.column_stack((x, y))\n\n img, min, max = mask_interpolate(coords, bc_avg, oversampling=30, a=a_mask)\n # Note that we tell imshow to show the array created by mask_interpolate\n # faithfully and not to interpolate by itself another time.\n image = ax.imshow(img.T, extent=(min[0], max[0], min[1], max[1]),\n origin='lower', interpolation='none', cmap='viridis',\n vmin=vmin, vmax=vmax)\n else:\n if vmin is None:\n vmin 
= np.min(bc_list) \n if vmax is None:\n vmax = np.max(bc_list)\n # Plot bond currents as half-segments\n start_list = zip(geom.xyz[i_list, xaxis], geom.xyz[i_list, yaxis])\n half_end_list = zip(.5*(geom.xyz[i_list, xaxis]+geom.xyz[j_list, xaxis]), \n .5*(geom.xyz[i_list, yaxis]+geom.xyz[j_list, yaxis]))\n line_list = list(map(list, zip(start_list, half_end_list))) # segments length = 1/2 bonds length\n linewidths = lw * bc_list / np.max(bc_list) \n lattice_bonds = collections.LineCollection(line_list, cmap=cmap, linewidths=linewidths, norm=norm)\n lattice_bonds.set_array(bc_list/np.amax(bc_list))\n lattice_bonds.set_clim(vmin/np.amax(bc_list), vmax/np.amax(bc_list))\n ax.add_collection(lattice_bonds)\n image = lattice_bonds\n \n if lattice:\n if units == 'angstrom':\n x, y = geom.xyz[atoms, xaxis], geom.xyz[atoms, yaxis]\n if units == 'nm':\n x, y = .1*geom.xyz[atoms, xaxis], .1*geom.xyz[atoms, yaxis]\n ax.scatter(x, y, s=ps*2, marker='o', facecolors='None', linewidth=0.8, edgecolors='k')\n\n if spsite is not None:\n if units == 'angstrom':\n xs, ys = geom.xyz[spsite, xaxis], geom.xyz[spsite, yaxis]\n if units == 'nm':\n xs, ys = .1*geom.xyz[spsite, xaxis], .1*geom.xyz[spsite, yaxis]\n ax.scatter(xs, ys, s=ps*2, marker='x', color='red')\n\n ax.autoscale()\n ax.margins(0.)\n #ax.margins(0.05)\n plt.ylim(ymin, ymax)\n plt.xlim(xmin, xmax)\n plt.xlabel('x ({})'.format(unitstr))\n plt.ylabel('y ({})'.format(unitstr))\n plt.gcf()\n\n divider = make_axes_locatable(ax)\n cax = divider.append_axes(\"right\", size=\"5%\", pad=0.05)\n if avg:\n axcb = plt.colorbar(image, cax=cax, format='%f', ticks=[vmin, vmax])\n if vmin == 0.:\n axcb.ax.set_yticklabels(['0', '$\\geq$ {:.3e}'.format(vmax)])\n else:\n axcb.ax.set_yticklabels(['$\\leq$ {:.3e}'.format(vmin), '$\\geq$ {:.3e}'.format(vmax)])\n print('MIN bc among selected atoms (in final plot) = {}'.format(vmin))\n print('MAX bc among selected atoms (in final plot) = {}'.format(vmax))\n else:\n axcb = plt.colorbar(image, cax=cax, format='%f', ticks=[vmin/np.amax(bc_list), vmax/np.amax(bc_list)])\n if scale is '%':\n vmin, vmax = vmin*100/max_newbc_bg, vmax*100/max_newbc_bg\n axcb.ax.set_yticklabels(['{:.1f} %'.format(vmin), '{:.1f} %'.format(vmax)])\n print('MIN bc among selected atoms (in final plot) = {:.1f} %'.format(vmin))\n print('MAX bc among selected atoms (in final plot) = {:.1f} %'.format(vmax))\n else:\n axcb.ax.set_yticklabels(['{:.3e}'.format(vmin), '{:.3e}'.format(vmax)])\n print('MIN bc among selected atoms (in final plot) = {}'.format(vmin))\n print('MAX bc among selected atoms (in final plot) = {}'.format(vmax))\n \n\n plt.savefig(figname, bbox_inches='tight', transparent=True, dpi=dpi)\n print('Successfully plotted to \"{}\"'.format(figname))\n print('Done in {} sec'.format(time.time() - t))\n\n return bc_list, vmin, vmax, i_list, j_list\n" ]
[ [ "matplotlib.pyplot.imshow", "numpy.round", "matplotlib.pyplot.axes", "numpy.where", "matplotlib.pyplot.gca", "numpy.linalg.eig", "numpy.subtract", "matplotlib.pyplot.subplots_adjust", "numpy.zeros", "matplotlib.pyplot.figure", "numpy.append", "numpy.identity", "numpy.array", "matplotlib.pyplot.show", "numpy.sum", "numpy.absolute", "matplotlib.widgets.Button", "matplotlib.pyplot.subplots", "matplotlib.pyplot.colorbar", "matplotlib.widgets.Slider", "matplotlib.pyplot.grid" ], [ "numpy.amax", "numpy.in1d", "numpy.max", "scipy.interpolate.griddata", "scipy.spatial.cKDTree", "matplotlib.pyplot.gcf", "numpy.column_stack", "numpy.zeros", "numpy.log", "numpy.multiply", "numpy.min", "matplotlib.collections.LineCollection", "matplotlib.pyplot.ylim", "numpy.amin", "matplotlib.pyplot.savefig", "matplotlib.colors.LogNorm", "numpy.abs", "matplotlib.pyplot.subplots", "numpy.sort", "numpy.linalg.norm", "matplotlib.pyplot.colorbar", "matplotlib.pyplot.xlim", "numpy.shape", "numpy.ma.masked_array" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [ "1.10", "1.12", "1.11", "1.19", "1.13", "1.16", "1.9", "1.18", "1.20", "1.7", "1.15", "1.14", "1.17", "1.8" ], "pandas": [], "scipy": [], "tensorflow": [] } ]
balamurali-m/Ridge-Lasso
[ "6d8cc5dac8e403223ffaf81a7886ed23490a154e" ]
[ "Lasso Regression.py" ]
[ "\"\"\"\r\nLasso Regression\r\nAuthor: Balamurali M\r\n\"\"\"\r\n\r\nimport numpy as np\r\nfrom sklearn.linear_model import LinearRegression\r\nfrom sklearn.metrics import mean_squared_error\r\nfrom sklearn import linear_model\r\nimport warnings\r\nwarnings.filterwarnings('ignore')\r\n\r\n#Generating matrix with explanatory and response variable\r\nmatr = np.random.randint(200, size=(100, 5))\r\nprint (matr.shape)\r\n\r\ntrain_exp = matr[:80, :4]\r\ntrain_res = matr[:80, 4:]\r\ntest_exp = matr[80:, :4]\r\ntest_act = matr[80:, 4:]\r\n\r\nprint('train_exp',train_exp.shape)\r\nprint('train_res',train_res.shape)\r\nprint('test_exp',test_exp.shape)\r\nprint('test_act',test_act.shape)\r\n\r\n#Lasso\r\nls = linear_model.Lasso(alpha=0.1) #alpha can be fine tuned\r\nls.fit(train_exp, train_res)\r\npredicted1 = ls.predict(test_exp)\r\nprint(\"Lasso Predicted Values\")\r\nprint (predicted1)\r\nprint ('Mean Square Error Lasso')\r\nmse_1 = mean_squared_error(test_act, predicted1) \r\nprint (mse_1)\r\n\r\n#Linear Regression results\r\nLR = LinearRegression()\r\nLR.fit(train_exp, train_res)\r\npredicted2 = LR.predict(test_exp)\r\nprint(\"Linear Regression Predicted Values\")\r\nprint (predicted2)\r\nprint ('Mean Square Error Linear Reg')\r\nmse_2 = mean_squared_error(test_act, predicted2) \r\nprint (mse_2) " ]
[ [ "sklearn.metrics.mean_squared_error", "sklearn.linear_model.LinearRegression", "sklearn.linear_model.Lasso", "numpy.random.randint" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
gcfntnu/single-cell
[ "d81372ea42ef372b585ae29927c4798f62259832" ]
[ "rules/quant/scripts/convert_scanpy.py" ]
[ "#!/usr/bin/env python\n\nimport warnings\nwarnings.filterwarnings(\"ignore\", message=\"numpy.dtype size changed\")\n\nimport sys\nimport os\nimport argparse\nimport re\n\nimport scanpy as sc\nimport pandas as pd\nimport numpy as np\nimport anndata\nimport scvelo as sv\n\nGENOME = {'homo_sapiens': 'GRCh38',\n 'human': 'GRCh38',\n 'hg38': 'GRCh38',\n 'GRCh38': 'GRCh38',\n 'mus_musculus': 'mm10',\n 'mouse': 'mm10',\n 'mm10': 'mm10',\n 'GRCm38': 'mm10'\n}\n\nparser = argparse.ArgumentParser(description=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter)\n\nparser.add_argument('input', help='input file(s)', nargs='*', default=None)\nparser.add_argument('-o', '--outfile', help='output filename', required=True)\nparser.add_argument('-f', '--input-format', choices=['cellranger_aggr', 'cellranger', 'star', 'alevin', 'umitools', 'velocyto'],\n default='cellranger_aggr', help='input file format')\nparser.add_argument('-F', '--output-format', choices=['anndata', 'loom', 'csvs'], default='anndata', help='output file format')\nparser.add_argument('--aggr-csv', help='aggregation CSV with header and two columns. First column is `library_id` and second column is path to input file. This is used as a substitute for input files', default=None)\nparser.add_argument('--sample-info', help='samplesheet info, tab seprated file assumes `Sample_ID` in header', default=None)\nparser.add_argument('--feature-info', help='extra feature info filename, tab seprated file assumes `gene_ids` in header', default=None)\nparser.add_argument('--log', help='logfile', default=None)\nparser.add_argument('--filter-org', help='filter data (genes) by organism', default=None)\nparser.add_argument('--gex-only', help='only keep `Gene Expression` data and ignore other feature types.', default=True)\nparser.add_argument('--normalize', help='normalize depth across the input libraries', default='none', choices=['none', 'mapped'])\nparser.add_argument('--batch', help='column name in `sample-info` with batch covariate', default=None)\nparser.add_argument('--no-zero-cell-rm', help='do not remove cells with zero counts', action='store_true')\nparser.add_argument('--identify-doublets', help='estimate doublets using Scrublets (Single-Cell Remover of Doublets)', action='store_true')\nparser.add_argument('--identify-empty-droplets', help='estimate empty droplets using emptyDrops (DropletUtils)', action='store_true')\nparser.add_argument('-v ', '--verbose', help='verbose output.', action='store_true')\n\n\ndef downsample_gemgroup(data_list):\n \"\"\"downsample data total read count to gem group with lowest total count\n \"\"\"\n min_count = 1E99\n sampled_list = []\n for i, data in enumerate(data_list):\n isum = data.X.sum()\n if isum < min_count:\n min_count = isum\n idx = i\n for j, data in enumerate(data_list):\n if j != idx:\n sc.pp.downsample_counts(data, total_counts = min_count, replace=True)\n sampled_list.append(data)\n return sampled_list\n\ndef remove_duplicate_cols(df, copy=False):\n dups = {}\n for i, c in enumerate(df.columns):\n base, enum = c.split('-')\n if int(enum) > 0:\n orig = base + '-0'\n if orig in df.columns:\n orig = df[orig]\n col = df[c]\n if len(set(col).symmetric_difference(orig)) == 0:\n df.drop(labels=c, axis='columns')\n new_cols = [c.split('-')[0] for c in df.columns]\n df.columns = new_cols\n if copy:\n return df\n\ndef filter_input_by_csv(input, csv_fn, verbose=False):\n \"\"\"Filter input files based on match with Sample_ID in input path.\n\n Matching Sample_ID is first column in CSV file.\n 
\"\"\"\n filtered_input = []\n with open(csv_fn) as fh:\n txt = fh.read().splitlines()\n csv_rows = []\n for line in txt[1:]:\n csv_rows.append(line.split(','))\n for row in csv_rows:\n sample_id = row[0]\n patt = os.path.sep + sample_id + os.path.sep\n for pth in input:\n if patt in pth:\n filtered_input.append(pth)\n else:\n print(pth, sample_id)\n if verbose:\n print('Total input: {}'.format(len(input)))\n print('Filtered input: {}'.format(len(filtered_input)))\n return filtered_input\n\ndef identify_doublets(data, **kw):\n \"\"\"Detect doublets in single-cell RNA-seq data\n\n https://github.com/AllonKleinLab/scrublet\n \"\"\"\n import scrublet as scr\n adata = data.copy()\n col_sum = adata.X.sum(0)\n if hasattr(col_sum, 'A'):\n col_sum = col_sum.A.squeeze()\n keep = col_sum > 3\n adata = adata[:,keep]\n scrub = scr.Scrublet(adata.X, **kw)\n min_ncomp = min(10, min(adata.X.shape) - 1)\n doublet_score, predicted_doublets = scrub.scrub_doublets(n_prin_comps=min_ncomp, min_cells=1, min_counts=1)\n if predicted_doublets is None:\n predicted_doublets = scrub.call_doublets(threshold=0.34)\n data.obs['doublet_score'] = doublet_score\n data.obs['predicted_doublets'] = predicted_doublets\n return data\n\ndef identify_empty_droplets(data, min_cells=3, **kw):\n \"\"\"Detect empty droplets using DropletUtils\n\n \"\"\"\n import rpy2.robjects as robj\n from rpy2.robjects import default_converter\n from rpy2.robjects.packages import importr\n import anndata2ri\n from rpy2.robjects.conversion import localconverter\n importr(\"DropletUtils\")\n adata = data.copy()\n col_sum = adata.X.sum(0)\n if hasattr(col_sum, 'A'):\n col_sum = col_sum.A.squeeze()\n \n keep = col_sum > min_cells\n adata = adata[:,keep]\n #adata.X = adata.X.tocsc()\n anndata2ri.activate()\n robj.globalenv[\"X\"] = adata\n res = robj.r('res <- emptyDrops(assay(X))')\n anndata2ri.deactivate()\n keep = res.loc[res.FDR<0.01,:]\n data = data[keep.index,:] \n data.obs['empty_FDR'] = keep['FDR']\n \n return data\n\ndef read_cellranger(fn, args, rm_zero_cells=True, add_sample_id=True, **kw):\n \"\"\"read cellranger results\n\n Assumes the Sample_ID may be extracted from cellranger output dirname, \n e.g ` ... 
/Sample_ID/outs/filtered_feature_bc_matrix.h5 `\n \"\"\"\n if fn.endswith('.h5'):\n dirname = os.path.dirname(fn)\n data = sc.read_10x_h5(fn)\n data.var['gene_symbols'] = list(data.var_names)\n data.var_names = list(data.var['gene_ids'])\n else:\n mtx_dir = os.path.dirname(fn)\n dirname = os.path.dirname(mtx_dir)\n data = sc.read_10x_mtx(mtx_dir, gex_only=args.gex_only, var_names='gene_ids')\n data.var['gene_ids'] = list(data.var_names)\n \n barcodes = [b.split('-')[0] for b in data.obs.index]\n if len(barcodes) == len(set(barcodes)):\n data.obs_names = barcodes\n \n if add_sample_id:\n sample_id = os.path.basename(os.path.dirname(dirname))\n data.obs['library_id'] = sample_id\n data.obs['library_id'] = data.obs['library_id'].astype('category')\n data.obs_names = [i + '-' + sample_id for i in data.obs_names]\n \n data.obs.index.name = 'barcodes'\n data.var.index.name = 'gene_ids'\n return data\n \ndef read_cellranger_aggr(fn, args, **kw):\n data = read_cellranger(fn, args, add_sample_id=False)\n #if 'library_id' in data.obs:\n # data.obs.rename(index=str, columns={'library_id': 'group'}, inplace=True)\n dirname = os.path.dirname(fn)\n if not fn.endswith('.h5'):\n dirname = os.path.dirname(dirname)\n\n aggr_csv = os.path.join(dirname, 'aggregation.csv')\n aggr_csv = pd.read_csv(aggr_csv)\n sample_map = dict((str(i+1), n) for i, n in enumerate(aggr_csv['library_id']))\n barcodes_enum = [i.split('-')[1] for i in data.obs_names]\n samples = [sample_map[i] for i in barcodes_enum]\n data.obs['library_id'] = samples\n data.obs['library_id'] = data.obs['library_id'].astype('category')\n # use library_id to make barcodes unique\n barcodes = [b.split('-')[0] for b in data.obs.index]\n data.obs_names = ['{}-{}'.format(i,j) for i,j in zip(barcodes, samples)]\n return data\n\ndef read_velocyto_loom(fn, args, **kw):\n data = sc.read_loom(fn, var_names='Accession')\n data.var.rename(columns={'Gene': 'gene_symbols'}, inplace=True)\n sample_id = os.path.splitext(os.path.basename(fn))[0]\n data.obs['library_id'] = sample_id\n data.obs['library_id'] = data.obs['library_id'].astype('category')\n sv.utils.clean_obs_names(data)\n data.obs_names = [i + '-' + sample_id for i in data.obs_names]\n data.var.index.name = 'gene_ids'\n return data\n \ndef read_star(fn, args, **kw):\n mtx_dir = os.path.dirname(fn)\n data = sc.read(fn).T\n print(data)\n genes = pd.read_csv(os.path.join(mtx_dir, 'features.tsv'), header=None, sep='\\t')\n barcodes = pd.read_csv(os.path.join(mtx_dir, 'barcodes.tsv'), header=None)[0].values\n data.var_names = genes[0].values\n data.var['gene_symbols'] = genes[1].values\n sample_id = os.path.normpath(fn).split(os.path.sep)[-5]\n data.obs['library_id'] = sample_id\n data.obs['library_id'] = data.obs['library_id'].astype('category')\n barcodes = [b.split('-')[0] for b in barcodes]\n if len(barcodes) == len(set(barcodes)):\n data.obs_names = barcodes\n data.obs_names = [i + '-' + sample_id for i in data.obs_names]\n if not args.no_zero_cell_rm:\n row_sum = data.X.sum(1)\n if hasattr(row_sum, 'A'):\n row_sum = row_sum.A.squeeze()\n keep = row_sum > 1\n print(sum(keep), len(keep))\n data = data[keep,:]\n return data\n\ndef read_alevin(fn, args, **kw):\n from vpolo.alevin import parser as alevin_parser\n avn_dir = os.path.dirname(fn)\n dirname = os.path.dirname(avn_dir)\n if fn.endswith('.gz'):\n df = alevin_parser.read_quants_bin(dirname)\n else:\n df = alevin_parser.read_quants_csv(avn_dir)\n row = {'row_names': df.index.values.astype(str)}\n col = {'col_names': np.array(df.columns, dtype=str)}\n 
data = anndata.AnnData(df.values, row, col, dtype=np.float32)\n data.var['gene_ids'] = list(data.var_names)\n sample_id = os.path.basename(dirname)\n data.obs['library_id'] = [sample_id] * data.obs.shape[0]\n return data\n \ndef read_umitools(fn, **kw):\n raise NotImplementedError\n\nREADERS = {'cellranger_aggr': read_cellranger_aggr,\n 'cellranger': read_cellranger,\n 'star': read_star,\n 'umitools': read_umitools,\n 'alevin': read_alevin,\n 'velocyto': read_velocyto_loom}\n \nif __name__ == '__main__':\n args = parser.parse_args()\n\n if args.aggr_csv is not None:\n args.input = filter_input_by_csv(args.input, args.aggr_csv, verbose=args.verbose)\n \n reader = READERS.get(args.input_format.lower())\n if reader is None:\n raise ValueError('{} is not a supported input format'.format(args.input_format))\n for fn in args.input:\n if not os.path.exists(fn):\n raise IOError('file does not exist! {}'.format(fn))\n n_input = len(args.input)\n if n_input > 1:\n assert(args.input_format != 'cellranger_aggr')\n \n if args.sample_info is not None:\n sample_info = pd.read_csv(args.sample_info, sep='\\t')\n if not 'Sample_ID' in sample_info.columns:\n raise ValueError('sample_sheet needs a column called `Sample_ID`')\n sample_info.index = sample_info['Sample_ID']\n if args.batch is not None:\n batch_categories = sample_info[batch].astype('category')\n else:\n sample_info = None\n if args.batch is not None:\n raise ValueError('cannot use option `batch` when option `--sample-info` not used')\n batch_categories = None\n \n if args.feature_info is not None:\n feature_info = pd.read_csv(args.feature_info, sep='\\t')\n if not 'gene_ids' in feature_info.columns:\n raise ValueError('feature_info needs a column called `gene_ids`')\n \n feature_info.index = feature_info['gene_ids']\n else:\n feature_info = None\n \n data_list = []\n for i, fn in enumerate(args.input):\n fn = os.path.abspath(fn)\n data = reader(fn, args)\n if args.identify_empty_droplets:\n if args.verbose:\n print(\"identify empty droplets ...\")\n data = identify_empty_droplets(data)\n print(data.shape)\n if args.identify_doublets:\n if args.verbose:\n print(\"identify doublets ...\") \n data = identify_doublets(data)\n data_list.append(data)\n\n if len(data_list) > 1:\n if args.normalize == 'mapped':\n data_list = downsample_gemgroup(data_list)\n\n data = data_list.pop(0)\n if len(data_list) > 0:\n data = data.concatenate(*data_list, batch_categories=batch_categories)\n if any(i.endswith('-0') for i in data.var.columns):\n remove_duplicate_cols(data.var)\n \n if sample_info:\n lib_ids = set(data.obs['library_id'])\n for l in lib_ids:\n if l not in sample_info.index:\n raise ValueError('Library `{}` not present in sample_info'.format(l))\n obs = sample_info.loc[data.obs['library_id'],:]\n obs.index = data.obs.index.copy()\n data.obs = data.obs.merge(obs, how='left', left_index=True, right_index=True, suffixes=('', '_sample_info'), validate=True)\n\n if not args.no_zero_cell_rm:\n row_sum = data.X.sum(1)\n if hasattr(row_sum, 'A'):\n row_sum = row_sum.A.squeeze()\n keep = row_sum > 0\n data = data[keep,:]\n col_sum = data.X.sum(0)\n if hasattr(col_sum, 'A'):\n col_sum = col_sum.A.squeeze()\n keep = col_sum > 0\n data = data[:,keep]\n \n if feature_info:\n data.var = data.var.merge(feature_info, how='left', on='gene_ids', copy=False)\n \n if 'gene_symbols' in data.var.columns:\n mito_genes = data.var.gene_symbols.str.lower().str.startswith('mt-')\n try:\n data.obs['fraction_mito'] = np.sum(data[:, mito_genes].X, axis=1).A1 / np.sum(data.X, 
axis=1).A1\n except:\n data.obs['fraction_mito'] = np.sum(data[:, mito_genes].X, axis=1) / np.sum(data.X, axis=1)\n try:\n data.obs['n_counts'] = data.X.sum(axis=1).A1\n except:\n data.obs['n_counts'] = data.X.sum(axis=1)\n \n if args.verbose:\n print(data)\n \n if args.output_format == 'anndata':\n data.write(args.outfile)\n elif args.output_format == 'loom':\n data.write_loom(args.outfile)\n elif args.output_format == 'csvs':\n data.write_csvs(args.outpfile)\n else:\n raise ValueError(\"Unknown output format: {}\".format(args.output_format))\n" ]
[ [ "numpy.array", "pandas.read_csv", "numpy.sum" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.1", "1.5", "1.2", "1.3" ], "scipy": [], "tensorflow": [] } ]
elensar92/DETR
[ "ca70e7c488d1ac797fc37e0b4d7e3e6eea3e8623" ]
[ "models/simple_backbone.py" ]
[ "import tensorflow as tf\nimport tensorflow.keras as keras\n\nclass Residual(keras.layers.Layer) : \n def __init__(self, filters = 32, strides = 1, use_1x1_conv=True) :\n super(Residual, self).__init__()\n self.use_1x1_conv = use_1x1_conv\n self.conv1 = keras.layers.Conv2D(filters, padding ='same', kernel_size = 3, strides = strides)\n self.conv2 = keras.layers.Conv2D(filters, padding ='same', kernel_size = 3)\n self.conv3 = None\n\n if use_1x1_conv : \n self.conv3 = keras.layers.Conv2D(filters, kernel_size=1, strides = strides)\n self.bn1 = keras.layers.BatchNormalization()\n self.bn2 = keras.layers.BatchNormalization()\n\n def call(self, X) : \n Y = self.conv1(X)\n Y = self.bn1(Y)\n Y = keras.activations.relu(Y)\n Y = self.conv2(Y)\n Y = self.bn2(Y)\n\n if self.conv3 is not None : \n X = self.conv3(X)\n Y+=X\n\n return keras.activations.relu(Y)\n\nclass ResBlock(keras.layers.Layer) : \n def __init__(self, channels, num_residuals, first_block = False, **kwargs) : \n super(ResBlock, self).__init__(**kwargs)\n self.residuals = list()\n\n for i in range(num_residuals) : \n if i == 0 and not first_block : \n self.residuals.append( Residual(filters=channels, strides = 2, use_1x1_conv = True) )\n else :\n self.residuals.append( Residual(filters=channels, strides = 1 ) )\n def call(self, X) : \n for layer in self.residuals.layers : \n X = layer(X)\n return X\n\nclass ResNet18(keras.models.Model) :\n def __init__(self, num_classes : int, **kwargs) :\n super(ResNet18, self).__init__()\n self.conv1 = keras.layers.Conv2D(64, kernel_size=7, strides=2, padding='same')\n self.bn1 = keras.layers.BatchNormalization()\n self.relu1 = keras.layers.Activation('relu')\n self.max_pool1 = keras.layers.MaxPool2D(pool_size=3, strides=2, padding='same')\n self.resblocks = [\n ResBlock(64, 2, first_block=True),\n ResBlock(128, 2),\n ResBlock(256, 2),\n ResBlock(512, 2)\n ]\n self.gap = keras.layers.GlobalAveragePooling2D()\n self.classifier = keras.layers.Dense(units = num_classes)\n\n def call(self, X) : \n X = self.conv1(X)\n X = self.bn1(X)\n X = self.relu1(X)\n X = self.max_pool1(X)\n\n for block in self.resblocks : \n X = block(X)\n\n X = self.gap(X)\n X = self.classifier(X)\n X = keras.activations.softmax(X)\n return X\n\nif __name__ =='__main__' : \n X = tf.random.uniform(shape=(1, 224, 224, 1))\n for layer in ResNet18(10).layers:\n X = layer(X)\n print(layer.__class__.__name__,'output shape:\\t', X.shape)" ]
[ [ "tensorflow.keras.layers.Activation", "tensorflow.keras.layers.GlobalAveragePooling2D", "tensorflow.keras.layers.Dense", "tensorflow.random.uniform", "tensorflow.keras.layers.Conv2D", "tensorflow.keras.layers.MaxPool2D", "tensorflow.keras.activations.relu", "tensorflow.keras.layers.BatchNormalization", "tensorflow.keras.activations.softmax" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10", "2.7", "2.6", "2.4", "2.3", "2.5", "2.2" ] } ]
weikang9009/segregation
[ "403cc63772545f688308692d446c289ed2e7f99a", "403cc63772545f688308692d446c289ed2e7f99a", "403cc63772545f688308692d446c289ed2e7f99a" ]
[ "segregation/tests/test_distance_decay_exposure.py", "segregation/tests/test_multi_gini_seg.py", "segregation/tests/test_local_multi_local_simpson_concentration.py" ]
[ "import unittest\nfrom libpysal.examples import load_example\nimport geopandas as gpd\nimport numpy as np\nfrom segregation.spatial import DistanceDecayExposure\n\n\nclass Distance_Decay_Exposure_Tester(unittest.TestCase):\n def test_Distance_Decay_Exposure(self):\n s_map = gpd.read_file(load_example(\"Sacramento1\").get_path(\"sacramentot2.shp\"))\n df = s_map[['geometry', 'HISP_', 'TOT_POP']]\n index = DistanceDecayExposure(df, 'HISP_', 'TOT_POP')\n np.testing.assert_almost_equal(index.statistic, 0.8396583368412371)\n\n\nif __name__ == '__main__':\n unittest.main()\n", "import unittest\nfrom libpysal.examples import load_example\nimport geopandas as gpd\nimport numpy as np\nfrom segregation.aspatial import MultiGiniSeg\n\n\nclass Multi_Gini_Seg_Tester(unittest.TestCase):\n def test_Multi_Gini_Seg(self):\n s_map = gpd.read_file(load_example(\"Sacramento1\").get_path(\"sacramentot2.shp\"))\n groups_list = ['WHITE_', 'BLACK_', 'ASIAN_','HISP_']\n df = s_map[groups_list]\n index = MultiGiniSeg(df, groups_list)\n np.testing.assert_almost_equal(index.statistic, 0.5456349992598081)\n\n\nif __name__ == '__main__':\n unittest.main()", "import unittest\nfrom libpysal.examples import load_example\nimport geopandas as gpd\nimport numpy as np\nfrom segregation.local import MultiLocalSimpsonConcentration\n\n\nclass Multi_Local_Simpson_Concentration_Tester(unittest.TestCase):\n def test_Multi_Local_Simpson_Concentration(self):\n s_map = gpd.read_file(load_example(\"Sacramento1\").get_path(\"sacramentot2.shp\"))\n groups_list = ['WHITE_', 'BLACK_', 'ASIAN_','HISP_']\n df = s_map[groups_list]\n index = MultiLocalSimpsonConcentration(df, groups_list)\n np.testing.assert_almost_equal(index.statistics[0:10], np.array([0.84564007, 0.66608405, 0.50090253, 0.8700551 , 0.90194944,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t 0.86871822, 0.95552644, 0.9601067 , 0.96276946, 0.88241452]))\n\n\nif __name__ == '__main__':\n unittest.main()\n" ]
[ [ "numpy.testing.assert_almost_equal" ], [ "numpy.testing.assert_almost_equal" ], [ "numpy.array" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
viniciusguigo/kairos_minerl_basalt
[ "8f76e1d293dbcf62653ed3f7f326bd090a0af6f0" ]
[ "kairos_minerl/src/kairos_minerl/gail_wrapper.py" ]
[ "import numpy as np\nimport gym\n\n#*******************************************************************\n# FIND CAVE TASK\n#*******************************************************************\n# custom action wrapper for complete GAIL agent for MineRL\nclass ActionShaping_FindCave(gym.ActionWrapper):\n def __init__(self, env, camera_angle=10, always_attack=False):\n super().__init__(env)\n\n self.camera_angle = camera_angle\n self.always_attack = always_attack\n self._actions = [\n [('attack', 1)], #0\n [('forward', 1)], #1\n [('forward', 1), ('jump', 1)], #2\n [('camera', [-self.camera_angle, 0])], #3\n [('camera', [self.camera_angle, 0])], #4\n [('camera', [0, self.camera_angle])], #5\n [('camera', [0, -self.camera_angle])], #6\n [('back', 1)], #7\n [('left', 1)], #8\n [('right', 1)], #9\n [('jump', 1)], #10\n #[('equip',11), ('use', 1)],\n [('forward', 1), ('attack', 1)], #11\n ]\n\n self.actions = []\n for actions in self._actions:\n act = self.env.action_space.noop()\n for a, v in actions:\n act[a] = v\n if self.always_attack:\n act['attack'] = 1\n self.actions.append(act)\n # add no-op action\n act = self.env.action_space.noop()\n self.actions.append(act)\n \n self.action_space = gym.spaces.Discrete(len(self.actions))\n\n def action(self, action):\n return self.actions[action]\n\n\ndef processed_actions_to_wrapper_actions_FindCave(dataset_actions, camera_margin=5):\n \"\"\"\n Turn a batch of actions from dataset (`batch_iter`) to a numpy\n array that corresponds to batch of actions of ActionShaping wrapper (_actions).\n\n Camera margin sets the threshold what is considered \"moving camera\".\n\n Note: Hardcoded to work for actions in ActionShaping._actions, with \"intuitive\"\n ordering of actions.\n If you change ActionShaping._actions, remember to change this!\n\n Array elements are integers corresponding to actions, or \"-1\"\n for actions that did not have any corresponding discrete match.\n \"\"\"\n # There are dummy dimensions of shape one\n camera_actions = dataset_actions[:,10:].astype(np.float32)\n attack_actions = dataset_actions[:,0].astype(np.float32)\n forward_actions = dataset_actions[:,3].astype(np.float32)\n jump_actions = dataset_actions[:,4].astype(np.float32)\n back_actions = dataset_actions[:,1].astype(np.float32)\n left_actions = dataset_actions[:,5].astype(np.float32)\n right_actions = dataset_actions[:,6].astype(np.float32)\n equip_actions = dataset_actions[:,2]\n use_actions = dataset_actions[:,9].astype(np.float32)\n sneak_actions = dataset_actions[:,7].astype(np.float32)\n sprint_actions = dataset_actions[:,8].astype(np.float32)\n batch_size = len(camera_actions)\n \n actions = np.zeros((batch_size,), dtype=int)\n\n for i in range(len(camera_actions)):\n # Moving camera is most important (horizontal first)\n if camera_actions[i][0] < -camera_margin:\n actions[i] = 3\n elif camera_actions[i][0] > camera_margin:\n actions[i] = 4\n elif camera_actions[i][1] > camera_margin:\n actions[i] = 5\n elif camera_actions[i][1] < -camera_margin:\n actions[i] = 6\n elif forward_actions[i] == 1:\n if jump_actions[i] == 1:\n actions[i] = 2\n elif attack_actions[i] == 1:\n actions[i] = 11\n else:\n actions[i] = 1\n elif attack_actions[i] == 1:\n actions[i] = 0\n elif left_actions[i] == 1:\n actions[i] = 8\n elif right_actions[i] ==1:\n actions[i] = 9\n elif back_actions[i] == 1:\n actions[i] = 7\n elif jump_actions[i] == 1:\n actions[i] = 10\n else:\n # No reasonable mapping (would be no-op)\n actions[i] = 12\n return 
actions\n\n\n#*******************************************************************\n# WATERFALL TASK\n#*******************************************************************\n# custom action wrapper for complete GAIL agent for MineRL\nclass ActionShaping_Waterfall(gym.ActionWrapper):\n def __init__(self, env, camera_angle=10, always_attack=False):\n super().__init__(env)\n\n self.camera_angle = camera_angle\n self.always_attack = always_attack\n self._actions = [\n [('attack', 1)], #0\n [('forward', 1)], #1\n [('forward', 1), ('jump', 1)], #2\n [('camera', [-self.camera_angle, 0])], #3\n [('camera', [self.camera_angle, 0])], #4\n [('camera', [0, self.camera_angle])], #5\n [('camera', [0, -self.camera_angle])], #6\n [('back', 1)], #7\n [('left', 1)], #8\n [('right', 1)], #9\n [('jump', 1)], #10\n [('forward', 1), ('attack', 1)], #11\n [('equip','water_bucket'), ('use', 1)], #12 #water bucket\n [('equip','stone_pickaxe'), ('use', 1)], #13 #stone pickaxe\n [('equip','stone_shovel'), ('use', 1)], #14 #stone shovel\n [('equip','cobblestone'), ('use', 1)], #15 #cobblestone\n #[('equip',1), ('use', 1)], #16 #bucket\n ]\n\n self.actions = []\n for actions in self._actions:\n act = self.env.action_space.noop()\n for a, v in actions:\n act[a] = v\n if self.always_attack:\n act['attack'] = 1\n self.actions.append(act)\n # add no-op action\n act = self.env.action_space.noop()\n self.actions.append(act)\n \n self.action_space = gym.spaces.Discrete(len(self.actions))\n\n def action(self, action):\n return self.actions[action]\n\n\ndef processed_actions_to_wrapper_actions_Waterfall(dataset_actions, camera_margin=5):\n \"\"\"\n Turn a batch of actions from dataset (`batch_iter`) to a numpy\n array that corresponds to batch of actions of ActionShaping wrapper (_actions).\n\n Camera margin sets the threshold what is considered \"moving camera\".\n\n Note: Hardcoded to work for actions in ActionShaping._actions, with \"intuitive\"\n ordering of actions.\n If you change ActionShaping._actions, remember to change this!\n\n Array elements are integers corresponding to actions, or \"-1\"\n for actions that did not have any corresponding discrete match.\n \"\"\"\n # There are dummy dimensions of shape one\n camera_actions = dataset_actions[:,10:].astype(np.float32)\n attack_actions = dataset_actions[:,0].astype(np.float32)\n forward_actions = dataset_actions[:,3].astype(np.float32)\n jump_actions = dataset_actions[:,4].astype(np.float32)\n back_actions = dataset_actions[:,1].astype(np.float32)\n left_actions = dataset_actions[:,5].astype(np.float32)\n right_actions = dataset_actions[:,6].astype(np.float32)\n equip_actions = dataset_actions[:,2]\n use_actions = dataset_actions[:,9].astype(np.float32)\n sneak_actions = dataset_actions[:,7].astype(np.float32)\n sprint_actions = dataset_actions[:,8].astype(np.float32)\n batch_size = len(camera_actions)\n \n actions = np.zeros((batch_size,), dtype=int)\n\n\n #Enum(air,bucket,carrot,cobblestone,fence,fence_gate,none,other,snowball,stone_pickaxe,stone_shovel,water_bucket,wheat,wheat_seeds),\n\n equip_actions_dict = dict()\n equip_actions_dict['water_bucket'] = 12\n equip_actions_dict['stone_pickaxe'] = 13\n equip_actions_dict['stone_shovel'] = 14\n equip_actions_dict['cobblestone'] = 15\n #equip_actions_dict['bucket'] = 16\n # step through all actions \n currently_equipped_item = 'stone_pickaxe'\n for i in range(len(camera_actions)):\n \n # keep track of what is currently equipped\n if equip_actions[i] != 'none' and equip_actions[i] in equip_actions_dict:\n 
currently_equipped_item = equip_actions[i]\n \n # equip and use actions are the most important\n if use_actions[i] == 1: \n actions[i] = equip_actions_dict[currently_equipped_item]\n # Moving camera is second most important (horizontal first)\n elif camera_actions[i][0] < -camera_margin:\n actions[i] = 3\n elif camera_actions[i][0] > camera_margin:\n actions[i] = 4\n elif camera_actions[i][1] > camera_margin:\n actions[i] = 5\n elif camera_actions[i][1] < -camera_margin:\n actions[i] = 6\n elif forward_actions[i] == 1:\n if jump_actions[i] == 1:\n actions[i] = 2\n elif attack_actions[i] == 1:\n actions[i] = 11\n else:\n actions[i] = 1\n elif attack_actions[i] == 1:\n actions[i] = 0\n elif left_actions[i] == 1:\n actions[i] = 8\n elif right_actions[i] ==1:\n actions[i] = 9\n elif back_actions[i] == 1:\n actions[i] = 7\n elif jump_actions[i] == 1:\n actions[i] = 10\n else:\n # No reasonable mapping (would be no-op)\n actions[i] = 16\n return actions\n\n\n#*******************************************************************\n# ANIMAL PEN TASK\n#*******************************************************************\n# custom action wrapper for complete GAIL agent for MineRL\nclass ActionShaping_Animalpen(gym.ActionWrapper):\n def __init__(self, env, camera_angle=10, always_attack=False):\n super().__init__(env)\n self.equip_mapping = {'air':0,'bucket':1,'carrot':2,'cobblestone':3,'fence':4,'fence_gate':5,\n 'none':6,'other':7,'snowball':8,'stone_pickaxe':9,'stone_shovel':10,'water_bucket':11,\n 'wheat':12,'wheat_seeds':13}\n self.camera_angle = camera_angle\n self.always_attack = always_attack\n self._actions = [\n [('attack', 1)], #0\n [('forward', 1)], #1\n [('forward', 1), ('jump', 1)], #2\n [('camera', [-self.camera_angle, 0])], #3\n [('camera', [self.camera_angle, 0])], #4\n [('camera', [0, self.camera_angle])], #5\n [('camera', [0, -self.camera_angle])], #6\n [('back', 1)], #7\n [('left', 1)], #8\n [('right', 1)], #9\n [('jump', 1)], #10\n [('forward', 1), ('attack', 1)], #11\n [('equip','carrot')], #12 #carrot\n [('equip','fence'), ('use', 1)], #13 #fence\n [('equip','fence_gate'), ('use', 1)], #14 #fence_gate\n [('equip','wheat')], #15 #wheat\n [('equip','wheat_seeds')], #16 #wheat_seeds\n ]\n\n self.actions = []\n for actions in self._actions:\n act = self.env.action_space.noop()\n for a, v in actions:\n act[a] = v\n if self.always_attack:\n act['attack'] = 1\n self.actions.append(act)\n # add no-op action\n act = self.env.action_space.noop()\n self.actions.append(act)\n \n self.action_space = gym.spaces.Discrete(len(self.actions))\n\n def action(self, action):\n return self.actions[action]\n\n\ndef processed_actions_to_wrapper_actions_Animalpen(dataset_actions, camera_margin=5):\n \"\"\"\n Turn a batch of actions from dataset (`batch_iter`) to a numpy\n array that corresponds to batch of actions of ActionShaping wrapper (_actions).\n\n Camera margin sets the threshold what is considered \"moving camera\".\n\n Note: Hardcoded to work for actions in ActionShaping._actions, with \"intuitive\"\n ordering of actions.\n If you change ActionShaping._actions, remember to change this!\n\n Array elements are integers corresponding to actions, or \"-1\"\n for actions that did not have any corresponding discrete match.\n \"\"\"\n # There are dummy dimensions of shape one\n camera_actions = dataset_actions[:,10:].astype(np.float32)\n attack_actions = dataset_actions[:,0].astype(np.float32)\n forward_actions = dataset_actions[:,3].astype(np.float32)\n jump_actions = 
dataset_actions[:,4].astype(np.float32)\n back_actions = dataset_actions[:,1].astype(np.float32)\n left_actions = dataset_actions[:,5].astype(np.float32)\n right_actions = dataset_actions[:,6].astype(np.float32)\n equip_actions = dataset_actions[:,2]\n use_actions = dataset_actions[:,9].astype(np.float32)\n sneak_actions = dataset_actions[:,7].astype(np.float32)\n sprint_actions = dataset_actions[:,8].astype(np.float32)\n batch_size = len(camera_actions)\n \n actions = np.zeros((batch_size,), dtype=int)\n\n\n #Enum(air,bucket,carrot,cobblestone,fence,fence_gate,none,other,snowball,stone_pickaxe,stone_shovel,water_bucket,wheat,wheat_seeds)\n\n equip_actions_dict = dict()\n equip_actions_dict['carrot'] = 12\n equip_actions_dict['fence'] = 13\n equip_actions_dict['fence_gate'] = 14\n equip_actions_dict['wheat'] = 15\n equip_actions_dict['wheat_seeds'] = 16\n # step through all actions \n currently_equipped_item = 'stone_pickaxe'\n for i in range(len(camera_actions)):\n \n # keep track of what is currently equipped\n if equip_actions[i] != 'none' and equip_actions[i] in equip_actions_dict:\n currently_equipped_item = equip_actions[i]\n \n # equip and use actions are the most important\n if equip_actions[i] == 'carrot':\n actions[i] = equip_actions_dict['carrot']\n elif equip_actions[i] == 'wheat':\n actions[i] = equip_actions_dict['wheat']\n elif equip_actions[i] == 'wheat_seeds':\n actions[i] = equip_actions_dict['wheat_seeds']\n elif use_actions[i] == 1: \n actions[i] = equip_actions_dict[currently_equipped_item]\n # Moving camera is second most important (horizontal first)\n elif camera_actions[i][0] < -camera_margin:\n actions[i] = 3\n elif camera_actions[i][0] > camera_margin:\n actions[i] = 4\n elif camera_actions[i][1] > camera_margin:\n actions[i] = 5\n elif camera_actions[i][1] < -camera_margin:\n actions[i] = 6\n elif forward_actions[i] == 1:\n if jump_actions[i] == 1:\n actions[i] = 2\n elif attack_actions[i] == 1:\n actions[i] = 11\n else:\n actions[i] = 1\n elif attack_actions[i] == 1:\n actions[i] = 0\n elif left_actions[i] == 1:\n actions[i] = 8\n elif right_actions[i] ==1:\n actions[i] = 9\n elif back_actions[i] == 1:\n actions[i] = 7\n elif jump_actions[i] == 1:\n actions[i] = 10\n else:\n # No reasonable mapping (would be no-op)\n actions[i] = 17\n return actions\n\n\n#*******************************************************************\n# VILLAGE HOUSE TASK\n#*******************************************************************\n# custom action wrapper for complete GAIL agent for MineRL\nclass ActionShaping_Villagehouse(gym.ActionWrapper):\n def __init__(self, env, camera_angle=10, always_attack=False):\n super().__init__(env)\n self.equip_mapping = {'acacia_door':0,'acacia_fence':1,'cactus':2,'cobblestone':3,'dirt':4,'fence':5,'flower_pot':6,\n 'glass':7,'ladder':8,'log#0':9,'log#1':10,'log2#0':12,'none':13,'other':14,'planks#0':15,\n 'planks#1':16,'planks#4':17,'red_flower':18,'sand,sandstone#0':19,'sandstone#2':20,'sandstone_stairs':21,\n 'snowball':22,'spruce_door':23,'spruce_fence':24,'stone_axe':25,'stone_pickaxe':26,'stone_stairs':27,\n 'torch':28,'wooden_door':29,'wooden_pressure_plate':30}\n self.camera_angle = camera_angle\n self.always_attack = always_attack\n self._actions = [\n [('attack', 1)], #0\n [('forward', 1)], #1\n [('forward', 1), ('jump', 1)], #2\n [('camera', [-self.camera_angle, 0])], #3\n [('camera', [self.camera_angle, 0])], #4\n [('camera', [0, self.camera_angle])], #5\n [('camera', [0, -self.camera_angle])], #6\n [('back', 1)], #7\n [('left', 1)], 
#8\n [('right', 1)], #9\n [('jump', 1)], #10\n [('forward', 1), ('attack', 1)], #11\n [('equip','acacia_door'), ('use', 1)], #12 \n [('equip','acacia_fence'), ('use', 1)], #13\n [('equip','cactus'), ('use', 1)], #14\n [('equip','cobblestone'), ('use', 1)], #15\n [('equip','dirt'), ('use', 1)], #16 \n [('equip','fence'), ('use', 1)], #17\n [('equip','flower_pot'), ('use', 1)], #18\n [('equip','glass'), ('use', 1)], #19\n [('equip','ladder'), ('use', 1)], #20\n [('equip','log#0'), ('use', 1)], #21\n [('equip','log#1'), ('use', 1)], #22\n [('equip','log2#0'), ('use', 1)], #23\n [('equip','planks#0'), ('use', 1)], #24\n [('equip','planks#1'), ('use', 1)], #25\n [('equip','planks#4'), ('use', 1)], #26\n [('equip','red_flower'), ('use', 1)], #27 \n [('equip','sand,sandstone#0'), ('use', 1)], #28\n [('equip','sandstone#2'), ('use', 1)], #29 \n [('equip','sandstone_stairs'), ('use', 1)],#30\n [('equip','spruce_door'), ('use', 1)], #31\n [('equip','spruce_fence'), ('use', 1)], #32\n [('equip','stone_axe'), ('use', 1)], #33 \n [('equip','stone_pickaxe'), ('use', 1)], #34 \n [('equip','stone_stairs'), ('use', 1)], #35\n [('equip','torch'), ('use', 1)], #36 \n [('equip','wooden_door'), ('use', 1)], #37 \n [('equip','wooden_pressure_plate'), ('use', 1)], #38\n ]\n\n self.actions = []\n for actions in self._actions:\n act = self.env.action_space.noop()\n for a, v in actions:\n act[a] = v\n if self.always_attack:\n act['attack'] = 1\n self.actions.append(act)\n # add no-op action\n act = self.env.action_space.noop()\n self.actions.append(act)\n \n self.action_space = gym.spaces.Discrete(len(self.actions))\n\n def action(self, action):\n return self.actions[action]\n\n\ndef processed_actions_to_wrapper_actions_Villagehouse(dataset_actions, camera_margin=5):\n \"\"\"\n Turn a batch of actions from dataset (`batch_iter`) to a numpy\n array that corresponds to batch of actions of ActionShaping wrapper (_actions).\n\n Camera margin sets the threshold what is considered \"moving camera\".\n\n Note: Hardcoded to work for actions in ActionShaping._actions, with \"intuitive\"\n ordering of actions.\n If you change ActionShaping._actions, remember to change this!\n\n Array elements are integers corresponding to actions, or \"-1\"\n for actions that did not have any corresponding discrete match.\n \"\"\"\n # There are dummy dimensions of shape one\n camera_actions = dataset_actions[:,10:].astype(np.float32)\n attack_actions = dataset_actions[:,0].astype(np.float32)\n forward_actions = dataset_actions[:,3].astype(np.float32)\n jump_actions = dataset_actions[:,4].astype(np.float32)\n back_actions = dataset_actions[:,1].astype(np.float32)\n left_actions = dataset_actions[:,5].astype(np.float32)\n right_actions = dataset_actions[:,6].astype(np.float32)\n equip_actions = dataset_actions[:,2]\n use_actions = dataset_actions[:,9].astype(np.float32)\n sneak_actions = dataset_actions[:,7].astype(np.float32)\n sprint_actions = dataset_actions[:,8].astype(np.float32)\n batch_size = len(camera_actions)\n \n actions = np.zeros((batch_size,), dtype=int)\n\n\n #Enum(acacia_door,acacia_fence,cactus,cobblestone,dirt,fence,flower_pot,glass,ladder,log#0,log#1,log2#0,none,other,planks#0,planks#1,planks#4,red_flower,sand,sandstone#0,sandstone#2,sandstone_stairs,snowball,spruce_door,spruce_fence,stone_axe,stone_pickaxe,stone_stairs,torch,wooden_door,wooden_pressure_plate)\n\n equip_actions_dict = dict()\n equip_actions_dict['carrot'] = 12\n equip_actions_dict['fence'] = 13\n equip_actions_dict['fence_gate'] = 14\n 
equip_actions_dict['wheat'] = 15\n equip_actions_dict['wheat_seeds'] = 16 \n \n equip_actions_dict['acacia_door']=12 \n equip_actions_dict['acacia_fence']=13\n equip_actions_dict['cactus']=14\n equip_actions_dict['cobblestone']=15\n equip_actions_dict['dirt']=16 \n equip_actions_dict['fence']=17\n equip_actions_dict['flower_pot']=18\n equip_actions_dict['glass']=19\n equip_actions_dict['ladder']=20\n equip_actions_dict['log#0']=21\n equip_actions_dict['log#1']=22\n equip_actions_dict['log2#0']=23\n equip_actions_dict['planks#0']=24\n equip_actions_dict['planks#1']=25\n equip_actions_dict['planks#4']=26\n equip_actions_dict['red_flower']=27 \n equip_actions_dict['sand,sandstone#0']=28\n equip_actions_dict['sandstone#2']=29 \n equip_actions_dict['sandstone_stairs']=30\n equip_actions_dict['spruce_door']=31\n equip_actions_dict['spruce_fence']=32\n equip_actions_dict['stone_axe']=33 \n equip_actions_dict['stone_pickaxe']=34 \n equip_actions_dict['stone_stairs']=35\n equip_actions_dict['torch']=36 \n equip_actions_dict['wooden_door']=37 \n equip_actions_dict['wooden_pressure_plate']=38\n \n \n # step through all actions \n currently_equipped_item = 'stone_pickaxe'\n for i in range(len(camera_actions)):\n \n # keep track of what is currently equipped\n if equip_actions[i] != 'none' and equip_actions[i] in equip_actions_dict:\n currently_equipped_item = equip_actions[i]\n \n # equip and use actions are the most important\n if use_actions[i] == 1: \n actions[i] = equip_actions_dict[currently_equipped_item]\n # Moving camera is second most important (horizontal first)\n elif camera_actions[i][0] < -camera_margin:\n actions[i] = 3\n elif camera_actions[i][0] > camera_margin:\n actions[i] = 4\n elif camera_actions[i][1] > camera_margin:\n actions[i] = 5\n elif camera_actions[i][1] < -camera_margin:\n actions[i] = 6\n elif forward_actions[i] == 1:\n if jump_actions[i] == 1:\n actions[i] = 2\n elif attack_actions[i] == 1:\n actions[i] = 11\n else:\n actions[i] = 1\n elif attack_actions[i] == 1:\n actions[i] = 0\n elif left_actions[i] == 1:\n actions[i] = 8\n elif right_actions[i] ==1:\n actions[i] = 9\n elif back_actions[i] == 1:\n actions[i] = 7\n elif jump_actions[i] == 1:\n actions[i] = 10\n else:\n # No reasonable mapping (would be no-op)\n actions[i] = 39\n return actions\n\n\n# custom action wrapper for Simple GAIL agent for MineRL\n#*******************************************************************\n# NAVIGATION SUBTASK\n#*******************************************************************\n# custom action wrapper for complete GAIL agent for MineRL\nclass ActionShaping_Navigation(gym.ActionWrapper):\n def __init__(self, env, camera_angle=10, always_attack=False):\n super().__init__(env)\n\n self.camera_angle = camera_angle\n self.always_attack = always_attack\n self._actions = [\n [('attack', 1)], #0\n [('forward', 1)], #1\n [('forward', 1), ('jump', 1)], #2\n [('camera', [0, self.camera_angle])], #3 #horizontal (right)\n [('camera', [0, -self.camera_angle])], #4 #horizontal (left)\n [('camera', [-self.camera_angle, 0])], #5 #verticle\n [('camera', [self.camera_angle, 0])], #6 #verticle\n [('back', 1)], #7\n [('left', 1)], #8\n [('right', 1)], #9\n [('jump', 1)], #10\n #[('equip',11), ('use', 1)],\n [('forward', 1), ('attack', 1)], #11\n ]\n\n self.actions = []\n for actions in self._actions:\n act = self.env.action_space.noop()\n for a, v in actions:\n act[a] = v\n if self.always_attack:\n act['attack'] = 1\n self.actions.append(act)\n # add no-op action\n act = self.env.action_space.noop()\n 
self.actions.append(act)\n \n self.action_space = gym.spaces.Discrete(len(self.actions))\n\n def action(self, action):\n return self.actions[action]\n return self.actions[action]\n\n\ndef processed_actions_to_wrapper_actions_Navigation(dataset_actions, camera_margin=5):\n \"\"\"\n Turn a batch of actions from dataset (`batch_iter`) to a numpy\n array that corresponds to batch of actions of ActionShaping wrapper (_actions).\n\n Camera margin sets the threshold what is considered \"moving camera\".\n\n Note: Hardcoded to work for actions in ActionShaping._actions, with \"intuitive\"\n ordering of actions.\n If you change ActionShaping._actions, remember to change this!\n\n Array elements are integers corresponding to actions, or \"-1\"\n for actions that did not have any corresponding discrete match.\n \"\"\"\n # There are dummy dimensions of shape one\n camera_actions = dataset_actions[:,10:].astype(np.float32)\n attack_actions = dataset_actions[:,0].astype(np.float32)\n forward_actions = dataset_actions[:,3].astype(np.float32)\n jump_actions = dataset_actions[:,4].astype(np.float32)\n back_actions = dataset_actions[:,1].astype(np.float32)\n left_actions = dataset_actions[:,5].astype(np.float32)\n right_actions = dataset_actions[:,6].astype(np.float32)\n equip_actions = dataset_actions[:,2]\n use_actions = dataset_actions[:,9].astype(np.float32)\n sneak_actions = dataset_actions[:,7].astype(np.float32)\n sprint_actions = dataset_actions[:,8].astype(np.float32)\n batch_size = len(camera_actions)\n \n actions = np.zeros((batch_size,), dtype=int)\n\n for i in range(len(camera_actions)):\n # Moving camera is most important (horizontal first!!!)\n if camera_actions[i][1] < -camera_margin:\n actions[i] = 3\n elif camera_actions[i][1] > camera_margin:\n actions[i] = 4\n elif camera_actions[i][0] > camera_margin:\n actions[i] = 5\n elif camera_actions[i][0] < -camera_margin:\n actions[i] = 6\n elif forward_actions[i] == 1:\n if jump_actions[i] == 1:\n actions[i] = 2\n elif attack_actions[i] == 1:\n actions[i] = 11\n else:\n actions[i] = 1\n elif attack_actions[i] == 1:\n actions[i] = 0\n elif left_actions[i] == 1:\n actions[i] = 8\n elif right_actions[i] ==1:\n actions[i] = 9\n elif jump_actions[i] == 1:\n actions[i] = 10\n elif back_actions[i] == 1:\n actions[i] = 7\n elif sum(dataset_actions[i,(0,1,3,4,5,6,7,8,9)].astype(np.float32)):\n # actual noop\n actions[i] = 12\n else: #catch everthing else and remove later\n actions[i] = 99\n \n return actions\n\n\n\n\n# return only image as the observation\nclass PovOnlyObservation(gym.ObservationWrapper):\n def __init__(self, env):\n super().__init__(env)\n self.observation_space = self.env.observation_space['pov']\n\n def observation(self, observation):\n obs = observation['pov'].squeeze().astype(np.float32)\n # Transpose observations to be channel-first (BCHW instead of BHWC)\n obs = obs.transpose(2, 0, 1)\n # Normalize observations\n obs /= 255.0\n return obs" ]
[ [ "numpy.zeros" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
soloist-v/yi
[ "d7c04fe6266441d2629ba35f69c9fc659a52b370" ]
[ "yi/core/functional.py" ]
[ "import numpy as np\n\n\ndef get_im2col_indices(x_shape, field_height=3, field_width=3, padding=1, stride=1):\n # First figure out what the size of the output should be\n N, C, H, W = x_shape\n assert (H + 2 * padding - field_height) % stride == 0\n assert (W + 2 * padding - field_height) % stride == 0\n out_height = (H + 2 * padding - field_height) / stride + 1\n out_width = (W + 2 * padding - field_width) / stride + 1\n\n i0 = np.repeat(np.arange(field_height, dtype='int32'), field_width)\n i0 = np.tile(i0, C)\n i1 = stride * np.repeat(np.arange(out_height, dtype='int32'), out_width)\n j0 = np.tile(np.arange(field_width), field_height * C)\n j1 = stride * np.tile(np.arange(out_width, dtype='int32'), int(out_height))\n i = i0.reshape(-1, 1) + i1.reshape(1, -1)\n j = j0.reshape(-1, 1) + j1.reshape(1, -1)\n\n k = np.repeat(np.arange(C, dtype='int32'), field_height * field_width).reshape(-1, 1)\n\n return (k, i, j)\n\n\ndef im2col_indices(x, field_height=3, field_width=3, padding=1, stride=1):\n \"\"\" An implementation of im2col based on some fancy indexing \"\"\"\n # Zero-pad the input\n p = padding\n x_padded = np.pad(x, ((0, 0), (0, 0), (p, p), (p, p)), mode='constant')\n\n k, i, j = get_im2col_indices(x.shape, field_height, field_width, padding,\n stride)\n\n cols = x_padded[:, k, i, j]\n C = x.shape[1]\n cols = cols.transpose(1, 2, 0).reshape(field_height * field_width * C, -1)\n return cols\n\n\ndef col2im_indices(cols, x_shape, field_height=3, field_width=3, padding=1,\n stride=1):\n \"\"\" An implementation of col2im based on fancy indexing and np.add.at \"\"\"\n N, C, H, W = x_shape\n H_padded, W_padded = H + 2 * padding, W + 2 * padding\n x_padded = np.zeros((N, C, H_padded, W_padded), dtype=cols.dtype)\n k, i, j = get_im2col_indices(x_shape, field_height, field_width, padding,\n stride)\n cols_reshaped = cols.reshape(C * field_height * field_width, -1, N)\n cols_reshaped = cols_reshaped.transpose(2, 0, 1)\n np.add.at(x_padded, (slice(None), k, i, j), cols_reshaped)\n if padding == 0:\n return x_padded\n return x_padded[:, :, padding:-padding, padding:-padding]\n\n\ndef im2col_indices_tensor(x, field_height=3, field_width=3, padding=1, stride=1):\n \"\"\" An implementation of im2col based on some fancy indexing \"\"\"\n # Zero-pad the input\n from ._tensor import Tensor\n from .nodes import PadConstant\n x: Tensor\n p = padding\n # x_padded = np.pad(x, ((0, 0), (0, 0), (p, p), (p, p)), mode='constant')\n x_padded = PadConstant(x, ((0, 0), (0, 0), (p, p), (p, p)))\n k, i, j = get_im2col_indices(x.shape, field_height, field_width, padding,\n stride)\n cols = x_padded[:, k, i, j]\n C = x.shape[1]\n cols = cols.transpose(1, 2, 0).reshape(field_height * field_width * C, -1)\n return cols\n\n\ndef col2im_indices_tensor(cols, x_shape, field_height=3, field_width=3, padding=1, stride=1):\n \"\"\" An implementation of col2im based on fancy indexing and np.add.at\n tip: 构建计算图只需要关注Tensor\"操作\"或\"变换\"即可,因为只有Tensor的变换操作才会产生计算图\n \"\"\"\n from ._tensor import Tensor\n from .nodes import PadConstant, Add\n cols: Tensor\n N, C, H, W = x_shape\n H_padded, W_padded = H + 2 * padding, W + 2 * padding\n x_padded = Tensor(np.zeros((N, C, H_padded, W_padded), dtype=cols.dtype))\n k, i, j = get_im2col_indices(x_shape, field_height, field_width, padding,\n stride)\n cols_reshaped = cols.reshape(C * field_height * field_width, -1, N)\n cols_reshaped = cols_reshaped.transpose(2, 0, 1)\n np.add.at(x_padded.data, (slice(None), k, i, j), cols_reshaped)\n if padding == 0:\n return x_padded\n return 
x_padded[:, :, padding:-padding, padding:-padding]\n\n\nif __name__ == '__main__':\n img = np.zeros((2, 3, 16, 16))\n cols = im2col_indices(img, 3, 3, 1, 1)\n res = col2im_indices(cols, img.shape, 3, 3, 1, 1)\n print(res.shape)\n" ]
[ [ "numpy.arange", "numpy.zeros", "numpy.tile", "numpy.pad" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
linsats/GRAC_Discrete
[ "33f7917cc23e50bb3326b6d94a7957ff2fe419f7" ]
[ "main.py" ]
[ "import numpy as np\nimport torch\nimport gym\nimport argparse\nimport os\nimport cv2\nimport imageio\nfrom skimage.color import rgb2gray\n\nimport datetime\nimport utils\n\nfrom torch.utils.tensorboard import SummaryWriter\n\ndef eval_policy(policy, env_name, seed, eval_episodes=10):\n eval_env = gym.make(env_name)\n #eval_env.seed(seed + 100)\n \n avg_reward = 0\n for _ in range(eval_episodes):\n state, done = eval_env.reset(), False\n while not done:\n action = policy.select_action(np.array(state), test=True)\n state, reward, done, _ = eval_env.step(action)\n avg_reward += reward\n avg_reward /= float(eval_episodes)\n\n print(\"---------------------------------------\")\n print(\"Evaluation over {} episodes: {:.3f}\".format(eval_episodes, avg_reward))\n print(\"---------------------------------------\")\n return avg_reward\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--policy\", default=\"GRAC\") # Policy name (GRAC)\n parser.add_argument(\"--env\", default=\"Breakout-ram-v0\") # OpenAI gym environment name\n parser.add_argument(\"--seed\", default=0, type=int) # Sets Gym, PyTorch and Numpy seeds\n parser.add_argument(\"--start_timesteps\", default=1e4, type=int) # Time steps initial random policy is used\n parser.add_argument(\"--eval_freq\", default=5e3, type=int) # How often (time steps) we evaluate\n parser.add_argument(\"--max_timesteps\", default=2e6, type=int) # Max time steps to run environment\n parser.add_argument(\"--expl_noise\", default=0.1) # Std of Gaussian exploration noise\n parser.add_argument(\"--batch_size\", default=64, type=int) # Batch size for both actor and critic\n parser.add_argument(\"--discount\", default=0.99) # Discount factor\n parser.add_argument(\"--n_repeat\", default=200, type=int) # Frequency of delayed policy updates\n parser.add_argument(\"--save_model\", action=\"store_true\") # Save model and optimizer parameters\n parser.add_argument(\"--load_model\", default=\"\") # Model load file name, \"\" doesn't load, \"default\" uses file_name\n parser.add_argument('--actor_cem_clip', default=0.5)\n parser.add_argument('--use_expl_noise', action=\"store_true\")\n parser.add_argument(\"--debug\", action=\"store_true\")\n parser.add_argument(\"--comment\", default=\"\")\n parser.add_argument(\"--exp_name\", default=\"exp_logs\")\n parser.add_argument(\"--which_cuda\", default=0, type=int)\n\n args = parser.parse_args()\n\n device = torch.device('cuda:{}'.format(args.which_cuda))\n\n file_name = \"{}_{}_{}\".format(args.policy, args.env, args.seed)\n file_name += \"_{}\".format(args.comment) if args.comment != \"\" else \"\"\n folder_name = datetime.datetime.now().strftime('%b%d_%H-%M-%S_') + file_name\n result_folder = 'runs/{}'.format(folder_name) \n if args.exp_name is not \"\":\n result_folder = '{}/{}'.format(args.exp_name, folder_name)\n if args.debug: \n result_folder = 'debug/{}'.format(folder_name)\n if not os.path.exists('{}/models/'.format(result_folder)):\n os.makedirs('{}/models/'.format(result_folder))\n print(\"---------------------------------------\")\n print(\"Policy: {}, Env: {}, Seed: {}\".format(args.policy, args.env, args.seed))\n\n if not os.path.exists(\"./results\"):\n os.makedirs(\"./results\")\n if args.save_model and not os.path.exists(\"./models\"):\n os.makedirs(\"./models\")\n\n env = gym.make(args.env)\n state_dim = env.observation_space.shape[0]\n print(state_dim)\n action_dim = 4#env.action_space.shape[0]\n print(action_dim)\n print(type(action_dim))\n\n\n if 
args.save_model is False:\n args.save_model = True\n kwargs = {\n\t\t\"env\": args.env,\n\t\t\"state_dim\": state_dim,\n\t\t\"action_dim\": action_dim,\n\t\t\"batch_size\": args.batch_size,\n\t\t\"discount\": args.discount,\n\t\t\"device\": device,\n } \n\n # Initialize policy\n if \"GRAC\" in args.policy:\n GRAC = __import__(args.policy)\n policy = GRAC.GRAC(**kwargs)\n\n replay_buffer = utils.ReplayBuffer(state_dim, device=device)\n \n evaluations = [eval_policy(policy, args.env, args.seed)] \n\n #### Evaluation\n state,done = env.reset(), False\n episode_reward = 0\n episode_timesteps = 0\n episode_num = 0\n\n writer = SummaryWriter(log_dir=result_folder, comment=file_name)\n \n with open(\"{}/parameters.txt\".format(result_folder), 'w') as file:\n for key, value in vars(args).items():\n file.write(\"{} = {}\\n\".format(key, value))\n\n\n for t in range(int(args.max_timesteps)):\n episode_timesteps += 1\n \n # select action randomly or according to policy\n if t < args.start_timesteps:\n action = np.random.randint(action_dim)\n else:\n action = policy.select_action(np.array(state),writer=writer)\n\n #Performa action\n next_state, reward, done, _ = env.step(action)\n writer.add_scalar('train/reward',reward,t+1)\n #img = np.copy(state)\n #img_g = cv2.resize(img,(128,128))\n #print(\"img_g\",img_g.shape)\n #print(img_g)\n #print(\"state\",state.shape)\n #cv2.imshow('image',img_g)\n #cv2.waitKey(0)\n #cv2.destroyAllWindows()\n # Store data in replay buffer\n replay_buffer.add(state,action,next_state,reward,done) \n state = next_state\n episode_reward += reward\n if t >= args.start_timesteps and (t + 1) % 20 == 0:\n policy.train(replay_buffer, args.batch_size, writer, 20.0)\n if done:\n print(\"Total T {} Episode Num:{} Episode T:{} Reward: {:.3f}\".format(t+1, episode_num+1, episode_timesteps, episode_reward))\n \n # reset environment\n state, done = env.reset(), False\n episode_reward = 0\n episode_timesteps = 0\n episode_num += 1\n\n #Evaluate episode \n if t >= args.start_timesteps and (t + 1) % args.eval_freq == 0:\n evaluation = eval_policy(policy,args.env, args.seed)\n evaluations.append(evaluation)\n writer.add_scalar('test/avg_return', evaluation, t+1)\n #np.save(\"{}/evaluations\".format(result_folder), evaluations)\n" ]
[ [ "numpy.array", "torch.utils.tensorboard.SummaryWriter", "numpy.random.randint" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
onwebbe/pyFaceRecognize
[ "10f38d57542991f23957affea78763febf6f2980" ]
[ "start.py" ]
[ "import faceRecorgnize.cropFaces as cropFaces\nimport faceRecorgnize.faceRecorgnize as faceRecorgnize\nimport utils.ImageUtils as ImageUtils\nimport utils.Constants as Constants\nimport utils.FaceUtils as FaceUtils\nimport cv2 as cv\nimport os\nimport numpy as np\nimport db.FaceData as FaceData\n\nrawImageRootPath = \"/Users/i326432/Documents/kimiface/\"\n\ndef updateMostSimilarPerson(faceId):\n faceDB = FaceData.getFaceData()\n compareFace = FaceUtils.compareFaceByOthers(faceId)\n\n\n for valueObj in compareFace:\n compareFaceId = valueObj[0]\n similarValue = valueObj[1]\n compareFaceObj = faceDB.findFaceById(compareFaceId)\n compareFaceAssignStatus = compareFaceObj['assignedStatus']\n if (compareFaceAssignStatus == 'U' or compareFaceAssignStatus == 'A'):\n if (similarValue <= 0.4):\n compareFaceData = faceDB.findFaceById(compareFaceId)\n comparePersonId = compareFaceData['personId']\n faceDB.changeFacePerson(faceId, comparePersonId, 'A')\n print('找到相似的脸' + str(comparePersonId))\n else:\n faceDB.changeFacePerson(faceId, Constants.PERSON_ID_UNNAMED, 'U')\n print('没有相似的脸, 更新为 匿名')\n else:\n print('这张脸手动改过')\n break\n\ndef displayFaceCompareResult(sourceFaceId):\n faceDB = FaceData.getFaceData()\n compareFace = FaceUtils.compareFaceByOthers(sourceFaceId)\n faceData = faceDB.findFaceById(sourceFaceId)\n faceFilePath = faceData['imagePath']\n faceId = faceData['faceId']\n faceImage = cv.imread(faceFilePath)\n\n namedWindowName = 'test_' + str(faceId)\n cv.namedWindow(namedWindowName)\n cv.imshow(namedWindowName, faceImage)\n\n\n for valueObj in compareFace:\n faceId = valueObj[0]\n similarValue = valueObj[1]\n if (similarValue <= 0.42):\n faceData = faceDB.findFaceById(faceId)\n faceFilePath = faceData['imagePath']\n faceId = faceData['faceId']\n faceImage = cv.imread(faceFilePath)\n\n namedWindowName = 'test_' + str(faceId)\n cv.namedWindow(namedWindowName)\n cv.imshow(namedWindowName, faceImage)\n else:\n break\n\n cv.waitKey(0)\n cv.destroyAllWindows()\n\n\n\n\ndef processAllFiles(path):\n allfile=[]\n for dirpath,dirnames,filenames in os.walk(path):\n for name in filenames:\n processFile(os.path.join(dirpath, name), name)\n for dir in dirnames:\n processAllFiles(os.path.join(path, dir))\n return allfile\n \ndef processFile(filePath, filename):\n rawFilePath = filePath\n print(\"---------开始处理: \" + rawFilePath + \" ---------\")\n existingRawFileInDB = ImageUtils.getFaceInDBByRawImage(rawFilePath)\n if (len(existingRawFileInDB) == 0):\n resultData = cropFaces.cropFaces2(rawFilePath)\n if (resultData is not None):\n faceList = resultData['croppedImageList']\n featureList = resultData['croppedFeatureList']\n faceIndex = 0\n if (len(faceList) == 0):\n faceId = FaceUtils.createNewFaceForPerson('', rawFilePath, Constants.PERSON_ID_UNNAMED)\n FaceUtils.updateFaceFeatureFile(faceId, '')\n else:\n for index in range(0, len(faceList)):\n faceImage = faceList[index]\n featureData = featureList[index]\n \n faceFileName = os.path.join(Constants.DATA_ROOT_PATH, Constants.FACE_IMG_FILE_PATH, \"face_\" + filename + \"_\" + str(faceIndex) + \".bmp\")\n cv.imwrite(faceFileName, faceImage)\n \n faceIndex = faceIndex + 1\n\n faceId = FaceUtils.createNewFaceForPerson(faceFileName, rawFilePath, Constants.PERSON_ID_UNNAMED)\n faceFeaturePath = os.path.join(Constants.DATA_ROOT_PATH, Constants.FEATURE_FILE_PATH, 'faceFeature_' + str(faceId) + '.npy')\n print (\"开始保存feature:\" + faceFeaturePath)\n saveFeatureData = np.array(featureData)\n np.save(faceFeaturePath, saveFeatureData)\n 
FaceUtils.updateFaceFeatureFile(faceId, faceFeaturePath)\n\n updateMostSimilarPerson(faceId)\n else:\n print(\" \" + rawFilePath + \" 已处理过了\")\n \n print(\"---------结束处理: \" + rawFilePath + \" ---------\")\n\nprocessAllFiles(rawImageRootPath)" ]
[ [ "numpy.array", "numpy.save" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
JohnGBaker/ptmcmc
[ "a8878d6a79019fa5e2144a0b5fb88c04d4659e7e" ]
[ "python/covar.py" ]
[ "# -*- coding: utf-8 -*-\n#This code is adaped from\n# https://github.com/dfm/corner.py\n# git hash 5c2cd63 on May 25\n# Modifications by John Baker NASA-GSFC (2016-18)\n#Copyright (c) 2013-2016 Daniel Foreman-Mackey\n#All rights reserved.\n#\n#Redistribution and use in source and binary forms, with or without\n#modification, are permitted provided that the following conditions are met:\n#\n#1. Redistributions of source code must retain the above copyright notice, this\n# list of conditions and the following disclaimer.\n#2. Redistributions in binary form must reproduce the above copyright notice,\n# this list of conditions and the following disclaimer in the documentation\n# and/or other materials provided with the distribution.\n#\n#THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND\n#ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\n#WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n#DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR\n#ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES\n#(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\n#LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND\n#ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n#(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\n#SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n#\n#The views and conclusions contained in the software and documentation are those\n#of the authors and should not be interpreted as representing official policies,\n#either expressed or implied, of the FreeBSD Project.\n\n\nfrom __future__ import print_function, absolute_import\n\nimport logging\nimport math\nimport numpy as np\nimport matplotlib.pyplot as pl\nfrom matplotlib.ticker import MaxNLocator\nfrom matplotlib.colors import LinearSegmentedColormap, colorConverter\nfrom matplotlib.ticker import ScalarFormatter\nfrom matplotlib.patches import Ellipse\n\ntry:\n from scipy.ndimage import gaussian_filter\nexcept ImportError:\n gaussian_filter = None\n\n__all__ = [\"corner\", \"hist2d\", \"quantile\"]\n\n\ndef corner(xs, bins=20, range=None, weights=None, cov=None, color=\"k\",\n smooth=None, smooth1d=None,\n labels=None, label_kwargs=None,\n show_titles=False, title_fmt=\".2f\", title_kwargs=None,\n truths=None, truth_color=\"#4682b4\",\n scale_hist=False, quantiles=None, verbose=False, fig=None,\n max_n_ticks=5, top_ticks=False, use_math_text=False,\n hist_kwargs=None, **hist2d_kwargs):\n \"\"\"\n Make a *sick* corner plot showing the projections of a data set in a\n multi-dimensional space. kwargs are passed to hist2d() or used for\n `matplotlib` styling.\n\n Parameters\n ----------\n xs : array_like[nsamples, ndim]\n The samples. This should be a 1- or 2-dimensional array. For a 1-D\n array this results in a simple histogram. For a 2-D array, the zeroth\n axis is the list of samples and the next axis are the dimensions of\n the space.\n\n bins : int or array_like[ndim,]\n The number of bins to use in histograms, either as a fixed value for\n all dimensions, or as a list of integers for each dimension.\n\n weights : array_like[nsamples,]\n The weight of each sample. 
If `None` (default), samples are given\n equal weight.\n\n color : str\n A ``matplotlib`` style color for all histograms.\n\n smooth, smooth1d : float\n The standard deviation for Gaussian kernel passed to\n `scipy.ndimage.gaussian_filter` to smooth the 2-D and 1-D histograms\n respectively. If `None` (default), no smoothing is applied.\n\n labels : iterable (ndim,)\n A list of names for the dimensions. If a ``xs`` is a\n ``pandas.DataFrame``, labels will default to column names.\n\n label_kwargs : dict\n Any extra keyword arguments to send to the `set_xlabel` and\n `set_ylabel` methods.\n\n show_titles : bool\n Displays a title above each 1-D histogram showing the 0.5 quantile\n with the upper and lower errors supplied by the quantiles argument.\n\n title_fmt : string\n The format string for the quantiles given in titles. If you explicitly\n set ``show_titles=True`` and ``title_fmt=None``, the labels will be\n shown as the titles. (default: ``.2f``)\n\n title_kwargs : dict\n Any extra keyword arguments to send to the `set_title` command.\n\n range : iterable (ndim,)\n A list where each element is either a length 2 tuple containing\n lower and upper bounds or a float in range (0., 1.)\n giving the fraction of samples to include in bounds, e.g.,\n [(0.,10.), (1.,5), 0.999, etc.].\n If a fraction, the bounds are chosen to be equal-tailed.\n\n truths : iterable (ndim,)\n A list of reference values to indicate on the plots. Individual\n values can be omitted by using ``None``.\n\n truth_color : str\n A ``matplotlib`` style color for the ``truths`` makers.\n\n scale_hist : bool\n Should the 1-D histograms be scaled in such a way that the zero line\n is visible?\n\n quantiles : iterable\n A list of fractional quantiles to show on the 1-D histograms as\n vertical dashed lines.\n\n verbose : bool\n If true, print the values of the computed quantiles.\n\n plot_contours : bool\n Draw contours for dense regions of the plot.\n\n use_math_text : bool\n If true, then axis tick labels for very large or small exponents will\n be displayed as powers of 10 rather than using `e`.\n\n max_n_ticks: int\n Maximum number of ticks to try to use\n\n top_ticks : bool\n If true, label the top ticks of each axis\n\n fig : matplotlib.Figure\n Overplot onto the provided figure object.\n\n hist_kwargs : dict\n Any extra keyword arguments to send to the 1-D histogram plots.\n\n **hist2d_kwargs\n Any remaining keyword arguments are sent to `corner.hist2d` to generate\n the 2-D histogram plots.\n\n \"\"\"\n if quantiles is None:\n quantiles = []\n if title_kwargs is None:\n title_kwargs = dict()\n if label_kwargs is None:\n label_kwargs = dict()\n\n # Try filling in labels from pandas.DataFrame columns.\n if labels is None:\n try:\n labels = xs.columns\n except AttributeError:\n pass\n\n # Deal with 1D sample lists.\n xs = np.atleast_1d(xs)\n if len(xs.shape) == 1:\n xs = np.atleast_2d(xs)\n else:\n assert len(xs.shape) == 2, \"The input sample array must be 1- or 2-D.\"\n xs = xs.T\n assert xs.shape[0] <= xs.shape[1], \"I don't believe that you want more \" \\\n \"dimensions than samples!\"\n\n # Parse the weight array.\n if weights is not None:\n weights = np.asarray(weights)\n if weights.ndim != 1:\n raise ValueError(\"Weights must be 1-D\")\n if xs.shape[1] != weights.shape[0]:\n raise ValueError(\"Lengths of weights must match number of samples\")\n\n # Parse the parameter ranges.\n if range is None:\n if \"extents\" in hist2d_kwargs:\n logging.warn(\"Deprecated keyword argument 'extents'. 
\"\n \"Use 'range' instead.\")\n range = hist2d_kwargs.pop(\"extents\")\n else:\n range = [[x.min(), x.max()] for x in xs]\n # Check for parameters that never change.\n m = np.array([e[0] == e[1] for e in range], dtype=bool)\n if np.any(m):\n raise ValueError((\"It looks like the parameter(s) in \"\n \"column(s) {0} have no dynamic range. \"\n \"Please provide a `range` argument.\")\n .format(\", \".join(map(\n \"{0}\".format, np.arange(len(m))[m]))))\n\n else:\n # If any of the extents are percentiles, convert them to ranges.\n # Also make sure it's a normal list.\n range = list(range)\n for i, _ in enumerate(range):\n try:\n emin, emax = range[i]\n except TypeError:\n q = [0.5 - 0.5*range[i], 0.5 + 0.5*range[i]]\n range[i] = quantile(xs[i], q, weights=weights)\n\n if len(range) != xs.shape[0]:\n raise ValueError(\"Dimension mismatch between samples and range\")\n\n # Parse the bin specifications.\n try:\n bins = [int(bins) for _ in range]\n except TypeError:\n if len(bins) != len(range):\n raise ValueError(\"Dimension mismatch between bins and range\")\n\n # Some magic numbers for pretty axis layout.\n K = len(xs)\n factor = 2.0 # size of one side of one panel\n lbdim = 0.5 * factor # size of left/bottom margin\n trdim = 0.2 * factor # size of top/right margin\n whspace = 0.05 # w/hspace size\n plotdim = factor * K + factor * (K - 1.) * whspace\n dim = lbdim + plotdim + trdim\n\n # Create a new figure if one wasn't provided.\n if fig is None:\n fig, axes = pl.subplots(K, K, figsize=(dim, dim))\n else:\n try:\n axes = np.array(fig.axes).reshape((K, K))\n except:\n raise ValueError(\"Provided figure has {0} axes, but data has \"\n \"dimensions K={1}\".format(len(fig.axes), K))\n\n \n #idea is to pass in covariance, otherwise concoct something from the 1-sigma range.\n if(cov==[]):\n print(\"concocting covar elements from 1-sigma ranges\")\n cov=np.zeros((K,K))\n for k in np.arange(K):\n q_16, q_50, q_84 = quantile(xs[k], [0.16, 0.5, 0.84],weights=weights)\n deltax=(q_84-q_16)/2.0\n cov[k,k]=deltax**2\n print(\"cov=\",cov)\n print(\"covdiag=\",np.diag(cov))\n\n # Format the figure.\n lb = lbdim / dim\n tr = (lbdim + plotdim) / dim\n fig.subplots_adjust(left=lb, bottom=lb, right=tr, top=tr,\n wspace=whspace, hspace=whspace)\n\n # Set up the default histogram keywords.\n if hist_kwargs is None:\n hist_kwargs = dict()\n hist_kwargs[\"color\"] = hist_kwargs.get(\"color\", color)\n if smooth1d is None:\n hist_kwargs[\"histtype\"] = hist_kwargs.get(\"histtype\", \"step\")\n\n for i, x in enumerate(xs):\n # Deal with masked arrays.\n if hasattr(x, \"compressed\"):\n x = x.compressed()\n\n if np.shape(xs)[0] == 1:\n ax = axes\n else:\n ax = axes[i, i]\n #This is to normalize the histogram so that different data can be compared\n if(weights is None):\n hist1d_wts=[1.0/len(x) for w in x]\n else:\n hist1d_wts=[w*1.0/len(x) for w in weights]\n # Plot the histograms.\n if smooth1d is None:\n n, _, _ = ax.hist(x, bins=bins[i], weights=hist1d_wts,\n range=np.sort(range[i]), **hist_kwargs)\n else:\n if gaussian_filter is None:\n raise ImportError(\"Please install scipy for smoothing\")\n n, b = np.histogram(x, bins=bins[i], weights=hist1d_wts,\n range=np.sort(range[i]))\n n = gaussian_filter(n, smooth1d)\n x0 = np.array(list(zip(b[:-1], b[1:]))).flatten()\n y0 = np.array(list(zip(n, n))).flatten()\n ax.plot(x0, y0, **hist_kwargs)\n\n if truths is not None and truths[i] is not None:\n ax.axvline(truths[i], color=truth_color)\n\n # Plot quantiles if wanted.\n if len(quantiles) > 0:\n qvalues = quantile(x, 
quantiles, weights=weights)\n for q in qvalues:\n ax.axvline(q, ls=\"dashed\", color=color)\n\n if verbose:\n print(\"Quantiles:\")\n print([item for item in zip(quantiles, qvalues)])\n\n if show_titles:\n title = None\n if title_fmt is not None:\n # Compute the quantiles for the title. This might redo\n # unneeded computation but who cares.\n q_16, q_50, q_84 = quantile(x, [0.16, 0.5, 0.84],\n weights=weights)\n q_m, q_p = q_50-q_16, q_84-q_50\n\n # Format the quantile display.\n fmt = \"{{0:{0}}}\".format(title_fmt).format\n title = r\"${{{0}}}_{{-{1}}}^{{+{2}}}$\"\n title = title.format(fmt(q_50), fmt(q_m), fmt(q_p))\n\n # Add in the column name if it's given.\n if labels is not None:\n title = \"{0} = {1}\".format(labels[i], title)\n\n elif labels is not None:\n title = \"{0}\".format(labels[i])\n\n if title is not None:\n ax.set_title(title, **title_kwargs)\n\n # Set up the axes.\n ax.set_xlim(range[i])\n if scale_hist:\n maxn = np.max(n)\n ax.set_ylim(-0.1 * maxn, 1.1 * maxn)\n else:\n ax.set_ylim(0, 1.1 * np.max(n))\n ax.set_yticklabels([])\n ax.xaxis.set_major_locator(MaxNLocator(max_n_ticks, prune=\"lower\"))\n\n if i < K - 1:\n if top_ticks:\n ax.xaxis.set_ticks_position(\"top\")\n [l.set_rotation(45) for l in ax.get_xticklabels()]\n else:\n ax.set_xticklabels([])\n else:\n [l.set_rotation(45) for l in ax.get_xticklabels()]\n if labels is not None:\n ax.set_xlabel(labels[i], **label_kwargs)\n ax.xaxis.set_label_coords(0.5, -0.3)\n\n # use MathText for axes ticks\n ax.xaxis.set_major_formatter(\n ScalarFormatter(useMathText=use_math_text))\n\n for j, y in enumerate(xs):\n if np.shape(xs)[0] == 1:\n ax = axes\n else:\n ax = axes[i, j]\n\n if j > 0:\n ax.set_yticklabels([])\n else:\n [l.set_rotation(45) for l in ax.get_yticklabels()]\n if labels is not None:\n ax.set_ylabel(labels[i], **label_kwargs)\n ax.yaxis.set_label_coords(-0.3, 0.5)\n\n # use MathText for axes ticks\n ax.yaxis.set_major_formatter(\n ScalarFormatter(useMathText=use_math_text))\n\n\n if j > i:\n ax.set_frame_on(False)\n ax.set_xticks([])\n ax.set_yticks([])\n continue\n elif j == i:\n continue\n\n # Deal with masked arrays.\n if hasattr(y, \"compressed\"):\n y = y.compressed()\n\n hist2d(y, x, ax=ax, range=[range[j], range[i]], weights=weights,\n color=color, smooth=smooth, bins=[bins[j], bins[i]],\n **hist2d_kwargs)\n\n #add covariance ellipses\n if(cov is not None):\n #center\n cx=truths[j]#need to add checking for availability of truths?\n cy=truths[i]\n\n #ang=math.acos(cov[0,1]/math.sqrt(cov[0,0]*cov[1,1]))*180/math.pi\n #print (j,i,labels[j],labels[i],\"center=\",cx,cy)\n #add an error ellipse\n N_thetas=60\n dtheta=2.0*math.pi/(N_thetas-1)\n thetas=np.arange(0,(2.0*math.pi+dtheta),dtheta)\n #Cplus=(cov[i,i]+cov[j,j])/2.0\n #Cminus=(-cov[i,i]+cov[j,j])/2.0\n #print(\"cov[ii],cov[ij],cov[jj],Cplus,Cminus:\",cov[i,i],cov[i,j],cov[j,j],Cplus,Cminus)\n ang=-math.pi/4.\n root=cov[i,j]/math.sqrt(cov[i,i]*cov[j,j])\n if(root>1):root=1\n if(root<-1):root=-1\n acoeff=math.sqrt(1-root)\n bcoeff=math.sqrt(1+root)\n xcoeff=math.sqrt(cov[j,j])\n ycoeff=math.sqrt(cov[i,i])\n #print(\"a2,b2\",acoeff*acoeff,bcoeff*bcoeff)\n #print(\"a,b,ang, xcoeff,ycoeff, root=\",acoeff,bcoeff,ang,xcoeff,ycoeff,root)\n if \"levels\" in hist2d_kwargs:\n levels= hist2d_kwargs[\"levels\"]\n else:\n levels== 1.0 - np.exp(-0.5 * np.arange(0.5, 2.1, 0.5) ** 2)\n\n for xlev in levels:\n #in the next line we convert the credibility limit\n #to a \"sigma\" limit for a 2-d normal\n #this becomes a scale-factor for the error ellipse\n 
#1-exp(x^2/(-2)=y\n #-2*log(1-y)=x^2\n lev_fac = math.sqrt( -2 * math.log( 1 - xlev ) )\n #print (\"scales for quantile level = \",xlev,\" -> \",lev_fac,\": (\",xcoeff*lev_fac,\",\",ycoeff*lev_fac,\")\")\n elxs=[cx+lev_fac*xcoeff*(acoeff*math.cos(th)*math.cos(ang)-bcoeff*math.sin(th)*math.sin(ang)) for th in thetas] \n elys=[cy+lev_fac*ycoeff*(acoeff*math.cos(th)*math.sin(ang)+bcoeff*math.sin(th)*math.cos(ang)) for th in thetas] \n ax.plot(elxs,elys,color='r')\n\n ax.grid()\n if truths is not None:\n if truths[i] is not None and truths[j] is not None:\n ax.plot(truths[j], truths[i], \"s\", color=truth_color)\n if truths[j] is not None:\n ax.axvline(truths[j], color=truth_color)\n if truths[i] is not None:\n ax.axhline(truths[i], color=truth_color)\n\n ax.xaxis.set_major_locator(MaxNLocator(max_n_ticks, prune=\"lower\"))\n ax.yaxis.set_major_locator(MaxNLocator(max_n_ticks, prune=\"lower\"))\n\n if i < K - 1:\n ax.set_xticklabels([])\n else:\n [l.set_rotation(45) for l in ax.get_xticklabels()]\n if labels is not None:\n ax.set_xlabel(labels[j], **label_kwargs)\n ax.xaxis.set_label_coords(0.5, -0.3)\n\n # use MathText for axes ticks\n ax.xaxis.set_major_formatter(\n ScalarFormatter(useMathText=use_math_text))\n\n if j > 0:\n ax.set_yticklabels([])\n else:\n [l.set_rotation(45) for l in ax.get_yticklabels()]\n if labels is not None:\n ax.set_ylabel(labels[i], **label_kwargs)\n ax.yaxis.set_label_coords(-0.3, 0.5)\n\n # use MathText for axes ticks\n ax.yaxis.set_major_formatter(\n ScalarFormatter(useMathText=use_math_text))\n\n return fig\n\n\ndef quantile(x, q, weights=None):\n \"\"\"\n Compute sample quantiles with support for weighted samples.\n\n Note\n ----\n When ``weights`` is ``None``, this method simply calls numpy's percentile\n function with the values of ``q`` multiplied by 100.\n\n Parameters\n ----------\n x : array_like[nsamples,]\n The samples.\n\n q : array_like[nquantiles,]\n The list of quantiles to compute. These should all be in the range\n ``[0, 1]``.\n\n weights : Optional[array_like[nsamples,]]\n An optional weight corresponding to each sample. 
These\n\n Returns\n -------\n quantiles : array_like[nquantiles,]\n The sample quantiles computed at ``q``.\n\n Raises\n ------\n ValueError\n For invalid quantiles; ``q`` not in ``[0, 1]`` or dimension mismatch\n between ``x`` and ``weights``.\n\n \"\"\"\n x = np.atleast_1d(x)\n q = np.atleast_1d(q)\n\n if np.any(q < 0.0) or np.any(q > 1.0):\n raise ValueError(\"Quantiles must be between 0 and 1\")\n\n if weights is None:\n return np.percentile(x, 100.0 * q)\n else:\n weights = np.atleast_1d(weights)\n if len(x) != len(weights):\n raise ValueError(\"Dimension mismatch: len(weights) != len(x)\")\n idx = np.argsort(x)\n sw = weights[idx]\n cdf = np.cumsum(sw)[:-1]\n cdf /= cdf[-1]\n cdf = np.append(0, cdf)\n return np.interp(q, cdf, x[idx]).tolist()\n\n\ndef hist2d(x, y, bins=20, range=None, weights=None, levels=None, smooth=None,\n ax=None, color=None, plot_datapoints=True, plot_density=True,\n plot_contours=True, no_fill_contours=False, fill_contours=False,\n contour_kwargs=None, contourf_kwargs=None, data_kwargs=None,\n **kwargs):\n \"\"\"\n Plot a 2-D histogram of samples.\n\n Parameters\n ----------\n x : array_like[nsamples,]\n The samples.\n\n y : array_like[nsamples,]\n The samples.\n\n levels : array_like\n The contour levels to draw.\n\n ax : matplotlib.Axes\n A axes instance on which to add the 2-D histogram.\n\n plot_datapoints : bool\n Draw the individual data points.\n\n plot_density : bool\n Draw the density colormap.\n\n plot_contours : bool\n Draw the contours.\n\n no_fill_contours : bool\n Add no filling at all to the contours (unlike setting\n ``fill_contours=False``, which still adds a white fill at the densest\n points).\n\n fill_contours : bool\n Fill the contours.\n\n contour_kwargs : dict\n Any additional keyword arguments to pass to the `contour` method.\n\n contourf_kwargs : dict\n Any additional keyword arguments to pass to the `contourf` method.\n\n data_kwargs : dict\n Any additional keyword arguments to pass to the `plot` method when\n adding the individual data points.\n\n \"\"\"\n if ax is None:\n ax = pl.gca()\n\n # Set the default range based on the data range if not provided.\n if range is None:\n if \"extent\" in kwargs:\n logging.warn(\"Deprecated keyword argument 'extent'. 
\"\n \"Use 'range' instead.\")\n range = kwargs[\"extent\"]\n else:\n range = [[x.min(), x.max()], [y.min(), y.max()]]\n\n # Set up the default plotting arguments.\n if color is None:\n color = \"k\"\n\n # Choose the default \"sigma\" contour levels.\n if levels is None:\n levels = 1.0 - np.exp(-0.5 * np.arange(0.5, 2.1, 0.5) ** 2)\n\n # This is the color map for the density plot, over-plotted to indicate the\n # density of the points near the center.\n density_cmap = LinearSegmentedColormap.from_list(\n \"density_cmap\", [color, (1, 1, 1, 0)])\n\n # This color map is used to hide the points at the high density areas.\n white_cmap = LinearSegmentedColormap.from_list(\n \"white_cmap\", [(1, 1, 1), (1, 1, 1)], N=2)\n\n # This \"color map\" is the list of colors for the contour levels if the\n # contours are filled.\n rgba_color = colorConverter.to_rgba(color)\n contour_cmap = [list(rgba_color) for l in levels] + [rgba_color]\n for i, l in enumerate(levels):\n contour_cmap[i][-1] *= float(i) / (len(levels)+1)\n\n # We'll make the 2D histogram to directly estimate the density.\n try:\n H, X, Y = np.histogram2d(x.flatten(), y.flatten(), bins=bins,\n range=list(map(np.sort, range)),\n weights=weights)\n except ValueError:\n raise ValueError(\"It looks like at least one of your sample columns \"\n \"have no dynamic range. You could try using the \"\n \"'range' argument.\")\n\n if smooth is not None:\n if gaussian_filter is None:\n raise ImportError(\"Please install scipy for smoothing\")\n H = gaussian_filter(H, smooth)\n\n # Compute the density levels.\n Hflat = H.flatten()\n inds = np.argsort(Hflat)[::-1]\n Hflat = Hflat[inds]\n sm = np.cumsum(Hflat)\n sm /= sm[-1]\n V = np.empty(len(levels))\n for i, v0 in enumerate(levels):\n try:\n V[i] = Hflat[sm <= v0][-1]\n except:\n V[i] = Hflat[0]\n V.sort()\n m = np.diff(V) == 0\n if np.any(m):\n logging.warning(\"Too few points to create valid contours\")\n while np.any(m):\n V[np.where(m)[0][0]] *= 1.0 - 1e-4\n m = np.diff(V) == 0\n V.sort()\n\n # Compute the bin centers.\n X1, Y1 = 0.5 * (X[1:] + X[:-1]), 0.5 * (Y[1:] + Y[:-1])\n\n # Extend the array for the sake of the contours at the plot edges.\n H2 = H.min() + np.zeros((H.shape[0] + 4, H.shape[1] + 4))\n H2[2:-2, 2:-2] = H\n H2[2:-2, 1] = H[:, 0]\n H2[2:-2, -2] = H[:, -1]\n H2[1, 2:-2] = H[0]\n H2[-2, 2:-2] = H[-1]\n H2[1, 1] = H[0, 0]\n H2[1, -2] = H[0, -1]\n H2[-2, 1] = H[-1, 0]\n H2[-2, -2] = H[-1, -1]\n X2 = np.concatenate([\n X1[0] + np.array([-2, -1]) * np.diff(X1[:2]),\n X1,\n X1[-1] + np.array([1, 2]) * np.diff(X1[-2:]),\n ])\n Y2 = np.concatenate([\n Y1[0] + np.array([-2, -1]) * np.diff(Y1[:2]),\n Y1,\n Y1[-1] + np.array([1, 2]) * np.diff(Y1[-2:]),\n ])\n\n if plot_datapoints:\n if data_kwargs is None:\n data_kwargs = dict()\n data_kwargs[\"color\"] = data_kwargs.get(\"color\", color)\n data_kwargs[\"ms\"] = data_kwargs.get(\"ms\", 2.0)\n data_kwargs[\"mec\"] = data_kwargs.get(\"mec\", \"none\")\n data_kwargs[\"alpha\"] = data_kwargs.get(\"alpha\", 0.1)\n ax.plot(x, y, \"o\", zorder=-1, rasterized=True, **data_kwargs)\n\n # Plot the base fill to hide the densest data points.\n if (plot_contours or plot_density) and not no_fill_contours:\n ax.contourf(X2, Y2, H2.T, [V.min(), H.max()],\n cmap=white_cmap, antialiased=False)\n\n if plot_contours and fill_contours:\n if contourf_kwargs is None:\n contourf_kwargs = dict()\n contourf_kwargs[\"colors\"] = contourf_kwargs.get(\"colors\", contour_cmap)\n contourf_kwargs[\"antialiased\"] = contourf_kwargs.get(\"antialiased\",\n False)\n 
ax.contourf(X2, Y2, H2.T, np.concatenate([[0], V, [H.max()*(1+1e-4)]]),\n **contourf_kwargs)\n\n # Plot the density map. This can't be plotted at the same time as the\n # contour fills.\n elif plot_density:\n ax.pcolor(X, Y, H.max() - H.T, cmap=density_cmap)\n\n # Plot the contour edge colors.\n if plot_contours:\n if contour_kwargs is None:\n contour_kwargs = dict()\n contour_kwargs[\"colors\"] = contour_kwargs.get(\"colors\", color)\n ax.contour(X2, Y2, H2.T, V, **contour_kwargs)\n \n ax.set_xlim(range[0])\n ax.set_ylim(range[1])\n" ]
[ [ "numpy.diag", "numpy.asarray", "numpy.cumsum", "numpy.max", "numpy.any", "numpy.where", "matplotlib.pyplot.gca", "numpy.arange", "numpy.atleast_1d", "numpy.diff", "numpy.interp", "matplotlib.colors.colorConverter.to_rgba", "matplotlib.ticker.ScalarFormatter", "numpy.zeros", "numpy.atleast_2d", "numpy.append", "numpy.argsort", "matplotlib.colors.LinearSegmentedColormap.from_list", "numpy.array", "scipy.ndimage.gaussian_filter", "matplotlib.pyplot.subplots", "numpy.percentile", "numpy.sort", "numpy.shape", "matplotlib.ticker.MaxNLocator" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "1.7", "1.0", "0.10", "1.2", "0.14", "0.19", "1.5", "0.12", "0.17", "0.13", "1.6", "1.4", "1.9", "1.3", "1.10", "0.15", "0.18", "0.16", "1.8" ], "tensorflow": [] } ]
alipay/Parameter_Inference_Efficient_PIE
[ "660add7705432a526aa3335fff3d8cf1c7d015a4", "660add7705432a526aa3335fff3d8cf1c7d015a4" ]
[ "entity_typing/src/data_processer.py", "candidate/cat.py" ]
[ "import os\nimport re\nimport pickle\nimport random\nimport pylab\nimport os.path as osp\nimport numpy as np\nfrom collections import defaultdict\nfrom sklearn.feature_extraction.text import CountVectorizer\nfrom scipy.sparse import coo_matrix\nfrom utils import count_all_paths_with_mp, count_paths, get_path_dict_and_length, one_hot_path_id, sample_paths\n\n\ne2re = defaultdict(set) # entity index -> set of pair (relation, entity) connecting to this entity\n\n\ndef read_entities(file_name):\n d = {}\n file = open(file_name)\n for line in file:\n index, name = line.strip().split('\\t')\n d[name] = int(index)\n file.close()\n return d\n\n\ndef read_relations(file_name, add_reverse):\n d = {}\n file = open(file_name)\n for line in file:\n index, name = line.strip().split('\\t')\n d[name] = int(index)\n file.close()\n\n rel_num = len(d)\n if add_reverse:\n file = open(file_name)\n for line in file:\n index, name = line.strip().split('\\t')\n d[name+'rev'] = int(index) + rel_num\n file.close()\n\n return d\n\n\ndef read_triplets(file_name, entity_dict, relation_dict, add_reverse):\n data = []\n\n file = open(file_name)\n for line in file:\n head, relation, tail = line.strip().split('\\t')\n\n head_idx = entity_dict[head]\n relation_idx = relation_dict[relation]\n tail_idx = entity_dict[tail]\n\n data.append((head_idx, relation_idx, tail_idx))\n if add_reverse:\n data.append((tail_idx, relation_dict[relation+'rev'], head_idx))\n file.close()\n\n return np.array(data)\n\n\ndef build_kg(train_data, add_reverse, directory):\n\n if os.path.exists(os.path.join(directory, \"entity2relation.pkl\")):\n print('loading the cached data ...')\n entity2relation = pickle.load(open(os.path.join(directory, \"entity2relation.pkl\"), 'rb'))\n entity2edges = np.load(os.path.join(directory, \"entity2edges.npy\"))\n edge2entities = np.load(os.path.join(directory, \"edge2entities.npy\"))\n edge2relation = np.load(os.path.join(directory, \"edge2relation.npy\"))\n train_triples_new = np.load(os.path.join(directory, \"train_triples_new.npy\"))\n else:\n entity2edge_set = defaultdict(set)\n entity2relation_set = defaultdict(set)\n entity2relation = defaultdict(set)\n entity2edges = [] # each row in entity2edges is the sampled edges connecting to this entity\n edge2entities = [] # each row in edge2entities is the two entities connected by this edge\n edge2relation = [] # each row in edge2relation is the relation type of this edge\n\n edge_idx_total = np.arange(train_data.shape[0])\n np.random.shuffle(edge_idx_total)\n\n train_triples_new = []\n edge_idx_new = 0\n for idx, edge_idx in enumerate(edge_idx_total):\n if idx % 10000000 == 0:\n print(\"%d/%d=%f\" %\n (idx, train_data.shape[0], float(idx)/float(train_data.shape[0])))\n head_idx, relation_idx, tail_idx = train_data[edge_idx]\n # single dirction\n # direction: relation --> entity\n if (len(entity2edge_set[tail_idx]) > args.neighbor_samples) and (relation_idx in entity2relation_set[tail_idx]):\n continue\n else:\n entity2relation_set[tail_idx].add(relation_idx)\n # the index for triple in train_triples_new\n entity2edge_set[tail_idx].add(edge_idx_new)\n edge2entities.append([head_idx])\n edge2relation.append(relation_idx)\n\n train_triples_new.append(train_data[edge_idx])\n edge_idx_new += 1\n\n null_entity = nentity\n null_relation = nrelation\n null_edge = len(edge2entities)\n edge2entities.append([null_entity])\n edge2relation.append(null_relation)\n\n train_triples_new = np.stack(train_triples_new)\n\n print('sampling neighbors ...')\n for i in range(nentity + 
1):\n if i % 10000000 == 0:\n print(\"%d/%d=%f\" % (i, nentity, float(i)/float(nentity)))\n\n if i not in entity2edge_set:\n entity2edge_set[i] = {null_edge}\n\n if len(entity2edge_set[i]) < args.neighbor_samples:\n sampled_neighbors = list(entity2edge_set[i]) + [null_edge] * \\\n (args.neighbor_samples - len(entity2edge_set[i]))\n else:\n rels_dict = {}\n edges = list(entity2edge_set[i])\n num_edges = len(edges)\n\n for edge in edges:\n p = train_triples_new[edge][1]\n if p not in rels_dict:\n rels_dict[p] = 1.0\n else:\n rels_dict[p] += 1.0\n ps = []\n for edge in edges:\n p = train_triples_new[edge][1]\n ps.append(1.0 / (float(len(rels_dict)) * rels_dict[p]))\n sampled_neighbors = np.random.choice(edges, size=args.neighbor_samples,\n replace=False, p=ps)\n entity2edges.append(sampled_neighbors)\n entity2relation[i] = list(entity2relation_set[i])\n\n del entity2edge_set\n del entity2relation_set\n\n print('saving the processed data ...')\n with open(os.path.join(directory, \"entity2relation.pkl\"), 'wb') as f:\n pickle.dump(entity2relation, f)\n entity2edges = np.array(entity2edges)\n edge2entities = np.array(edge2entities)\n edge2relation = np.array(edge2relation)\n np.save(os.path.join(directory, \"entity2edges.npy\"), entity2edges)\n np.save(os.path.join(directory, \"edge2entities.npy\"), edge2entities)\n np.save(os.path.join(directory, \"edge2relation.npy\"), edge2relation)\n np.save(os.path.join(directory, \"train_triples_new.npy\"), train_triples_new)\n return entity2relation, entity2edges, edge2entities, edge2relation, train_triples_new\n\n\ndef load_data(model_args):\n global args, entity_dict, relation_dict, nentity, nrelation\n args = model_args\n directory = '../data/' + args.dataset + '/'\n\n print('reading entity dict and relation dict ...')\n entity_dict = read_entities(directory + 'entities.dict')\n relation_dict = read_relations(directory + 'relations.dict', args.add_reverse)\n nentity = len(entity_dict)\n nrelation = len(relation_dict)\n\n print('reading train, validation, and test data ...')\n train_triplets = read_triplets(directory + 'train.txt', entity_dict,\n relation_dict, args.add_reverse)\n valid_triplets = read_triplets(directory + 'valid.txt', entity_dict,\n relation_dict, args.add_reverse)\n test_triplets = read_triplets(directory + 'test.txt', entity_dict,\n relation_dict, args.add_reverse)\n\n print('processing the knowledge graph ...')\n entity2relation, entity2edges, edge2entities, edge2relation, train_triplets = build_kg(\n train_triplets, args.add_reverse, directory)\n\n infer_triplets = np.array([np.arange(nentity), [0]*nentity, np.arange(nentity)]).T\n\n triplets = [train_triplets, valid_triplets, test_triplets, infer_triplets]\n\n neighbor_data = [entity2edges, edge2entities, edge2relation, entity2relation]\n return triplets, nrelation, neighbor_data\n\n\ndef load_data_wikikg(model_args, num_entity, num_relation, directory):\n global args, entity_dict, relation_dict, nentity, nrelation\n args = model_args\n nentity = num_entity\n nrelation = num_relation\n\n print('reading train, validation, and test data ...')\n train_triplets = np.load(directory + 'train_hrt.npy')\n # add_reverse\n train_triplets_reverse = np.stack(\n (train_triplets[:, 2], train_triplets[:, 1] + nrelation, train_triplets[:, 0]), axis=1)\n train_triplets = np.concatenate([train_triplets, train_triplets_reverse])\n nrelation = nrelation*2\n\n valid_triplets = np.load(directory + 'val_hr.npy')\n valid_triplets = np.stack((valid_triplets[:, 0], valid_triplets[:, 1], np.load(\n 
directory + 'val_t.npy')), axis=1)\n test_triplets = None\n\n print('processing the knowledge graph ...')\n entity2relation, entity2edges, edge2entities, edge2relation, train_triplets = build_kg(\n train_triplets, True, directory)\n print('processing the knowledge graph done')\n\n infer_triplets = np.array([np.arange(nentity), [0]*nentity, np.arange(nentity)]).T\n triplets = [train_triplets, valid_triplets, test_triplets, infer_triplets]\n\n neighbor_data = [entity2edges, edge2entities, edge2relation, entity2relation]\n\n return triplets, nrelation, neighbor_data\n", "import numpy as np\nimport sys\n\ninput_prefix=sys.argv[1]\noutput=sys.argv[2]\n\ne2r=[]\nfor i in range(15):\n e2r.append(np.load(input_prefix + '_%d.npy' % i)[:, 3:])\n print(e2r[-1].shape)\ne2r = np.concatenate(e2r, axis=0)\nprint(e2r.shape)\n\nnp.save(output, e2r)\n" ]
[ [ "numpy.random.choice", "numpy.arange", "numpy.stack", "numpy.random.shuffle", "numpy.concatenate", "numpy.load", "numpy.array" ], [ "numpy.concatenate", "numpy.load", "numpy.save" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
usc-isi-i2/dsbox-ta2
[ "85e0e8f5bbda052fa77cb98f4eef1f4b50909fd2" ]
[ "python/dsbox/template/template_files/loaded/RegressionWithSelection.py" ]
[ "from dsbox.template.template import DSBoxTemplate \nfrom d3m.metadata.problem import TaskKeyword \nfrom dsbox.template.template_steps import TemplateSteps \nfrom dsbox.schema import SpecializedProblem \nimport typing \nimport numpy as np # type: ignore \nclass RegressionWithSelection(DSBoxTemplate):\n def __init__(self):\n DSBoxTemplate.__init__(self)\n self.template = {\n \"name\": \"regression_with_feature_selection\",\n \"taskSubtype\": {TaskKeyword.UNIVARIATE.name, TaskKeyword.MULTIVARIATE.name},\n \"taskType\": TaskKeyword.REGRESSION.name,\n \"inputType\": \"table\", # See SEMANTIC_TYPES.keys() for range of values\n \"output\": \"model_step\", # Name of the final step generating the prediction\n \"target\": \"extract_target_step\", # Name of the step generating the ground truth\n \"steps\": TemplateSteps.human_steps() + TemplateSteps.dsbox_feature_selector(\"regression\") +\n [\n {\n \"name\": \"model_step\",\n \"primitives\": [\n {\n \"primitive\": \"d3m.primitives.regression.sgd.SKlearn\",\n \"hyperparameters\": {\n \"loss\": ['squared_loss', 'huber'],\n \"alpha\": [float(x) for x in np.logspace(-5, -1.004, 7)], # cannot reach 0.1\n \"l1_ratio\": [0.01, 0.15, 0.3, 0.5, 0.6, 0.7, 0.9], # cannot reach 1\n \"learning_rate\": ['optimal', 'invscaling'],\n 'add_index_columns': [True],\n 'use_semantic_types':[True],\n }\n },\n {\n \"primitive\":\n \"d3m.primitives.regression.gradient_boosting.SKlearn\",\n \"hyperparameters\":\n {\n 'max_depth': [2, 3, 5],\n 'n_estimators': [100, 150, 200],\n 'learning_rate': [0.1, 0.3, 0.5],\n 'min_samples_split': [2, 3],\n 'min_samples_leaf': [1, 2],\n 'add_index_columns': [True],\n 'use_semantic_types':[True],\n }\n },\n ],\n \"inputs\": [\"feature_selector_step\", \"extract_target_step\"]\n }\n ]\n }\n\n\n" ]
[ [ "numpy.logspace" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
iwan933/wavenet-lstm-timeseries
[ "2be7f9384b72d3acd03c22272e187bc431040295" ]
[ "model/lstm.py" ]
[ "import tensorflow as tf\nimport kerastuner as kt\n\nfrom sacred import Experiment\n\nfrom model.training import sharpe_loss, fit\nfrom util.data import load_data, preprocess, split_train_test_validation, make_dataset, create_full_datasets\n\nex = Experiment()\n\n\[email protected]\ndef config():\n data_dir = 'data'\n alpha = 0.01\n dropout = 0\n learning_rate = 1e-4\n patience = 10\n epochs = 100\n batch_size = 32\n loss = sharpe_loss\n target = 0.15\n sequence_length = 60\n\n\ndef compile_lstm_model(loss, target, alpha, dropout, learning_rate) -> tf.keras.Model:\n \"\"\"\n Creates a lstm model based on the passed hyper parameter\n :param target: target annual returns\n :param loss: target loss function\n :param learning_rate: learning rate\n :param alpha: l1 regularization constant\n :param dropout: dropout rate for lstm\n :return:\n \"\"\"\n model = tf.keras.models.Sequential([\n tf.keras.layers.LSTM(50, return_sequences=True, dropout=dropout),\n tf.keras.layers.Dense(units=1, activation='tanh', kernel_regularizer=tf.keras.regularizers.l1(alpha))\n ])\n model.compile(loss=loss(model, target=target),\n optimizer=tf.optimizers.Adam(learning_rate),\n metrics=[loss(model, target=target)])\n return model\n\n\[email protected]\ndef train_lstm(data_dir, alpha, dropout, loss, patience, epochs, learning_rate, target, batch_size, sequence_length):\n train, validation, test = create_full_datasets(data_dir, sequence_length=sequence_length,\n return_sequence=True, shift=1, batch_size=batch_size)\n model = compile_lstm_model(loss=loss, target=target, alpha=alpha, dropout=dropout, learning_rate=learning_rate)\n history = fit(model, train, validation, patience=patience, epochs=epochs)\n\n\[email protected]\ndef search_params(data_dir, sequence_length, loss, target, batch_size):\n print('starting parameter search...')\n train, validation, test = create_full_datasets(data_dir, sequence_length=sequence_length,\n return_sequence=True, shift=1, batch_size=batch_size)\n\n def build_model(hp: kt.HyperParameters):\n model = tf.keras.models.Sequential([\n tf.keras.layers.LSTM(hp.Int('units', min_value=32, max_value=256, step=32), return_sequences=True, dropout=hp.Float('dropout', 0, 0.5, step=0.1)),\n tf.keras.layers.Dense(units=1, activation='tanh', kernel_regularizer=tf.keras.regularizers.l1(\n hp.Float('alpha', 1e-3, 1e+1, sampling='log')))\n ])\n model.compile(loss=loss(model, target=target),\n optimizer=tf.optimizers.Adam(hp.Float('learning_rate', 1e-5, 1e-1,\n sampling='log')),\n metrics=[loss(model, target=target)])\n return model\n\n tuner = kt.Hyperband(\n build_model,\n objective='val_loss',\n max_epochs=30,\n hyperband_iterations=2)\n early_stopping = tf.keras.callbacks.EarlyStopping(monitor='val_loss',\n patience=3,\n mode='min')\n tuner.search(train, epochs=30,\n validation_data=validation,\n callbacks=[early_stopping])\n best_model = tuner.get_best_models(1)[0]\n best_hyperparameters = tuner.get_best_hyperparameters(1)[0]\n print(best_hyperparameters)\n" ]
[ [ "tensorflow.optimizers.Adam", "tensorflow.keras.regularizers.l1", "tensorflow.keras.callbacks.EarlyStopping", "tensorflow.keras.layers.LSTM" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "2.2", "1.10" ] } ]
joseignaciorc/fastparquet
[ "38922d599e18aa082b7a5f14bfdc42b86fbcea52" ]
[ "fastparquet/test/test_api.py" ]
[ "# -*- coding: utf-8 -*-\nimport io\nimport os\nimport subprocess\nimport sys\nfrom distutils.version import LooseVersion\n\nimport numpy as np\nimport pandas as pd\ntry:\n from pandas.tslib import Timestamp\nexcept ImportError:\n from pandas import Timestamp\nimport pytest\n\nfrom .util import tempdir\nimport fastparquet\nfrom fastparquet import write, ParquetFile\nfrom fastparquet.api import statistics, sorted_partitioned_columns, filter_in, filter_not_in\nfrom fastparquet.util import join_path\n\nTEST_DATA = \"test-data\"\nWIN = os.name == 'nt'\n\n\[email protected](reason=\"new numpy\")\ndef test_import_without_warning():\n # in a subprocess to avoid import chacing issues.\n subprocess.check_call([sys.executable, \"-Werror\", \"-c\", \"import fastparquet\"])\n\n\ndef test_statistics(tempdir):\n df = pd.DataFrame({'x': [1, 2, 3],\n 'y': [1.0, 2.0, 1.0],\n 'z': ['a', 'b', 'c']})\n\n fn = os.path.join(tempdir, 'foo.parquet')\n write(fn, df, row_group_offsets=[0, 2])\n\n p = ParquetFile(fn)\n\n s = statistics(p)\n expected = {'distinct_count': {'x': [None, None],\n 'y': [None, None],\n 'z': [None, None]},\n 'max': {'x': [2, 3], 'y': [2.0, 1.0], 'z': ['b', 'c']},\n 'min': {'x': [1, 3], 'y': [1.0, 1.0], 'z': ['a', 'c']},\n 'null_count': {'x': [0, 0], 'y': [0, 0], 'z': [0, 0]}}\n\n assert s == expected\n\n\ndef test_logical_types(tempdir):\n df = pd.util.testing.makeMixedDataFrame()\n\n fn = os.path.join(tempdir, 'foo.parquet')\n write(fn, df, row_group_offsets=[0, 2])\n\n p = ParquetFile(fn)\n\n s = statistics(p)\n\n assert isinstance(s['min']['D'][0], (np.datetime64, Timestamp))\n\n\ndef test_text_schema(tempdir):\n df = pd.util.testing.makeMixedDataFrame()\n fn = os.path.join(tempdir, 'foo.parquet')\n write(fn, df)\n p = ParquetFile(fn)\n t = p.schema.text\n expected = ('- schema: \\n'\n '| - A: DOUBLE, OPTIONAL\\n'\n '| - B: DOUBLE, OPTIONAL\\n'\n '| - C: BYTE_ARRAY, UTF8, OPTIONAL\\n'\n ' - D: INT64, TIMESTAMP[NANOS], OPTIONAL')\n assert t == expected\n assert repr(p.schema) == \"<Parquet Schema with 5 entries>\"\n\n\ndef test_empty_statistics(tempdir):\n p = ParquetFile(os.path.join(TEST_DATA, \"nation.impala.parquet\"))\n\n s = statistics(p)\n assert s == {'distinct_count': {'n_comment': [None],\n 'n_name': [None],\n 'n_nationkey': [None],\n 'n_regionkey': [None]},\n 'max': {'n_comment': [None],\n 'n_name': [None],\n 'n_nationkey': [None],\n 'n_regionkey': [None]},\n 'min': {'n_comment': [None],\n 'n_name': [None],\n 'n_nationkey': [None],\n 'n_regionkey': [None]},\n 'null_count': {'n_comment': [None],\n 'n_name': [None],\n 'n_nationkey': [None],\n 'n_regionkey': [None]}}\n\n\ndef test_sorted_row_group_columns(tempdir):\n df = pd.DataFrame({'x': [1, 2, 3, 4],\n 'v': [{'a': 0}, {'b': -1}, {'c': 5}, {'a': 0}],\n 'y': [1.0, 2.0, 1.0, 2.0],\n 'z': ['a', 'b', 'c', 'd']})\n\n fn = os.path.join(tempdir, 'foo.parquet')\n write(fn, df, row_group_offsets=[0, 2], object_encoding={'v': 'json',\n 'z': 'utf8'})\n\n pf = ParquetFile(fn)\n\n # string stats should be stored without byte-encoding\n zcol = [c for c in pf.row_groups[0].columns\n if c.meta_data.path_in_schema == ['z']][0]\n assert zcol.meta_data.statistics.min == b'a'\n\n result = sorted_partitioned_columns(pf)\n expected = {'x': {'min': [1, 3], 'max': [2, 4]},\n 'z': {'min': ['a', 'c'], 'max': ['b', 'd']}}\n\n # NB column v should not feature, as dict are unorderable\n assert result == expected\n\n\ndef test_sorted_row_group_columns_with_filters(tempdir):\n dd = pytest.importorskip('dask.dataframe')\n # create dummy dataframe\n df = 
pd.DataFrame({'unique': [0, 0, 1, 1, 2, 2, 3, 3],\n 'id': ['id1', 'id2',\n 'id1', 'id2',\n 'id1', 'id2',\n 'id1', 'id2']},\n index=[0, 0, 1, 1, 2, 2, 3, 3])\n df = dd.from_pandas(df, npartitions=2)\n fn = os.path.join(tempdir, 'foo.parquet')\n df.to_parquet(fn,\n engine='fastparquet',\n partition_on=['id'])\n # load ParquetFile\n pf = ParquetFile(fn)\n filters = [('id', '==', 'id1')]\n\n # without filters no columns are sorted\n result = sorted_partitioned_columns(pf)\n expected = {}\n assert result == expected\n\n # with filters both columns are sorted\n result = sorted_partitioned_columns(pf, filters=filters)\n expected = {'__null_dask_index__': {'min': [0, 2], 'max': [1, 3]},\n 'unique': {'min': [0, 2], 'max': [1, 3]}}\n assert result == expected\n\n\ndef test_iter(tempdir):\n df = pd.DataFrame({'x': [1, 2, 3, 4],\n 'y': [1.0, 2.0, 1.0, 2.0],\n 'z': ['a', 'b', 'c', 'd']})\n df.index.name = 'index'\n\n fn = os.path.join(tempdir, 'foo.parquet')\n write(fn, df, row_group_offsets=[0, 2], write_index=True)\n pf = ParquetFile(fn)\n out = iter(pf.iter_row_groups(index='index'))\n d1 = next(out)\n pd.testing.assert_frame_equal(d1, df[:2], check_dtype=False, check_index_type=False)\n d2 = next(out)\n pd.testing.assert_frame_equal(d2, df[2:], check_dtype=False, check_index_type=False)\n with pytest.raises(StopIteration):\n next(out)\n\n\ndef test_pickle(tempdir):\n import pickle\n df = pd.DataFrame({'x': [1, 2, 3, 4],\n 'y': [1.0, 2.0, 1.0, 2.0],\n 'z': ['a', 'b', 'c', 'd']})\n df.index.name = 'index'\n\n fn = os.path.join(tempdir, 'foo.parquet')\n write(fn, df, row_group_offsets=[0, 2], write_index=True)\n pf = ParquetFile(fn)\n pf2 = pickle.loads(pickle.dumps(pf))\n assert pf.to_pandas().equals(pf2.to_pandas())\n\n\ndef test_directory_local(tempdir):\n df = pd.DataFrame({'x': [1, 2, 3, 4],\n 'y': [1.0, 2.0, 1.0, 2.0],\n 'z': ['a', 'b', 'c', 'd']})\n df.index.name = 'index'\n write(os.path.join(tempdir, 'foo1.parquet'), df)\n write(os.path.join(tempdir, 'foo2.parquet'), df)\n pf = ParquetFile(tempdir)\n assert pf.info['rows'] == 8\n assert pf.to_pandas()['z'].tolist() == ['a', 'b', 'c', 'd'] * 2\n\n\ndef test_directory_error(tempdir):\n df = pd.DataFrame({'x': [1, 2, 3, 4],\n 'y': [1.0, 2.0, 1.0, 2.0],\n 'z': ['a', 'b', 'c', 'd']})\n df.index.name = 'index'\n write(os.path.join(tempdir, 'foo1.parquet'), df)\n write(os.path.join(tempdir, 'foo2.parquet'), df)\n with pytest.raises(ValueError, match=\"fsspec\"):\n ParquetFile(tempdir, open_with=lambda *args: open(*args))\n\n\ndef test_directory_mem():\n import fsspec\n m = fsspec.filesystem(\"memory\")\n df = pd.DataFrame({'x': [1, 2, 3, 4],\n 'y': [1.0, 2.0, 1.0, 2.0],\n 'z': ['a', 'b', 'c', 'd']})\n df.index.name = 'index'\n write('/dir/foo1.parquet', df, open_with=m.open)\n write('/dir/foo2.parquet', df, open_with=m.open)\n\n # inferred FS\n pf = ParquetFile(\"/dir\", open_with=m.open)\n assert pf.info['rows'] == 8\n assert pf.to_pandas()['z'].tolist() == ['a', 'b', 'c', 'd'] * 2\n\n # inferred FS\n pf = ParquetFile(\"/dir/*\", open_with=m.open)\n assert pf.info['rows'] == 8\n assert pf.to_pandas()['z'].tolist() == ['a', 'b', 'c', 'd'] * 2\n\n # explicit FS\n pf = ParquetFile(\"/dir\", fs=m)\n assert pf.info['rows'] == 8\n assert pf.to_pandas()['z'].tolist() == ['a', 'b', 'c', 'd'] * 2\n m.store.clear()\n\n\ndef test_directory_mem_nest():\n import fsspec\n m = fsspec.filesystem(\"memory\")\n df = pd.DataFrame({'x': [1, 2, 3, 4],\n 'y': [1.0, 2.0, 1.0, 2.0],\n 'z': ['a', 'b', 'c', 'd']})\n df.index.name = 'index'\n 
write('/dir/field=a/foo1.parquet', df, open_with=m.open)\n write('/dir/field=b/foo2.parquet', df, open_with=m.open)\n\n pf = ParquetFile(\"/dir\", fs=m)\n assert pf.info['rows'] == 8\n assert pf.to_pandas()['z'].tolist() == ['a', 'b', 'c', 'd'] * 2\n assert pf.to_pandas()['field'].tolist() == ['a'] * 4 + ['b'] * 4\n\n\ndef test_attributes(tempdir):\n df = pd.DataFrame({'x': [1, 2, 3, 4],\n 'y': [1.0, 2.0, 1.0, 2.0],\n 'z': ['a', 'b', 'c', 'd']})\n\n fn = os.path.join(tempdir, 'foo.parquet')\n write(fn, df, row_group_offsets=[0, 2])\n pf = ParquetFile(fn)\n assert pf.columns == ['x', 'y', 'z']\n assert len(pf.row_groups) == 2\n assert pf.count() == 4\n assert join_path(fn).replace(\"\\\\\", \"/\") == pf.info['name']\n assert join_path(fn).replace(\"\\\\\", \"/\") in str(pf)\n for col in df:\n assert getattr(pf.dtypes[col], \"numpy_dtype\", pf.dtypes[col]) == df.dtypes[col]\n\n\ndef test_open_standard(tempdir):\n df = pd.DataFrame({'x': [1, 2, 3, 4],\n 'y': [1.0, 2.0, 1.0, 2.0],\n 'z': ['a', 'b', 'c', 'd']})\n fn = os.path.join(tempdir, 'foo.parquet')\n write(fn, df, row_group_offsets=[0, 2], file_scheme='hive',\n open_with=open)\n pf = ParquetFile(fn, open_with=open)\n d2 = pf.to_pandas()\n pd.testing.assert_frame_equal(d2, df, check_dtype=False)\n\n\ndef test_filelike(tempdir):\n df = pd.DataFrame({'x': [1, 2, 3, 4],\n 'y': [1.0, 2.0, 1.0, 2.0],\n 'z': ['a', 'b', 'c', 'd']})\n fn = os.path.join(tempdir, 'foo.parquet')\n write(fn, df, row_group_offsets=[0, 2])\n with open(fn, 'rb') as f:\n pf = ParquetFile(f, open_with=open)\n d2 = pf.to_pandas()\n pd.testing.assert_frame_equal(d2, df, check_dtype=False)\n\n b = io.BytesIO(open(fn, 'rb').read())\n pf = ParquetFile(b, open_with=open)\n d2 = pf.to_pandas()\n pd.testing.assert_frame_equal(d2, df, check_dtype=False)\n\n\ndef test_cast_index(tempdir):\n df = pd.DataFrame({'i8': np.array([1, 2, 3, 4], dtype='uint8'),\n 'i16': np.array([1, 2, 3, 4], dtype='int16'),\n 'i32': np.array([1, 2, 3, 4], dtype='int32'),\n 'i64': np.array([1, 2, 3, 4], dtype='int64'),\n 'f16': np.array([1, 2, 3, 4], dtype='float16'),\n 'f32': np.array([1, 2, 3, 4], dtype='float32'),\n 'f64': np.array([1, 2, 3, 4], dtype='float64'),\n })\n fn = os.path.join(tempdir, 'foo.parquet')\n write(fn, df)\n pf = ParquetFile(fn)\n for col in ['i32']: #list(df):\n d = pf.to_pandas(index=col)\n if d.index.dtype.kind == 'i':\n assert d.index.dtype == 'int64'\n elif d.index.dtype.kind == 'u':\n assert d.index.dtype == 'uint64'\n else:\n assert d.index.dtype == 'float64'\n print(col, (d.index == df[col]).all())\n\n # assert (d.index == df[col]).all()\n\n\ndef test_zero_child_leaf(tempdir):\n df = pd.DataFrame({'x': [1, 2, 3]})\n\n fn = os.path.join(tempdir, 'foo.parquet')\n write(fn, df)\n\n pf = ParquetFile(fn)\n assert pf.columns == ['x']\n\n pf._schema[1].num_children = 0\n assert pf.columns == ['x']\n\n\ndef test_request_nonexistent_column(tempdir):\n df = pd.DataFrame({'x': [1, 2, 3]})\n\n fn = os.path.join(tempdir, 'foo.parquet')\n write(fn, df)\n\n pf = ParquetFile(fn)\n with pytest.raises(ValueError):\n pf.to_pandas(columns=['y'])\n\n\ndef test_read_multiple_no_metadata(tempdir):\n df = pd.DataFrame({'x': [1, 5, 2, 5]})\n write(tempdir, df, file_scheme='hive', row_group_offsets=[0, 2])\n os.unlink(os.path.join(tempdir, '_metadata'))\n os.unlink(os.path.join(tempdir, '_common_metadata'))\n import glob\n flist = list(sorted(glob.glob(os.path.join(tempdir, '*'))))\n pf = ParquetFile(flist)\n assert len(pf.row_groups) == 2\n out = pf.to_pandas()\n pd.testing.assert_frame_equal(out, 
df, check_dtype=False)\n\n\ndef test_single_upper_directory(tempdir):\n df = pd.DataFrame({'x': [1, 5, 2, 5], 'y': ['aa'] * 4})\n write(tempdir, df, file_scheme='hive', partition_on='y')\n pf = ParquetFile(tempdir)\n out = pf.to_pandas()\n assert (out.y == 'aa').all()\n\n os.unlink(os.path.join(tempdir, '_metadata'))\n os.unlink(os.path.join(tempdir, '_common_metadata'))\n import glob\n flist = list(sorted(glob.glob(os.path.join(tempdir, '*/*'))))\n pf = ParquetFile(flist, root=tempdir)\n assert pf.fn == join_path(os.path.join(tempdir, '_metadata'))\n out = pf.to_pandas()\n assert (out.y == 'aa').all()\n\n\ndef test_numerical_partition_name(tempdir):\n df = pd.DataFrame({'x': [1, 5, 2, 5], 'y1': ['aa', 'aa', 'bb', 'aa']})\n write(tempdir, df, file_scheme='hive', partition_on=['y1'])\n pf = ParquetFile(tempdir)\n out = pf.to_pandas()\n assert out[out.y1 == 'aa'].x.tolist() == [1, 5, 5]\n assert out[out.y1 == 'bb'].x.tolist() == [2]\n\n\ndef test_floating_point_partition_name(tempdir):\n df = pd.DataFrame({'x': [1e99, 5e-10, 2e+2, -0.1], 'y1': ['aa', 'aa', 'bb', 'aa']})\n write(tempdir, df, file_scheme='hive', partition_on=['y1'])\n pf = ParquetFile(tempdir)\n out = pf.to_pandas()\n assert out[out.y1 == 'aa'].x.tolist() == [1e99, 5e-10, -0.1]\n assert out[out.y1 == 'bb'].x.tolist() == [200.0]\n\n\[email protected](WIN, reason=\"path contains ':'\")\ndef test_datetime_partition_names(tempdir):\n dates = pd.to_datetime(['2015-05-09', '2018-10-15', '2020-10-17', '2015-05-09'])\n df = pd.DataFrame({\n 'date': dates,\n 'x': [1, 5, 2, 5]\n })\n write(tempdir, df, file_scheme='hive', partition_on=['date'])\n pf = ParquetFile(tempdir)\n out = pf.to_pandas()\n assert set(out.date.tolist()) == set(dates.tolist())\n assert out[out.date == '2015-05-09'].x.tolist() == [1, 5]\n assert out[out.date == '2020-10-17'].x.tolist() == [2]\n\n\ndef test_string_partition_names(tempdir):\n date_strings = ['2015-05-09', '2018-10-15', '2020-10-17', '2015-05-09']\n df = pd.DataFrame({\n 'date': date_strings,\n 'x': [1, 5, 2, 5]\n })\n write(tempdir, df, file_scheme='hive', partition_on=['date'])\n pf = ParquetFile(tempdir)\n out = pf.to_pandas()\n assert set(out.date.tolist()) == set(date_strings)\n assert out[out.date == '2015-05-09'].x.tolist() == [1, 5]\n assert out[out.date == '2020-10-17'].x.tolist() == [2]\n\n\[email protected]('partitions', [['2017-01-05', '1421'], ['0.7', '10']])\ndef test_mixed_partition_types(tempdir, partitions):\n df = pd.DataFrame({\n 'partitions': partitions,\n 'x': [1, 2]\n })\n write(tempdir, df, file_scheme='hive', partition_on=['partitions'])\n out = ParquetFile(tempdir).to_pandas()\n assert (out.sort_values(\"x\").set_index(\"x\").partitions == df.sort_values(\"x\").set_index(\"x\").partitions).all()\n\n\ndef test_filter_without_paths(tempdir):\n fn = os.path.join(tempdir, 'test.parq')\n df = pd.DataFrame({\n 'x': [1, 2, 3, 4, 5, 6, 7],\n 'letter': ['a', 'b', 'c', 'd', 'e', 'f', 'g']\n })\n write(fn, df)\n\n pf = ParquetFile(fn)\n out = pf.to_pandas(filters=[['x', '>', 3]])\n pd.testing.assert_frame_equal(out, df, check_dtype=False)\n out = pf.to_pandas(filters=[['x', '>', 30]])\n assert len(out) == 0\n\n\ndef test_filter_special(tempdir):\n df = pd.DataFrame({\n 'x': [1, 2, 3, 4, 5, 6, 7],\n 'symbol': ['NOW', 'OI', 'OI', 'OI', 'NOW', 'NOW', 'OI']\n })\n write(tempdir, df, file_scheme='hive', partition_on=['symbol'])\n pf = ParquetFile(tempdir)\n out = pf.to_pandas(filters=[('symbol', '==', 'NOW')])\n assert out.x.tolist() == [1, 5, 6]\n assert out.symbol.tolist() == ['NOW', 'NOW', 
'NOW']\n\n\ndef test_filter_dates(tempdir):\n df = pd.DataFrame({\n 'x': [1, 2, 3, 4, 5, 6, 7],\n 'date': [\n '2015-05-09', '2017-05-15', '2017-05-14',\n '2017-05-13', '2015-05-10', '2015-05-11', '2017-05-12'\n ]\n })\n write(tempdir, df, file_scheme='hive', partition_on=['date'])\n pf = ParquetFile(tempdir)\n out_1 = pf.to_pandas(filters=[('date', '>', '2017-01-01')])\n\n assert set(out_1.x.tolist()) == {2, 3, 4, 7}\n expected_dates = set(['2017-05-15', '2017-05-14', '2017-05-13', '2017-05-12'])\n assert set(out_1.date.tolist()) == expected_dates\n\n out_2 = pf.to_pandas(filters=[('date', '==', pd.to_datetime('may 9 2015'))])\n assert out_2.x.tolist() == [1]\n assert out_2.date.tolist() == ['2015-05-09']\n\n\ndef test_in_filter(tempdir):\n symbols = ['a', 'a', 'b', 'c', 'c', 'd']\n values = [1, 2, 3, 4, 5, 6]\n df = pd.DataFrame(data={'symbols': symbols, 'values': values})\n write(tempdir, df, file_scheme='hive', partition_on=['symbols'])\n pf = ParquetFile(tempdir)\n out = pf.to_pandas(filters=[('symbols', 'in', ['a', 'c'])])\n assert set(out.symbols) == {'a', 'c'}\n\n\ndef test_partition_columns(tempdir):\n symbols = ['a', 'a', 'b', 'c', 'c', 'd']\n values = [1, 2, 3, 4, 5, 6]\n df = pd.DataFrame(data={'symbols': symbols, 'values': values})\n write(tempdir, df, file_scheme='hive', partition_on=['symbols'])\n pf = ParquetFile(tempdir)\n\n # partition columns always come after actual columns\n assert pf.to_pandas().columns.tolist() == ['values', 'symbols']\n assert pf.to_pandas(columns=['symbols']).columns.tolist() == ['symbols']\n assert pf.to_pandas(columns=['values']).columns.tolist() == ['values']\n assert pf.to_pandas(columns=[]).columns.tolist() == []\n\n\ndef test_in_filter_numbers(tempdir):\n symbols = ['a', 'a', 'b', 'c', 'c', 'd']\n values = [1, 2, 3, 4, 5, 6]\n df = pd.DataFrame(data={'symbols': symbols, 'values': values})\n write(tempdir, df, file_scheme='hive', partition_on=['values'])\n pf = ParquetFile(tempdir)\n out = pf.to_pandas(filters=[('values', 'in', ['1', '4'])])\n assert set(out.symbols) == {'a', 'c'}\n out = pf.to_pandas(filters=[('values', 'in', [1, 4])])\n assert set(out.symbols) == {'a', 'c'}\n\n\ndef test_filter_stats(tempdir):\n df = pd.DataFrame({\n 'x': [1, 2, 3, 4, 5, 6, 7],\n })\n write(tempdir, df, file_scheme='hive', row_group_offsets=[0, 4])\n pf = ParquetFile(tempdir)\n out = pf.to_pandas(filters=[('x', '>=', 5)])\n assert out.x.tolist() == [5, 6, 7]\n\n\[email protected](\"vals,vmin,vmax,expected_in, expected_not_in\", [\n # no stats\n ([3, 6], None, None, False, False),\n\n # unique values\n ([3, 6], 3, 3, False, True),\n ([3, 6], 2, 2, True, False),\n\n # open-ended intervals\n ([3, 6], None, 7, False, False),\n ([3, 6], None, 2, True, False),\n ([3, 6], 2, None, False, False),\n ([3, 6], 7, None, True, False),\n\n # partial matches\n ([3, 6], 2, 4, False, False),\n ([3, 6], 5, 6, False, True),\n ([3, 6], 2, 3, False, True),\n ([3, 6], 6, 7, False, True),\n\n # non match\n ([3, 6], 1, 2, True, False),\n ([3, 6], 7, 8, True, False),\n\n # spanning interval\n ([3, 6], 1, 8, False, False),\n\n # empty values\n ([], 1, 8, True, False),\n\n])\ndef test_in_filters(vals, vmin, vmax, expected_in, expected_not_in):\n assert filter_in(vals, vmin, vmax) == expected_in\n assert filter_in(list(reversed(vals)), vmin, vmax) == expected_in\n\n assert filter_not_in(vals, vmin, vmax) == expected_not_in\n assert filter_not_in(list(reversed(vals)), vmin, vmax) == expected_not_in\n\n\ndef test_in_filter_rowgroups(tempdir):\n fn = os.path.join(tempdir, 'test.parq')\n df 
= pd.DataFrame({\n 'x': range(10),\n })\n write(fn, df, row_group_offsets=2)\n pf = ParquetFile(fn)\n row_groups = list(pf.iter_row_groups(filters=[('x', 'in', [2])]))\n assert len(row_groups) == 1\n assert row_groups[0].x.tolist() == [2, 3]\n\n row_groups = list(pf.iter_row_groups(filters=[('x', 'in', [9])]))\n assert len(row_groups) == 1\n assert row_groups[0].x.tolist() == [8, 9]\n\n row_groups = list(pf.iter_row_groups(filters=[('x', 'in', [2, 9])]))\n assert len(row_groups) == 2\n assert row_groups[0].x.tolist() == [2, 3]\n assert row_groups[1].x.tolist() == [8, 9]\n\n\ndef test_unexisting_filter_cols(tempdir):\n fn = os.path.join(tempdir, 'test.parq') \n df = pd.DataFrame({'a': range(5), 'b': [1, 1, 2, 2, 2]})\n write(fn, df, file_scheme='hive', partition_on='b')\n pf = ParquetFile(fn)\n with pytest.raises(ValueError, match=\"{'c'}.$\"):\n rec_df = ParquetFile(fn).to_pandas(filters=[(('a', '>=', 0),\n ('c', '==', 0),)])\n \n\ndef test_index_not_in_columns(tempdir):\n df = pd.DataFrame({'a': ['x', 'y', 'z'], 'b': [4, 5, 6]}).set_index('a')\n write(tempdir, df, file_scheme='hive')\n pf = ParquetFile(tempdir)\n out = pf.to_pandas(columns=['b'])\n assert out.index.tolist() == ['x', 'y', 'z']\n out = pf.to_pandas(columns=['b'], index=False)\n assert out.index.tolist() == [0, 1, 2]\n\n\ndef test_no_index_name(tempdir):\n df = pd.DataFrame({'__index_level_0__': ['x', 'y', 'z'],\n 'b': [4, 5, 6]}).set_index('__index_level_0__')\n write(tempdir, df, file_scheme='hive')\n pf = ParquetFile(tempdir)\n out = pf.to_pandas()\n assert out.index.name is None\n assert out.index.tolist() == ['x', 'y', 'z']\n\n df = pd.DataFrame({'__index_level_0__': ['x', 'y', 'z'],\n 'b': [4, 5, 6]})\n write(tempdir, df, file_scheme='hive')\n pf = ParquetFile(tempdir)\n out = pf.to_pandas(index='__index_level_0__', columns=['b'])\n assert out.index.name is None\n assert out.index.tolist() == ['x', 'y', 'z']\n\n pf = ParquetFile(tempdir)\n out = pf.to_pandas()\n assert out.index.name is None\n assert out.index.tolist() == [0, 1, 2]\n\n\ndef test_input_column_list_not_mutated(tempdir):\n df = pd.DataFrame({'a': [1, 2, 3], 'b': [4, 5, 6]})\n write(tempdir, df, file_scheme='hive')\n cols = ['a']\n pf = ParquetFile(tempdir)\n out = pf.to_pandas(columns=cols)\n assert cols == ['a']\n\n\ndef test_drill_list(tempdir):\n df = pd.DataFrame({'a': ['x', 'y', 'z'], 'b': [4, 5, 6]})\n dir1 = os.path.join(tempdir, 'x')\n fn1 = os.path.join(dir1, 'part.0.parquet')\n os.makedirs(dir1)\n write(fn1, df)\n dir2 = os.path.join(tempdir, 'y')\n fn2 = os.path.join(dir2, 'part.0.parquet')\n os.makedirs(dir2)\n write(fn2, df)\n\n pf = ParquetFile([fn1, fn2])\n out = pf.to_pandas()\n assert out.a.tolist() == ['x', 'y', 'z'] * 2\n assert out.dir0.tolist() == ['x'] * 3 + ['y'] * 3\n\n\ndef test_multi_list(tempdir):\n df = pd.DataFrame({'a': ['x', 'y', 'z'], 'b': [4, 5, 6]})\n dir1 = os.path.join(tempdir, 'x')\n write(dir1, df, file_scheme='hive')\n dir2 = os.path.join(tempdir, 'y')\n write(dir2, df, file_scheme='hive')\n dir3 = os.path.join(tempdir, 'z', 'deep')\n write(dir3, df, file_scheme='hive')\n\n pf = ParquetFile([dir1, dir2])\n out = pf.to_pandas() # this version may have extra column!\n assert out.a.tolist() == ['x', 'y', 'z'] * 2\n pf = ParquetFile([dir1, dir2, dir3])\n out = pf.to_pandas()\n assert out.a.tolist() == ['x', 'y', 'z'] * 3\n\n\ndef test_hive_and_drill_list(tempdir):\n df = pd.DataFrame({'a': ['x', 'y', 'z'], 'b': [4, 5, 6]})\n dir1 = os.path.join(tempdir, 'x=0')\n fn1 = os.path.join(dir1, 'part.0.parquet')\n 
os.makedirs(dir1)\n write(fn1, df)\n dir2 = os.path.join(tempdir, 'y')\n fn2 = os.path.join(dir2, 'part.0.parquet')\n os.makedirs(dir2)\n write(fn2, df)\n\n pf = ParquetFile([fn1, fn2])\n out = pf.to_pandas()\n assert out.a.tolist() == ['x', 'y', 'z'] * 2\n assert out.dir0.tolist() == ['x=0'] * 3 + ['y'] * 3\n\n\ndef test_bad_file_paths(tempdir):\n df = pd.DataFrame({'a': ['x', 'y', 'z'], 'b': [4, 5, 6]})\n dir1 = os.path.join(tempdir, 'x=0')\n fn1 = os.path.join(dir1, 'part.=.parquet')\n os.makedirs(dir1)\n write(fn1, df)\n dir2 = os.path.join(tempdir, 'y/z')\n fn2 = os.path.join(dir2, 'part.0.parquet')\n os.makedirs(dir2)\n write(fn2, df)\n\n pf = ParquetFile([fn1, fn2])\n assert pf.file_scheme == 'other'\n out = pf.to_pandas()\n assert out.a.tolist() == ['x', 'y', 'z'] * 2\n assert 'dir0' not in out\n\n path1 = os.path.join(tempdir, 'data')\n fn1 = os.path.join(path1, 'out.parq')\n os.makedirs(path1)\n write(fn1, df)\n path2 = os.path.join(tempdir, 'data2')\n fn2 = os.path.join(path2, 'out.parq')\n os.makedirs(path2)\n write(fn2, df)\n pf = ParquetFile([fn1, fn2])\n out = pf.to_pandas()\n assert out.a.tolist() == ['x', 'y', 'z'] * 2\n\n\ndef test_compression_zstd(tempdir):\n df = pd.DataFrame(\n {\n 'x': np.arange(1000),\n 'y': np.arange(1, 1001),\n 'z': np.arange(2, 1002),\n }\n )\n\n fn = os.path.join(tempdir, 'foocomp.parquet')\n\n c = {\n \"x\": {\n \"type\": \"gzip\",\n \"args\": {\n \"compresslevel\": 5,\n }\n },\n \"y\": {\n \"type\": \"zstd\",\n \"args\": {\n \"level\": 5,\n }\n },\n \"_default\": {\n \"type\": \"gzip\",\n \"args\": None\n }\n }\n write(fn, df, compression=c)\n\n p = ParquetFile(fn)\n\n df2 = p.to_pandas()\n\n pd.testing.assert_frame_equal(df, df2, check_dtype=False)\n\n\ndef test_compression_lz4(tempdir):\n df = pd.DataFrame(\n {\n 'x': np.arange(1000),\n 'y': np.arange(1, 1001),\n 'z': np.arange(2, 1002),\n }\n )\n\n fn = os.path.join(tempdir, 'foocomp.parquet')\n\n c = {\n \"x\": {\n \"type\": \"gzip\",\n \"args\": {\n \"compresslevel\": 5,\n }\n },\n \"y\": {\n \"type\": \"lz4\",\n \"args\": {\n \"compression\": 5,\n \"store_size\": False,\n }\n },\n \"_default\": {\n \"type\": \"gzip\",\n \"args\": None\n }\n }\n write(fn, df, compression=c)\n\n p = ParquetFile(fn)\n\n df2 = p.to_pandas()\n\n pd.testing.assert_frame_equal(df, df2, check_dtype=False)\n\n\ndef test_compression_snappy(tempdir):\n df = pd.DataFrame(\n {\n 'x': np.arange(1000),\n 'y': np.arange(1, 1001),\n 'z': np.arange(2, 1002),\n }\n )\n\n fn = os.path.join(tempdir, 'foocomp.parquet')\n\n c = {\n \"x\": {\n \"type\": \"gzip\",\n \"args\": {\n \"compresslevel\": 5,\n }\n },\n \"y\": {\n \"type\": \"snappy\",\n \"args\": None\n },\n \"_default\": {\n \"type\": \"gzip\",\n \"args\": None\n }\n }\n write(fn, df, compression=c)\n\n p = ParquetFile(fn)\n\n df2 = p.to_pandas()\n\n pd.testing.assert_frame_equal(df, df2, check_dtype=False)\n\n\ndef test_int96_stats(tempdir):\n df = pd.util.testing.makeMixedDataFrame()\n\n fn = os.path.join(tempdir, 'foo.parquet')\n write(fn, df, row_group_offsets=[0, 2], times='int96')\n\n p = ParquetFile(fn)\n\n s = statistics(p)\n assert isinstance(s['min']['D'][0], (np.datetime64, Timestamp))\n assert 'D' in sorted_partitioned_columns(p)\n\n\ndef test_only_partition_columns(tempdir):\n df = pd.DataFrame({'a': np.random.rand(20),\n 'b': np.random.choice(['hi', 'ho'], size=20),\n 'c': np.random.choice(['a', 'b'], size=20)})\n write(tempdir, df, file_scheme='hive', partition_on=['b'])\n pf = ParquetFile(tempdir)\n df2 = pf.to_pandas(columns=['b'])\n 
df.b.value_counts().to_dict() == df2.b.value_counts().to_dict()\n\n write(tempdir, df, file_scheme='hive', partition_on=['a', 'b'])\n pf = ParquetFile(tempdir)\n df2 = pf.to_pandas(columns=['a', 'b'])\n df.b.value_counts().to_dict() == df2.b.value_counts().to_dict()\n\n df2 = pf.to_pandas(columns=['b'])\n df.b.value_counts().to_dict() == df2.b.value_counts().to_dict()\n\n df2 = pf.to_pandas(columns=['b', 'c'])\n df.b.value_counts().to_dict() == df2.b.value_counts().to_dict()\n\n with pytest.raises(ValueError):\n # because this leaves no data to write\n write(tempdir, df[['b']], file_scheme='hive', partition_on=['b'])\n\n\ndef test_path_containing_metadata_df():\n p = ParquetFile(os.path.join(TEST_DATA, \"dir_metadata\", \"empty.parquet\"))\n df = p.to_pandas()\n assert list(p.columns) == ['a', 'b', 'c', '__index_level_0__']\n assert len(df) == 0\n\n\ndef test_empty_df():\n p = ParquetFile(os.path.join(TEST_DATA, \"empty.parquet\"))\n df = p.to_pandas()\n assert list(p.columns) == ['a', 'b', 'c', '__index_level_0__']\n assert len(df) == 0\n\n\ndef test_unicode_cols(tempdir):\n fn = os.path.join(tempdir, 'test.parq')\n df = pd.DataFrame({u\"région\": [1, 2, 3]})\n write(fn, df)\n pf = ParquetFile(fn)\n pf.to_pandas()\n\n\ndef test_multi_cat(tempdir):\n fn = os.path.join(tempdir, 'test.parq')\n N = 200\n df = pd.DataFrame(\n {'a': np.random.randint(10, size=N),\n 'b': np.random.choice(['a', 'b', 'c'], size=N),\n 'c': np.arange(200)})\n df['a'] = df.a.astype('category')\n df['b'] = df.b.astype('category')\n df = df.set_index(['a', 'b'])\n write(fn, df)\n\n pf = ParquetFile(fn)\n df1 = pf.to_pandas()\n assert (df1.index.values == df.index.values).all()\n assert (df1.loc[1, 'a'].values == df.loc[1, 'a'].values).all()\n\n\ndef test_multi_cat_single(tempdir):\n fn = os.path.join(tempdir, 'test.parq')\n N = 200\n df = pd.DataFrame(\n {'a': np.random.randint(10, size=N),\n 'b': np.random.choice(['a', 'b', 'c'], size=N),\n 'c': np.arange(200)})\n df = df.set_index(['a', 'b'])\n write(fn, df)\n pf = ParquetFile(fn)\n df1 = pf.to_pandas()\n assert (df1.index.values == df.index.values).all()\n assert (df1.loc[1, 'a'].values == df.loc[1, 'a'].values).all()\n\n\ndef test_multi_cat_split(tempdir):\n # like test above, but across multiple row-groups; we test that the\n # categories are consistent\n fn = os.path.join(tempdir, 'test.parq')\n N = 200\n df = pd.DataFrame(\n {'a': np.random.randint(10, size=N),\n 'b': np.random.choice(['a', 'b', 'c'], size=N),\n 'c': np.arange(200)})\n df = df.set_index(['a', 'b'])\n write(fn, df, row_group_offsets=25)\n\n pf = ParquetFile(fn)\n df1 = pf.to_pandas()\n assert (df1.index.values == df.index.values).all()\n assert (df1.loc[1, 'a'].values == df.loc[1, 'a'].values).all()\n\n\ndef test_multi(tempdir):\n fn = os.path.join(tempdir, 'test.parq')\n N = 200\n df = pd.DataFrame(\n {'a': np.random.randint(10, size=N),\n 'b': np.random.choice(['a', 'b', 'c'], size=N),\n 'c': np.arange(200)})\n df = df.set_index(['a', 'b'])\n write(fn, df)\n\n pf = ParquetFile(fn)\n df1 = pf.to_pandas()\n assert (df1.index.values == df.index.values).all()\n assert (df1.loc[1, 'a'].values == df.loc[1, 'a'].values).all()\n\n\ndef test_simple_nested():\n fn = os.path.join(TEST_DATA, 'nested1.parquet')\n pf = ParquetFile(fn)\n assert len(pf.dtypes) == 5\n out = pf.to_pandas()\n assert len(out.columns) == 5\n assert '_adobe_corpnew' not in out.columns\n assert all('_adobe_corpnew' + '.' 
in c for c in out.columns)\n\n\ndef test_pandas_metadata_inference():\n fn = os.path.join(TEST_DATA, 'metas.parq')\n df = ParquetFile(fn).to_pandas()\n assert df.columns.name == 'colindex'\n assert df.index.name == 'rowindex'\n assert df.index.tolist() == [2, 3]\n\n df = ParquetFile(fn).to_pandas(index='a')\n assert df.index.name == 'a'\n assert df.columns.name == 'colindex'\n\n df = ParquetFile(fn).to_pandas(index=False)\n assert df.index.tolist() == [0, 1]\n assert df.index.name is None\n\n\ndef test_write_index_false(tempdir):\n fn = os.path.join(tempdir, 'test.parquet')\n df = pd.DataFrame(0, columns=['a'], index=range(1, 3))\n write(fn, df, write_index=False)\n rec_df = ParquetFile(fn).to_pandas()\n assert rec_df.index[0] == 0\n\n\ndef test_timestamp_filer(tempdir):\n fn = os.path.join(tempdir, 'test.parquet')\n ts = [pd.Timestamp('2021/01/01 08:00:00'),\n pd.Timestamp('2021/01/05 10:00:00')]\n val = [10, 34]\n df = pd.DataFrame({'val': val, 'ts': ts})\n # two row-groups\n write(fn, df, row_group_offsets=1, file_scheme='hive')\n\n ts_filter = pd.Timestamp('2021/01/03 00:00:00')\n pf = ParquetFile(fn)\n filt = [[('ts', '<', ts_filter)], [('ts', '>=', ts_filter)]]\n assert pf.to_pandas(filters=filt).val.tolist() == [10, 34]\n\n filt = [[('ts', '>=', ts_filter)], [('ts', '<', ts_filter)]]\n assert pf.to_pandas(filters=filt).val.tolist() == [10, 34]\n\n ts_filter_down = pd.Timestamp('2021/01/03 00:00:00')\n ts_filter_up = pd.Timestamp('2021/01/06 00:00:00')\n # AND filter\n filt = [[('ts', '>=', ts_filter_down), ('ts', '<', ts_filter_up)]]\n assert pf.to_pandas(filters=filt).val.tolist() == [34]\n\n\[email protected](condition=fastparquet.writer.DATAPAGE_VERSION == 2, reason=\"not implemented\")\ndef test_row_filter(tempdir):\n fn = os.path.join(tempdir, 'test.parquet')\n df = pd.DataFrame({\n 'a': ['o'] * 10 + ['i'] * 5,\n 'b': range(15)\n })\n write(fn, df, row_group_offsets=8)\n pf = ParquetFile(fn)\n assert pf.count(filters=[[\"a\", \"==\", \"o\"]]) == 15\n assert pf.count(filters=[[\"a\", \"==\", \"o\"]], row_filter=True) == 10\n assert pf.count(filters=[[\"a\", \"==\", \"i\"]], row_filter=True) == 5\n assert pf.count(filters=[[\"b\", \"in\", [1, 3, 4]]]) == 8\n assert pf.count(filters=[[\"b\", \"in\", [1, 3, 4]]], row_filter=True) == 3\n assert pf.to_pandas(filters=[[\"b\", \"in\", [1, 3, 4]]], row_filter=True\n ).b.tolist() == [1, 3, 4]\n assert pf.to_pandas(filters=[[\"a\", \"<\", \"o\"]], row_filter=True).b.tolist() == [\n 10, 11, 12, 13, 14\n ]\n\n\ndef test_select(tempdir):\n fn = os.path.join(tempdir, 'test.parquet')\n val = [2, 10, 34, 76]\n df = pd.DataFrame({'val': val})\n write(fn, df, row_group_offsets=1)\n\n pf = ParquetFile(fn)\n assert len(pf[0].row_groups) == 1\n assert pf[0].to_pandas().val.tolist() == [2]\n assert pf[1].to_pandas().val.tolist() == [10]\n assert pf[-1].to_pandas().val.tolist() == [76]\n assert pf[:].to_pandas().val.tolist() == val\n assert pf[::2].to_pandas().val.tolist() == val[::2]\n\n\ndef test_head(tempdir):\n fn = os.path.join(tempdir, 'test.parquet')\n val = [2, 10, 34, 76]\n df = pd.DataFrame({'val': val})\n write(fn, df)\n\n pf = ParquetFile(fn)\n assert pf.head(1).val.tolist() == [2]\n\n\ndef test_spark_date_empty_rg():\n # https://github.com/dask/fastparquet/issues/634\n # first file has header size much smaller than others as it contains no row groups\n fn = os.path.join(TEST_DATA, 'spark-date-empty-rg.parq')\n pf = ParquetFile(fn)\n out = pf.to_pandas(columns=['Date'])\n assert out.Date.tolist() == [pd.Timestamp(\"2020-1-1\"), 
pd.Timestamp(\"2020-1-2\")]\n" ]
[ [ "pandas.to_datetime", "numpy.random.choice", "numpy.arange", "pandas.util.testing.makeMixedDataFrame", "pandas.DataFrame", "pandas.testing.assert_frame_equal", "numpy.random.rand", "numpy.array", "pandas.Timestamp", "numpy.random.randint" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
dhmlee/rafamultilayerperceptron
[ "0af5759e95cbfc730d0b733d65b87a6186e49fcc" ]
[ "neuralnet.py" ]
[ "import random\n\nimport numpy as np\nfrom scipy.special import expit\nfrom constants import *\n\nclass NeuralNetMLP(object):\n\tdef __init__(self, layers, random_state=None):\n\t\t\"\"\" Initialise the layers as list(input_layer, ...hidden_layers..., output_layer) \"\"\"\n\t\tnp.random.seed(random_state)\n\t\tself.num_layers = len(layers)\n\t\tself.layers = layers\n\t\tself.initialize_weights()\n\n\tdef initialize_weights(self):\n\t\t\"\"\" Randomly generate biases and weights for hidden layers. \n\t\tWeights have a Gaussian distribution with mean 0 and\n\t\tstandard deviation 1 over the square root of the number\n\t\tof weights connecting to the same neuron \"\"\"\n\t\tself.biases = [np.random.randn(y, 1) for y in self.layers[1:]]\n\t\tself.weights = [np.random.randn(y, x)/np.sqrt(x) for x, y in zip(self.layers[:-1], self.layers[1:])]\n\n\tdef fit(self, training_data, l1=0.0, l2=0.0, epochs=500, eta=0.001, minibatches=1, regularization=L2):\n\t\t\"\"\" Fits the parameters according to training data.\n\t\tl1(2) is the L1(2) regularization coefficient. \"\"\"\n\t\tself.l1 = l1\n\t\tself.l2 = l2\n\t\tn = len(training_data)\n\t\tfor epoch in range(epochs):\n\t\t\trandom.shuffle(training_data)\n\t\t\tmini_batches = [training_data[k:k+minibatches] for k in range(0, n, minibatches)]\n\t\t\tfor mini_batch in mini_batches:\n\t\t\t\tself.batch_update(mini_batch, eta, len(training_data), regularization)\n\n\tdef batch_update(self, mini_batch, eta, n, regularization=L2):\n\t\t\"\"\" Update the network's weights and biases by applying gradient\n\t\tdescent using backpropagation to a single mini batch. \"\"\"\n\t\tnabla_b = [np.zeros(b.shape) for b in self.biases]\n\t\tnabla_w = [np.zeros(w.shape) for w in self.weights]\n\t\tfor x, y in mini_batch:\n\t\t\tdelta_nabla_b, delta_nabla_w = self.back_propogation(x, y)\n\t\t\tnabla_b = [nb+dnb for nb, dnb in zip(nabla_b, delta_nabla_b)]\n\t\t\tnabla_w = [nw+dnw for nw, dnw in zip(nabla_w, delta_nabla_w)]\n\t\tself.biases = [b-(eta/len(mini_batch))*nb for b, nb in zip(self.biases, nabla_b)]\n\t\tif regularization == L2:\n\t\t\tself.weights = [(1-eta*(self.l2/n))*w-(eta/len(mini_batch))*nw for w, nw in zip(self.weights, nabla_w)]\n\t\telif regularization == L1:\n\t\t\tself.weights = [w - eta*self.l1*np.sign(w)/n-(eta/len(mini_batch))*nw for w, nw in zip(self.weights, nabla_w)]\n\n\tdef back_propogation(self, x, y, fn=SIGMOID):\n\t\t\"\"\" Gradient for cost function is calculated from a(L) and \n\t\tback-propagated to the input layer.\n\t\tCross Entropy cost function is associated with sigmoid neurons, while\n\t\tLog-Likelihood cost function is associated with softmax neurons.\"\"\"\n\t\tnabla_b = [np.zeros(b.shape) for b in self.biases]\n\t\tnabla_w = [np.zeros(w.shape) for w in self.weights]\n\t\t# forward pass: store the weighted inputs (zs) and activations of every layer\n\t\tactivation = x\n\t\tactivations = [x]\n\t\tzs = []\n\t\tfor b, w in zip(self.biases, self.weights):\n\t\t\tz = np.dot(w, activation)+b\n\t\t\tzs.append(z)\n\t\t\tif fn == SIGMOID:\n\t\t\t\tactivation = self.sigmoid(z)\n\t\t\telse:\n\t\t\t\tactivation = self.softmax(z)\n\t\t\tactivations.append(activation)\n\t\t# backward pass: compute the output-layer error, then propagate it towards the input layer\n\t\tdell = self.delta(activations[-1], y)\n\t\tnabla_b[-1] = dell\n\t\tnabla_w[-1] = np.dot(dell, activations[-2].transpose())\n\t\tfor l in range(2, self.num_layers):\n\t\t\tdell = np.dot(self.weights[-l+1].transpose(), dell) * self.derivative(zs[-l], fn)\n\t\t\tnabla_b[-l] = dell\n\t\t\tnabla_w[-l] = np.dot(dell, activations[-l-1].transpose())\n\t\treturn (nabla_b, nabla_w)\n\n\tdef cross_entropy_loss(self, a, y):\n\t\treturn np.sum(np.nan_to_num(-y*np.log(a)-(1-y)*np.log(1-a)))\n\n\tdef log_likelihood_loss(self, a, y):\n\t\treturn -np.dot(y, self.softmax(a).transpose())\n\n\tdef delta(self, a, y):\n\t\t\"\"\" delta for both activations works out to be the same\"\"\"\n\t\treturn (a-y)\n\n\tdef sigmoid(self, z):\n\t\t\"\"\" expit is equivalent to 1.0/(1.0 + np.exp(-z)) \"\"\"\n\t\treturn expit(z)\n\n\tdef softmax(self, z):\n\t\te = np.exp(z)\n\t\treturn (e/np.sum(e))\n\n\tdef derivative(self, z, fn):\n\t\t\"\"\" derivative for f is f(1-f) for respective cost functions \"\"\"\n\t\tif fn == SIGMOID:\n\t\t\tf = self.sigmoid\n\t\telif fn == SOFTMAX:\n\t\t\tf = self.softmax\n\t\treturn f(z)*(1-f(z))" ]
[ [ "numpy.dot", "numpy.log", "numpy.sqrt", "numpy.random.seed", "scipy.special.expit", "numpy.sign", "numpy.random.randn", "numpy.zeros", "numpy.sum" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
Abhis-123/Mallmetering
[ "720c66c9f2f3c767eb798daaa86f9d71cee13122" ]
[ "backend/Superadmin/views.py" ]
[ "from rest_framework import status\nfrom uuid import uuid4\nimport datetime;\nimport csv\n# Create your views here.\nfrom rest_framework.decorators import api_view\nfrom datetime import datetime\nfrom django.utils.encoding import smart_str\nfrom rest_framework import generics\nfrom rest_framework import generics\nfrom rest_framework.response import Response\nfrom rest_framework import status\nfrom .serializers import CutomerSerializer, MeterReadingsSerializer,RegisterSerializer,RegisterSerializerCustomer,RegisterSerializerSupervisor,MemorySerializer,SupervisorSerializer, MeterSerializer,AllMeterSerializer\nfrom .models import Memory , CustomerModel,SupervisorModel,Meters,Meter_Readings, Authorization,Superadmin\nimport sys\nfrom Subadmins.models import Connections\n\n# Register API\nclass RegisterAPI(generics.GenericAPIView):\n serializer_class = RegisterSerializer\n def post(self, request, *args, **kwargs):\n serializer = self.get_serializer(data=request.data)\n try:\n serializer.is_valid(raise_exception=True)\n except Exception as e:\n return Response(data={'message':str(e)}, status=400)\n user = serializer.save()\n return Response(data=serializer.validated_data,status=200)\n\n\n\n\ndef isAuthenticated(request):\n token =request.headers.get('Authorization')\n \n if Authorization.objects.filter(token=token).count()==0:\n return False\n oauth= Authorization.objects.get(token=token)\n ct = datetime.now()\n present=ct.timestamp()\n session_expirey_date=oauth.session_expirey_date\n if present >session_expirey_date:\n return False\n return True\n\n\n\n################################################################\n\"\"\"\" super admin dashboard\"\"\"\n@api_view(('GET','POST',))\ndef AdminLogin(request):\n data = request.data\n if 'username' not in data:\n return Response(data={\"message\":\" username is required\"},status=400)\n if 'password' not in data:\n return Response(data={\"message\":\" password is required\"},status=400)\n username=data['username']\n password=data['password']\n try:\n user= Superadmin.objects.get(id=1)\n except Exception as e:\n return Response(data={\"message\":e})\n\n if user == None:\n return Response(data={\"message\":\"user does not exist\"},status=400) \n if (password==user.password) == False:\n return Response(data={\"message\":\" password is incorrect\"},status=400)\n token = uuid4()\n ct = datetime.now()\n session_start_date= ct.timestamp()\n session_expirey_date=session_start_date+ 60*60*24*30 \n obj= Authorization.objects.create(username=username,token=token,session_start_date=session_start_date,session_expirey_date=session_expirey_date) \n obj.save()\n return Response(data={\n \"username\":username,\n \"token\":token,\n \"expiry\":session_expirey_date,\n },status=200)\n\n\ndef auth(request):\n if isAuthenticated(request):\n return Response(data={'message':' logged in!'},status=200)\n else:\n return Response(data={'message':'not logged in'}, status=403)\n\n\n\ndef get_admin(request):\n pics=Superadmin.objects.get(id=1)\n serializer=RegisterSerializer(pics)\n return Response(serializer.data)\n\n\ndef get_statsdata(request):\n type= request.GET.get('type')\n data= {}\n if type=='total_customers': \n data['total_customers']= CustomerModel.objects.all().count()\n if type=='active_customers':\n data['active_customers']=CustomerModel.objects.filter(linked=True).count() \n if type=='total_meters':\n data['total_meters']=Meters.objects.filter().count()\n if type=='linked_meters':\n data['linked_meters']= Meters.objects.filter(linked=True).count()\n if 
type=='total_consumption':\n meters= Meters.objects.filter(linked=True)\n meters= MeterSerializer(meters,many=True)\n consumption=0\n for meter in meters.data:\n meter_id=meter['id']\n reading= Meter_Readings.objects.filter(meter_id=meter_id).order_by('-time_stamp').first()\n if reading!=None:\n consumption=consumption+reading.reading_value\n data['total_consumption']=consumption\n return Response(data=data, status=200)\ndef get_cunsumption_summary(request):\n customers = CustomerModel.objects.filter(linked=True)\n customers = CutomerSerializer(customers,many=True)\n data =[]\n for customer in customers.data:\n username = customer['username']\n try:\n connection=Connections.objects.get(username=username)\n meter_name = connection.meter_name\n meter_id = Meters.objects.get(meter_name=meter_name).id\n readings = Meter_Readings.objects.filter(meter_id=meter_id)\n readings = MeterReadingsSerializer(readings,many=True)\n except Exception as e:\n continue\n data.append({\n 'name': username,\n 'values': readings.data\n })\n return Response(data=data, status=200)\n\n@api_view(('GET','POST','DELETE','PUT'))\ndef dashboard(request):\n operation = request.GET.get('operation')\n if operation == 'getadmin':\n return get_admin(request)\n if operation == 'isAuthenticated':\n q= isAuthenticated(request)\n if q:\n return Response(data={'message':'logged in'}, status=200)\n else:\n return Response(data={'message':'not logged in'}, status=403)\n if operation == 'statsdata':\n return get_statsdata(request)\n if operation =='get_cunsumption_summary':\n return get_cunsumption_summary(request)\n\n return Response(data={'message':'invalid operation'}, status=400)\n################################################################\n\"\"\"config section of the superadmin\"\"\"\n@api_view(('GET',)) \ndef list_memory(request):\n dummy=Memory.objects.all()\n serializer=MemorySerializer(dummy,many=True)\n return Response(serializer.data)\n\ndef archive_data(request):\n meter_id = request.data['meter_id']\n print(request.data)\n meter_id = request.data['meter_id']\n start_date=datetime.strptime(str(request.data['start_date']),\"%Y-%m-%d\")\n end_date=datetime.strptime(str(request.data['end_date']),\"%Y-%m-%d\")\n delete= False\n if request.data['delete']=='true':\n delete=True\n readings = Meter_Readings.objects.filter(meter_id=meter_id,time_stamp__gt=start_date,time_stamp__lt=end_date)\n serializer = MeterReadingsSerializer(readings,many=True)\n res = serializer.data\n if delete:\n readings.delete()\n return Response(data=res, status=200)\n\n\n \n\ndef memory(request):\n my_model_admin = Superadmin()\n total_size_Admin = sys.getsizeof(my_model_admin) * Superadmin.objects.count()\n my_model_customer = CustomerModel()\n total_size_customer = sys.getsizeof(my_model_customer) * CustomerModel.objects.count()\n my_model_supervisor = SupervisorModel()\n total_size_supervisor = sys.getsizeof(my_model_supervisor) * SupervisorModel.objects.count() \n meters_model= Meters() \n meters_size = sys.getsizeof(meters_model) * Meters.objects.count()\n meter_readings= Meter_Readings()\n meter_reading_size = sys.getsizeof(meter_readings) * Meter_Readings.objects.count()\n\n return Response([{\n \"database\":\"Admin\",\n \"bytesusage\":total_size_Admin},{\n \"database\":\"Customer\",\n \"bytesusage\":total_size_customer},{\n \"database\":\"Supervisor\",\n \"bytesusage\":total_size_supervisor},{\n \"database\":\"Meter\",\n \"bytesusage\":meters_size},{\n \"database\":\"MeterReadings\",\n 'bytesusage':meter_reading_size\n }])\n 
\n\n\n@api_view(('GET','POST','DELETE','PUT'))\ndef config(request):\n operation = request.GET.get('operation')\n if operation=='archivedata':\n return archive_data(request)\n if operation==\"memorydetails\":\n return memory(request)\n return Response(data={'message':'invalid opration'})\n\n###########################################################################\n\"\"\"profile section for super admin\"\"\"\n\ndef update_admin(request):\n if len(request.data['username'])<1:\n return Response(data={'message':'username cannot be empty'},status=400)\n if len(request.data['email'])<1:\n return Response(data={'message':'email cannot be empty'},status=400)\n\n if len(request.data['mobile_no'])<1:\n return Response(data={'message':'mobile number cannot be empty'},status=400)\n\n admin = Superadmin.objects.get(id=1)\n admin.username = request.data['username']\n admin.email = request.data['email']\n admin.mobile_no = request.data['mobile_no']\n admin.save(update_fields=['username','email','mobile_no'])\n return Response(data={\n 'username':admin.username,'email':admin.email,'mobile_no':admin.mobile_no\n },status=200)\n\n\n\ndef changepassword(request):\n data = request.data\n if ('oldpassword' in data)== False:\n return Response(data={'message':'old password not specified'},status=400)\n if ('newpassword' in data)== False:\n return Response(data={'message':'new password not specified'},status=400)\n \n admin= Superadmin.objects.get(id=1)\n if len(data['newpassword'])<8:\n return Response(data={'message':'password cannot be less than 8 characters'},status=400)\n admin.password=data['newpassword']\n admin.save(update_fields=['password'])\n return Response(data={'message':'changed password'},status=200)\n\n\ndef changeprofileimage(request):\n serializer=RegisterSerializer(data=request.data)\n try:\n serializer.is_valid(raise_exception=True)\n except Exception as e:\n return Response(data={'message':str(e)}, status=400)\n admin=Superadmin.objects.get(id=1)\n admin.profile_pic= serializer.validated_data['profile_pic']\n admin.save(update_fields=['profile_pic'])\n return Response(data={'message':'updated profile_pic'})\n\n\n@api_view(('GET','POST','DELETE','PUT'))\ndef profile(request):\n operation = request.GET.get('operation')\n if operation == 'get':\n pics=Superadmin.objects.get(id=1)\n serializer=RegisterSerializer(pics)\n return Response(serializer.data)\n \n if operation == 'update':\n return update_admin(request)\n if operation==\"changepassword\":\n return changepassword(request)\n return Response(data={'message':'incorrect url'},status=400)\n\n\n######################################\n\"\"\" section for supervisors\"\"\"\n\ndef all_supervisors(request):\n pics=SupervisorModel.objects.all()\n serializer=SupervisorSerializer(pics,many=True)\n return Response(serializer.data)\n\ndef one_supervisor(request,user_id):\n pics=SupervisorModel.objects.get(id=user_id)\n serializer=SupervisorSerializer(pics)\n return Response(serializer.data)\n\n\ndef delete_supervisor(request, user_id):\n supervisor = SupervisorModel.objects.get(id=user_id)\n\n if request.method == 'DELETE': \n supervisor.delete() \n return Response(data={'message': 'supervisor was deleted successfully!'}, status=status.HTTP_204_NO_CONTENT)\n\n\ndef register_supervisor(request):\n data = request.data\n serializer= RegisterSerializerSupervisor(data=data)\n try:\n serializer.is_valid(raise_exception=True)\n except Exception as e:\n return Response(data={'message':str(e)}, status=status.HTTP_400_BAD_REQUEST)\n 
print(serializer.validated_data)\n obj = SupervisorModel.objects.create(\n username=serializer.validated_data['username'], \n password=serializer.validated_data['password'], \n email=serializer.validated_data['email'],\n mobile_no=serializer.validated_data['mobile_no'],\n profile_pic=serializer.validated_data['profile_pic'] \n )\n return Response(data={'message': 'registered successfully'},status=200)\n\n\n\n\n\ndef update_supervisor(request,user_id):\n try:\n supervisor = SupervisorModel.objects.get(id=user_id)\n except SupervisorModel.DoesNotExist as e:\n return Response(data={' message':' Supervisor does not exist'},status=400)\n except Exception as e:\n return Response(data={' message':'Databse error'},status=400)\n\n if ('username' in request.data)== False:\n return Response(data={' message':'Username key not provided'},status=400)\n \n if ('password' in request.data)== False:\n return Response(data={'message':'password key not provided'},status=400)\n if ('email' in request.data)== False:\n return Response(data={' message':'email key not provided'},status=400)\n\n if('mobile_no' in request.data)== False:\n return Response(data={' message':'mobile_no key not provided'},status=400)\n\n if SupervisorModel.objects.filter(username=request.data['username']).count()>0 and supervisor.username!=request.data['username']:\n return Response(data={' message':'There is already a supervisor with this username'},status=400)\n\n supervisor.username=request.data['username']\n supervisor.password=request.data['password']\n supervisor.email = request.data['email']\n supervisor.mobile_no=request.data['mobile_no']\n supervisor.save(update_fields=['username','email','password','mobile_no'])\n return Response(data={'message':' Successfully updated details'},status=200) \n\n\n \n\n@api_view(('GET','POST','DELETE','PUT')) \ndef supervisors(request):\n operation = request.GET.get('operation')\n if operation =='get':\n id = request.GET.get('id')\n if id==None:\n return all_supervisors(request)\n return one_supervisor(request,id)\n if operation =='add':\n return register_supervisor(request)\n if operation =='delete':\n id = request.GET.get('id')\n if id ==None:\n return Response(data={'message':'id not found in url'},status=400)\n return delete_supervisor(request,id)\n if operation =='update':\n id = request.GET.get('id')\n if id ==None:\n return Response(data={'message':'id not found in url'},status=400)\n return update_supervisor(request,id)\n return Response(data={\"message\":\"url not found\"},status=400)\n\n\n\n\n######################################\n\"\"\" section for customers\"\"\"\n\ndef update_customer_status(id):\n if CustomerModel.objects.filter(id=id).count()==0:\n return Response(data={'message':'Customer not found'})\n \n customer = CustomerModel.objects.get(id=id)\n if customer.status:\n customer.status= False\n else:\n customer.status= True\n customer.save(update_fields=['status'])\n return Response(data={\"message\":'updated successfully'},status=status.HTTP_200_OK)\n\n\n\n\ndef register_customer(request):\n data=request.data\n\n serializer= RegisterSerializerCustomer(data=data)\n\n try:\n serializer.is_valid(raise_exception=True)\n except Exception as e:\n return Response(data={'message':str(e)},status=400)\n\n if CustomerModel.objects.filter(username=serializer.validated_data['username']).count() > 0:\n return Response(data={'message':'A customer with the specified username already exists'},status=400)\n serializer.save()\n return Response(data={'message':'Registered 
Successfully'},status=status.HTTP_200_OK)\n\n \ndef one_customer(request,user_id):\n pics=CustomerModel.objects.get(id=user_id)\n serializer=CutomerSerializer(pics)\n return Response(serializer.data)\n\ndef update_customer(request,user_id):\n serializer= RegisterSerializerCustomer(data=request.data)\n try:\n serializer.is_valid(raise_exception=True)\n except Exception as e:\n return Response(data={\"message\":str(e)},status=status.HTTP_400_BAD_REQUEST)\n try:\n instance= CustomerModel.objects.get(id=request.data['id'])\n except Exception as e:\n return Response(data={\"message\":str(e)},status=status.HTTP_400_BAD_REQUEST)\n\n if CustomerModel.objects.filter(id=user_id).count() > 0 and instance.username !=serializer.validated_data['username']:\n return Response(data={'message':'A customer with the specified username already exists'},status=400)\n try:\n connection = Connections.objects.get(username=instance.username)\n except Exception as e:\n return Response(data={'message':'internal server error'}, status=400)\n \n instance.username = serializer.validated_data['username']\n instance.password = serializer.validated_data['password']\n instance.email = serializer.validated_data['email']\n instance.status =serializer.validated_data['status']\n instance.mobile_no = serializer.validated_data['mobile_no']\n instance.address = serializer.validated_data['address']\n instance.subscription= serializer.validated_data['subscription']\n \n \n connection.username = serializer.validated_data['username']\n\n connection.save(update_fields=['username'])\n instance.save(update_fields=['username','email','password','status','mobile_no','address','subscription'])\n return Response(data={'message':'updated successfully'},status=200)\n\n\ndef all_customers(request):\n pics=CustomerModel.objects.all()\n data= CutomerSerializer(pics,many=True)\n response =[]\n for customer in data.data:\n new_customer = customer \n connection = Connections.objects.get(username=customer['username'])\n new_customer['linked_meter']=connection.meter_name\n \n response.append(new_customer)\n print(response)\n return Response(data=response,status=200)\n\ndef delete_customer(request,user_id):\n try:\n tutorial = CustomerModel.objects.get(id=user_id) \n except Exception as e:\n return Response(data={'message':'database confilict'},status=400)\n if request.method == 'DELETE': \n if tutorial.linked==True:\n connection = Connections.objects.get(username=tutorial.username)\n if connection.meter_name!=\"Not Selected\" or Meters.objects.filter(meter_name=connection.meter_name).count()>0:\n meters = Meters.objects.get(meter_name=connection.meter_name)\n meters.linked=False\n tutorial.delete() \n connection.delete()\n \n return Response(data={'message': 'Customer removed successfully!'}, status=status.HTTP_204_NO_CONTENT)\n \n\n\ndef countcustomers(request):\n count1=CustomerModel.objects.all().count()\n count2=CustomerModel.objects.filter(status=True).count()\n count3= CustomerModel.objects.filter(linked=True).count()\n return Response({\n \"totalcustomers\":count1,\n \"activecustomers\":count2,\n 'linkedcustomers':count3\n })\n\n\n@api_view(('GET','POST','DELETE','PUT'))\ndef customers(request):\n \n operation=request.GET.get('operation')\n if operation==\"get\":\n id = request.GET.get('id')\n if id==None:\n return all_customers(request)\n return one_customer(request,id)\n if operation=='add':\n return register_customer(request)\n if operation==\"update\":\n id = request.GET.get('id')\n if id == None:\n return Response(data={'message':'user id 
not specified'},status=400) \n return update_customer(request,id)\n if operation==\"delete\":\n id = request.GET.get('id')\n if id==None:\n return Response(data={'message':'user id not specified'},status=400)\n return delete_customer(request,id)\n if operation==\"updatestatus\":\n id = request.GET.get('id')\n return update_customer_status(id)\n if operation == \"count\":\n return countcustomers(request)\n return Response({\"maessage\":\"bad request\"},status=status.HTTP_400_BAD_REQUEST)\n\n\n\n\n\n\n\n\n\n\n\n################################################################################################\n\"\"\"\nsection for operation on meters and readings\n\"\"\"\n\n\n\n\ndef all_meters(request):\n pics=Meters.objects.filter(linked=False)\n serializer=AllMeterSerializer(pics,many=True)\n return Response(serializer.data)\n\ndef RegisterMeter(request):\n data={}\n data['meter_name']=request.data['meter_name']\n data['meter_url']=request.data['meter_url']\n if 'working' in request.data.keys():\n data['working']=request.data['working']\n else:\n data['working']=True\n \n \n if Meters.objects.filter(meter_name=data['meter_name']).count()>0:\n return Response(data={'message':\"A meter already exists with this name\"},status=400)\n \n meter_serializer=MeterSerializer(data=data)\n try:\n meter_serializer.is_valid(raise_exception=True) \n except Exception as e:\n Response(data={\"message\":str(e)})\n user=meter_serializer.save() \n return Response(data=meter_serializer.validated_data,status=200)\n\n\n\n# get one meter connection\ndef get_onemeter(request,id):\n pics=Meters.objects.get(id=id)\n serializer=MeterSerializer(pics)\n return Response(serializer.data)\n\n# update meter connections\ndef getmeters(request):\n pics=Meters.objects.all()\n serializer=MeterSerializer(pics,many=True)\n return Response(serializer.data)\n\n\ndef updatemeter(request):\n if request.data['id'] == '':\n return Response(data={'message':\"id is required\"},status=400)\n id=request.data['id']\n meter=Meters.objects.get(id=id)\n serializer= MeterSerializer(data=request.data)\n try:\n serializer.is_valid(raise_exception=True)\n except Exception as e:\n return Response(data={\"message\":e})\n if Meters.objects.filter(meter_name=request.data['meter_name']).count()>0 and meter.meter_name!=request.data['meter_name']:\n return Response(data={'message':\"A meter already exists with this name\"},status=400)\n try:\n connection = Connections.objects.get(meter_name=meter.meter_name)\n except Exception as e:\n return Response(data={'message':'something went wrong in database'},status=400)\n\n connection.meter_name=serializer.validated_data['meter_name']\n \n meter.meter_name=serializer.validated_data['meter_name']\n meter.meter_url=serializer.validated_data['meter_url']\n meter.working=serializer.validated_data['working']\n connection.save(update_fields=['meter_name'])\n meter.save(update_fields=['meter_name', 'meter_url','working']) \n return Response(data=serializer.data,status=200)\n\n\ndef insert_reading(request):\n data=request.data\n meter_id = data['meter_id']\n reading_value=data['reading_value']\n time_stamp = datetime.strptime(data['time_stamp'], '%Y-%m-%d') \n new_data = { \"meter_id\":meter_id, \"reading_value\":reading_value, \"time_stamp\":time_stamp}\n serializer=MeterReadingsSerializer(data=new_data)\n try:\n serializer.is_valid(raise_exception=True)\n except Exception as e:\n print(e)\n return Response(data={\"message\":e})\n serializer.save()\n return Response(serializer.data,status=200)\n\n\ndef 
get_readings(request):\n meter_id = request.data['meter_id']\n readings = Meter_Readings.objects.filter(meter_id=meter_id)\n serializer = MeterReadingsSerializer(readings,many=True)\n return Response(data=serializer.data,status=200)\n\n\nfrom datetime import timedelta, date\nfrom numpy import random\nimport pandas as pd\ndef daterange(date1, date2):\n for n in range(int ((date2 - date1).days)+1):\n yield date1 + timedelta(n)\n\n\n\ndef insert_dummy_data(request):\n meter_id = request.data['meter_id']\n start_date=datetime.strptime(str(request.data['start_date']),\"%Y-%m-%d\")\n end_date=datetime.strptime(str(request.data['end_date']),\"%Y-%m-%d\")\n date_list=daterange(start_date,end_date)\n reading_list=[]\n df=pd.DataFrame(date_list, columns=['time_stamp'])\n \n variation= [7,15,13,10,-4,-5,6,-7]\n for i in range(0,len(df)):\n base_reading=i*10\n x=random.choice(variation)\n reading_value=base_reading+x\n time_stamp = df['time_stamp'].iloc[i]\n new_data = { \"meter_id\":meter_id, \"reading_value\":reading_value, \"time_stamp\":time_stamp}\n serializer=MeterReadingsSerializer(data=new_data)\n try:\n serializer.is_valid(raise_exception=True)\n except Exception as e:\n print(e)\n return Response(data={\"message\":e})\n serializer.save()\n reading_list.append(reading_value)\n df['reading_value']=reading_list\n return Response(data=df.to_json())\n\n\n\ndef delete_meter(request):\n id = request.GET.get('id')\n if id == '':\n return Response(data={'message':\"meter is not supplied\"})\n try:\n meter = Meters.objects.get(id=id)\n except Meters.DoesNotExist:\n return Response(data={'message':\"meter is already deleted from database\"},status=status.HTTP_204_NO_CONTENT)\n except Exception as e:\n return Response(data={'message':e},status=status.HTTP_500_INTERNAL_SERVER_ERROR)\n\n if Connections.objects.filter(meter_name=meter.meter_name).count() > 0:\n connection = Connections.objects.get(meter_name=meter.meter_name)\n customer = CustomerModel.objects.get(username=connection.username)\n customer.linked=False\n connection.meter_name=\"Not Selected\"\n connection.save(update_fields=['meter_name'])\n customer.save(update_fields=['linked'])\n if Meter_Readings.objects.filter(meter_id=meter.id).count() > 0:\n readings = Meter_Readings.objects.filter(meter_id=meter.id)\n readings.delete()\n meter.delete()\n return Response(data={'message':\"successfully deleted\"},status=status.HTTP_204_NO_CONTENT)\n\ndef get_counts(request):\n count1=Meters.objects.all().count()\n count2= Meters.objects.filter(linked=True).count()\n return Response(data={'all':count1,\"linked\":count2},status=status.HTTP_200_OK)\n \n\n@api_view(('GET','POST','DELETE','PUT'))\ndef meters(request):\n operation = request.GET.get('operation')\n print(operation)\n print(request.GET.get('id'))\n if operation==\"get\":\n id= request.GET.get('id')\n if id ==None:\n return getmeters(request)\n return get_onemeter(request,id)\n if operation==\"add\" and request.method == 'POST':\n return RegisterMeter(request)\n if operation==\"delete\":\n return delete_meter(request)\n if operation==\"update\":\n if request.method == 'POST' or request.method == 'PUT':\n return updatemeter(request)\n if operation==\"insertreading\" and request.method == 'POST':\n return insert_reading(request)\n if operation==\"insertdummy\":\n return insert_dummy_data(request)\n if operation==\"getreadings\":\n return get_readings(request)\n if operation==\"counts\":\n return get_counts(request)\n if operation==\"getfilter\":\n return all_meters(request)\n return 
Response(data={\"message\":'invalid operation'}, status=400)\n\n" ]
[ [ "pandas.DataFrame", "numpy.random.choice" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] } ]
nathangeology/cyclist_dataset
[ "44ad4a3765e86cba934bfdbfb151a788eddfbead", "44ad4a3765e86cba934bfdbfb151a788eddfbead" ]
[ "data_science_layer/reporting/pred_vs_actual.py", "data_science_layer/sampling/up_sampler.py" ]
[ "from data_science_layer.reporting.abstract_report import AbstractReport\nfrom data_science_layer.pipeline.abstract_pipline import AbstractPipeline\nimport matplotlib.pyplot as plt\nimport pkg_resources\n\n\nclass ActualVsPredictionPlot(AbstractReport):\n\n sub_folder = 'reports'\n\n def report(self, pipeline: AbstractPipeline):\n x = pipeline.train\n y = pipeline.train_y\n pred_y = pipeline(x)\n plt.scatter(y, pred_y)\n plt.suptitle('Predicted vs Actual', fontsize=18, y=1.0)\n plt.xlabel('Actual', fontsize=22)\n plt.ylabel('Predicted', fontsize=22)\n plt.legend( )\n folder = ''\n path = pkg_resources.resource_filename('crcdal', 'cache/' + folder + '/' + self.sub_folder +'/')\n pkg_resources.ensure_directory(path)\n plt.savefig(path + self.dataset_tag + '_Predicted_vs_actual.jpg')\n", "from imblearn.over_sampling import RandomOverSampler\nimport pandas as pd\n\n\nclass UpSampler(object):\n\n sampling_strategy = 'auto'\n return_indices = False\n random_state = 1\n ratio = None\n\n def fit_sample(self, data, y):\n self._upsampler = RandomOverSampler(sampling_strategy=self.sampling_strategy,\n return_indices=self.return_indices,\n random_state=self.random_state,\n ratio=self.ratio)\n\n ros_data, ros_y = self._upsampler.fit_sample(data, y)\n data = pd.DataFrame(ros_data, columns=data.columns)\n y = pd.Series(ros_y, name=y.name)\n return data, y\n" ]
[ [ "matplotlib.pyplot.legend", "matplotlib.pyplot.scatter", "matplotlib.pyplot.savefig", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.suptitle", "matplotlib.pyplot.ylabel" ], [ "pandas.Series", "pandas.DataFrame" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "1.3", "0.19", "1.1", "1.5", "0.24", "0.20", "1.0", "0.25", "1.2" ], "scipy": [], "tensorflow": [] } ]
fjwillemsen/BayesianOptimization-autotuning
[ "9af48014079a98e05324cb9d67cb8660aaf26c28" ]
[ "cached_data_used/convolution.py" ]
[ "#!/usr/bin/env python\nimport sys\n\nimport numpy\nimport logging\nimport kernel_tuner\nfrom collections import OrderedDict\nimport gc\n\n\ndef tune(device_name, strategy=\"bayes_opt_GPyTorch_lean\", strategy_options=None, verbose=True, quiet=False, simulation_mode=True):\n\n #input dimensions and data\n image_width = 4096\n image_height = 4096\n filter_width = 15\n filter_height = 15\n problem_size = (image_width, image_height)\n size = numpy.prod(problem_size)\n\n args = []\n\n metrics = OrderedDict()\n metrics[\"GFLOP/s\"] = lambda p: (image_width * image_height * filter_width * filter_height * 2 / 1e9) / (p[\"time\"] / 1e3)\n\n #setup tunable parameters\n tune_params = OrderedDict()\n tune_params[\"filter_width\"] = [filter_width]\n tune_params[\"filter_height\"] = [filter_height]\n tune_params[\"block_size_x\"] = [1, 2, 4, 8, 16, 32, 48, 64, 80, 96, 112, 128]\n tune_params[\"block_size_y\"] = [1, 2, 4, 8, 16, 32]\n tune_params[\"tile_size_x\"] = [1, 2, 3, 4, 5, 6, 7, 8]\n tune_params[\"tile_size_y\"] = [1, 2, 3, 4, 5, 6, 7, 8]\n tune_params[\"use_padding\"] = [0, 1]\n tune_params[\"read_only\"] = [0, 1]\n\n restrict = [\"block_size_x*block_size_y>=64\", \"tile_size_x*tile_size_y<30\"]\n\n grid_div_x = [\"block_size_x\", \"tile_size_x\"]\n grid_div_y = [\"block_size_y\", \"tile_size_y\"]\n\n #start tuning\n results, env = kernel_tuner.tune_kernel(\"convolution_kernel\", \"convolution.cu\", problem_size, args, tune_params, grid_div_y=grid_div_y,\n grid_div_x=grid_div_x, metrics=metrics, verbose=verbose, quiet=quiet, restrictions=restrict,\n cache=\"cache_files/convolution_\" + device_name, strategy=strategy, strategy_options=strategy_options,\n simulation_mode=simulation_mode)\n\n # print(len(results))\n\n return results, env\n\n\nif __name__ == \"__main__\":\n if len(sys.argv) != 2:\n print(\"Usage: ./convolution.py [device name]\")\n exit(1)\n\n device_name = sys.argv[1]\n\n tune(device_name)\n" ]
[ [ "numpy.prod" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
rgevrey/portfolio-management-and-risk-tools
[ "2bdb3ddb168b4ca973ab73b412cb4c77afbc0859" ]
[ "src/risk_kit.py" ]
[ "import pandas as pd\r\nimport numpy as np\r\nfrom scipy.stats import norm\r\nimport pdb as pdb\r\nimport os\r\n\r\n# Last updated on 5/01/2022 19:08\r\n\r\n#######\r\n# Data Extraction\r\n#######\r\n\r\ndef path_data():\r\n # Path parsing to make it work across computers\r\n path_directory = os.path.dirname(os.getcwd())\r\n path_data = path_directory + \"/data/\"\r\n return path_data\r\n\r\ndef get_ffme_returns(get_all = False):\r\n \"\"\"\r\n Load the Fama-French Dataset for the returns of the Top and Bottom Deciles by\r\n Market Cap\r\n \"\"\"\r\n me_m = pd.read_csv(\"data/Portfolios_Formed_on_ME_monthly_EW.csv\",\r\n header=0, index_col=0, parse_dates=True, na_values=-99.99)\r\n if get_all == False:\r\n rets = me_m[['Lo 10', 'Hi 10']]\r\n rets.columns = ['SmallCap', 'LargeCap']\r\n else:\r\n rets = me_m\r\n rets = rets/100\r\n rets.index = pd.to_datetime(rets.index, format = \"%Y%m\").to_period('M')\r\n return rets\r\n\r\ndef get_hfi_returns():\r\n \"\"\"\r\n Load and format the EDHEC Hedge Fun Index Returns\r\n \"\"\"\r\n hfi = pd.read_csv(\"data/edhec-hedgefundindices.csv\",\r\n header=0, index_col=0, parse_dates=True, na_values=-99.99)\r\n hfi = hfi/100\r\n hfi.index = pd.to_datetime(hfi.index, format = \"%Y%m\").to_period('M')\r\n return hfi\r\n\r\ndef get_ind_returns():\r\n \"\"\"\r\n Load and format the Ken French industry returns\r\n \"\"\"\r\n ind = pd.read_csv(\"data/ind30_m_vw_rets.csv\", header=0, index_col =0, parse_dates=True)/100\r\n ind.index = pd.to_datetime(ind.index, format =\"%Y%m\").to_period('M')\r\n ind.columns = ind.columns.str.strip()\r\n return ind\r\n\r\ndef get_ind_size():\r\n \"\"\"\r\n Load and format the Ken French 30 Industry Portfolio Value Weighted size data\r\n TO MERGE INTO A SINGLE RETRIEVAL FUNCTION\r\n \"\"\"\r\n ind = pd.read_csv(\"data/ind30_m_size.csv\", header=0, index_col =0, parse_dates=True)\r\n ind.index = pd.to_datetime(ind.index, format =\"%Y%m\").to_period('M')\r\n ind.columns = ind.columns.str.strip()\r\n return ind\r\n\r\ndef get_ind_nfirms():\r\n \"\"\"\r\n Load and format the Ken French 30 Industry Portfolio Value Weighted number of firms\r\n TO MERGE INTO A SINGLE RETRIEVAL FUNCTION\r\n \"\"\"\r\n ind = pd.read_csv(\"data/ind30_m_nfirms.csv\", header=0, index_col =0, parse_dates=True)\r\n ind.index = pd.to_datetime(ind.index, format =\"%Y%m\").to_period('M')\r\n ind.columns = ind.columns.str.strip()\r\n return ind\r\n\r\ndef get_total_market_index_returns():\r\n \"\"\"\r\n Retrieves the return from the market-weighted index we created\r\n \"\"\"\r\n ind_return = get_ind_returns()\r\n ind_nfirms = get_ind_nfirms()\r\n ind_size = get_ind_size()\r\n \r\n if ind_return.shape == ind_size.shape == ind_return.shape == ind_nfirms.shape:\r\n ind_mktcap = ind_nfirms * ind_size\r\n \r\n # Summing across columns rather than lines\r\n total_mktcap = ind_mktcap.sum(axis=\"columns\")\r\n ind_capweight = ind_mktcap.divide(total_mktcap, axis=\"rows\")\r\n total_market_return = (ind_capweight*ind_return).sum(axis=\"columns\")\r\n \r\n else: \r\n raise ValueError(\"Arrays must have the same size\")\r\n \r\n return total_market_return\r\n\r\n#######\r\n# Distribution statistics\r\n#######\r\n\r\ndef skewness(r):\r\n \"\"\"\r\n Alternative to scipy.stats.skewness()\r\n Computes the skewness of a series\r\n Returns a float or a series\r\n \"\"\"\r\n demeaned_r = r - r.mean()\r\n # use the population standard deviation, so set dof=0\r\n sigma_r = r.std(ddof=0)\r\n exp = (demeaned_r**3).mean()\r\n return exp/sigma_r**3\r\n\r\n\r\ndef kurtosis(r):\r\n 
\"\"\"\r\n Alternative to scipy.stats.kurtosis()\r\n Computes the kurtosis of a series\r\n Returns a float or a series\r\n ALTERNATIVELY YOU CAN MAKE A PARAMETER RATHER THAT COPYING THE CODE AGAIN\r\n \"\"\"\r\n demeaned_r = r - r.mean()\r\n # use the population standard deviation, so set dof=0\r\n sigma_r = r.std(ddof=0)\r\n exp = (demeaned_r**4).mean()\r\n return exp/sigma_r**4\r\n\r\nimport scipy.stats\r\ndef is_normal(r, level=0.01):\r\n \"\"\"\r\n Applies the Jarque-Bera test to determine if a Series is normal or not\r\n Test is applied at the 1% level by default\r\n Returns True if the hypothesis of normality is acccepted, False otherwise\r\n \"\"\"\r\n statistic, p_value = scipy.stats.jarque_bera(r) # capturing a tupple here\r\n return p_value > level\r\n\r\n#######\r\n# RISK METRICS\r\n#######\r\n\r\ndef semideviation(r):\r\n \"\"\"\r\n Returns the semideviation aka negative semideviation of r\r\n r must be a Series or a DataFrame\r\n \"\"\"\r\n is_negative = r < 0\r\n return r[is_negative].std(ddof=0)\r\n\r\ndef var_historic(r, level =5):\r\n \"\"\"\r\n Returns the historical Value at Risk at a specified level\r\n i.e. returns the number such that \"level\" percent of the returns\r\n fall below that number and the (100-level) percent are above\r\n \"\"\"\r\n if isinstance(r, pd.DataFrame):\r\n return r.aggregate(var_historic, level=level) # weird, will be called on all columns of dataframe first\r\n elif isinstance(r, pd.Series):\r\n return -np.percentile(r, level)\r\n else:\r\n raise TypeError(\"Expected r to be Series or DataFrame\")\r\n \r\ndef var_gaussian(r, level =5, modified = False):\r\n \"\"\"\r\n Returns the parametric gaussian VaR of a series or DataFrame\r\n If \"modified\" is True, then the modified VaR is returned\r\n using the Cornish-Fisher modification\r\n \"\"\"\r\n # Compute the Z score assuming it was Gaussian\r\n z = norm.ppf(level/100)\r\n if modified:\r\n # modify the Z score base on observed skewness and kurtosis\r\n s = skewness(r)\r\n k = kurtosis(r)\r\n z = (z +\r\n (z**2 - 1)*s/6 +\r\n (z**3 -3*z)*(k-3)/24 - \r\n (2*z**3 -5*z)*(s**2)/36\r\n )\r\n return -(r.mean() + z*r.std(ddof=0))\r\n \r\ndef cvar_historic(r, level =5):\r\n \"\"\"\r\n Computes the Conditional VaR of series or Dataframe\r\n \"\"\"\r\n if isinstance(r, pd.Series):\r\n is_beyond = r <= -var_historic(r, level = level)\r\n return -r[is_beyond].mean()\r\n elif isinstance(r, pd.DataFrame):\r\n return r.aggregate(cvar_historic, level=level)\r\n else:\r\n raise TypeError(\"Expected r to be a series or DataFrame\")\r\n \r\ndef drawdown(returns_series: pd.Series): \r\n \"\"\"\r\n Takes a time series of returns\r\n Computes and returns a DataFrame that contains:\r\n - the wealth index\r\n - the previous peaks\r\n - percent drawdowns (in decimals\r\n \"\"\"\r\n wealth_index = 1000*(1+returns_series).cumprod()\r\n previous_peaks = wealth_index.cummax()\r\n drawdowns = (wealth_index - previous_peaks)/previous_peaks\r\n return pd.DataFrame({\r\n \"Wealth\": wealth_index,\r\n \"Peaks\" : previous_peaks,\r\n \"Drawdown\": drawdowns})\r\n#######\r\n# PORTFOLIO STATISTICS \r\n#######\r\n\r\ndef compound(r):\r\n \"\"\"\r\n returns the result of compounding the set of returns in r\r\n \"\"\"\r\n return np.expm1(np.log1p(r).sum())\r\n\r\ndef period_returns(r,output=\"All\"):\r\n \"\"\"\r\n Hopefully correctly computes the total period money return, monthly and annual\r\n provided monthly data is supplied\r\n returns a df in %\r\n Output = \"All\" for full report \r\n Output = \"Annual\" for annual only\r\n 
Output = \"Monthly\" for monthly only\r\n Output = \"Period\" for matching the period only\r\n Output = \"Cumprod\" for cumprod\r\n \"\"\"\r\n r_prod = (np.prod(r+1) - 1)\r\n r_cumprod = (np.cumprod(r+1) - 1)\r\n r_monthly = (((1+r_prod)**(1/r_cumprod.shape[0])) - 1)\r\n r_annual = (((1 + r_monthly)**12) - 1)\r\n if output == \"All\":\r\n r = pd.DataFrame({\"Period\": r_prod.values*100,\r\n \"Monthly\": r_monthly.values*100, \r\n \"Annual\" : r_annual.values*100})\r\n elif output == \"Annual\":\r\n r = r_annual\r\n elif output == \"Monthly\":\r\n r = r_monthly\r\n elif output == \"Period\":\r\n r = r.pct_change().dropna()\r\n elif output == \"Cumprod\":\r\n r = r_cumprod\r\n return r\r\n \r\ndef period_volatility(r, output=\"All\"):\r\n \"\"\"\r\n Hopefully correctly computes the monthly and annual vol\r\n provided monthly data is supplised\r\n returns a df in %\r\n PENDING = ANY PERIODS\r\n Output = \"All\" for full report \r\n Output = \"Annual\" for annual only\r\n \"\"\"\r\n volatility_monthly = r.std()\r\n volatility_annual = volatility_monthly * np.sqrt(12)\r\n if output == \"All\":\r\n v = pd.DataFrame({\"Monthly\": volatility_monthly.values*100, \r\n \"Annual\" : volatility_annual.values*100})\r\n elif output == \"Annual\":\r\n v = volatility_annual\r\n return v\r\n \r\ndef sharpe_ratio(r, riskfree_rate, periods_per_year):\r\n \"\"\"\r\n Computes the annualized sharpe ratio of a set of returns\r\n \"\"\"\r\n rf_per_period = (1+riskfree_rate)**(1/periods_per_year)-1\r\n excess_ret = r - rf_per_period\r\n ann_ex_ret = period_returns(excess_ret, output=\"Annual\")\r\n ann_vol = period_volatility(r, output=\"Annual\")\r\n return ann_ex_ret/ann_vol\r\n\r\ndef portfolio_return(weights, returns):\r\n \"\"\"\r\n Weights to Returns\r\n \"\"\"\r\n # W transpose R in matrix notation, @ covmat weights\r\n return weights.T @ returns\r\n\r\ndef portfolio_vol(weights, covmat):\r\n \"\"\"\r\n Weights to vol\r\n Turns lists,arrays into float volatility figure\r\n \r\n Last updated 24/12/2020\r\n \"\"\"\r\n if isinstance(weights, list):\r\n weights = np.array(weights)\r\n \r\n return (weights.T @ covmat @ weights) ** 0.5\r\n\r\ndef terminal_values(rets):\r\n \"\"\"\r\n Returns the final value of a dollar at the end of the return period for each scenario\r\n Nothing more than the compounded return\r\n \"\"\"\r\n return (rets+1).prod()\r\n\r\ndef terminal_stats(rets, floor = 0.8, cap=np.inf, name=\"Stats\"):\r\n \"\"\"\r\n Produce summary statistics on the terminal values per invested dollar\r\n across a range of N scenarios\r\n rets is a T x N dataframe of returns, where T is the time-step (we assume rets is sorted by time)\r\n Returns a 1 column DataFrame of Summary Stats indexed by the stat name\r\n \"\"\"\r\n terminal_wealth = (rets+1).prod()\r\n breach = terminal_wealth < floor # how often I end up below my floor, gives booleans\r\n reach = terminal_wealth >= cap \r\n p_breach = breach.mean() if breach.sum() > 0 else np.nan\r\n p_reach = breach.mean() if reach.sum() > 0 else np.nan\r\n e_short = (floor-terminal_wealth[breach]).mean() if breach.sum() > 0 else np.nan # expected shortfall\r\n e_surplus = (cap-terminal_wealth[reach].mean()) if reach.sum() > 0 else np.nan\r\n sum_stats = pd.DataFrame.from_dict({\r\n \"mean\": terminal_wealth.mean(),\r\n \"std\": terminal_wealth.std(),\r\n \"p_breach\": p_breach,\r\n \"e_short\": e_short,\r\n \"p_reach\": p_reach,\r\n \"e_surplus\": e_surplus\r\n }, orient=\"index\", columns=[name])\r\n return sum_stats\r\n\r\n#######\r\n# PORTFOLIO 
ALLOCATION\r\n#######\r\n\r\ndef plot_ef2(n_points, er, cov, style = \".-\"):\r\n \"\"\"\r\n Plots the 2-asset efficient frontier\r\n \"\"\"\r\n if er.shape[0] != 2:\r\n raise ValueError(\"plot_ef2 can only plot 2-asset frontiers\")\r\n weights = [np.array([w, 1-w]) for w in np.linspace(0, 1, n_points)]\r\n rets = [portfolio_return(w, er) for w in weights]\r\n vols = [portfolio_vol(w, cov) for w in weights]\r\n ef = pd.DataFrame({\"R\": rets, \"Vol\": vols})\r\n return ef.plot.line(x=\"Vol\", y=\"R\", style=style)\r\n\r\nfrom scipy.optimize import minimize\r\ndef minimize_vol(target_r, er, cov):\r\n \"\"\"\r\n From target return to a weight vector\r\n \"\"\"\r\n n = er.shape[0]\r\n init_guess = np.repeat(1/n, n)\r\n #constraints\r\n bounds = ((0.0, 1.0),)*n\r\n return_is_target = {\r\n 'type': 'eq',\r\n 'args': (er,),\r\n 'fun': lambda weights, er: target_r - portfolio_return(weights, er)\r\n }\r\n weights_sum_to_1 = {\r\n 'type': 'eq',\r\n 'fun': lambda weights: np.sum(weights) - 1\r\n }\r\n # running the optimizer\r\n results = minimize(portfolio_vol, init_guess,\r\n args=(cov,), method=\"SLSQP\",\r\n #options={'disp':False},\r\n constraints = (return_is_target, weights_sum_to_1),\r\n bounds=bounds\r\n )\r\n return results.x\r\n\r\ndef bt_mix(r1, r2, allocator, **kwargs):\r\n \"\"\"\r\n Runs a back test (simulation) of allocating between a two set of returns\r\n r1 and r2 are T x N DataFrames or returns where T is the time step and N is\r\n the number of scenarios.\r\n allocator is a function that takes two sets of returns and allocator specific parameters,\r\n and produces an allocation to the first portfolio (the rest of the money is invested in the GHP)\r\n as a T x 1 Dataframe. \r\n Returns a T x N DataFrame of the resulting N portfolio scenarios\r\n \"\"\"\r\n if not r1.shape == r2.shape:\r\n raise ValueError(\"r1 and r2 need to be the same shape\")\r\n \r\n # Function as an object\r\n weights = allocator(r1, r2, **kwargs)\r\n if not weights.shape == r1.shape:\r\n raise ValueError(\"Allocator returned weights that don't match r1\")\r\n \r\n # Now computing the returns of the mix\r\n r_mix = weights*r1 + (1-weights)*r2\r\n return r_mix\r\n\r\ndef fixedmix_allocator(r1, r2, w1, **kwargs):\r\n \"\"\"\r\n Produces a time series over T steps of allocations between the PSP and the GHP across N scenarios\r\n PSP and GHP are T x N DataFrames that represent the returns of the PSP and the GHP such that:\r\n each column is a scenario\r\n each row is the price for a timestep\r\n Returns a T x N DataFrame of PSP weights\r\n \"\"\"\r\n return pd.DataFrame(data=w1, index=r1.index, columns=r1.columns)\r\n\r\ndef floor_allocator(psp_r, ghp_r, floor, zc_prices, m=3):\r\n \"\"\"\r\n Allocate betwen PSP and GHP with the goal to provide exposure to the upside of the PSP without\r\n violating the floor. 
\r\n Uses a CPPI-style dynamic risk budgeting algorithm by investing a multiple of the cushion in the PSP\r\n Returns a dataframe with the same shape as the PSP/GHP representing the weights in the PSP\r\n \"\"\"\r\n if zc_prices.shape != psp_r.shape:\r\n raise ValueError(\"PSP and ZC prices must have the same shape\")\r\n n_steps, n_scenarios = psp_r.shape \r\n account_value = np.repeat(1, n_scenarios) # computing a vector of n scenarios, initialised at 1 for each scenario\r\n floor_value = np.repeat(1, n_scenarios) # set the floor value at 1, doesn't matter because we update later\r\n w_history = pd.DataFrame(index=psp_r.index, columns=psp_r.columns) # about to return a sequence of weight\r\n for step in range(n_steps):\r\n floor_value = floor*zc_prices.iloc[step] ## PV of Floor assuming today's rates and flat YC, because ZC = factor\r\n cushion = (account_value - floor_value)/account_value\r\n psp_w = (m*cushion).clip(0,1) # same as applying min and max, more compact\r\n ghp_w = 1 - psp_w\r\n psp_alloc = account_value*psp_w\r\n ghp_alloc = account_value*ghp_w\r\n # recompute the new account value at the end of this step\r\n account_value = psp_alloc*(1+psp_r.iloc[step]) + ghp_alloc*(1+ghp_r.iloc[step])\r\n w_history.iloc[step] = psp_w\r\n return w_history # sequence of weight over time\r\n\r\ndef drawdown_allocator(psp_r, ghp_r, maxdd, m=3):\r\n \"\"\"\r\n Allocate betwen PSP and GHP with the goal to provide exposure to the upside of the PSP without\r\n violating the drawdown constrainst.\r\n Uses a CPPI-style dynamic risk budgeting algorithm by investing a multiple of the cushion in the PSP\r\n Returns a dataframe with the same shape as the PSP/GHP representing the weights in the PSP\r\n \"\"\"\r\n n_steps, n_scenarios = psp_r.shape \r\n account_value = np.repeat(1, n_scenarios) # computing a vector of n scenarios, initialised at 1 for each scenario\r\n floor_value = np.repeat(1, n_scenarios) # set the floor value at 1, doesn't matter because we update later\r\n peak_value = np.repeat(1, n_scenarios)\r\n w_history = pd.DataFrame(index=psp_r.index, columns=psp_r.columns) # about to return a sequence of weight\r\n for step in range(n_steps):\r\n floor_value = (1-maxdd)*peak_value ## Floor is based on previous peak\r\n cushion = (account_value - floor_value)/account_value\r\n psp_w = (m*cushion).clip(0,1) # same as applying min and max, more compact\r\n ghp_w = 1 - psp_w\r\n psp_alloc = account_value*psp_w\r\n ghp_alloc = account_value*ghp_w\r\n # recompute the new account value at the end of this step\r\n account_value = psp_alloc*(1+psp_r.iloc[step]) + ghp_alloc*(1+ghp_r.iloc[step])\r\n peak_value = np.maximum(peak_value, account_value)\r\n w_history.iloc[step] = psp_w\r\n return w_history # sequence of weight over time\r\n\r\n\r\ndef glidepath_allocator(r1, r2, start_glide=1, end_glide=0): # 100% to 0% is the most extreme version\r\n \"\"\"\r\n Simulates a Target-Date-Fund style gradual move from r1 to r2\r\n \"\"\"\r\n n_points = r1.shape[0]\r\n n_col = r1.shape[1]\r\n path = pd.Series(data=np.linspace(start_glide, end_glide, num=n_points))\r\n paths = pd.concat([path]*n_col, axis=1) # replicating n of those in a dataframe, multiplying a list replicates it\r\n paths.index = r1.index\r\n paths.columns = r1.columns\r\n return paths\r\n\r\ndef optimal_weights(n_points, er, cov):\r\n '''\r\n Generates a list of weights to run the optimiser on to minimise the vol\r\n '''\r\n target_rs = np.linspace(er.min(), er.max(), n_points)\r\n weights = [minimize_vol(target_return, er, cov) for 
target_return in target_rs]\r\n return weights\r\n\r\ndef gmv(cov):\r\n \"\"\"\r\n Returns the weights of the Global Minimum Vol portfolio given the covariance matrix\r\n \"\"\"\r\n n = cov.shape[0]\r\n return msr(0, np.repeat(1,n), cov)\r\n\r\ndef plot_ef(n_points, er, cov, style = \".-\", show_cml=False, riskfree_rate=0, show_ew=False, show_gmv=False):\r\n \"\"\"\r\n Plots the N-asset efficient frontier\r\n \"\"\"\r\n weights = optimal_weights(n_points, er, cov)\r\n rets = [portfolio_return(w, er) for w in weights]\r\n vols = [portfolio_vol(w, cov) for w in weights]\r\n ef = pd.DataFrame({\r\n \"R\": rets,\r\n \"Vol\": vols})\r\n ax = ef.plot.line(x=\"Vol\", y=\"R\", style=style)\r\n if show_ew:\r\n n = er.shape[0]\r\n w_ew = np.repeat(1/n, n)\r\n r_ew = portfolio_return(w_ew, er)\r\n vol_ew = portfolio_vol(w_ew, cov)\r\n #display EW\r\n ax.plot([vol_ew], [r_ew], color=\"goldenrod\", marker=\"o\", markersize=12)\r\n if show_gmv:\r\n w_gmv = gmv(cov) #only depends on covariance matrix\r\n r_gmv = portfolio_return(w_gmv, er)\r\n vol_gmv = portfolio_vol(w_gmv, cov)\r\n #display gmv\r\n ax.plot([vol_gmv], [r_gmv], color=\"midnightblue\", marker=\"o\", markersize=12)\r\n if show_cml:\r\n rf = 0.1\r\n w_msr = msr(riskfree_rate, er, cov)\r\n r_msr = portfolio_return(w_msr, er)\r\n vol_msr = portfolio_vol(w_msr, cov)\r\n # Add CML\r\n cml_x = [0,vol_msr]\r\n cml_y = [riskfree_rate, r_msr]\r\n ax.plot(cml_x, cml_y, color=\"green\", marker=\"o\", linestyle=\"dashed\", markersize=12, linewidth=2)\r\n return ax\r\n\r\ndef msr(riskfree_rate, er, cov):\r\n \"\"\"\r\n From riskfree_rate to a weight vector\r\n \"\"\"\r\n n = er.shape[0]\r\n init_guess = np.repeat(1/n, n)\r\n #constraints\r\n bounds = ((0.0, 1.0),)*n\r\n weights_sum_to_1 = {\r\n 'type': 'eq',\r\n 'fun': lambda weights: np.sum(weights) - 1\r\n }\r\n # running the optimizer, we want to maximize the Sharpe Ratio\r\n def neg_sharpe_ratio(weights, riskfree_rate, er, cov):\r\n \"\"\"\r\n Returns the negative of the sharpe ratio, given weights\r\n \"\"\"\r\n r = portfolio_return(weights, er)\r\n vol = portfolio_vol(weights, cov)\r\n return -(r - riskfree_rate)/vol\r\n \r\n results = minimize(neg_sharpe_ratio, init_guess,\r\n args=(riskfree_rate, er, cov,), method=\"SLSQP\",\r\n #options={'disp':False},\r\n constraints = (weights_sum_to_1),\r\n bounds=bounds\r\n )\r\n return results.x\r\n\r\ndef run_cppi(risky_r, safe_r=None, m=3, start=1000, floor=0.8, riskfree_rate=0.03, drawdown=None):\r\n \"\"\"\r\n Runs a backtest of the CPPI strategy, given a set of return for the risky asset\r\n Returns a dictionary containing: Asset Value History, Risk Budget History, Risky Weight History\r\n \"\"\"\r\n \r\n # CPPI parameters\r\n dates = risky_r.index\r\n n_steps = len(dates)\r\n account_value = start\r\n floor_value = start*floor\r\n peak = start\r\n \r\n if isinstance(risky_r, pd.Series):\r\n risky_r = pd.DataFrame(risky_r, columns = [\"R\"])\r\n \r\n if safe_r is None:\r\n safe_r = pd.DataFrame().reindex_like(risky_r)\r\n safe_r.values[:] = riskfree_rate/12 # Fast way to set all values to a number\r\n\r\n # Tracking back test values\r\n account_history = pd.DataFrame().reindex_like(risky_r)\r\n cushion_history = pd.DataFrame().reindex_like(risky_r)\r\n risky_w_history = pd.DataFrame().reindex_like(risky_r)\r\n \r\n for step in range(n_steps):\r\n if drawdown is not None:\r\n peak = np.maximum(peak, account_value)\r\n floor_value = peak*(1-drawdown)\r\n cushion = (account_value - floor_value)/account_value\r\n risky_w = m*cushion\r\n risky_w = 
np.minimum(risky_w, 1) # We don't want leverage\r\n risky_w = np.maximum(risky_w, 0) # We don't want to go short\r\n safe_w = 1-risky_w\r\n risky_alloc = account_value*risky_w\r\n safe_alloc = account_value*safe_w\r\n\r\n # update the account value for this time step\r\n # import pdb; pdb.set_trace()\r\n account_value = risky_alloc*(1+risky_r.iloc[step]) + safe_alloc*(1+safe_r.iloc[step])\r\n # save the values so we can look at the history and plot it etc\r\n cushion_history.iloc[step] = cushion\r\n risky_w_history.iloc[step] = risky_w\r\n account_history.iloc[step] = account_value\r\n \r\n risky_wealth = start*(1+risky_r).cumprod()\r\n \r\n backtest_result = {\r\n \r\n \"Wealth\": account_history,\r\n \"Risky Wealth\": risky_wealth,\r\n \"Risky Budget\": cushion_history,\r\n \"Risky Allocation\": risky_w_history,\r\n \"m\": m,\r\n \"start\" : start,\r\n \"floor\" : floor,\r\n \"risky_r\": risky_r,\r\n \"safe_r\": safe_r\r\n }\r\n return backtest_result\r\n\r\ndef summary_stats(r, riskfree_rate=0.03):\r\n \"\"\"\r\n Return a DataFrame that contains aggregated summary stats for the returns in the columns of r\r\n \"\"\"\r\n ann_r = r.aggregate(period_returns, output=\"Annual\")\r\n ann_vol = r.aggregate(period_volatility, output=\"Annual\")\r\n ann_sr = r.aggregate(sharpe_ratio, riskfree_rate=riskfree_rate, periods_per_year=12)\r\n dd = r.aggregate(lambda r: drawdown(r).Drawdown.min())\r\n skew = r.aggregate(skewness)\r\n kurt = r.aggregate(kurtosis)\r\n cf_var5 = r.aggregate(var_gaussian, modified=True)\r\n hist_cvar5 = r.aggregate(var_gaussian, modified=True)\r\n return pd.DataFrame({\r\n \"Annualized Return\": ann_r,\r\n \"Annualized Vol\": ann_vol,\r\n \"Skewness\": skew,\r\n \"Kurtosis\": kurt,\r\n \"Cornish-Fisher VaR (5%)\": cf_var5,\r\n \"Historic CVaR (5%)\": hist_cvar5,\r\n \"Sharpe Ratio\": ann_sr,\r\n \"Max Drawdown (5%)\": dd\r\n })\r\n\r\n#######\r\n# SIMULATIONS\r\n#######\r\n\r\ndef gbm(n_years = 10, n_scenarios=1000, mu=0.07, sigma=0.15, steps_per_year=12, s_0=100.0, prices=True):\r\n \"\"\"\r\n Evolution of Geometric Brownian Motion trajectories, such as for Stock Prices through Monte Carlo\r\n :param n_years: The number of years to generate data for\r\n :param n_paths: The number of scenarios/trajectories\r\n :param mu: Annualized Drift, e.g. 
Market Return\r\n :param sigma: Annualized Volatility\r\n :param steps_per_year: granularity of the simulation\r\n :param s_0: initial value\r\n :return: a numpy array of n_paths columns and n_years*steps_per_year rows\r\n \"\"\"\r\n # Derive per-step Model Parameters from User Specifications\r\n dt = 1/steps_per_year\r\n n_steps = int(n_years*steps_per_year) + 1\r\n # the standard way ...\r\n # rets_plus_1 = np.random.normal(loc=mu*dt+1, scale=sigma*np.sqrt(dt), size=(n_steps, n_scenarios))\r\n # without discretization error ...\r\n rets_plus_1 = np.random.normal(loc=(1+mu)**dt, scale=(sigma*np.sqrt(dt)), size=(n_steps, n_scenarios))\r\n rets_plus_1[0] = 1\r\n ret_val = s_0*pd.DataFrame(rets_plus_1).cumprod() if prices else rets_plus_1-1\r\n return ret_val\r\n\r\n\r\ndef show_cppi(n_scenarios = 50, mu=0.07, sigma=0.15, m=3, floor=0., riskfree_rate=0.03, y_max=100, steps_per_year=12):\r\n \"\"\"\r\n Plots the results of a Monte Carlo Simulation of CPPI\r\n \"\"\"\r\n import matplotlib.pyplot as plt\r\n import numpy as np\r\n \r\n start = 100\r\n sim_rets = gbm(n_scenarios=n_scenarios, mu=mu, sigma=sigma, steps_per_year=steps_per_year)\r\n risky_r = pd.DataFrame(sim_rets)\r\n risky_r = period_returns(risky_r,output=\"Period\")\r\n \r\n # Run the backtest\r\n btr = run_cppi(risky_r=pd.DataFrame(risky_r), riskfree_rate=riskfree_rate, m=m, start=start, floor=floor)\r\n wealth = btr[\"Wealth\"]\r\n \r\n # Calculate terminal wealth stats\r\n y_max = wealth.values.max()*y_max/100\r\n terminal_wealth = wealth.iloc[-1] # picking up the last row via -1\r\n \r\n # Boolean mask = an array of booleans\r\n tw_mean = terminal_wealth.mean()\r\n tw_median = terminal_wealth.median()\r\n tw_min = terminal_wealth.min()\r\n tw_max = terminal_wealth.max()\r\n failure_mask = np.less(terminal_wealth, start*floor)\r\n n_failures = failure_mask.sum()\r\n p_fail = n_failures/n_scenarios\r\n \r\n # If there's a failure, what's the average failure extent?\r\n # Dot is the dot product\r\n e_shortfall = np.dot(terminal_wealth-start*floor, failure_mask)/n_failures if n_failures > 0 else 0.0\r\n \r\n # Plotting\r\n fig, (wealth_ax, hist_ax) = plt.subplots(nrows=1, ncols=2, sharey=True, gridspec_kw={\"width_ratios\":[3,2]},figsize=(24,9))\r\n plt.subplots_adjust(wspace=0.0)\r\n \r\n wealth.plot(ax = wealth_ax, legend=False, alpha=0.3, color=\"indianred\")\r\n wealth_ax.axhline(y=start, ls=\":\", color=\"black\")\r\n wealth_ax.axhline(y=start*floor, ls=\"--\", color='red')\r\n wealth_ax.set_ylim(top=y_max)\r\n \r\n terminal_wealth.plot.hist(ax=hist_ax, bins=50, ec='w', fc=\"indianred\", orientation='horizontal')\r\n hist_ax.axhline(y=start, ls=\":\", color=\"black\")\r\n hist_ax.axhline(y=tw_mean, ls=\":\", color=\"blue\")\r\n hist_ax.axhline(y=tw_median, ls=\":\", color=\"purple\")\r\n hist_ax.annotate(f\"Range: ${int(tw_min)} - ${int(tw_max)} ({int(tw_max-tw_min)})\", xy=(.5,.95),\r\n xycoords='axes fraction', fontsize=24)\r\n hist_ax.annotate(f\"Mean: ${int(tw_mean)}\", xy=(.5, .9), xycoords='axes fraction', fontsize=24)\r\n hist_ax.annotate(f\"Median: ${int(tw_median)}\", xy=(.5, .85), xycoords='axes fraction', fontsize=24)\r\n if (floor > 0.01):\r\n hist_ax.axhline(y=start*floor, ls=\"--\", color=\"red\", linewidth=3)\r\n hist_ax.annotate(f\"Violations: {n_failures} ({p_fail*100:2.2f}%)\\nE(shortfall)=${e_shortfall:.2}\",\r\n xy=(.5,.7), xycoords='axes fraction', fontsize=24)\r\n \r\n cppi_controls = widgets.interactive(show_cppi,\r\n n_scenarios=widgets.IntSlider(min=1, max=1000, step=5, value=50),\r\n mu=(0., +.2, 
.01),\r\n sigma=(0,.5,.05),\r\n floor=(0,2,.1),\r\n m=(1,5,.5),\r\n riskfree_rate=(0, .05, .01),\r\n steps_per_year=widgets.IntSlider(min=1,max=12, step=1, value=12,\r\n description=\"Rebals/Year\"),\r\n y_max=widgets.IntSlider(min=0,max=100,step=1,value=100,\r\n description=\"Zoom Y Axis\")\r\n )\r\n display(cppi_controls)\r\n\r\n#######\r\n# ASSET LIABILITY MANAGEMENT AND FIXED INCOME\r\n#######\r\n\r\ndef funding_ratio(assets, liabilities, r):\r\n \"\"\"\r\n Computes the funding ratio of some assets given liabilities and interest rate\r\n \"\"\"\r\n return float(pv(assets, r)/pv(liabilities, r))\r\n\r\ndef pv(cashflows, r):\r\n \"\"\"\r\n Computes the present value of a sequence of cash flows given by the time (as an index) and amounts\r\n r can be a scalar, or a Series or DataFrame with the number of rows matching the num of rows in flows\r\n \"\"\"\r\n dates = cashflows.index\r\n discounts = discount(dates, r)\r\n return discounts.multiply(cashflows, axis='rows').sum()\r\n\r\ndef discount(time, rate):\r\n \"\"\"\r\n Compute the price of a pure discount bond that pays a dollar at time t given interest rate r\r\n Rate is the per period interest rate\r\n Returns a |t| x |r| Series or Dataframe\r\n Rate can be a float, Series or Dataframe\r\n Returns a Dataframe indexed by time\r\n \"\"\"\r\n discounts = pd.DataFrame([(rate+1)**-i for i in time])\r\n discounts.index = time\r\n return discounts\r\n\r\ndef macaulay_duration(cashflows, discount_rate):\r\n \"\"\"\r\n Computes the Macaulay Duration of a sequence of cash flows\r\n \"\"\"\r\n discounted_flows = discount(cashflows.index, discount_rate)*cashflows\r\n weights = discounted_flows/discounted_flows.sum()\r\n return np.average(cashflows.index, weights=weights.squeeze())\r\n\r\ndef show_funding_ratio(assets,r):\r\n fr = funding_ratio(assets, liabilities, r)\r\n print(f'{fr*100:.2f}')\r\n \r\n controls = widgets.interactive(show_funding_ratio,\r\n assets = widgets.IntSlider(min=1,max=10, step=1, value=5),\r\n r = (0, .20, .01)\r\n )\r\n display(controls)\r\n\r\ndef inst_to_ann(r):\r\n \"\"\"\r\n Converts short rate to an annualized rate\r\n \"\"\"\r\n return np.expm1(r)\r\n\r\ndef ann_to_inst(r):\r\n \"\"\"\r\n Converts annualized to a short rate\r\n \"\"\"\r\n return np.log1p(r)\r\n\r\ndef bond_cash_flows(maturity, principal=100, coupon_rate=0.03, coupons_per_year=12):\r\n \"\"\"\r\n Returns a dataframe of cash flows generated by a bond,\r\n indexed by a coupon number (I think)\r\n \"\"\"\r\n n_coupons = round(maturity*coupons_per_year)\r\n coupon_amt = principal*coupon_rate/coupons_per_year\r\n coupon_times = np.arange(1, n_coupons+1)\r\n cash_flows = pd.Series(data=coupon_amt, index=coupon_times)\r\n cash_flows.iloc[-1] += principal # For the last cash flow, also add the principal\r\n return pd.DataFrame(data=cash_flows)\r\n\r\ndef bond_price(maturity, principal=100, coupon_rate=0.03, coupons_per_year=12, discount_rate=0.03):\r\n \"\"\"\r\n Computes the price of a bond that pays regular coupons until maturity\r\n at which time the principal and the final coupon is returned\r\n This is not designed to be efficient, rather it is to\r\n illustrate the underlying principle behind bond pricing!\r\n If discount_rate is a DataFrame, then this is assumed to be the rate on each coupon date\r\n and the bond value is computed over time\r\n i.e. 
the index of the discount_rate DataFrame is assumed to be the coupon number\r\n \"\"\"\r\n \r\n if isinstance(discount_rate, pd.DataFrame):\r\n pricing_dates = discount_rate.index\r\n prices = pd.DataFrame(index=pricing_dates, columns=discount_rate.columns)\r\n for t in pricing_dates:\r\n prices.loc[t] = bond_price(maturity-t/coupons_per_year, principal, coupon_rate, coupons_per_year, discount_rate.loc[t])\r\n return prices\r\n \r\n else: # Base case of a single time period\r\n if maturity <= 0: return principal+principal*coupon_rate/coupons_per_year\r\n cash_flows = bond_cash_flows(maturity, principal, coupon_rate, coupons_per_year)\r\n return pv(cash_flows, discount_rate/coupons_per_year)\r\n\r\ndef match_duration(cf_target, cf_short_duration_bond, cf_long_duration_bond, discount_rate):\r\n \"\"\"\r\n Takes cash flows, calculates durations and returns the weight W in cf_short_bond that, along wiht (1-W) in cf_l will have an effective\r\n duration that matches cf_target\r\n \"\"\"\r\n d_target = macaulay_duration(cf_target, discount_rate)\r\n d_short = macaulay_duration(cf_short_duration_bond, discount_rate)\r\n d_long = macaulay_duration(cf_long_duration_bond, discount_rate)\r\n return (d_long - d_target)/(d_long - d_short)\r\n\r\n\r\ndef cir(n_years = 10, n_scenarios = 1, a=0.05, b=0.03, sigma=0.05, steps_per_year=12, r_0=None):\r\n \"\"\"\r\n Implements Cox Ingersoll Ross Model for interest rate\r\n ## $$ dr_t = a(b - r_i)dt + \\sigma\\sqrt{r_t}dW_t $$\r\n b and r_0 are assumed to be the annualized rates, not the short rate\r\n The returned values are the annualized rates as well\r\n Returns dataframe of simulated changes in interest rates over time and prices!\r\n \"\"\"\r\n import math\r\n if r_0 is None: r_0 = b\r\n \r\n # Need to convert the rate to an instantaneous rate, but the difference isn't that problematic\r\n r_0 = ann_to_inst(r_0)\r\n dt = 1/steps_per_year\r\n \r\n # We need random numbers, that's dWt\r\n num_steps = int(n_years*steps_per_year) + 1 # +1 because we initialise an array of rates that starts at row 0, so need one more\r\n shock = np.random.normal(0, scale=np.sqrt(dt), size=(num_steps, n_scenarios))\r\n \r\n # Array of rates\r\n rates = np.empty_like(shock)\r\n rates[0] = r_0\r\n \r\n # For price generation\r\n h = math.sqrt(a**2 + 2*sigma**2)\r\n prices = np.empty_like(shock)\r\n \r\n def price(ttm, r):\r\n _A = ((2*h*math.exp((h+a)*ttm/2))/(2*h+(h+a)*(math.exp(h*ttm)-1)))**(2*a*b/sigma**2)\r\n _B = (2*(math.exp(h*ttm)-1))/(2*h + (h+a)*(math.exp(h*ttm)-1))\r\n _P = _A*np.exp(-_B*r)\r\n return _P\r\n prices[0] = price(n_years, r_0)\r\n \r\n # Now time to simulate the changes in the rates\r\n for step in range(1, num_steps):\r\n r_t = rates[step-1]\r\n d_r_t = a*(b-r_t)*dt + sigma*np.sqrt(r_t)*shock[step]\r\n rates[step] = abs(r_t + d_r_t)\r\n prices[step] = price(n_years-step*dt, rates[step])\r\n \r\n rates = pd.DataFrame(data=inst_to_ann(rates), index=range(num_steps))\r\n prices = pd.DataFrame(data=prices, index=range(num_steps))\r\n \r\n return rates, prices\r\n \r\ndef bond_total_return(monthly_prices, principal, coupon_rate, coupons_per_year):\r\n \"\"\"\r\n Computes the total return of a Bond based on monthly bond prices (big assumption) and coupon payments\r\n Assumes that dividends (coupons) are paid out at the end of the period (e.g. 
end of 3 months for quarterly div)\r\n and that dividends are reinvested in the bond\r\n \"\"\"\r\n coupons = pd.DataFrame(data=0, index=monthly_prices.index, columns=monthly_prices.columns)\r\n t_max = monthly_prices.index.max()\r\n \r\n # Spreads out coupon payments over the period\r\n pay_date = np.linspace(12/coupons_per_year, t_max, int(coupons_per_year*t_max/12), dtype=int)\r\n coupons.iloc[pay_date] = principal*coupon_rate/coupons_per_year\r\n # The shift approach to calculating returns\r\n total_returns = (monthly_prices + coupons)/monthly_prices.shift()-1\r\n return total_returns.dropna()\r\n" ]
[ [ "scipy.stats.norm.ppf", "numpy.dot", "pandas.to_datetime", "numpy.minimum", "pandas.Series", "numpy.sqrt", "numpy.linspace", "pandas.DataFrame", "numpy.exp", "pandas.read_csv", "numpy.less", "numpy.arange", "numpy.empty_like", "matplotlib.pyplot.subplots_adjust", "numpy.log1p", "numpy.repeat", "pandas.concat", "numpy.cumprod", "scipy.optimize.minimize", "numpy.array", "numpy.sum", "numpy.maximum", "matplotlib.pyplot.subplots", "numpy.expm1", "numpy.percentile", "numpy.prod" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.1", "1.5", "1.2", "1.3" ], "scipy": [ "0.13", "1.6", "0.14", "1.10", "0.15", "1.4", "0.16", "1.9", "0.19", "1.5", "0.18", "1.2", "1.7", "0.12", "1.0", "0.17", "1.3", "1.8" ], "tensorflow": [] } ]
jaseweir/tensor2tensor
[ "2a33b152d7835af66a6d20afe7961751047e28dd", "2a33b152d7835af66a6d20afe7961751047e28dd", "2a33b152d7835af66a6d20afe7961751047e28dd", "2a33b152d7835af66a6d20afe7961751047e28dd", "2a33b152d7835af66a6d20afe7961751047e28dd", "2a33b152d7835af66a6d20afe7961751047e28dd", "2a33b152d7835af66a6d20afe7961751047e28dd", "2a33b152d7835af66a6d20afe7961751047e28dd" ]
[ "tensor2tensor/data_generators/gym_env_test.py", "tensor2tensor/utils/multistep_with_adamoptimizer.py", "tensor2tensor/data_generators/wikisum/get_references_commoncrawl.py", "tensor2tensor/models/resnet_test.py", "tensor2tensor/bin/t2t_translate_all.py", "tensor2tensor/data_generators/audio_test.py", "tensor2tensor/data_generators/problem_hparams.py", "tensor2tensor/data_generators/celeba_test.py" ]
[ "# coding=utf-8\n# Copyright 2021 The Tensor2Tensor Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Gym env tests.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\nimport shutil\n\nimport gym\nfrom gym.spaces import Box\nfrom gym.spaces import Discrete\nimport numpy as np\n\nfrom tensor2tensor.data_generators import gym_env\nfrom tensor2tensor.data_generators import problem\nfrom tensor2tensor.rl.gym_utils import make_gym_env\n\nimport tensorflow.compat.v1 as tf\n\n\nclass TestEnv(gym.Env):\n \"\"\"Test environment.\n\n Odd frames are \"done\".\n \"\"\"\n\n action_space = Discrete(1)\n # TODO(afrozm): Gym's Box has a bug for uint8 type, which doesn't allow\n # sampling, send them a PR. Till that time let this be np.int64\n observation_space = Box(\n low=0, high=255, shape=(2, 6, 3), dtype=np.int64\n )\n\n def __init__(self):\n self._counter = 0\n\n def _generate_ob(self):\n return self.observation_space.sample()\n\n def step(self, action):\n done = self._counter % 2 == 1\n self._counter += 1\n reward = 5 if done else -5\n return (self._generate_ob(), reward, done, {})\n\n def reset(self):\n return self._generate_ob()\n\nTEST_ENV_NAME = \"T2TTestEnv-v1\"\n\ngym.envs.register(id=TEST_ENV_NAME, entry_point=TestEnv)\n\n\nclass GymEnvTest(tf.test.TestCase):\n\n splits = (problem.DatasetSplit.TRAIN, problem.DatasetSplit.EVAL)\n\n # TODO(koz4k): Tests for loading:\n # - loaded epoch is read-only\n # - partial write detection (should raise on loading)\n\n def setUp(self):\n self.out_dir = tf.test.get_temp_dir()\n shutil.rmtree(self.out_dir)\n os.mkdir(self.out_dir)\n np.random.seed(0)\n\n def init_batch_and_play(self, env_name, steps_per_epoch=1, epochs=(0,),\n generate_data=False, batch_size=2, **kwargs):\n env = gym_env.T2TGymEnv(env_name, batch_size=batch_size, **kwargs)\n obs = []\n rewards = []\n num_dones = 0\n for epoch in epochs:\n env.start_new_epoch(epoch, self.out_dir)\n _, epoch_obs, epoch_rewards, epoch_num_dones = \\\n self.play(env, steps_per_epoch)\n epoch_obs.append(env.reset())\n if generate_data:\n env.generate_data(self.out_dir)\n obs.extend(epoch_obs)\n rewards.extend(epoch_rewards)\n num_dones += epoch_num_dones\n return env, obs, rewards, num_dones\n\n def play(self, env, n_steps):\n obs = []\n rewards = []\n obs.append(env.reset())\n num_dones = 0\n for _ in range(n_steps):\n step_obs, step_rewards, dones = env.step(actions=[0, 0])\n obs.append(step_obs)\n rewards.append(step_rewards)\n for (i, done) in enumerate(dones):\n if done:\n env.reset([i])\n num_dones += 1\n return env, obs, rewards, num_dones\n\n def test_splits_dataset(self):\n env, _, _, _ = self.init_batch_and_play(\n TEST_ENV_NAME, steps_per_epoch=20, generate_data=True\n )\n\n for split in self.splits:\n self.assertTrue(env.current_epoch_rollouts(split))\n\n def test_split_preserves_number_of_rollouts(self):\n batch_size = 2\n env, _, _, num_dones = self.init_batch_and_play(\n TEST_ENV_NAME, 
steps_per_epoch=20, generate_data=True,\n batch_size=batch_size\n )\n\n num_rollouts_after_split = sum(\n len(env.current_epoch_rollouts(split)) for split in self.splits\n )\n # After the end of epoch all environments are reset, which increases number\n # of rollouts by batch size. Number of rollouts could be increased by one\n # in case a rollout is broken on a boundary between the dataset splits.\n self.assertGreaterEqual(num_rollouts_after_split, num_dones + batch_size)\n self.assertLessEqual(num_rollouts_after_split, num_dones + batch_size + 1)\n\n def test_split_preserves_number_of_frames(self):\n batch_size = 2\n env, _, _, num_dones = self.init_batch_and_play(\n TEST_ENV_NAME, steps_per_epoch=20, generate_data=True,\n batch_size=batch_size\n )\n\n num_frames = sum(\n len(rollout)\n for split in self.splits\n for rollout in env.current_epoch_rollouts(split)\n )\n # There are 3 frames in every rollout: the initial one and two returned by\n # step(). Additionally there are batch_size observations coming from final\n # reset at the end of epoch.\n self.assertEqual(num_frames, 3 * num_dones + batch_size)\n\n def test_generates_data(self):\n # This test needs base env which outputs done after two steps.\n self.init_batch_and_play(\n TEST_ENV_NAME, steps_per_epoch=20, generate_data=True\n )\n\n filenames = os.listdir(self.out_dir)\n self.assertTrue(filenames)\n for filename in filenames:\n path = os.path.join(self.out_dir, filename)\n records = list(tf.python_io.tf_record_iterator(path))\n self.assertTrue(records)\n\n def test_shards_per_epoch(self):\n def num_ending_with(filenames, suffix):\n return sum(\n 1 for filename in filenames if filename.endswith(suffix)\n )\n\n env = gym_env.T2TGymEnv(TEST_ENV_NAME, batch_size=2)\n env.start_new_epoch(0, self.out_dir)\n self.play(env, n_steps=20)\n env.generate_data(self.out_dir)\n\n filenames = os.listdir(self.out_dir)\n num_shards_per_epoch = len(filenames)\n self.assertEqual(num_ending_with(filenames, \".0\"), num_shards_per_epoch)\n\n env.start_new_epoch(1, self.out_dir)\n self.play(env, n_steps=20)\n env.generate_data(self.out_dir)\n\n filenames = os.listdir(self.out_dir)\n self.assertEqual(len(filenames), 2 * num_shards_per_epoch)\n for suffix in (\".0\", \".1\"):\n self.assertEqual(num_ending_with(filenames, suffix), num_shards_per_epoch)\n\n def test_frame_numbers_are_continuous(self):\n env, _, _, _ = self.init_batch_and_play(\n TEST_ENV_NAME, steps_per_epoch=20, generate_data=True\n )\n\n frame_numbers = [\n tf.train.Example.FromString(\n record\n ).features.feature[\"frame_number\"].int64_list.value[0]\n for (_, paths) in env.splits_and_paths(self.out_dir)\n for path in paths\n for record in tf.python_io.tf_record_iterator(path)\n ]\n last_frame_number = -1\n for frame_number in frame_numbers:\n # Every consecutive frame number should be either zero (first frame in\n # a new rollout) or one bigger than the last one (next frame in the same\n # rollout).\n if frame_number > 0:\n self.assertEqual(frame_number, last_frame_number + 1)\n last_frame_number = frame_number\n\n def test_clipping(self):\n _, _, rewards, _ = self.init_batch_and_play(TEST_ENV_NAME,\n steps_per_epoch=2)\n self.assertTrue(np.max(rewards) == 1)\n self.assertTrue(np.min(rewards) == -1)\n\n def test_resize(self):\n env_name = TEST_ENV_NAME\n orig_env = make_gym_env(env_name)\n resize_height_factor = 2\n resize_width_factor = 3\n orig_height, orig_width = orig_env.observation_space.shape[:2]\n env, obs, _, _ = self.init_batch_and_play(\n env_name, steps_per_epoch=1,\n 
resize_height_factor=resize_height_factor,\n resize_width_factor=resize_width_factor)\n for obs_batch in obs:\n ob = obs_batch[0]\n self.assertEqual(ob.shape, env.observation_space.shape)\n height, width = ob.shape[:2]\n self.assertEqual(height, orig_height // resize_height_factor)\n self.assertEqual(width, orig_width // resize_width_factor)\n\n def test_no_resize_option(self):\n env_name = TEST_ENV_NAME\n orig_env = make_gym_env(env_name)\n resize_height_factor = 2\n resize_width_factor = 3\n orig_height, orig_width = orig_env.observation_space.shape[:2]\n env, obs, _, _ = self.init_batch_and_play(\n env_name, steps_per_epoch=1,\n resize_height_factor=resize_height_factor,\n resize_width_factor=resize_width_factor,\n should_derive_observation_space=False)\n for obs_batch in obs:\n ob = obs_batch[0]\n self.assertEqual(ob.shape, env.observation_space.shape)\n height, width = ob.shape[:2]\n self.assertEqual(height, orig_height)\n self.assertEqual(width, orig_width)\n\n def assert_channels(self, env, obs, n_channels):\n self.assertEqual(env.observation_space.shape[2], n_channels)\n self.assertEqual(env.num_channels, n_channels)\n for obs_batch in obs:\n ob = obs_batch[0]\n self.assertEqual(ob.shape[2], n_channels)\n\n def test_channels(self):\n env_name = TEST_ENV_NAME\n env, obs, _, _ = self.init_batch_and_play(env_name, grayscale=True)\n self.assert_channels(env, obs, n_channels=1)\n\n env, obs, _, _ = self.init_batch_and_play(env_name, grayscale=False)\n self.assert_channels(env, obs, n_channels=3)\n\n def test_generating_and_loading_preserves_rollouts(self):\n env_name = TEST_ENV_NAME\n from_env = gym_env.T2TGymEnv(env_name, batch_size=1)\n from_env.start_new_epoch(0, self.out_dir)\n self.play(from_env, n_steps=20)\n from_env.generate_data(self.out_dir)\n\n to_env = gym_env.T2TGymEnv(env_name, batch_size=1)\n to_env.start_new_epoch(0, self.out_dir)\n\n self.assertEqual(\n from_env.current_epoch_rollouts(), to_env.current_epoch_rollouts()\n )\n\nif __name__ == \"__main__\":\n tf.test.main()\n", "# coding=utf-8\n# Copyright 2021 The Tensor2Tensor Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# Copyright 2019 The Tensor2Tensor Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Multi-step optimizers simulating large batches.\n\nOptimizer variants which make it possible to use very large batch sizes with\nlimited GPU memory. 
Optimizers in this module accumulate the gradients for n\nbatches, and call the optimizer's update rule every n batches with the\naccumulated gradients.\n\nSee [Saunders et al., 2018](https://arxiv.org/abs/1805.00456) for details.\n\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport tensorflow.compat.v1 as tf\n# pylint: disable=g-direct-tensorflow-import\nfrom tensorflow.python.ops import resource_variable_ops\nfrom tensorflow.python.training import training_ops\n# pylint: enable=g-direct-tensorflow-import\n\n\nclass MultistepAdamOptimizer(tf.train.Optimizer):\n \"\"\"Adam with SGD updates every n steps with accumulated gradients.\"\"\"\n\n def __init__(self,\n learning_rate=0.001,\n beta1=0.9,\n beta2=0.999,\n epsilon=1e-8,\n use_locking=False,\n name=\"Adam\",\n n=1):\n super(MultistepAdamOptimizer, self).__init__(\n use_locking=use_locking, name=name)\n self._lr = learning_rate\n self._beta1 = beta1\n self._beta2 = beta2\n self._epsilon = epsilon\n # Tensor versions of the constructor arguments, created in _prepare().\n self._lr_t = None\n self._beta1_t = None\n self._beta2_t = None\n self._epsilon_t = None\n self._n = n # Call Adam optimizer every n batches with accumulated grads\n self._n_t = None # n as tensor\n\n def _get_beta_accumulators(self):\n with tf.init_scope():\n if tf.executing_eagerly():\n graph = None\n else:\n graph = tf.get_default_graph()\n return (self._get_non_slot_variable(\"beta1_power\", graph=graph),\n self._get_non_slot_variable(\"beta2_power\", graph=graph))\n\n def _create_slots(self, var_list):\n \"\"\"Create slot variables for Adam with accumulated gradients.\"\"\"\n first_var = min(var_list, key=lambda x: x.name)\n self._create_non_slot_variable(\n initial_value=self._beta1, name=\"beta1_power\", colocate_with=first_var)\n self._create_non_slot_variable(\n initial_value=self._beta2, name=\"beta2_power\", colocate_with=first_var)\n # if iter is initialized as an int32, this optimizer could not run\n # with tensorflow_hub with a tensorflow-gpu version\n self._create_non_slot_variable(\n initial_value=0.0 if self._n == 1 else 1.0,\n name=\"iter\",\n colocate_with=first_var)\n # Create slots for the first and second moments, as well as grad_acc.\n for v in var_list:\n self._zeros_slot(v, \"m\", self._name)\n self._zeros_slot(v, \"v\", self._name)\n self._zeros_slot(v, \"grad_acc\", self._name)\n\n def _get_iter_variable(self):\n graph = (None if tf.executing_eagerly() else tf.get_default_graph())\n return self._get_non_slot_variable(\"iter\", graph=graph)\n\n def _prepare(self):\n lr = self._call_if_callable(self._lr)\n beta1 = self._call_if_callable(self._beta1)\n beta2 = self._call_if_callable(self._beta2)\n epsilon = self._call_if_callable(self._epsilon)\n self._beta1_t = tf.convert_to_tensor(beta1, name=\"beta1\")\n self._beta2_t = tf.convert_to_tensor(beta2, name=\"beta2\")\n self._lr_t = tf.convert_to_tensor(lr, name=\"learning_rate\")\n self._epsilon_t = tf.convert_to_tensor(epsilon, name=\"epsilon\")\n self._n_t = tf.convert_to_tensor(self._n, name=\"n\")\n\n def _apply_cond(self, apply_fn, grad, var, *args, **kwargs):\n \"\"\"Apply conditionally if counter is zero.\"\"\"\n grad_acc = self.get_slot(var, \"grad_acc\")\n\n def apply_adam(grad_acc, apply_fn, grad, var, *args, **kwargs):\n total_grad = (grad_acc + grad) / tf.cast(self._n_t, grad.dtype)\n adam_op = apply_fn(total_grad, var, *args, **kwargs)\n with tf.control_dependencies([adam_op]):\n grad_acc_to_zero_op = 
grad_acc.assign(\n tf.zeros_like(grad_acc), use_locking=self._use_locking)\n return tf.group(adam_op, grad_acc_to_zero_op)\n\n def accumulate_gradient(grad_acc, grad):\n assign_op = tf.assign_add(grad_acc, grad, use_locking=self._use_locking)\n return tf.group(assign_op) # Strip return value\n\n return tf.cond(\n tf.equal(self._get_iter_variable(), 0),\n lambda: apply_adam(grad_acc, apply_fn, grad, var, *args, **kwargs),\n lambda: accumulate_gradient(grad_acc, grad))\n\n def _apply_dense(self, grad, var):\n return self._apply_cond(self._apply_dense_in_action, grad, var)\n\n def _apply_dense_in_action(self, grad, var):\n m = self.get_slot(var, \"m\")\n v = self.get_slot(var, \"v\")\n beta1_power, beta2_power = self._get_beta_accumulators()\n return training_ops.apply_adam(\n var,\n m,\n v,\n tf.cast(beta1_power, var.dtype.base_dtype),\n tf.cast(beta2_power, var.dtype.base_dtype),\n tf.cast(self._lr_t, var.dtype.base_dtype),\n tf.cast(self._beta1_t, var.dtype.base_dtype),\n tf.cast(self._beta2_t, var.dtype.base_dtype),\n tf.cast(self._epsilon_t, var.dtype.base_dtype),\n grad,\n use_locking=self._use_locking).op\n\n def _resource_apply_dense(self, grad, var):\n return self._apply_cond(self._resource_apply_dense_in_action, grad, var)\n\n def _resource_apply_dense_in_action(self, grad, var):\n m = self.get_slot(var, \"m\")\n v = self.get_slot(var, \"v\")\n beta1_power, beta2_power = self._get_beta_accumulators()\n return training_ops.resource_apply_adam(\n var.handle,\n m.handle,\n v.handle,\n tf.cast(beta1_power, grad.dtype.base_dtype),\n tf.cast(beta2_power, grad.dtype.base_dtype),\n tf.cast(self._lr_t, var.dtype.base_dtype),\n tf.cast(self._beta1_t, grad.dtype.base_dtype),\n tf.cast(self._beta2_t, grad.dtype.base_dtype),\n tf.cast(self._epsilon_t, grad.dtype.base_dtype),\n grad,\n use_locking=self._use_locking)\n\n def _apply_sparse_shared(self, grad, var, indices, scatter_add):\n beta1_power, beta2_power = self._get_beta_accumulators()\n beta1_power = tf.cast(beta1_power, var.dtype.base_dtype)\n beta2_power = tf.cast(beta2_power, var.dtype.base_dtype)\n lr_t = tf.cast(self._lr_t, var.dtype.base_dtype)\n beta1_t = tf.cast(self._beta1_t, var.dtype.base_dtype)\n beta2_t = tf.cast(self._beta2_t, var.dtype.base_dtype)\n epsilon_t = tf.cast(self._epsilon_t, var.dtype.base_dtype)\n lr = (lr_t * tf.sqrt(1 - beta2_power) / (1 - beta1_power))\n # m_t = beta1 * m + (1 - beta1) * g_t\n m = self.get_slot(var, \"m\")\n m_scaled_g_values = grad * (1 - beta1_t)\n m_t = tf.assign(m, m * beta1_t, use_locking=self._use_locking)\n with tf.control_dependencies([m_t]):\n m_t = scatter_add(m, indices, m_scaled_g_values)\n # v_t = beta2 * v + (1 - beta2) * (g_t * g_t)\n v = self.get_slot(var, \"v\")\n v_scaled_g_values = (grad * grad) * (1 - beta2_t)\n v_t = tf.assign(v, v * beta2_t, use_locking=self._use_locking)\n with tf.control_dependencies([v_t]):\n v_t = scatter_add(v, indices, v_scaled_g_values)\n v_sqrt = tf.sqrt(v_t)\n var_update = tf.assign_sub(\n var, lr * m_t / (v_sqrt + epsilon_t), use_locking=self._use_locking)\n return tf.group(*[var_update, m_t, v_t])\n\n def _apply_sparse(self, grad, var):\n # TODO(fstahlberg): Implement a sparse version\n tf.logging.warning(\"MultistepAdamOptimizer does not support sparse updates\")\n dense_grad = tf.convert_to_tensor(grad)\n return self._apply_cond(self._apply_dense_in_action, dense_grad, var)\n\n def _resource_apply_sparse_duplicate_indices(self, grad, var, indices):\n tf.logging.warning(\"MultistepAdamOptimizer does not support sparse updates\")\n # Note that 
conversion to a dense Tensor handles duplicate `indices`\n # correctly (summing them). A real sparse implementation will probably want\n # to override _resource_apply_sparse instead so it gets them de-duplicated\n # automatically.\n dense_grad = tf.convert_to_tensor(\n tf.IndexedSlices(\n values=grad, indices=indices, dense_shape=tf.shape(var)))\n return self._apply_cond(self._resource_apply_dense_in_action, dense_grad,\n var)\n\n def _resource_scatter_add(self, x, i, v):\n with tf.control_dependencies(\n [resource_variable_ops.resource_scatter_add(x.handle, i, v)]):\n return x.value()\n\n def _resource_apply_sparse(self, grad, var, indices):\n return self._apply_sparse_shared(grad, var, indices,\n self._resource_scatter_add)\n\n def _finish(self, update_ops, name_scope):\n \"\"\"Updates beta_power variables every n batches and incrs counter.\"\"\"\n iter_ = self._get_iter_variable()\n beta1_power, beta2_power = self._get_beta_accumulators()\n with tf.control_dependencies(update_ops):\n with tf.colocate_with(iter_):\n\n def update_beta_op():\n update_beta1 = beta1_power.assign(\n beta1_power * self._beta1_t, use_locking=self._use_locking)\n update_beta2 = beta2_power.assign(\n beta2_power * self._beta2_t, use_locking=self._use_locking)\n return tf.group(update_beta1, update_beta2)\n\n maybe_update_beta = tf.cond(\n tf.equal(iter_, 0), update_beta_op, tf.no_op)\n with tf.control_dependencies([maybe_update_beta]):\n # TODO(cuong): It is suboptimal here because we have to cast twice\n # (float to int, and then int to float)\n update_iter = iter_.assign(\n tf.cast(\n tf.mod(tf.cast(iter_ + 1.0, tf.int32), self._n_t),\n tf.float32),\n use_locking=self._use_locking)\n return tf.group(\n *update_ops + [update_iter, maybe_update_beta], name=name_scope)\n", "# coding=utf-8\n# Copyright 2021 The Tensor2Tensor Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Extract references from CommonCrawl files.\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\nimport tempfile\n\nfrom tensor2tensor.data_generators.wikisum import utils\nfrom tensor2tensor.data_generators.wikisum import wikisum\n\nimport tensorflow.compat.v1 as tf\n\nflags = tf.flags\nFLAGS = flags.FLAGS\n\nflags.DEFINE_integer(\"num_tasks\", 1000, \"Number of parallel tasks.\")\nflags.DEFINE_integer(\"task_id\", 0, \"Task id in a parallel run.\")\nflags.DEFINE_string(\"metadata_dir\",\n \"gs://tensor2tensor-data/wikisum/commoncrawl_metadata/\",\n \"Path to metadata files specifying what references are in \"\n \"which CommonCrawl files.\")\nflags.DEFINE_string(\"out_dir\", None, \"Directory to write references to.\")\nflags.DEFINE_string(\"commoncrawl_wet_dir\", None,\n \"Path to CommonCrawl wet.gz files locally. 
If not \"\n \"provided, will download.\")\n\n\ndef main(_):\n assert FLAGS.out_dir\n assert FLAGS.metadata_dir\n out_dir = os.path.join(FLAGS.out_dir, \"process_%d\" % FLAGS.task_id)\n tf.gfile.MakeDirs(out_dir)\n\n with utils.timing(\"get_refs_commoncrawl\"):\n # Get all WET files\n if FLAGS.commoncrawl_wet_dir:\n wet_files = tf.gfile.Glob(\n os.path.join(FLAGS.commoncrawl_wet_dir, \"*.wet.gz\"))\n else:\n tmp_dir = tempfile.gettempdir()\n wet_files = list(\n utils.wet_download_urls(utils.WET_PATHS_BY_DATE[\"0917\"], tmp_dir))\n\n # Shard and select this task's work\n wet_files.sort()\n wet_files = utils.shard(wet_files, FLAGS.num_tasks)[FLAGS.task_id]\n tf.logging.info(\"Sharded out WET files. Processing %d files\",\n len(wet_files))\n\n wikisum.extract_references_from_wets(wet_files, FLAGS.metadata_dir, out_dir)\n\n\nif __name__ == \"__main__\":\n tf.logging.set_verbosity(tf.logging.INFO)\n tf.app.run()\n", "# coding=utf-8\n# Copyright 2021 The Tensor2Tensor Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Resnet tests.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport numpy as np\n\nfrom tensor2tensor.data_generators import problem_hparams\nfrom tensor2tensor.layers import modalities\nfrom tensor2tensor.models import resnet\n\nimport tensorflow.compat.v1 as tf\n\n\ndef resnet_tiny_cpu():\n hparams = resnet.resnet_base()\n hparams.layer_sizes = [2, 2, 2, 2]\n hparams.use_nchw = False\n return hparams\n\n\nclass ResnetTest(tf.test.TestCase):\n\n def _test_resnet(self, img_size, output_size):\n vocab_size = 9\n batch_size = 2\n x = np.random.randint(\n 256, size=(batch_size, img_size, img_size, 3))\n y = np.random.randint(\n 1, high=vocab_size, size=(batch_size, 1, 1, 1))\n hparams = resnet_tiny_cpu()\n p_hparams = problem_hparams.test_problem_hparams(vocab_size,\n vocab_size,\n hparams)\n p_hparams.modality[\"inputs\"] = modalities.ModalityType.IMAGE\n p_hparams.modality[\"targets\"] = modalities.ModalityType.CLASS_LABEL\n with self.test_session() as session:\n features = {\n \"inputs\": tf.constant(x, dtype=tf.int32),\n \"targets\": tf.constant(y, dtype=tf.int32),\n }\n model = resnet.Resnet(hparams, tf.estimator.ModeKeys.TRAIN, p_hparams)\n logits, _ = model(features)\n session.run(tf.global_variables_initializer())\n res = session.run(logits)\n self.assertEqual(res.shape, (batch_size,) + output_size + (1, vocab_size))\n\n def testResnetLarge(self):\n self._test_resnet(img_size=224, output_size=(1, 1))\n\n\nif __name__ == \"__main__\":\n tf.test.main()\n", "# coding=utf-8\n# Copyright 2021 The Tensor2Tensor Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# 
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Translate a file with all checkpoints in a given directory.\n\nt2t-decoder will be executed with these parameters:\n--problem\n--data_dir\n--output_dir with the value of --model_dir\n--decode_from_file with the value of --source\n--decode_hparams with properly formatted --beam_size and --alpha\n--checkpoint_path automatically filled\n--decode_to_file automatically filled\n\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\nimport shutil\nfrom tensor2tensor.utils import bleu_hook\n\nimport tensorflow.compat.v1 as tf\n\nflags = tf.flags\nFLAGS = flags.FLAGS\n\n# t2t-translate-all specific options\nflags.DEFINE_string(\"decoder_command\", \"t2t-decoder {params}\",\n \"Which command to execute instead t2t-decoder. \"\n \"{params} is replaced by the parameters. Useful e.g. for \"\n \"qsub wrapper.\")\nflags.DEFINE_string(\"model_dir\", \"\",\n \"Directory to load model checkpoints from.\")\nflags.DEFINE_string(\"source\", None,\n \"Path to the source-language file to be translated\")\nflags.DEFINE_string(\"translations_dir\", \"translations\",\n \"Where to store the translated files.\")\nflags.DEFINE_integer(\"min_steps\", 0, \"Ignore checkpoints with less steps.\")\nflags.DEFINE_integer(\"wait_minutes\", 0,\n \"Wait upto N minutes for a new checkpoint\")\n\n# options derived from t2t-decoder\nflags.DEFINE_integer(\"beam_size\", 4, \"Beam-search width.\")\nflags.DEFINE_float(\"alpha\", 0.6, \"Beam-search alpha.\")\nflags.DEFINE_string(\"model\", \"transformer\", \"see t2t-decoder\")\nflags.DEFINE_string(\"t2t_usr_dir\", None, \"see t2t-decoder\")\nflags.DEFINE_string(\"data_dir\", None, \"see t2t-decoder\")\nflags.DEFINE_string(\"problem\", None, \"see t2t-decoder\")\nflags.DEFINE_string(\"hparams_set\", \"transformer_big_single_gpu\",\n \"see t2t-decoder\")\n\n\ndef main(_):\n tf.logging.set_verbosity(tf.logging.INFO)\n # pylint: disable=unused-variable\n model_dir = os.path.expanduser(FLAGS.model_dir)\n translations_dir = os.path.expanduser(FLAGS.translations_dir)\n source = os.path.expanduser(FLAGS.source)\n tf.gfile.MakeDirs(translations_dir)\n translated_base_file = os.path.join(translations_dir, FLAGS.problem)\n\n # Copy flags.txt with the original time, so t2t-bleu can report correct\n # relative time.\n flags_path = os.path.join(translations_dir, FLAGS.problem + \"-flags.txt\")\n if not os.path.exists(flags_path):\n shutil.copy2(os.path.join(model_dir, \"flags.txt\"), flags_path)\n\n locals_and_flags = {\"FLAGS\": FLAGS}\n for model in bleu_hook.stepfiles_iterator(model_dir, FLAGS.wait_minutes,\n FLAGS.min_steps):\n tf.logging.info(\"Translating \" + model.filename)\n out_file = translated_base_file + \"-\" + str(model.steps)\n locals_and_flags.update(locals())\n if os.path.exists(out_file):\n tf.logging.info(out_file + \" already exists, so skipping it.\")\n else:\n tf.logging.info(\"Translating \" + out_file)\n params = (\n \"--t2t_usr_dir={FLAGS.t2t_usr_dir} --output_dir={model_dir} \"\n \"--data_dir={FLAGS.data_dir} --problem={FLAGS.problem} \"\n \"--decode_hparams=beam_size={FLAGS.beam_size},alpha={FLAGS.alpha} \"\n \"--model={FLAGS.model} --hparams_set={FLAGS.hparams_set} \"\n \"--checkpoint_path={model.filename} --decode_from_file={source} \"\n \"--decode_to_file={out_file} --keep_timestamp\"\n 
).format(**locals_and_flags)\n command = FLAGS.decoder_command.format(**locals())\n tf.logging.info(\"Running:\\n\" + command)\n os.system(command)\n # pylint: enable=unused-variable\n\n\nif __name__ == \"__main__\":\n tf.logging.set_verbosity(tf.logging.INFO)\n tf.app.run()\n", "# coding=utf-8\n# Copyright 2021 The Tensor2Tensor Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Tests for tensor2tensor.data_generators.audio.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport io\nimport os\nfrom tensor2tensor.data_generators import audio\n\nimport tensorflow.compat.v1 as tf\n\n\nclass AudioTest(tf.test.TestCase):\n\n def testDataCollection(self):\n # Generate a trivial source and target file.\n tmp_dir = self.get_temp_dir()\n test_files = [\n \"dir1/file1\",\n \"dir1/file2\",\n \"dir1/dir2/file3\",\n \"dir1/dir2/dir3/file4\",\n ]\n for filename in test_files:\n input_filename = os.path.join(tmp_dir, filename + \".WAV\")\n target_filename = os.path.join(tmp_dir, filename + \".WRD\")\n directories = os.path.dirname(input_filename)\n if not os.path.exists(directories):\n os.makedirs(directories)\n io.open(input_filename, \"wb\")\n io.open(target_filename, \"wb\")\n\n data_dict = audio._collect_data(tmp_dir, \".WAV\", \".WRD\")\n expected = [os.path.join(tmp_dir, filename) for filename in test_files]\n self.assertEqual(sorted(list(data_dict)), sorted(expected))\n\n # Clean up.\n for filename in test_files:\n os.remove(os.path.join(tmp_dir, \"%s.WAV\" % filename))\n os.remove(os.path.join(tmp_dir, \"%s.WRD\" % filename))\n\n\nif __name__ == \"__main__\":\n tf.test.main()\n", "# coding=utf-8\n# Copyright 2021 The Tensor2Tensor Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Hyperparameters defining different problems.\n\n\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\nfrom tensor2tensor.data_generators import problem\nfrom tensor2tensor.data_generators import text_encoder\nfrom tensor2tensor.layers import modalities\nfrom tensor2tensor.utils import registry\n\nimport tensorflow.compat.v1 as tf\n\n# TODO(rsepassi): Merge these problems with their data generators. 
Currently\n# they only implement the hparams.\n\n\nclass AudioTimitProblem(problem.Problem):\n \"\"\"Base class for TIMIT problems.\"\"\"\n\n def example_reading_spec(self):\n data_fields = {\n \"inputs\": tf.VarLenFeature(tf.int64),\n \"audio/sample_count\": tf.FixedLenFeature((), tf.int64),\n \"audio/sample_width\": tf.FixedLenFeature((), tf.int64),\n \"targets\": tf.VarLenFeature(tf.int64),\n }\n return data_fields, None\n\n def preprocess_example(self, example, mode, hparams):\n example = super(AudioTimitProblem, self).preprocess_example(\n example, mode, hparams)\n # Reshape audio to proper shape\n sample_count = tf.to_int32(example.pop(\"audio/sample_count\"))\n sample_width = tf.to_int32(example.pop(\"audio/sample_width\"))\n channel_count = 1\n example[\"inputs\"] = tf.reshape(example[\"inputs\"],\n [sample_count, sample_width, channel_count])\n return example\n\n\[email protected]_problem\nclass AudioTimitCharactersTune(AudioTimitProblem):\n \"\"\"TIMIT to characters.\"\"\"\n\n def feature_encoders(self, _):\n return {\n \"inputs\": text_encoder.TextEncoder(),\n \"targets\": text_encoder.ByteTextEncoder(),\n }\n\n def hparams(self, defaults, model_hparams):\n hp = defaults\n hp.modality = {\"inputs\": modalities.ModalityType.SPEECH_RECOGNITION,\n \"targets\": modalities.ModalityType.SYMBOL}\n hp.vocab_size = {\"inputs\": None,\n \"targets\": 256}\n\n\[email protected]_problem\nclass AudioTimitTokens8kTune(AudioTimitProblem):\n \"\"\"TIMIT to tokens.\"\"\"\n\n @property\n def target_vocab_size(self):\n return 2**13 # 8192\n\n def feature_encoders(self, data_dir):\n vocab_filename = os.path.join(data_dir,\n \"vocab.endefr.%d\" % self.target_vocab_size)\n subtokenizer = text_encoder.SubwordTextEncoder(vocab_filename)\n return {\n \"inputs\": text_encoder.TextEncoder(),\n \"targets\": subtokenizer,\n }\n\n def hparams(self, defaults, model_hparams):\n hp = defaults\n hp.modality = {\"inputs\": modalities.ModalityType.SPEECH_RECOGNITION,\n \"targets\": modalities.ModalityType.SYMBOL}\n hp.vocab_size = {\n \"inputs\": None,\n \"targets\": self.get_feature_encoders()[\"targets\"].vocab_size,\n }\n hp.batch_size_multiplier = 256\n hp.loss_multiplier = 2.0\n hp.input_space_id = 13\n hp.target_space_id = 3\n\n\[email protected]_problem\nclass AudioTimitTokens8kTest(AudioTimitTokens8kTune):\n \"\"\"TIMIT to tokens.\"\"\"\n pass\n\n\[email protected]_problem\nclass ParsingEnglishPtb8k(problem.Problem):\n \"\"\"Parsing.\"\"\"\n\n @property\n def target_vocab_size(self):\n return 2**13 # 8192\n\n def feature_encoders(self, data_dir):\n vocab_filename = os.path.join(data_dir,\n \"vocab.endefr.%d\" % self.target_vocab_size)\n subtokenizer = text_encoder.SubwordTextEncoder(vocab_filename)\n return {\n \"inputs\": subtokenizer,\n \"targets\": subtokenizer,\n }\n\n def hparams(self, defaults, model_hparams):\n hp = defaults\n hp.modality = {\"inputs\": modalities.ModalityType.SYMBOL,\n \"targets\": modalities.ModalityType.SYMBOL}\n hp.vocab_size = {\n \"inputs\": self.get_feature_encoders()[\"inputs\"].vocab_size,\n \"targets\": self.get_feature_encoders()[\"targets\"].vocab_size,\n }\n hp.batch_size_multiplier = 256\n hp.loss_multiplier = 2.0\n hp.input_space_id = 3\n hp.target_space_id = 15\n\n\[email protected]_problem\nclass ParsingEnglishPtb16k(problem.Problem):\n \"\"\"Parsing.\"\"\"\n\n @property\n def vocab_prefix(self):\n return \"wsj\"\n\n @property\n def inputs_target_vocab_size(self):\n return 2**9 # 512\n\n @property\n def targets_target_vocab_size(self):\n return 2**14 # 16384\n\n def 
feature_encoders(self, data_dir):\n source_vocab_filename = os.path.join(\n data_dir,\n self.vocab_prefix + \"_source.vocab.%d\" % self.inputs_target_vocab_size)\n target_vocab_filename = os.path.join(\n data_dir,\n self.vocab_prefix + \"_target.vocab.%d\" % self.targets_target_vocab_size)\n source_subtokenizer = text_encoder.SubwordTextEncoder(source_vocab_filename)\n target_subtokenizer = text_encoder.SubwordTextEncoder(target_vocab_filename)\n return {\n \"inputs\": source_subtokenizer,\n \"targets\": target_subtokenizer,\n }\n\n def hparams(self, defaults, model_hparams):\n hp = defaults\n hp.modality = {\"inputs\": modalities.ModalityType.SYMBOL,\n \"targets\": modalities.ModalityType.SYMBOL}\n hp.vocab_size = {\n \"inputs\": self.get_feature_encoders()[\"inputs\"].vocab_size,\n \"targets\": self.get_feature_encoders()[\"targets\"].vocab_size,\n }\n hp.input_space_id = 3\n hp.target_space_id = 15\n\n\nclass TestProblem(problem.Problem):\n \"\"\"Test problem.\"\"\"\n\n def __init__(self, input_vocab_size, target_vocab_size):\n super(TestProblem, self).__init__(False, False)\n self.input_vocab_size = input_vocab_size\n self.target_vocab_size = target_vocab_size\n\n def hparams(self, defaults, model_hparams):\n hp = defaults\n hp.modality = {\"inputs\": modalities.ModalityType.SYMBOL,\n \"targets\": modalities.ModalityType.SYMBOL}\n hp.vocab_size = {\"inputs\": self.input_vocab_size,\n \"targets\": self.target_vocab_size}\n\n\ndef test_problem_hparams(input_vocab_size=None,\n target_vocab_size=None,\n model_hparams=None):\n \"\"\"Problem hparams for testing model bodies.\"\"\"\n p = TestProblem(input_vocab_size, target_vocab_size)\n return p.get_hparams(model_hparams)\n", "# coding=utf-8\n# Copyright 2021 The Tensor2Tensor Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Tests for CelebA.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom absl.testing import parameterized\nfrom tensor2tensor.data_generators import celeba\nfrom tensor2tensor.utils import hparam\n\nimport tensorflow.compat.v1 as tf\n\n\nclass CelebaTest(parameterized.TestCase, tf.test.TestCase):\n\n @parameterized.named_parameters(\n (\"Default\", None),\n (\"Area\", \"AREA\"),\n (\"Dilated\", \"DILATED\"))\n def testCelebaMultiResolutionPreprocessExample(self, resize_method):\n example = {\"inputs\": tf.random_uniform([218, 178, 3], minval=-1.)}\n mode = tf.estimator.ModeKeys.TRAIN\n hparams = hparam.HParams(resolutions=[8, 16, 32])\n if resize_method is not None:\n hparams.resize_method = resize_method\n\n problem = celeba.ImageCelebaMultiResolution()\n preprocessed_example = problem.preprocess_example(example, mode, hparams)\n self.assertLen(preprocessed_example, 2)\n self.assertEqual(preprocessed_example[\"inputs\"].shape, (138, 138, 3))\n self.assertEqual(preprocessed_example[\"targets\"].shape, (42, 32, 3))\n\n\nif __name__ == \"__main__\":\n tf.test.main()\n" ]
[ [ "tensorflow.compat.v1.train.Example.FromString", "numpy.random.seed", "tensorflow.compat.v1.test.main", "numpy.min", "numpy.max", "tensorflow.compat.v1.test.get_temp_dir", "tensorflow.compat.v1.python_io.tf_record_iterator" ], [ "tensorflow.compat.v1.assign_sub", "tensorflow.compat.v1.get_default_graph", "tensorflow.compat.v1.assign", "tensorflow.compat.v1.equal", "tensorflow.compat.v1.convert_to_tensor", "tensorflow.compat.v1.sqrt", "tensorflow.compat.v1.control_dependencies", "tensorflow.compat.v1.executing_eagerly", "tensorflow.compat.v1.logging.warning", "tensorflow.compat.v1.group", "tensorflow.compat.v1.assign_add", "tensorflow.compat.v1.shape", "tensorflow.python.ops.resource_variable_ops.resource_scatter_add", "tensorflow.compat.v1.zeros_like", "tensorflow.compat.v1.cast", "tensorflow.compat.v1.init_scope", "tensorflow.compat.v1.colocate_with" ], [ "tensorflow.compat.v1.gfile.MakeDirs", "tensorflow.compat.v1.app.run", "tensorflow.compat.v1.logging.set_verbosity" ], [ "tensorflow.compat.v1.global_variables_initializer", "numpy.random.randint", "tensorflow.compat.v1.constant", "tensorflow.compat.v1.test.main" ], [ "tensorflow.compat.v1.gfile.MakeDirs", "tensorflow.compat.v1.logging.info", "tensorflow.compat.v1.app.run", "tensorflow.compat.v1.logging.set_verbosity" ], [ "tensorflow.compat.v1.test.main" ], [ "tensorflow.compat.v1.FixedLenFeature", "tensorflow.compat.v1.reshape", "tensorflow.compat.v1.VarLenFeature" ], [ "tensorflow.compat.v1.random_uniform", "tensorflow.compat.v1.test.main" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
rlouf/socio-spatial-stratification
[ "0b6229248882253548efed2442fd04d69cb617e4" ]
[ "bin/plot_neighbourhoods.py" ]
[ "\"\"\"plot_neighbourhoods.py\n\nPlot the neighbourhoods of all classes for the city specified as an input with the following color codes\n* Black: where the class is over-represented (with 99% CI)\n* Light grey: where the class is 'normally' represented\n* White: where the class is under-represented\n\"\"\"\nimport sys\nimport math\nimport csv\nimport fiona\nfrom descartes import PolygonPatch\nfrom shapely.geometry import shape\nfrom matplotlib import pylab as plt\n\n\n#\n# Parameters\n#\n\n## Read city from input\ncity = sys.argv[1]\n\n## Colors\ncolours = {'over': 'black',\n 'norm': 'grey',\n 'under': 'white'}\n\n\n\n\n#\n# Import data\n#\n\n## Blockgroups borders\nblocks = {}\nwith fiona.open('data/shp/msa/%s/blockgroups.shp'%city, 'r', 'ESRI Shapefile') as source:\n for f in source:\n blocks[f['properties']['BKGPIDFP00']] = shape(f['geometry'])\n\n## List of MSA\nmsa = {}\nwith open('data/names/msa.csv', 'r') as source:\n reader = csv.reader(source, delimiter='\\t')\n reader.next()\n for rows in reader:\n msa[rows[0]] = rows[1]\n\n## Classes\nclasses = {}\nwith open('extr/classes/msa_average/classes.csv', 'r') as source:\n reader = csv.reader(source, delimiter='\\t')\n reader.next()\n for rows in reader:\n classes[rows[0]] =[int(r) for r in rows[1:]]\n\n## Representation values\nrep_vals = {}\nwith open('extr/representation/classes/msa/%s_values.csv'%city, 'r') as source:\n reader = csv.reader(source, delimiter='\\t')\n classes_list = reader.next()[1:]\n for rows in reader:\n rep_vals[rows[0]] = {cl:float(r) for cl,r in zip(classes_list,\n rows[1:])}\n\n## Representation variance\nrep_var = {}\nwith open('extr/representation/classes/msa/%s_variance.csv'%city, 'r') as source:\n reader = csv.reader(source, delimiter='\\t')\n classes_list = reader.next()[1:]\n for rows in reader:\n rep_var[rows[0]] = {cl:float(r) for cl,r in zip(classes_list,\n rows[1:])}\n\n \n#\n# Transform representation values and variance into list of areal units\n#\nrepresentation = {cl:{} for cl in classes} # cl:{bckgp:over, under, or norm}\nfor bg in rep_vals:\n for cl in classes:\n rep = rep_vals[bg][cl]-1\n std = math.sqrt(rep_var[bg][cl])\n\n ## if wihin 2.57 sigma or nan, mark as normal\n if abs(rep) <= 2.57*std or math.isnan(rep):\n representation[cl][bg] = 'norm'\n ## else it is over-represented or under\n else:\n if rep < 0:\n representation[cl][bg] = 'under'\n else: \n representation[cl][bg] = 'over'\n\n\n#\n# Plot \n#\n\nfig = plt.figure()\nfor i,cl in enumerate(classes):\n if i==0:\n ax = fig.add_subplot(1,len(classes),i+1)\n else:\n ax = fig.add_subplot(1,len(classes),i+1, sharex=ax, sharey=ax)\n for bg in representation[cl]:\n color = colours[representation[cl][bg]]\n if blocks[bg].geom_type==\"Polygon\":\n patch = PolygonPatch(blocks[bg], fc=color, ec='None', alpha=1, zorder=1)\n ax.add_patch(patch)\n else:\n for t in blocks[bg]:\n patch = PolygonPatch(t, fc=color, ec='None', alpha=1, zorder=1)\n ax.add_patch(patch)\n\n ax.relim()\n ax.axis('off')\n ax.autoscale_view(True,True,True)\n ax.set_title(r\"$%s$\"%cl,fontsize=25)\n\n#plt.savefig('figures/paper/%s_neighbourhoods.png'%msa[city].replace(\" \",\"\").replace(\",\", \"\"),\n# bbox_inches='tight')\nplt.show()\n" ]
[ [ "matplotlib.pylab.show", "matplotlib.pylab.figure" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
hchaudhari73/ga-learner-dsmp-repo
[ "42c0bf7b4bbeef10d187c74c8803b1fdca5d2cdd", "42c0bf7b4bbeef10d187c74c8803b1fdca5d2cdd" ]
[ "Car-insurance-claim/code.py", "leading_club/code.py" ]
[ "# --------------\nimport pandas as pd \nfrom sklearn.model_selection import train_test_split\n\ndf = pd.read_csv(path)\nprint(df.head())\nprint(df.info())\ncolumns = [\"INCOME\", \"HOME_VAL\",'BLUEBOOK','OLDCLAIM','CLM_AMT']\nfor col in columns:\n df[col] = df[col].map(lambda x: str(x).replace(\"$\",\"\").replace(\",\",\"\"))\nX = df.drop(\"CLAIM_FLAG\", axis=1)\ny = df.CLAIM_FLAG\nX_train,X_test,y_train,y_test = train_test_split(X,y, test_size=0.3, random_state=6)\n\n\n# --------------\n# Code starts here\nfor col in columns:\n X_train[col] = X_train[col].astype(float)\n X_test[col] = X_test[col].astype(float)\n\nX_train.isnull().any()\nX_test.isnull().any()\n# Code ends here\n\n\n# --------------\n# Code starts here\nX_train.dropna(subset=['YOJ','OCCUPATION'],inplace=True)\nX_test.dropna(subset=['YOJ','OCCUPATION'],inplace=True)\ny_train = y_train[X_train.index]\ny_test = y_test[X_test.index]\n\n\nfor col in [\"AGE\",\"CAR_AGE\",\"INCOME\", \"HOME_VAL\"]:\n X_train[col].fillna(X_train[col].mean(), inplace=True)\n X_test[col].fillna(X_test[col].mean(), inplace=True)\n\n\n# Code ends here\n\n\n# --------------\nfrom sklearn.preprocessing import LabelEncoder\ncolumns = [\"PARENT1\",\"MSTATUS\",\"GENDER\",\"EDUCATION\",\"OCCUPATION\",\"CAR_USE\",\"CAR_TYPE\",\"RED_CAR\",\"REVOKED\"]\n\n# Code starts here\nle = LabelEncoder()\nfor col in columns:\n X_train[col] = le.fit_transform(X_train[col].astype(str))\n X_test[col] = le.transform(X_test[col].astype(str))\n\n# Code ends here\n\n\n\n# --------------\nfrom sklearn.metrics import precision_score \nfrom sklearn.metrics import accuracy_score\nfrom sklearn.linear_model import LogisticRegression\n\n\n\n# code starts here \nmodel = LogisticRegression(random_state=6)\nmodel.fit(X_train, y_train)\ny_pred = model.predict(X_test)\nscore = accuracy_score(y_test, y_pred)\nprint(score)\n# Code ends here\n\n\n# --------------\nfrom sklearn.preprocessing import StandardScaler\nfrom imblearn.over_sampling import SMOTE\n\n# code starts here\nsmote = SMOTE(random_state=9)\nX_train, y_train = smote.fit_sample(X_train, y_train)\nscaler = StandardScaler()\nX_train = scaler.fit_transform(X_train)\nX_test = scaler.transform(X_test)\n\n\n# Code ends here\n\n\n# --------------\n# Code Starts here\nmodel = LogisticRegression()\nmodel.fit(X_train, y_train)\ny_pred = model.predict(X_test)\nscore = accuracy_score(y_test, y_pred)\nprint(score)\n# Code ends here\n\n\n", "# --------------\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\n# code starts here\ndf = pd.read_csv(path)\np_a = (df.fico>700).sum()/len(df)\np_b = (df.purpose==\"debt_consolidation\").sum()/len(df)\ndf1 = df[df.purpose==\"debt_consolidation\"]\np_b_a = (df1.fico>700).sum()/len(df)\np_a_b = p_a*p_b_a/p_b\nresult = p_b_a==p_a\nprint(result)\n\n\n\n# code ends here\n\n\n# --------------\n# code starts here\nprob_lp = (df[\"paid.back.loan\"]==\"Yes\").sum()/len(df)\n\nprob_cs = (df[\"credit.policy\"] == \"Yes\").sum()/len(df)\n\nnew_df = df[df[\"paid.back.loan\"]==\"Yes\"]\n\ndf2 = df[df[\"credit.policy\"]==\"Yes\"]\n\nprob_pd_cs = (new_df[\"credit.policy\"]==\"Yes\").sum()/len(new_df)\n\nbayes = prob_lp*prob_pd_cs/prob_cs\n\n\n# code ends here\n\n\n# --------------\n# code starts here\ndf.purpose.value_counts().plot(kind = \"bar\")\nplt.show()\n\ndf1 = df[df[\"paid.back.loan\"]==\"No\"]\n\ndf1.purpose.value_counts().plot(kind = \"bar\")\nplt.show()\n# code ends here\n\n\n# --------------\ninst_median = df.installment.median()\n\ninst_mean = 
df.installment.mean()\n\ndf.installment.plot(kind = \"hist\")\nplt.show()\ndf[\"log.annual.inc\"].plot(kind = \"hist\")\nplt.show()\n\n\n\n# code ends here\n\n\n" ]
[ [ "pandas.read_csv", "sklearn.linear_model.LogisticRegression", "sklearn.model_selection.train_test_split", "sklearn.preprocessing.StandardScaler", "sklearn.preprocessing.LabelEncoder", "sklearn.metrics.accuracy_score" ], [ "pandas.read_csv", "matplotlib.pyplot.show" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.1", "1.5", "1.2", "1.3" ], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.1", "1.5", "1.2", "1.3" ], "scipy": [], "tensorflow": [] } ]
FawwazMayda/car-defect-mask
[ "1f2287e470e9b28f0d871dd607dc6fffe9d3babf" ]
[ "samples/balloon/balloon.py" ]
[ "\"\"\"\nMask R-CNN\nTrain on the toy Balloon dataset and implement color splash effect.\n\nCopyright (c) 2018 Matterport, Inc.\nLicensed under the MIT License (see LICENSE for details)\nWritten by Waleed Abdulla\n\n------------------------------------------------------------\n\nUsage: import the module (see Jupyter notebooks for examples), or run from\n the command line as such:\n\n # Train a new model starting from pre-trained COCO weights\n python3 balloon.py train --dataset=/path/to/balloon/dataset --weights=coco\n\n # Resume training a model that you had trained earlier\n python3 balloon.py train --dataset=/path/to/balloon/dataset --weights=last\n\n # Train a new model starting from ImageNet weights\n python3 balloon.py train --dataset=/path/to/balloon/dataset --weights=imagenet\n\n # Apply color splash to an image\n python3 balloon.py splash --weights=/path/to/weights/file.h5 --image=<URL or path to file>\n\n # Apply color splash to video using the last weights you trained\n python3 balloon.py splash --weights=last --video=<URL or path to file>\n\"\"\"\n\nimport os\nimport sys\nimport json\nimport datetime\nimport numpy as np\nimport skimage.draw\n\n# Root directory of the project\nROOT_DIR = os.path.abspath(\"../../\")\n\n# Import Mask RCNN\nsys.path.append(ROOT_DIR) # To find local version of the library\nfrom mrcnn.config import Config\nfrom mrcnn import model as modellib, utils\n\n# Path to trained weights file\nCOCO_WEIGHTS_PATH = os.path.join(ROOT_DIR, \"mask_rcnn_coco.h5\")\n\n# Directory to save logs and model checkpoints, if not provided\n# through the command line argument --logs\nDEFAULT_LOGS_DIR = os.path.join(ROOT_DIR, \"logs\")\n\n############################################################\n# Configurations\n############################################################\n\n\nclass BalloonConfig(Config):\n \"\"\"Configuration for training on the toy dataset.\n Derives from the base Config class and overrides some values.\n \"\"\"\n # Give the configuration a recognizable name\n NAME = \"defect\"\n\n # We use a GPU with 12GB memory, which can fit two images.\n # Adjust down if you use a smaller GPU.\n IMAGES_PER_GPU = 1\n\n # Number of classes (including background)\n NUM_CLASSES = 1 + 1 # Background + balloon\n\n # Number of training steps per epoch\n STEPS_PER_EPOCH = 100\n\n # Skip detections with < 90% confidence\n DETECTION_MIN_CONFIDENCE = 0.9\n\n\n############################################################\n# Dataset\n############################################################\n\nclass BalloonDataset(utils.Dataset):\n\n def load_balloon(self, dataset_dir, subset):\n \"\"\"Load a subset of the Balloon dataset.\n dataset_dir: Root directory of the dataset.\n subset: Subset to load: train or val\n \"\"\"\n # Add classes. We have only one class to add.\n self.add_class(\"defect\", 1, \"defect\")\n\n # Train or validation dataset?\n assert subset in [\"train\", \"val\"]\n dataset_dir = os.path.join(dataset_dir, subset)\n\n # Load annotations\n # VGG Image Annotator (up to version 1.6) saves each image in the form:\n # { 'filename': '28503151_5b5b7ec140_b.jpg',\n # 'regions': {\n # '0': {\n # 'region_attributes': {},\n # 'shape_attributes': {\n # 'all_points_x': [...],\n # 'all_points_y': [...],\n # 'name': 'polygon'}},\n # ... 
more regions ...\n # },\n # 'size': 100202\n # }\n # We mostly care about the x and y coordinates of each region\n # Note: In VIA 2.0, regions was changed from a dict to a list.\n annotations = json.load(open(os.path.join(dataset_dir, \"via_region_data.json\")))\n annotations = list(annotations.values()) # don't need the dict keys\n\n # The VIA tool saves images in the JSON even if they don't have any\n # annotations. Skip unannotated images.\n annotations = [a for a in annotations if a['regions']]\n\n # Add images\n for a in annotations:\n # Get the x, y coordinaets of points of the polygons that make up\n # the outline of each object instance. These are stores in the\n # shape_attributes (see json format above)\n # The if condition is needed to support VIA versions 1.x and 2.x.\n if type(a['regions']) is dict:\n polygons = [r['shape_attributes'] for r in a['regions'].values()]\n else:\n polygons = [r['shape_attributes'] for r in a['regions']] \n\n # load_mask() needs the image size to convert polygons to masks.\n # Unfortunately, VIA doesn't include it in JSON, so we must read\n # the image. This is only managable since the dataset is tiny.\n image_path = os.path.join(dataset_dir, a['filename'])\n image = skimage.io.imread(image_path)\n height, width = image.shape[:2]\n\n self.add_image(\n \"defect\",\n image_id=a['filename'], # use file name as a unique image id\n path=image_path,\n width=width, height=height,\n polygons=polygons)\n\n def load_mask(self, image_id):\n \"\"\"Generate instance masks for an image.\n Returns:\n masks: A bool array of shape [height, width, instance count] with\n one mask per instance.\n class_ids: a 1D array of class IDs of the instance masks.\n \"\"\"\n # If not a balloon dataset image, delegate to parent class.\n image_info = self.image_info[image_id]\n if image_info[\"source\"] != \"defect\":\n return super(self.__class__, self).load_mask(image_id)\n\n # Convert polygons to a bitmap mask of shape\n # [height, width, instance_count]\n info = self.image_info[image_id]\n mask = np.zeros([info[\"height\"], info[\"width\"], len(info[\"polygons\"])],\n dtype=np.uint8)\n for i, p in enumerate(info[\"polygons\"]):\n # Get indexes of pixels inside the polygon and set them to 1\n rr, cc = skimage.draw.polygon(p['all_points_y'], p['all_points_x'])\n mask[rr, cc, i] = 1\n\n # Return mask, and array of class IDs of each instance. Since we have\n # one class ID only, we return an array of 1s\n return mask.astype(np.bool), np.ones([mask.shape[-1]], dtype=np.int32)\n\n def image_reference(self, image_id):\n \"\"\"Return the path of the image.\"\"\"\n info = self.image_info[image_id]\n if info[\"source\"] == \"defect\":\n return info[\"path\"]\n else:\n super(self.__class__, self).image_reference(image_id)\n\n\ndef train(model):\n \"\"\"Train the model.\"\"\"\n # Training dataset.\n dataset_train = BalloonDataset()\n dataset_train.load_balloon(args.dataset, \"train\")\n dataset_train.prepare()\n\n # Validation dataset\n dataset_val = BalloonDataset()\n dataset_val.load_balloon(args.dataset, \"val\")\n dataset_val.prepare()\n\n # *** This training schedule is an example. Update to your needs ***\n # Since we're using a very small dataset, and starting from\n # COCO trained weights, we don't need to train too long. 
Also,\n # no need to train all layers, just the heads should do it.\n print(\"Training network heads\")\n model.train(dataset_train, dataset_val,\n learning_rate=config.LEARNING_RATE,\n epochs=40,\n layers='heads')\n\n\ndef color_splash(image, mask):\n \"\"\"Apply color splash effect.\n image: RGB image [height, width, 3]\n mask: instance segmentation mask [height, width, instance count]\n\n Returns result image.\n \"\"\"\n # Make a grayscale copy of the image. The grayscale copy still\n # has 3 RGB channels, though.\n gray = skimage.color.gray2rgb(skimage.color.rgb2gray(image)) * 255\n # Copy color pixels from the original color image where mask is set\n if mask.shape[-1] > 0:\n # We're treating all instances as one, so collapse the mask into one layer\n mask = (np.sum(mask, -1, keepdims=True) >= 1)\n splash = np.where(mask, image, gray).astype(np.uint8)\n else:\n splash = gray.astype(np.uint8)\n return splash\n\n\ndef detect_and_color_splash(model, image_path=None, video_path=None):\n assert image_path or video_path\n\n # Image or video?\n if image_path:\n # Run model detection and generate the color splash effect\n print(\"Running on {}\".format(args.image))\n # Read image\n image = skimage.io.imread(args.image)\n # Detect objects\n r = model.detect([image], verbose=1)[0]\n # Color splash\n splash = color_splash(image, r['masks'])\n # Save output\n file_name = \"splash_{:%Y%m%dT%H%M%S}.png\".format(datetime.datetime.now())\n skimage.io.imsave(file_name, splash)\n elif video_path:\n import cv2\n # Video capture\n vcapture = cv2.VideoCapture(video_path)\n width = int(vcapture.get(cv2.CAP_PROP_FRAME_WIDTH))\n height = int(vcapture.get(cv2.CAP_PROP_FRAME_HEIGHT))\n fps = vcapture.get(cv2.CAP_PROP_FPS)\n\n # Define codec and create video writer\n file_name = \"splash_{:%Y%m%dT%H%M%S}.avi\".format(datetime.datetime.now())\n vwriter = cv2.VideoWriter(file_name,\n cv2.VideoWriter_fourcc(*'MJPG'),\n fps, (width, height))\n\n count = 0\n success = True\n while success:\n print(\"frame: \", count)\n # Read next image\n success, image = vcapture.read()\n if success:\n # OpenCV returns images as BGR, convert to RGB\n image = image[..., ::-1]\n # Detect objects\n r = model.detect([image], verbose=0)[0]\n # Color splash\n splash = color_splash(image, r['masks'])\n # RGB -> BGR to save image to video\n splash = splash[..., ::-1]\n # Add image to video writer\n vwriter.write(splash)\n count += 1\n vwriter.release()\n print(\"Saved to \", file_name)\n\n\n############################################################\n# Training\n############################################################\n\nif __name__ == '__main__':\n import argparse\n\n # Parse command line arguments\n parser = argparse.ArgumentParser(\n description='Train Mask R-CNN to detect balloons.')\n parser.add_argument(\"command\",\n metavar=\"<command>\",\n help=\"'train' or 'splash'\")\n parser.add_argument('--dataset', required=False,\n metavar=\"/path/to/balloon/dataset/\",\n help='Directory of the Balloon dataset')\n parser.add_argument('--weights', required=True,\n metavar=\"/path/to/weights.h5\",\n help=\"Path to weights .h5 file or 'coco'\")\n parser.add_argument('--logs', required=False,\n default=DEFAULT_LOGS_DIR,\n metavar=\"/path/to/logs/\",\n help='Logs and checkpoints directory (default=logs/)')\n parser.add_argument('--image', required=False,\n metavar=\"path or URL to image\",\n help='Image to apply the color splash effect on')\n parser.add_argument('--video', required=False,\n metavar=\"path or URL to video\",\n help='Video to 
apply the color splash effect on')\n args = parser.parse_args()\n\n # Validate arguments\n if args.command == \"train\":\n assert args.dataset, \"Argument --dataset is required for training\"\n elif args.command == \"splash\":\n assert args.image or args.video,\\\n \"Provide --image or --video to apply color splash\"\n\n print(\"Weights: \", args.weights)\n print(\"Dataset: \", args.dataset)\n print(\"Logs: \", args.logs)\n\n # Configurations\n if args.command == \"train\":\n config = BalloonConfig()\n else:\n class InferenceConfig(BalloonConfig):\n # Set batch size to 1 since we'll be running inference on\n # one image at a time. Batch size = GPU_COUNT * IMAGES_PER_GPU\n GPU_COUNT = 1\n IMAGES_PER_GPU = 1\n config = InferenceConfig()\n config.display()\n\n # Create model\n if args.command == \"train\":\n model = modellib.MaskRCNN(mode=\"training\", config=config,\n model_dir=args.logs)\n else:\n model = modellib.MaskRCNN(mode=\"inference\", config=config,\n model_dir=args.logs)\n\n # Select weights file to load\n if args.weights.lower() == \"coco\":\n weights_path = COCO_WEIGHTS_PATH\n # Download weights file\n if not os.path.exists(weights_path):\n utils.download_trained_weights(weights_path)\n elif args.weights.lower() == \"last\":\n # Find last trained weights\n weights_path = model.find_last()\n elif args.weights.lower() == \"imagenet\":\n # Start from ImageNet trained weights\n weights_path = model.get_imagenet_weights()\n else:\n weights_path = args.weights\n\n # Load weights\n print(\"Loading weights \", weights_path)\n if args.weights.lower() == \"coco\":\n # Exclude the last layers because they require a matching\n # number of classes\n model.load_weights(weights_path, by_name=True, exclude=[\n \"mrcnn_class_logits\", \"mrcnn_bbox_fc\",\n \"mrcnn_bbox\", \"mrcnn_mask\"])\n else:\n model.load_weights(weights_path, by_name=True, exclude=[\n \"mrcnn_class_logits\", \"mrcnn_bbox_fc\",\n \"mrcnn_bbox\", \"mrcnn_mask\"])\n\n # Train or evaluate\n if args.command == \"train\":\n train(model)\n elif args.command == \"splash\":\n detect_and_color_splash(model, image_path=args.image,\n video_path=args.video)\n else:\n print(\"'{}' is not recognized. \"\n \"Use 'train' or 'splash'\".format(args.command))\n" ]
[ [ "numpy.where", "numpy.sum", "numpy.ones" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
KIZI/pyIDS
[ "76ad9d9b2bc12630ae1a36cb8f96103a5e5bbfce", "76ad9d9b2bc12630ae1a36cb8f96103a5e5bbfce", "76ad9d9b2bc12630ae1a36cb8f96103a5e5bbfce", "76ad9d9b2bc12630ae1a36cb8f96103a5e5bbfce" ]
[ "pyids/model_selection/coordinate_ascent.py", "scripts/use_case/thesis/testing_xml.py", "scripts/use_case/thesis/test_coordinate_ascent_effeciency_auc_interpretability_distance_euclidean.py", "pyids/data_structures/ids_rule.py" ]
[ "from .param_space_optimizer import ParameterSpaceOptimizer\nfrom typing import Tuple, Dict, List\nimport pandas as pd\n\ndef _ternary_search(func, left, right, absolute_precision, debug=False):\n \"\"\"\n taken from wikipedia article on ternary search\n \"\"\"\n \n while True:\n if abs(right - left) < absolute_precision:\n return (left + right)/2\n\n left_third = left + (right - left)/3\n right_third = right - (right - left)/3\n \n if debug:\n print(left_third, right_third)\n\n if func(left_third) < func(right_third):\n left = left_third\n else:\n right = right_third\n\n\nclass CoordinateAscent(ParameterSpaceOptimizer):\n\n def __init__(\n self,\n func,\n func_args_ranges: Dict[str, Tuple[int, int]],\n func_args_extension: Dict[str, int] = None,\n extension_precision=50,\n ternary_search_precision=10,\n max_iterations=500,\n ):\n self.func = func\n self.func_args_ranges = func_args_ranges\n\n arg_names = list(self.func_args_ranges.keys())\n\n if func_args_extension:\n self.func_args_extension = func_args_extension\n else:\n extensions_values = len(arg_names) * [500]\n\n self.func_args_extension = dict(zip(arg_names, extensions_values))\n\n self.ternary_search_precision = dict(zip(arg_names, len(arg_names) * [ternary_search_precision]))\n\n self.extension_precision = extension_precision\n\n self.max_iterations = max_iterations\n\n self.procedure_data = []\n\n def make_1arg_func(self, variable_arg_name, fixed_params):\n def func(x):\n fixed_params_copy = fixed_params.copy()\n\n fixed_params_copy[variable_arg_name] = x\n\n return self.func(fixed_params_copy)\n\n return func\n\n def extend_interval(self, arg_name, current_value):\n lower_interval_value, upper_interval_value = self.func_args_ranges[arg_name]\n\n if abs(upper_interval_value - current_value) <= self.extension_precision:\n new_upper_interval_value = upper_interval_value + self.func_args_extension[arg_name]\n\n self.func_args_ranges[arg_name] = lower_interval_value, new_upper_interval_value\n\n def fit(self):\n current_params = self.sample_starting_params()\n\n current_procedure_data = dict()\n current_procedure_data.update(dict(\n iteration=-1,\n current_lambda_param=\"None\",\n loss=self.func(current_params),\n current_params=current_params.copy()\n ))\n\n self.procedure_data.append(current_procedure_data)\n\n for i in range(self.max_iterations):\n for arg_name in self.func_args_ranges.keys():\n arg_func = self.make_1arg_func(arg_name, current_params)\n\n print(f\"using precision {self.ternary_search_precision[arg_name]}\")\n\n interval_lower, interval_upper = self.func_args_ranges[arg_name]\n best_param = _ternary_search(\n arg_func,\n interval_lower,\n interval_upper,\n self.ternary_search_precision[arg_name]\n )\n\n self.extend_interval(arg_name, best_param)\n\n _, interval_upper_new = self.func_args_ranges[arg_name]\n\n if interval_upper == interval_upper_new:\n self.ternary_search_precision[arg_name] /= 2\n\n current_params[arg_name] = best_param\n\n current_procedure_data = dict()\n current_procedure_data.update(dict(\n iteration=i,\n current_lambda_param=arg_name,\n loss=self.func(current_params),\n current_params=current_params.copy()\n ))\n\n self.procedure_data.append(current_procedure_data)\n\n procedure_data_df = pd.DataFrame(self.procedure_data)\n best_loss_mask = procedure_data_df[\"loss\"] == procedure_data_df[\"loss\"].max()\n best_lambda_index = procedure_data_df[best_loss_mask].index[0]\n\n best_lambda = list(self.procedure_data[best_lambda_index][\"current_params\"].values())\n\n return 
best_lambda\n\n\n\n\n\n\n \n\n", "import pandas as pd\n\nfrom pyarc.qcba.data_structures import QuantitativeDataFrame\n\nfrom pyids.data_structures import IDS, mine_IDS_ruleset, mine_CARs\nfrom pyids.data_structures import IDSRuleSet\nfrom pyids.rule_mining import RuleMiner\nfrom pyids.model_selection import CoordinateAscentOptimizer, train_test_split_pd\n\n\ndf = pd.read_csv(\"../../../data/titanic.csv\")\n\ncars = mine_CARs(df, 80)\n\nids_ruleset = IDSRuleSet.from_cba_rules(cars)\n\n\ndf_train, df_test = train_test_split_pd(df, prop=0.25)\nquant_df_train, quant_df_test = QuantitativeDataFrame(df_train), QuantitativeDataFrame(df_test)\n\n\n\ncoordinate_ascent = CoordinateAscentOptimizer(IDS(), maximum_delta_between_iterations=200, maximum_score_estimation_iterations=10, ternary_search_precision=20, maximum_consecutive_iterations=20)\nlambda_array = coordinate_ascent.fit(ids_ruleset, quant_df_train, quant_df_test)\n\nprint(lambda_array)\n\n\n", "from pyids.model_selection import CoordinateAscent\nfrom pyids.algorithms.ids import IDS\nfrom pyids.algorithms import mine_CARs, mine_IDS_ruleset\n\nfrom pyarc.qcba.data_structures import QuantitativeDataFrame\n\nimport pandas as pd\nimport numpy as np\n\ndf_iris = pd.read_csv(\"../../../data/iris0.csv\")\nquant_df = QuantitativeDataFrame(df_iris)\ncars = mine_CARs(df_iris, 30)\n\n\ninterpretability_bounds = dict(\n fraction_overlap=0.1,\n fraction_classes=1,\n fraction_uncovered=0.35,\n average_rule_width=8,\n ruleset_length=10\n)\n\ndef is_solution_interpretable(metrics):\n print(metrics)\n return (\n metrics[\"fraction_overlap\"] <= interpretability_bounds[\"fraction_overlap\"] and\n metrics[\"fraction_classes\"] >= interpretability_bounds[\"fraction_classes\"] and\n metrics[\"fraction_uncovered\"] <= interpretability_bounds[\"fraction_uncovered\"] and\n metrics[\"average_rule_width\"] <= interpretability_bounds[\"average_rule_width\"] and\n metrics[\"ruleset_length\"] <= interpretability_bounds[\"ruleset_length\"]\n )\n\ndef solution_interpretability_distance(metrics):\n distance_vector = np.array([\n max(metrics[\"fraction_overlap\"] - interpretability_bounds[\"fraction_overlap\"], 0),\n max(interpretability_bounds[\"fraction_classes\"] - metrics[\"fraction_classes\"], 0),\n max(metrics[\"fraction_uncovered\"] - interpretability_bounds[\"fraction_uncovered\"], 0),\n max(metrics[\"average_rule_width\"] - interpretability_bounds[\"average_rule_width\"], 0),\n max(metrics[\"ruleset_length\"] - interpretability_bounds[\"ruleset_length\"], 0)\n ])\n return np.linalg.norm(distance_vector)\n\ndef fmax(lambda_dict):\n print(lambda_dict)\n ids = IDS(algorithm=\"SLS\")\n ids.fit(class_association_rules=cars, quant_dataframe=quant_df, lambda_array=list(lambda_dict.values()))\n\n metrics = ids.score_interpretability_metrics(quant_df)\n\n if not is_solution_interpretable(metrics):\n distance = -solution_interpretability_distance(metrics)\n print(distance)\n return distance\n\n if not is_solution_interpretable(metrics):\n return 0\n\n auc = ids.score_auc(quant_df)\n\n print(auc)\n\n return auc\n\n\n\ncoord_asc = CoordinateAscent(\n func=fmax,\n func_args_ranges=dict(\n l1=(1, 1000),\n l2=(1, 1000),\n l3=(1, 1000),\n l4=(1, 1000),\n l5=(1, 1000),\n l6=(1, 1000),\n l7=(1, 1000)\n ),\n ternary_search_precision=25,\n max_iterations=3\n)\n\ncoord_asc.fit()\n\ndf = pd.DataFrame(coord_asc.procedure_data)\n\ndf.to_csv(\"output_data/coordinate_ascent_run_AUC_interpretability_distance_sum_euclidean.csv\")\n", "from pyarc.qcba.data_structures import 
QuantitativeDataFrame\nfrom pyarc.data_structures import ClassAssocationRule\n\nfrom sklearn.metrics import f1_score\nimport numpy as np\nimport xml.etree.ElementTree as ET\nfrom scipy import stats as st\n\n\nclass IDSRule:\n\n DUMMY_LABEL = \"N/A\"\n \n def __init__(self, class_association_rule: ClassAssocationRule):\n self.car = class_association_rule\n self.cover_cache = dict(\n cover=None,\n correct_cover=None,\n incorrect_cover=None,\n rule_cover=None\n )\n self.cache_prepared = False\n self.f1 = 0\n\n def calc_f1(self, quant_dataframe: QuantitativeDataFrame):\n ground_truth = quant_dataframe.dataframe.iloc[:, -1]\n predictions = self.predict(quant_dataframe)\n\n f1 = f1_score(ground_truth, predictions, average=\"micro\")\n\n return f1\n\n def predict(self, quant_dataframe: QuantitativeDataFrame):\n correct_cover_mask = self.correct_cover(quant_dataframe)\n\n predictions = np.where(correct_cover_mask, self.car.consequent.value, \"DUMMY_LABEL\")\n\n return predictions\n\n def __repr__(self):\n args = [\n self.car.antecedent.string(),\n \"{\" + self.car.consequent.string() + \"}\",\n self.car.support,\n self.car.confidence,\n self.f1,\n self.car.rulelen,\n self.car.rid\n ]\n\n text = \"IDSRule {} => {} sup: {:.2f} conf: {:.2f}, f1: {:.2f}, len: {}, id: {}\".format(*args)\n\n return text\n\n def __len__(self):\n return len(self.car.antecedent)\n\n def __hash__(self):\n return hash(self.car)\n\n def to_dict(self):\n rule_dict = dict(antecedent=[], consequent={})\n\n for label, value in self.car.antecedent:\n rule_dict[\"antecedent\"].append(dict(name=label, value=value))\n\n label, value = self.car.consequent\n\n rule_dict[\"consequent\"].update(dict(name=label, value=value))\n\n return rule_dict\n\n def to_ruleml_xml(self):\n rule_dict = self.to_dict()\n\n rule = ET.Element(\"Implies\")\n\n consequent = ET.SubElement(rule, \"head\")\n\n label_element = ET.SubElement(consequent, \"Atom\")\n var_element = ET.SubElement(label_element, \"Var\")\n var_element.text = rule_dict[\"consequent\"][\"name\"]\n\n rel_element = ET.SubElement(label_element, \"Rel\")\n rel_element.text = rule_dict[\"consequent\"][\"value\"]\n\n antecedent = ET.SubElement(rule, \"body\")\n\n for antecedent_member in rule_dict[\"antecedent\"]:\n for label, value in antecedent_member.items():\n label_element = ET.SubElement(antecedent, \"Atom\")\n var_element = ET.SubElement(label_element, \"Var\")\n var_element.text = label\n\n rel_element = ET.SubElement(label_element, \"Rel\")\n rel_element.text = value\n\n return rule\n\n def to_xml(self):\n rule_dict = self.to_dict()\n\n rule = ET.Element(\"Rule\")\n antecedent = ET.SubElement(rule, \"Antecedent\")\n\n for antecedent_member in rule_dict[\"antecedent\"]:\n for label, value in antecedent_member.items():\n label_element = ET.SubElement(antecedent, label)\n label_element.text = value\n\n consequent = ET.SubElement(rule, \"Consequent\")\n\n for label, value in rule_dict[\"consequent\"].items():\n label_element = ET.SubElement(consequent, label)\n label_element.text = value\n\n return rule\n\n def calculate_cover(self, quant_dataframe: QuantitativeDataFrame):\n if type(quant_dataframe) != QuantitativeDataFrame:\n raise Exception(\"Type of quant_dataframe must be QuantitativeDataFrame\")\n\n self.cover_cache[\"cover\"] = self._cover(quant_dataframe)\n self.cover_cache[\"correct_cover\"] = self._correct_cover(quant_dataframe)\n self.cover_cache[\"incorrect_cover\"] = self._incorrect_cover(quant_dataframe)\n self.cover_cache[\"rule_cover\"] = 
self._rule_cover(quant_dataframe)\n\n self.cover_cache[\"cover_len\"] = np.sum(self.cover_cache[\"cover\"])\n self.cover_cache[\"correct_cover_len\"] = np.sum(self.cover_cache[\"correct_cover\"])\n self.cover_cache[\"incorrect_cover_len\"] = np.sum(self.cover_cache[\"incorrect_cover\"])\n self.cover_cache[\"rule_cover_len\"] = np.sum(self.cover_cache[\"rule_cover\"])\n\n self.cache_prepared = True\n\n self.f1 = self.calc_f1(quant_dataframe)\n\n def cover(self, quant_dataframe):\n if not self.cache_prepared:\n raise Exception(\"Caches not prepared yet\")\n\n return self.cover_cache[\"cover\"]\n\n def correct_cover(self, quant_dataframe):\n if not self.cache_prepared:\n raise Exception(\"Caches not prepared yet\")\n\n return self.cover_cache[\"correct_cover\"]\n\n def incorrect_cover(self, quant_dataframe):\n if not self.cache_prepared:\n raise Exception(\"Caches not prepared yet\")\n\n return self.cover_cache[\"incorrect_cover\"]\n\n def rule_cover(self, quant_dataframe):\n if not self.cache_prepared:\n raise Exception(\"Caches not prepared yet\")\n\n return self.cover_cache[\"rule_cover\"]\n\n def _cover(self, quant_dataframe):\n if type(quant_dataframe) != QuantitativeDataFrame:\n raise Exception(\"Type of quant_dataframe must be QuantitativeDataFrame\")\n\n cover, _ = quant_dataframe.find_covered_by_rule_mask(self.car)\n\n return cover\n\n def rule_overlap(self, other, quant_dataframe):\n if type(quant_dataframe) != QuantitativeDataFrame:\n raise Exception(\"Type of quant_dataframe must be QuantitativeDataFrame\") \n\n if type(other) != IDSRule:\n raise Exception(\"Type of other must be IDSRule\")\n\n cover1 = self.cover(quant_dataframe)\n cover2 = other.cover(quant_dataframe)\n\n overlap = np.logical_and(cover1, cover2)\n\n return overlap\n\n def predict(self, quant_dataframe):\n if type(quant_dataframe) != QuantitativeDataFrame:\n raise Exception(\"Type of quant_dataframe must be QuantitativeDataFrame\")\n\n cover = self.cover(quant_dataframe)\n\n class_label = self.car.consequent.value\n\n prediction = np.where(cover, class_label, IDSRule.DUMMY_LABEL)\n\n return prediction\n\n def _rule_cover(self, quant_dataframe):\n if type(quant_dataframe) != QuantitativeDataFrame:\n raise Exception(\"Type of quant_dataframe must be QuantitativeDataFrame\")\n\n cover_antecedent, cover_consequent = quant_dataframe.find_covered_by_rule_mask(self.car)\n\n rule_cover = cover_antecedent & cover_consequent\n\n return rule_cover\n\n def _correct_cover(self, quant_dataframe):\n if type(quant_dataframe) != QuantitativeDataFrame:\n raise Exception(\"Type of quant_dataframe must be QuantitativeDataFrame\")\n\n rule_cover = self._rule_cover(quant_dataframe)\n\n class_column_cover = quant_dataframe.dataframe.iloc[:,-1].values == self.car.consequent.value\n\n return np.logical_and(rule_cover, class_column_cover)\n\n def _incorrect_cover(self, quant_dataframe):\n if type(quant_dataframe) != QuantitativeDataFrame:\n raise Exception(\"Type of quant_dataframe must be QuantitativeDataFrame\")\n\n correct_cover = self._correct_cover(quant_dataframe)\n\n return np.logical_not(correct_cover)\n\n def __gt__(self, other):\n \"\"\"\n precedence operator. Determines if this rule\n has higher precedence. Rules are sorted according\n to their f1 score.\n \"\"\"\n\n f1_score_self = self.f1\n f1_score_other = other.f1\n\n return f1_score_self > f1_score_other\n\n def __lt__(self, other):\n \"\"\"\n rule precedence operator\n \"\"\"\n return not self > other\n" ]
[ [ "pandas.DataFrame" ], [ "pandas.read_csv" ], [ "pandas.read_csv", "numpy.linalg.norm", "pandas.DataFrame" ], [ "numpy.logical_not", "sklearn.metrics.f1_score", "numpy.logical_and", "numpy.where", "numpy.sum" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.1", "1.5", "1.2", "1.3" ], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [ "1.10", "1.12", "1.11", "1.19", "1.24", "1.13", "1.16", "1.9", "1.18", "1.23", "1.21", "1.22", "1.20", "1.7", "1.15", "1.14", "1.17", "1.8" ], "pandas": [ "2.0", "1.4", "1.3", "1.1", "1.5", "1.2" ], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
akashrajkn/sparse-distributions
[ "961447fef7307905cf3d8a32ac31d5473c6c1f20" ]
[ "mixture.py" ]
[ "import torch\nfrom torch.distributions.uniform import Uniform\nfrom torch.distributions.utils import broadcast_all\nfrom torch.distributions.kl import register_kl, kl_divergence\nimport torch.nn.functional as F\n\n\nEPS = 1e-5\n\n\nclass MixtureD0C01(torch.distributions.Distribution):\n \n def __init__(self, logits0, cont, validate_args=None):\n \"\"\"\n - with probability p_0 = sigmoid(logits0) this returns 0\n - with probability 1 - p_0 this returns a sample in the open interval (0, 1)\n \n logits0: logits for p_0\n cont: a (properly normalised) distribution over (0, 1)\n e.g. RightTruncatedExponential\n \"\"\"\n shape = cont.batch_shape\n super(MixtureD0C01, self).__init__(batch_shape=shape, validate_args=validate_args)\n self.logits = logits0\n self.cont = cont\n self.log_p0 = F.logsigmoid(self.logits)\n self.p0 = torch.sigmoid(self.logits) \n self.pc = 1. - self.p0\n self.log_pc = - F.softplus(logits0) # = torch.log(self.pc)\n self.uniform = Uniform(torch.zeros(shape).to(logits0.device), \n torch.ones(shape).to(logits0.device))\n \n def rsample(self, sample_shape=torch.Size()): \n # sample from (0, 1) uniformly\n u = self.uniform.rsample(sample_shape) \n # affine transform to project from (p_0, 1) to (0, 1)\n # note that only where p_0 < u < 1 this is correct\n to_cont = (u - self.p0) / self.pc \n # c ~ ContinuousDist()\n # note where p_0 < u < 1, c is valid and is in (0,1)\n c = self.cont.icdf(to_cont)\n # inverse cdf of mixture model\n # 0 if u < p_0\n # c otherwise\n x = torch.where(u <= self.p0, torch.zeros_like(u), c)\n return x\n \n def log_prob(self, value): \n log_prob_cont = self.cont.log_prob(value)\n log_prob = torch.where(value == 0., self.log_p0, self.log_pc + log_prob_cont)\n return log_prob\n \n def cdf(self, value):\n cdf_cont = self.cont.cdf(value)\n cdf = torch.where(value == 0., self.p0, self.p0 + self.pc * cdf_cont)\n return cdf\n \n def entropy(self):\n h = self.p0 * ( - self.log_p0) + self.pc * (- self.log_pc) + self.pc * self.cont.entropy()\n return h\n\n \ndef kl_mixture_mixture(p, q): \n # see derivation on overleaf\n kl = p.p0 * (p.log_p0 - q.log_p0)\n kl = kl + p.pc * (p.log_pc - q.log_pc)\n kl = kl + p.pc * kl_divergence(p.cont, q.cont)\n return kl\n\n\n@register_kl(MixtureD0C01, MixtureD0C01)\ndef _kl(p, q):\n return kl_mixture_mixture(p, q)" ]
[ [ "torch.Size", "torch.sigmoid", "torch.ones", "torch.distributions.kl.register_kl", "torch.zeros", "torch.distributions.kl.kl_divergence", "torch.nn.functional.logsigmoid", "torch.zeros_like", "torch.where", "torch.nn.functional.softplus" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
supersunpower/exchange_calendars
[ "306d99a1204f2625c2ff0d795a8f8cb1dfd0d03a" ]
[ "exchange_calendars/us_holidays.py" ]
[ "\"\"\"\nUS Holidays\n\nMany historical holidays were derived from the pdf at\netc/NYSE-Historical-Closings.pdf. Originally posted at\nhttp://s3.amazonaws.com/armstrongeconomics-wp/2013/07/NYSE-Closings.pdf # noqa\n\nThese were originally added in\nhttps://github.com/rsheftel/pandas_market_calendars/pull/30\n\"\"\"\n\nfrom dateutil.relativedelta import MO, TH, TU\nfrom pandas import DateOffset, Timestamp\nfrom pandas.tseries.holiday import Holiday, nearest_workday, sunday_to_monday\nfrom pandas.tseries.offsets import Day\nfrom pytz import UTC\n\nfrom .common_holidays import new_years_day\nfrom .exchange_calendar import FRIDAY, MONDAY, THURSDAY, TUESDAY, WEDNESDAY\n\n\ndef following_tuesday_every_four_years_observance(dt):\n return dt + DateOffset(years=(4 - (dt.year % 4)) % 4, weekday=TU(1))\n\n\n# Holidays\n\n# These have the same definition, but are used in different places because the\n# NYSE closed at 2:00 PM on Christmas Eve until 1993.\nChristmasEveBefore1993 = Holiday(\n \"Christmas Eve\",\n month=12,\n day=24,\n end_date=Timestamp(\"1993-01-01\"),\n # When Christmas is a Saturday, the 24th is a full holiday.\n days_of_week=(MONDAY, TUESDAY, WEDNESDAY, THURSDAY),\n)\nChristmasEveInOrAfter1993 = Holiday(\n \"Christmas Eve\",\n month=12,\n day=24,\n start_date=Timestamp(\"1993-01-01\"),\n # When Christmas is a Saturday, the 24th is a full holiday.\n days_of_week=(MONDAY, TUESDAY, WEDNESDAY, THURSDAY),\n)\nUSNewYearsDay = new_years_day(\n # When Jan 1 is a Sunday, US markets observe the subsequent Monday.\n # When Jan 1 is a Saturday (as in 2005 and 2011), no holiday is observed.\n observance=sunday_to_monday\n)\nUSMartinLutherKingJrAfter1998 = Holiday(\n \"Dr. Martin Luther King Jr. Day\",\n month=1,\n day=1,\n # The US markets didn't observe MLK day as a holiday until 1998.\n start_date=Timestamp(\"1998-01-01\"),\n offset=DateOffset(weekday=MO(3)),\n)\nUSLincolnsBirthDayBefore1954 = Holiday(\n \"Lincoln's Birthday\",\n month=2,\n day=12,\n start_date=Timestamp(\"1874-01-01\"),\n end_date=Timestamp(\"1953-12-31\"),\n observance=sunday_to_monday,\n)\nUSWashingtonsBirthDayBefore1964 = Holiday(\n \"Washington's Birthday\",\n month=2,\n day=22,\n start_date=Timestamp(\"1880-01-01\"),\n end_date=Timestamp(\"1963-12-31\"),\n observance=sunday_to_monday,\n)\nUSWashingtonsBirthDay1964to1970 = Holiday(\n \"Washington's Birthday\",\n month=2,\n day=22,\n start_date=Timestamp(\"1964-01-01\"),\n end_date=Timestamp(\"1970-12-31\"),\n observance=nearest_workday,\n)\nUSPresidentsDay = Holiday(\n \"President's Day\",\n start_date=Timestamp(\"1971-01-01\"),\n month=2,\n day=1,\n offset=DateOffset(weekday=MO(3)),\n)\n\nUSThanksgivingDayBefore1939 = Holiday(\n \"Thanksgiving Before 1939\",\n start_date=Timestamp(\"1864-01-01\"),\n end_date=Timestamp(\"1938-12-31\"),\n month=11,\n day=30,\n offset=DateOffset(weekday=TH(-1)),\n)\n\nUSThanksgivingDay1939to1941 = Holiday(\n \"Thanksgiving 1939 to 1941\",\n start_date=Timestamp(\"1939-01-01\"),\n end_date=Timestamp(\"1941-12-31\"),\n month=11,\n day=30,\n offset=DateOffset(weekday=TH(-2)),\n)\nUSThanksgivingDay = Holiday(\n \"Thanksgiving\",\n start_date=Timestamp(\"1942-01-01\"),\n month=11,\n day=1,\n offset=DateOffset(weekday=TH(4)),\n)\n\nUSMemorialDayBefore1964 = Holiday(\n \"Memorial Day\",\n month=5,\n day=30,\n end_date=Timestamp(\"1963-12-31\"),\n observance=sunday_to_monday,\n)\nUSMemorialDay1964to1969 = Holiday(\n \"Memorial Day\",\n month=5,\n day=30,\n start_date=Timestamp(\"1964-01-01\"),\n end_date=Timestamp(\"1969-12-31\"),\n 
observance=nearest_workday,\n)\nUSMemorialDay = Holiday(\n # NOTE: The definition for Memorial Day is incorrect as of pandas 0.16.0.\n # See https://github.com/pydata/pandas/issues/9760.\n \"Memorial Day\",\n month=5,\n day=25,\n start_date=Timestamp(\"1971-01-01\"),\n offset=DateOffset(weekday=MO(1)),\n)\n\nUSIndependenceDayBefore1954 = Holiday(\n \"July 4th\",\n month=7,\n day=4,\n end_date=Timestamp(\"1953-12-31\"),\n observance=sunday_to_monday,\n)\nUSIndependenceDay = Holiday(\n \"July 4th\",\n month=7,\n day=4,\n start_date=Timestamp(\"1954-01-01\"),\n observance=nearest_workday,\n)\n\nUSElectionDay1848to1967 = Holiday(\n \"Election Day\",\n month=11,\n day=2,\n start_date=Timestamp(\"1848-1-1\"),\n end_date=Timestamp(\"1967-12-31\"),\n offset=DateOffset(weekday=TU(1)),\n)\nUSElectionDay1968to1980 = Holiday(\n \"Election Day\",\n month=11,\n day=2,\n start_date=Timestamp(\"1968-01-01\"),\n end_date=Timestamp(\"1980-12-31\"),\n observance=following_tuesday_every_four_years_observance,\n)\nUSVeteransDay1934to1953 = Holiday(\n \"Veteran Day\",\n month=11,\n day=11,\n start_date=Timestamp(\"1934-1-1\"),\n end_date=Timestamp(\"1953-12-31\"),\n observance=sunday_to_monday,\n)\nUSColumbusDayBefore1954 = Holiday(\n \"Columbus Day\",\n month=10,\n day=12,\n end_date=Timestamp(\"1953-12-31\"),\n observance=sunday_to_monday,\n)\nChristmasBefore1954 = Holiday(\n \"Christmas\",\n month=12,\n day=25,\n end_date=Timestamp(\"1953-12-31\"),\n observance=sunday_to_monday,\n)\nChristmas = Holiday(\n \"Christmas\",\n month=12,\n day=25,\n start_date=Timestamp(\"1954-01-01\"),\n observance=nearest_workday,\n)\n\n# Early Closes\n\nMonTuesThursBeforeIndependenceDay = Holiday(\n # When July 4th is a Tuesday, Wednesday, or Friday, the previous day is a\n # half day.\n \"Mondays, Tuesdays, and Thursdays Before Independence Day\",\n month=7,\n day=3,\n days_of_week=(MONDAY, TUESDAY, THURSDAY),\n start_date=Timestamp(\"1995-01-01\"),\n)\nFridayAfterIndependenceDayPre2013 = Holiday(\n # When July 4th is a Thursday, the next day is a half day prior to 2013.\n # Since 2013 the early close is on Wednesday and Friday is a full day\n \"Fridays after Independence Day prior to 2013\",\n month=7,\n day=5,\n days_of_week=(FRIDAY,),\n start_date=Timestamp(\"1995-01-01\"),\n end_date=Timestamp(\"2013-01-01\"),\n)\nWednesdayBeforeIndependenceDayPost2013 = Holiday(\n # When July 4th is a Thursday, the next day is a half day prior to 2013.\n # Since 2013 the early close is on Wednesday and Friday is a full day\n \"Wednesdays Before Independence Day including and after 2013\",\n month=7,\n day=3,\n days_of_week=(WEDNESDAY,),\n start_date=Timestamp(\"2013-01-01\"),\n)\nUSBlackFridayBefore1993 = Holiday(\n \"Black Friday\",\n month=11,\n day=1,\n # Black Friday was not observed until 1992.\n start_date=Timestamp(\"1992-01-01\"),\n end_date=Timestamp(\"1993-01-01\"),\n offset=[DateOffset(weekday=TH(4)), Day(1)],\n)\nUSBlackFridayInOrAfter1993 = Holiday(\n \"Black Friday\",\n month=11,\n day=1,\n start_date=Timestamp(\"1993-01-01\"),\n offset=[DateOffset(weekday=TH(4)), Day(1)],\n)\nBattleOfGettysburg = Holiday(\n # All of the floor traders in Chicago were sent to PA\n \"Markets were closed during the battle of Gettysburg\",\n month=7,\n day=(1, 2, 3),\n start_date=Timestamp(\"1863-07-01\"),\n end_date=Timestamp(\"1863-07-03\"),\n)\n\n# Adhoc and other closings\n# use list for consistency in returning ad-hoc dates\n\n\nNovember29BacklogRelief = [\n Timestamp(\"1929-11-01\", tz=\"UTC\"),\n Timestamp(\"1929-11-29\", 
tz=\"UTC\"),\n]\n\nMarch33BankHoliday = [\n Timestamp(\"1933-03-06\", tz=\"UTC\"),\n Timestamp(\"1933-03-07\", tz=\"UTC\"),\n Timestamp(\"1933-03-08\", tz=\"UTC\"),\n Timestamp(\"1933-03-09\", tz=\"UTC\"),\n Timestamp(\"1933-03-10\", tz=\"UTC\"),\n Timestamp(\"1933-03-13\", tz=\"UTC\"),\n Timestamp(\"1933-03-14\", tz=\"UTC\"),\n]\n\nAugust45VictoryOverJapan = [\n Timestamp(\"1945-08-15\", tz=\"UTC\"),\n Timestamp(\"1945-08-16\", tz=\"UTC\"),\n]\n\n\nChristmasEvesAdhoc = [\n Timestamp(\"1945-12-24\", tz=\"UTC\"),\n Timestamp(\"1956-12-24\", tz=\"UTC\"),\n]\n\nDayAfterChristmasAdhoc = [Timestamp(\"1958-12-26\", tz=\"UTC\")]\n\nDayBeforeDecorationAdhoc = [Timestamp(\"1961-05-29\", tz=\"UTC\")]\n\nLincolnsBirthDayAdhoc = [Timestamp(\"1968-02-12\", tz=\"UTC\")]\n\nPaperworkCrisis68 = [\n Timestamp(\"1968-06-12\", tz=\"UTC\"),\n Timestamp(\"1968-06-19\", tz=\"UTC\"),\n Timestamp(\"1968-06-26\", tz=\"UTC\"),\n Timestamp(\"1968-07-10\", tz=\"UTC\"),\n Timestamp(\"1968-07-17\", tz=\"UTC\"),\n Timestamp(\"1968-07-24\", tz=\"UTC\"),\n Timestamp(\"1968-07-31\", tz=\"UTC\"),\n Timestamp(\"1968-08-07\", tz=\"UTC\"),\n Timestamp(\"1968-08-14\", tz=\"UTC\"),\n Timestamp(\"1968-08-21\", tz=\"UTC\"),\n Timestamp(\"1968-08-28\", tz=\"UTC\"),\n Timestamp(\"1968-09-11\", tz=\"UTC\"),\n Timestamp(\"1968-09-18\", tz=\"UTC\"),\n Timestamp(\"1968-09-25\", tz=\"UTC\"),\n Timestamp(\"1968-10-02\", tz=\"UTC\"),\n Timestamp(\"1968-10-09\", tz=\"UTC\"),\n Timestamp(\"1968-10-16\", tz=\"UTC\"),\n Timestamp(\"1968-10-23\", tz=\"UTC\"),\n Timestamp(\"1968-10-30\", tz=\"UTC\"),\n Timestamp(\"1968-11-11\", tz=\"UTC\"),\n Timestamp(\"1968-11-20\", tz=\"UTC\"),\n Timestamp(\"1968-12-04\", tz=\"UTC\"),\n Timestamp(\"1968-12-11\", tz=\"UTC\"),\n Timestamp(\"1968-12-18\", tz=\"UTC\"),\n Timestamp(\"1968-12-25\", tz=\"UTC\"),\n]\n\nDayAfterIndependenceDayAdhoc = [Timestamp(\"1968-07-05\", tz=\"UTC\")]\n\nWeatherSnowClosing = [Timestamp(\"1969-02-10\", tz=\"UTC\")]\n\nFirstLunarLandingClosing = [Timestamp(\"1969-07-21\", tz=\"UTC\")]\n\nNewYorkCityBlackout77 = [Timestamp(\"1977-07-14\", tz=\"UTC\")]\n\n\n# http://en.wikipedia.org/wiki/Aftermath_of_the_September_11_attacks\nSeptember11Closings = [\n Timestamp(\"2001-09-11\", tz=UTC),\n Timestamp(\"2001-09-12\", tz=UTC),\n Timestamp(\"2001-09-13\", tz=UTC),\n Timestamp(\"2001-09-14\", tz=UTC),\n]\n\n# http://en.wikipedia.org/wiki/Hurricane_sandy\nHurricaneSandyClosings = [\n Timestamp(\"2012-10-29\", tz=UTC),\n Timestamp(\"2012-10-30\", tz=UTC),\n]\n\n# add Hurricane Gloria closing\nHurricaneGloriaClosing = [Timestamp(\"1985-09-27\", tz=UTC)]\n\n\n# National Days of Mourning\n# - President John F. Kennedy - November 25, 1963\n# - Martin Luther King - April 9, 1968\n# - President Dwight D. Eisenhower - March 31, 1969\n# - President Harry S. Truman - December 28, 1972\n# - President Lyndon B. Johnson - January 25, 1973\n# - President Richard Nixon - April 27, 1994\n# - President Ronald W. Reagan - June 11, 2004\n# - President Gerald R. Ford - Jan 2, 2007\n# - President George H.W. 
Bush - Dec 5, 2018\n# added Truman and Johnson to go back to 1970\n# http://s3.amazonaws.com/armstrongeconomics-wp/2013/07/NYSE-Closings.pdf\nUSNationalDaysofMourning = [\n Timestamp(\"1963-11-25\", tz=\"UTC\"),\n Timestamp(\"1968-04-09\", tz=\"UTC\"),\n Timestamp(\"1969-03-31\", tz=\"UTC\"),\n Timestamp(\"1972-12-28\", tz=UTC),\n Timestamp(\"1973-01-25\", tz=UTC),\n Timestamp(\"1994-04-27\", tz=UTC),\n Timestamp(\"2004-06-11\", tz=UTC),\n Timestamp(\"2007-01-02\", tz=UTC),\n Timestamp(\"2018-12-05\", tz=UTC),\n]\n" ]
[ [ "pandas.tseries.offsets.Day", "pandas.Timestamp" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
Soapy-Salted-Fish-King/DIM
[ "bac4765a8126746675f517c7bfa1b04b88044d51", "bac4765a8126746675f517c7bfa1b04b88044d51" ]
[ "cortex_DIM/functions/gradient_penalty.py", "cortex_DIM/nn_modules/convnet.py" ]
[ "'''Gradient penalty functions.\n\n'''\n\nimport torch\nfrom torch import autograd\n\n\ndef contrastive_gradient_penalty(network, input, penalty_amount=1.):\n \"\"\"Contrastive gradient penalty.\n\n This is essentially the loss introduced by Mescheder et al 2018.\n\n Args:\n network: Network to apply penalty through.\n input: Input or list of inputs for network.\n penalty_amount: Amount of penalty.\n\n Returns:\n torch.Tensor: gradient penalty loss.\n\n \"\"\"\n def _get_gradient(inp, output):\n gradient = autograd.grad(outputs=output, inputs=inp,\n grad_outputs=torch.ones_like(output),\n create_graph=True, retain_graph=True,\n only_inputs=True, allow_unused=True)[0]\n return gradient\n\n if not isinstance(input, (list, tuple)):\n input = [input]\n\n input = [inp.detach() for inp in input]\n input = [inp.requires_grad_() for inp in input]\n\n with torch.set_grad_enabled(True):\n output = network(*input)[-1]\n gradient = _get_gradient(input, output)\n gradient = gradient.view(gradient.size()[0], -1)\n penalty = (gradient ** 2).sum(1).mean()\n\n return penalty * penalty_amount\n", "'''Convnet encoder module.\n\n'''\n\nimport copy\n\nimport torch\nimport torch.nn as nn\n\nfrom cortex.built_ins.networks.utils import get_nonlinearity\n\nfrom cortex_DIM.nn_modules.misc import Expand2d, Fold, Unfold, View\n\n\nclass Convnet(nn.Module):\n '''Basic convnet convenience class.\n\n Attributes:\n layers: nn.Sequential of layers with batch norm,\n dropout, nonlinearity, etc.\n shapes: list of output shapes for every layer..\n\n '''\n\n _supported_types = ('linear', 'conv', 'tconv', 'reshape', 'flatten', None)\n\n def __init__(self, *args, **kwargs):\n super().__init__()\n self.create_layers(*args, **kwargs)\n\n def create_layers(self, shape, layers=None):\n '''Creates layers\n\n Args:\n shape: Shape of input.\n layers: list of layer arguments.\n '''\n\n self.layers, self.shapes = self.create_sequential(shape, layers=layers)\n\n def create_sequential(self, shape, layers=None):\n '''Creates a sequence of layers.\n\n Args:\n shape: Input shape.\n layers: list of layer arguments.\n\n Returns:\n nn.Sequential: a sequence of convolutional layers.\n\n '''\n\n modules = nn.Sequential()\n layers = layers or []\n layers = copy.deepcopy(layers)\n shapes = []\n\n for i, layer in enumerate(layers):\n layer_type = layer.pop('layer', None)\n\n name = 'layer{}'.format(i)\n block = nn.Sequential()\n\n shape = self.handle_layer(block, shape, layer, layer_type)\n shape = self.finish_block(block, shape, **layer)\n if len(block) == 1:\n block = block[0]\n shapes.append(shape)\n\n modules.add_module(name, block)\n\n return modules, shapes\n\n def handle_layer(self, block, shape, layer, layer_type):\n '''Handles the layer arguments and adds layer to the block.\n\n Args:\n block: nn.Sequential to add modules to.\n shape: Shape of the input.\n layer: Layer arguments.\n layer_type: Type of layer.\n\n Returns:\n tuple: Output shape.\n\n '''\n args = layer.pop('args', None)\n if layer_type == 'linear':\n if len(shape) == 3:\n dim_x, dim_y, dim_out = shape\n shape = (dim_x * dim_y * dim_out,)\n block.add_module('flatten', View(-1, shape[0]))\n bn = layer.get('bn', False)\n bias = layer.pop('bias', None)\n init = layer.pop('init', None)\n init_args = layer.pop('init_args', {})\n shape = self.add_linear_layer(block, shape, args=args, bn=bn, bias=bias, init=init, init_args=init_args)\n elif layer_type == 'conv':\n if len(shape) == 1:\n shape = (1, 1, shape[0])\n block.add_module('expand', Expand2d())\n bn = layer.get('bn', False)\n 
bias = layer.pop('bias', None)\n init = layer.pop('init', None)\n init_args = layer.pop('init_args', {})\n shape = self.add_conv_layer(block, shape, args=args, bn=bn, bias=bias, init=init, init_args=init_args)\n elif layer_type == 'tconv':\n if len(shape) == 1:\n raise ValueError('Transpose conv needs 4d input')\n bn = layer.get('bn', False)\n bias = layer.pop('bias', True)\n shape = self.add_tconv_layer(block, shape, args=args, bn=bn, bias=bias)\n elif layer_type == 'flatten':\n if len(shape) == 3:\n dim_x, dim_y, dim_out = shape\n shape = (dim_x * dim_y * dim_out,)\n block.add_module(layer_type, View(-1, shape[0]))\n elif layer_type == 'reshape':\n if args is None:\n raise ValueError('reshape needs args')\n new_shape = args\n dim_new = 1\n dim_out = 1\n for s in new_shape:\n dim_new *= s\n for s in shape:\n dim_out *= s\n if dim_new != dim_out:\n raise ValueError('New shape {} not compatible with old shape {}.'\n .format(new_shape, shape))\n block.add_module(layer_type, View((-1,) + new_shape))\n shape = new_shape[::-1]\n elif layer_type is None:\n pass\n else:\n raise NotImplementedError(\n 'Layer {} not supported. Use {}'.format(layer_type, self._supported_types))\n return shape\n\n def add_conv_layer(self, block, shape, args=None, bn=False, bias=None, init=None, init_args=None):\n '''Adds a convolutional layer to the block.\n\n Args:\n block: nn.Sequential to add conv layer to.\n shape: Shape of the input.\n args: conv layer arguments (n_units, filter size, stride, padding)\n bn (bool): Batch normalization.\n bias (bool): Controls bias in layer.\n init: Initialization of layer.\n init_args: Arguments for initialization.\n\n Returns:\n tuple: Output shape.\n\n '''\n dim_x, dim_y, dim_in = shape\n try:\n dim_out, f, s, p = args\n except:\n raise ValueError('args must be provided for conv layer and in format '\n '`(depth, kernel size, stride, padding)`')\n\n if bias is None:\n bias = not (bn)\n conv = nn.Conv2d(dim_in, dim_out, kernel_size=f, stride=s, padding=p, bias=bias)\n if init:\n init = getattr(nn.init, init)\n init(conv.weight, **init_args)\n block.add_module('conv', conv)\n dim_x, dim_y = self.next_conv_size(dim_x, dim_y, f, s, p)\n\n return (dim_x, dim_y, dim_out)\n\n def add_tconv_layer(self, block, shape, args=None, bn=False, bias=None):\n '''Adds a transpose convolutional layer to the block.\n\n Args:\n block: nn.Sequential to add tconv layer to.\n shape: Shape of the input.\n args: tconv layer arguments (n_units, filter size, stride, padding)\n bn (bool): Batch normalization.\n bias (bool): Controls bias in layer.\n\n Returns:\n tuple: Output shape.\n\n '''\n\n dim_x, dim_y, dim_in = shape\n try:\n dim_out, f, s, p = args\n except:\n raise ValueError('args must be provided for tconv layer and in format '\n '`(depth, kernel size, stride, padding)`')\n\n if bias is None:\n bias = not (bn)\n tconv = nn.ConvTranspose2d(dim_in, dim_out, kernel_size=f, stride=s, padding=p, bias=bias)\n block.add_module('tconv', tconv)\n dim_x, dim_y = self.next_tconv_size(dim_x, dim_y, f, s, p)\n\n return (dim_x, dim_y, dim_out)\n\n def add_linear_layer(self, block, shape, args=None, bn=False, bias=None, init=None, init_args=None):\n '''Adds a linear layer\n\n Args:\n block: nn.Sequential to add linear layer to.\n shape: Shape of the input.\n args: linear layer arguments (n_units,)\n bn (bool): Batch normalization.\n bias (bool): Controls bias in layer.\n init: Initialization of layer.\n init_args: Arguments for initialization.\n\n Returns:\n tuple: Output shape.\n\n '''\n\n try:\n dim_out, = 
args\n except:\n raise ValueError('args must be provided for fully-connected layer and in format '\n '`(depth,)`')\n\n dim_in, = shape\n if bias is None:\n bias = not (bn)\n layer = nn.Linear(dim_in, dim_out, bias=bias)\n if init:\n init = getattr(nn.init, init)\n init(layer.weight, **init_args)\n block.add_module('fc', layer)\n\n return (dim_out,)\n\n def finish_block(self, block, shape, bn=False, ln=False, do=False, act=None, pool=None):\n '''Finishes a block.\n\n Adds batch norm, dropout, activation, pooling.\n\n Args:\n block (nn.Sequential): Block to add conv layer to.\n shape (tuple): Shape of the input.\n bn (bool): Batch normalization.\n ln (bool): Layer normalization.\n do (float): Dropout.\n act (str): Activation.\n pool (tuple): Pooling. In format (pool type, kernel size, stride).\n\n Returns:\n\n '''\n if len(shape) == 1:\n BN = nn.BatchNorm1d\n DO = nn.Dropout\n elif len(shape) == 3:\n BN = nn.BatchNorm2d\n DO = nn.Dropout2d\n else:\n raise NotImplementedError('Shape {} not supported'.format(shape))\n LN = nn.LayerNorm\n\n if ln and bn:\n raise ValueError('Use only one sort of normalization.')\n\n dim_out = shape[-1]\n\n if do:\n block.add_module('do', DO(p=do))\n if bn:\n block.add_module('bn', BN(dim_out))\n if ln:\n block.add_module('ln', LN(dim_out))\n\n if act:\n nonlinearity = get_nonlinearity(act)\n block.add_module(nonlinearity.__class__.__name__, nonlinearity)\n\n if pool:\n if len(shape) == 1:\n raise ValueError('Cannot pool on 1d tensor.')\n (pool_type, kernel, stride) = pool\n Pool = getattr(nn, pool_type)\n block.add_module('pool', Pool(kernel_size=kernel, stride=stride))\n dim_x, dim_y, dim_out = shape\n dim_x, dim_y = self.next_conv_size(dim_x, dim_y, kernel, stride, 0)\n shape = (dim_x, dim_y, dim_out)\n\n return shape\n\n def next_conv_size(self, dim_x, dim_y, k, s, p):\n '''Infers the next size of a convolutional layer.\n\n Args:\n dim_x: First dimension.\n dim_y: Second dimension.\n k: Kernel size.\n s: Stride.\n p: Padding.\n\n Returns:\n (int, int): (First output dimension, Second output dimension)\n\n '''\n\n def infer_conv_size(w, k, s, p):\n '''Infers the next size after convolution.\n\n Args:\n w: Input size.\n k: Kernel size.\n s: Stride.\n p: Padding.\n\n Returns:\n int: Output size.\n\n '''\n x = (w - k + 2 * p) // s + 1\n return x\n\n if isinstance(k, int):\n kx, ky = (k, k)\n else:\n kx, ky = k\n\n if isinstance(s, int):\n sx, sy = (s, s)\n else:\n sx, sy = s\n\n if isinstance(p, int):\n px, py = (p, p)\n else:\n px, py = p\n return (infer_conv_size(dim_x, kx, sx, px),\n infer_conv_size(dim_y, ky, sy, py))\n\n def next_tconv_size(self, dim_x, dim_y, k, s, p):\n '''Infers the next size of a transpose convolutional layer.\n\n Args:\n dim_x: First dimension.\n dim_y: Second dimension.\n k: Kernel size.\n s: Stride.\n p: Padding.\n\n Returns:\n (int, int): (First output dimension, Second output dimension)\n\n '''\n\n def infer_conv_size(w, k, s, p):\n '''Infers the next size after convolution.\n\n Args:\n w: Input size.\n k: Kernel size.\n s: Stride.\n p: Padding.\n\n Returns:\n int: Output size.\n\n '''\n x = s * (w - 1) - 2 * p + k\n return x\n\n if isinstance(k, int):\n kx, ky = (k, k)\n else:\n kx, ky = k\n\n if isinstance(s, int):\n sx, sy = (s, s)\n else:\n sx, sy = s\n\n if isinstance(p, int):\n px, py = (p, p)\n else:\n px, py = p\n return (infer_conv_size(dim_x, kx, sx, px),\n infer_conv_size(dim_y, ky, sy, py))\n\n def forward(self, x: torch.Tensor, return_full_list=False, clip_grad=False):\n '''Forward pass\n\n Args:\n x: Input.\n 
return_full_list: Optional, returns all layer outputs.\n\n Returns:\n torch.Tensor or list of torch.Tensor.\n\n '''\n\n def _clip_grad(v, min, max):\n v_tmp = v.expand_as(v)\n v_tmp.register_hook(lambda g: g.clamp(min, max))\n return v_tmp\n\n out = []\n for layer in self.layers:\n x = layer(x)\n if clip_grad:\n x = _clip_grad(x, -clip_grad, clip_grad)\n out.append(x)\n\n if not return_full_list:\n out = out[-1]\n\n return out\n\n\nclass FoldedConvnet(Convnet):\n '''Convnet with strided crop input.\n\n '''\n\n _supported_types = ('linear', 'conv', 'tconv', 'flatten', 'fold', 'unfold', None)\n\n def create_layers(self, shape, crop_size=8, layers=None):\n '''Creates layers\n\n Args:\n shape: Shape of input.\n crop_size: Size of crops\n layers: list of layer arguments.\n '''\n\n self.crop_size = crop_size\n self.layers, self.shapes = self.create_sequential(shape, layers=layers)\n\n def create_sequential(self, shape, layers=None):\n '''Creates a sequence of layers.\n\n Args:\n shape: Input shape.\n layers: list of layer arguments.\n\n Returns:\n nn.Sequential: a sequence of convolutional layers.\n\n '''\n\n self.final_size = None\n return super().create_sequential(shape, layers=layers)\n\n def handle_layer(self, block, shape, layer, layer_type):\n '''Handles the layer arguments and adds layer to the block.\n\n Args:\n block: nn.Sequential to add modules to.\n shape: Shape of the input.\n layer: Layer arguments.\n layer_type: Type of layer.\n\n Returns:\n tuple: Output shape.\n '''\n if layer_type == 'unfold':\n dim_x, dim_y, dim_out = shape\n self.final_size = 2 * (dim_x // self.crop_size) - 1\n block.add_module(layer_type, Unfold(dim_x, self.crop_size))\n shape = (self.crop_size, self.crop_size, dim_out)\n elif layer_type == 'fold':\n if self.final_size is None:\n raise ValueError('Cannot fold without unfolding first.')\n dim_out = shape[2]\n block.add_module(layer_type, Fold(self.final_size))\n shape = (self.final_size, self.final_size, dim_out)\n elif layer_type is None:\n pass\n else:\n shape = super().handle_layer(block, shape, layer, layer_type)\n\n return shape\n" ]
[ [ "torch.set_grad_enabled", "torch.ones_like" ], [ "torch.nn.Linear", "torch.nn.Sequential", "torch.nn.Conv2d", "torch.nn.ConvTranspose2d" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
tenghehan/reid_without_id
[ "d1d0ff273b1ef19fc6da8cbbf210527779b37455", "d1d0ff273b1ef19fc6da8cbbf210527779b37455" ]
[ "fastreid/modeling/losses/triplet_loss.py", "fastreid/data/build.py" ]
[ "# encoding: utf-8\n\"\"\"\n@author: liaoxingyu\n@contact: [email protected]\n\"\"\"\n\nimport torch\nimport torch.nn.functional as F\n\nfrom fastreid.utils import comm\nfrom fastreid.layers import GatherLayer\nfrom .utils import concat_all_gather, euclidean_dist, normalize\n\n\ndef softmax_weights(dist, mask):\n max_v = torch.max(dist * mask, dim=1, keepdim=True)[0]\n diff = dist - max_v\n Z = torch.sum(torch.exp(diff) * mask, dim=1, keepdim=True) + 1e-6 # avoid division by zero\n W = torch.exp(diff) * mask / Z\n return W\n\n\ndef hard_example_mining(dist_mat, is_pos, is_neg):\n \"\"\"For each anchor, find the hardest positive and negative sample.\n Args:\n dist_mat: pair wise distance between samples, shape [N, M]\n is_pos: positive index with shape [N, M]\n is_neg: negative index with shape [N, M]\n Returns:\n dist_ap: pytorch Variable, distance(anchor, positive); shape [N]\n dist_an: pytorch Variable, distance(anchor, negative); shape [N]\n p_inds: pytorch LongTensor, with shape [N];\n indices of selected hard positive samples; 0 <= p_inds[i] <= N - 1\n n_inds: pytorch LongTensor, with shape [N];\n indices of selected hard negative samples; 0 <= n_inds[i] <= N - 1\n NOTE: Only consider the case in which all labels have same num of samples,\n thus we can cope with all anchors in parallel.\n \"\"\"\n\n assert len(dist_mat.size()) == 2\n N = dist_mat.size(0)\n\n # `dist_ap` means distance(anchor, positive)\n # both `dist_ap` and `relative_p_inds` with shape [N, 1]\n dist_ap, relative_p_inds = torch.max(\n dist_mat[is_pos].contiguous().view(N, -1), 1, keepdim=True)\n # `dist_an` means distance(anchor, negative)\n # both `dist_an` and `relative_n_inds` with shape [N, 1]\n dist_an, relative_n_inds = torch.min(\n dist_mat[is_neg].contiguous().view(N, -1), 1, keepdim=True)\n\n # shape [N]\n dist_ap = dist_ap.squeeze(1)\n dist_an = dist_an.squeeze(1)\n\n return dist_ap, dist_an\n\n\ndef weighted_example_mining(dist_mat, is_pos, is_neg):\n \"\"\"For each anchor, find the weighted positive and negative sample.\n Args:\n dist_mat: pytorch Variable, pair wise distance between samples, shape [N, N]\n is_pos:\n is_neg:\n Returns:\n dist_ap: pytorch Variable, distance(anchor, positive); shape [N]\n dist_an: pytorch Variable, distance(anchor, negative); shape [N]\n \"\"\"\n assert len(dist_mat.size()) == 2\n\n is_pos = is_pos.float()\n is_neg = is_neg.float()\n dist_ap = dist_mat * is_pos\n dist_an = dist_mat * is_neg\n\n weights_ap = softmax_weights(dist_ap, is_pos)\n weights_an = softmax_weights(-dist_an, is_neg)\n\n dist_ap = torch.sum(dist_ap * weights_ap, dim=1)\n dist_an = torch.sum(dist_an * weights_an, dim=1)\n\n return dist_ap, dist_an\n\n\ndef triplet_loss(embedding, targets, margin, norm_feat, hard_mining):\n r\"\"\"Modified from Tong Xiao's open-reid (https://github.com/Cysu/open-reid).\n Related Triplet Loss theory can be found in paper 'In Defense of the Triplet\n Loss for Person Re-Identification'.\"\"\"\n\n if norm_feat: embedding = normalize(embedding, axis=-1)\n\n # For distributed training, gather all features from different process.\n if comm.get_world_size() > 1:\n all_embedding = torch.cat(GatherLayer.apply(embedding), dim=0)\n all_targets = concat_all_gather(targets)\n else:\n all_embedding = embedding\n all_targets = targets\n\n dist_mat = euclidean_dist(embedding, all_embedding)\n\n N, M = dist_mat.size()\n is_pos = targets.view(N, 1).expand(N, M).eq(all_targets.view(M, 1).expand(M, N).t())\n is_neg = targets.view(N, 1).expand(N, M).ne(all_targets.view(M, 1).expand(M, 
N).t())\n\n if hard_mining:\n dist_ap, dist_an = hard_example_mining(dist_mat, is_pos, is_neg)\n else:\n dist_ap, dist_an = weighted_example_mining(dist_mat, is_pos, is_neg)\n\n y = dist_an.new().resize_as_(dist_an).fill_(1)\n\n if margin > 0:\n loss = F.margin_ranking_loss(dist_an, dist_ap, y, margin=margin)\n else:\n loss = F.soft_margin_loss(dist_an - dist_ap, y)\n # fmt: off\n if loss == float('Inf'): loss = F.margin_ranking_loss(dist_an, dist_ap, y, margin=0.3)\n # fmt: on\n\n return loss\n", "# encoding: utf-8\n\"\"\"\n@author: l1aoxingyu\n@contact: [email protected]\n\"\"\"\n\nimport os\nimport torch\nfrom torch._six import container_abcs, string_classes, int_classes\nfrom torch.utils.data import DataLoader\nfrom fastreid.utils import comm\n\nfrom . import samplers\nfrom .common import CommDataset\nfrom .datasets import DATASET_REGISTRY\nfrom .transforms import build_transforms\n\n_root = os.getenv(\"FASTREID_DATASETS\", \"datasets\")\n\n\ndef build_reid_train_loader(cfg):\n cfg = cfg.clone()\n cfg.defrost()\n\n train_items = list()\n for d in cfg.DATASETS.NAMES:\n dataset = DATASET_REGISTRY.get(d)(root=_root, dataset_name=cfg.SPECIFIC_DATASET, combineall=cfg.DATASETS.COMBINEALL)\n if comm.is_main_process():\n dataset.show_train()\n train_items.extend(dataset.train)\n\n iters_per_epoch = len(train_items) // cfg.SOLVER.IMS_PER_BATCH\n cfg.SOLVER.MAX_ITER *= iters_per_epoch\n train_transforms = build_transforms(cfg, is_train=True)\n train_set = CommDataset(train_items, train_transforms, relabel=True)\n\n num_workers = cfg.DATALOADER.NUM_WORKERS\n num_instance = cfg.DATALOADER.NUM_INSTANCE\n mini_batch_size = cfg.SOLVER.IMS_PER_BATCH // comm.get_world_size()\n\n if cfg.DATALOADER.PK_SAMPLER:\n if cfg.DATALOADER.NAIVE_WAY:\n data_sampler = samplers.NaiveIdentitySampler(train_set.img_items,\n cfg.SOLVER.IMS_PER_BATCH, num_instance)\n else:\n data_sampler = samplers.BalancedIdentitySampler(train_set.img_items,\n cfg.SOLVER.IMS_PER_BATCH, num_instance)\n else:\n data_sampler = samplers.TrainingSampler(len(train_set))\n batch_sampler = torch.utils.data.sampler.BatchSampler(data_sampler, mini_batch_size, True)\n\n train_loader = torch.utils.data.DataLoader(\n train_set,\n num_workers=num_workers,\n batch_sampler=batch_sampler,\n collate_fn=fast_batch_collator,\n pin_memory=True,\n )\n return train_loader\n\n\ndef build_reid_test_loader(cfg, dataset_name):\n cfg = cfg.clone()\n cfg.defrost()\n\n dataset = DATASET_REGISTRY.get(dataset_name)(root=_root, dataset_name=cfg.SPECIFIC_DATASET)\n if comm.is_main_process():\n dataset.show_test()\n test_items = dataset.query + dataset.gallery\n\n test_transforms = build_transforms(cfg, is_train=False)\n test_set = CommDataset(test_items, test_transforms, relabel=False)\n\n mini_batch_size = cfg.TEST.IMS_PER_BATCH // comm.get_world_size()\n data_sampler = samplers.InferenceSampler(len(test_set))\n batch_sampler = torch.utils.data.BatchSampler(data_sampler, mini_batch_size, False)\n test_loader = DataLoader(\n test_set,\n batch_sampler=batch_sampler,\n num_workers=0, # save some memory\n collate_fn=fast_batch_collator,\n pin_memory=True,\n )\n return test_loader, len(dataset.query)\n\n\ndef trivial_batch_collator(batch):\n \"\"\"\n A batch collator that does nothing.\n \"\"\"\n return batch\n\n\ndef fast_batch_collator(batched_inputs):\n \"\"\"\n A simple batch collator for most common reid tasks\n \"\"\"\n elem = batched_inputs[0]\n if isinstance(elem, torch.Tensor):\n out = torch.zeros((len(batched_inputs), *elem.size()), dtype=elem.dtype)\n for i, 
tensor in enumerate(batched_inputs):\n out[i] += tensor\n return out\n\n elif isinstance(elem, container_abcs.Mapping):\n return {key: fast_batch_collator([d[key] for d in batched_inputs]) for key in elem}\n\n elif isinstance(elem, float):\n return torch.tensor(batched_inputs, dtype=torch.float64)\n elif isinstance(elem, int_classes):\n return torch.tensor(batched_inputs)\n elif isinstance(elem, string_classes):\n return batched_inputs\n" ]
[ [ "torch.max", "torch.nn.functional.margin_ranking_loss", "torch.sum", "torch.nn.functional.soft_margin_loss", "torch.exp" ], [ "torch.utils.data.DataLoader", "torch.utils.data.sampler.BatchSampler", "torch.utils.data.BatchSampler", "torch.tensor" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
zuimeiyujianni/MobileStyleGAN.pytorch
[ "2d18a80bed6be3ec0eec703cc9be50616f2401ee" ]
[ "core/model_zoo.py" ]
[ "import json\nimport torch\nfrom core.utils import download_ckpt\n\ndef model_zoo(name, zoo_path=\"configs/model_zoo.json\"):\n zoo = json.load(open(zoo_path))\n if name in zoo:\n ckpt = download_ckpt(**zoo[name])\n else:\n ckpt = torch.load(name, map_location=\"cpu\")\n return ckpt\n" ]
[ [ "torch.load" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
taruninani/SudokuSolver
[ "66ca33bb6ce4bce59128ebd1b6f16fa704a8e5f1", "66ca33bb6ce4bce59128ebd1b6f16fa704a8e5f1", "66ca33bb6ce4bce59128ebd1b6f16fa704a8e5f1" ]
[ "SudokuSolver.py", "Examples/hardClassic.py", "SudokuBoard.py" ]
[ "from SudokuSolver.Logger import Logger, LOGLEVEL\nfrom SudokuSolver.SudokuBoard import SudokuBoard\nimport numpy as np\n\nclass SudokuSolver(object):\n logger: Logger\n board: SudokuBoard\n\n def log(self,string):\n \"\"\"\n Logger\n Parameters\n ----------\n string\n\n Returns\n -------\n\n \"\"\"\n print(string)\n\n def __init__(self,board):\n \"\"\"\n\n \"\"\"\n #Initialize\n self.logger=Logger()\n self.board=board\n\n def iterativePass(self):\n updateMade=False\n updated_i=[]\n updated_j=[]\n # iterate through each row\n shape=self.board.getBoardShape()\n for i in range(0,shape[0]):\n for j in range(0,shape[1]):\n possibleValues=self.board.getPossibleValues(i,j)\n if possibleValues.size == 1:\n self.board.markValue(i,j,possibleValues[0])\n updateMade=True\n updated_i+=[i]\n updated_j+=[j]\n\n # check for each subcell 3x3 if one of the possible Values occurs just once\n for subcell_i in range(0, int(shape[0] / 3)):\n for subcell_j in range(0, int(shape[1] / 3)):\n values = np.zeros(self.board.getValidValues().shape)\n # count how many possible cells does each value can be assigned to\n for ii in range(0, int(shape[0]/ 3)):\n for jj in range(0, int(shape[1]/ 3)):\n idx_i = ii + subcell_i * 3\n idx_j = jj + subcell_j * 3\n possibleValues=self.board.getPossibleValues(idx_i,idx_j)\n for val in possibleValues:\n values[val-1]+=1\n # check if one of the numbers can be filled in only one cell\n [all_i,]=np.where(values==1)\n for idx in all_i:\n # count of the valid values how many times does the\n val = idx + 1\n for ii in range(0, int(shape[0] / 3)):\n for jj in range(0, int(shape[1] / 3)):\n idx_i = ii + subcell_i * 3\n idx_j = jj + subcell_j * 3\n possibleValues = self.board.getPossibleValues(idx_i, idx_j)\n if np.any(possibleValues==(val)):\n # if true then this value can be only assigned here\n self.board.markValue(idx_i,idx_j,val)\n updateMade = True\n updated_i += [idx_i]\n updated_j += [idx_j]\n\n # check for each row if one of the possible Values occurs just once\n for i in range(0,shape[0]):\n values = np.zeros(self.board.getValidValues().shape)\n for j in range(0, shape[1]):\n possibleValues = self.board.getPossibleValues(i, j)\n for val in possibleValues:\n values[val - 1] += 1\n # check if one of the numbers can be filled in only one cell\n [all_i, ] = np.where(values == 1)\n for idx in all_i:\n # count of the valid values how many times does the\n val = idx + 1\n for j in range(0, shape[1]):\n possibleValues = self.board.getPossibleValues(i, j)\n if np.any(possibleValues==(val)):\n # if true then this value can be only assigned here\n self.board.markValue(i,j,val)\n updateMade = True\n updated_i += [i]\n updated_j += [j]\n\n # check for each col if one of the possible Values occurs just once\n for j in range(0, shape[1]):\n values = np.zeros(self.board.getValidValues().shape)\n for i in range(0, shape[0]):\n possibleValues = self.board.getPossibleValues(i, j)\n for val in possibleValues:\n values[val - 1] += 1\n # check if one of the numbers can be filled in only one cell\n [all_j, ] = np.where(values == 1)\n for idx in all_j:\n # count of the valid values how many times does the\n val = idx + 1\n for i in range(0, shape[0]):\n possibleValues = self.board.getPossibleValues(i, j)\n if np.any(possibleValues == (val)):\n # if true then this value can be only assigned here\n self.board.markValue(i, j, val)\n updateMade = True\n updated_i += [i]\n updated_j += [j]\n\n return [updateMade,updated_i,updated_j]\n\n def bruteForceAssume(self,i,j,values):\n atLeastOneChangeMade=False\n 
for val in values:\n self.logger.info('AssumptionMade : {0},{1} has value {2}'.format(i,j,val))\n self.board.markValue(i,j,val)\n [atLeastOneChangeMade,updated_i,updated_j]=self.fullIterativePass()\n\n if not self.board.isBoardValid() or not atLeastOneChangeMade:\n self.logger.warn('Assumption didnt work resetting board.')\n self.board.clearValues(updated_i,updated_j)\n atLeastOneChangeMade=False\n updated_i=[]\n updated_j=[]\n\n return [atLeastOneChangeMade,updated_i,updated_j]\n\n def fullIterativePass(self):\n # run with no assumptions\n [updatedBoard,updated_i,updated_j] = self.iterativePass()\n atLeastOneChangeMade = False\n inc = 1\n while updatedBoard:\n inc += 1\n atLeastOneChangeMade = True\n [updatedBoard,new_updated_i,new_updated_j] = self.iterativePass()\n updated_i+=new_updated_i\n updated_j+=new_updated_j\n self.logger.info('Iteration Number : {0}'.format(inc))\n\n return [atLeastOneChangeMade,updated_i,updated_j]\n\n\n def solve(self):\n \"\"\"\n Solve the given sudoku board\n\n Returns\n -------\n\n \"\"\"\n\n self.fullIterativePass()\n\n self.backtrack(0)\n\n def backtrack(self,depth):\n self.logger.debug('Enter AT depth : {0}'.format(depth))\n if self.board.isBoardComplete():\n return True\n # get the first input with the least possible values\n possibleValuesCount=self.board.getPossibleValuesCount()\n [all_i,all_j]=np.where(possibleValuesCount != 0)\n if all_i.size == 0:\n self.logger.warn('Exhausted all options')\n return False\n for idx in range(0,all_i.size):\n i=all_i[idx]\n j=all_j[idx]\n possibleValues=self.board.getPossibleValues(i,j)\n\n for val in possibleValues:\n self.logger.info('AssumptionMade : {0},{1} has value {2}'.format(i,j,val))\n self.board.markValue(i,j,val)\n [atLeastOneChangeMade,updated_i,updated_j]=self.fullIterativePass()\n\n # if not self.board.isBoardValid():\n # self.log('Assumption didnt work resetting board.')\n # self.board.clearValues(updated_i,updated_j)\n updated_i+=[i]\n updated_j+=[j]\n result = self.backtrack(depth+1)\n if result:\n return True\n\n self.board.clearValues(updated_i, updated_j)\n\n self.logger.debug('Left AT depth : {0}'.format(depth))\n", "from SudokuSolver.Boards import ClassicBoard\nfrom SudokuSolver.Solvers import BackTrackSolver\nimport numpy as np\n\nboard = ClassicBoard()\n# hard board\ninput = [\n [0, 0, 0, 0, 8, 0, 0, 0, 5],\n [8, 5, 1, 4, 0, 9, 0, 0, 0],\n [4, 0, 0, 0, 2, 0, 8, 0, 0],\n [0, 6, 8, 0, 0, 7, 9, 0, 0],\n [0, 4, 0, 0, 0, 0, 0, 0, 7],\n [1, 9, 0, 0, 0, 3, 2, 0, 4],\n [0, 0, 4, 0, 0, 0, 0, 6, 0],\n [0, 0, 0, 0, 0, 2, 0, 0, 0],\n [0, 1, 9, 6, 7, 0, 0, 0, 0]\n]\n\nboard.boardInput(np.array(input), 0)\n\nbackTracker = BackTrackSolver(board)\nbackTracker.solve()\n\nassert (board.isBoardComplete())\n", "from SudokuSolver.Logger import Logger, LOGLEVEL\nimport numpy as np\nimport copy\n\nclass SudokuBoard(object):\n logger = None\n board = None\n leftValues = None\n numRows = None\n numCols = None\n possibleMat=None\n dummy=None\n\n def __init__(self,input):\n # Initialize\n self.logger = Logger()\n self.numRows=9\n self.numCols=9\n self.dummy=0\n self.board = np.zeros(shape=(self.numRows,self.numCols))\n self.possibleMat=[]\n for i in range(0,self.numRows):\n self.possibleMat+=[[self.dummy]*9]\n for j in range(0,self.numCols):\n self.possibleMat[i][j]=self.getValidValues()\n\n assert(self.numCols==self.numRows)\n\n maxVal=max(self.numRows,self.numCols)\n\n self.leftValues = np.array([maxVal] * maxVal) # all numbers must come only 9 times\n\n # check which numbers have been entered and remove them from 
the leftValues\n for i in range(0, input.shape[0]):\n for j in range(0, input.shape[1]):\n if input[i][j] != self.dummy:\n self.markValue(i, j, input[i][j])\n\n def getValidValues(self):\n return np.array(list(range(1,self.numRows+1)))\n\n def clearValue(self, i, j):\n self.clearValues([i],[j])\n\n def clearValues(self,all_i,all_j):\n for idx in range(0,len(all_i)):\n self.markValue(all_i[idx],all_j[idx],self.dummy)\n self.rebuildPossiblityMat()\n\n def rebuildPossiblityMat(self):\n self.leftValues = np.array([self.numRows] * self.numRows) # all numbers must come only 9 times\n for i in range(0,self.numRows):\n for j in range(0,self.numCols):\n self.possibleMat[i][j]=self.getValidValues()\n\n for i in range(0,self.numRows):\n for j in range(0,self.numCols):\n if self.board[i,j] != self.dummy:\n self.markValue(i,j,self.getValue(i,j).astype(int))\n\n def updatePossibilityMat(self,i,j):\n \"\"\"\n Update the Possibility Mat when i,j place is filled\n Parameters\n ----------\n i\n j\n\n Returns\n -------\n\n \"\"\"\n # value filled out\n val=self.board[i,j]\n if self.numRows == 9:\n # remove the possibilities for that position\n self.possibleMat[i][j] = np.array([])\n # remove it for column and row elements\n for k in range(0, self.numRows):\n self.removeValueAsPossiblity(i,k,val)\n self.removeValueAsPossiblity(k,j,val)\n\n # remove it for the subcell 3x3\n subcell_i = int(i / 3)\n subcell_j = int(j / 3)\n for ii in range(0, int(self.numRows / 3)):\n for jj in range(0, int(self.numCols / 3)):\n idx_i = ii + subcell_i * 3\n idx_j = jj + subcell_j * 3\n self.removeValueAsPossiblity(idx_i,idx_j,val)\n\n def removeValueAsPossiblity(self,i,j,val):\n self.possibleMat[i][j]=np.setdiff1d(self.possibleMat[i][j], val)\n\n def markValue(self, i, j, val):\n self.board[i, j] = val\n if val==self.dummy:\n # add the removed value back in\n self.leftValues[self.board[i,j].astype(int)-1]+=1\n return\n else:\n self.leftValues[val - 1] -= 1\n\n # update the possiblity matrix\n self.updatePossibilityMat(i,j)\n\n def getPossibleValues(self,i,j) -> np.array:\n return self.possibleMat[i][j]\n\n def getPossibleValuesCount(self) -> np.array:\n output=np.zeros(shape=(self.numRows,self.numCols))\n for i in range(0,self.numRows):\n for j in range(0,self.numCols):\n output[i,j]=self.possibleMat[i][j].size\n\n return output\n\n def getPossiblePlacesForValue(self,val):\n output=np.zeros(shape=(self.numRows,self.numCols))\n for i in range(0,self.numRows):\n for j in range(0,self.numCols):\n if val in self.possibleMat[i][j]:\n output[i,j]=1\n return output\n\n def getFilledPlacesForValue(self,val):\n return ((self.board==val) * 1)\n\n def isBoardComplete(self):\n validValues=self.getValidValues()\n for i in range(0,self.numRows):\n values=self.getRow(i)\n diff=np.setdiff1d(validValues,values)\n if diff.size != 0:\n return False\n return True\n\n def isBoardValid(self):\n # validate rows\n for i in range(0,self.numRows):\n values=self.getRow(i)\n filledValues=np.delete(values,np.where(values==self.dummy)[0])\n if np.unique(filledValues).size != filledValues.size:\n return False\n\n # validate cols\n for i in range(0,self.numCols):\n values=self.getCol(i)\n filledValues=np.setdiff1d(values,self.dummy)\n if np.unique(filledValues).size != filledValues.size:\n return False\n\n # validate subcell 3x3\n for subcell_i in range(0,int(self.numRows / 3)):\n for subcell_j in range(0,int(self.numCols / 3)):\n values=[]\n for ii in range(0, int(self.numRows / 3)):\n for jj in range(0, int(self.numCols / 3)):\n idx_i = ii + subcell_i * 3\n 
idx_j = jj + subcell_j * 3\n values+=[self.board[idx_i,idx_j]]\n values=np.array(values)\n filledValues=np.setdiff1d(values,self.dummy)\n if np.unique(filledValues).size != filledValues.size:\n return False\n\n return True\n\n def isPlaceEmpty(self,i,j):\n return self.board[i,j]==self.dummy\n\n def getValue(self,i,j):\n return self.board[i,j]\n\n def getCol(self,i):\n return self.board[:,i]\n\n def getRow(self,i):\n return self.board[i,:]\n\n def getBoardShape(self):\n return self.board.shape\n\n def log(self,string):\n print(string)\n\n def getEmptyPlacesForRow(self,i):\n output = []\n for j in range(0, self.board.shape[1]):\n if self.isPlaceEmpty(i,j):\n output += [j]\n return np.array(output)\n\n def getEmptyPlacesForCol(self,j):\n output = []\n for i in range(0, self.board.shape[0]):\n if self.isPlaceEmpty(i,j):\n output += [i]\n return np.array(output)\n\n def getMissingValuesForRow(self,i):\n values=self.getRow(i)\n possible=self.getValidValues()\n output=np.array(list(set(possible).difference(set(values))))\n return output\n\n def getMissingValuesForCol(self,i):\n values=self.getCol(i)\n possible = self.getValidValues()\n output=np.array(list(set(possible).difference(set(values))))\n return output\n\n def isValueInRow(self,val,i):\n return val in self.board[i,:]\n\n def isValueInCol(self,val,j):\n return val in self.board[:,j]\n\n def getCopy(self):\n return copy.deepcopy(self)\n" ]
[ [ "numpy.where", "numpy.any" ], [ "numpy.array" ], [ "numpy.unique", "numpy.setdiff1d", "numpy.array", "numpy.zeros", "numpy.where" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
csbhagav/allennlp
[ "4c99f8e82f7fd70c86652109bfca5282d470e981" ]
[ "allennlp/tests/models/semantic_parsing/wikitables/wikitables_mml_semantic_parser_test.py" ]
[ "# pylint: disable=invalid-name,no-self-use,protected-access\nfrom collections import namedtuple\nimport os\nimport pytest\n\nfrom flaky import flaky\nfrom numpy.testing import assert_almost_equal\nimport torch\n\nfrom allennlp.common.testing import ModelTestCase\nfrom allennlp.training.metrics.wikitables_accuracy import SEMPRE_ABBREVIATIONS_PATH, SEMPRE_GRAMMAR_PATH\n\[email protected]\nclass WikiTablesMmlSemanticParserTest(ModelTestCase):\n def setUp(self):\n self.should_remove_sempre_abbreviations = not os.path.exists(SEMPRE_ABBREVIATIONS_PATH)\n self.should_remove_sempre_grammar = not os.path.exists(SEMPRE_GRAMMAR_PATH)\n\n # The model tests are run with respect to the module root, so check if abbreviations\n # and grammar already exist there (since we want to clean up module root after test)\n self.module_root_abbreviations_path = self.MODULE_ROOT / \"data\" / \"abbreviations.tsv\"\n self.module_root_grammar_path = self.MODULE_ROOT / \"data\" / \"grow.grammar\"\n self.should_remove_root_sempre_abbreviations = not os.path.exists(self.module_root_abbreviations_path)\n self.should_remove_root_sempre_grammar = not os.path.exists(self.module_root_grammar_path)\n\n super(WikiTablesMmlSemanticParserTest, self).setUp()\n self.set_up_model(str(self.FIXTURES_ROOT / \"semantic_parsing\" / \"wikitables\" / \"experiment.json\"),\n str(self.FIXTURES_ROOT / \"data\" / \"wikitables\" / \"sample_data.examples\"))\n\n def tearDown(self):\n super().tearDown()\n # We don't want to leave generated files around just from running tests...\n if self.should_remove_sempre_abbreviations and os.path.exists(SEMPRE_ABBREVIATIONS_PATH):\n os.remove(SEMPRE_ABBREVIATIONS_PATH)\n if self.should_remove_sempre_grammar and os.path.exists(SEMPRE_GRAMMAR_PATH):\n os.remove(SEMPRE_GRAMMAR_PATH)\n if self.should_remove_root_sempre_abbreviations and os.path.exists(self.module_root_abbreviations_path):\n os.remove(self.module_root_abbreviations_path)\n if self.should_remove_root_sempre_grammar and os.path.exists(self.module_root_grammar_path):\n os.remove(self.module_root_grammar_path)\n\n def test_model_can_train_save_and_load(self):\n self.ensure_model_can_train_save_and_load(self.param_file)\n\n def test_mixture_no_features_model_can_train_save_and_load(self):\n param_file = self.FIXTURES_ROOT / 'semantic_parsing' / 'wikitables' / 'experiment-mixture.json'\n self.ensure_model_can_train_save_and_load(param_file)\n\n @flaky\n def test_elmo_no_features_can_train_save_and_load(self):\n param_file = self.FIXTURES_ROOT / 'semantic_parsing' / 'wikitables' / 'experiment-elmo-no-features.json'\n self.ensure_model_can_train_save_and_load(param_file, tolerance=1e-2)\n\n def test_get_neighbor_indices(self):\n worlds, num_entities = self.get_fake_worlds()\n tensor = torch.LongTensor([])\n\n neighbor_indices = self.model._get_neighbor_indices(worlds, num_entities, tensor)\n\n # Checks for the correct shape meaning dimension 2 has size num_neighbors,\n # padding of -1 is used, and correct neighbor indices.\n assert_almost_equal(neighbor_indices.data.numpy(), [[[-1, -1],\n [3, 4],\n [3, 4],\n [1, 2],\n [1, 2]],\n [[-1, -1],\n [2, -1],\n [1, -1],\n [-1, -1],\n [-1, -1]]])\n\n def test_get_type_vector(self):\n worlds, num_entities = self.get_fake_worlds()\n tensor = torch.LongTensor([])\n type_vector, _ = self.model._get_type_vector(worlds, num_entities, tensor)\n # Verify that both types are present and padding used for non existent entities.\n assert_almost_equal(type_vector.data.numpy(), [[0, 1, 1, 3, 3],\n [0, 1, 3, 0, 0]])\n\n def 
test_get_linking_probabilities(self):\n worlds, num_entities = self.get_fake_worlds()\n # (batch_size, num_question_tokens, num_entities)\n linking_scores = [[[-2, 1, 0, -3, 2],\n [4, -1, 5, -3, 4]],\n [[0, 1, 8, 10, 10],\n [3, 2, -1, -2, 1]]]\n linking_scores = torch.FloatTensor(linking_scores)\n question_mask = torch.LongTensor([[1, 1], [1, 0]])\n _, entity_type_dict = self.model._get_type_vector(worlds, num_entities, linking_scores)\n\n # (batch_size, num_question_tokens, num_entities)\n entity_probability = self.model._get_linking_probabilities(worlds, linking_scores, question_mask,\n entity_type_dict)\n\n # The following properties in entity_probability are tested for by true_probability:\n # (1) It has all 0.0 probabilities when there is no question token, as seen for the\n # second word in the second batch.\n # (2) It has 0.0 probabilities when an entity is masked, as seen in the last two entities\n # for the second batch instance.\n # (3) The probabilities for entities of the same type with the same question token should\n # sum to at most 1, but not necessarily 1, because some probability mass goes to the\n # null entity. We have three entity types here, so each row should sum to at most 3,\n # and that number will approach 3 as the unnormalized linking scores for each entity\n # get higher.\n true_probability = [[[0.1192029, 0.5761169, 0.2119416, 0.0058998, 0.8756006],\n [0.9820138, 0.0024561, 0.9908675, 0.0008947, 0.9811352]],\n [[0.5, 0.7310586, 0.9996647, 0.0, 0.0],\n [0.0, 0.0, 0.0, 0.0, 0.0]]]\n assert_almost_equal(entity_probability.detach().cpu().numpy(), true_probability)\n\n def get_fake_worlds(self):\n # Generate a toy WikitablesWorld.\n FakeTable = namedtuple('FakeTable', ['entities', 'neighbors'])\n FakeWorld = namedtuple('FakeWorld', ['table_graph'])\n entities = [['0', 'fb:cell.2010', 'fb:cell.2011', 'fb:row.row.year', 'fb:row.row.year2'],\n ['1', 'fb:cell.2012', 'fb:row.row.year']]\n neighbors = [{'fb:cell.2010': ['fb:row.row.year', 'fb:row.row.year2'],\n 'fb:cell.2011': ['fb:row.row.year', 'fb:row.row.year2'],\n 'fb:row.row.year': ['fb:cell.2010', 'fb:cell.2011'],\n 'fb:row.row.year2': ['fb:cell.2010', 'fb:cell.2011'],\n '0': [],\n },\n {'fb:cell.2012': ['fb:row.row.year'],\n 'fb:row.row.year': ['fb:cell.2012'],\n '1': [],\n }]\n\n worlds = [FakeWorld(FakeTable(entity_list, entity2neighbors))\n for entity_list, entity2neighbors in zip(entities, neighbors)]\n num_entities = max([len(entity_list) for entity_list in entities])\n return worlds, num_entities\n" ]
[ [ "torch.LongTensor", "torch.FloatTensor" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
juierror/mace
[ "a2bcf2c98b410f75d38f2291585b0ad11b36d068" ]
[ "tools/sh_commands.py" ]
[ "# Copyright 2018 The MACE Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport glob\nimport logging\nimport numpy as np\nimport os\nimport random\nimport re\nimport sh\nimport struct\nimport sys\nimport time\nimport platform\n\nimport six\n\nimport common\nfrom common import abi_to_internal\n\nsys.path.insert(0, \"mace/python/tools\")\ntry:\n from encrypt_opencl_codegen import encrypt_opencl_codegen\n from opencl_binary_codegen import generate_opencl_code\n from generate_data import generate_input_data\n from validate import validate\n from mace_engine_factory_codegen import gen_mace_engine_factory\nexcept Exception as e:\n six.print_(\"Import error:\\n%s\" % e, file=sys.stderr)\n exit(1)\n\n\n################################\n# common\n################################\n\n\ndef strip_invalid_utf8(str):\n return sh.iconv(str, \"-c\", \"-t\", \"UTF-8\")\n\n\ndef split_stdout(stdout_str):\n stdout_str = strip_invalid_utf8(stdout_str)\n # Filter out last empty line\n return [l.strip() for l in stdout_str.split('\\n') if len(l.strip()) > 0]\n\n\ndef make_output_processor(buff):\n def process_output(line):\n six.print_(line.rstrip())\n buff.append(line)\n\n return process_output\n\n\ndef device_lock_path(serialno):\n return \"/tmp/device-lock-%s\" % serialno\n\n\ndef device_lock(serialno, timeout=7200):\n import filelock\n return filelock.FileLock(device_lock_path(serialno.replace(\"/\", \"\")),\n timeout=timeout)\n\n\ndef is_device_locked(serialno):\n import filelock\n try:\n with device_lock(serialno, timeout=0.000001):\n return False\n except filelock.Timeout:\n return True\n\n\nclass BuildType(object):\n proto = 'proto'\n code = 'code'\n\n\ndef stdout_success(stdout):\n stdout_lines = stdout.split(\"\\n\")\n for line in stdout_lines:\n if \"Aborted\" in line or \"FAILED\" in line or \\\n \"Segmentation fault\" in line:\n return False\n return True\n\n\n# select a random unlocked device support the ABI\ndef choose_a_random_device(target_devices, target_abi):\n eligible_devices = [dev for dev in target_devices\n if target_abi in dev[common.YAMLKeyword.target_abis]]\n unlocked_devices = [dev for dev in eligible_devices if\n not is_device_locked(dev[common.YAMLKeyword.address])]\n if len(unlocked_devices) > 0:\n chosen_devices = [random.choice(unlocked_devices)]\n else:\n chosen_devices = [random.choice(eligible_devices)]\n return chosen_devices\n\n\n################################\n# clear data\n################################\ndef clear_phone_data_dir(serialno, phone_data_dir):\n sh.adb(\"-s\",\n serialno,\n \"shell\",\n \"rm -rf %s\" % phone_data_dir)\n\n\n################################\n# adb commands\n################################\ndef adb_devices():\n serialnos = []\n p = re.compile(r'(\\S+)\\s+device')\n for line in split_stdout(sh.adb(\"devices\")):\n m = p.match(line)\n if m:\n serialnos.append(m.group(1))\n\n return serialnos\n\n\ndef get_soc_serialnos_map():\n serialnos = adb_devices()\n soc_serialnos_map = {}\n for serialno in serialnos:\n 
props = adb_getprop_by_serialno(serialno)\n soc_serialnos_map.setdefault(props[\"ro.board.platform\"], []) \\\n .append(serialno)\n\n return soc_serialnos_map\n\n\ndef get_target_socs_serialnos(target_socs=None):\n soc_serialnos_map = get_soc_serialnos_map()\n serialnos = []\n if target_socs is None:\n target_socs = soc_serialnos_map.keys()\n for target_soc in target_socs:\n serialnos.extend(soc_serialnos_map[target_soc])\n return serialnos\n\n\ndef adb_getprop_by_serialno(serialno):\n outputs = sh.adb(\"-s\", serialno, \"shell\", \"getprop\")\n raw_props = split_stdout(outputs)\n props = {}\n p = re.compile(r'\\[(.+)\\]: \\[(.+)\\]')\n for raw_prop in raw_props:\n m = p.match(raw_prop)\n if m:\n props[m.group(1)] = m.group(2)\n return props\n\n\ndef adb_get_device_name_by_serialno(serialno):\n props = adb_getprop_by_serialno(serialno)\n return props.get(\"ro.product.model\", \"\").replace(' ', '')\n\n\ndef adb_supported_abis(serialno):\n props = adb_getprop_by_serialno(serialno)\n abilist_str = props[\"ro.product.cpu.abilist\"]\n abis = [abi.strip() for abi in abilist_str.split(',')]\n return abis\n\n\ndef adb_get_all_socs():\n socs = []\n for d in adb_devices():\n props = adb_getprop_by_serialno(d)\n socs.append(props[\"ro.board.platform\"])\n return set(socs)\n\n\ndef adb_push(src_path, dst_path, serialno):\n sh.adb(\"-s\", serialno, \"push\", src_path, dst_path)\n\n\ndef adb_pull(src_path, dst_path, serialno):\n try:\n sh.adb(\"-s\", serialno, \"pull\", src_path, dst_path)\n except Exception as e:\n six.print_(\"Error msg: %s\" % e, file=sys.stderr)\n\n\n################################\n# Toolchain\n################################\ndef asan_rt_library_names(abi):\n asan_rt_names = {\n \"armeabi-v7a\": \"libclang_rt.asan-arm-android.so\",\n \"arm64-v8a\": \"libclang_rt.asan-aarch64-android.so\",\n }\n return asan_rt_names[abi]\n\n\ndef find_asan_rt_library(abi, asan_rt_path=''):\n if not asan_rt_path:\n find_path = os.environ['ANDROID_NDK_HOME']\n candidates = split_stdout(sh.find(find_path, \"-name\",\n asan_rt_library_names(abi)))\n if len(candidates) == 0:\n common.MaceLogger.error(\n \"Toolchain\",\n \"Can't find AddressSanitizer runtime library in %s\" %\n find_path)\n elif len(candidates) > 1:\n common.MaceLogger.info(\n \"More than one AddressSanitizer runtime library, use the 1st\")\n return candidates[0]\n return \"%s/%s\" % (asan_rt_path, asan_rt_library_names(abi))\n\n\ndef simpleperf_abi_dir_names(abi):\n simpleperf_dir_names = {\n \"armeabi-v7a\": \"arm\",\n \"arm64-v8a\": \"arm64\",\n }\n return simpleperf_dir_names[abi]\n\n\ndef find_simpleperf_library(abi, simpleperf_path=''):\n if not simpleperf_path:\n find_path = os.environ['ANDROID_NDK_HOME']\n candidates = split_stdout(sh.find(find_path, \"-name\", \"simpleperf\"))\n if len(candidates) == 0:\n common.MaceLogger.error(\n \"Toolchain\",\n \"Can't find Simpleperf runtime library in % s\" %\n find_path)\n found = False\n for candidate in candidates:\n if candidate.find(simpleperf_abi_dir_names(abi) + \"/\") != -1:\n found = True\n return candidate\n if not found:\n common.MaceLogger.error(\n \"Toolchain\",\n \"Can't find Simpleperf runtime library in % s\" %\n find_path)\n\n return \"%s/simpleperf\" % simpleperf_path\n\n\n################################\n# bazel commands\n################################\ndef bazel_build(target,\n abi=\"armeabi-v7a\",\n toolchain='android',\n enable_hexagon=False,\n enable_hta=False,\n enable_apu=False,\n enable_neon=True,\n enable_opencl=True,\n enable_quantize=True,\n 
enable_bfloat16=False,\n enable_rpcmem=True,\n address_sanitizer=False,\n symbol_hidden=True,\n debug_mode=False,\n extra_args=\"\"):\n six.print_(\"* Build %s with ABI %s\" % (target, abi))\n if abi == \"host\":\n toolchain = platform.system().lower()\n bazel_args = (\n \"build\",\n \"--config\",\n toolchain,\n \"--define\",\n \"quantize=%s\" % str(enable_quantize).lower(),\n \"--define\",\n \"bfloat16=%s\" % str(enable_bfloat16).lower(),\n target,\n )\n else:\n bazel_args = (\n \"build\",\n target,\n \"--config\",\n toolchain,\n \"--cpu=%s\" % abi_to_internal(abi),\n \"--define\",\n \"neon=%s\" % str(enable_neon).lower(),\n \"--define\",\n \"opencl=%s\" % str(enable_opencl).lower(),\n \"--define\",\n \"quantize=%s\" % str(enable_quantize).lower(),\n \"--define\",\n \"bfloat16=%s\" % str(enable_bfloat16).lower(),\n \"--define\",\n \"rpcmem=%s\" % str(enable_rpcmem).lower(),\n \"--define\",\n \"hexagon=%s\" % str(enable_hexagon).lower(),\n \"--define\",\n \"hta=%s\" % str(enable_hta).lower(),\n \"--define\",\n \"apu=%s\" % str(enable_apu).lower())\n if address_sanitizer:\n bazel_args += (\"--config\", \"asan\")\n if debug_mode:\n bazel_args += (\"--config\", \"debug\")\n if not address_sanitizer and not debug_mode:\n if toolchain == \"darwin\" or toolchain == \"ios\":\n bazel_args += (\"--config\", \"optimization_darwin\")\n else:\n bazel_args += (\"--config\", \"optimization\")\n if symbol_hidden:\n bazel_args += (\"--config\", \"symbol_hidden\")\n if extra_args:\n bazel_args += (extra_args,)\n six.print_(bazel_args)\n sh.bazel(\n _fg=True,\n *bazel_args)\n six.print_(bazel_args)\n six.print_(\"Build done!\\n\")\n\n\ndef bazel_build_common(target, build_args=\"\"):\n stdout_buff = []\n process_output = make_output_processor(stdout_buff)\n sh.bazel(\n \"build\",\n target + build_args,\n _tty_in=True,\n _out=process_output,\n _err_to_out=True)\n return \"\".join(stdout_buff)\n\n\ndef bazel_target_to_bin(target):\n # change //mace/a/b:c to bazel-bin/mace/a/b/c\n prefix, bin_name = target.split(':')\n prefix = prefix.replace('//', '/')\n if prefix.startswith('/'):\n prefix = prefix[1:]\n host_bin_path = \"bazel-bin/%s\" % prefix\n return host_bin_path, bin_name\n\n\n################################\n# mace commands\n################################\ndef gen_encrypted_opencl_source(codegen_path=\"mace/codegen\"):\n sh.mkdir(\"-p\", \"%s/opencl\" % codegen_path)\n encrypt_opencl_codegen(\"./mace/ops/opencl/cl/\",\n \"mace/codegen/opencl/opencl_encrypt_program.cc\")\n\n\ndef gen_mace_engine_factory_source(model_tags,\n embed_model_data,\n codegen_path=\"mace/codegen\"):\n six.print_(\"* Generate mace engine creator source\")\n codegen_tools_dir = \"%s/engine\" % codegen_path\n sh.rm(\"-rf\", codegen_tools_dir)\n sh.mkdir(\"-p\", codegen_tools_dir)\n gen_mace_engine_factory(\n model_tags,\n embed_model_data,\n codegen_tools_dir)\n six.print_(\"Generate mace engine creator source done!\\n\")\n\n\ndef merge_opencl_binaries(binaries_dirs,\n cl_compiled_program_file_name,\n output_file_path):\n platform_info_key = 'mace_opencl_precompiled_platform_info_key'\n cl_bin_dirs = []\n for d in binaries_dirs:\n cl_bin_dirs.append(os.path.join(d, \"opencl_bin\"))\n # create opencl binary output dir\n opencl_binary_dir = os.path.dirname(output_file_path)\n if not os.path.exists(opencl_binary_dir):\n sh.mkdir(\"-p\", opencl_binary_dir)\n kvs = {}\n for binary_dir in cl_bin_dirs:\n binary_path = os.path.join(binary_dir, cl_compiled_program_file_name)\n if not os.path.exists(binary_path):\n continue\n\n 
six.print_('generate opencl code from', binary_path)\n with open(binary_path, \"rb\") as f:\n binary_array = np.fromfile(f, dtype=np.uint8)\n\n idx = 0\n size, = struct.unpack(\"Q\", binary_array[idx:idx + 8])\n idx += 8\n for _ in six.moves.range(size):\n key_size, = struct.unpack(\"i\", binary_array[idx:idx + 4])\n idx += 4\n key, = struct.unpack(\n str(key_size) + \"s\", binary_array[idx:idx + key_size])\n idx += key_size\n value_size, = struct.unpack(\"i\", binary_array[idx:idx + 4])\n idx += 4\n if key == platform_info_key and key in kvs:\n common.mace_check(\n (kvs[key] == binary_array[idx:idx + value_size]).all(),\n \"\",\n \"There exists more than one OpenCL version for models:\"\n \" %s vs %s \" %\n (kvs[key], binary_array[idx:idx + value_size]))\n else:\n kvs[key] = binary_array[idx:idx + value_size]\n idx += value_size\n\n output_byte_array = bytearray()\n data_size = len(kvs)\n output_byte_array.extend(struct.pack(\"Q\", data_size))\n for key, value in six.iteritems(kvs):\n key_size = len(key)\n output_byte_array.extend(struct.pack(\"i\", key_size))\n output_byte_array.extend(struct.pack(str(key_size) + \"s\", key))\n value_size = len(value)\n output_byte_array.extend(struct.pack(\"i\", value_size))\n output_byte_array.extend(value)\n\n np.array(output_byte_array).tofile(output_file_path)\n\n\ndef merge_opencl_parameters(binaries_dirs,\n cl_parameter_file_name,\n output_file_path):\n cl_bin_dirs = []\n for d in binaries_dirs:\n cl_bin_dirs.append(os.path.join(d, \"opencl_bin\"))\n # create opencl binary output dir\n opencl_binary_dir = os.path.dirname(output_file_path)\n if not os.path.exists(opencl_binary_dir):\n sh.mkdir(\"-p\", opencl_binary_dir)\n kvs = {}\n for binary_dir in cl_bin_dirs:\n binary_path = os.path.join(binary_dir, cl_parameter_file_name)\n if not os.path.exists(binary_path):\n continue\n\n six.print_('generate opencl parameter from', binary_path)\n with open(binary_path, \"rb\") as f:\n binary_array = np.fromfile(f, dtype=np.uint8)\n\n idx = 0\n size, = struct.unpack(\"Q\", binary_array[idx:idx + 8])\n idx += 8\n for _ in six.moves.range(size):\n key_size, = struct.unpack(\"i\", binary_array[idx:idx + 4])\n idx += 4\n key, = struct.unpack(\n str(key_size) + \"s\", binary_array[idx:idx + key_size])\n idx += key_size\n value_size, = struct.unpack(\"i\", binary_array[idx:idx + 4])\n idx += 4\n kvs[key] = binary_array[idx:idx + value_size]\n idx += value_size\n\n output_byte_array = bytearray()\n data_size = len(kvs)\n output_byte_array.extend(struct.pack(\"Q\", data_size))\n for key, value in six.iteritems(kvs):\n key_size = len(key)\n output_byte_array.extend(struct.pack(\"i\", key_size))\n output_byte_array.extend(struct.pack(str(key_size) + \"s\", key))\n value_size = len(value)\n output_byte_array.extend(struct.pack(\"i\", value_size))\n output_byte_array.extend(value)\n\n np.array(output_byte_array).tofile(output_file_path)\n\n\ndef gen_input(model_output_dir,\n input_nodes,\n input_shapes,\n input_files=None,\n input_ranges=None,\n input_data_types=None,\n input_data_map=None,\n input_file_name=\"model_input\"):\n for input_name in input_nodes:\n formatted_name = common.formatted_file_name(\n input_file_name, input_name)\n if os.path.exists(\"%s/%s\" % (model_output_dir, formatted_name)):\n sh.rm(\"%s/%s\" % (model_output_dir, formatted_name))\n input_file_list = []\n if isinstance(input_files, list):\n input_file_list.extend(input_files)\n else:\n input_file_list.append(input_files)\n if input_data_map:\n for i in range(len(input_nodes)):\n dst_input_file 
= model_output_dir + '/' + \\\n common.formatted_file_name(input_file_name,\n input_nodes[i])\n input_name = input_nodes[i]\n common.mace_check(input_name in input_data_map,\n common.ModuleName.RUN,\n \"The preprocessor API in PrecisionValidator\"\n \" script should return all inputs of model\")\n if input_data_types[i] == 'float32':\n input_data = np.array(input_data_map[input_name],\n dtype=np.float32)\n elif input_data_types[i] == 'int32':\n input_data = np.array(input_data_map[input_name],\n dtype=np.int32)\n else:\n common.mace_check(\n False,\n common.ModuleName.RUN,\n 'Do not support input data type %s' % input_data_types[i])\n common.mace_check(\n list(map(int, common.split_shape(input_shapes[i])))\n == list(input_data.shape),\n common.ModuleName.RUN,\n \"The shape return from preprocessor API of\"\n \" PrecisionValidator script is not same with\"\n \" model deployment file. %s vs %s\"\n % (str(input_shapes[i]), str(input_data.shape)))\n input_data.tofile(dst_input_file)\n elif len(input_file_list) != 0:\n input_name_list = []\n if isinstance(input_nodes, list):\n input_name_list.extend(input_nodes)\n else:\n input_name_list.append(input_nodes)\n common.mace_check(len(input_file_list) == len(input_name_list),\n common.ModuleName.RUN,\n 'If input_files set, the input files should '\n 'match the input names.')\n for i in range(len(input_file_list)):\n if input_file_list[i] is not None:\n dst_input_file = model_output_dir + '/' + \\\n common.formatted_file_name(input_file_name,\n input_name_list[i])\n if input_file_list[i].startswith(\"http://\") or \\\n input_file_list[i].startswith(\"https://\"):\n six.moves.urllib.request.urlretrieve(input_file_list[i],\n dst_input_file)\n else:\n sh.cp(\"-f\", input_file_list[i], dst_input_file)\n else:\n # generate random input files\n input_nodes_str = \",\".join(input_nodes)\n input_shapes_str = \":\".join(input_shapes)\n input_ranges_str = \":\".join(input_ranges)\n input_data_types_str = \",\".join(input_data_types)\n generate_input_data(\"%s/%s\" % (model_output_dir, input_file_name),\n input_nodes_str,\n input_shapes_str,\n input_ranges_str,\n input_data_types_str)\n\n\ndef gen_opencl_binary_cpps(opencl_bin_file_path,\n opencl_param_file_path,\n opencl_bin_cpp_path,\n opencl_param_cpp_path):\n output_dir = os.path.dirname(opencl_bin_cpp_path)\n if not os.path.exists(output_dir):\n sh.mkdir(\"-p\", output_dir)\n opencl_bin_load_func_name = 'LoadOpenCLBinary'\n opencl_bin_size_func_name = 'OpenCLBinarySize'\n opencl_param_load_func_name = 'LoadOpenCLParameter'\n opencl_param_size_func_name = 'OpenCLParameterSize'\n generate_opencl_code(opencl_bin_file_path, opencl_bin_load_func_name,\n opencl_bin_size_func_name, opencl_bin_cpp_path)\n generate_opencl_code(opencl_param_file_path, opencl_param_load_func_name,\n opencl_param_size_func_name, opencl_param_cpp_path)\n\n\ndef update_mace_run_binary(build_tmp_binary_dir, link_dynamic=False):\n if link_dynamic:\n mace_run_filepath = build_tmp_binary_dir + \"/mace_run_dynamic\"\n else:\n mace_run_filepath = build_tmp_binary_dir + \"/mace_run_static\"\n\n if os.path.exists(mace_run_filepath):\n sh.rm(\"-rf\", mace_run_filepath)\n if link_dynamic:\n sh.cp(\"-f\", \"bazel-bin/mace/tools/mace_run_dynamic\",\n build_tmp_binary_dir)\n else:\n sh.cp(\"-f\", \"bazel-bin/mace/tools/mace_run_static\",\n build_tmp_binary_dir)\n\n\ndef create_internal_storage_dir(serialno, phone_data_dir):\n internal_storage_dir = \"%s/interior/\" % phone_data_dir\n sh.adb(\"-s\", serialno, \"shell\", \"mkdir\", \"-p\", 
internal_storage_dir)\n return internal_storage_dir\n\n\ndef push_depended_so_libs(libmace_dynamic_library_path,\n abi, phone_data_dir, serialno):\n src_file = \"%s/sources/cxx-stl/llvm-libc++/libs/\" \\\n \"%s/libc++_shared.so\" \\\n % (os.environ[\"ANDROID_NDK_HOME\"], abi)\n try:\n dep_so_libs = sh.bash(os.environ[\"ANDROID_NDK_HOME\"] + \"/ndk-depends\",\n libmace_dynamic_library_path)\n except sh.ErrorReturnCode_127:\n print(\"Find no ndk-depends, use default libc++_shared.so\")\n else:\n for dep in split_stdout(dep_so_libs):\n if dep == \"libgnustl_shared.so\":\n src_file = \"%s/sources/cxx-stl/gnu-libstdc++/4.9/libs/\" \\\n \"%s/libgnustl_shared.so\" \\\n % (os.environ[\"ANDROID_NDK_HOME\"], abi)\n print(\"push %s to %s\" % (src_file, phone_data_dir))\n adb_push(src_file, phone_data_dir, serialno)\n\n\ndef validate_model(abi,\n device,\n model_file_path,\n weight_file_path,\n docker_image_tag,\n dockerfile_path,\n platform,\n device_type,\n input_nodes,\n output_nodes,\n input_shapes,\n output_shapes,\n input_data_formats,\n output_data_formats,\n model_output_dir,\n input_data_types,\n caffe_env,\n input_file_name=\"model_input\",\n output_file_name=\"model_out\",\n validation_threshold=0.9,\n backend=\"tensorflow\",\n validation_outputs_data=[],\n log_file=\"\"):\n if not validation_outputs_data:\n six.print_(\"* Validate with %s\" % platform)\n else:\n six.print_(\"* Validate with file: %s\" % validation_outputs_data)\n if abi != \"host\":\n for output_name in output_nodes:\n formatted_name = common.formatted_file_name(\n output_file_name, output_name)\n if os.path.exists(\"%s/%s\" % (model_output_dir,\n formatted_name)):\n sh.rm(\"-rf\", \"%s/%s\" % (model_output_dir, formatted_name))\n device.pull_from_data_dir(formatted_name, model_output_dir)\n\n if platform == \"tensorflow\" or platform == \"onnx\":\n validate(platform, model_file_path, \"\",\n \"%s/%s\" % (model_output_dir, input_file_name),\n \"%s/%s\" % (model_output_dir, output_file_name), device_type,\n \":\".join(input_shapes), \":\".join(output_shapes),\n \",\".join(input_data_formats), \",\".join(output_data_formats),\n \",\".join(input_nodes), \",\".join(output_nodes),\n validation_threshold, \",\".join(input_data_types), backend,\n validation_outputs_data,\n log_file)\n elif platform == \"caffe\":\n image_name = \"mace-caffe:\" + docker_image_tag\n container_name = \"mace_caffe_\" + docker_image_tag + \"_validator\"\n\n if caffe_env == common.CaffeEnvType.LOCAL:\n try:\n import caffe\n except ImportError:\n logging.error('There is no caffe python module.')\n validate(platform, model_file_path, weight_file_path,\n \"%s/%s\" % (model_output_dir, input_file_name),\n \"%s/%s\" % (model_output_dir, output_file_name),\n device_type,\n \":\".join(input_shapes), \":\".join(output_shapes),\n \",\".join(input_data_formats),\n \",\".join(output_data_formats),\n \",\".join(input_nodes), \",\".join(output_nodes),\n validation_threshold, \",\".join(input_data_types), backend,\n validation_outputs_data,\n log_file)\n elif caffe_env == common.CaffeEnvType.DOCKER:\n docker_image_id = sh.docker(\"images\", \"-q\", image_name)\n if not docker_image_id:\n six.print_(\"Build caffe docker\")\n sh.docker(\"build\", \"-t\", image_name,\n dockerfile_path)\n\n container_id = sh.docker(\"ps\", \"-qa\", \"-f\",\n \"name=%s\" % container_name)\n if container_id and not sh.docker(\"ps\", \"-qa\", \"--filter\",\n \"status=running\", \"-f\",\n \"name=%s\" % container_name):\n sh.docker(\"rm\", \"-f\", container_name)\n container_id = \"\"\n if 
not container_id:\n six.print_(\"Run caffe container\")\n sh.docker(\n \"run\",\n \"-d\",\n \"-it\",\n \"--name\",\n container_name,\n image_name,\n \"/bin/bash\")\n\n for input_name in input_nodes:\n formatted_input_name = common.formatted_file_name(\n input_file_name, input_name)\n sh.docker(\n \"cp\",\n \"%s/%s\" % (model_output_dir, formatted_input_name),\n \"%s:/mace\" % container_name)\n\n for output_name in output_nodes:\n formatted_output_name = common.formatted_file_name(\n output_file_name, output_name)\n sh.docker(\n \"cp\",\n \"%s/%s\" % (model_output_dir, formatted_output_name),\n \"%s:/mace\" % container_name)\n model_file_name = os.path.basename(model_file_path)\n weight_file_name = os.path.basename(weight_file_path)\n sh.docker(\"cp\", \"tools/common.py\", \"%s:/mace\" % container_name)\n sh.docker(\"cp\", \"tools/validate.py\", \"%s:/mace\" % container_name)\n sh.docker(\"cp\", model_file_path, \"%s:/mace\" % container_name)\n sh.docker(\"cp\", weight_file_path, \"%s:/mace\" % container_name)\n\n sh.docker(\n \"exec\",\n container_name,\n \"python\",\n \"-u\",\n \"/mace/validate.py\",\n \"--platform=caffe\",\n \"--model_file=/mace/%s\" % model_file_name,\n \"--weight_file=/mace/%s\" % weight_file_name,\n \"--input_file=/mace/%s\" % input_file_name,\n \"--mace_out_file=/mace/%s\" % output_file_name,\n \"--device_type=%s\" % device_type,\n \"--input_node=%s\" % \",\".join(input_nodes),\n \"--output_node=%s\" % \",\".join(output_nodes),\n \"--input_shape=%s\" % \":\".join(input_shapes),\n \"--output_shape=%s\" % \":\".join(output_shapes),\n \"--input_data_format=%s\" % \",\".join(input_data_formats),\n \"--output_data_format=%s\" % \",\".join(output_data_formats),\n \"--validation_threshold=%f\" % validation_threshold,\n \"--input_data_type=%s\" % \",\".join(input_data_types),\n \"--backend=%s\" % backend,\n \"--validation_outputs_data=%s\" % \",\".join(\n validation_outputs_data),\n \"--log_file=%s\" % log_file,\n _fg=True)\n\n six.print_(\"Validation done!\\n\")\n\n\n################################\n# library\n################################\ndef packaging_lib(libmace_output_dir, project_name):\n six.print_(\"* Package libs for %s\" % project_name)\n tar_package_name = \"libmace_%s.tar.gz\" % project_name\n project_dir = \"%s/%s\" % (libmace_output_dir, project_name)\n tar_package_path = \"%s/%s\" % (project_dir, tar_package_name)\n if os.path.exists(tar_package_path):\n sh.rm(\"-rf\", tar_package_path)\n\n six.print_(\"Start packaging '%s' libs into %s\" % (project_name,\n tar_package_path))\n which_sys = platform.system()\n if which_sys == \"Linux\" or which_sys == \"Darwin\":\n sh.tar(\n \"--exclude\",\n \"%s/_tmp\" % project_dir,\n \"-cvzf\",\n \"%s\" % tar_package_path,\n glob.glob(\"%s/*\" % project_dir),\n _fg=True)\n six.print_(\"Packaging Done!\\n\")\n return tar_package_path\n" ]
[ [ "numpy.array", "numpy.fromfile" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
CHUNYUWANG/imu-human-pose-pytorch
[ "f4813336571789f46eabdfb520e7ed5b20ac04ea" ]
[ "lib/multiviews/totalcapture_body.py" ]
[ "# Copyright (c) Microsoft Corporation.\n# Licensed under the MIT License.\n\nimport numpy as np\n\n\nclass HumanBody(object):\n\n def __init__(self):\n self.skeleton = self.get_skeleton()\n self.skeleton_sorted_by_level = self.sort_skeleton_by_level(\n self.skeleton)\n self.imu_edges = self.get_imu_edges()\n self.imu_edges_reverse = self.get_imu_edges_reverse()\n\n def get_skeleton(self):\n joint_names = [\n 'root', 'rhip', 'rkne', 'rank', 'lhip', 'lkne', 'lank', 'belly',\n 'neck', 'nose', 'lsho', 'lelb', 'lwri', 'rsho', 'relb', # use nose here instead of head\n 'rwri'\n ]\n children = [[1, 4, 7], [2], [3], [], [5], [6], [], [8], [9, 10, 13],\n [], [11], [12], [], [14], [15], []]\n imubone = [[-1, -1, -1], [3], [4], [], [5], [6], [], [-1], [-1, -1, -1],\n [], [11], [12], [], [13], [14], []]\n\n skeleton = []\n for i in range(len(joint_names)):\n skeleton.append({\n 'idx': i,\n 'name': joint_names[i],\n 'children': children[i],\n 'imubone': imubone[i]\n })\n return skeleton\n\n def sort_skeleton_by_level(self, skeleton):\n njoints = len(skeleton)\n level = np.zeros(njoints)\n\n queue = [skeleton[0]]\n while queue:\n cur = queue[0]\n for child in cur['children']:\n skeleton[child]['parent'] = cur['idx']\n level[child] = level[cur['idx']] + 1\n queue.append(skeleton[child])\n del queue[0]\n\n desc_order = np.argsort(level)[::-1]\n sorted_skeleton = []\n for i in desc_order:\n skeleton[i]['level'] = level[i]\n sorted_skeleton.append(skeleton[i])\n return sorted_skeleton\n\n def get_imu_edges(self):\n imu_edges = dict()\n for joint in self.skeleton:\n for idx_child, child in enumerate(joint['children']):\n if joint['imubone'][idx_child] >= 0:\n one_edge_name = (joint['idx'], child)\n bone_idx = joint['imubone'][idx_child]\n imu_edges[one_edge_name] = bone_idx\n return imu_edges\n\n def get_imu_edges_reverse(self):\n imu_edges = self.imu_edges\n imu_edges_reverse = {imu_edges[k]:k for k in imu_edges}\n return imu_edges_reverse\n\n\nif __name__ == '__main__':\n hb = HumanBody()\n print(hb.skeleton)\n print(hb.skeleton_sorted_by_level)\n" ]
[ [ "numpy.argsort", "numpy.zeros" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
ibrahiminfinite/dart
[ "495c82120c836005f2d136d4a50c8cc997fb879b" ]
[ "python/tests/unit/collision/test_collision.py" ]
[ "import platform\nimport pytest\nimport dartpy as dart\nimport numpy as np\n\n\ndef collision_groups_tester(cd):\n size = [1, 1, 1]\n pos1 = [0, 0, 0]\n pos2 = [0.5, 0, 0]\n\n simple_frame1 = dart.dynamics.SimpleFrame()\n simple_frame2 = dart.dynamics.SimpleFrame()\n\n sphere1 = dart.dynamics.SphereShape(1)\n sphere2 = dart.dynamics.SphereShape(1)\n\n simple_frame1.setShape(sphere1)\n simple_frame2.setShape(sphere2)\n\n group = cd.createCollisionGroup()\n group.addShapeFrame(simple_frame1)\n group.addShapeFrame(simple_frame2)\n assert group.getNumShapeFrames() is 2\n\n #\n # ( s1,s2 ) collision!\n # ---+---|---+---+---+---+--->\n # -1 0 +1 +2 +3 +4\n #\n assert group.collide()\n\n #\n # ( s1 ) ( s2 ) no collision\n # ---+---|---+---+---+---+--->\n # -1 0 +1 +2 +3 +4\n #\n simple_frame2.setTranslation([3, 0, 0])\n assert not group.collide()\n\n option = dart.collision.CollisionOption()\n result = dart.collision.CollisionResult()\n\n group.collide(option, result)\n assert not result.isCollision()\n assert result.getNumContacts() is 0\n\n option.enableContact = True\n simple_frame2.setTranslation([1.99, 0, 0])\n\n group.collide(option, result)\n assert result.isCollision()\n assert result.getNumContacts() is not 0\n\n # Repeat the same test with BodyNodes instead of SimpleFrames\n\n group.removeAllShapeFrames()\n assert group.getNumShapeFrames() is 0\n\n skel1 = dart.dynamics.Skeleton()\n skel2 = dart.dynamics.Skeleton()\n\n [joint1, body1] = skel1.createFreeJointAndBodyNodePair(None)\n [joint2, body2] = skel2.createFreeJointAndBodyNodePair(None)\n\n shape_node1 = body1.createShapeNode(sphere1)\n shape_node1.createVisualAspect()\n shape_node1.createCollisionAspect()\n\n shape_node2 = body2.createShapeNode(sphere2)\n shape_node2.createVisualAspect()\n shape_node2.createCollisionAspect()\n\n group.addShapeFramesOf(body1)\n group.addShapeFramesOf(body2)\n\n assert group.getNumShapeFrames() is 2\n\n assert group.collide()\n\n joint2.setPosition(3, 3)\n assert not group.collide()\n\n # Repeat the same test with BodyNodes and two groups\n\n joint2.setPosition(3, 0)\n\n group.removeAllShapeFrames()\n assert group.getNumShapeFrames() is 0\n group2 = cd.createCollisionGroup()\n\n group.addShapeFramesOf(body1)\n group2.addShapeFramesOf(body2)\n\n assert group.getNumShapeFrames() is 1\n assert group2.getNumShapeFrames() is 1\n\n assert group.collide(group2)\n\n joint2.setPosition(3, 3)\n assert not group.collide(group2)\n\n\ndef test_collision_groups():\n cd = dart.collision.FCLCollisionDetector()\n collision_groups_tester(cd)\n\n cd = dart.collision.DARTCollisionDetector()\n collision_groups_tester(cd)\n\n if hasattr(dart.collision, \"BulletCollisionDetector\"):\n cd = dart.collision.BulletCollisionDetector()\n collision_groups_tester(cd)\n\n if hasattr(dart.collision, \"OdeCollisionDetector\"):\n cd = dart.collision.OdeCollisionDetector()\n collision_groups_tester(cd)\n\n\n# TODO: Add more collision detectors\[email protected](\"cd\", [dart.collision.FCLCollisionDetector()])\ndef test_filter(cd):\n # Create two bodies skeleton. 
The two bodies are placed at the same position\n # with the same size shape so that they collide by default.\n skel = dart.dynamics.Skeleton()\n\n shape = dart.dynamics.BoxShape(np.ones(3))\n\n _, body0 = skel.createRevoluteJointAndBodyNodePair()\n shape_node0 = body0.createShapeNode(shape)\n shape_node0.createVisualAspect()\n shape_node0.createCollisionAspect()\n\n _, body1 = skel.createRevoluteJointAndBodyNodePair(body0)\n shape_node1 = body1.createShapeNode(shape)\n shape_node1.createVisualAspect()\n shape_node1.createCollisionAspect()\n\n # Create a world and add the created skeleton\n world = dart.simulation.World()\n world.addSkeleton(skel)\n\n # Set a new collision detector\n constraint_solver = world.getConstraintSolver()\n constraint_solver.setCollisionDetector(cd)\n\n # Get the collision group from the constraint solver\n group = constraint_solver.getCollisionGroup()\n assert group.getNumShapeFrames() == 2\n\n # Create BodyNodeCollisionFilter\n option = constraint_solver.getCollisionOption()\n body_node_filter = dart.collision.BodyNodeCollisionFilter()\n option.collisionFilter = body_node_filter\n\n skel.enableSelfCollisionCheck()\n skel.enableAdjacentBodyCheck()\n assert skel.isEnabledSelfCollisionCheck()\n assert skel.isEnabledAdjacentBodyCheck()\n assert group.collide()\n assert group.collide(option)\n\n skel.enableSelfCollisionCheck()\n skel.disableAdjacentBodyCheck()\n assert skel.isEnabledSelfCollisionCheck()\n assert not skel.isEnabledAdjacentBodyCheck()\n assert group.collide()\n assert not group.collide(option)\n\n skel.disableSelfCollisionCheck()\n skel.enableAdjacentBodyCheck()\n assert not skel.isEnabledSelfCollisionCheck()\n assert skel.isEnabledAdjacentBodyCheck()\n assert group.collide()\n assert not group.collide(option)\n\n skel.disableSelfCollisionCheck()\n skel.disableAdjacentBodyCheck()\n assert not skel.isEnabledSelfCollisionCheck()\n assert not skel.isEnabledAdjacentBodyCheck()\n assert group.collide()\n assert not group.collide(option)\n\n # Test collision body filtering\n skel.enableSelfCollisionCheck()\n skel.enableAdjacentBodyCheck()\n body_node_filter.addBodyNodePairToBlackList(body0, body1)\n assert not group.collide(option)\n body_node_filter.removeBodyNodePairFromBlackList(body0, body1)\n assert group.collide(option)\n body_node_filter.addBodyNodePairToBlackList(body0, body1)\n assert not group.collide(option)\n body_node_filter.removeAllBodyNodePairsFromBlackList()\n assert group.collide(option)\n\n\ndef test_raycast():\n cd = dart.collision.BulletCollisionDetector()\n\n simple_frame = dart.dynamics.SimpleFrame()\n sphere = dart.dynamics.SphereShape(1)\n simple_frame.setShape(sphere)\n\n group = cd.createCollisionGroup()\n group.addShapeFrame(simple_frame)\n assert group.getNumShapeFrames() == 1\n\n option = dart.collision.RaycastOption()\n option.mEnableAllHits = False\n\n result = dart.collision.RaycastResult()\n assert not result.hasHit()\n\n ray_hit = dart.collision.RayHit()\n\n result.clear()\n simple_frame.setTranslation(np.zeros(3))\n assert group.raycast([-2, 0, 0], [2, 0, 0], option, result)\n assert result.hasHit()\n assert len(result.mRayHits) == 1\n ray_hit = result.mRayHits[0]\n assert np.isclose(ray_hit.mPoint, [-1, 0, 0]).all()\n assert np.isclose(ray_hit.mNormal, [-1, 0, 0]).all()\n assert ray_hit.mFraction == pytest.approx(0.25)\n\n result.clear()\n simple_frame.setTranslation(np.zeros(3))\n assert group.raycast([2, 0, 0], [-2, 0, 0], option, result)\n assert result.hasHit()\n assert len(result.mRayHits) == 1\n ray_hit = 
result.mRayHits[0]\n assert np.isclose(ray_hit.mPoint, [1, 0, 0]).all()\n assert np.isclose(ray_hit.mNormal, [1, 0, 0]).all()\n assert ray_hit.mFraction == pytest.approx(0.25)\n\n result.clear()\n simple_frame.setTranslation([1, 0, 0])\n assert group.raycast([-2, 0, 0], [2, 0, 0], option, result)\n assert result.hasHit()\n assert len(result.mRayHits) == 1\n ray_hit = result.mRayHits[0]\n assert np.isclose(ray_hit.mPoint, [0, 0, 0]).all()\n assert np.isclose(ray_hit.mNormal, [-1, 0, 0]).all()\n assert ray_hit.mFraction == pytest.approx(0.5)\n\n\nif __name__ == \"__main__\":\n pytest.main()\n" ]
[ [ "numpy.isclose", "numpy.zeros", "numpy.ones" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
arthur-e/pyl4c
[ "97e1225c8b70ed9b21edc9e54ee66c78a02cded8" ]
[ "pyl4c/apps/l4c/extensions/damm.py" ]
[ "r'''\nThe Dual Arrhenius Michaelis-Menten (DAMM) soil decomposition model (from\nDavidson et al. 2012).\n\n$$\nR_H = V_{\\mathrm{max}}\\, \\frac{S_X}{K_{M_S} + S_X} \\frac{O_2}{K_{M_{O_2}} + O_2}\n$$\n\nNOTE: The only term in the equation that is not unitless (either intrinsically\nor because the units cancel out) is `V_max`; therefore, the units of alpha,\nwhich determine the units of `V_max`, ultimately determine the units of\nrespiration.\n'''\n\nimport numpy as np\nfrom pyl4c import Namespace\n\nclass AbstractDAMM(object):\n '''\n Abstract Dual Arrhenius Michaelis-Menten (DAMM) model framework. Not\n intended for end-user instantiation.\n '''\n def cbar0(self, litterfall, soil_m, soil_t, porosity, fmet, fstr):\n '''\n An alternative method for estimating the initital C pool sizes, this\n analytical approaches solves for C storage after setting the\n differential equation governing change in C to zero. See README for\n the equations. The C storage magnitudes tend to be higher because\n `V_max` is very small compared to litterfall, leading to very large\n empirical C storage estimates.\n\n Parameters\n ----------\n litterfall : numpy.ndarray\n Average daily litterfall [g C cm-3 day-1]\n soil_m : numpy.ndarray\n 365-day soil moisture (%) climatology (365 x N ...)\n soil_t : numpy.ndarray\n 365-day soil temperature (°K) climatology (365 x N ...)\n porosity : numpy.ndarray\n Total porosity, between [0, 1]\n fmet : float\n Fraction of daily litterfall entering metabolic pool\n fstr : float\n Fraction of structural pool transferred to the recalcitrant pool\n during \"humification\"\n\n Returns\n -------\n tuple\n (c0, c1, c2) numpy.ndarray instances, one for each of the\n steady-state C pools (Units: g cm-3)\n '''\n # Calculate the DAMM parameters needed in the steady-state C eq.\n alpha0, alpha1, alpha2, p, d_liq = (\n self.get(p)\n for p in ('alpha0', 'alpha1', 'alpha2', 'p', 'd_liq')\n )\n try:\n km_s = self.params[self.labels.index('km_s')]\n except ValueError:\n km_s_int, km_s_slope = (\n self.get(p)\n for p in ('km_s_int', 'km_s_slope')\n )\n km_s = km_s_int + (km_s_slope * soil_t)\n\n # Convert from [Mg C cm-3 hr-1] to [g C cm-3 day-1]\n vmax0 = self.v_max(soil_t, alpha0) * 1e6 * 24\n vmax1 = self.v_max(soil_t, alpha1) * 1e6 * 24\n vmax2 = self.v_max(soil_t, alpha2) * 1e6 * 24\n _, conc_O2 = self.concentrations(np.nan, soil_m, porosity)\n mm_O2 = conc_O2 / (self.get('km_O2') + conc_O2)\n sx_coef = np.multiply(p * d_liq, np.power(soil_m / 100, 3))\n c0 = (litterfall * fmet * km_s) / (sx_coef * (\n (vmax0 * mm_O2) - (litterfall * fmet)))\n c1 = (litterfall * (1 - fmet) * km_s) / (sx_coef * (\n (vmax1 * mm_O2) - (litterfall * (1 - fmet))))\n c2 = -(vmax1 * c1 * fstr * km_s) / (\n (sx_coef * (((vmax1 * c1 * fstr) + (vmax2 * c1)))) + (vmax2 * km_s))\n substrate_by_pool = []\n # Empirical C is now the steady-state value for each day of the\n # climatological year; calculate the daily average\n for empirical_C in (c0, c1, c2):\n # Calculate mean across 365 days\n substrate_by_pool.append(np.nanmean(\n np.where(empirical_C < 0, 0, empirical_C), axis = 0))\n return substrate_by_pool\n\n def cbar(self, rh, soil_m, soil_t, porosity, perc = 80):\n r'''\n As in the L4C calibration, calculate the empirical C storage, based on\n observed RH, since we don't yet know how much C we have to burn. Note\n that this SINGLE value per (flux tower) site is used as a \"constant\n effective SOC factor\" (Jones et al. 2017) throughout calibration, for\n every time step. 
This is because C substrate pools are assumed to be\n in the steady state for calibration. The empirical C storage\n magnitudes estimated by this function compare well with the\n SoilGrids 250m global 0-5 cm SOC density estimates of 10-60 kg m-2.\n\n $$\n \\bar{C} = \\frac{R_H\\, k_{M_{S_x}}}{(p\\, D_{\\mathrm{liq}}\\, \\theta^3)\n [V_{\\mathrm{max}}\\, [O_2](k_{M_{O_2}} + [O_2])^{-1} - R_H]}\n $$\n\n Parameters\n ----------\n rh : numpy.ndarray\n RH in g C m-2 day-1\n soil_m : numpy.ndarray\n Soil moisture (%)\n soil_t : numpy.ndarray\n Soil temperature (°K)\n porosity : numpy.ndarray\n Total porosity, between [0, 1]\n perc : int or float\n Empirical Cbar percentile to return\n\n Returns\n -------\n tuple\n (c0, c1, c2) numpy.ndarray instances, one for each of the\n steady-state C pools (Units: g cm-3)\n '''\n # Calculate the DAMM parameters needed in the steady-state C eq.\n alpha0, alpha1, alpha2, ea, p, d_liq = (\n self.get(p)\n for p in ('alpha0', 'alpha1', 'alpha2', 'ea', 'p', 'd_liq')\n )\n try:\n km_s = self.params[self.labels.index('km_s')]\n except ValueError:\n km_s_int, km_s_slope = (\n self.get(p)\n for p in ('km_s_int', 'km_s_slope')\n )\n km_s = km_s_int + (km_s_slope * soil_t)\n\n # Let's do this in the units of km_s, because substrate is needed\n # in these units (g C cm-3)\n # -- rh is in g m-2 day-1\n # -- km_s is in g cm-3\n # -- v_max is in Mg cm-3 hr-1\n # Calculate coefficients for substrate concentration\n sx_coef = np.multiply(p * d_liq, np.power(soil_m / 100, 3))\n # Calculate oxygen concentration at reaction site; then MM constraint\n _, conc_O2 = self.concentrations(np.nan, soil_m, porosity)\n mm_O2 = conc_O2 / (self.get('km_O2') + conc_O2) # (dimensionless)\n # Convert g C m-2 to g C m-3, then denominator from m-3 to cm-3\n rh = (rh / self.constants.soil_depth_m) / 1e6\n # Estimate each soil C pool (differentiated by base rate or alpha)\n substrate_by_pool = []\n for alpha in (alpha0, alpha1, alpha2):\n v_max_i = self.v_max(soil_t, alpha, ea) # Calculate base rate\n v_max_i = (v_max_i * 1e6) # Convert Mg C to g C\n empirical_C = np.divide(\n np.multiply(rh, km_s),\n np.multiply(sx_coef, np.subtract( # Hourly to daily Vmax\n np.multiply(v_max_i * 24, mm_O2), rh)))\n # This empirical approach may result in C values < 0; assign 0\n substrate_by_pool.append(\n np.nanpercentile(\n np.where(empirical_C < 0, 0, empirical_C), perc, axis = 0))\n return substrate_by_pool\n\n def concentrations(self, substrate, soil_m, porosity):\n '''\n For a single C pool, returns substrate and oxygen concentrations.\n Units for C substrate are g C cm-3 (same as input argument) and units\n for O2 are dimensionless--essentially, it's `d_gas` times the proportion\n of total volume (soil + water + air) occupied by O2.\n\n Parameters\n ----------\n substrate : numpy.ndarray\n Soil C substrate (g cm-3)\n soil_m : numpy.ndarray\n Soil moisture (%)\n porosity : numpy.ndarray\n Total porosity, between [0, 1]\n\n Returns\n -------\n tuple\n `(conc_Sx, conc_O2)` where `conc_Sx` is the concentration of\n substrate, `conc_O2` is the concentration of O2\n '''\n air_frac_O2 = 0.2095 # L O2 L-1 air (20.95%)\n p = self.get('p')\n d_liq = self.get('d_liq')\n d_gas = self.get('d_gas')\n # Calculate substrate concentration at reaction site\n conc_Sx = np.multiply(substrate,\n np.multiply(p * d_liq, np.power(soil_m / 100, 3)))\n # Calculate oxygen concentration at reaction site; then MM constraint\n a = (porosity - (soil_m / 100))\n conc_O2 = d_gas * air_frac_O2 * np.power(np.where(a < 0, 0, a), (4/3))\n 
return (conc_Sx, conc_O2)\n\n def get(self, parameter):\n '''\n Retrieves a parameter value whether it is fixed or a free parameter.\n This provides flexibility for versions of the model that may differ\n in free versus fixed parameters. Constants take precedence--if a value\n was fit but a constant value is found, the constant is used.\n\n Parameters\n ----------\n parameter : str\n Name of the parameter to retrieve\n '''\n if hasattr(self.constants, parameter):\n return getattr(self.constants, parameter)\n return self.params[self.labels.index(parameter)]\n\n def total_respiration(self, *args, **kwargs):\n '''\n Calculates the sum of respiration in each C pool. See `respiration()`.\n\n Returns\n -------\n numpy.ndarray\n '''\n rh = self.respiration(*args, **kwargs)\n respiration = np.zeros(rh[0].shape)\n for i in range(0, len(rh)):\n np.add(respiration, rh[i], out = respiration)\n return respiration\n\n def v_max(self, soil_t, alpha, ea = None):\n '''\n For a single C pool, returns maximum rate on enzymatic reaction in\n Mg C cm-3 hr-1. NOTE: Units of base rate (megagrams, Mg) are chosen\n to improve convergence in model fitting.\n\n Parameters\n ----------\n soil_t : numpy.ndarray\n Soil temperature (°K)\n alpha : numpy.ndarray\n Base rate/ pre-exponential factor (Mg C cm-3 hr-1)\n ea : numpy.ndarray\n Activation energy (kJ mol-1)\n\n Returns\n -------\n numpy.ndarray\n '''\n ea = self.get('ea') if ea is None else ea\n r_gas = 8.314472e-3 # Universal gas constant (kJ K-1 mol-1)\n return np.multiply( # Mg C cm-3 hr-1\n alpha, np.exp(-np.divide(ea, np.multiply(r_gas, soil_t))))\n\n\nclass DAMMDecompositionModel(AbstractDAMM):\n '''\n The DAMM decomposition model as reported by Davidson et al. (2012), with\n some changes: Support for multiple soil C pools; Additional free\n parameters (not bragging about it, these have to be fit); and Changed the\n units of the pre-exponential factor to better condition optimization.\n\n Free parameters are:\n\n - `alpha`: Pre-exponential factor of enzymatic reaction with `S_x`\n (Mg C cm-3 hr-1), note this is *Megagrams* of C...\n - `ea`: Activation energy of enzymatic reaction with `S_x` (kJ mol-1)\n - `km_s`: Michaelis-Menten coefficient for subtrate, using the constant-\n value form (g C cm-3)\n - `p`: Proportion of `C_total` that is soluble\n - `d_liq`: Diffusion coefficient of substrate in liquid phase\n - `d_gas`: Diffusion coefficient of `O_2` in air\n\n NOTE: CUE is potentially another free parameter, but it has no relevance\n in running DAMM, only in fitting the model with unknown C storage/\n substrate.\n '''\n parameter_names = (\n 'alpha0', 'alpha1', 'alpha2', 'ea', 'km_s', 'p', 'd_liq', 'd_gas')\n\n def __init__(self, params = None, soil_depth_cm = 5, km_O2 = 0.121):\n self.constants = Namespace()\n self.constants.add('km_O2', km_O2)\n self.constants.add('soil_depth_cm', soil_depth_cm)\n self.constants.add('soil_depth_m', soil_depth_cm / 100)\n self.params = params\n if params is None:\n # Use parameters from Davidson et al. 
(2012)\n self.params = (53.8, 0, 0, 72.26, 9.95e-7, 4.14e-4, 3.17, 1.67, np.nan)\n\n def respiration(self, substrate, soil_m, soil_t, porosity):\n '''\n Calculates daily total RH for all soil pools, g C m-2 day-1.\n\n Parameters\n ----------\n substrate : numpy.ndarray\n Soil C substrate (g cm-3) in each pool (3-tuple)\n soil_m : numpy.ndarray\n Soil moisture (%)\n soil_t : numpy.ndarray\n Soil temperature (°K)\n porosity : numpy.ndarray\n Total porosity, between [0, 1]\n\n Returns\n -------\n tuple\n `(rh0, rh1, rh2)` numpy.ndarray instances, one for each of the\n C pools (Units: g m-2 day-1)\n '''\n assert len(substrate) == 3,\\\n 'Need a substrate value for each of 3 pools'\n alpha0, alpha1, alpha2, ea, km_s, p, d_liq = (\n self.params[i] for i in range(0, 7)\n )\n respiration = []\n for i, alpha_i in enumerate((alpha0, alpha1, alpha2)):\n v_max_i = self.v_max(soil_t, alpha_i, ea)\n sx_i, conc_O2 = self.concentrations(substrate[i], soil_m, porosity)\n # Calculate Michaelis-Menten coefficients\n mm_sx = sx_i / (km_s + sx_i) # Units (g C cm-3) cancel out (dimensionless)\n mm_O2 = conc_O2 / (self.constants.km_O2 + conc_O2) # (dimensionless)\n rh = v_max_i * mm_sx * mm_O2 # Mg C cm-3 hr-1; need g C m-2 day-1\n # First convert Mg C to g C, then cm-3 to cm-2, then from\n # cm-2 to m-2, then from hourly to a daily flux (24 hours/ day)\n resp = (((1e6 * rh) * self.constants.soil_depth_cm) * 1e4) * 24\n respiration.append(np.where(resp < 0, 0, resp))\n return respiration\n\n\nclass DAMMDecompositionModel2(AbstractDAMM):\n '''\n The DAMM decomposition model as reported by Davidson et al. (2012), with\n some changes: Support for multiple soil C pools; Additional free\n parameters (not bragging about it, these have to be fit); and Changed the\n units of the pre-exponential factor to better condition optimization.\n This model assumes that the Michaelis-Menten coefficient for substrate is\n *not constant* w.r.t. temperature (the slope-intercept form of km_s). 
It\n also allows any free parameter to be specified as a constant.\n\n Free parameters are:\n\n - `alpha`: Pre-exponential factor of enzymatic reaction with `S_x`\n (Mg C cm-3 hr-1), note this is *Megagrams* of C...\n - `ea`: Activation energy of enzymatic reaction with `S_x`\n (kJ mol-1)\n - `km_s_int`: Intercept of Michaelis-Menten (MM) coefficient for\n substrate (g C cm-3)\n - `km_s_slope`: Slope of Michaelis-Menten (MM) coefficient for\n substrate (g C cm-3 K-1)\n - `p`: Proportion of `C_total` that is soluble\n - `d_liq`: Diffusion coefficient of substrate in liquid phase\n - `d_gas`: Diffusion coefficient of `O_2` in air\n - `km_O2`: Half-saturation (MM) coefficient for diffusion of O2\n\n NOTE: CUE is potentially another free parameter, but it has no relevance\n in running DAMM, only in fitting the model with unknown C storage/\n substrate.\n '''\n parameter_names = (\n 'alpha0', 'alpha1', 'alpha2', 'ea', 'km_s_int', 'km_s_slope',\n 'p', 'd_liq', 'd_gas', 'km_O2')\n\n def __init__(self, params, soil_depth_cm = 5, **kwargs):\n self.constants = Namespace()\n self.constants.add('soil_depth_cm', soil_depth_cm)\n self.constants.add('soil_depth_m', soil_depth_cm / 100)\n self.labels = list(self.parameter_names).copy()\n self.params = params\n for key, value in kwargs.items():\n self.constants.add(key, value)\n # Don't allow constants to appear in the parameters list\n if key in self.labels:\n # May need to re-build parameters list\n self.labels.remove(key)\n for name in self.parameter_names:\n assert name in self.labels or hasattr(self.constants, name),\\\n 'Required parameter \"%s\" must be specified either as a constant nor a free parameter' % name\n\n def respiration(self, substrate, soil_m, soil_t, porosity):\n '''\n Calculates daily total RH for all soil pools, g C m-2 day-1.\n\n Parameters\n ----------\n substrate : numpy.ndarray\n Soil C substrate (g cm-3) in each pool (3-tuple)\n soil_m : numpy.ndarray\n Soil moisture (%)\n soil_t : numpy.ndarray\n Soil temperature (°K)\n porosity : numpy.ndarray\n Total porosity, between [0, 1]\n\n Returns\n -------\n tuple\n (rh0, rh1, rh2) numpy.ndarray instances, one for each of the\n C pools (Units: g m-2 day-1)\n '''\n assert len(substrate) == 3,\\\n 'Need a substrate value for each of 3 pools'\n alpha0, alpha1, alpha2, ea, km_s_int, km_s_slope = (\n self.params[i] for i in range(0, 6)\n )\n respiration = []\n for i, a_i in enumerate((alpha0, alpha1, alpha2)):\n v_max_i = self.v_max(soil_t, a_i, ea)\n sx_i, conc_O2 = self.concentrations(\n np.array(substrate[i]), soil_m, porosity)\n # Calculate Michaelis-Menten coefficients\n km_s = km_s_int + (km_s_slope * soil_t)\n mm_sx = sx_i / (km_s + sx_i) # Units (g C cm-3) cancel out\n mm_O2 = conc_O2 / (self.get('km_O2') + conc_O2) # (dim.less)\n rh = v_max_i * mm_sx * mm_O2 # Mg C cm-3 hr-1\n # First convert Mg C to g C, then cm-3 to cm-2, then from\n # cm-2 to m-2, then from hourly to a daily flux (24 hours/ day)\n resp = (((1e6 * rh) * self.constants.soil_depth_cm) * 1e4) * 24\n respiration.append(np.where(resp < 0, 0, resp))\n return respiration\n\n\ndef g_m2_to_g_cm3(value, soil_depth_cm = 5):\n '''\n Converts flux/ SOC stock from g m-2 to g cm-3.\n\n Parameters\n ----------\n value : int or float\n Value in g m-2\n soil_depth_cm : int\n Depth of the soil, in centimeters\n\n Returns\n -------\n float\n Value in g cm-3\n '''\n return (value / (soil_depth_cm / 100)) / 1e6\n\n\ndef g_cm3_to_g_m2(value, soil_depth_cm = 5):\n '''\n Converts flux/ SOC stock from g cm-3 to g m-2.\n\n Parameters\n 
----------\n value : int or float\n Value in g cm-3\n soil_depth_cm : int\n Depth of the soil, in centimeters\n\n Returns\n -------\n float\n Value in g m-2\n '''\n return (value * soil_depth_cm) * 1e4\n\n\nif __name__ == '__main__':\n # Test model implementation matches the description by Davidson et al.\n # (2012, Table 3) by running...\n # Here we also show that the code is vectorized.\n substrate = (np.array(0.048).repeat(4).reshape((2,2)),\n np.zeros((2,2)), np.zeros((2,2)))\n soil_temp = np.array(273.15 + 37.7).repeat(4).reshape((2,2))\n soil_moisture = np.array(50).repeat(4).reshape((2,2))\n porosity = np.array(1 - (0.8 / 2.52)).repeat(4).reshape((2,2))\n\n damm = DAMMDecompositionModel(soil_depth_cm = 10)\n rh = damm.respiration(substrate, soil_moisture, soil_temp, porosity)\n assert np.all(rh[0].round(2) == 19.04), 'DAMMDecompositionModel: Failed test'\n print('DAMMDecompositionModel: Passed test')\n\n # km_s_int = 0 # Back-solved for km_s_slope given km_s = 9.95e-7\n # km_s_slope = 3.2009e-09\n damm = DAMMDecompositionModel2(\n # alpha E_a km_s_slope p D_liq D_gas\n params = (53.8, 0, 0, 72.26, 0, 3.2e-9, 4.14e-4, 3.17, 1.67, np.nan),\n km_O2 = 0.121, soil_depth_cm = 10)\n rh = damm.respiration(substrate, soil_moisture, soil_temp, porosity)\n assert np.all(rh[0].round(2) == 19.04), 'DAMMDecompositionModel2: Failed test'\n print('DAMMDecompositionModel2: Passed test')\n\n # Vectorization in parameters\n damm = DAMMDecompositionModel2(\n params = (\n np.array([53.8, 56.81]), np.array([0, 8.297]), np.array([0, 6.27e-2]),\n np.array([72.26, 71.13]), np.array([0, -6.28e-3]), np.array([3.2e-9, 2.34e-5]),\n np.array([4.14e-4, 0.126]), np.array([3.17, 23.309]), np.array([1.67, 4.648]),\n np.array([0.121, 8.367e-2])),\n soil_depth_cm = 10)\n rh = damm.respiration([0.048, 0, 0], 50, 310.85, 0.6825)\n assert np.all(rh[0].round(2) == 19.04), 'DAMMDecompositionModel2: Failed test'\n print('DAMMDecompositionModel2: Passed test')\n" ]
[ [ "numpy.multiply", "numpy.power", "numpy.add", "numpy.array", "numpy.zeros", "numpy.where" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
svenschultze/Colab-Live-Figures
[ "fe70ac089eb30f4ccda0996128c3fe13654567f0" ]
[ "live/gif.py" ]
[ "import ffmpeg\nimport cv2\nimport uuid\nimport os\nimport base64\nimport numpy as np\n\ndef save_video_to_file(file, vid, fps):\n writer = cv2.VideoWriter(file.name, cv2.VideoWriter_fourcc('M','J','P','G'), fps, (vid.shape[2], vid.shape[1]))\n for img in vid:\n writer.write(np.flip(img, axis=2))\n writer.release()\n\ndef convert(input, output):\n ffmpeg.input(input).output(output).run()\n\ndef video_to_gif(vid, fps=10):\n filename = uuid.uuid4()\n\n with open(f\"/tmp/{filename}.avi\", \"w\") as avi:\n save_video_to_file(avi, vid, fps)\n\n ffmpeg.input(f\"/tmp/{filename}.avi\").output(f\"/tmp/{filename}.gif\").run()\n\n with open(f\"/tmp/{filename}.gif\", \"rb\") as image_file:\n gif_b64 = base64.b64encode(image_file.read()).decode(\"utf-8\")\n\n os.remove(f\"/tmp/{filename}.avi\")\n os.remove(f\"/tmp/{filename}.gif\")\n\n return gif_b64" ]
[ [ "numpy.flip" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
MyMiDiII/bmstu-aa
[ "d08612e31ccd85f1b07ca9143b5806296198030a" ]
[ "lab07/src/main.py" ]
[ "import matplotlib.pyplot as plt\n\nfrom colored import fg, attr\n\nfrom dictionary import Dictionary\n\nfrom utils import *\nfrom experiments import getTimes, getComps\n\nDATADIR = './data/'\nFULL_COMB_SEARCH = 1\nBIN_SEARCH = 2\nSEGM_SEARCH = 3\n\ndef printInfo():\n print(\"Сравнение алгоритмов поиска в словаре\")\n\n\ndef printAuthor():\n print(\"АВТОР: Маслова Марина\")\n print(\"ГРУППА: ИУ7-53Б\")\n\n\ndef printGreeting():\n print(\"ПОИСК В СЛОВАРЕ\")\n print()\n printInfo();\n print()\n printAuthor();\n\n\ndef printMenu():\n print()\n print(\"МЕНЮ\")\n print()\n print(\"1 -- поиск;\")\n print(\"2 -- сравнение различных алгоритмов по времени;\")\n print(\"3 -- сравнение различных алгоритмов по количеству сравнений;\")\n print(\"0 -- выход\")\n print()\n print(\"Выбор:\")\n\n\ndef singleExperiment(myDict):\n name = input('Введите название игры: ')\n\n print(\"%sПолный перебор%s\" % (fg('blue'), attr(0)))\n res, compNum = myDict.bruteForce(name)\n printRes(res, name)\n print(\"Количество сравнений:\", compNum)\n\n print(\"%sБинарный поиск%s\" % (fg('blue'), attr(0)))\n res, compNum = myDict.binSearch(name)\n printRes(res, name)\n print(\"Количество сравнений:\", compNum)\n\n print(\"%sСегментация словаря%s\" % (fg('blue'), attr(0)))\n res, compNum = myDict.segSearch(name)\n printRes(res, name)\n print(\"Количество сравнений:\", compNum)\n\n\ndef massExperimentsTime(myDict):\n keys = myDict.getKeys()\n inds = [i + 1 for i in range(len(keys))]\n funcs = [\n myDict.bruteForce,\n myDict.binSearch,\n myDict.segSearch\n ]\n\n times = getTimes(funcs, keys)\n\n labels = ['бинарный поиск', 'сегментация']\n\n for i, algTime in enumerate(times):\n if None not in algTime:\n plt.plot(inds, algTime, label=labels[i])\n\n plt.xlabel(\"Индекс ключа\", fontsize=14)\n plt.ylabel(\"Время, ns\", fontsize=14)\n plt.grid(True)\n plt.legend()\n\n plt.show()\n\n\ndef massExperimentsComp(myDict):\n keys = myDict.getKeys()\n inds = [i + 1 for i in range(len(keys))]\n funcs = [\n myDict.bruteForce,\n myDict.binSearch,\n myDict.segSearch\n ]\n algs = [\n 'перебор',\n 'бинарный',\n 'сегментация'\n ]\n\n comps = getComps(funcs, keys)\n\n\n for j in range(3):\n fig, ax = plt.subplots(2, 1)\n\n ax[0].bar(inds, comps[j], color='c')\n ax[0].set(title=algs[j])\n\n sortComps = sorted(comps[j], reverse=True)\n\n ax[1].bar(inds, sortComps, color='c')\n ax[1].set(title=algs[j] + '(по убыванию)')\n \n for i in range(2):\n ax[i].set_xlabel(\"Индекс ключа\")\n ax[i].set_ylabel(\"Количество сравнений\")\n\n plt.subplots_adjust(hspace=0.5)\n plt.get_current_fig_manager().window.showMaximized()\n\n plt.show()\n\n\ndef wrongAnswer():\n print(\"Нет такого пунтка меню!\")\n print(\"Попробуйте ещё раз!\")\n\n\ndef getAnswer():\n answer = input()\n answer = -1 if answer not in (\"0\", \"1\", \"2\", \"3\") else int(answer)\n return answer\n\n\nif __name__ == \"__main__\":\n printGreeting()\n print(\"Загрузка словаря...\")\n myDict = Dictionary(DATADIR + 'games.csv')\n\n menuFuncs = [lambda: True, singleExperiment, massExperimentsTime,\n massExperimentsComp, wrongAnswer]\n args = [[], [myDict], [myDict], [myDict], []]\n\n answer = 1\n while answer:\n printMenu()\n answer = getAnswer()\n menuFuncs[answer](*args[answer])\n\n print(\"Спасибо за использование программы!\")\n" ]
[ [ "matplotlib.pyplot.legend", "matplotlib.pyplot.subplots", "matplotlib.pyplot.plot", "matplotlib.pyplot.get_current_fig_manager", "matplotlib.pyplot.grid", "matplotlib.pyplot.subplots_adjust", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.show", "matplotlib.pyplot.ylabel" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
cvillacampa/dgps_pep
[ "30ce21fd68f3af7b9cdb4d2a267e95c568c3ac93" ]
[ "utils.py" ]
[ "import os\n\nimport numpy as np\nimport psutil\n\n\ndef calculate_ETA(last_epoch_times, current_iteration, max_iterations):\n \"\"\"Calculates remaining training time in seconds\n\n Args:\n last_epoch_times (list/deque): Running time of last epochs\n current_iteration (int): current iteration number\n max_iterations (int): Max training iterations\n \"\"\"\n mean_iteration_time = np.mean(last_epoch_times)\n remaining_iterations = max_iterations - current_iteration\n return mean_iteration_time * remaining_iterations\n\n\ndef calculate_ETA_str(last_epoch_times, current_iteration, max_iterations):\n \"\"\"Calculates remaining training time, returning a string\n\n Args:\n last_epoch_times (list/deque): Running time of last epochs\n current_iteration (int): current iteration number\n max_iterations (int): Max training iterations\n \"\"\"\n if current_iteration < 5:\n return \"-\"\n\n eta = calculate_ETA(last_epoch_times, current_iteration, max_iterations)\n sec_to_min = 60\n sec_to_hour = 3600\n sec_to_day = 86400\n if eta < sec_to_min:\n return \"{:1.0f}s\".format(eta)\n if eta < sec_to_hour:\n return \"{:1.0f}min, {:1.0f}s\".format(eta // sec_to_min, (eta % sec_to_min))\n if eta < sec_to_day:\n return \"{:1.0f}h, {:1.0f}min\".format(\n eta // sec_to_hour, (eta % sec_to_hour) // sec_to_min\n )\n\n return \"{:1.0f}day, {:1.0f}h\".format(\n eta // sec_to_day, (eta % sec_to_day) // sec_to_hour\n )\n\n\ndef extend_dimension_if_1d(np_array):\n return np_array[:, None] if np_array.ndim == 1 else np_array\n\n\ndef memory_used():\n pid = os.getpid()\n py = psutil.Process(pid)\n memory_usage = py.memory_info()[0] / 2.0 ** 30 # memory use in GB\n return memory_usage\n\n" ]
[ [ "numpy.mean" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
clairekope/SALSA
[ "b6d525f4a147b6983f1d6835a42c1fe7b87cf406" ]
[ "salsa/absorber_extractor.py" ]
[ "import yt\nimport trident\nimport numpy as np\nimport pandas as pd\n\nfrom spectacle.fitting import LineFinder1D\nfrom numpy.linalg import norm\nimport astropy.units as u\nfrom astropy.table import QTable\n\nfrom yt.data_objects.static_output import \\\n Dataset\n\nfrom salsa.utils.functions import ion_p_num\nfrom salsa.utils.defaults import default_cloud_dict\n\nclass AbsorberExtractor():\n \"\"\"\n Extracts absorbers from a trident lightray for a given ion species. Does This\n through two methods, by using the SPICE (Simple Procedure for Iterative\n Cloud Extraction) method and by fitting a synthetic spectra made by trident.\n Fit is done using spectacle\n\n Parameters\n --------------\n\n ds_filename: str or YT dataset\n Either Path/name of the dataset to be loaded or the dataset itself\n\n ray_filename: str or Trident ray\n Path/name of the hdf5 ray file to be loaded or the ray already loaded\n\n ion_name: string, optional\n Name of the ion to extract absorbers of\n Default: \"H I\"\n\n cut_region_filters: list of strings, optional\n a list of filters defined by the way you use Cut Regions in YT\n Default: None\n\n wavelegnth_center: float, optional\n The specific absorption line to look at (in unit Angstrom). None\n defaults to strongest absorption line for specified ion\n (using trident's ion table).\n Default: None\n\n velocity_res: float, optional\n width of velocity bins for spectra. Minimum threshold for combining\n absorbers in the SPICE method.\n Default: 10\n\n spectacle_res: float, optional\n Set minimum resolution that spectacle will attempt to fit lines to.\n (in km/s) If None, default to value of velocity_res\n Default: None\n\n spectacle_defaults: dict, optional\n Dictionary passed to spectacle defining default parameters/ranges\n when fitting absorption lines\n Deafult: None\n\n absorber_min: float, optional\n Minimum Log Column Density that will be used to define an absorber.\n If None, defaults to either default for specific ion or 13\n Default: None\n\n frac: float, optional\n Parameter defining what fraction of the number density is being\n accounted for in each iteration of the SPICE method. 
Must be a number\n between 0 and 1.\n Default: 0.8\n\n \"\"\"\n\n def __init__(self, ds_filename, ray_filename,\n ion_name='H I', cut_region_filters=None,\n wavelength_center=None, velocity_res = 10,\n spectacle_defaults=None, spectacle_res=None,\n absorber_min=None, frac=0.8):\n\n\n\n #set file names and ion name\n if isinstance(ds_filename, str):\n self.ds = yt.load(ds_filename)\n elif isinstance(ds_filename, Dataset):\n self.ds = ds_filename\n\n self.ray_filename = ray_filename\n self.ion_name = ion_name\n self.cut_region_filters = cut_region_filters\n if not (0 < frac < 1):\n raise RuntimeError(f\"frac {frac} must be between 0 and 1.\")\n self.frac = frac\n\n #add ion name to list of all ions to be plotted\n self.ion_list = [ion_name]\n\n #open up the dataset and ray files\n self.load_ray(self.ray_filename)\n\n if absorber_min is None:\n if self.ion_name in default_cloud_dict.keys():\n self.absorber_min = default_cloud_dict[self.ion_name]\n else:\n self.absorber_min=13\n else:\n self.absorber_min = absorber_min\n\n self.defaults_dict = {\n 'bounds' :{\n 'column_density' : (self.absorber_min-0.5, 23)\n },\n 'fixed' : {\n 'delta_lambda' : True,\n 'column_density' : False\n }\n }\n\n #add user defined defaults\n if spectacle_defaults is not None:\n self.defaults_dict.update(spectacle_defaults)\n\n self.velocity_res = velocity_res\n\n #default spectacle resolution to velocity_res\n if spectacle_res is None:\n self.spectacle_res = velocity_res\n else:\n self.spectacle_res = spectacle_res\n\n #default set the wavelength center to one of the known spectral lines\n #for ion name. Use tridents line database to search for correct wavelength\n if wavelength_center is None:\n #open up tridents default line database\n lbd = trident.LineDatabase('lines.txt')\n #find all lines that match ion\n lines = lbd.parse_subset(subsets= [self.ion_name])\n #take one with largest f_value\n f_val = 0\n for line in lines:\n if line.f_value >= f_val:\n f_val = line.f_value\n self.wavelength_center = line.wavelength\n else:\n self.wavelength_center = wavelength_center\n\n def load_ray(self, new_ray):\n \"\"\"\n loads a new ray into the multi_plot class. (same dataset)\n\n Parameters\n -----------\n new_ray :str or yt.ray\n either filename to rayfile or a trident ray that's already opened\n\n \"\"\"\n #reset absorber extraction variables\n # variables to store raw info from the different methods\n self.spectacle_model=None\n self.spice_intervals=None\n\n # to store absorber feature table\n self.spice_df=None\n self.spectacle_df=None\n\n #store number of features found\n self.num_spice = None\n self.num_spectacle = None\n\n #check if str else assume is ray\n if isinstance(new_ray, str):\n self.ray_filename=new_ray\n self.ray = yt.load(new_ray)\n else:\n self.ray = new_ray\n self.ray_filename=new_ray.filename_template\n\n #save uncut data. 
define center\n self.uncut_data = self.ray.all_data()\n\n #apply cut region if specified\n if self.cut_region_filters is None:\n self.data = self.uncut_data\n else:\n curr_data = self.uncut_data\n #iteratively apply filters\n for filter in self.cut_region_filters:\n curr_data = curr_data.cut_region(filter)\n\n self.data = curr_data\n\n # Check if ray is empty due to cuts\n if self.data['l'].size == 0:\n print(f'light ray {self.ray} is empty')\n\n def ray_position_prop(self, units='code_length'):\n \"\"\"\n returns positional/directional properties of the ray so that it can be used like a vector\n\n Parameters\n -----------\n\n units : str, optional\n YT defined units to return arrays in. defaults to 'code length'.\n Default: 'code_length'\n Returns\n -------\n ray_begin : yt.arr\n the starting coordinates of ray\n\n ray_end : yt.arr\n the ending coordinates of the ray\n\n ray_length : yt.arr\n the length of the ray\n\n ray_unit : yt.arr\n unit vector showing direction of the ray\n \"\"\"\n #get start and end points of ray. convert to defined units\n #print(self.ray)\n ray_begin = self.ray.light_ray_solution[0]['start']\n ray_end = self.ray.light_ray_solution[0]['end']\n\n ray_begin = ray_begin.in_units(units)\n ray_end = ray_end.in_units(units)\n\n #construct vector pointing in ray's direction\n ray_vec = ray_end - ray_begin\n ray_length = self.ds.quan(norm(ray_vec.value), units)\n\n #normalize vector to unit length\n ray_unit = ray_vec/ray_length\n\n return ray_begin, ray_end, ray_length, ray_unit\n\n def close(self):\n \"\"\"\n close all opened files, dataset, ray\n \"\"\"\n\n self.ds.close()\n self.ray.close()\n\n def get_spice_absorbers(self, fields=[], units_dict={}):\n \"\"\"\n Use the SPICE method to extract absorbers and then find features of\n absorbers. 
Default outputs column density and central velocity of the\n absorption line (delta_v) as well as requested fields All in\n a pandas dataframe.\n\n Parameters\n -----------\n\n fields : list, optional\n list of yt fields to extract averages of for the absorbers.\n Defalut: []\n\n units_dict : dict, optional\n dictionary of fields and corresponding units to use for each field.\n Default: {}\n\n Returns\n ---------\n\n absorber_info : pandas.DataFrame\n Dataframe of all the absorbers and their corresponding features.\n\n \"\"\"\n # get absorber locations\n self.spice_intervals = self.run_spice()\n self.num_spice = len(self.spice_intervals)\n\n\n # line information for absorbers\n name_type = [('name', str),\n ('wave', np.float64),\n ('redshift', np.float64),\n ('col_dens', np.float64),\n ('delta_v', np.float64),\n ('vel_dispersion', np.float64),\n ('interval_start', np.int32),\n ('interval_end', np.int32)]\n\n # get name of columns and type of data for each\n for f in fields:\n name_type.append( (f, np.float64) )\n\n #check if any absorbrs were found\n n_abs = len(self.spice_intervals)\n if n_abs == 0:\n print(\"No absorbers in ray: \", self.ray)\n return None\n\n #initialize empty table\n stats_table = pd.DataFrame(np.empty(n_abs , dtype=name_type))\n\n #add ion name and wavelength\n stats_table['name']= self.ion_name\n stats_table['wave'] = self.wavelength_center\n stats_table['redshift'] = self.ds.current_redshift\n\n # fill table with absorber features\n for i in range(n_abs):\n #load data for calculating properties\n start, end = self.spice_intervals[i]\n stats_table.loc[i, 'interval_start'] = start\n stats_table.loc[i, 'interval_end'] = end\n dl = self.data['dl'][start:end].in_units('cm')\n density = self.data[('gas', 'density')][start:end].in_units('g/cm**3')\n tot_density = np.sum(dl*density)\n\n #calculate column density\n ion_field = ion_p_num(self.ion_name)\n ion_density=self.data[ion_field][start:end].in_units('cm**-3')\n col_density = np.sum(dl*ion_density)\n\n stats_table.loc[i, 'col_dens'] = np.log10(col_density)\n\n #calculate delta_v of absorber. ion col dense weighted\n vel_los_dat = self.data['velocity_los'][start:end].in_units('km/s')\n central_vel = np.sum(dl*ion_density*vel_los_dat)/col_density\n stats_table.loc[i, 'delta_v'] = central_vel\n\n #calculate velocity dispersion\n\n # set single cell absorber to zero velocity variance\n if end-start == 1:\n vel_variance=np.nan\n else:\n #weighted sample variance\n vel_variance=col_density \\\n *np.sum(dl*ion_density * ( vel_los_dat - self.ds.quan(central_vel, 'km/s') )**2) \\\n /(col_density**2 - np.sum( (dl*ion_density)**2 ))\n\n stats_table.loc[i, 'vel_dispersion'] = np.sqrt(vel_variance)\n\n #calculate other field averages. 
gas col density weighted\n for fld in fields:\n fld_data = self.data[fld][start:end]\n avg_fld = np.sum(dl*density*fld_data)/tot_density\n\n if fld in units_dict.keys():\n stats_table.loc[i, fld] = avg_fld.in_units( units_dict[fld] )\n else:\n stats_table.loc[i, fld] = avg_fld\n\n self.spice_df = stats_table\n return self.spice_df\n\n def get_spectacle_absorbers(self):\n \"\"\"\n Uses spectacle to fit a trident made spectra of the specified ion.\n\n Returns\n ----------\n line_stats : pandas.DataFrame\n Table including all line statistics found from spectacle's fit of\n the spectra.\n \"\"\"\n #create spectra for a single line to fit\n wav = int( np.round(self.wavelength_center) )\n line = f\"{self.ion_name} {wav}\"\n #format ion correctly to fit\n ion_wav= \"\".join(line.split())\n\n vel_array, flux_array=self._create_spectra()\n\n #constrain possible column density values\n #create line model\n line_finder = LineFinder1D(ions=[ion_wav], continuum=1, z=0,\n defaults=self.defaults_dict,\n fitter_args={'maxiter':2000},\n threshold=0.01, output='flux',\n min_distance=self.spectacle_res, auto_fit=True)\n #fit data\n try:\n spec_model = line_finder(vel_array*u.Unit('km/s'), flux_array)\n except RuntimeError:\n print('fit failed(prolly hit max iterations)', self.ray)\n spec_model = None\n except IndexError:\n print('INDEX ERROR on', self.ray)\n spec_model = None\n\n #check if fit found any lines\n if spec_model is None:\n print('line could not be fit on ray ', self.ray)\n self.spectacle_model = None\n line_stats = None\n self.num_spectacle = 0\n\n else:\n init_stats = spec_model.line_stats(vel_array*u.Unit('km/s'))\n\n # include only lines greater than absorber_min\n line_indxs, = np.where( init_stats['col_dens'] >= self.absorber_min)\n if line_indxs.size == 0:\n print('line could not be fit on ray ', self.ray)\n self.spectacle_model = None\n line_stats = None\n self.num_spectacle = 0\n\n else:\n # retrieve lines that pass col dense threshold\n good_lines=[]\n for i in line_indxs:\n good_lines.append(spec_model.lines[i])\n\n #create and save new model with lines desired\n self.spectacle_model = spec_model.with_lines(good_lines, reset=True)\n self.num_spectacle = len(good_lines)\n line_stats=self.spectacle_model.line_stats(vel_array*u.Unit('km/s'))\n\n #add redshift\n line_stats['redshift'] = self.ds.current_redshift\n line_stats = line_stats.to_pandas()\n self.spectacle_df=line_stats\n return self.spectacle_df\n\n def run_spice(self):\n \"\"\"\n iteratively run the cloud method to extract all the absorbers in the\n lightray.\n\n Returns\n --------\n :final_intervals: list of tuples of int\n List of the indices that indicate the start and end of each absorber.\n \"\"\"\n num_density = self.data[ion_p_num(self.ion_name)].in_units(\"cm**(-3)\")\n dl_list = self.data['dl'].in_units('cm')\n\n all_intervals=[]\n curr_num_density = num_density.copy()\n curr_col_density = np.sum(num_density*dl_list)\n min_col_density = 10**self.absorber_min\n\n while curr_col_density > min_col_density:\n #calc threshold to get fraction from current num density\n curr_thresh = self._cloud_method(curr_num_density, coldens_fraction=self.frac)\n\n #extract intervals this would cover\n curr_intervals = self._identify_intervals(curr_thresh)\n all_intervals = self._sensible_combination(all_intervals, curr_intervals)\n\n #mask density array above threshold and apply mask to dl\n curr_num_density = np.ma.masked_greater_equal(num_density, curr_thresh)\n curr_dl = np.ma.masked_array(dl_list, curr_num_density.mask)\n\n #calc 
leftover column density\n curr_col_density = np.sum(curr_num_density*curr_dl)\n\n #make sure intervals have high enough col density\n final_intervals=[]\n for b, e in all_intervals:\n lcd = np.log10(np.sum(dl_list[b:e]*num_density[b:e]))\n if lcd > self.absorber_min:\n final_intervals.append((b, e))\n return final_intervals\n\n def _create_spectra(self):\n \"\"\"\n Use trident to create the absorption spectrum of the ray in velocity\n space for use in fitting.\n\n Returns\n --------\n velocity: YT array\n Array of velocity values of the generated spectra (in km/s)\n\n flux: YT array\n Array of the normalized flux values of the generated spectra\n\n \"\"\"\n #set which ions to add to spectra\n wav = int( np.round(self.wavelength_center) )\n line = f\"{self.ion_name} {wav}\"\n ion_list = [line]\n\n #use auto feature to capture full line\n spect_gen = trident.SpectrumGenerator(lambda_min=\"auto\", lambda_max=\"auto\", dlambda = self.velocity_res, bin_space=\"velocity\")\n spect_gen.make_spectrum(self.data, lines=ion_list)\n\n #get fields from spectra and give correct units\n flux = spect_gen.flux_field\n velocity = spect_gen.lambda_field\n\n return velocity, flux\n\n def _cloud_method(self, num_density_arr, coldens_fraction):\n \"run the cloud method\"\n cut = 0.999\n total = np.sum(num_density_arr)\n ratio = 0.001\n while ratio < coldens_fraction:\n part = np.sum(num_density_arr[num_density_arr > cut * np.max(num_density_arr)])\n ratio = part / total\n cut = cut - 0.001\n\n threshold = cut * np.max(num_density_arr)\n\n return threshold\n\n def _sensible_combination(self, prev_intervals, curr_intervals):\n \"\"\"\n adds new intervals by taking into account the velocities when combining them\n\n Parameters\n -----------\n prev_intervals : list\n the intervals already calculated\n\n curr_intervals : list\n the intervals that need to be added/combined\n\n Returns\n --------\n new_intervals : list\n a final list of intervals where prev and curr are properly combined.\n \"\"\"\n dl_array = self.data['dl'].in_units('cm')\n l_array = self.data['l'].in_units('cm')\n velocity_array = self.data['velocity_los'].in_units('km/s')\n density_array = self.data['density']\n\n # first check no region jumping (from use of cut_regions)\n if self.cut_region_filters is not None:\n #make sure spatially connected\n real_intervals = []\n for curr_b, curr_e in curr_intervals:\n #check if lengths match up\n size_dl = np.sum(dl_array[curr_b:curr_e])\n size_l = l_array[curr_e] - l_array[curr_b]\n rel_diff = abs(size_dl - size_l)/size_dl\n #print(\"rel diff: \", rel_diff)\n if rel_diff > 1e-12:\n print(curr_b, curr_e)\n # make sure things are good\n divide_indx=None\n for i in range(curr_b, curr_e):\n # find where the jump is\n\n rel_diff = abs(l_array[i] +dl_array[i] - l_array[i+1])/l_array[i]\n #print(i, rel_diff.value)\n if rel_diff > 1e-12:\n divide_indx=i\n break\n #append intervals split up by the jump\n if divide_indx is not None:\n print(divide_indx)\n real_intervals.append((curr_b, divide_indx))\n real_intervals.append((divide_indx+1, curr_e))\n else:\n print(\"couldn't divide index for \",curr_b, \" \", curr_e)\n\n else:\n real_intervals.append((curr_b, curr_e))\n curr_intervals = real_intervals.copy()\n\n\n #check if there are any previous intervals to combine with\n if prev_intervals == []:\n return curr_intervals\n\n new_intervals=prev_intervals.copy()\n del_v = self.ds.quan(self.velocity_res, 'km/s')\n\n #loop through current intervals\n for curr_b, curr_e in curr_intervals:\n\n overlap_intervals=[]\n #loop 
through all previous intervals\n for b,e in prev_intervals:\n #check if previous interval is nested in curr interval\n if curr_b <= b and curr_e >= b:\n #print(f\"interval ({curr_b}, {curr_e}) overlap with \", b, e)\n if curr_b <= e and curr_e >= e:\n overlap_intervals.append((b, e))\n\n #check if just beginning point enclose\n else:\n err_file = open(\"error_file.txt\", 'a')\n err_file.write(f\"{self.ray_filename} had an intersection that wasn't complete :/\")\n err_file.close()\n #check if just endpoint enclosed\n elif curr_b <= e and curr_e >= e:\n err_file = open(\"error_file.txt\", 'a')\n err_file.write(f\"{self.ray_filename} had an intersection that wasn't complete :/\")\n err_file.close()\n\n #\n #\n #This is such a mess below but it works\n #hopefully I'll think of a much cleaner way to do this\n #but for now this is it\n #\n #\n\n if overlap_intervals == []:\n new_intervals.append((curr_b, curr_e))\n else:\n\n #collect overlap points into list\n points = [curr_b]\n for b, e in overlap_intervals:\n #print(f\"curr {curr_b, curr_e} ovelaps {b, e}\")\n new_intervals.remove((b, e))\n points.append(b)\n points.append(e)\n points.append(curr_e)\n\n avg_v=[]\n for i in range(len(points)-1):\n pnt1, pnt2 = points[i], points[i+1]\n #find weighted avg velocity\n curr_dense = density_array[pnt1:pnt2]\n curr_dl = dl_array[pnt1:pnt2]\n vel = np.sum(curr_dense*curr_dl*velocity_array[pnt1:pnt2]) \\\n /np.sum(curr_dense*curr_dl)\n avg_v.append((vel, pnt1, pnt2))\n\n start_b = curr_b\n for i in range(len(avg_v)-1):\n #if velocity difference is greater than threshold\n if abs(avg_v[i][0] - avg_v[i+1][0]) > del_v:\n #create new interval\n new_intervals.append((start_b, avg_v[i][2]))\n #change start of next interval\n start_b = avg_v[i+1][1]\n #check if this is the last two intervals to check\n if i == len(avg_v) -2:\n new_intervals.append((start_b, curr_e))\n\n\n return new_intervals\n\n def _identify_intervals(self, cutoff):\n \"\"\"\n Find the intervals for absorbers using some cutoff on the number density\n field along lightray.\n\n Parameters\n -----------\n cutoff : double\n threshold defining where absorbers are.\n\n Returns\n -----------\n intervals : list of tuples\n list of the intervals defining the absorbers in this ray.\n \"\"\"\n num_density = self.data[ion_p_num(self.ion_name)].in_units(\"cm**(-3)\")\n in_absorber = False\n intervals = []\n\n #Iterate over values in field\n for i,value in enumerate(num_density):\n #Check if started an absorber and if above cutoff\n if in_absorber and value < cutoff:\n in_absorber = False\n #add interval to list\n intervals.append((start,i))\n # check if just entered an absorber\n elif not in_absorber and value >= cutoff:\n in_absorber = True\n start = i\n else:\n continue\n #check if was still in absorber when hitting end of ray\n if in_absorber and start != i:\n intervals.append((start, i))\n return intervals\n" ]
[ [ "numpy.ma.masked_greater_equal", "numpy.sqrt", "numpy.linalg.norm", "numpy.round", "numpy.max", "numpy.log10", "numpy.ma.masked_array", "numpy.where", "numpy.sum", "numpy.empty" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
RUB-SysSec/WaveFake
[ "d52d51b9ccdb0cec3f484e84b228791f06b955be" ]
[ "tests/test_dataset.py" ]
[ "\"\"\"Test cases for datahandling.\"\"\"\nimport unittest\n\nimport torch\nfrom dfadetect.datasets import AudioDataset\nfrom dfadetect.utils import find_wav_files\n\nfrom tests.utils import REAL_PATH, load_real, load_special\n\n\nclass TestAudioDataset(unittest.TestCase):\n\n def test_loading_audio(self):\n dataset = load_real()\n\n # found all files\n self.assertEqual(len(dataset), 5)\n\n # returns sample rate\n self.assertEqual(len(dataset[0]), 2)\n\n def test_resampling(self):\n new_rate = 24_000\n dataset = load_real(sample_rate=new_rate)\n\n for _, sample_rate in dataset:\n self.assertEqual(sample_rate, new_rate)\n\n def test_loading_audio_triming(self):\n # trimmed\n dataset = load_real()\n\n trim_time = 0.\n for waveform, _ in dataset:\n trim_time += waveform.shape[1]\n\n # not trimmed\n dataset = load_real(trim=False)\n\n orig_time = 0.\n for waveform, _ in dataset:\n orig_time += waveform.shape[1]\n\n self.assertGreater(orig_time, trim_time)\n\n def test_trimming_entire_file(self):\n dataset = load_special()\n\n # check that we do not trim entire file\n for waveform, _sr in dataset:\n self.assertGreater(waveform.size()[1], 0)\n\n def test_phone_call(self):\n dataset = load_special(phone_call=True)\n\n for _waveform, sr in dataset:\n self.assertEqual(sr, 8_000)\n\n def test_phone_call_reassigned(self):\n dataset = load_special()\n\n for _waveform, sr in dataset:\n self.assertEqual(sr, 16_000)\n\n dataset.phone_call = True\n\n for _waveform, sr in dataset:\n self.assertEqual(sr, 8_000)\n\n def test_list_of_paths(self):\n ref = load_real()\n paths = find_wav_files(REAL_PATH)\n from_paths = AudioDataset(paths)\n\n for (file_1, sr_1), (file_2, sr_2) in zip(ref, from_paths):\n self.assertTrue(torch.allclose(file_1, file_2))\n self.assertEqual(sr_1, sr_2)\n\n\nif __name__ == \"__main__\":\n unittest.main()\n" ]
[ [ "torch.allclose" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
sergiuionescu/gym-agents
[ "c65b786a148a868c9ee922dca1cb592604e7e828" ]
[ "agents/DiffAgent.py" ]
[ "import numpy as np\n\nfrom agents import DiffAgentBase\n\n\nclass DiffAgent(DiffAgentBase.DiffAgentBase):\n\n def prediction(self, observation):\n self.diff = []\n self.noise_reduction = []\n for dimension in self.space.spaces:\n self.diff.append(np.random.randint(0, dimension.n))\n self.noise_reduction.append(np.random.randint(2))\n\n def act(self, ob):\n self.current_prediction = []\n key = 0\n for dimension in self.space.spaces:\n self.current_prediction.append((self.diff[key] + ob * self.noise_reduction[key]) % dimension.n)\n key += 1\n\n return self.current_prediction\n\n def add_reward(self, observation, reward):\n if reward <= 0:\n self.prediction()\n\n self.experience.add_reward(observation, reward)\n self.experience.success += reward > 0\n self.experience.total_success += reward > 0\n" ]
[ [ "numpy.random.randint" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
beenje/taurus_pyqtgraph
[ "2b29d81b0af35ea64a5ac4dfd7773d2d22af497e" ]
[ "taurus_pyqtgraph/dateaxisitem.py" ]
[ "#!/usr/bin/env python\n\n#############################################################################\n##\n# This file is part of Taurus\n##\n# http://taurus-scada.org\n##\n# Copyright 2011 CELLS / ALBA Synchrotron, Bellaterra, Spain\n##\n# Taurus is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Lesser General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n##\n# Taurus is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Lesser General Public License for more details.\n##\n# You should have received a copy of the GNU Lesser General Public License\n# along with Taurus. If not, see <http://www.gnu.org/licenses/>.\n##\n#############################################################################\n\n\"\"\"\nThis module provides date-time aware axis\n\"\"\"\n\n__all__ = [\"DateAxisItem\"]\n\n# -------------------------------------------------------------------------\n# There is a conflict problem with PyQt versions. Pyqtgraph imports his own\n# library of PyQt, and Taurus too. So we have to import Qt from own version\n# first as a workaround for forcing our own (as a workaround)\nfrom taurus.external.qt import Qt # noqa\n\n# -------------------------------------------------------------------------\n\nimport numpy\nfrom pyqtgraph import AxisItem\nfrom datetime import datetime, timedelta\nfrom time import mktime\n\n\nclass DateAxisItem(AxisItem):\n \"\"\"\n A tool that provides a date-time aware axis. It is implemented as an\n AxisItem that interpretes positions as unix timestamps (i.e. seconds\n since 1970).\n\n The labels and the tick positions are dynamically adjusted depending\n on the range.\n\n It provides a :meth:`attachToPlotItem` method to add it to a given\n PlotItem\n \"\"\"\n\n # TODO: Document this class and methods\n # Max width in pixels reserved for each label in axis\n _pxLabelWidth = 80\n\n def __init__(self, *args, **kwargs):\n AxisItem.__init__(self, *args, **kwargs)\n self._oldAxis = None\n\n def tickValues(self, minVal, maxVal, size):\n \"\"\"\n Reimplemented from PlotItem to adjust to the range and to force\n the ticks at \"round\" positions in the context of time units instead of\n rounding in a decimal base\n \"\"\"\n\n maxMajSteps = int(size // self._pxLabelWidth)\n\n dx = maxVal - minVal\n majticks = []\n\n try:\n dt1 = datetime.fromtimestamp(minVal)\n dt2 = datetime.fromtimestamp(maxVal)\n except Exception as e:\n from taurus import warning\n\n warning(\"Invalid range in DateTime axis: %s\", e)\n return [(dx, [])]\n\n if dx > 63072001: # 3600s*24*(365+366) = 2 years (count leap year)\n d = timedelta(days=366)\n for y in range(dt1.year + 1, dt2.year + 1):\n dt = datetime(year=y, month=1, day=1)\n majticks.append(mktime(dt.timetuple()))\n\n elif dx > 5270400: # 3600s*24*61 = 61 days\n d = timedelta(days=31)\n dt = (\n dt1.replace(day=1, hour=0, minute=0, second=0, microsecond=0)\n + d\n )\n while dt < dt2:\n # make sure that we are on day 1 (even if always sum 31 days)\n dt = dt.replace(day=1)\n majticks.append(mktime(dt.timetuple()))\n dt += d\n\n elif dx > 172800: # 3600s24*2 = 2 days\n d = timedelta(days=1)\n dt = dt1.replace(hour=0, minute=0, second=0, microsecond=0) + d\n while dt < dt2:\n majticks.append(mktime(dt.timetuple()))\n dt += d\n\n elif dx > 7200: # 3600s*2 = 2hours\n d = 
timedelta(hours=1)\n dt = dt1.replace(minute=0, second=0, microsecond=0) + d\n while dt < dt2:\n majticks.append(mktime(dt.timetuple()))\n dt += d\n\n elif dx > 1200: # 60s*20 = 20 minutes\n d = timedelta(minutes=10)\n dt = (\n dt1.replace(\n minute=(dt1.minute // 10) * 10, second=0, microsecond=0\n )\n + d\n )\n while dt < dt2:\n majticks.append(mktime(dt.timetuple()))\n dt += d\n\n elif dx > 120: # 60s*2 = 2 minutes\n d = timedelta(minutes=1)\n dt = dt1.replace(second=0, microsecond=0) + d\n while dt < dt2:\n majticks.append(mktime(dt.timetuple()))\n dt += d\n\n elif dx > 20: # 20s\n d = timedelta(seconds=10)\n dt = dt1.replace(second=(dt1.second // 10) * 10, microsecond=0) + d\n while dt < dt2:\n majticks.append(mktime(dt.timetuple()))\n dt += d\n\n elif dx > 2: # 2s\n d = timedelta(seconds=1)\n # majticks = list(range(int(minVal), int(maxVal)))\n majticks = list(\n range(int(numpy.ceil(minVal)), int(numpy.ceil(maxVal)))\n )\n\n else: # <2s , use standard implementation from parent\n return AxisItem.tickValues(self, minVal, maxVal, size)\n\n # print(\"majticks >: \", majticks)\n\n L = len(majticks)\n if L > maxMajSteps:\n if maxMajSteps == 0:\n majticks = []\n else:\n majticks = majticks[:: int(numpy.ceil(float(L) / maxMajSteps))]\n\n # print(\"majticks <: \", majticks)\n # print \"----------------------------\"\n\n return [(d.total_seconds(), majticks)]\n\n def tickStrings(self, values, scale, spacing):\n \"\"\"Reimplemented from PlotItem to adjust to the range\"\"\"\n ret = []\n if not values:\n return []\n # rng = max(values)-min(values)\n # print('values: ', values)\n # print('scale: ', scale)\n # print('spacing: ', spacing)\n\n if spacing >= 31622400: # = timedelta(days=366).total_seconds\n fmt = \"%Y\"\n\n elif spacing >= 2678400: # = timedelta(days=31).total_seconds\n fmt = \"%Y %b\"\n\n elif spacing >= 86400: # = timedelta(days = 1).total_seconds\n fmt = \"%b/%d\"\n\n elif spacing >= 3600: # = timedelta(hours=1).total_seconds\n fmt = \"%b/%d-%Hh\"\n\n elif spacing >= 600: # = timedelta(minutes=10).total_seconds\n fmt = \"%H:%M\"\n\n elif spacing >= 60: # = timedelta(minutes=1).total_seconds\n fmt = \"%H:%M\"\n\n elif spacing >= 10: # 10 s\n fmt = \"%H:%M:%S\"\n\n elif spacing >= 1: # 1s\n fmt = \"%H:%M:%S\"\n\n else:\n # less than 2s (show microseconds)\n # fmt = '%S.%f\"'\n fmt = \"[+%fms]\" # explicitly relative to last second\n\n for x in values:\n try:\n t = datetime.fromtimestamp(x)\n ret.append(t.strftime(fmt))\n except ValueError: # Windows can't handle dates before 1970\n ret.append(\"\")\n\n return ret\n\n def attachToPlotItem(self, plotItem):\n \"\"\"Add this axis to the given PlotItem\n\n :param plotItem: (PlotItem)\n \"\"\"\n self.setParentItem(plotItem)\n viewBox = plotItem.getViewBox()\n self.linkToView(viewBox)\n self._oldAxis = plotItem.axes[self.orientation][\"item\"]\n self._oldAxis.hide()\n plotItem.axes[self.orientation][\"item\"] = self\n pos = plotItem.axes[self.orientation][\"pos\"]\n plotItem.layout.addItem(self, *pos)\n self.setZValue(-1000)\n\n def detachFromPlotItem(self):\n \"\"\"Remove this axis from its attached PlotItem\n (not yet implemented)\n \"\"\"\n pass # TODO\n\n\nif __name__ == \"__main__\":\n\n import sys\n import pyqtgraph as pg\n from taurus.qt.qtgui.application import TaurusApplication\n from taurus.qt.qtgui.tpg import TaurusPlotDataItem\n\n app = TaurusApplication()\n\n # a standard pyqtgraph plot_item\n w = pg.PlotWidget()\n axis = DateAxisItem(orientation=\"bottom\")\n\n axis.attachToPlotItem(w.getPlotItem())\n\n # adding a taurus 
data item\n c2 = TaurusPlotDataItem()\n w.addItem(c2)\n\n w.show()\n\n sys.exit(app.exec_())\n" ]
[ [ "numpy.ceil" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
RunxinXu/GIT
[ "3f91743656ae65c49bbfbe11a7ed8152a8b0bc20" ]
[ "dee/dee_helper.py" ]
[ "# Code Reference: (https://github.com/dolphin-zs/Doc2EDAG)\n\nimport logging\nimport os\nimport re\nfrom collections import defaultdict, Counter\nimport numpy as np\nimport torch\n\nfrom .dee_metric import measure_event_table_filling\nfrom .event_type import event_type2event_class, BaseEvent, event_type_fields_list, common_fields\nfrom .ner_task import NERExample, NERFeatureConverter\nfrom .utils import default_load_json, default_dump_json, default_dump_pkl, default_load_pkl\n\n\nlogger = logging.getLogger(__name__)\n\n\nclass DEEExample(object):\n def __init__(self, annguid, detail_align_dict, only_inference=False):\n self.guid = annguid\n # [sent_text, ...]\n self.sentences = detail_align_dict['sentences']\n self.num_sentences = len(self.sentences)\n\n if only_inference:\n # set empty entity/event information\n self.only_inference = True\n self.ann_valid_mspans = []\n self.ann_mspan2dranges = {}\n self.ann_mspan2guess_field = {}\n self.recguid_eventname_eventdict_list = []\n self.num_events = 0\n self.sent_idx2srange_mspan_mtype_tuples = {}\n self.event_type2event_objs = {}\n else:\n # set event information accordingly\n self.only_inference = False\n\n # [span_text, ...]\n self.ann_valid_mspans = detail_align_dict['ann_valid_mspans']\n # span_text -> [drange_tuple, ...]\n self.ann_mspan2dranges = detail_align_dict['ann_mspan2dranges']\n # span_text -> guessed_field_name\n self.ann_mspan2guess_field = detail_align_dict['ann_mspan2guess_field']\n # [(recguid, event_name, event_dict), ...]\n self.recguid_eventname_eventdict_list = detail_align_dict['recguid_eventname_eventdict_list']\n self.num_events = len(self.recguid_eventname_eventdict_list)\n\n # for create ner examples\n # sentence_index -> [(sent_match_range, match_span, match_type), ...]\n self.sent_idx2srange_mspan_mtype_tuples = {}\n for sent_idx in range(self.num_sentences):\n self.sent_idx2srange_mspan_mtype_tuples[sent_idx] = []\n\n for mspan in self.ann_valid_mspans:\n for drange in self.ann_mspan2dranges[mspan]:\n sent_idx, char_s, char_e = drange\n sent_mrange = (char_s, char_e)\n\n sent_text = self.sentences[sent_idx]\n if sent_text[char_s: char_e] != mspan:\n raise Exception('GUID: {} span range is not correct, span={}, range={}, sent={}'.format(\n annguid, mspan, str(sent_mrange), sent_text\n ))\n\n guess_field = self.ann_mspan2guess_field[mspan]\n\n self.sent_idx2srange_mspan_mtype_tuples[sent_idx].append(\n (sent_mrange, mspan, guess_field)\n )\n\n # for create event objects\n # the length of event_objs should >= 1\n self.event_type2event_objs = {}\n for mrecguid, event_name, event_dict in self.recguid_eventname_eventdict_list:\n event_class = event_type2event_class[event_name]\n event_obj = event_class()\n assert isinstance(event_obj, BaseEvent)\n event_obj.update_by_dict(event_dict, recguid=mrecguid)\n\n if event_obj.name in self.event_type2event_objs:\n self.event_type2event_objs[event_obj.name].append(event_obj)\n else:\n self.event_type2event_objs[event_name] = [event_obj]\n\n def __repr__(self):\n dee_str = 'DEEExample (\\n'\n dee_str += ' guid: {},\\n'.format(repr(self.guid))\n\n if not self.only_inference:\n dee_str += ' span info: (\\n'\n for span_idx, span in enumerate(self.ann_valid_mspans):\n gfield = self.ann_mspan2guess_field[span]\n dranges = self.ann_mspan2dranges[span]\n dee_str += ' {:2} {:20} {:30} {}\\n'.format(span_idx, span, gfield, str(dranges))\n dee_str += ' ),\\n'\n\n dee_str += ' event info: (\\n'\n event_str_list = repr(self.event_type2event_objs).split('\\n')\n for event_str in 
event_str_list:\n dee_str += ' {}\\n'.format(event_str)\n dee_str += ' ),\\n'\n\n dee_str += ' sentences: (\\n'\n for sent_idx, sent in enumerate(self.sentences):\n dee_str += ' {:2} {}\\n'.format(sent_idx, sent)\n dee_str += ' ),\\n'\n\n dee_str += ')\\n'\n\n return dee_str\n\n @staticmethod\n def get_event_type_fields_pairs():\n return list(event_type_fields_list)\n\n @staticmethod\n def get_entity_label_list():\n visit_set = set()\n entity_label_list = [NERExample.basic_entity_label]\n\n for field in common_fields:\n if field not in visit_set:\n visit_set.add(field)\n entity_label_list.extend(['B-' + field, 'I-' + field])\n\n for event_name, fields in event_type_fields_list:\n for field in fields:\n if field not in visit_set:\n visit_set.add(field)\n entity_label_list.extend(['B-' + field, 'I-' + field])\n\n return entity_label_list\n\nclass DEEExampleLoader(object):\n def __init__(self, rearrange_sent_flag, max_sent_len):\n self.rearrange_sent_flag = rearrange_sent_flag\n self.max_sent_len = max_sent_len\n\n def rearrange_sent_info(self, detail_align_info):\n if 'ann_valid_dranges' not in detail_align_info:\n detail_align_info['ann_valid_dranges'] = []\n if 'ann_mspan2dranges' not in detail_align_info:\n detail_align_info['ann_mspan2dranges'] = {}\n\n detail_align_info = dict(detail_align_info)\n split_rgx = re.compile('[,::;;))]')\n\n raw_sents = detail_align_info['sentences']\n doc_text = ''.join(raw_sents)\n raw_dranges = detail_align_info['ann_valid_dranges']\n raw_sid2span_char_set = defaultdict(lambda: set())\n for raw_sid, char_s, char_e in raw_dranges:\n span_char_set = raw_sid2span_char_set[raw_sid]\n span_char_set.update(range(char_s, char_e))\n\n # try to split long sentences into short ones by comma, colon, semi-colon, bracket 不能把mention切开!\n short_sents = []\n for raw_sid, sent in enumerate(raw_sents):\n span_char_set = raw_sid2span_char_set[raw_sid]\n if len(sent) > self.max_sent_len:\n cur_char_s = 0\n for mobj in split_rgx.finditer(sent):\n m_char_s, m_char_e = mobj.span()\n if m_char_s in span_char_set:\n continue\n short_sents.append(sent[cur_char_s:m_char_e])\n cur_char_s = m_char_e\n short_sents.append(sent[cur_char_s:])\n else:\n short_sents.append(sent)\n\n # merge adjacent short sentences to compact ones that match max_sent_len\n comp_sents = ['']\n for sent in short_sents:\n prev_sent = comp_sents[-1]\n if len(prev_sent + sent) <= self.max_sent_len:\n comp_sents[-1] = prev_sent + sent\n else:\n comp_sents.append(sent)\n\n # get global sentence character base indexes\n raw_char_bases = [0]\n for sent in raw_sents:\n raw_char_bases.append(raw_char_bases[-1] + len(sent))\n comp_char_bases = [0]\n for sent in comp_sents:\n comp_char_bases.append(comp_char_bases[-1] + len(sent))\n\n assert raw_char_bases[-1] == comp_char_bases[-1] == len(doc_text)\n\n # calculate compact doc ranges\n raw_dranges.sort()\n raw_drange2comp_drange = {}\n prev_comp_sid = 0\n for raw_drange in raw_dranges:\n raw_drange = tuple(raw_drange) # important when json dump change tuple to list\n raw_sid, raw_char_s, raw_char_e = raw_drange\n raw_char_base = raw_char_bases[raw_sid]\n doc_char_s = raw_char_base + raw_char_s\n doc_char_e = raw_char_base + raw_char_e\n assert doc_char_s >= comp_char_bases[prev_comp_sid]\n\n cur_comp_sid = prev_comp_sid\n for cur_comp_sid in range(prev_comp_sid, len(comp_sents)):\n if doc_char_e <= comp_char_bases[cur_comp_sid+1]:\n prev_comp_sid = cur_comp_sid\n break\n comp_char_base = comp_char_bases[cur_comp_sid]\n assert comp_char_base <= doc_char_s < doc_char_e 
<= comp_char_bases[cur_comp_sid+1]\n comp_char_s = doc_char_s - comp_char_base\n comp_char_e = doc_char_e - comp_char_base\n comp_drange = (cur_comp_sid, comp_char_s, comp_char_e)\n\n raw_drange2comp_drange[raw_drange] = comp_drange\n assert raw_sents[raw_drange[0]][raw_drange[1]:raw_drange[2]] == \\\n comp_sents[comp_drange[0]][comp_drange[1]:comp_drange[2]]\n\n # update detailed align info with rearranged sentences\n detail_align_info['sentences'] = comp_sents\n detail_align_info['ann_valid_dranges'] = [\n raw_drange2comp_drange[tuple(raw_drange)] for raw_drange in detail_align_info['ann_valid_dranges']\n ]\n ann_mspan2comp_dranges = {}\n for ann_mspan, mspan_raw_dranges in detail_align_info['ann_mspan2dranges'].items():\n comp_dranges = [\n raw_drange2comp_drange[tuple(raw_drange)] for raw_drange in mspan_raw_dranges\n ]\n ann_mspan2comp_dranges[ann_mspan] = comp_dranges\n detail_align_info['ann_mspan2dranges'] = ann_mspan2comp_dranges\n\n return detail_align_info\n\n def convert_dict_to_example(self, annguid, detail_align_info, only_inference=False):\n if self.rearrange_sent_flag:\n detail_align_info = self.rearrange_sent_info(detail_align_info)\n dee_example = DEEExample(annguid, detail_align_info, only_inference=only_inference)\n\n return dee_example\n\n def __call__(self, dataset_json_path):\n total_dee_examples = []\n annguid_aligninfo_list = default_load_json(dataset_json_path)\n for annguid, detail_align_info in annguid_aligninfo_list:\n # if self.rearrange_sent_flag:\n # detail_align_info = self.rearrange_sent_info(detail_align_info)\n # dee_example = DEEExample(annguid, detail_align_info)\n dee_example = self.convert_dict_to_example(annguid, detail_align_info)\n total_dee_examples.append(dee_example)\n\n return total_dee_examples\n\nclass DEEFeature(object):\n def __init__(self, guid, ex_idx, doc_token_id_mat, doc_token_mask_mat, doc_token_label_mat,\n span_token_ids_list, span_dranges_list, event_type_labels, event_arg_idxs_objs_list,\n valid_sent_num=None):\n self.guid = guid\n self.ex_idx = ex_idx # example row index, used for backtracking\n self.valid_sent_num = valid_sent_num\n\n # directly set tensor for dee feature to save memory\n # self.doc_token_id_mat = doc_token_id_mat\n # self.doc_token_mask_mat = doc_token_mask_mat\n # self.doc_token_label_mat = doc_token_label_mat\n self.doc_token_ids = torch.tensor(doc_token_id_mat, dtype=torch.long)\n self.doc_token_masks = torch.tensor(doc_token_mask_mat, dtype=torch.uint8) # uint8 for mask\n self.doc_token_labels = torch.tensor(doc_token_label_mat, dtype=torch.long)\n\n # sorted by the first drange tuple\n # [(token_id, ...), ...]\n # span_idx -> span_token_id tuple , list of token ids of span\n self.span_token_ids_list = span_token_ids_list \n # [[(sent_idx, char_s, char_e), ...], ...]\n # span_idx -> [drange tuple, ...] 
\n # self.span_dranges_list[i] contains all the mention spans of self.span_token_ids_list[i] entity\n self.span_dranges_list = span_dranges_list\n\n # [event_type_label, ...]\n # length = the total number of events to be considered\n # event_type_label \\in {0, 1}, 0: no 1: yes\n self.event_type_labels = event_type_labels # length=5 1: has this event type 0: does not have this event type\n # event_type is denoted by the index of event_type_labels\n # event_type_idx -> event_obj_idx -> event_arg_idx -> span_idx\n # if no event objects, event_type_idx -> None\n self.event_arg_idxs_objs_list = event_arg_idxs_objs_list \n\n # event_type_idx -> event_field_idx -> pre_path -> {span_idx, ...}\n # pre_path is tuple of span_idx\n self.event_idx2field_idx2pre_path2cur_span_idx_set = self.build_dag_info(self.event_arg_idxs_objs_list)\n\n # event_type_idx -> key_sent_idx_set, used for key-event sentence detection\n self.event_idx2key_sent_idx_set, self.doc_sent_labels = self.build_key_event_sent_info()\n\n def generate_dag_info_for(self, pred_span_token_tup_list, return_miss=False):\n token_tup2pred_span_idx = {\n token_tup: pred_span_idx for pred_span_idx, token_tup in enumerate(pred_span_token_tup_list)\n }\n gold_span_idx2pred_span_idx = {}\n # pred_span_idx2gold_span_idx = {}\n missed_span_idx_list = [] # in terms of self\n missed_sent_idx_list = [] # in terms of self\n for gold_span_idx, token_tup in enumerate(self.span_token_ids_list):\n if token_tup in token_tup2pred_span_idx:\n pred_span_idx = token_tup2pred_span_idx[token_tup]\n gold_span_idx2pred_span_idx[gold_span_idx] = pred_span_idx\n # pred_span_idx2gold_span_idx[pred_span_idx] = gold_span_idx\n else:\n missed_span_idx_list.append(gold_span_idx)\n for gold_drange in self.span_dranges_list[gold_span_idx]:\n missed_sent_idx_list.append(gold_drange[0])\n missed_sent_idx_list = list(set(missed_sent_idx_list))\n\n pred_event_arg_idxs_objs_list = []\n for event_arg_idxs_objs in self.event_arg_idxs_objs_list:\n if event_arg_idxs_objs is None:\n pred_event_arg_idxs_objs_list.append(None)\n else:\n pred_event_arg_idxs_objs = []\n for event_arg_idxs in event_arg_idxs_objs:\n pred_event_arg_idxs = []\n for gold_span_idx in event_arg_idxs:\n if gold_span_idx in gold_span_idx2pred_span_idx:\n pred_event_arg_idxs.append(\n gold_span_idx2pred_span_idx[gold_span_idx]\n )\n else:\n pred_event_arg_idxs.append(None)\n\n pred_event_arg_idxs_objs.append(tuple(pred_event_arg_idxs))\n pred_event_arg_idxs_objs_list.append(pred_event_arg_idxs_objs)\n\n # event_idx -> field_idx -> pre_path -> cur_span_idx_set\n pred_dag_info = self.build_dag_info(pred_event_arg_idxs_objs_list)\n\n if return_miss:\n return pred_dag_info, missed_span_idx_list, missed_sent_idx_list\n else:\n return pred_dag_info\n\n def get_event_args_objs_list(self):\n event_args_objs_list = []\n for event_arg_idxs_objs in self.event_arg_idxs_objs_list:\n if event_arg_idxs_objs is None:\n event_args_objs_list.append(None)\n else:\n event_args_objs = []\n for event_arg_idxs in event_arg_idxs_objs:\n event_args = []\n for arg_idx in event_arg_idxs:\n if arg_idx is None:\n token_tup = None\n else:\n token_tup = self.span_token_ids_list[arg_idx]\n event_args.append(token_tup)\n event_args_objs.append(event_args)\n event_args_objs_list.append(event_args_objs)\n\n return event_args_objs_list\n\n def build_key_event_sent_info(self):\n assert len(self.event_type_labels) == len(self.event_arg_idxs_objs_list)\n # event_idx -> key_event_sent_index_set\n event_idx2key_sent_idx_set = [set() for _ in 
self.event_type_labels]\n for key_sent_idx_set, event_label, event_arg_idxs_objs in zip(\n event_idx2key_sent_idx_set, self.event_type_labels, self.event_arg_idxs_objs_list\n ):\n if event_label == 0:\n assert event_arg_idxs_objs is None\n else:\n for event_arg_idxs_obj in event_arg_idxs_objs:\n sent_idx_cands = []\n for span_idx in event_arg_idxs_obj:\n if span_idx is None:\n continue\n span_dranges = self.span_dranges_list[span_idx]\n for sent_idx, _, _ in span_dranges:\n sent_idx_cands.append(sent_idx)\n if len(sent_idx_cands) == 0:\n raise Exception('Event {} has no valid spans'.format(str(event_arg_idxs_obj)))\n sent_idx_cnter = Counter(sent_idx_cands)\n key_sent_idx = sent_idx_cnter.most_common()[0][0]\n key_sent_idx_set.add(key_sent_idx)\n\n doc_sent_labels = [] # 1: key event sentence, 0: otherwise\n for sent_idx in range(self.valid_sent_num): # masked sents will be truncated at the model part\n sent_labels = []\n for key_sent_idx_set in event_idx2key_sent_idx_set: # this mapping is a list\n if sent_idx in key_sent_idx_set:\n sent_labels.append(1)\n else:\n sent_labels.append(0)\n doc_sent_labels.append(sent_labels)\n\n return event_idx2key_sent_idx_set, doc_sent_labels\n\n @staticmethod\n def build_dag_info(event_arg_idxs_objs_list):\n # event_idx -> field_idx -> pre_path -> {span_idx, ...}\n # pre_path is tuple of span_idx\n event_idx2field_idx2pre_path2cur_span_idx_set = []\n for event_idx, event_arg_idxs_list in enumerate(event_arg_idxs_objs_list):\n if event_arg_idxs_list is None:\n event_idx2field_idx2pre_path2cur_span_idx_set.append(None)\n else:\n num_fields = len(event_arg_idxs_list[0])\n # field_idx -> pre_path -> {span_idx, ...}\n field_idx2pre_path2cur_span_idx_set = []\n for field_idx in range(num_fields):\n pre_path2cur_span_idx_set = {}\n for event_arg_idxs in event_arg_idxs_list:\n pre_path = event_arg_idxs[:field_idx]\n span_idx = event_arg_idxs[field_idx]\n if pre_path not in pre_path2cur_span_idx_set:\n pre_path2cur_span_idx_set[pre_path] = set()\n pre_path2cur_span_idx_set[pre_path].add(span_idx)\n field_idx2pre_path2cur_span_idx_set.append(pre_path2cur_span_idx_set)\n event_idx2field_idx2pre_path2cur_span_idx_set.append(field_idx2pre_path2cur_span_idx_set)\n\n return event_idx2field_idx2pre_path2cur_span_idx_set\n\n def is_multi_event(self):\n event_cnt = 0\n for event_objs in self.event_arg_idxs_objs_list:\n if event_objs is not None:\n event_cnt += len(event_objs)\n if event_cnt > 1:\n return True\n\n return False\n\nclass DEEFeatureConverter(object):\n def __init__(self, entity_label_list, event_type_fields_pairs,\n max_sent_len, max_sent_num, tokenizer,\n ner_fea_converter=None, include_cls=True, include_sep=True):\n self.entity_label_list = entity_label_list\n self.event_type_fields_pairs = event_type_fields_pairs\n self.max_sent_len = max_sent_len\n self.max_sent_num = max_sent_num\n self.tokenizer = tokenizer\n self.truncate_doc_count = 0 # track how many docs have been truncated due to max_sent_num\n self.truncate_span_count = 0 # track how may spans have been truncated\n\n # label not in entity_label_list will be default 'O'\n # sent_len > max_sent_len will be truncated, and increase ner_fea_converter.truncate_freq\n if ner_fea_converter is None:\n self.ner_fea_converter = NERFeatureConverter(entity_label_list, self.max_sent_len, tokenizer,\n include_cls=include_cls, include_sep=include_sep)\n else:\n self.ner_fea_converter = ner_fea_converter\n\n self.include_cls = include_cls\n self.include_sep = include_sep\n\n # prepare entity_label -> entity_index 
mapping\n self.entity_label2index = {}\n for entity_idx, entity_label in enumerate(self.entity_label_list):\n self.entity_label2index[entity_label] = entity_idx\n\n # prepare event_type -> event_index and event_index -> event_fields mapping\n self.event_type2index = {}\n self.event_type_list = []\n self.event_fields_list = []\n for event_idx, (event_type, event_fields) in enumerate(self.event_type_fields_pairs):\n self.event_type2index[event_type] = event_idx\n self.event_type_list.append(event_type)\n self.event_fields_list.append(event_fields)\n\n def convert_example_to_feature(self, ex_idx, dee_example, log_flag=False):\n annguid = dee_example.guid\n assert isinstance(dee_example, DEEExample)\n \n # 1. prepare doc token-level feature\n\n # Size(num_sent_num, num_sent_len)\n doc_token_id_mat = [] # [[token_idx, ...], ...]\n doc_token_mask_mat = [] # [[token_mask, ...], ...]\n doc_token_label_mat = [] # [[token_label_id, ...], ...]\n\n for sent_idx, sent_text in enumerate(dee_example.sentences):\n if sent_idx >= self.max_sent_num:\n # truncate doc whose number of sentences is longer than self.max_sent_num\n self.truncate_doc_count += 1\n break\n\n if sent_idx in dee_example.sent_idx2srange_mspan_mtype_tuples:\n srange_mspan_mtype_tuples = dee_example.sent_idx2srange_mspan_mtype_tuples[sent_idx]\n else:\n srange_mspan_mtype_tuples = []\n \n # srange_mspan_mtype_tuples in this sentence (span-position,span-text,span-type)\n\n ner_example = NERExample(\n '{}-{}'.format(annguid, sent_idx), sent_text, srange_mspan_mtype_tuples\n )\n # sentence truncated count will be recorded incrementally\n ner_feature = self.ner_fea_converter.convert_example_to_feature(ner_example, log_flag=log_flag)\n\n doc_token_id_mat.append(ner_feature.input_ids)\n doc_token_mask_mat.append(ner_feature.input_masks)\n doc_token_label_mat.append(ner_feature.label_ids)\n\n # already pad to max_len=128\n\n assert len(doc_token_id_mat) == len(doc_token_mask_mat) == len(doc_token_label_mat) <= self.max_sent_num\n valid_sent_num = len(doc_token_id_mat)\n\n # 2. prepare span feature\n # spans are sorted by the first drange\n span_token_ids_list = []\n span_dranges_list = []\n mspan2span_idx = {}\n for mspan in dee_example.ann_valid_mspans:\n if mspan in mspan2span_idx:\n continue\n\n raw_dranges = dee_example.ann_mspan2dranges[mspan]\n char_base_s = 1 if self.include_cls else 0\n char_max_end = self.max_sent_len - 1 if self.include_sep else self.max_sent_len\n span_dranges = []\n for sent_idx, char_s, char_e in raw_dranges:\n if char_base_s + char_e <= char_max_end and sent_idx < self.max_sent_num:\n span_dranges.append((sent_idx, char_base_s + char_s, char_base_s + char_e))\n else:\n self.truncate_span_count += 1\n if len(span_dranges) == 0:\n # span does not have any valid location in truncated sequences\n continue\n\n span_tokens = self.tokenizer.char_tokenize(mspan)\n span_token_ids = tuple(self.tokenizer.convert_tokens_to_ids(span_tokens))\n\n mspan2span_idx[mspan] = len(span_token_ids_list)\n span_token_ids_list.append(span_token_ids)\n span_dranges_list.append(span_dranges)\n assert len(span_token_ids_list) == len(span_dranges_list) == len(mspan2span_idx)\n\n if len(span_token_ids_list) == 0 and not dee_example.only_inference:\n logger.warning('Neglect example {}'.format(ex_idx))\n return None\n\n # 3. 
prepare doc-level event feature\n # event_type_labels: event_type_index -> event_type_exist_sign (1: exist, 0: no)\n # event_arg_idxs_objs_list: event_type_index -> event_obj_index -> event_arg_index -> arg_span_token_ids\n\n event_type_labels = [] # event_type_idx -> event_type_exist_sign (1 or 0)\n event_arg_idxs_objs_list = [] # event_type_idx -> event_obj_idx -> event_arg_idx -> span_idx\n for event_idx, event_type in enumerate(self.event_type_list):\n event_fields = self.event_fields_list[event_idx]\n\n if event_type not in dee_example.event_type2event_objs:\n event_type_labels.append(0)\n event_arg_idxs_objs_list.append(None)\n else:\n event_objs = dee_example.event_type2event_objs[event_type]\n\n event_arg_idxs_objs = []\n for event_obj in event_objs:\n assert isinstance(event_obj, BaseEvent)\n\n event_arg_idxs = []\n any_valid_flag = False\n for field in event_fields:\n arg_span = event_obj.field2content[field]\n\n if arg_span is None or arg_span not in mspan2span_idx:\n # arg_span can be none or valid span is truncated\n arg_span_idx = None\n else:\n # when constructing data files,\n # must ensure event arg span is covered by the total span collections\n arg_span_idx = mspan2span_idx[arg_span]\n any_valid_flag = True\n\n event_arg_idxs.append(arg_span_idx)\n\n if any_valid_flag:\n event_arg_idxs_objs.append(tuple(event_arg_idxs))\n\n if event_arg_idxs_objs:\n event_type_labels.append(1)\n event_arg_idxs_objs_list.append(event_arg_idxs_objs)\n else:\n event_type_labels.append(0)\n event_arg_idxs_objs_list.append(None)\n\n dee_feature = DEEFeature(\n annguid, ex_idx, doc_token_id_mat, doc_token_mask_mat, doc_token_label_mat,\n span_token_ids_list, span_dranges_list, event_type_labels, event_arg_idxs_objs_list,\n valid_sent_num=valid_sent_num\n )\n return dee_feature\n\n def __call__(self, dee_examples, log_example_num=0):\n \"\"\"Convert examples to features suitable for document-level event extraction\"\"\"\n dee_features = []\n self.truncate_doc_count = 0\n self.truncate_span_count = 0\n self.ner_fea_converter.truncate_count = 0\n\n remove_ex_cnt = 0\n for ex_idx, dee_example in enumerate(dee_examples):\n if ex_idx < log_example_num:\n dee_feature = self.convert_example_to_feature(ex_idx-remove_ex_cnt, dee_example, log_flag=True)\n else:\n dee_feature = self.convert_example_to_feature(ex_idx-remove_ex_cnt, dee_example, log_flag=False)\n\n if dee_feature is None:\n remove_ex_cnt += 1\n continue\n dee_features.append(dee_feature)\n\n logger.info('{} documents, ignore {} examples, truncate {} docs, {} sents, {} spans'.format(\n len(dee_examples), remove_ex_cnt,\n self.truncate_doc_count, self.ner_fea_converter.truncate_count, self.truncate_span_count\n ))\n\n return dee_features\n\ndef convert_dee_features_to_dataset(dee_features):\n # just view a list of doc_fea as the dataset, that only requires __len__, __getitem__\n assert len(dee_features) > 0 and isinstance(dee_features[0], DEEFeature)\n\n return dee_features\n\ndef prepare_doc_batch_dict(doc_fea_list):\n doc_batch_keys = ['ex_idx', 'doc_token_ids', 'doc_token_masks', 'doc_token_labels', 'valid_sent_num']\n doc_batch_dict = {}\n for key in doc_batch_keys:\n doc_batch_dict[key] = [getattr(doc_fea, key) for doc_fea in doc_fea_list]\n\n return doc_batch_dict\n\ndef measure_dee_prediction(event_type_fields_pairs, features, event_decode_results,\n dump_json_path=None, writer=None, epoch=None):\n pred_record_mat_list = []\n gold_record_mat_list = []\n for term in event_decode_results:\n ex_idx, pred_event_type_labels, 
pred_record_mat, doc_span_info = term[:4]\n pred_record_mat = [\n [\n [\n tuple(arg_tup) if arg_tup is not None else None\n for arg_tup in pred_record\n ] for pred_record in pred_records\n ] if pred_records is not None else None\n for pred_records in pred_record_mat\n ]\n doc_fea = features[ex_idx]\n assert isinstance(doc_fea, DEEFeature)\n gold_record_mat = [\n [\n [\n tuple(doc_fea.span_token_ids_list[arg_idx]) if arg_idx is not None else None\n for arg_idx in event_arg_idxs\n ] for event_arg_idxs in event_arg_idxs_objs\n ] if event_arg_idxs_objs is not None else None\n for event_arg_idxs_objs in doc_fea.event_arg_idxs_objs_list\n ]\n\n pred_record_mat_list.append(pred_record_mat)\n gold_record_mat_list.append(gold_record_mat)\n\n g_eval_res = measure_event_table_filling(\n pred_record_mat_list, gold_record_mat_list, event_type_fields_pairs, dict_return=True\n )\n\n if writer is not None and dump_json_path is not None:\n if 'dev' in dump_json_path:\n prefix = 'Dev-Pred-' if 'pred' in dump_json_path else 'Dev-Gold-'\n else:\n prefix = 'Test-Pred-' if 'pred' in dump_json_path else 'Test-Gold-'\n writer.add_scalar(prefix+'MicroF1', g_eval_res[-1]['MicroF1'], global_step=epoch)\n writer.add_scalar(prefix+'MacroF1', g_eval_res[-1]['MacroF1'], global_step=epoch)\n writer.add_scalar(prefix+'MicroPrecision', g_eval_res[-1]['MicroPrecision'], global_step=epoch)\n writer.add_scalar(prefix+'MicroRecall', g_eval_res[-1]['MicroRecall'], global_step=epoch)\n \n event_triggering_tp = [0 for _ in range(5)]\n event_triggering_fp = [0 for _ in range(5)]\n event_triggering_fn = [0 for _ in range(5)]\n for term in event_decode_results:\n ex_idx, pred_event_type_labels, pred_record_mat, doc_span_info = term[:4]\n event_triggering_golden = features[ex_idx].event_type_labels\n for et_idx, et in enumerate(pred_event_type_labels):\n if pred_event_type_labels[et_idx] == 1:\n if event_triggering_golden[et_idx] == 1:\n event_triggering_tp[et_idx] += 1\n else:\n event_triggering_fp[et_idx] += 1\n else:\n if event_triggering_golden[et_idx] == 1:\n event_triggering_fn[et_idx] += 1\n\n for eidx in range(5):\n if event_triggering_tp[eidx]+event_triggering_fp[eidx] != 0:\n event_p = event_triggering_tp[eidx] / (event_triggering_tp[eidx]+event_triggering_fp[eidx])\n else:\n event_p = 0\n if event_triggering_tp[eidx]+event_triggering_fn[eidx] != 0:\n event_r = event_triggering_tp[eidx] / (event_triggering_tp[eidx]+event_triggering_fn[eidx])\n else:\n event_r = 0\n if event_p != 0 and event_r != 0:\n event_f1 = 2 * event_p * event_r / (event_p + event_r)\n else:\n event_f1 = 0\n g_eval_res[-1]['event_{}_p'.format(eidx+1)] = event_p\n g_eval_res[-1]['event_{}_r'.format(eidx+1)] = event_r\n g_eval_res[-1]['event_{}_f1'.format(eidx+1)] = event_f1\n \n if dump_json_path is not None:\n default_dump_json(g_eval_res, dump_json_path)\n\n return g_eval_res\n\ndef aggregate_task_eval_info(eval_dir_path, target_file_pre='dee_eval', target_file_suffix='.json',\n dump_name='total_task_eval.pkl', dump_flag=False):\n \"\"\"Enumerate the evaluation directory to collect all dumped evaluation results\"\"\"\n logger.info('Aggregate task evaluation info from {}'.format(eval_dir_path))\n data_span_type2model_str2epoch_res_list = {}\n for fn in os.listdir(eval_dir_path):\n fn_splits = fn.split('.')\n if fn.startswith(target_file_pre) and fn.endswith(target_file_suffix) and len(fn_splits) == 6:\n _, data_type, span_type, model_str, epoch, _ = fn_splits\n\n data_span_type = (data_type, span_type)\n if data_span_type not in 
data_span_type2model_str2epoch_res_list:\n data_span_type2model_str2epoch_res_list[data_span_type] = {}\n model_str2epoch_res_list = data_span_type2model_str2epoch_res_list[data_span_type]\n\n if model_str not in model_str2epoch_res_list:\n model_str2epoch_res_list[model_str] = []\n epoch_res_list = model_str2epoch_res_list[model_str]\n\n epoch = int(epoch)\n fp = os.path.join(eval_dir_path, fn)\n eval_res = default_load_json(fp)\n\n epoch_res_list.append((epoch, eval_res))\n\n for data_span_type, model_str2epoch_res_list in data_span_type2model_str2epoch_res_list.items():\n for model_str, epoch_res_list in model_str2epoch_res_list.items():\n epoch_res_list.sort(key=lambda x: x[0])\n\n if dump_flag:\n dump_fp = os.path.join(eval_dir_path, dump_name)\n logger.info('Dumping {} into {}'.format(dump_name, eval_dir_path))\n default_dump_pkl(data_span_type2model_str2epoch_res_list, dump_fp)\n\n return data_span_type2model_str2epoch_res_list\n\ndef print_total_eval_info(data_span_type2model_str2epoch_res_list,\n metric_type='micro',\n span_type='pred_span',\n model_str='GIT',\n target_set='test'):\n \"\"\"Print the final performance by selecting the best epoch on dev set and emitting performance on test set\"\"\"\n dev_type = 'dev'\n test_type = 'test'\n avg_type2prf1_keys = {\n 'macro': ('MacroPrecision', 'MacroRecall', 'MacroF1'),\n 'micro': ('MicroPrecision', 'MicroRecall', 'MicroF1'),\n }\n\n name_key = 'EventType'\n p_key, r_key, f_key = avg_type2prf1_keys[metric_type]\n\n def get_avg_event_score(epoch_res):\n eval_res = epoch_res[1]\n avg_event_score = eval_res[-1][f_key]\n\n return avg_event_score\n\n dev_model_str2epoch_res_list = data_span_type2model_str2epoch_res_list[(dev_type, span_type)]\n test_model_str2epoch_res_list = data_span_type2model_str2epoch_res_list[(test_type, span_type)]\n\n has_header = False\n mstr_bepoch_list = []\n print('=' * 15, 'Final Performance (%) (avg_type={})'.format(metric_type), '=' * 15)\n\n if model_str not in dev_model_str2epoch_res_list or model_str not in test_model_str2epoch_res_list:\n pass\n else:\n # get the best epoch on dev set\n dev_epoch_res_list = dev_model_str2epoch_res_list[model_str]\n best_dev_epoch, best_dev_res = max(dev_epoch_res_list, key=get_avg_event_score)\n\n test_epoch_res_list = test_model_str2epoch_res_list[model_str]\n best_test_epoch = None\n best_test_res = None\n for test_epoch, test_res in test_epoch_res_list:\n if test_epoch == best_dev_epoch:\n best_test_epoch = test_epoch\n best_test_res = test_res\n assert best_test_epoch is not None\n mstr_bepoch_list.append((model_str, best_test_epoch))\n\n if target_set == 'test':\n target_eval_res = best_test_res\n else:\n target_eval_res = best_dev_res\n\n align_temp = '{:20}'\n head_str = align_temp.format('ModelType')\n eval_str = align_temp.format(model_str)\n head_temp = ' \\t {}'\n eval_temp = ' \\t & {:.1f} & {:.1f} & {:.1f}'\n ps = []\n rs = []\n fs = []\n for tgt_event_res in target_eval_res[:-1]:\n head_str += align_temp.format(head_temp.format(tgt_event_res[0][name_key]))\n p, r, f1 = (100 * tgt_event_res[0][key] for key in [p_key, r_key, f_key])\n eval_str += align_temp.format(eval_temp.format(p, r, f1))\n ps.append(p)\n rs.append(r)\n fs.append(f1)\n\n head_str += align_temp.format(head_temp.format('Average'))\n ap, ar, af1 = (x for x in [np.mean(ps), np.mean(rs), np.mean(fs)])\n eval_str += align_temp.format(eval_temp.format(ap, ar, af1))\n\n head_str += align_temp.format(head_temp.format('Total ({})'.format(metric_type)))\n g_avg_res = target_eval_res[-1]\n ap, ar, 
af1 = (100 * g_avg_res[key] for key in [p_key, r_key, f_key])\n eval_str += align_temp.format(eval_temp.format(ap, ar, af1))\n\n if not has_header:\n print(head_str)\n has_header = True\n print(eval_str)\n \n print(mstr_bepoch_list)\n return mstr_bepoch_list\n\n# evaluation dump file name template\n# dee_eval.[DataType].[SpanType].[ModelStr].[Epoch].(pkl|json)\ndecode_dump_template = 'dee_eval.{}.{}.{}.{}.pkl'\neval_dump_template = 'dee_eval.{}.{}.{}.{}.json'\n\ndef resume_decode_results(base_dir, data_type, span_type, model_str, epoch):\n decode_fn = decode_dump_template.format(data_type, span_type, model_str, epoch)\n decode_fp = os.path.join(base_dir, decode_fn)\n logger.info('Resume decoded results from {}'.format(decode_fp))\n decode_results = default_load_pkl(decode_fp)\n\n return decode_results\n\ndef resume_eval_results(base_dir, data_type, span_type, model_str, epoch):\n eval_fn = eval_dump_template.format(data_type, span_type, model_str, epoch)\n eval_fp = os.path.join(base_dir, eval_fn)\n logger.info('Resume eval results from {}'.format(eval_fp))\n eval_results = default_load_json(eval_fp)\n\n return eval_results\n\ndef print_single_vs_multi_performance(mstr_bepoch_list, base_dir, features,\n metric_type='micro', data_type='test', span_type='pred_span'):\n model_str2decode_results = {}\n for model_str, best_epoch in mstr_bepoch_list:\n model_str2decode_results[model_str] = resume_decode_results(\n base_dir, data_type, span_type, model_str, best_epoch\n )\n\n single_eid_set = set([doc_fea.ex_idx for doc_fea in features if not doc_fea.is_multi_event()])\n multi_eid_set = set([doc_fea.ex_idx for doc_fea in features if doc_fea.is_multi_event()])\n event_type_fields_pairs = DEEExample.get_event_type_fields_pairs()\n event_type_list = [x for x, y in event_type_fields_pairs]\n\n name_key = 'EventType'\n avg_type2f1_key = {\n 'micro': 'MicroF1',\n 'macro': 'MacroF1',\n }\n f1_key = avg_type2f1_key[metric_type]\n\n model_str2etype_sf1_mf1_list = {}\n for model_str, _ in mstr_bepoch_list:\n total_decode_results = model_str2decode_results[model_str]\n\n single_decode_results = [dec_res for dec_res in total_decode_results if dec_res[0] in single_eid_set]\n assert len(single_decode_results) == len(single_eid_set)\n single_eval_res = measure_dee_prediction(\n event_type_fields_pairs, features, single_decode_results\n )\n\n multi_decode_results = [dec_res for dec_res in total_decode_results if dec_res[0] in multi_eid_set]\n assert len(multi_decode_results) == len(multi_eid_set)\n multi_eval_res = measure_dee_prediction(\n event_type_fields_pairs, features, multi_decode_results\n )\n\n etype_sf1_mf1_list = []\n for event_idx, (se_res, me_res) in enumerate(zip(single_eval_res[:-1], multi_eval_res[:-1])):\n assert se_res[0][name_key] == me_res[0][name_key] == event_type_list[event_idx]\n event_type = event_type_list[event_idx]\n single_f1 = se_res[0][f1_key]\n multi_f1 = me_res[0][f1_key]\n\n etype_sf1_mf1_list.append((event_type, single_f1, multi_f1))\n g_avg_se_res = single_eval_res[-1]\n g_avg_me_res = multi_eval_res[-1]\n etype_sf1_mf1_list.append(\n ('Total ({})'.format(metric_type), g_avg_se_res[f1_key], g_avg_me_res[f1_key])\n )\n model_str2etype_sf1_mf1_list[model_str] = etype_sf1_mf1_list\n\n print('=' * 15, 'Single vs. 
Multi (%) (avg_type={})'.format(metric_type), '=' * 15)\n align_temp = '{:20}'\n head_str = align_temp.format('ModelType')\n head_temp = ' \\t {}'\n eval_temp = ' \\t & {:.1f} & {:.1f} '\n for event_type in event_type_list:\n head_str += align_temp.format(head_temp.format(event_type))\n head_str += align_temp.format(head_temp.format('Total ({})'.format(metric_type)))\n head_str += align_temp.format(head_temp.format('Average'))\n print(head_str)\n\n for model_str, _ in mstr_bepoch_list:\n eval_str = align_temp.format(model_str)\n sf1s = []\n mf1s = []\n for _, single_f1, multi_f1 in model_str2etype_sf1_mf1_list[model_str]:\n eval_str += align_temp.format(eval_temp.format(single_f1*100, multi_f1*100))\n sf1s.append(single_f1)\n mf1s.append(multi_f1)\n avg_sf1 = np.mean(sf1s[:-1])\n avg_mf1 = np.mean(mf1s[:-1])\n eval_str += align_temp.format(eval_temp.format(avg_sf1*100, avg_mf1*100))\n print(eval_str)" ]
[ [ "numpy.mean", "torch.tensor" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
Vikas-kum/incubator-mxnet
[ "ba02bf2fe2da423caa59ddb3fd5e433b90b730bf", "ba02bf2fe2da423caa59ddb3fd5e433b90b730bf", "ba02bf2fe2da423caa59ddb3fd5e433b90b730bf", "ba02bf2fe2da423caa59ddb3fd5e433b90b730bf", "ba02bf2fe2da423caa59ddb3fd5e433b90b730bf", "ba02bf2fe2da423caa59ddb3fd5e433b90b730bf" ]
[ "tests/python/unittest/test_contrib_svrg_optimizer.py", "tests/python/unittest/test_contrib_svrg_module.py", "example/rnn/word_lm/data.py", "example/sparse/wide_deep/data.py", "benchmark/python/control_flow/rnn.py", "example/neural-style/end_to_end/data_processing.py" ]
[ "# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n\nimport numpy as np\nimport mxnet as mx\nfrom mxnet.test_utils import same\nfrom mxnet.contrib.svrg_optimization.svrg_module import SVRGModule\nfrom mxnet.contrib.svrg_optimization.svrg_optimizer import _SVRGOptimizer\n\n\ndef create_network():\n\n train_data = np.random.randint(1, 5, [1000, 2])\n weights = np.array([1.0, 2.0])\n train_label = train_data.dot(weights)\n\n batch_size = 32\n\n di = mx.io.NDArrayIter(train_data, train_label, batch_size=batch_size, shuffle=True, label_name='lin_reg_label')\n X = mx.sym.Variable('data')\n Y = mx.symbol.Variable('lin_reg_label')\n fully_connected_layer = mx.sym.FullyConnected(data=X, name='fc1', num_hidden=1)\n lro = mx.sym.LinearRegressionOutput(data=fully_connected_layer, label=Y, name=\"lro\")\n\n mod = SVRGModule(\n symbol=lro,\n data_names=['data'],\n label_names=['lin_reg_label'], update_freq=2\n )\n\n mod.bind(data_shapes=di.provide_data, label_shapes=di.provide_label)\n mod.init_params(initializer=mx.init.Uniform(0.01), allow_missing=False,\n force_init=False, allow_extra=False)\n\n return di, mod\n\n\ndef test_init_svrg_optimizer():\n _, mod = create_network()\n\n kv = mx.kv.create('local')\n mod.init_optimizer(kvstore=kv, optimizer='sgd', optimizer_params=(('learning_rate', 0.01),),\n force_init=False)\n\n assert type(mod._optimizer).__name__ == _SVRGOptimizer.__name__\n\n\ndef test_svrg_optimizer_constructor():\n kv = mx.kv.create('local')\n svrg_optimizer = _SVRGOptimizer(default_optimizer='sgd', learning_rate=-1.0)\n kv.set_optimizer(svrg_optimizer)\n\n assert svrg_optimizer.default_opt.lr == -1.0\n\n\ndef test_kvstore_init_aux_keys():\n param_idx2name = {0: \"weight\", 1: \"weight_full\"}\n\n svrg_optimizer = _SVRGOptimizer(default_optimizer='sgd', param_idx2name= param_idx2name, learning_rate=1.0)\n kv = mx.kv.create('local')\n kv.set_optimizer(svrg_optimizer)\n\n # Use default sgd optimizer\n param_weight_init = mx.nd.array([0, 0, 0])\n param_weight_update = mx.nd.array([1, 1, 1])\n\n kv.init(0, param_weight_init)\n kv.push(0, param_weight_update)\n kv.pull(0, param_weight_init)\n\n param_weight_full_init = mx.nd.array([1, 1, 1])\n param_weight_full_update = mx.nd.array([2, 2, 2])\n\n # Use AssignmentOptimizer\n kv.init(1, param_weight_full_init)\n kv.push(1, param_weight_full_update)\n kv.pull(1, param_weight_full_init)\n\n # updated weights using default sgd optimizer\n assert same(param_weight_init.asnumpy(), np.array([-1, -1, -1]))\n # updated with AssignmentOptimizer\n assert same(param_weight_full_init.asnumpy(), np.array([2, 2, 2]))\n\n\nif __name__ == \"__main__\":\n import nose\n nose.runmodule()\n", "# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. 
See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n\nimport mxnet as mx\nimport numpy as np\nfrom common import with_seed, assertRaises\nfrom mxnet.contrib.svrg_optimization.svrg_module import SVRGModule\nfrom mxnet.test_utils import *\nimport unittest\n\ndef setup():\n train_data = np.random.randint(1, 5, [1000, 2])\n weights = np.array([1.0, 2.0])\n train_label = train_data.dot(weights)\n\n di = mx.io.NDArrayIter(train_data, train_label, batch_size=32, shuffle=True, label_name='lin_reg_label')\n X = mx.sym.Variable('data')\n Y = mx.symbol.Variable('lin_reg_label')\n fully_connected_layer = mx.sym.FullyConnected(data=X, name='fc1', num_hidden=1)\n lro = mx.sym.LinearRegressionOutput(data=fully_connected_layer, label=Y, name=\"lro\")\n\n mod = SVRGModule(\n symbol=lro,\n data_names=['data'],\n label_names=['lin_reg_label'], update_freq=2)\n mod.bind(data_shapes=di.provide_data, label_shapes=di.provide_label)\n mod.init_params(initializer=mx.init.Uniform(0.01), allow_missing=False, force_init=False, allow_extra=False)\n\n return di, mod\n\n\ndef test_bind_module():\n _, mod = setup()\n assert mod.binded == True\n assert mod._mod_aux.binded == True\n\n\ndef test_module_init():\n _, mod = setup()\n assert mod._mod_aux is not None\n\n\ndef test_module_initializer():\n def regression_model(m):\n x = mx.symbol.var(\"data\", stype='csr')\n v = mx.symbol.var(\"v\", shape=(m, 1), init=mx.init.Uniform(scale=.1),\n stype='row_sparse')\n model = mx.symbol.dot(lhs=x, rhs=v)\n y = mx.symbol.Variable(\"label\")\n model = mx.symbol.LinearRegressionOutput(data=model, label=y, name=\"out\")\n return model\n\n #shape of the data\n n, m = 128, 100\n model = regression_model(m)\n\n data = mx.nd.zeros(shape=(n, m), stype='csr')\n label = mx.nd.zeros((n, 1))\n iterator = mx.io.NDArrayIter(data=data, label={'label': label},\n batch_size=n, last_batch_handle='discard')\n\n # create module\n mod = SVRGModule(symbol=model, data_names=['data'], label_names=['label'], update_freq=2)\n mod.bind(data_shapes=iterator.provide_data, label_shapes=iterator.provide_label)\n mod.init_params()\n v = mod._arg_params['v']\n assert v.stype == 'row_sparse'\n assert np.sum(v.asnumpy()) != 0\n\n\ndef test_module_bind():\n x = mx.sym.Variable(\"data\")\n net = mx.sym.FullyConnected(x, num_hidden=1)\n\n mod = SVRGModule(symbol=net, data_names=['data'], label_names=None, update_freq=2)\n assertRaises(TypeError, mod.bind, data_shapes=['data', mx.nd.zeros(shape=(2, 1))])\n\n mod.bind(data_shapes=[('data', (2, 1))])\n assert mod.binded == True\n assert mod._mod_aux.binded == True\n\n\n@unittest.skip(\"Flaky test https://github.com/apache/incubator-mxnet/issues/12510\")\n@with_seed()\ndef test_module_save_load():\n import tempfile\n import os\n\n x = mx.sym.Variable(\"data\")\n y = mx.sym.Variable(\"softmax_label\")\n net = mx.sym.FullyConnected(x, y, num_hidden=1)\n\n mod = SVRGModule(symbol=net, 
data_names=['data'], label_names=['softmax_label'], update_freq=2)\n mod.bind(data_shapes=[('data', (1, 1))])\n mod.init_params()\n mod.init_optimizer(optimizer='sgd', optimizer_params={'learning_rate': 0.1})\n mod.update()\n\n # Create tempfile\n tmp = tempfile.mkdtemp()\n tmp_file = os.path.join(tmp, 'svrg_test_output')\n mod.save_checkpoint(tmp_file, 0, save_optimizer_states=True)\n\n mod2 = SVRGModule.load(tmp_file, 0, load_optimizer_states=True, data_names=('data', ))\n mod2.bind(data_shapes=[('data', (1, 1))])\n mod2.init_optimizer(optimizer_params={'learning_rate': 0.1})\n assert mod._symbol.tojson() == mod2._symbol.tojson()\n\n # Multi-device\n mod3 = SVRGModule(symbol=net, data_names=['data'], label_names=['softmax_label'], update_freq=3,\n context=[mx.cpu(0), mx.cpu(1)])\n mod3.bind(data_shapes=[('data', (10, 10))])\n mod3.init_params()\n mod3.init_optimizer(optimizer_params={'learning_rate': 1.0})\n mod3.update()\n mod3.save_checkpoint(tmp_file, 0, save_optimizer_states=True)\n\n mod4 = SVRGModule.load(tmp_file, 0, load_optimizer_states=True, data_names=('data', ))\n mod4.bind(data_shapes=[('data', (10, 10))])\n mod4.init_optimizer(optimizer_params={'learning_rate': 1.0})\n assert mod3._symbol.tojson() == mod4._symbol.tojson()\n\n\n@unittest.skip(\"Flaky test https://github.com/apache/incubator-mxnet/issues/12510\")\n@with_seed()\ndef test_svrgmodule_reshape():\n data = mx.sym.Variable(\"data\")\n sym = mx.sym.FullyConnected(data=data, num_hidden=4, name='fc')\n\n dshape=(3, 4)\n mod = SVRGModule(sym, data_names=[\"data\"], label_names=None, context=[mx.cpu(0), mx.cpu(1)], update_freq=2)\n mod.bind(data_shapes=[('data', dshape)])\n mod.init_params()\n mod._mod_aux.init_params()\n mod.init_optimizer(optimizer_params={\"learning_rate\": 1.0})\n\n data_batch = mx.io.DataBatch(data=[mx.nd.ones(dshape)], label=None)\n mod.forward(data_batch)\n mod.backward([mx.nd.ones(dshape)])\n mod.update()\n assert mod.get_outputs()[0].shape == dshape\n\n dshape = (2, 4)\n mod.reshape(data_shapes=[('data', dshape)])\n mod.forward(mx.io.DataBatch(data=[mx.nd.ones(dshape)],\n label=None))\n mod.backward([mx.nd.ones(dshape)])\n mod.update()\n assert mod.get_outputs()[0].shape == dshape\n\n\n@unittest.skip(\"Flaky test https://github.com/apache/incubator-mxnet/issues/12510\")\n@with_seed()\ndef test_update_full_grad():\n def create_network():\n train_data = np.random.randint(1, 5, [10, 2])\n weights = np.array([1.0, 2.0])\n train_label = train_data.dot(weights)\n\n di = mx.io.NDArrayIter(train_data, train_label, batch_size=5, shuffle=True, label_name='lin_reg_label')\n X = mx.sym.Variable('data')\n Y = mx.symbol.Variable('lin_reg_label')\n fully_connected_layer = mx.sym.FullyConnected(data=X, name='fc1', num_hidden=1)\n lro = mx.sym.LinearRegressionOutput(data=fully_connected_layer, label=Y, name=\"lro\")\n\n mod = SVRGModule(\n symbol=lro,\n data_names=['data'],\n label_names=['lin_reg_label'], update_freq=2)\n mod.bind(data_shapes=di.provide_data, label_shapes=di.provide_label)\n mod.init_params(initializer=mx.init.One(), allow_missing=False, force_init=False, allow_extra=False)\n mod.init_optimizer(kvstore='local', optimizer='sgd', optimizer_params=(('learning_rate', 0.01),),\n force_init=False)\n return di, mod\n\n di, svrg_mod = create_network()\n\n # Calculates the average of full gradients over number batches\n full_grads_weights = mx.nd.zeros(shape=svrg_mod.get_params()[0]['fc1_weight'].shape)\n arg, aux = svrg_mod.get_params()\n svrg_mod._mod_aux.set_params(arg_params=arg, 
aux_params=aux)\n num_batch = 2\n\n for batch in di:\n svrg_mod.forward(batch)\n svrg_mod.backward()\n full_grads_weights = mx.nd.broadcast_add(svrg_mod._exec_group.grad_arrays[0][0], full_grads_weights, axis=0)\n full_grads_weights /= num_batch\n\n di.reset()\n svrg_mod.update_full_grads(di)\n assert same(full_grads_weights, svrg_mod._param_dict[0]['fc1_weight'])\n\n\n@unittest.skip(\"Flaky test https://github.com/apache/incubator-mxnet/issues/12510\")\n@with_seed()\ndef test_svrg_with_sgd():\n def create_module_with_sgd():\n train_data = np.random.randint(1, 5, [100, 2])\n weights = np.array([1.0, 2.0])\n train_label = train_data.dot(weights)\n\n di = mx.io.NDArrayIter(train_data, train_label, batch_size=10, shuffle=True, label_name='lin_reg_label')\n X = mx.sym.Variable('data')\n Y = mx.symbol.Variable('lin_reg_label')\n fully_connected_layer = mx.sym.FullyConnected(data=X, name='fc1', num_hidden=1)\n lro = mx.sym.LinearRegressionOutput(data=fully_connected_layer, label=Y, name=\"lro\")\n\n reg_mod = mx.mod.Module(\n symbol=lro,\n data_names=['data'],\n label_names=['lin_reg_label'])\n reg_mod.bind(data_shapes=di.provide_data, label_shapes=di.provide_label)\n reg_mod.init_params(initializer=mx.init.One(), allow_missing=False, force_init=False, allow_extra=False)\n reg_mod.init_optimizer(kvstore='local', optimizer='sgd', optimizer_params=(('learning_rate', 0.01),))\n\n svrg_mod = SVRGModule(symbol=lro,\n data_names=['data'],\n label_names=['lin_reg_label'],\n update_freq=2)\n svrg_mod.bind(data_shapes=di.provide_data, label_shapes=di.provide_label)\n svrg_mod.init_params(initializer=mx.init.One(), allow_missing=False, force_init=False, allow_extra=False)\n svrg_mod.init_optimizer(kvstore='local', optimizer='sgd', optimizer_params=(('learning_rate', 0.01),))\n\n return di,reg_mod, svrg_mod\n\n di, reg_mod, svrg_mod = create_module_with_sgd()\n num_epoch = 10\n\n # Use metric MSE\n metrics = mx.metric.create(\"mse\")\n\n # Train with SVRGModule\n for e in range(num_epoch):\n metrics.reset()\n if e % svrg_mod.update_freq == 0:\n svrg_mod.update_full_grads(di)\n di.reset()\n for batch in di:\n svrg_mod.forward_backward(data_batch=batch)\n svrg_mod.update()\n svrg_mod.update_metric(metrics, batch.label)\n svrg_mse = metrics.get()[1]\n\n # Train with SGD standard Module\n di.reset()\n for e in range(num_epoch):\n metrics.reset()\n di.reset()\n for batch in di:\n reg_mod.forward_backward(data_batch=batch)\n reg_mod.update()\n reg_mod.update_metric(metrics, batch.label)\n sgd_mse = metrics.get()[1]\n\n assert svrg_mse < sgd_mse\n\n\n@unittest.skip(\"Flaky test https://github.com/apache/incubator-mxnet/issues/12510\")\n@with_seed()\ndef test_accumulate_kvstore():\n # Test KVStore behavior when push a list of values\n kv = mx.kv.create('local')\n kv.init(\"fc1_weight\", mx.nd.zeros(shape=(1, 2)))\n kv.init(\"fc1_weight_full\", mx.nd.zeros(shape=(1, 2)))\n b = [mx.nd.ones(shape=(1, 2)) for i in range(4)]\n a = mx.nd.zeros(shape=(1, 2))\n kv.push(\"fc1_weight_full\", b)\n kv.pull(\"fc1_weight_full\", out=a)\n assert same(a, [mx.nd.array([4, 4])])\n assert kv.num_workers == 1\n\n # Test accumulate in KVStore and allocate gradients\n kv_test = mx.kv.create('local')\n _, svrg_mod = setup()\n svrg_mod.init_optimizer(kvstore=kv_test, optimizer='sgd', optimizer_params=(('learning_rate', 0.01),),\n force_init=False)\n svrg_mod._accumulate_kvstore(\"fc1_weight\", b)\n assert len(svrg_mod._param_dict) == svrg_mod._ctx_len\n assert same(svrg_mod._param_dict[0][\"fc1_weight\"], b[0])\n\n\n
@unittest.skip(\"Flaky test https://github.com/apache/incubator-mxnet/issues/12510\")\n@with_seed()\ndef test_fit():\n di, mod = setup()\n num_epoch = 100\n metric = mx.metric.create(\"mse\")\n mod.fit(di, eval_metric=metric, optimizer='sgd', optimizer_params=(('learning_rate', 0.025),), num_epoch=num_epoch,\n kvstore='local')\n\n # Estimated MSE for using SGD optimizer of lr = 0.025, SVRG MSE should be smaller\n estimated_mse = 1e-5\n assert metric.get()[1] < estimated_mse\n\n\nif __name__ == \"__main__\":\n import nose\n nose.runmodule()\n", "# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n\nimport os, gzip\nimport sys\nimport mxnet as mx\nimport numpy as np\n\nclass Dictionary(object):\n def __init__(self):\n self.word2idx = {}\n self.idx2word = []\n self.word_count = []\n\n def add_word(self, word):\n if word not in self.word2idx:\n self.idx2word.append(word)\n self.word2idx[word] = len(self.idx2word) - 1\n self.word_count.append(0)\n index = self.word2idx[word]\n self.word_count[index] += 1\n return index\n\n def __len__(self):\n return len(self.idx2word)\n\nclass Corpus(object):\n def __init__(self, path):\n self.dictionary = Dictionary()\n self.train = self.tokenize(path + 'train.txt')\n self.valid = self.tokenize(path + 'valid.txt')\n self.test = self.tokenize(path + 'test.txt')\n\n def tokenize(self, path):\n \"\"\"Tokenizes a text file.\"\"\"\n assert os.path.exists(path)\n # Add words to the dictionary\n with open(path, 'r') as f:\n tokens = 0\n for line in f:\n words = line.split() + ['<eos>']\n tokens += len(words)\n for word in words:\n self.dictionary.add_word(word)\n\n # Tokenize file content\n with open(path, 'r') as f:\n ids = np.zeros((tokens,), dtype='int32')\n token = 0\n for line in f:\n words = line.split() + ['<eos>']\n for word in words:\n ids[token] = self.dictionary.word2idx[word]\n token += 1\n\n return mx.nd.array(ids, dtype='int32')\n\ndef batchify(data, batch_size):\n \"\"\"Reshape data into (num_example, batch_size)\"\"\"\n nbatch = data.shape[0] // batch_size\n data = data[:nbatch * batch_size]\n data = data.reshape((batch_size, nbatch)).T\n return data\n\nclass CorpusIter(mx.io.DataIter):\n \"An iterator that returns the a batch of sequence each time\"\n def __init__(self, source, batch_size, bptt):\n super(CorpusIter, self).__init__()\n self.batch_size = batch_size\n self.provide_data = [('data', (bptt, batch_size), np.int32)]\n self.provide_label = [('label', (bptt, batch_size))]\n self._index = 0\n self._bptt = bptt\n self._source = batchify(source, batch_size)\n\n def iter_next(self):\n i = self._index\n if i+self._bptt > self._source.shape[0] - 1:\n return False\n self._next_data = self._source[i:i+self._bptt]\n self._next_label = self._source[i+1:i+1+self._bptt].astype(np.float32)\n self._index += self._bptt\n return 
True\n\n def next(self):\n if self.iter_next():\n return mx.io.DataBatch(data=self.getdata(), label=self.getlabel())\n else:\n raise StopIteration\n\n def reset(self):\n self._index = 0\n self._next_data = None\n self._next_label = None\n\n def getdata(self):\n return [self._next_data]\n\n def getlabel(self):\n return [self._next_label]\n", "# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n\n# pylint: skip-file\nfrom csv import DictReader\nimport os\nimport mxnet as mx\nimport numpy as np\n\n\ndef get_uci_adult(data_dir, data_name, url):\n if not os.path.isdir(data_dir):\n os.mkdir(data_dir)\n os.chdir(data_dir)\n if (not os.path.exists(data_name)):\n print(\"Dataset \" + data_name + \" not present. Downloading now ...\")\n os.system(\"wget %r\" % url + data_name)\n if \"test\" in data_name:\n os.system(\"sed -i '1d' %r\" % data_name)\n print(\"Dataset \" + data_name + \" is now present.\")\n csr, dns, label = preprocess_uci_adult(data_name)\n os.chdir(\"..\")\n return csr, dns, label\n\n\ndef preprocess_uci_adult(data_name):\n \"\"\"Some tricks of feature engineering are adapted\n from tensorflow's wide and deep tutorial.\n \"\"\"\n csv_columns = [\n \"age\", \"workclass\", \"fnlwgt\", \"education\", \"education_num\",\n \"marital_status\", \"occupation\", \"relationship\", \"race\", \"gender\",\n \"capital_gain\", \"capital_loss\", \"hours_per_week\", \"native_country\",\n \"income_bracket\"\n ]\n\n vocabulary_dict = {\n \"gender\": [\n \"Female\", \"Male\"\n ],\n \"education\": [\n \"Bachelors\", \"HS-grad\", \"11th\", \"Masters\", \"9th\",\n \"Some-college\", \"Assoc-acdm\", \"Assoc-voc\", \"7th-8th\",\n \"Doctorate\", \"Prof-school\", \"5th-6th\", \"10th\", \"1st-4th\",\n \"Preschool\", \"12th\"\n ],\n \"marital_status\": [\n \"Married-civ-spouse\", \"Divorced\", \"Married-spouse-absent\",\n \"Never-married\", \"Separated\", \"Married-AF-spouse\", \"Widowed\"\n ],\n \"relationship\": [\n \"Husband\", \"Not-in-family\", \"Wife\", \"Own-child\", \"Unmarried\",\n \"Other-relative\"\n ],\n \"workclass\": [\n \"Self-emp-not-inc\", \"Private\", \"State-gov\", \"Federal-gov\",\n \"Local-gov\", \"?\", \"Self-emp-inc\", \"Without-pay\", \"Never-worked\"\n ]\n }\n # wide columns\n crossed_columns = [\n [\"education\", \"occupation\"],\n [\"native_country\", \"occupation\"],\n [\"age_buckets\", \"education\", \"occupation\"],\n ]\n age_boundaries = [18, 25, 30, 35, 40, 45, 50, 55, 60, 65]\n # deep columns\n indicator_columns = ['workclass', 'education', 'gender', 'relationship']\n \n embedding_columns = ['native_country', 'occupation']\n\n continuous_columns = ['age', 'education_num', 'capital_gain', 'capital_loss', 'hours_per_week']\n # income_bracket column is the label\n labels = [\"<\", \">\"]\n\n hash_bucket_size = 1000\n \n csr_ncols = 
len(crossed_columns) * hash_bucket_size\n dns_ncols = len(continuous_columns) + len(embedding_columns)\n for col in indicator_columns:\n dns_ncols += len(vocabulary_dict[col])\n\n label_list = []\n csr_list = []\n dns_list = []\n\n with open(data_name) as f:\n for row in DictReader(f, fieldnames=csv_columns):\n label_list.append(labels.index(row['income_bracket'].strip()[0]))\n\n for i, cols in enumerate(crossed_columns):\n if cols[0] == \"age_buckets\":\n age_bucket = np.digitize(float(row[\"age\"]), age_boundaries)\n s = '_'.join([row[col].strip() for col in cols[1:]])\n s += '_' + str(age_bucket)\n csr_list.append((i * hash_bucket_size + hash(s) % hash_bucket_size, 1.0))\n else:\n s = '_'.join([row[col].strip() for col in cols])\n csr_list.append((i * hash_bucket_size + hash(s) % hash_bucket_size, 1.0))\n \n dns_row = [0] * dns_ncols\n dns_dim = 0\n for col in embedding_columns:\n dns_row[dns_dim] = hash(row[col].strip()) % hash_bucket_size\n dns_dim += 1\n\n for col in indicator_columns:\n dns_row[dns_dim + vocabulary_dict[col].index(row[col].strip())] = 1.0\n dns_dim += len(vocabulary_dict[col])\n\n for col in continuous_columns:\n dns_row[dns_dim] = float(row[col].strip())\n dns_dim += 1\n\n dns_list.append(dns_row)\n\n data_list = [item[1] for item in csr_list]\n indices_list = [item[0] for item in csr_list]\n indptr_list = range(0, len(indices_list) + 1, len(crossed_columns))\n # convert to ndarrays\n csr = mx.nd.sparse.csr_matrix((data_list, indices_list, indptr_list),\n shape=(len(label_list), hash_bucket_size * len(crossed_columns)))\n dns = np.array(dns_list)\n label = np.array(label_list)\n return csr, dns, label\n", "# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. 
See the License for the\n# specific language governing permissions and limitations\n# under the License.\n\nfrom __future__ import print_function\nfrom six.moves import range\n\nimport argparse\nimport subprocess\nfrom itertools import product\nfrom time import time\n\nimport mxnet as mx\nimport numpy as np\nfrom mxnet import gluon\n\n\n_parser = argparse.ArgumentParser(description='Benchmark foreach and while_loop on RNN tasks.')\n_parser.add_argument('--benchmark', choices=[\"foreach\", \"while_loop\"], required=True)\n_parser.add_argument('--warmup_rounds', type=int, default=20)\n_parser.add_argument('--test_rounds', type=int, default=100)\n_parser.add_argument('--gpu', type=bool, default=False)\nargs = _parser.parse_args()\n\n\nclass ForeachRNN(gluon.HybridBlock):\n def __init__(self, cell, length, prefix=None, params=None):\n super(ForeachRNN, self).__init__(prefix=prefix, params=params)\n self.length = length\n self.cell = cell\n\n def hybrid_forward(self, F, inputs, states):\n out, states = F.contrib.foreach(self.cell, inputs, states)\n return out\n\n\nclass WhileRNN(gluon.HybridBlock):\n def __init__(self, cell, length, prefix=None, params=None):\n super(WhileRNN, self).__init__(prefix=prefix, params=params)\n self.length = length\n self.cell = cell\n\n def hybrid_forward(self, F, inputs, states):\n def _func(*states):\n i = states[0]\n s = states[1: ]\n data = inputs.take(i).squeeze(axis=0)\n out, new_s = self.cell(data, s)\n new_s = [i + 1] + new_s\n return out, new_s\n out, states = F.contrib.while_loop(\n cond=lambda i, *_: i < self.length,\n func=_func,\n loop_vars=states,\n max_iterations=self.length,\n )\n return out\n\n\ndef _zeros(shape, ctx):\n return mx.nd.zeros(shape=shape, ctx=ctx)\n\n\ndef _array(shape, ctx):\n return mx.nd.normal(loc=0.0, scale=1.0, shape=shape, ctx=ctx)\n\n\ndef _get_gpus():\n try:\n re = subprocess.check_output([\"nvidia-smi\", \"-L\"], universal_newlines=True)\n except OSError:\n return []\n return range(len([i for i in re.split('\\n') if 'GPU' in i]))\n\n\ndef run_benchmark(cell_type, ctx, seq_len, batch_size, hidden_dim):\n obj = {\"foreach\": ForeachRNN, \"while_loop\": WhileRNN}[args.benchmark]\n inputs = _array((seq_len, batch_size, hidden_dim), ctx)\n states = [_array((batch_size, hidden_dim), ctx) for _ in cell_type(0).state_info()]\n if args.benchmark == \"while_loop\":\n states.insert(0, _zeros((1, ), ctx))\n\n for is_train, is_hyb_cell, is_hyb_layer in product([True, False], [False, True], [False, True]):\n cell = cell_type(hidden_dim)\n if is_hyb_cell:\n cell.hybridize(static_alloc=True)\n layer = obj(cell, seq_len)\n layer.initialize(ctx=ctx)\n if is_hyb_layer:\n layer.hybridize(static_alloc=True)\n print(\"is_train = %r, hybridize_cell = %r, hybridize_layer = %r\" % (is_train, is_hyb_cell, is_hyb_layer))\n times = []\n for _ in range(args.warmup_rounds + args.test_rounds):\n tick = time()\n if not is_train:\n res = layer(inputs, states)\n else:\n with mx.autograd.record():\n res = layer(inputs, states)\n if is_train:\n res.backward()\n mx.nd.waitall()\n tock = time()\n times.append((tock - tick) * 1000.0)\n times = times[args.warmup_rounds: ]\n print(\"Time used: mean = %.3f ms, std = %.3f ms\" % (np.mean(times), np.std(times)))\n\n\ndef main():\n # testing configurations\n cell_types = [gluon.rnn.RNNCell,\n gluon.rnn.GRUCell,\n gluon.rnn.LSTMCell]\n ctxs = [mx.cpu(0)]\n if args.gpu:\n ctxs = ctxs + [mx.gpu(i) for i in _get_gpus()]\n seq_lens = [100]\n batch_sizes = [1, 32]\n hidden_dims = [512]\n 
print(\"--------------------------------------\")\n print(\"Benchmarking\", args.benchmark)\n for cell_type, ctx, seq_len, batch_size, hidden_dim in product( \\\n cell_types, ctxs, seq_lens, batch_sizes, hidden_dims):\n print(\"--------------------------------------\")\n print(\"cell: %s ctx: %s length: %d batch size: %d dim: %d\" % \\\n (cell_type.__name__, str(ctx), seq_len, batch_size, hidden_dim))\n run_benchmark(cell_type, ctx, seq_len, batch_size, hidden_dim)\n\n\nif __name__ == \"__main__\":\n main()\n", "# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n\nimport numpy as np\nfrom skimage import io, transform\nfrom skimage.restoration import denoise_tv_chambolle\nimport logging\nimport random\nFORMAT = '%(asctime)-15s %(message)s'\nlogging.basicConfig(level=logging.INFO, format=FORMAT)\n\ndef PreprocessContentImage(path, short_edge, dshape=None):\n img = io.imread(path)\n #logging.info(\"load the content image, size = %s\", img.shape[:2])\n factor = float(short_edge) / min(img.shape[:2])\n new_size = (int(img.shape[0] * factor), int(img.shape[1] * factor))\n resized_img = transform.resize(img, new_size)\n sample = np.asarray(resized_img) * 256\n if dshape is not None:\n # random crop\n xx = int((sample.shape[0] - dshape[2]))\n yy = int((sample.shape[1] - dshape[3]))\n xstart = random.randint(0, xx)\n ystart = random.randint(0, yy)\n xend = xstart + dshape[2]\n yend = ystart + dshape[3]\n sample = sample[xstart:xend, ystart:yend, :]\n\n # swap axes to make image from (224, 224, 3) to (3, 224, 224)\n sample = np.swapaxes(sample, 0, 2)\n sample = np.swapaxes(sample, 1, 2)\n # sub mean\n sample[0, :] -= 123.68\n sample[1, :] -= 116.779\n sample[2, :] -= 103.939\n #logging.info(\"resize the content image to %s\", sample.shape)\n return np.resize(sample, (1, 3, sample.shape[1], sample.shape[2]))\n\ndef PreprocessStyleImage(path, shape):\n img = io.imread(path)\n resized_img = transform.resize(img, (shape[2], shape[3]))\n sample = np.asarray(resized_img) * 256\n sample = np.swapaxes(sample, 0, 2)\n sample = np.swapaxes(sample, 1, 2)\n\n sample[0, :] -= 123.68\n sample[1, :] -= 116.779\n sample[2, :] -= 103.939\n return np.resize(sample, (1, 3, sample.shape[1], sample.shape[2]))\n\ndef PostprocessImage(img):\n img = np.resize(img, (3, img.shape[2], img.shape[3]))\n img[0, :] += 123.68\n img[1, :] += 116.779\n img[2, :] += 103.939\n img = np.swapaxes(img, 1, 2)\n img = np.swapaxes(img, 0, 2)\n img = np.clip(img, 0, 255)\n return img.astype('uint8')\n\ndef SaveImage(img, filename, remove_noise=0.02):\n logging.info('save output to %s', filename)\n out = PostprocessImage(img)\n if remove_noise != 0.0:\n out = denoise_tv_chambolle(out, weight=remove_noise, multichannel=True)\n io.imsave(filename, out)\n\n\n\n\n" ]
[ [ "numpy.array", "numpy.random.randint" ], [ "numpy.array", "numpy.random.randint" ], [ "numpy.zeros" ], [ "numpy.array" ], [ "numpy.std", "numpy.mean" ], [ "numpy.asarray", "numpy.swapaxes", "numpy.resize", "numpy.clip" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
mfkiwl/depthy
[ "2a1f4f8abe59cdfae09a219eb12412453f1edca3" ]
[ "depthy/lightfield/structure_tensor.py" ]
[ "import numpy as np\nfrom scipy.ndimage import gaussian_filter, convolve\n\n\ndef local_structure_tensor(img: np.ndarray,\n si: float = 0.8,\n so: float = 1.6,\n slope_method: str = 'eigen',\n grad_method: str = None,\n f: float = 1) -> [np.ndarray, np.ndarray, np.ndarray]:\n \"\"\"\n This function computes the local slopes of a given input image (e.g. epipolar image) using a structure tensor.\n\n :param img: image input (e.g. epipolar image)\n :param si: sigma for inner Gaussian kernel\n :param so: sigma for outer Gaussian kernel\n :param slope_method: 'eigen' for eigendecomposition\n :param grad_method: partial derivative method with 'scharr' as default and 'sobel' or 'gradient' as alternatives\n :param f: focal length scaling local slope values\n :return: local_slopes, coherence, n\n \"\"\"\n\n img = img if len(img.shape) == 3 else img[..., np.newaxis]\n chs = img.shape[-1] if len(img.shape) == 3 else (1,)\n grad_method = 'scharr' if grad_method is None else grad_method\n\n jyy, jxx, jxy = np.zeros((3,) + img.shape)\n for ch in range(chs):\n # gaussian filter for smoothness/de-noising\n img[..., ch] = gaussian_filter(img[..., ch], si)\n\n # compute image gradients\n grad_y, grad_x = partial_img_gradients(img[..., ch], method=grad_method)\n\n # compute structure tensor (using gradient maps)\n jyy[..., ch] = gaussian_filter(grad_y**2, so)\n jxx[..., ch] = gaussian_filter(grad_x**2, so)\n jxy[..., ch] = gaussian_filter(grad_x * grad_y, so)\n\n # local gradients of structure tensor\n if slope_method == 'eigen':\n num = -.5 * (jxx - jyy - np.sqrt((jxx-jyy)**2 + 4*jxy**2))\n denom = jxy\n else:\n raise Exception('Local slope method %s not recognized' % slope_method)\n local_slopes = f*np.divide(num, denom, out=np.zeros_like(denom), where=denom != 0)\n\n # slope direction as vector n\n n = np.array([(jyy-jxx), (2*jxy)])\n\n # coherence as reliability measure\n coherence = np.sqrt(np.divide((jyy-jxx)**2+4*jxy**2, (jxx+jyy)**2, out=np.zeros_like(jxx), where=jxx+jyy != 0))\n\n return local_slopes, coherence, n\n\n\ndef partial_img_gradients(img: np.ndarray, method: str = 'gradient') -> [np.ndarray, np.ndarray]:\n \"\"\"\n Compute partial derivatives of a 2-dimensional image.\n\n :param img: input image\n :param method: method for first-order partial derivative featuring 'scharr', 'sobel' and 'gradient'.\n :return: vertical partial gradient, horizontal partial gradient\n \"\"\"\n\n if method == 'scharr':\n kernel = np.array([[3, 10, 3], [0, 0, 0], [-3, -10, -3]])\n grad_y = convolve(img, kernel)\n grad_x = convolve(img, kernel.T)\n elif method == 'sobel':\n kernel = np.array([[1, 2, 1], [0, 0, 0], [-1, -2, -1]])\n grad_y = convolve(img, kernel)\n grad_x = convolve(img, kernel.T)\n elif method == 'gradient':\n grad_y = np.gradient(img, axis=0)\n grad_x = np.gradient(img, axis=1)\n else:\n raise Exception('Gradient method %s not supported' % method)\n\n return grad_y, grad_x\n" ]
[ [ "scipy.ndimage.gaussian_filter", "numpy.sqrt", "numpy.gradient", "scipy.ndimage.convolve", "numpy.zeros_like", "numpy.array", "numpy.zeros" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "1.7", "1.0", "0.10", "1.2", "0.14", "0.19", "1.5", "0.12", "0.17", "0.13", "1.6", "1.4", "1.9", "1.3", "1.10", "0.15", "0.18", "0.16", "1.8" ], "tensorflow": [] } ]
KyungB/sc2_team_classifier
[ "fe49c2973b1e3feb4c7c56f5cf89438e3d5b2450" ]
[ "kmean.py" ]
[ "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Apr 22 12:22:46 2019\n\n@author: KY-Coffee\n\"\"\"\n\nfrom sklearn.cluster import KMeans\nimport matplotlib.pyplot as plt\n\ndef plot_KMeans(k_data,n=14, title=\"\", margins=True):\n model = KMeans(n)\n model.fit(k_data[0])\n \n y_kmeans = model.predict(k_data[0])\n plt.figure(figsize=(10,5))\n plt.title(\"KMeans clustering result for n = \"+str(n)+ \" \"+title)\n #plt.xlabel(\"Predicted\")\n #plt.ylabel(\"Actual\")\n if margins:\n plt.axis([0,10,0,20])\n plt.scatter(k_data[0][:, 0], k_data[0][:, 1], c=y_kmeans, s=50, cmap='viridis')\n plt.legend(y_kmeans)\n #plt.scatter(all_prediction,range(1,len(all_prediction)+1))" ]
[ [ "matplotlib.pyplot.legend", "matplotlib.pyplot.scatter", "sklearn.cluster.KMeans", "matplotlib.pyplot.axis", "matplotlib.pyplot.figure" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
aroldanm/PartnerPlayBot
[ "758a5b010e22c72186c19de1510276a10894f829" ]
[ "Robot/AIModule.py" ]
[ "import operator\nimport numpy as np\nfrom VC import VCModule\nfrom time import sleep\n\nclass AIModule():\n vc = None\n \n def __init__(self):\n print(\"init aimodule\")\n self.vc = VCModule()\n \n#################################################################################\n# MÉTODOS PÚBLICO\n#################################################################################\n \n def scanf(self, tableroPrevio):\n haveChanged = False\n while haveChanged == False:\n print(\"Esperando a que el tablero cambie\")\n haveChanged = self.anyChange(tableroPrevio)\n\n def getTablero(self):\n return self.vc.getTablero()\n\n def getMovimiento(self):\n #Calcular movimiento\n tablero = self.getTablero()\n tirada = self.minimax(tablero, 2, 0, 99, 'b')\n print(tirada)\n print(\"--------------\")\n print(tablero)\n #Calcular eliminadas\n if len(tirada) > 0:\n movimiento = tirada[0]\n fichas_muertas = self.fichas_comidas(movimiento)\n return movimiento, fichas_muertas\n\n#################################################################################\n# MÉTODOS PRIVADOS\n#################################################################################\n\n def anyChange(self, tableroPrevio):\n # Miramos si la ultima matrix y la última ha cambiado\n tableroActual = self.getTablero()\n print(tableroPrevio)\n print(\"-------\")\n print(tableroActual)\n changed = False\n #if self.vc.comprobacionRuido() == False\n i = 0\n j = 0\n while changed == False and i < 8:\n if tableroPrevio[i][j] != tableroActual[i][j]:\n changed = True\n j += 1\n if j >= 8:\n j = 0\n i += 1\n return changed\n \n#################################################################################\n\n #Una jugada será un string tal que origen-destino: c3b2\n def minimax(self, tablero, profundidad, a, b, jugador):\n if profundidad == 0:\n \n if jugador == 'N':\n dict_movimientos_negras = {\"n1\" : \"-\", \"n2\" : \"-\", \"n3\" : \"-\", \"n4\" : \"-\", \"n5\" : \"-\",\n \"n6\" : \"-\", \"n7\" : \"-\", \"n8\" : \"-\", \"n9\" : \"-\", \"n10\" : \"-\",\n \"n11\" : \"-\", \"n12\" : \"-\"}\n dict_movimientos_negras = self.posibles_movimientos(tablero, dict_movimientos_negras, 'n')\n ordenacion_move = self.ordenar_movimientos(dict_movimientos_negras, 'n')\n movimiento_negro = ordenacion_move[0]\n \n return movimiento_negro\n else:\n dict_movimientos_blancas = {\"b1\" : \"-\", \"b2\" : \"-\", \"b3\" : \"-\", \"b4\" : \"-\", \"b5\" : \"-\",\n \"b6\" : \"-\", \"b7\" : \"-\", \"b8\" : \"-\", \"b9\" : \"-\", \"b10\" : \"-\",\n \"b11\" : \"-\", \"b12\" : \"-\"}\n dict_movimientos_blancas = self.posibles_movimientos(tablero, dict_movimientos_blancas, 'b')\n ordenacion_move = self.ordenar_movimientos(dict_movimientos_blancas, 'b')\n movimiento_blanco = ordenacion_move[0]\n \n return movimiento_blanco\n else: \n best_move = None\n \n \n if jugador == 'N':\n dict_negras_posteriori = {\"n1\" : \"-\", \"n2\" : \"-\", \"n3\" : \"-\", \"n4\" : \"-\", \"n5\" : \"-\",\n \"n6\" : \"-\", \"n7\" : \"-\", \"n8\" : \"-\", \"n9\" : \"-\", \"n10\" : \"-\",\n \"n11\" : \"-\", \"n12\" : \"-\"}\n dict_negras_posteriori = self.posibles_movimientos(tablero, dict_negras_posteriori, 'n')\n ordenacion_move_negras = self.ordenar_movimientos(dict_negras_posteriori, 'n')\n \n for i in ordenacion_move_negras:\n tablero_aux = np.copy(tablero)\n tablero_aux = self.realizar_movimiento(tablero_aux, ordenacion_move_negras[0][0])\n valor = self.minimax(tablero_aux, profundidad - 1, a, b, 'b')\n b = max(int(ordenacion_move_negras[0][1]), int(valor[0][1]))\n if b == 
ordenacion_move_negras[0][1]:\n best_move = ordenacion_move_negras[0]\n else:\n best_move = valor\n else:\n dict_blancas_posteriori = {\"b1\" : \"-\", \"b2\" : \"-\", \"b3\" : \"-\", \"b4\" : \"-\", \"b5\" : \"-\",\n \"b6\" : \"-\", \"b7\" : \"-\", \"b8\" : \"-\", \"b9\" : \"-\", \"b10\" : \"-\",\n \"b11\" : \"-\", \"b12\" : \"-\"}\n dict_blancas_posteriori = self.posibles_movimientos(tablero, dict_blancas_posteriori, 'b')\n ordenacion_move_blancas = self.ordenar_movimientos(dict_blancas_posteriori, 'b')\n \n for i in ordenacion_move_blancas:\n tablero_aux = np.copy(tablero)\n \n tablero_aux = self.realizar_movimiento(tablero_aux, ordenacion_move_blancas[0][0])\n valor = self.minimax(tablero_aux, profundidad - 1, a, b, 'N')\n b = min(int(ordenacion_move_blancas[0][1]), int(valor[0][1]))\n if b == ordenacion_move_blancas[0][1]:\n best_move = ordenacion_move_blancas[0]\n else:\n best_move = valor \n \n return best_move\n \n \n def ordenar_movimientos(self, diccionario, jugador):\n aux= {}\n lista = []\n if jugador == 'b':\n lista = [\"b1\", \"b2\", \"b3\", \"b4\", \"b5\", \"b6\", \"b7\", \"b8\", \"b9\",\"b10\", \"b11\", \"b12\"]\n else:\n lista = [\"n1\", \"n2\", \"n3\", \"n4\", \"n5\", \"n6\", \"n7\", \"n8\", \"n9\",\"n10\", \"n11\", \"n12\"]\n \n for ficha in lista:\n #value = diccionario.pop(ficha)\n if diccionario.get(ficha) != '-': #not empty\n aux.update(diccionario.pop(ficha))\n \n resultado = sorted(aux.items(), key=operator.itemgetter(1))\n resultado.reverse()\n return resultado\n \n \n def comprobar_movimiento(self, tablero, x, y, ficha):\n dict2 = {}\n \n #MOVIMIENTOS FICHAS BLANCAS\n if \"b\" in ficha:\n \n #COMER 3 FICHAS\n if x>=6 and x<=7:\n if y>=0 and y<=5:\n #id1\n if ((\"n\" in tablero[x-1][y+1]) and tablero[x-2][y+2] == \"-\" and \"n\" in tablero[x-3][y+1] \n and tablero[x-4][y] == \"-\" and \"n\" in tablero[x-5][y+1] and tablero[x-6][y+2] == \"-\"):\n dict2.update({(str(x+1)+str(y+1)+str(x-2+1)+str(y+2+1)+str(x-4+1)+str(y+1)+str(x-6+1)+str(y+2+1)) : 3 })\n if y>=2 and y<=7:\n #id2\n if ((\"n\" in tablero[x-1][y-1]) and tablero[x-2][y-2] == \"-\" and \"n\" in tablero[x-3][y-1] \n and tablero[x-4][y] == \"-\" and \"n\" in tablero[x-5][y-1] and tablero[x-6][y-2] == \"-\"):\n dict2.update({(str(x+1)+str(y+1)+str(x-2+1)+str(y-2+1)+str(x-4+1)+str(y+1)+str(x-6+1)+str(y-2+1)) : 3 })\n if y>=6 and y<=7:\n #id3\n if ((\"n\" in tablero[x-1][y-1]) and tablero[x-2][y-2] == \"-\" and \"n\" in tablero[x-3][y-3] \n and tablero[x-4][y-4] == \"-\" and \"n\" in tablero[x-5][y-5] and tablero[x-6][y-6] == \"-\"):\n dict2.update({(str(x+1)+str(y+1)+str(x-2+1)+str(y-2+1)+str(x-4+1)+str(y-4+1)+str(x-6+1)+str(y-6+1)) : 3 })\n if y>=0 and y <=1:\n #id4\n if ((\"n\" in tablero[x-1][y+1]) and tablero[x-2][y+2] == \"-\" and \"n\" in tablero[x-3][y+3] \n and tablero[x-4][y+4] == \"-\" and \"n\" in tablero[x-5][y+5] and tablero[x-6][y+6] == \"-\"):\n dict2.update({(str(x+1)+str(y+1)+str(x-2+1)+str(y+2+1)+str(x-4+1)+str(y+4+1)+str(x-6+1)+str(y+6+1)) : 3 })\n if y>=0 and y<=3:\n #id5\n if ((\"n\" in tablero[x-1][y+1]) and tablero[x-2][y+2] == \"-\" and \"n\" in tablero[x-3][y+3] \n and tablero[x-4][y+4] == \"-\" and \"n\" in tablero[x-5][y+3] and tablero[x-6][y+2] == \"-\"):\n dict2.update({(str(x+1)+str(y+1)+str(x-2+1)+str(y+2+1)+str(x-4+1)+str(y+4+1)+str(x-6+1)+str(y+2+1)) : 3 })\n if y>=4 and y<=7:\n #id6\n if ((\"n\" in tablero[x-1][y-1]) and tablero[x-2][y-2] == \"-\" and \"n\" in tablero[x-3][y-3] \n and tablero[x-4][y-4] == \"-\" and \"n\" in tablero[x-5][y-3] and tablero[x-6][y-2] == \"-\"):\n 
dict2.update({(str(x+1)+str(y+1)+str(x-2+1)+str(y-2+1)+str(x-4+1)+str(y-4+1)+str(x-6+1)+str(y-2+1)) : 3 })\n if y>=2 and y<=5:\n #id7\n if ((\"n\" in tablero[x-1][y+1]) and tablero[x-2][y+2] == \"-\" and \"n\" in tablero[x-3][y+1] \n and tablero[x-4][y] == \"-\" and \"n\" in tablero[x-5][y-1] and tablero[x-6][y-2] == \"-\"): \n dict2.update({(str(x+1)+str(y+1)+str(x-2+1)+str(y+2+1)+str(x-4+1)+str(y+1)+str(x-6+1)+str(y-2+1)) : 3 })\n #id8\n if ((\"n\" in tablero[x-1][y-1]) and tablero[x-2][y-2] == \"-\" and \"n\" in tablero[x-3][y-1] \n and tablero[x-4][y] == \"-\" and \"n\" in tablero[x-5][y+1] and tablero[x-6][y+2] == \"-\"):\n dict2.update({(str(x+1)+str(y+1)+str(x-2+1)+str(y-2+1)+str(x-4+1)+str(y+1)+str(x-6+1)+str(y+2+1)) : 3 })\n \n #COMER 2 FICHAS\n if x>=4 and x<=7:\n if y>=2 and y<=7:\n #id9\n if ((\"n\" in tablero[x-1][y-1]) and tablero[x-2][y-2] == \"-\" and \"n\" in tablero[x-3][y-1] \n and tablero[x-4][y] == \"-\"):\n dict2.update({(str(x+1)+str(y+1)+str(x-2+1)+str(y-2+1)+str(x-4+1)+str(y+1)) : 2 })\n if y>=0 and y<=5:\n #id10\n if ((\"n\" in tablero[x-1][y+1]) and tablero[x-2][y+2] == \"-\" and \"n\" in tablero[x-3][y+1] \n and tablero[x-4][y] == \"-\"):\n dict2.update({(str(x+1)+str(y+1)+str(x-2+1)+str(y+2+1)+str(x-4+1)+str(y+1)) : 2 })\n if y>=0 and y<=3:\n #id11\n if ((\"n\" in tablero[x-1][y+1]) and tablero[x-2][y+2] == \"-\" and \"n\" in tablero[x-3][y+3] \n and tablero[x-4][y+4] == \"-\"):\n dict2.update({(str(x+1)+str(y+1)+str(x-2+1)+str(y+2+1)+str(x-4+1)+str(y+4+1)) : 2 })\n if y>=4 and y<=7:\n #id12\n if ((\"n\" in tablero[x-1][y-1]) and tablero[x-2][y-2] == \"-\" and \"n\" in tablero[x-3][y-3] \n and tablero[x-4][y-4] == \"-\"):\n dict2.update({(str(x+1)+str(y+1)+str(x-2+1)+str(y-2+1)+str(x-4+1)+str(y-4+1)) : 2 })\n \n #COMER 1 FICHA\n if x>=2 and x<=7:\n if y>=0 and y<=5:\n #id13\n if ((\"n\" in tablero[x-1][y+1]) and tablero[x-2][y+2] == \"-\"):\n dict2.update({(str(x+1)+str(y+1)+str(x-2+1)+str(y+2+1)) : 1 })\n if y>=2 and y<=7:\n #id14\n if ((\"n\" in tablero[x-1][y-1]) and tablero[x-2][y-2] == \"-\"):\n dict2.update({(str(x+1)+str(y+1)+str(x-2+1)+str(y-2+1)) : 1 })\n \n #COMER 0 FICHAS\n if x>=1 and x<=7:\n if y>=0 and y<=6:\n #id15\n if tablero[x-1][y+1] == '-':\n dict2.update({(str(x+1)+str(y+1)+str(x-1+1)+str(y+1+1)) : 0 })\n if y>=1 and y<=7:\n #id16\n if tablero[x-1][y-1] == '-':\n dict2.update({(str(x+1)+str(y+1)+str(x-1+1)+str(y-1+1)) : 0 })\n \n #MOVIMIENTOS FICHAS NEGRAS \n else:\n #COMER 3 FICHAS\n if x>=0 and x<=1: \n #id1\n if y>=2 and y<=7:\n if( \"b\" in tablero[x+1][y-1] and tablero[x+2][y-2] == '-' and \"b\" in tablero[x+3][y-1] \n and tablero[x+4][y] == '-' and \"b\" in tablero[x+5][y-1] and tablero[x+6][y-2] == '-'):\n dict2.update({(str(x+1)+str(y+1)+str(x+2+1)+str(y-2+1)+str(x+4+1)+str(y+1)+str(x+6+1)+str(y-2+1)) : 3 })\n \n #id2\n if y>=0 and y<=5:\n if( \"b\" in tablero[x+1][y+1] and tablero[x+2][y+2] == '-' and \"b\" in tablero[x+3][y+1] \n and tablero[x+4][y] == '-' and \"b\" in tablero[x+5][y+1] and tablero[x+6][y+2] == '-'):\n dict2.update({(str(x+1)+str(y+1)+str(x+2+1)+str(y+2+1)+str(x+4+1)+str(y+1)+str(x+6+1)+str(y+2+1)) : 3 })\n \n \n \n #id3\n if y<=6 and y>=7:\n if( \"b\" in tablero[x+1][y-1] and tablero[x+2][y-2] == '-' and \"b\" in tablero[x+3][y-3] \n and tablero[x+4][y-4] == '-' and \"b\" in tablero[x+5][y-5] and tablero[x+6][y-6] == '-'):\n dict2.update({(str(x+1)+str(y+1)+str(x+2+1)+str(y-2+1)+str(x+4+1)+str(y-4+1)+str(x+6+1)+str(y-6+1)) : 3 })\n \n #id4\n if y<=0 and y>=1:\n if( \"b\" in tablero[x+1][y+1] and tablero[x+2][y+2] 
== '-' and \"b\" in tablero[x+3][y+3] \n and tablero[x+4][y+4] == '-' and \"b\" in tablero[x+5][y+5] and tablero[x+6][y+6] == '-'):\n dict2.update({(str(x+1)+str(y+1)+str(x+2+1)+str(y+2+1)+str(x+4+1)+str(y+4+1)+str(x+6+1)+str(y+6+1)) : 3 })\n \n #id5\n if y<=0 and y>=1:\n if( \"b\" in tablero[x+1][y-1] and tablero[x+2][y-2] == '-' and \"b\" in tablero[x+3][y-3] \n and tablero[x+4][y-4] == '-' and \"b\" in tablero[x+5][y+1] and tablero[x+6][y+2] == '-'):\n dict2.update({(str(x+1)+str(y+1)+str(x+2+1)+str(y-2+1)+str(x+4+1)+str(y-4+1)+str(x+6+1)+str(y+2+1)) : 3 })\n #id6 \n if y<=0 and y>=3:\n if( \"b\" in tablero[x+1][y+1] and tablero[x+2][y+2] == '-' and \"b\" in tablero[x+3][y+3] \n and tablero[x+4][y+4] == '-' and \"b\" in tablero[x+5][y+4] and tablero[x+6][y+2] == '-'):\n dict2.update({(str(x+1)+str(y+1)+str(x+2+1)+str(y+2+1)+str(x+4+1)+str(y+4+1)+str(x+6+1)+str(y+2+1)) : 3 })\n \n if y<=2 and y>=5:\n #id7\n if( \"b\" in tablero[x+1][y+1] and tablero[x+2][y+2] == '-' and \"b\" in tablero[x+3][y+1] \n and tablero[x+4][y] == '-' and \"b\" in tablero[x+5][y-1] and tablero[x+6][y-2] == '-'):\n dict2.update({(str(x+1)+str(y+1)+str(x+2+1)+str(y+2+1)+str(x+4+1)+str(y+1)+str(x+6+1)+str(y-2+1)) : 3 })\n \n #id8\n if( \"b\" in tablero[x+1][y-1] and tablero[x+2][y-2] == '-' and \"b\" in tablero[x+3][y-1] \n and tablero[x+4][y] == '-' and \"b\" in tablero[x+5][y+1] and tablero[x+6][y+2] == '-'):\n dict2.update({(str(x+1)+str(y+1)+str(x+2+1)+str(y-2+1)+str(x+4+1)+str(y+1)+str(x+6+1)+str(y+2+1)) : 3 })\n \n #COMER 2 FICHAS\n if x>=0 and x<=3: \n #id9\n if y>=2 and y<=7:\n if( \"b\" in tablero[x+1][y-1] and tablero[x+2][y-2] == '-' and \"b\" in tablero[x+3][y-1] \n and tablero[x+4][y] == '-'):\n dict2.update({(str(x+1)+str(y+1)+str(x+2+1)+str(y-2+1)+str(x+4+1)+str(y+1)) : 2 })\n \n #id10\n if y>=0 and y<=5:\n if( \"b\" in tablero[x+1][y+1] and tablero[x+2][y+2] == '-' and \"b\" in tablero[x+3][y+1] \n and tablero[x+4][y] == '-'):\n dict2.update({(str(x+1)+str(y+1)+str(x+2+1)+str(y+2+1)+str(x+4+1)+str(y+1)) : 2 })\n #id11\n if y>=4 and y<=7:\n if( \"b\" in tablero[x+1][y-1] and tablero[x+2][y-2] == '-' and \"b\" in tablero[x+3][y-3] \n and tablero[x+4][y-4] == '-'):\n dict2.update({(str(x+1)+str(y+1)+str(x+2+1)+str(y-2+1)+str(x+4+1)+str(y-4+1)) : 2 })\n \n \n #id12\n if y>=0 and y<=3:\n if( \"b\" in tablero[x+1][y+1] and tablero[x+2][y+2] == '-' and \"b\" in tablero[x+3][y+3] \n and tablero[x+4][y+4] == '-'):\n dict2.update({(str(x+1)+str(y+1)+str(x+2+1)+str(y+2+1)+str(x+4+1)+str(y+4+1)) : 2 })\n \n #COMER 1 FICHAS \n if x>=0 and x<=5: \n #id13\n if y>=2 and y<=7:\n if( \"b\" in tablero[x+1][y-1] and tablero[x+2][y-2] == '-'):\n dict2.update({(str(x+1)+str(y+1)+str(x+2+1)+str(y-2+1)) : 1 })\n #id14\n if y>=0 and y<=5:\n if( \"b\" in tablero[x+1][y+1] and tablero[x+2][y+2] == '-'):\n dict2.update({(str(x+1)+str(y+1)+str(x+2+1)+str(y+2+1)) : 1 })\n #COMER 0 FICHAS\n if x>=0 and x<=6:\n #id15\n if y>=1 and y<=7:\n if tablero[x+1][y-1] == '-':\n dict2.update({(str(x+1)+str(y+1)+str(x+1+1)+str(y-1+1)) : 0 })\n #id16\n if y>=0 and y<=6:\n if tablero[x+1][y+1] == '-':\n dict2.update({(str(x+1)+str(y+1)+str(x+1+1)+str(y+1+1)) : 0 })\n \n \n \n if len(dict2) == 0:\n dict2 = '-'\n return dict2\n \n def realizar_movimiento(self, tablero, movimiento):\n \n if len(movimiento) == 4:\n if(abs(int(movimiento[0]) - int(movimiento[2])) == 1 and abs(int(movimiento[1]) - int(movimiento[3])) == 1):\n aux = tablero[int(movimiento[0])-1][int(movimiento[1])-1]\n tablero[int(movimiento[0])-1][int(movimiento[1])-1] = '-'\n 
tablero[int(movimiento[2])-1][int(movimiento[3])-1] = aux\n else:\n aux = tablero[int(movimiento[0])-1][int(movimiento[1])-1]\n tablero[int(movimiento[0])-1][int(movimiento[1])-1] = '-'\n tablero[int(movimiento[2])-1][int(movimiento[3])-1] = aux\n x = abs((int(movimiento[0])+int(movimiento[2]))//2)\n y = abs((int(movimiento[1])+int(movimiento[3]))//2)\n tablero[x-1, y-1] = '-'\n \n elif len(movimiento) == 6:\n \n aux = tablero[int(movimiento[0])-1][int(movimiento[1])-1]\n tablero[int(movimiento[0])-1][int(movimiento[1])-1] = '-'\n tablero[int(movimiento[4])-1][int(movimiento[5])-1] = aux\n x = abs((int(movimiento[0])+int(movimiento[2]))//2)\n y = abs((int(movimiento[1])+int(movimiento[3]))//2)\n x1 = abs((int(movimiento[2])+int(movimiento[4]))//2)\n y1 = abs((int(movimiento[3])+int(movimiento[5]))//2)\n tablero[x-1, y-1] = '-'\n tablero[x1-1, y1-1] = '-'\n \n else: \n aux = tablero[int(movimiento[0])-1][int(movimiento[1])-1]\n tablero[int(movimiento[0])-1][int(movimiento[1])-1] = '-'\n tablero[int(movimiento[6])-1][int(movimiento[7])-1] = aux\n \n x = abs((int(movimiento[0])+int(movimiento[2]))//2)\n y = abs((int(movimiento[1])+int(movimiento[3]))//2)\n x1 = abs((int(movimiento[2])+int(movimiento[4]))//2)\n y1 = abs((int(movimiento[3])+int(movimiento[5]))//2)\n x2 = abs((int(movimiento[4])+int(movimiento[6]))//2)\n y2 = abs((int(movimiento[5])+int(movimiento[7]))//2)\n tablero[x-1, y-1] = '-'\n tablero[x1-1, y1-1] = '-'\n tablero[x2-1, y2-1] = '-'\n \n return tablero\n \n def posibles_movimientos(self, tablero, dict_movimientos, jugador):\n \n lista = []\n if jugador == 'b':\n lista = [\"b1\", \"b2\", \"b3\", \"b4\", \"b5\", \"b6\", \"b7\", \"b8\", \"b9\",\"b10\", \"b11\", \"b12\"]\n else:\n lista = [\"n1\", \"n2\", \"n3\", \"n4\", \"n5\", \"n6\", \"n7\", \"n8\", \"n9\",\"n10\", \"n11\", \"n12\"]\n \n for ficha in lista:\n if ficha in dict_movimientos.keys():\n for x in range(len(tablero)):\n for y in range(len(tablero)):\n dict2 = {}\n if tablero[x][y] == ficha:\n dict2 = self.comprobar_movimiento(tablero, x, y, ficha)\n dict_movimientos.update({ficha: dict2})\n \n return dict_movimientos\n \n def fichas_comidas(self, move):\n \n if len(move) == 4:\n if(abs(int(move[0]) - int(move[2])) == 1 and abs(int(move[1]) - int(move[3])) == 1):\n fichas_comidas = \"\"\n else:\n x = abs((int(move[0])+int(move[2]))//2)\n y = abs((int(move[1])+int(move[3]))//2)\n string = str(x) + str(y)\n fichas_comidas = string\n \n elif len(move) == 6:\n x = abs((int(move[0])+int(move[2]))//2)\n y = abs((int(move[1])+int(move[3]))//2)\n x1 = abs((int(move[2])+int(move[4]))//2)\n y1 = abs((int(move[3])+int(move[5]))//2)\n string = str(x) + str(y) + str(x1) + str(y1)\n fichas_comidas = string \n \n else: \n x = abs((int(move[0])+int(move[2]))//2)\n y = abs((int(move[1])+int(move[3]))//2)\n x1 = abs((int(move[2])+int(move[4]))//2)\n y1 = abs((int(move[3])+int(move[5]))//2)\n x2 = abs((int(move[4])+int(move[6]))//2)\n y2 = abs((int(move[5])+int(move[7]))//2)\n string = str(x) + str(y) + str(x1) + str(y1) + str(x2) + str(y2)\n fichas_comidas = string\n \n return fichas_comidas\n" ]
[ [ "numpy.copy" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
konnase/DI-engine
[ "f803499cad191e9277b10e194132d74757bcfc8e", "f803499cad191e9277b10e194132d74757bcfc8e", "f803499cad191e9277b10e194132d74757bcfc8e", "f803499cad191e9277b10e194132d74757bcfc8e", "f803499cad191e9277b10e194132d74757bcfc8e", "f803499cad191e9277b10e194132d74757bcfc8e" ]
[ "dizoo/classic_control/cartpole/envs/test_cartpole_env.py", "dizoo/multiagent_particle/envs/particle_env.py", "dizoo/pomdp/envs/atari_env.py", "dizoo/pybullet/envs/pybullet_env.py", "ding/utils/log_helper.py", "ding/entry/application_entry.py" ]
[ "import pytest\nimport numpy as np\nfrom dizoo.classic_control.cartpole.envs import CartPoleEnv\n\n\[email protected]\nclass TestCartPoleEnv:\n\n def test_naive(self):\n env = CartPoleEnv({})\n env.seed(314, dynamic_seed=False)\n assert env._seed == 314\n obs = env.reset()\n assert obs.shape == (4, )\n act_val = env.info().act_space.value\n min_val, max_val = act_val['min'], act_val['max']\n for _ in range(5):\n env.reset()\n np.random.seed(314)\n print('=' * 60)\n for i in range(10):\n random_action = np.random.randint(min_val, max_val, size=(1, ))\n timestep = env.step(random_action)\n print(timestep)\n assert isinstance(timestep.obs, np.ndarray)\n assert isinstance(timestep.done, bool)\n assert timestep.obs.shape == (4, )\n assert timestep.reward.shape == (1, )\n assert timestep.reward >= env.info().rew_space.value['min']\n assert timestep.reward <= env.info().rew_space.value['max']\n print(env.info())\n env.close()\n", "from collections import namedtuple\nfrom typing import Any, Optional\nfrom easydict import EasyDict\nimport copy\nimport numpy as np\nimport torch\n\nfrom ding.envs import BaseEnv, BaseEnvTimestep, BaseEnvInfo\nfrom ding.envs.common.env_element import EnvElement, EnvElementInfo\nfrom ding.utils import ENV_REGISTRY\nfrom ding.torch_utils import to_tensor, to_ndarray, to_list\nfrom dizoo.multiagent_particle.envs.make_env import make_env\nfrom dizoo.multiagent_particle.envs.multiagent.multi_discrete import MultiDiscrete\nimport gym\nfrom gym import wrappers\n\n\nclass ParticleEnv(BaseEnv):\n\n def __init__(self, cfg: dict) -> None:\n self._cfg = cfg\n self._env_name = cfg.get(\"env_name\", \"simple\")\n self._num_agents = cfg.get(\"num_agents\")\n self._num_landmarks = cfg.get(\"num_landmarks\")\n self._env = make_env(self._env_name, self._num_agents, self._num_landmarks)\n self._env.discrete_action_input = cfg.get('discrete_action', True)\n self._max_step = cfg.get('max_step', 100)\n # self._env.discrete_action_input = True\n self._env.force_discrete_action = True\n self.agent_num = self._env.n\n\n def reset(self) -> torch.Tensor:\n self._step_count = 0\n if hasattr(self, '_seed'):\n # Note: the real env instance only has a empty seed method, only pass\n self._env.seed(self._seed)\n obs_n = self._env.reset()\n obs_n = to_ndarray(obs_n, np.float32)\n return obs_n\n\n def close(self) -> None:\n # Note: the real env instance only has a empty close method, only pass\n self._env.close()\n\n def seed(self, seed: int, dynamic_seed: bool = False) -> None:\n if dynamic_seed:\n raise NotImplementedError\n self._seed = seed\n\n def _process_action(self, action: list):\n return to_list(action)\n\n def step(self, action: list) -> BaseEnvTimestep:\n action = self._process_action(action)\n obs_n, rew_n, done_n, info_n = self._env.step(action)\n obs_n = [to_ndarray(obs, np.float32) for obs in obs_n]\n rew_n = [to_ndarray(rew, np.float32) for rew in rew_n]\n if self._step_count >= self._max_step:\n done_n = True\n return BaseEnvTimestep(obs_n, rew_n, done_n, info_n)\n\n def info(self) -> BaseEnvInfo:\n T = EnvElementInfo\n act_space = {}\n obs_space = {}\n rew_space = {}\n for i in range(self._env.n):\n obs_space['agent' + str(i)] = T(\n self._env.observation_space[i].shape,\n {\n 'min': -np.inf,\n 'max': +np.inf,\n 'dtype': np.float32\n },\n )\n rew_space['agent' + str(i)] = T(\n (1, ),\n {\n 'min': -np.inf,\n 'max': +np.inf,\n 'dtype': np.float32\n },\n )\n act = self._env.action_space[i]\n if isinstance(act, MultiDiscrete):\n act_space['agent' + str(i)] = T(\n (act.shape, ),\n {\n 
'min': [int(l) for l in list(act.low)],\n 'max': [int(h) for h in list(act.high)]\n },\n )\n elif isinstance(act, gym.spaces.Tuple):\n # are not used in our environment yet\n act_space['agent' + str(i)] = T(\n (len(act.gym.spaces), ),\n {'space': act.gym.spaces},\n )\n elif isinstance(act, gym.spaces.Discrete):\n act_space['agent' + str(i)] = T(\n (1, ),\n {\n 'min': 0,\n 'max': act.n - 1,\n 'dtype': int\n },\n )\n elif isinstance(act, gym.spaces.Box):\n act_space['agent' + str(i)] = T(\n act.shape,\n {\n 'min': act.low,\n 'max': act.high,\n 'dtype': act.dtype\n },\n )\n return BaseEnvInfo(\n agent_num=self.agent_num, obs_space=obs_space, act_space=act_space, rew_space=rew_space, use_wrappers=None\n )\n\n def __repr__(self) -> str:\n return \"DI-engine wrapped Multiagent particle Env({})\".format(self._cfg.env_name)\n\n\nCNEnvTimestep = namedtuple('CNEnvTimestep', ['obs', 'reward', 'done', 'info'])\nCNEnvInfo = namedtuple('CNEnvInfo', ['agent_num', 'obs_space', 'act_space', 'rew_space'])\n\n\n@ENV_REGISTRY.register('modified_predator_prey')\nclass ModifiedPredatorPrey(BaseEnv):\n\n def __init__(self, cfg: dict) -> None:\n self._cfg = cfg\n self._env_name = 'simple_tagv1'\n self._n_predator = cfg.get(\"n_predator\", 2)\n self._n_prey = cfg.get(\"n_prey\", 1)\n self._n_agent = self._n_predator + self._n_prey\n self._num_landmarks = cfg.get(\"num_landmarks\", 3)\n self._external_cfg = {\n 'num_catch': cfg.get('num_catch', 1),\n 'reward_right_catch': cfg.get('reward_right_catch', 1),\n 'reward_wrong_catch': cfg.get('reward_wrong_catch', 0),\n 'collision_ratio': cfg.get('collision_ratio', 1)\n }\n self._env = make_env(self._env_name, self._n_agent, self._num_landmarks, True, self._n_prey, self._external_cfg)\n self._env.discrete_action_input = cfg.get('discrete_action', True)\n self._max_step = cfg.get('max_step', 100)\n self.obs_alone = cfg.get('obs_alone', False)\n self._env.force_discrete_action = cfg.get('force_discrete_action', False)\n self.action_dim = 5\n # obs = np.concatenate([agent.state.p_vel] + [agent.state.p_pos] + other_pos + entity_pos)\n self.obs_dim = 2 + 2 + (self._n_agent - 1) * 2 + self._num_landmarks * 2\n self.global_obs_dim = self._n_agent * 2 + self._num_landmarks * 2 + self._n_agent * 2\n self.obs_alone_dim = 2 + 2 + (self._num_landmarks) * 2\n\n def reset(self) -> torch.Tensor:\n self._step_count = 0\n self._sum_reward = 0\n # if hasattr(self, '_seed'):\n # # Note: the real env instance only has a empty seed method, only pass\n # self._env.seed = self._seed\n obs_n = self._env.reset()\n obs_n = self.process_obs(obs_n)\n return obs_n\n\n def close(self) -> None:\n # Note: the real env instance only has a empty close method, only pass\n self._env.close()\n\n def seed(self, seed: int, dynamic_seed: bool = False) -> None:\n self._seed = seed\n if dynamic_seed:\n raise NotImplementedError\n if hasattr(self, '_seed'):\n # Note: the real env instance only has a empty seed method, only pass\n self._env.seed = self._seed\n\n def _process_action(self, action: list):\n return to_list(action) + self._random_action()\n\n # generate random action for prey\n def _random_action(self):\n action = []\n for i in range(self._n_prey):\n action.append(np.random.choice(self.action_dim))\n return action\n\n def process_obs(self, obs: list):\n ret = {}\n obs = obs[:self._n_predator]\n obs = np.array(obs).astype(np.float32)\n ret['agent_state'] = np.concatenate(\n [obs[:, 0:4 + (self._n_agent - 1) * 2], obs[:, -self._num_landmarks * 2:]], 1\n )\n ret['global_state'] = np.concatenate(\n [obs[:, 
0:4].flatten(), obs[0, -self._n_prey * 4 - self._num_landmarks * 2:]]\n )\n if self.obs_alone:\n ret['agent_alone_state'] = np.concatenate([obs[:, 0:4], obs[:, -self._num_landmarks * 2:]], 1)\n ret['agent_alone_padding_state'] = np.concatenate(\n [\n obs[:, 0:4],\n np.zeros((self._n_predator, (self._n_agent - 1) * 2), np.float32), obs[:, -self._num_landmarks * 2:]\n ], 1\n )\n ret['action_mask'] = np.ones((self._n_predator, self.action_dim))\n return ret\n\n # note: the reward is shared between all the agents\n # (see dizoo/multiagent_particle/envs/multiagent/scenarios/simple_tagv1.py)\n # If you need to make the reward different to each agent, change the code there\n def step(self, action: list) -> BaseEnvTimestep:\n # self._env.render()\n self._step_count += 1\n action = self._process_action(action)\n obs_n, rew_n, _, info_n = self._env.step(action)\n obs_n = self.process_obs(obs_n)\n rew_n = np.array([sum(rew_n)])\n info = info_n\n\n # collide_sum = 0\n # for i in range(self._n_agent):\n # collide_sum += info['n'][i][1]\n rew_n = rew_n / (self._max_step * self._n_predator)\n self._sum_reward += rew_n\n if self._step_count >= self._max_step:\n done_n = True\n else:\n done_n = False\n if done_n:\n info['final_eval_reward'] = self._sum_reward\n return CNEnvTimestep(obs_n, rew_n, done_n, info)\n\n def info(self):\n T = EnvElementInfo\n if self._obs_alone:\n return CNEnvInfo(\n agent_num=self._n_predator,\n obs_space=T(\n {\n 'agent_state': (self._n_predator, self.obs_dim),\n 'global_state': (self.global_obs_dim, ),\n 'action_mask': (self._n_predator, self.action_dim)\n },\n None,\n ),\n act_space=T(\n (self._n_predator, self.action_dim),\n {\n 'min': 0,\n 'max': self.action_dim,\n 'dtype': int\n },\n ),\n rew_space=T(\n (1, ),\n None,\n )\n )\n return CNEnvInfo(\n agent_num=self._n_predator,\n obs_space=T(\n {\n 'agent_state': (self._n_predator, self.obs_dim),\n 'agent_alone_state': (self._n_predator, self.obs_alone_dim),\n 'agent_alone_padding_state': (self._n_predator, self.obs_dim),\n 'global_state': (self.global_obs_dim, ),\n 'action_mask': (self._n_predator, self.action_dim)\n },\n None,\n ),\n act_space=T(\n (self._n_predator, self.action_dim),\n {\n 'min': 0,\n 'max': self.action_dim,\n 'dtype': int\n },\n ),\n rew_space=T(\n (1, ),\n None,\n )\n )\n\n def __repr__(self) -> str:\n return \"DI-engine wrapped Multiagent particle Env: CooperativeNavigation({})\".format(self._env_name)\n\n def enable_save_replay(self, replay_path: Optional[str] = None) -> None:\n if replay_path is None:\n replay_path = './video'\n self._replay_path = replay_path\n self._env = wrappers.Monitor(self._env, self._replay_path, video_callable=lambda episode_id: True, force=True)\n\n\n# same structure as smac env\n@ENV_REGISTRY.register('cooperative_navigation')\nclass CooperativeNavigation(BaseEnv):\n\n def __init__(self, cfg: dict) -> None:\n self._cfg = cfg\n self._env_name = 'simple_spread'\n self._n_agent = cfg.get(\"n_agent\", 5)\n self._num_landmarks = cfg.get(\"num_landmarks\", 5)\n self._env = make_env(self._env_name, self._n_agent, self._num_landmarks, True)\n self._env.discrete_action_input = cfg.get('discrete_action', True)\n self._max_step = cfg.get('max_step', 100)\n self._collide_penalty = cfg.get('collide_penal', self._n_agent)\n self._agent_obs_only = cfg.get('agent_obs_only', False)\n self._env.force_discrete_action = cfg.get('force_discrete_action', False)\n self.action_dim = 5\n # obs = np.concatenate([agent.state.p_vel] + [agent.state.p_pos] + other_pos + entity_pos)\n self.obs_dim = 2 + 2 
+ (self._n_agent - 1) * 2 + self._num_landmarks * 2\n self.global_obs_dim = self._n_agent * 2 + self._num_landmarks * 2 + self._n_agent * 2\n self.obs_alone_dim = 2 + 2 + (self._num_landmarks) * 2\n\n def reset(self) -> torch.Tensor:\n self._step_count = 0\n self._sum_reward = 0\n if hasattr(self, '_seed'):\n # Note: the real env instance only has a empty seed method, only pass\n self._env.seed = self._seed\n obs_n = self._env.reset()\n obs_n = self.process_obs(obs_n)\n return obs_n\n\n def close(self) -> None:\n # Note: the real env instance only has a empty close method, only pass\n self._env.close()\n\n def seed(self, seed: int, dynamic_seed: bool = False) -> None:\n self._seed = seed\n if dynamic_seed:\n raise NotImplementedError\n if hasattr(self, '_seed'):\n # Note: the real env instance only has a empty seed method, only pass\n self._env.seed = self._seed\n\n def _process_action(self, action: list):\n return to_list(action)\n\n def process_obs(self, obs: list):\n ret = {}\n obs = np.array(obs).astype(np.float32)\n if self._agent_obs_only:\n return obs\n ret['agent_state'] = obs\n ret['global_state'] = np.concatenate((obs[0, 2:], obs[:, 0:2].flatten()))\n ret['agent_alone_state'] = np.concatenate([obs[:, 0:4], obs[:, -self._num_landmarks * 2:]], 1)\n ret['agent_alone_padding_state'] = np.concatenate(\n [\n obs[:, 0:4],\n np.zeros((self._n_agent, (self._n_agent - 1) * 2), np.float32), obs[:, -self._num_landmarks * 2:]\n ], 1\n )\n ret['action_mask'] = np.ones((self._n_agent, self.action_dim))\n return ret\n\n # note: the reward is shared between all the agents\n # (see dizoo/multiagent_particle/envs/multiagent/scenarios/simple_spread.py)\n # If you need to make the reward different to each agent, change the code there\n def step(self, action: list) -> BaseEnvTimestep:\n self._step_count += 1\n action = self._process_action(action)\n obs_n, rew_n, _, info_n = self._env.step(action)\n obs_n = self.process_obs(obs_n)\n rew_n = np.array([sum(rew_n)])\n info = info_n\n\n collide_sum = 0\n for i in range(self._n_agent):\n collide_sum += info['n'][i][1]\n rew_n += collide_sum * (1.0 - self._collide_penalty)\n rew_n = rew_n / (self._max_step * self._n_agent)\n self._sum_reward += rew_n\n occupied_landmarks = info['n'][0][3]\n if self._step_count >= self._max_step or occupied_landmarks >= self._n_agent \\\n or occupied_landmarks >= self._num_landmarks:\n done_n = True\n else:\n done_n = False\n if done_n:\n info['final_eval_reward'] = self._sum_reward\n return CNEnvTimestep(obs_n, rew_n, done_n, info)\n\n def info(self):\n T = EnvElementInfo\n if self._agent_obs_only:\n return CNEnvInfo(\n agent_num=self._n_agent,\n obs_space=T(\n (self._n_agent, self.obs_dim),\n None,\n ),\n act_space=T(\n (self._n_agent, self.action_dim),\n {\n 'min': 0,\n 'max': self.action_dim,\n 'dtype': int\n },\n ),\n rew_space=T(\n (1, ),\n None,\n )\n )\n return CNEnvInfo(\n agent_num=self._n_agent,\n obs_space=T(\n {\n 'agent_state': (self._n_agent, self.obs_dim),\n 'agent_alone_state': (self._n_agent, self.obs_alone_dim),\n 'agent_alone_padding_state': (self._n_agent, self.obs_dim),\n 'global_state': (self.global_obs_dim, ),\n 'action_mask': (self._n_agent, self.action_dim)\n },\n None,\n ),\n act_space=T(\n (self._n_agent, self.action_dim),\n {\n 'min': 0,\n 'max': self.action_dim,\n 'dtype': int\n },\n ),\n rew_space=T(\n (1, ),\n None,\n )\n )\n\n def __repr__(self) -> str:\n return \"DI-engine wrapped Multiagent particle Env: CooperativeNavigation({})\".format(self._env_name)\n\n def enable_save_replay(self, 
replay_path: Optional[str] = None) -> None:\n if replay_path is None:\n replay_path = './video'\n self._replay_path = replay_path\n self._env = wrappers.Monitor(self._env, self._replay_path, video_callable=lambda episode_id: True, force=True)\n", "from typing import Any, List, Union, Sequence\nimport copy\nimport torch\nimport gym\nimport numpy as np\nfrom ding.envs import BaseEnv, BaseEnvTimestep, BaseEnvInfo\nfrom ding.envs.common.env_element import EnvElement, EnvElementInfo\nfrom ding.utils import ENV_REGISTRY\nfrom ding.torch_utils import to_tensor, to_ndarray, to_list\nfrom .atari_wrappers import wrap_deepmind\n\nfrom pprint import pprint\n\nPOMDP_INFO_DICT = {\n 'Pong-ramNoFrameskip-v4': BaseEnvInfo(\n agent_num=1,\n obs_space=EnvElementInfo(\n shape=(128, ),\n value={\n 'min': 0,\n 'max': 255,\n 'dtype': np.float32\n },\n ),\n act_space=EnvElementInfo(\n shape=(6, ),\n value={\n 'min': 0,\n 'max': 6,\n 'dtype': np.float32\n },\n ),\n rew_space=EnvElementInfo(\n shape=1,\n value={\n 'min': -1,\n 'max': 1,\n 'dtype': np.float32\n },\n ),\n use_wrappers=None,\n ),\n}\n\n\ndef PomdpEnv(cfg, only_info=False):\n '''\n For debug purpose, create an env follow openai gym standard so it can be widely test by\n other library with same environment setting in DI-engine\n env = PomdpEnv(cfg)\n obs = env.reset()\n obs, reward, done, info = env.step(action)\n '''\n env = wrap_deepmind(\n cfg.env_id,\n frame_stack=cfg.frame_stack,\n episode_life=cfg.is_train,\n clip_rewards=cfg.is_train,\n warp_frame=cfg.warp_frame,\n use_ram=cfg.use_ram,\n render=cfg.render,\n pomdp=cfg.pomdp,\n only_info=only_info,\n )\n return env\n\n\n@ENV_REGISTRY.register('pomdp')\nclass PomdpAtariEnv(BaseEnv):\n\n def __init__(self, cfg: dict) -> None:\n self._cfg = cfg\n self._init_flag = False\n\n def reset(self) -> Sequence:\n if not self._init_flag:\n self._env = self._make_env(only_info=False)\n self._init_flag = True\n if hasattr(self, '_seed') and hasattr(self, '_dynamic_seed') and self._dynamic_seed:\n np_seed = 100 * np.random.randint(1, 1000)\n self._env.seed(self._seed + np_seed)\n elif hasattr(self, '_seed'):\n self._env.seed(self._seed)\n obs = self._env.reset()\n obs = to_ndarray(obs)\n self._final_eval_reward = 0.\n return obs\n\n def close(self) -> None:\n if self._init_flag:\n self._env.close()\n self._init_flag = False\n\n def seed(self, seed: int, dynamic_seed: bool = True) -> None:\n self._seed = seed\n self._dynamic_seed = dynamic_seed\n np.random.seed(self._seed)\n\n def step(self, action: np.ndarray) -> BaseEnvTimestep:\n assert isinstance(action, np.ndarray), type(action)\n obs, rew, done, info = self._env.step(action)\n self._final_eval_reward += rew\n obs = to_ndarray(obs)\n rew = to_ndarray([rew]) # wrapped to be transfered to a Tensor with shape (1,)\n if done:\n info['final_eval_reward'] = self._final_eval_reward\n return BaseEnvTimestep(obs, rew, done, info)\n\n def info(self) -> BaseEnvInfo:\n if self._cfg.env_id in POMDP_INFO_DICT:\n info = copy.deepcopy(POMDP_INFO_DICT[self._cfg.env_id])\n info.use_wrappers = self._make_env(only_info=True)\n return info\n else:\n raise NotImplementedError('{} not found in POMDP_INFO_DICT [{}]'\\\n .format(self._cfg.env_id, POMDP_INFO_DICT.keys()))\n\n def _make_env(self, only_info=False):\n return wrap_deepmind(\n self._cfg.env_id,\n episode_life=self._cfg.is_train,\n clip_rewards=self._cfg.is_train,\n pomdp=self._cfg.pomdp,\n frame_stack=self._cfg.frame_stack,\n warp_frame=self._cfg.warp_frame,\n use_ram=self._cfg.use_ram,\n only_info=only_info,\n )\n\n def 
__repr__(self) -> str:\n return \"DI-engine POMDP Atari Env({})\".format(self._cfg.env_id)\n\n @staticmethod\n def create_collector_env_cfg(cfg: dict) -> List[dict]:\n collector_env_num = cfg.pop('collector_env_num', 1)\n cfg = copy.deepcopy(cfg)\n cfg.is_train = True\n return [cfg for _ in range(collector_env_num)]\n\n @staticmethod\n def create_evaluator_env_cfg(cfg: dict) -> List[dict]:\n evaluator_env_num = cfg.pop('evaluator_env_num', 1)\n cfg = copy.deepcopy(cfg)\n cfg.is_train = False\n return [cfg for _ in range(evaluator_env_num)]\n", "from typing import Any, Union, List\nimport copy\nimport torch\nimport numpy as np\n\nfrom ding.envs import BaseEnv, BaseEnvTimestep, BaseEnvInfo, update_shape\nfrom ding.envs.common.env_element import EnvElement, EnvElementInfo\nfrom ding.envs.common.common_function import affine_transform\nfrom ding.torch_utils import to_tensor, to_ndarray, to_list\nfrom .pybullet_wrappers import wrap_pybullet\nfrom ding.utils import ENV_REGISTRY\n\nPybullet_INFO_DICT = {\n # pybullet env\n 'InvertedPendulumMuJoCoEnv-v0': BaseEnvInfo(\n agent_num=1,\n obs_space=EnvElementInfo(\n shape=(4, ),\n value={\n 'min': np.float64(\"-inf\"),\n 'max': np.float64(\"inf\"),\n 'dtype': np.float32\n },\n ),\n act_space=EnvElementInfo(\n shape=(1, ),\n value={\n 'min': -1.0,\n 'max': 1.0,\n 'dtype': np.float32\n },\n ),\n rew_space=EnvElementInfo(\n shape=1,\n value={\n 'min': np.float64(\"-inf\"),\n 'max': np.float64(\"inf\")\n },\n ),\n use_wrappers=None,\n ),\n 'InvertedDoublePendulumMuJoCoEnv-v0': BaseEnvInfo(\n agent_num=1,\n obs_space=EnvElementInfo(\n shape=(11, ),\n value={\n 'min': np.float64(\"-inf\"),\n 'max': np.float64(\"inf\"),\n 'dtype': np.float32\n },\n ),\n act_space=EnvElementInfo(\n shape=(1, ),\n value={\n 'min': -1.0,\n 'max': 1.0,\n 'dtype': np.float32\n },\n ),\n rew_space=EnvElementInfo(\n shape=1,\n value={\n 'min': np.float64(\"-inf\"),\n 'max': np.float64(\"inf\")\n },\n ),\n use_wrappers=None,\n ),\n 'Walker2DMuJoCoEnv-v0': BaseEnvInfo(\n agent_num=1,\n obs_space=EnvElementInfo(\n shape=(17, ),\n value={\n 'min': np.float64(\"-inf\"),\n 'max': np.float64(\"inf\"),\n 'dtype': np.float32\n },\n ),\n act_space=EnvElementInfo(\n shape=(6, ),\n value={\n 'min': -1.0,\n 'max': 1.0,\n 'dtype': np.float32\n },\n ),\n rew_space=EnvElementInfo(\n shape=1,\n value={\n 'min': np.float64(\"-inf\"),\n 'max': np.float64(\"inf\")\n },\n ),\n use_wrappers=None,\n ),\n 'Walker2DPyBulletEnv-v0': BaseEnvInfo(\n agent_num=1,\n obs_space=EnvElementInfo(\n shape=(22, ),\n value={\n 'min': np.float64(\"-inf\"),\n 'max': np.float64(\"inf\"),\n 'dtype': np.float32\n },\n ),\n act_space=EnvElementInfo(\n shape=(6, ),\n value={\n 'min': -1.0,\n 'max': 1.0,\n 'dtype': np.float32\n },\n ),\n rew_space=EnvElementInfo(\n shape=1,\n value={\n 'min': np.float64(\"-inf\"),\n 'max': np.float64(\"inf\")\n },\n ),\n use_wrappers=None,\n ),\n 'HalfCheetahMuJoCoEnv-v0': BaseEnvInfo(\n agent_num=1,\n obs_space=EnvElementInfo(\n shape=(17, ),\n value={\n 'min': np.float64(\"-inf\"),\n 'max': np.float64(\"inf\"),\n 'dtype': np.float32\n },\n ),\n act_space=EnvElementInfo(\n shape=(6, ),\n value={\n 'min': -1.0,\n 'max': 1.0,\n 'dtype': np.float32\n },\n ),\n rew_space=EnvElementInfo(\n shape=1,\n value={\n 'min': np.float64(\"-inf\"),\n 'max': np.float64(\"inf\")\n },\n ),\n use_wrappers=None,\n ),\n 'HalfCheetahPyBulletEnv-v0': BaseEnvInfo(\n agent_num=1,\n obs_space=EnvElementInfo(\n shape=(26, ),\n value={\n 'min': np.float64(\"-inf\"),\n 'max': np.float64(\"inf\"),\n 'dtype': np.float32\n 
},\n ),\n act_space=EnvElementInfo(\n shape=(6, ),\n value={\n 'min': -1.0,\n 'max': 1.0,\n 'dtype': np.float32\n },\n ),\n rew_space=EnvElementInfo(\n shape=1,\n value={\n 'min': np.float64(\"-inf\"),\n 'max': np.float64(\"inf\")\n },\n ),\n use_wrappers=None,\n ),\n 'AntMuJoCoEnv-v0': BaseEnvInfo(\n agent_num=1,\n obs_space=EnvElementInfo(\n shape=(111, ),\n value={\n 'min': np.float64(\"-inf\"),\n 'max': np.float64(\"inf\"),\n 'dtype': np.float32\n },\n ),\n act_space=EnvElementInfo(\n shape=(8, ),\n value={\n 'min': -1.0,\n 'max': 1.0,\n 'dtype': np.float32\n },\n ),\n rew_space=EnvElementInfo(\n shape=1,\n value={\n 'min': np.float64(\"-inf\"),\n 'max': np.float64(\"inf\")\n },\n ),\n use_wrappers=None,\n ),\n 'AntPyBulletEnv-v0': BaseEnvInfo(\n agent_num=1,\n obs_space=EnvElementInfo(\n shape=(28, ),\n value={\n 'min': np.float64(\"-inf\"),\n 'max': np.float64(\"inf\"),\n 'dtype': np.float32\n },\n ),\n act_space=EnvElementInfo(\n shape=(8, ),\n value={\n 'min': -1.0,\n 'max': 1.0,\n 'dtype': np.float32\n },\n ),\n rew_space=EnvElementInfo(\n shape=1,\n value={\n 'min': np.float64(\"-inf\"),\n 'max': np.float64(\"inf\")\n },\n ),\n use_wrappers=None,\n ),\n 'HopperMuJoCoEnv-v0': BaseEnvInfo(\n agent_num=1,\n obs_space=EnvElementInfo(\n shape=(11, ),\n value={\n 'min': np.float64(\"-inf\"),\n 'max': np.float64(\"inf\"),\n 'dtype': np.float32\n },\n ),\n act_space=EnvElementInfo(\n shape=(3, ),\n value={\n 'min': -1.0,\n 'max': 1.0,\n 'dtype': np.float32\n },\n ),\n rew_space=EnvElementInfo(\n shape=1,\n value={\n 'min': np.float64(\"-inf\"),\n 'max': np.float64(\"inf\")\n },\n ),\n use_wrappers=None,\n ),\n 'HopperPyBulletEnv-v0': BaseEnvInfo(\n agent_num=1,\n obs_space=EnvElementInfo(\n shape=(15, ),\n value={\n 'min': np.float64(\"-inf\"),\n 'max': np.float64(\"inf\"),\n 'dtype': np.float32\n },\n ),\n act_space=EnvElementInfo(\n shape=(3, ),\n value={\n 'min': -1.0,\n 'max': 1.0,\n 'dtype': np.float32\n },\n ),\n rew_space=EnvElementInfo(\n shape=1,\n value={\n 'min': np.float64(\"-inf\"),\n 'max': np.float64(\"inf\")\n },\n ),\n use_wrappers=None,\n ),\n}\n\n\n@ENV_REGISTRY.register('pybullet')\nclass PybulletEnv(BaseEnv):\n\n def __init__(self, cfg: dict) -> None:\n self._cfg = cfg\n self._use_act_scale = cfg.use_act_scale\n self._init_flag = False\n\n def reset(self) -> torch.FloatTensor:\n if not self._init_flag:\n self._env = self._make_env(only_info=False)\n self._init_flag = True\n if hasattr(self, '_seed') and hasattr(self, '_dynamic_seed') and self._dynamic_seed:\n np_seed = 100 * np.random.randint(1, 1000)\n self._env.seed(self._seed + np_seed)\n elif hasattr(self, '_seed'):\n self._env.seed(self._seed)\n obs = self._env.reset()\n obs = to_ndarray(obs).astype('float32')\n self._final_eval_reward = 0.\n return obs\n\n def close(self) -> None:\n if self._init_flag:\n self._env.close()\n self._init_flag = False\n\n def seed(self, seed: int, dynamic_seed: bool = True) -> None:\n self._seed = seed\n self._dynamic_seed = dynamic_seed\n np.random.seed(self._seed)\n\n def step(self, action: Union[torch.Tensor, np.ndarray, list]) -> BaseEnvTimestep:\n action = to_ndarray(action)\n if self._use_act_scale:\n action_range = self.info().act_space.value\n action = affine_transform(action, min_val=action_range['min'], max_val=action_range['max'])\n obs, rew, done, info = self._env.step(action)\n self._final_eval_reward += rew\n obs = to_ndarray(obs).astype('float32')\n rew = to_ndarray([rew]) # wrapped to be transfered to a Tensor with shape (1,)\n if done:\n info['final_eval_reward'] = 
self._final_eval_reward\n return BaseEnvTimestep(obs, rew, done, info)\n\n def info(self) -> BaseEnvInfo:\n if self._cfg.env_id in Pybullet_INFO_DICT:\n info = copy.deepcopy(Pybullet_INFO_DICT[self._cfg.env_id])\n info.use_wrappers = self._make_env(only_info=True)\n obs_shape, act_shape, rew_shape = update_shape(\n info.obs_space.shape, info.act_space.shape, info.rew_space.shape, info.use_wrappers.split('\\n')\n )\n info.obs_space.shape = obs_shape\n info.act_space.shape = act_shape\n info.rew_space.shape = rew_shape\n return info\n else:\n raise NotImplementedError('{} not found in Pybullet_INFO_DICT [{}]'\\\n .format(self._cfg.env_id, Pybullet_INFO_DICT.keys()))\n\n def _make_env(self, only_info=False):\n return wrap_pybullet(\n self._cfg.env_id,\n norm_obs=self._cfg.get('norm_obs', None),\n norm_reward=self._cfg.get('norm_reward', None),\n only_info=only_info\n )\n\n def __repr__(self) -> str:\n return \"DI-engine Pybullet Env({})\".format(self._cfg.env_id)\n\n @staticmethod\n def create_collector_env_cfg(cfg: dict) -> List[dict]:\n collector_cfg = copy.deepcopy(cfg)\n collector_env_num = collector_cfg.pop('collector_env_num', 1)\n return [collector_cfg for _ in range(collector_env_num)]\n\n @staticmethod\n def create_evaluator_env_cfg(cfg: dict) -> List[dict]:\n evaluator_cfg = copy.deepcopy(cfg)\n evaluator_env_num = evaluator_cfg.pop('evaluator_env_num', 1)\n evaluator_cfg.norm_reward.use_norm = False\n return [evaluator_cfg for _ in range(evaluator_env_num)]\n", "import json\nimport logging\nimport os\nimport numpy as np\nimport yaml\nfrom tabulate import tabulate\nfrom tensorboardX import SummaryWriter\nfrom typing import Optional, Tuple, Union, Dict, Any\n\n\ndef build_logger(\n path: str,\n name: Optional[str] = None,\n need_tb: bool = True,\n need_text: bool = True,\n text_level: Union[int, str] = logging.INFO\n) -> Tuple[Optional[logging.Logger], Optional['SummaryWriter']]: # noqa\n r'''\n Overview:\n Build text logger and tensorboard logger.\n Arguments:\n - path (:obj:`str`): Logger(``Textlogger`` & ``SummaryWriter``)'s saved dir\n - name (:obj:`str`): The logger file name\n - need_tb (:obj:`bool`): Whether ``SummaryWriter`` instance would be created and returned\n - need_text (:obj:`bool`): Whether ``loggingLogger`` instance would be created and returned\n - text_level (:obj:`int`` or :obj:`str`): Logging level of ``logging.Logger``, default set to ``logging.INFO``\n Returns:\n - logger (:obj:`Optional[logging.Logger]`): Logger that displays terminal output\n - tb_logger (:obj:`Optional['SummaryWriter']`): Saves output to tfboard, only return when ``need_tb``.\n '''\n if name is None:\n name = 'default'\n logger = LoggerFactory.create_logger(path, name=name) if need_text else None\n tb_name = name + '_tb_logger'\n tb_logger = SummaryWriter(os.path.join(path, tb_name)) if need_tb else None\n return logger, tb_logger\n\n\nclass LoggerFactory(object):\n\n @classmethod\n def create_logger(cls, path: str, name: str = 'default', level: Union[int, str] = logging.INFO) -> logging.Logger:\n r\"\"\"\n Overview:\n Create logger using logging\n Arguments:\n - name (:obj:`str`): Logger's name\n - path (:obj:`str`): Logger's save dir\n - level (:obj:`int` or :obj:`str`): Used to set the level. 
Reference: ``Logger.setLevel`` method.\n Returns:\n - (:obj:`logging.Logger`): new logging logger\n \"\"\"\n name += '_logger'\n # ensure the path exists\n try:\n os.makedirs(path)\n except FileExistsError:\n pass\n logger = logging.getLogger(name)\n logger_file_path = os.path.join(path, name + '.txt')\n if not logger.handlers:\n formatter = logging.Formatter('[%(asctime)s][%(filename)15s][line:%(lineno)4d][%(levelname)8s] %(message)s')\n fh = logging.FileHandler(logger_file_path, 'a')\n fh.setFormatter(formatter)\n logger.setLevel(level)\n logger.addHandler(fh)\n logger.get_tabulate_vars = LoggerFactory.get_tabulate_vars\n logger.get_tabulate_vars_hor = LoggerFactory.get_tabulate_vars_hor\n return logger\n\n @staticmethod\n def get_tabulate_vars(variables: Dict[str, Any]) -> str:\n r\"\"\"\n Overview:\n Get the text description in tabular form of all vars\n Arguments:\n - variables (:obj:`List[str]`): Names of the vars to query.\n Returns:\n - string (:obj:`str`): Text description in tabular form of all vars\n \"\"\"\n headers = [\"Name\", \"Value\"]\n data = []\n for k, v in variables.items():\n data.append([k, \"{:.6f}\".format(v)])\n s = \"\\n\" + tabulate(data, headers=headers, tablefmt='grid')\n return s\n\n @staticmethod\n def get_tabulate_vars_hor(variables: Dict[str, Any]) -> str:\n datak = []\n datav = []\n datak.append(\"Name\")\n datav.append(\"Value\")\n for k, v in variables.items():\n datak.append(k)\n if not isinstance(v, str) and np.isscalar(v):\n datav.append(\"{:.6f}\".format(v))\n else:\n datav.append(v)\n data = [datak, datav]\n s = \"\\n\" + tabulate(data, tablefmt='grid')\n return s\n\n\nclass DistributionTimeImage:\n r\"\"\"\n Overview:\n ``DistributionTimeImage`` can be used to store images accorrding to ``time_steps``,\n for data with 3 dims``(time, category, value)``\n Interface:\n ``__init__``, ``add_one_time_step``, ``get_image``\n \"\"\"\n\n def __init__(self, maxlen: int = 600, val_range: Optional[dict] = None):\n r\"\"\"\n Overview:\n Init the ``DistributionTimeImage`` class\n Arguments:\n - maxlen (:obj:`int`): The max length of data inputs\n - val_range (:obj:`dict` or :obj:`None`): Dict with ``val_range['min']`` and ``val_range['max']``.\n \"\"\"\n self.maxlen = maxlen\n self.val_range = val_range\n self.img = np.ones((maxlen, maxlen))\n self.time_step = 0\n self.one_img = np.ones((maxlen, maxlen))\n\n def add_one_time_step(self, data: np.ndarray) -> None:\n r\"\"\"\n Overview:\n Step one timestep in ``DistributionTimeImage`` and add the data to distribution image\n Arguments:\n - data (:obj:`np.ndarray`): The data input\n \"\"\"\n assert (isinstance(data, np.ndarray))\n data = np.expand_dims(data, 1)\n data = np.resize(data, (1, self.maxlen))\n if self.time_step >= self.maxlen:\n self.img = np.concatenate([self.img[:, 1:], data])\n else:\n self.img[:, self.time_step:self.time_step + 1] = data\n self.time_step += 1\n\n def get_image(self) -> np.ndarray:\n r\"\"\"\n Overview:\n Return the distribution image\n Returns:\n - img (:obj:`np.ndarray`): The calculated distribution image\n \"\"\"\n norm_img = np.copy(self.img)\n valid = norm_img[:, :self.time_step]\n if self.val_range is None:\n valid = (valid - valid.min()) / (valid.max() - valid.min())\n else:\n valid = np.clip(valid, self.val_range['min'], self.val_range['max'])\n valid = (valid - self.val_range['min']) / (self.val_range['max'] - self.val_range['min'])\n norm_img[:, :self.time_step] = valid\n return np.stack([self.one_img, norm_img, norm_img], axis=0)\n\n\ndef pretty_print(result: dict, 
direct_print: bool = True) -> str:\n r\"\"\"\n Overview:\n Print a dict ``result`` in a pretty way\n Arguments:\n - result (:obj:`dict`): The result to print\n - direct_print (:obj:`bool`): Whether to print directly\n Returns:\n - string (:obj:`str`): The pretty-printed result in str format\n \"\"\"\n result = result.copy()\n out = {}\n for k, v in result.items():\n if v is not None:\n out[k] = v\n cleaned = json.dumps(out)\n string = yaml.safe_dump(json.loads(cleaned), default_flow_style=False)\n if direct_print:\n print(string)\n return string\n", "from typing import Union, Optional, List, Any, Tuple\nimport pickle\nimport torch\nfrom functools import partial\n\nfrom ding.config import compile_config, read_config\nfrom ding.worker import SampleSerialCollector, InteractionSerialEvaluator\nfrom ding.envs import create_env_manager, get_vec_env_setting\nfrom ding.policy import create_policy\nfrom ding.torch_utils import to_device\nfrom ding.utils import set_pkg_seed\n\n\ndef eval(\n input_cfg: Union[str, Tuple[dict, dict]],\n seed: int = 0,\n env_setting: Optional[List[Any]] = None,\n model: Optional[torch.nn.Module] = None,\n state_dict: Optional[dict] = None,\n load_path: Optional[str] = None,\n replay_path: Optional[str] = None,\n) -> float:\n r\"\"\"\n Overview:\n Pure evaluation entry.\n Arguments:\n - input_cfg (:obj:`Union[str, Tuple[dict, dict]]`): Config in dict type. \\\n ``str`` type means config file path. \\\n ``Tuple[dict, dict]`` type means [user_config, create_cfg].\n - seed (:obj:`int`): Random seed.\n - env_setting (:obj:`Optional[List[Any]]`): A list with 3 elements: \\\n ``BaseEnv`` subclass, collector env config, and evaluator env config.\n - model (:obj:`Optional[torch.nn.Module]`): Instance of torch.nn.Module.\n - state_dict (:obj:`Optional[dict]`): The state_dict of policy or model.\n - load_path (:obj:`Optional[str]`): Path to load ckpt.\n - replay_path (:obj:`Optional[str]`): Path to save replay.\n \"\"\"\n if isinstance(input_cfg, str):\n cfg, create_cfg = read_config(input_cfg)\n else:\n cfg, create_cfg = input_cfg\n create_cfg.policy.type += '_command'\n env_fn = None if env_setting is None else env_setting[0]\n cfg = compile_config(\n cfg, seed=seed, env=env_fn, auto=True, create_cfg=create_cfg, save_cfg=True, save_path='eval_config.py'\n )\n\n # Create components: env, policy, evaluator\n if env_setting is None:\n env_fn, _, evaluator_env_cfg = get_vec_env_setting(cfg.env)\n else:\n env_fn, _, evaluator_env_cfg = env_setting\n evaluator_env = create_env_manager(cfg.env.manager, [partial(env_fn, cfg=c) for c in evaluator_env_cfg])\n evaluator_env.seed(seed, dynamic_seed=False)\n if replay_path is None: # argument > config\n replay_path = cfg.env.get('replay_path', None)\n if replay_path:\n evaluator_env.enable_save_replay(replay_path)\n set_pkg_seed(seed, use_cuda=cfg.policy.cuda)\n policy = create_policy(cfg.policy, model=model, enable_field=['eval'])\n if state_dict is None:\n if load_path is None:\n load_path = cfg.policy.learn.learner.load_path\n state_dict = torch.load(load_path, map_location='cpu')\n policy.eval_mode.load_state_dict(state_dict)\n evaluator = InteractionSerialEvaluator(cfg.policy.eval.evaluator, evaluator_env, policy.eval_mode)\n\n # Evaluate\n _, eval_reward = evaluator.eval()\n print('Eval is over! 
The performance of your RL policy is {}'.format(eval_reward))\n return eval_reward\n\n\ndef collect_demo_data(\n input_cfg: Union[str, dict],\n seed: int,\n collect_count: int,\n expert_data_path: str,\n env_setting: Optional[List[Any]] = None,\n model: Optional[torch.nn.Module] = None,\n state_dict: Optional[dict] = None,\n) -> None:\n r\"\"\"\n Overview:\n Collect demonstration data by the trained policy.\n Arguments:\n - input_cfg (:obj:`Union[str, Tuple[dict, dict]]`): Config in dict type. \\\n ``str`` type means config file path. \\\n ``Tuple[dict, dict]`` type means [user_config, create_cfg].\n - seed (:obj:`int`): Random seed.\n - collect_count (:obj:`int`): The count of collected data.\n - expert_data_path (:obj:`str`): File path of the expert demo data will be written to.\n - env_setting (:obj:`Optional[List[Any]]`): A list with 3 elements: \\\n ``BaseEnv`` subclass, collector env config, and evaluator env config.\n - model (:obj:`Optional[torch.nn.Module]`): Instance of torch.nn.Module.\n - state_dict (:obj:`Optional[dict]`): The state_dict of policy or model.\n \"\"\"\n if isinstance(input_cfg, str):\n cfg, create_cfg = read_config(input_cfg)\n else:\n cfg, create_cfg = input_cfg\n create_cfg.policy.type += '_command'\n env_fn = None if env_setting is None else env_setting[0]\n cfg = compile_config(\n cfg,\n seed=seed,\n env=env_fn,\n auto=True,\n create_cfg=create_cfg,\n save_cfg=True,\n save_path='collect_demo_data_config.py'\n )\n\n # Create components: env, policy, collector\n if env_setting is None:\n env_fn, collector_env_cfg, _ = get_vec_env_setting(cfg.env)\n else:\n env_fn, collector_env_cfg, _ = env_setting\n collector_env = create_env_manager(cfg.env.manager, [partial(env_fn, cfg=c) for c in collector_env_cfg])\n collector_env.seed(seed)\n set_pkg_seed(seed, use_cuda=cfg.policy.cuda)\n policy = create_policy(cfg.policy, model=model, enable_field=['collect', 'eval'])\n # for policies like DQN (in collect_mode has eps-greedy)\n # collect_demo_policy = policy.collect_function(\n # policy._forward_eval,\n # policy._process_transition,\n # policy._get_train_sample,\n # policy._reset_eval,\n # policy._get_attribute,\n # policy._set_attribute,\n # policy._state_dict_collect,\n # policy._load_state_dict_collect,\n # )\n collect_demo_policy = policy.collect_mode\n if state_dict is None:\n state_dict = torch.load(cfg.learner.load_path, map_location='cpu')\n policy.collect_mode.load_state_dict(state_dict)\n collector = SampleSerialCollector(cfg.policy.collect.collector, collector_env, collect_demo_policy)\n\n # Let's collect some expert demostrations\n exp_data = collector.collect(n_sample=collect_count)\n if cfg.policy.cuda:\n exp_data = to_device(exp_data, 'cpu')\n with open(expert_data_path, 'wb') as f:\n pickle.dump(exp_data, f)\n print('Collect demo data successfully')\n" ]
[ [ "numpy.random.seed", "numpy.random.randint" ], [ "numpy.random.choice", "numpy.ones", "numpy.concatenate", "numpy.array", "numpy.zeros" ], [ "numpy.random.seed", "numpy.random.randint" ], [ "numpy.float64", "numpy.random.seed", "numpy.random.randint" ], [ "numpy.expand_dims", "numpy.resize", "numpy.clip", "numpy.stack", "numpy.ones", "numpy.concatenate", "numpy.copy", "numpy.isscalar" ], [ "torch.load" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
jonasdegrave/peptideClassifier
[ "8abadbc1f34514105791189374ef56d73576e7b4" ]
[ "archive/legacyCode.py" ]
[ "import xlsxwriter\nimport os\nimport re\nimport pandas as pd\nfrom bs4 import BeautifulSoup\nimport urllib3\nimport matplotlib.pyplot as plt\n\n#import requests\n#import glob\n#import numpy as np\n\n#import xml.etree.ElementTree as ET\n#import urllib.request\n#from collections import Counter\n#from bioservices import UniProt\n#import urllib.parse\n#import urllib.request\n#from bioservices import Rhea\n\n\n\ndef getPeptide():\n workbook=xlsxwriter.Workbook('sequences_remade.xlsx')\n worksheet = workbook.add_worksheet()\n worksheet.write(0,0,\"Sequences\")\n worksheet.write(0,1,\"Seq_ID (Uniprot)\")\n worksheet.write(0,2,\"Total Length\")\n worksheet.write(0,3,\"P1' position\")\n worksheet.write(0,4,\"5 Before\")\n worksheet.write(0,5,\"5 After\")\n worksheet.write(0,6,\"P1\")\n worksheet.write(0,7,\"Acetilation\")\n worksheet.write(0,8,\"Remarks-1\")\n worksheet.write(0,9,\"Remarks-2\")\n worksheet.write(0,10,\"Remarks-3\")\n worksheet.write(0,11,\"P1'\")\n worksheet.write(0,12,\"Sequence Raw\")\n worksheet.write(0,13,\"Remarks-4\")\n row=1\n df=pd.read_excel('para_testar_A375.xlsx')\n contents=df.iloc[:,3]\n files=df.iloc[:,4]\n #print(files)\n count=0\n count_2=0\n column=0\n row_8=1\n row_10=1\n row_12=1\n row_13=1\n col_13=13\n count_met=0\n count_nonmet=0\n m=[\"M\"]\n for line_2 in contents:\n #print (content)\n x=re.sub(\"[\\(\\[].*?[\\)\\]]\", \"\", line_2)\n #print(x)\n search=x[3:-3]\n #print(search)\n writing_sequence=worksheet.write(row,column,search)\n column+=1\n writing_sequence=worksheet.write(row,column,files[count])\n column+=1\n files_fasta=\"./fasta/\"+files[count]+\".fasta\"\n count+=1\n print(files[count])\n try:\n with open (files_fasta) as f:\n next(f)\n seq=\"\"\n c=0\n #url='https://www.uniprot.org/uniprot/Q99832'\n url=\"https://www.uniprot.org/uniprot/%s\"%(files[count])\n http=urllib3.PoolManager()\n r=http.request('GET',url)\n content=r.data\n content=content.decode(\"utf-8\")\n soup = BeautifulSoup(content, 'html.parser') \n pep=content.find(\"Signal peptide\")\n nat=content.find(\"Natural variant\")\n poly=content.find(\"Propeptide\")\n for line in f:\n c=c+len(line)-1\n seq+=line\n # print(seq)\n seq = seq.replace(\"\\n\", \"\")\n find=seq.find(search)+1\n writing_protein=worksheet.write(row,column,c)\n column+=1\n writing_sequence=worksheet.write(row,column,find)\n column+=1\n col_8=8\n col_10=10\n list_natural=[1,2,3]\n #print(line[0])\n col_12=12\n writing_sequence=worksheet.write(row_12,col_12,line_2)\n row_12+=1\n bf_pep1=seq[:find-1]\n bf_pep=bf_pep1[-5:]\n af_pep1=seq[find:]\n af_pep1=af_pep1.strip(search)\n af_pep=af_pep1[:5]\n writing_sequence=worksheet.write(row,column,bf_pep) \n column+=1\n writing_sequence=worksheet.write(row,column,af_pep)\n #print(bf_pep1)\n if find in list_natural:\n writing_col_8=worksheet.write(row_8,col_8,\"Natural\")\n row_8+=1\n if \"M\" in bf_pep:\n writing_met=worksheet.write(row_10,col_10,\"Met-removed\")\n row_10+=1\n count_met+=1\n #print(line_2[0])\n else:\n if line_2[0] != \"M\":\n writing_met=worksheet.write(row_10,col_10,\"Met-intact\")\n row_10+=1\n count_nonmet+=1 \n #print(r_10)\n else:\n writing_col_8=worksheet.write(row_8,col_8,\"Neo N-terminus\")\n row_8+=1\n row_10+=1\n\n #get webpage content from unitprot to find natural, signal peptide and polypeptide\n\n if pep > 0:\n y = soup.find('a', {'title' : 'BLAST subsequence'})\n y=str(y)\n number=y[-6:-4] \n number=int(number)\n if find < number:\n writing_col_13=worksheet.write(row_13,col_13,\"Signal Peptide\")\n row_13+=1\n print(number)\n number=\"\" \n 
#elif nat > 0:\n # writing_col_13=worksheet.write(row_13,col_13,\"Natural variant\")\n # row_13+=1\n elif poly>0:\n y = soup.find('a', {'title' : 'BLAST subsequence'})\n y=str(y)\n number=y[-6:-4] \n number=int(number)\n if find < number:\n writing_col_13=worksheet.write(row_13,col_13,\"Signal Peptide\")\n row_13+=1\n print(number)\n number=\"\"\n writing_col_13=worksheet.write(row_13,col_13,\"Propeptide\")\n row_13+=1\n elif pep < 0 and nat <0 and poly <0:\n #writing_col_13=worksheet.write(row_13,col_13,\"Polypeptide\")\n row_13+=1\n print(pep,nat,poly)\n #print(af_pep)\n\n except StopIteration:\n pass \n row+=1\n column=0 \n try:\n r=1\n r_2=1\n r_11=1\n count_acetilated=0\n count_nonacetilated=0\n for raw_data in contents:\n #print(raw_data[0])\n acetilation_code=\"43.02\"\n acetilation_finder=raw_data.find(acetilation_code)\n #print(acetilation_finder)\n col_2=6\n col_11=11\n writing_sequence=worksheet.write(r_2,col_2,raw_data[0])\n r_2+=1\n writing_sequence=worksheet.write(r_11,col_11,raw_data[10])\n r_11+=1\n col=7\n if acetilation_finder > 0:\n writing_sequence=worksheet.write(r,col,\"Acetilated\")\n r+=1\n count_acetilated+=1\n else:\n writing_sequence=worksheet.write(r,col,\"Free\")\n r+=1\n count_nonacetilated+=1\n\n total=count_nonacetilated+count_acetilated\n acetilated_per=(count_acetilated/total)*100\n nonacetilated_per=(count_nonacetilated/total)*100\n my_data=[acetilated_per,nonacetilated_per]\n my_labels='Acetilated', 'Free'\n my_colors = ['lightblue','lightsteelblue','silver']\n plt.pie(my_data,labels=my_labels,autopct='%1.1f%%', colors=my_colors,textprops={'fontsize': 14})\n fig = plt.gcf()\n #fig.set_size_inches(10,10)\n #plt.show()\n imagefile=\"acetilation.png\"\n fig.savefig(imagefile,dpi=150)\n plt.close(fig)\n\n total=count_nonmet+count_met\n met_per=(count_met/total)*100\n nonmet_per=(count_nonmet/total)*100\n my_data=[met_per,nonmet_per]\n my_labels='Met-Removed', 'Met-Intact'\n my_colors = ['lightblue','lightsteelblue','silver']\n plt.pie(my_data,labels=my_labels,autopct='%1.1f%%', colors=my_colors,textprops={'fontsize': 14})\n fig = plt.gcf()\n fig.set_size_inches(10,10)\n plt.show()\n imagefile=\"met.png\"\n fig.savefig(imagefile,dpi=150)\n plt.close(fig)\n except StopIteration:\n pass \n \n workbook.close()\n return()\n\n\n\ndef testing_HTML(content):\n try:\n workbook=xlsxwriter.Workbook('sequences_remade.xlsx')\n worksheet = workbook.add_worksheet()\n worksheet.write(0,0,\"Teste\")\n col=0\n row=1\n url='https://www.uniprot.org/uniprot/P09681.txt'\n pep=content.find(\"Signal peptide\")\n nat=content.find(\"Natural variant\")\n pro=content.find(\"Propeptide\")\n #if pep > 0:\n # url_pep=\"\"\n # writing_col_8=worksheet.write(row,col,\"Natural\")\n # table1 = soup.find('table', {'id': 'peptides_section'})\n # headers=[] \n # for i in table1.find_all('th'):\n # title=i.text\n # headers.append(title) \n # mydata=pd.DataFrame(columns=headers)\n # for j in table1.find_all('tr')[1:]:\n # row_data = j.find_all('td')\n # row=[i.text for i in row_data]\n # length = len(mydata)\n # #print(row)\n # mydata.loc[length]=row \n # mydata.to_csv('a_data.csv',index=False) \n #number=y[-6:-4] \n #print(headers) \n #if pro>0:\n # url_pep=\"\"\n # writing_col_8=worksheet.write(row,col,\"Natural\")\n # y = soup.find('a', {'title' : 'BLAST subsequence'})\n # y=str(y)\n # number=y[-6:-4] \n # row+=1 \n #print(y) \n \n\n except StopIteration:\n pass 
\n\n######################################################################\n######################################################################\n######################################################################\n######################################################################\n######################################################################\n\n\n\n##########\nimport os\nimport pandas as pd\n\n##########\n\n\n\n# Verifica se um arquivo existe\ndef fileExists(fileName):\n return os.path.isfile(fileName)\n\n# Recebe o nome de arquivo e retorna uma lista de linhas com intervalos de classes\ndef filterFile(fileName):\n if not fileExists(\"./temp/{}\".format(fileName)):\n print(\"File {} doesn't exist, downloading...\".format(fileName))\n os.system(\"curl https://www.uniprot.org/uniprot/{} > ./temp/{}\".format(\n fileName,\n fileName))\n\n arq = open(\"./temp/{}\".format(fileName), \"r\")\n contents = arq.read()\n arq.close()\n\n lines = contents.split('\\n')\n\n keys = [\"SIGNAL\", \"PROPEP\"]\n\n results = []\n\n for key in keys:\n results += list(filter(lambda line: key in line, lines))\n\n results = list(map(lambda line: line.split(), results))\n\n final = []\n\n for result in results:\n interval = result[2].split(\"..\")\n final += [[result[1], int(interval[0]), int(interval[1])]]\n\n return final\n\n# Recebe uma lista de intervalos e um numero, e determina a classe\ndef classifier (number, intervals):\n for interval in intervals:\n if number >= interval[1] and number <= interval[2]:\n return interval[0]\n return \"OTHER\"\n\ndef getSheet (fileName):\n return pd.read_excel(fileName)\n\ndef main (sheetName):\n sheetContents = getSheet(sheetName)\n \n # We skip first line because the sheet has a header;\n for line in sheetContents[1:]:\n # 5th column corresponds to 'protein code';\n protein = line[4]\n\n # 11th column corresponds to 'P1 position';\n position = line[10]\n \n protein\n\n\n #contents=df.iloc[:,3]\n #=df.iloc[:,4]\n\n\n\nif __name__ == \"__main__\":\n if len(os.sys.argv) != 2:\n print(\"[Error] Expecting 1 argument. Usage:\\n\\t$ python peptideClassifier.py <sheetName.xlsx>\")\n os.sys.exit(1)\n sheetName = os.sys.argv[1]\n main(sheetName)\n\n \n\n\n\n planilha = open(nomePlanilha, \"r\")\n conteudo = planilha.readlines()\n planilha.close()\n \n\n #u = UniProt()\n #r=Rhea()\n\n #getPeptide()\n #testing_HTML()\n\n interval = filterFile(\"P09681.txt\")\n\n print(intervalo)\n\n elementos = [10, 20, 30, 40, 50, 60, 70, 80, 90, 100]\n\n for elemento in elementos:\n print(\"Elemento: {} classificado como {}\".format(elemento, classificador(elemento, intervalo)))\n\n" ]
[ [ "pandas.read_excel", "matplotlib.pyplot.gcf", "matplotlib.pyplot.close", "matplotlib.pyplot.pie", "matplotlib.pyplot.show" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] } ]