repo_name | hexsha | file_path | code | apis |
---|---|---|---|---|
zdadadaz/jcc_dfdc | [
"672b61771e22b369c7950c89299b0a7a2f7586ad"
] | [
"tmp.py"
] | [
"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Jan 12 19:28:12 2020\n\n@author: zdadadaz\n\"\"\"\n\nimport json\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pylab as plt\n\n# dir_json = './../fb_whole/metadata_21.json'\n# train_list =[]\n# with open(dir_json) as json_file:\n# data = json.load(json_file)\n# train_list = pd.DataFrame.from_dict(data, orient='index')\n# train_list.reset_index(level=0, inplace=True)\n \n# train_list[train_list['label']=='REAL'].iloc[1]\n\nbase = pd.read_csv('submission_base.csv')\nmtcn = pd.read_csv('submission_mtcn.csv')\nwhole = pd.read_csv('metadata_small.csv')\n\nsLength = len(base['label'])\nbase['wrong'] = pd.Series(np.random.randn(sLength), index=base.index)\nbase['original'] = pd.Series(np.random.randn(sLength), index=base.index)\nbase['folder'] = pd.Series(np.random.randn(sLength), index=base.index)\nbase['res'] = pd.Series(np.random.randn(sLength), index=base.index)\nmtcn['wrong'] = pd.Series(np.random.randn(sLength), index=base.index)\nmtcn['original'] = pd.Series(np.random.randn(sLength), index=base.index)\nmtcn['folder'] = pd.Series(np.random.randn(sLength), index=base.index)\nmtcn['res'] = pd.Series(np.random.randn(sLength), index=base.index)\n\nfor i in range(len(base)):\n print(str(i))\n fn = base.iloc[i][0]\n label = whole[whole['filename']==fn]['label']\n score =0\n origin = \"n\"\n folder = whole[whole['filename']==fn]['folder']\n if list(label)[0] ==\"FAKE\":\n score = 1\n origin = whole[whole['filename']==fn]['original']\n \n base['wrong'][i]= abs(score - base.iloc[i][1])>0.5\n base['original'][i]= list(origin)[0]\n base['folder'][i]= list(folder)[0]\n base['res'][i]= list(label)[0]\n \n mtcn['wrong'][i]= abs(score - mtcn.iloc[i][1])>0.5\n mtcn['original'][i]= list(origin)[0]\n mtcn['folder'][i]= list(folder)[0]\n mtcn['res'][i]= list(label)[0]\n \nfor i, d in base.groupby('res'):\n base['label'].plot(kind='hist', figsize=(15, 5), bins=20, alpha=0.8, title='base')\n plt.legend(['FAKE','REAL'])\nplt.show()\nfor i, d in base.groupby('res'):\n mtcn['label'].plot(kind='hist', figsize=(15, 5), bins=20, title='MTCNN', alpha=0.8)\n plt.legend(['FAKE','REAL'])\nplt.show()\n\nTP = sum(np.array(base['label']>0.5) & np.array(base['res']==\"FAKE\"))\nFP = sum(np.array(base['label']>0.5) & np.array(base['res']==\"REAL\"))\nTN = sum(np.array(base['label']<=0.5) & np.array(base['res']==\"FAKE\"))\nFN = sum(np.array(base['label']<=0.5) & np.array(base['res']==\"REAL\"))\nprecision = TP/len(base)*2\nrecall = TP/(TP+FP)\nFake_precision = TP/(TP+TN)\nReal_precision = FN/(FP+FN)"
] | [
[
"pandas.read_csv",
"matplotlib.pylab.legend",
"numpy.random.randn",
"matplotlib.pylab.show",
"numpy.array"
]
] |
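The `tmp.py` row above tallies a confusion matrix for the deepfake submissions and then derives metrics with non-standard formulas (for example `TN` is counted on `res == "FAKE"` and `precision = TP/len(base)*2`). As a reference point, here is a minimal sketch of the textbook definitions under the same column layout (`label` = predicted fake probability, `res` = ground-truth string, threshold 0.5); the toy DataFrame values are illustrative, not the repo's data.

```python
# Minimal sketch (not the repo's code): textbook confusion-matrix metrics for
# the DataFrame layout used in tmp.py.
import numpy as np
import pandas as pd

base = pd.DataFrame({'label': [0.9, 0.2, 0.7, 0.4],           # predicted P(fake)
                     'res':   ['FAKE', 'REAL', 'REAL', 'FAKE']})  # ground truth

pred_fake = base['label'] > 0.5
is_fake = base['res'] == 'FAKE'

TP = int(np.sum(pred_fake & is_fake))    # predicted fake, actually fake
FP = int(np.sum(pred_fake & ~is_fake))   # predicted fake, actually real
TN = int(np.sum(~pred_fake & ~is_fake))  # predicted real, actually real
FN = int(np.sum(~pred_fake & is_fake))   # predicted real, actually fake

precision = TP / (TP + FP) if TP + FP else 0.0
recall = TP / (TP + FN) if TP + FN else 0.0
```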
antonvs88/crowddynamics-research | [
"61260aa26a6d5bc213252bf96eaa472a551918e3"
] | [
"data_analysis/calculate_field_data.py"
] | [
"from scipy.spatial import Voronoi, voronoi_plot_2d\n\nimport h5py\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport os\nimport sys\nfrom shapely.geometry import Polygon, MultiLineString, Point\nfrom shapely.ops import polygonize\nfrom descartes import PolygonPatch\nfrom voronoi_finite_polygons_2d import voronoi_finite_polygons_2d\nfrom recursive_mean import recursive_mean\n\n# Bound box representing the room. Used later in making Voronoi tessalation.\nwidth = 20\nheight = 20\nboundbox = Polygon([(0, 0), (0, height), (width, height), (width, 0)])\n\n# Create a grid structure over the room geometry.\n# Cell size in the grid, determines the resolution of the micro-macro converted data\ncell_size = 0.1\nm = np.round(width / cell_size)\nn = np.round(height / cell_size)\nm = m.astype(int)\nn = n.astype(int)\nX = np.linspace(0, width, m + 1)\nY = np.linspace(0, height, n + 1)\nhlines = [((x1, yi), (x2, yi)) for x1, x2 in zip(X[:-1], Y[1:]) for yi in Y]\nvlines = [((xi, y1), (xi, y2)) for y1, y2 in zip(Y[:-1], Y[1:]) for xi in X]\ngrids = list(polygonize(MultiLineString(hlines + vlines)))\n\n# The data is divided into four intervals. The number of pedestrians in the room determines the intervals.\n# The data when the 10 first and 10 last pedestrians leave the room is omitted to get rid of transient\n# behavior of the \"crowd system\".\ninterval1_start = 190\ninterval2_start = 145\ninterval3_start = 100\ninterval4_start = 55\ninterval4_end = 10\n\n# These should be the midpoints of the cells\nmid_x, mid_y = np.meshgrid(np.arange(cell_size/2, width, cell_size), np.arange(cell_size/2, height, cell_size))\n# The vector in each cell, pointing from the midpoint of the cell to the middle of the exit.\n# Used later in calculating the radial speed.\ndirection = np.zeros((mid_x.shape[0],mid_x.shape[0],2))\ndirection[:, :, 0] = mid_x - 20\ndirection[:, :, 1] = mid_y - 10\nd_norm = np.sqrt(direction[:,:,0] * direction[:,:,0] + direction[:,:,1] * direction[:,:,1])\n\n# We will calculate mean values of some part of the data recursively by taking a \"chunk\" of the data.\nchunk = 1000 # chunk size\n\n# The outer loop goes through the folders. 
The data from the simulations should be stored there in .npy.gz format.\nmylist = ['taset0'] # name of the folder, where the data is; can be an array of folders\nfor i in range(0, len(mylist)):\n\n # The inner loop goes through the simulations (in this case it goes through just one simulation)\n for j in range(int(sys.argv[1]), int(sys.argv[1]) + 1):\n\n\t# Data of pedestrians in the room at different times (0=\"not in room\", 1=\"in room\").\n if os.path.exists(\"{}{}{}{}{}{}\".format('simulation_data/', mylist[i], '/', 'in_room1', j, '.npy.gz')):\n in_room = np.loadtxt(\"{}{}{}{}{}{}\".format('simulation_data/', mylist[i], '/', 'in_room1', j, '.npy.gz'))\n\n # Calculate number of pedestrians in room at different times\n sum_in_room = np.sum(in_room, axis=1)\n\n # The time steps when there are 190 pedestrians in the room\n time_interval1_start = np.where(sum_in_room == interval1_start)\n\n # Take the first instant when there are 190 pedestrians in the room.\n #\n # If there are no time steps when there are 190 pedestrians in the room (because two pedestrians have\n # evacuated simultaneously, and thus the number of pedestrians go from 191 to 189), take the times when\n # there are 189 pedestrians in the room.\n if np.size(time_interval1_start) == 0:\n time_interval1_start = np.where(sum_in_room == (interval1_start - 1))[0][0]\n else:\n time_interval1_start = np.where(sum_in_room == interval1_start)[0][0]\n\n # The time steps when there are 145 pedestrians in the room\n time_interval2_start = np.where(sum_in_room == interval2_start)\n\n # Take the first instant when there are 145 pedestrians in the room.\n #\n # If there are no time steps when there are 145 pedestrians in the room (because two pedestrians have\n # evacuated simultaneously and the number of pedestrians go from 146 to 144), take the times when\n # there are 144 pedestrians in the room.\n if np.size(time_interval2_start) == 0:\n time_interval2_start = np.where(sum_in_room == (interval2_start - 1))[0][0]\n else:\n time_interval2_start = np.where(sum_in_room == interval2_start)[0][0]\n\n # The time steps when there are 100 pedestrians in the room\n time_interval3_start = np.where(sum_in_room == interval3_start)\n\n # Take the first instant when there are 100 pedestrians in the room.\n #\n # If there are no time steps when there are 100 pedestrians in the room (because two pedestrians have\n # evacuated simultaneously and the number of pedestrians go from 101 to 99), take the times when\n # there are 99 pedestrians in the room.\n if np.size(time_interval3_start) == 0:\n time_interval3_start = np.where(sum_in_room == (interval3_start - 1))[0][0]\n else:\n time_interval3_start = np.where(sum_in_room == interval3_start)[0][0]\n\n # The time steps when there are 55 pedestrians in the room\n time_interval4_start = np.where(sum_in_room == interval4_start)\n\n # Take the first instant when there are 55 pedestrians in the room.\n #\n # If there is no time steps when there are 55 pedestrians in the room (because two pedestrians have\n # evacuated simultaneously and the number of pedestrians go from 56 to 54), take the times when\n # there are 54 pedestrians in the room.\n if np.size(time_interval4_start) == 0:\n time_interval4_start = np.where(sum_in_room == (interval4_start - 1))[0][0]\n else:\n time_interval4_start = np.where(sum_in_room == interval4_start)[0][0]\n\n # The time steps when there 10 pedestrians in the room\n time_interval4_end = np.where(sum_in_room == interval4_end)\n\n # Take the first instant when there are 10 
pedestrians in the room.\n #\n # If there are no time steps when there are 10 pedestrians in the room (because two pedestrians have\n # evacuated simultaneously and the number of pedestrians go from 11 to 9), take the times when\n # there are 9 pedestrians in the room.\n if np.size(time_interval4_end) == 0:\n time_interval4_end = np.where(sum_in_room == (interval4_end - 1))[0][0]\n else:\n time_interval4_end = np.where(sum_in_room == interval4_end)[0][0]\n\n\t# Data of x-positions of pedestrians at different times.\n # NOTE! The data is sampled at a finer resolution, thus we take only every second element of the array.\n if os.path.exists(\"{}{}{}{}{}{}\".format('simulation_data/', mylist[i], '/', 'positions_x', j, '.npy.gz')):\n positions_x = np.loadtxt(\"{}{}{}{}{}{}\".format('simulation_data/', mylist[i], '/', 'positions_x', j, '.npy.gz'))\n positions_x = positions_x[0::2] # take every second element\n\n # Data of y-positions of pedestrians at different times.\n # NOTE! The data is sampled at a finer resolution, thus we take only every second element of the array.\n if os.path.exists(\"{}{}{}{}{}{}\".format('simulation_data/', mylist[i], '/', 'positions_y', j, '.npy.gz')):\n positions_y = np.loadtxt(\"{}{}{}{}{}{}\".format('simulation_data/', mylist[i], '/', 'positions_y', j, '.npy.gz'))\n positions_y = positions_y[0::2] # take every second element\n\n # Data of pedestrians' velocities x-component at different times.\n if os.path.exists(\"{}{}{}{}{}{}\".format('simulation_data/', mylist[i], '/', 'velocities_x', j, '.npy.gz')):\n velocities_x = np.loadtxt(\"{}{}{}{}{}{}\".format('simulation_data/', mylist[i], '/', 'velocities_x', j, '.npy.gz'))\n\n # Data of pedestrians' velocities y-component at different times.\n if os.path.exists(\"{}{}{}{}{}{}\".format('simulation_data/', mylist[i], '/', 'velocities_y', j, '.npy.gz')):\n velocities_y = np.loadtxt(\"{}{}{}{}{}{}\".format('simulation_data/', mylist[i], '/', 'velocities_y', j, '.npy.gz'))\n\n # Arrays to save the micro-macro converted data\n velocity_x = np.zeros((time_interval4_end - time_interval1_start, n, m), dtype=np.float16) # velocity x-component\n velocity_y = np.zeros((time_interval4_end - time_interval1_start, n, m), dtype=np.float16) # velocity y-component\n speed = np.zeros((time_interval4_end - time_interval1_start, n, m), dtype=np.float16) # speed\n density = np.zeros((time_interval4_end - time_interval1_start, n, m), dtype=np.float16) # density\n projection = np.zeros((time_interval4_end - time_interval1_start, n, m), dtype=np.float16) # radial speed\n\n\t# Loop through the data when the number of pedestrians in the room goes from 190 to 10.\n # Using the Voronoi-method derive the macroscopic quantities.\n for t in range(time_interval1_start, time_interval4_end):\n\n # Positions of pedestrians inside the room\n agents_in_room = np.where(in_room[t, :] == 1)[0] # which pedestrians are in the room\n n_agents_in_room = len(agents_in_room) # number of pedestrians in the room\n points = np.concatenate((np.reshape(positions_x[t, agents_in_room], (n_agents_in_room, 1)),\n np.reshape(positions_y[t, agents_in_room], (n_agents_in_room, 1))), axis=1)\n\n # x- and y-components of velocities of pedestrians in room\n x_component = velocities_x[t, agents_in_room]\n y_component = velocities_y[t, agents_in_room]\n\n # Create a Voronoi tessalation from pedestrian center points\n vor = Voronoi(points)\n\n # Add also the Voronoi regions on the rim to the tessalation\n #\n # new_vertices contains all the vertices in the tessalation\n # 
new_regions contains the vertices used for each Voronoi area\n #\n # https://stackoverflow.com/questions/20515554/colorize-voronoi-diagram\n # https://gist.github.com/pv/8036995\n new_regions, new_vertices = voronoi_finite_polygons_2d(vor)\n\n # Loop through the Voronoi tessalations and calculate the density for each cell in the grid \n # (Steffen B, Seyfried A (2010) Methods for measuring pedestrian density, flow, speed and direction\n # with minimal scatter. Physica A: Statistical mechanics and its applications 389(9):1902-1910)\n for r in range(0, len(new_regions)):\n region = new_regions[r]\n # Shapely Polygon object from Voronoi cell\n voronoi_cell = Polygon(shell=new_vertices[region]) & boundbox\n\n # Area of the Voronoi cell\n vor_area = voronoi_cell.area\n\n # Calculate minimal and maximal x- and y-coordinate values of the Voronoi cell\n minx, miny, maxx, maxy = voronoi_cell.bounds\n # Round the minimal and maximal values to belong to a cell in the square grid\n minx, miny, maxx, maxy = np.round(\n (minx / cell_size, miny / cell_size, maxx / cell_size, maxy / cell_size)).astype(int)\n\n\t\t# Make sure that min and max values don't get out of bounds.\n minx = np.maximum(0, minx - 1)\n miny = np.maximum(0, miny - 1)\n maxx = np.minimum(m, maxx + 1)\n maxy = np.minimum(n, maxy + 1)\n\n # Loop over cells in the grid intersecting with the Voronoi cell.\n for x in range(minx, maxx):\n for y in range(miny, maxy):\n intersect_area = grids[x * n + y].intersection(voronoi_cell).area # intersecting area\n # Calculate the contribution of the pedestrian to the density and velocity in the grid cell.\n density[t - time_interval1_start, y, x] += intersect_area / vor_area\n velocity_x[t - time_interval1_start, y, x] += intersect_area * x_component[r]\n velocity_y[t - time_interval1_start, y, x] += intersect_area * y_component[r]\n\n # Finalize calculating the weighted density and velocity in the cell, by dividing it by the cell area\n density[t - time_interval1_start, :, :] /= cell_size * cell_size\n velocity_x[t - time_interval1_start, :, :] /= cell_size * cell_size\n velocity_y[t - time_interval1_start, :, :] /= cell_size * cell_size\n\n # Flip the density matrix upside down because of peculiar indexing in python\n density[t - time_interval1_start, :, :] = np.flipud(density[t - time_interval1_start, :, :])\n velocity_x[t - time_interval1_start, :, :] = np.flipud(velocity_x[t - time_interval1_start, :, :])\n velocity_y[t - time_interval1_start, :, :] = np.flipud(velocity_y[t - time_interval1_start, :, :])\n\n # Calculate speed in cells from the resultant velocity vectors\n speed[t - time_interval1_start, :, :] = np.hypot(velocity_x[t - time_interval1_start, :, :],\n velocity_y[t - time_interval1_start, :, :])\n\n # Radial speed (calculate projections of actualized velocities on desired velocities)\n projection[t - time_interval1_start, :, :] = (velocity_x[t - time_interval1_start, :, :] *\n direction[:, :, 0] + velocity_y[t -\n time_interval1_start, :, :] *\n direction[:, :, 1]) / d_norm\n\n # Save the length of the time intervals\n intervals = np.array((time_interval2_start - time_interval1_start, time_interval3_start - time_interval2_start,\n time_interval4_start - time_interval3_start, time_interval4_end - time_interval4_start))\n np.save(\"{}{}{}{}{}{}\".format('fields/', mylist[i], '/', 'intervals', j, '.npy'), intervals)\n\n # Save the macroscopic data of speed, density and radial speed in .hdf5 format for each time interval\n # NOTE: The data is not averaged over time. 
The averaging is done in \"average_fields.py\". If one wants\n # to save space the averaging should be performed already in this code.\n\n # First interval (190...145 agents in the room)\n with h5py.File(\"{}{}{}{}{}{}\".format('fields/', mylist[i], '/', 'speed_interval1', j, '.hdf5'), 'w') as hf1:\n hf1.create_dataset(\"{}{}{}{}{}{}\".format('fields/', mylist[i], '/', 'speed_interval1', j, '.npy.gz'),\n data=speed[time_interval1_start - time_interval1_start:\n time_interval2_start - time_interval1_start, :, :])\n with h5py.File(\"{}{}{}{}{}{}\".format('fields/', mylist[i], '/', 'density_interval1', j, '.hdf5')) as hf2:\n hf2.create_dataset(\"{}{}{}{}{}{}\".format('fields/', mylist[i], '/', 'density_interval1', j, '.npy.gz'),\n data=density[time_interval1_start - time_interval1_start:\n time_interval2_start - time_interval1_start, :, :])\n with h5py.File(\"{}{}{}{}{}{}\".format('fields/', mylist[i], '/', 'projection_interval1', j, '.hdf5')) as hf3:\n hf3.create_dataset(\"{}{}{}{}{}{}\".format('fields/', mylist[i], '/', 'projection_interval1', j, '.npy.gz'),\n data=projection[time_interval1_start - time_interval1_start:\n time_interval2_start - time_interval1_start, :, :])\n\n # Second interval (145...100 agents in the room)\n with h5py.File(\"{}{}{}{}{}{}\".format('fields/', mylist[i], '/', 'speed_interval2', j, '.hdf5'), 'w') as hf4:\n hf4.create_dataset(\"{}{}{}{}{}{}\".format('fields/', mylist[i], '/', 'speed_interval2', j, '.npy.gz'),\n data=speed[time_interval2_start - time_interval1_start:\n time_interval3_start - time_interval1_start, :, :])\n with h5py.File(\"{}{}{}{}{}{}\".format('fields/', mylist[i], '/', 'density_interval2', j, '.hdf5')) as hf5:\n hf5.create_dataset(\"{}{}{}{}{}{}\".format('fields/', mylist[i], '/', 'density_interval2', j, '.npy.gz'),\n data=density[time_interval2_start - time_interval1_start:\n time_interval3_start - time_interval1_start, :, :])\n with h5py.File(\"{}{}{}{}{}{}\".format('fields/', mylist[i], '/', 'projection_interval2', j, '.hdf5')) as hf6:\n hf6.create_dataset(\"{}{}{}{}{}{}\".format('fields/', mylist[i], '/', 'projection_interval2', j, '.npy.gz'),\n data=projection[time_interval2_start - time_interval1_start:\n time_interval3_start - time_interval1_start, :, :])\n\n\n # First interval (100...55 agents in the room)\n with h5py.File(\"{}{}{}{}{}{}\".format('fields/', mylist[i], '/', 'speed_interval3', j, '.hdf5'), 'w') as hf7:\n hf7.create_dataset(\"{}{}{}{}{}{}\".format('fields/', mylist[i], '/', 'speed_interval3', j, '.npy.gz'),\n data=speed[time_interval3_start - time_interval1_start:\n time_interval4_start - time_interval1_start, :, :])\n with h5py.File(\"{}{}{}{}{}{}\".format('fields/', mylist[i], '/', 'density_interval3', j, '.hdf5')) as hf8:\n hf8.create_dataset(\"{}{}{}{}{}{}\".format('fields/', mylist[i], '/', 'density_interval3', j, '.npy.gz'),\n data=density[time_interval3_start - time_interval1_start:\n time_interval4_start - time_interval1_start, :, :])\n with h5py.File(\"{}{}{}{}{}{}\".format('fields/', mylist[i], '/', 'projection_interval3', j, '.hdf5')) as hf9:\n hf9.create_dataset(\"{}{}{}{}{}{}\".format('fields/', mylist[i], '/', 'projection_interval3', j, '.npy.gz'),\n data=projection[time_interval3_start - time_interval1_start:\n time_interval4_start - time_interval1_start, :, :])\n\n # First interval (190...145 agents in the room)\n with h5py.File(\"{}{}{}{}{}{}\".format('fields/', mylist[i], '/', 'speed_interval4', j, '.hdf5'), 'w') as hf10:\n hf10.create_dataset(\"{}{}{}{}{}{}\".format('fields/', mylist[i], '/', 
'speed_interval4', j, '.npy.gz'),\n data=speed[time_interval4_start - time_interval1_start:\n time_interval4_end - time_interval1_start, :, :])\n with h5py.File(\"{}{}{}{}{}{}\".format('fields/', mylist[i], '/', 'density_interval4', j, '.hdf5')) as hf11:\n hf11.create_dataset(\"{}{}{}{}{}{}\".format('fields/', mylist[i], '/', 'density_interval4', j, '.npy.gz'),\n data=density[time_interval4_start - time_interval1_start:\n time_interval4_end - time_interval1_start, :, :])\n with h5py.File(\"{}{}{}{}{}{}\".format('fields/', mylist[i], '/', 'projection_interval4', j, '.hdf5')) as hf12:\n hf12.create_dataset(\"{}{}{}{}{}{}\".format('fields/', mylist[i], '/', 'projection_interval4', j, '.npy.gz'),\n data=projection[time_interval4_start - time_interval1_start:\n time_interval4_end - time_interval1_start, :, :])\n\n\n"
] | [
[
"numpy.sqrt",
"numpy.sum",
"numpy.array",
"numpy.zeros",
"scipy.spatial.Voronoi",
"numpy.flipud",
"numpy.hypot",
"numpy.maximum",
"numpy.reshape",
"numpy.size",
"numpy.arange",
"numpy.round",
"numpy.where",
"numpy.linspace",
"numpy.minimum"
]
] |
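The `calculate_field_data.py` row above converts microscopic pedestrian positions into density and velocity fields by clipping each Voronoi cell to the room polygon and weighting contributions by intersection areas. A minimal sketch of that clipping step is shown below; it handles only the finite regions returned by SciPy and does not reproduce the repo's `voronoi_finite_polygons_2d` helper, which also closes the unbounded rim cells. It assumes `scipy` and `shapely` are installed, and the point cloud is illustrative.

```python
# Minimal sketch: clip finite Voronoi cells to the room and read off their areas.
import numpy as np
from scipy.spatial import Voronoi
from shapely.geometry import Polygon

width = height = 20.0
boundbox = Polygon([(0, 0), (0, height), (width, height), (width, 0)])

points = np.random.uniform(0, width, size=(50, 2))  # pedestrian centre points (toy)
vor = Voronoi(points)

areas = {}
for p_idx, region_idx in enumerate(vor.point_region):
    region = vor.regions[region_idx]
    if -1 in region or len(region) == 0:       # skip unbounded rim cells
        continue
    cell = Polygon(vor.vertices[region]) & boundbox  # clip to the room
    areas[p_idx] = cell.area                   # 1/area ~ local density contribution
```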
doublefloyd/beluga | [
"740bda376634945ef51bf1cf946fcbe002e9bc7f"
] | [
"beluga/numeric/compilation/component_compilation.py"
] | [
"import numpy as np\nfrom scipy.integrate import simps\n\nfrom beluga.numeric.compilation import jit_lambdify, jit_compile_func\nfrom beluga.symbolic.data_classes.components_structures import CostStruct\n\n\ndef compile_control(control_options, args, ham_func, lambdify_func=jit_lambdify):\n\n num_options = len(control_options)\n\n if num_options == 0:\n return None\n\n elif num_options == 1:\n compiled_option = lambdify_func(args, control_options[0])\n\n def calc_u(_y, _p, _k):\n return np.array(compiled_option(_y, _p, _k))\n\n else:\n compiled_options = lambdify_func(args, control_options)\n\n def calc_u(_y, _p, _k):\n u_set = np.array(compiled_options(_y, _p, _k))\n\n u = u_set[0, :]\n ham = ham_func(_y, u, _p, _k)\n for n in range(1, num_options):\n ham_i = ham_func(_y, u_set[n, :], _p, _k)\n if ham_i < ham:\n u = u_set[n, :]\n ham = ham_i\n\n return u\n\n return jit_compile_func(calc_u, args, func_name='control_function')\n\n\ndef compile_cost(symbolic_cost: CostStruct, dynamic_args, bc_args, lambdify_func=jit_lambdify):\n\n compute_initial_cost = lambdify_func(bc_args, symbolic_cost.initial)\n compute_terminal_cost = lambdify_func(bc_args, symbolic_cost.terminal)\n compute_path_cost = lambdify_func(dynamic_args, symbolic_cost.path)\n\n def compute_cost(_t, _y, _q, _u, _p, _k):\n\n if len(_q) > 0:\n cost = compute_initial_cost(_y[0, :], _q[0, :], _p, _k) \\\n + compute_terminal_cost(_y[-1, :], _q[-1, :], _p, _k)\n else:\n cost = compute_initial_cost(_y[0, :], _q, _p, _k) + compute_terminal_cost(_y[-1, :], _q, _p, _k)\n\n path_cost = np.array([compute_path_cost(yi, ui, _p, _k) for yi, ui in zip(_y, _u)])\n cost += simps(path_cost, _t, even='last')\n\n return cost\n\n return compute_cost\n"
] | [
[
"scipy.integrate.simps"
]
] |
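`compile_cost` in the row above evaluates the running (path) cost on the solution's time grid and integrates it with composite Simpson's rule via `scipy.integrate.simps`. A minimal sketch of just that quadrature step follows; note that recent SciPy releases expose the routine as `scipy.integrate.simpson` and deprecate the `even=` keyword used above, and the integrand here is a toy function.

```python
# Minimal sketch: integrate a sampled running cost L(t) over the time grid
# with composite Simpson's rule, as compile_cost does for the path cost.
import numpy as np
from scipy.integrate import simpson  # named 'simps' in older SciPy

t = np.linspace(0.0, 2.0, 21)        # time samples (toy grid)
path_cost = t ** 2                   # toy integrand; exact integral is 8/3
integral = simpson(path_cost, x=t)
```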
ptrbortolotti/WISDEM | [
"2b7e44716d022e2f62140073dd078c5deeb8bf0a"
] | [
"wisdem/drivetrainse/rna.py"
] | [
"from __future__ import print_function\nimport numpy as np\nfrom openmdao.api import ExplicitComponent, Group, IndepVarComp\n\nfrom wisdem.commonse.utilities import hstack, vstack\nfrom wisdem.commonse.csystem import DirectionVector\nfrom wisdem.commonse import gravity\n\n# This is an extremely simple RNA mass calculator that should be used when DriveSE otherwise seems too complicated\n\n\nclass RNAMass(ExplicitComponent):\n def setup(self):\n\n # variables\n self.add_input('blades_mass', 0.0, units='kg', desc='mass of all blade')\n self.add_input('hub_mass', 0.0, units='kg', desc='mass of hub')\n self.add_input('nac_mass', 0.0, units='kg', desc='mass of nacelle')\n\n self.add_input('hub_cm', np.zeros(3), units='m', desc='location of hub center of mass relative to tower top in yaw-aligned c.s.')\n self.add_input('nac_cm', np.zeros(3), units='m', desc='location of nacelle center of mass relative to tower top in yaw-aligned c.s.')\n\n # order for all moments of inertia is (xx, yy, zz, xy, xz, yz) in the yaw-aligned coorinate system\n self.add_input('blades_I', np.zeros(6), units='kg*m**2', desc='mass moments of inertia of all blades about hub center')\n self.add_input('hub_I', np.zeros(6), units='kg*m**2', desc='mass moments of inertia of hub about its center of mass')\n self.add_input('nac_I', np.zeros(6), units='kg*m**2', desc='mass moments of inertia of nacelle about its center of mass')\n\n # outputs\n self.add_output('rotor_mass', 0.0, units='kg', desc='mass of blades and hub')\n self.add_output('rna_mass', 0.0, units='kg', desc='total mass of RNA')\n self.add_output('rna_cm', np.zeros(3), units='m', desc='location of RNA center of mass relative to tower top in yaw-aligned c.s.')\n self.add_output('rna_I_TT', np.zeros(6), units='kg*m**2', desc='mass moments of inertia of RNA about tower top in yaw-aligned coordinate system')\n\n self.declare_partials('*','*')\n\n def _assembleI(self, I):\n Ixx, Iyy, Izz, Ixy, Ixz, Iyz = I[0], I[1], I[2], I[3], I[4], I[5] \n return np.array([[Ixx, Ixy, Ixz], [Ixy, Iyy, Iyz], [Ixz, Iyz, Izz]])\n\n\n def _unassembleI(self, I):\n return np.array([I[0, 0], I[1, 1], I[2, 2], I[0, 1], I[0, 2], I[1, 2]])\n\n\n def compute(self, inputs, outputs):\n\n rotor_mass = inputs['blades_mass'] + inputs['hub_mass']\n nac_mass = inputs['nac_mass']\n\n # rna mass\n outputs['rotor_mass'] = rotor_mass\n outputs['rna_mass'] = rotor_mass + nac_mass\n\n # rna cm\n outputs['rna_cm'] = (rotor_mass*inputs['hub_cm'] + nac_mass*inputs['nac_cm'])/outputs['rna_mass']\n\n #TODO check if the use of assembleI and unassembleI functions are correct\n # rna I\n blades_I = self._assembleI(inputs['blades_I'])\n hub_I = self._assembleI(inputs['hub_I'])\n nac_I = self._assembleI(inputs['nac_I'])\n rotor_I = blades_I + hub_I\n\n R = inputs['hub_cm']\n rotor_I_TT = rotor_I + rotor_mass*(np.dot(R, R)*np.eye(3) - np.outer(R, R))\n\n R = inputs['nac_cm']\n nac_I_TT = nac_I + inputs['nac_mass']*(np.dot(R, R)*np.eye(3) - np.outer(R, R))\n\n outputs['rna_I_TT'] = self._unassembleI(rotor_I_TT + nac_I_TT)\n\n\n def compute_partials(self, inputs, J):\n\n blades_mass = inputs['blades_mass']\n hub_mass = inputs['hub_mass']\n nac_mass = inputs['nac_mass']\n hub_cm = inputs['hub_cm']\n nac_cm = inputs['nac_cm']\n hub_I = inputs['hub_I']\n nac_I = inputs['nac_I']\n rotor_mass = blades_mass+hub_mass\n rna_mass = rotor_mass + nac_mass\n\n \n\n # mass\n J['rotor_mass', 'blades_mass'] = 1.0\n J['rotor_mass', 'hub_mass'] = 1.0\n J['rotor_mass', 'nac_mass'] = 0.0\n J['rotor_mass', 'hub_cm'] = np.zeros(3)\n 
J['rotor_mass', 'nac_cm'] = np.zeros(3)\n J['rotor_mass', 'blades_I'] = np.zeros(6)\n J['rotor_mass', 'hub_I'] = np.zeros(6)\n J['rotor_mass', 'nac_I'] = np.zeros(6)\n\n J['rna_mass', 'blades_mass'] = 1.0\n J['rna_mass', 'hub_mass'] = 1.0\n J['rna_mass', 'nac_mass'] = 1.0\n J['rna_mass', 'hub_cm'] = np.zeros(3)\n J['rna_mass', 'nac_cm'] = np.zeros(3)\n J['rna_mass', 'blades_I'] = np.zeros(6)\n J['rna_mass', 'hub_I'] = np.zeros(6)\n J['rna_mass', 'nac_I'] = np.zeros(6)\n \n\n # cm\n numerator = (blades_mass+hub_mass)*hub_cm+nac_mass*nac_cm\n\n J['rna_cm', 'blades_mass'] = (rna_mass*hub_cm-numerator)/rna_mass**2\n J['rna_cm', 'hub_mass'] = (rna_mass*hub_cm-numerator)/rna_mass**2\n J['rna_cm', 'nac_mass'] = (rna_mass*nac_cm-numerator)/rna_mass**2\n J['rna_cm', 'hub_cm'] = rotor_mass/rna_mass*np.eye(3)\n J['rna_cm', 'nac_cm'] = nac_mass/rna_mass*np.eye(3)\n J['rna_cm', 'blades_I'] = np.zeros((3, 6))\n J['rna_cm', 'hub_I'] = np.zeros((3, 6))\n J['rna_cm', 'nac_I'] = np.zeros((3, 6))\n\n\n # I\n R = hub_cm\n const = self._unassembleI(np.dot(R, R)*np.eye(3) - np.outer(R, R))\n\n J['rna_I_TT', 'blades_mass'] = const\n J['rna_I_TT', 'hub_mass'] = const\n dI_drx = rotor_mass*self._unassembleI(2*R[0]*np.eye(3) - np.array([[2*R[0], R[1], R[2]], [R[1], 0.0, 0.0], [R[2], 0.0, 0.0]]))\n dI_dry = rotor_mass*self._unassembleI(2*R[1]*np.eye(3) - np.array([[0.0, R[0], 0.0], [R[0], 2*R[1], R[2]], [0.0, R[2], 0.0]]))\n dI_drz = rotor_mass*self._unassembleI(2*R[2]*np.eye(3) - np.array([[0.0, 0.0, R[0]], [0.0, 0.0, R[1]], [R[0], R[1], 2*R[2]]]))\n J['rna_I_TT', 'hub_cm'] = np.vstack([dI_drx, dI_dry, dI_drz]).T\n\n R = nac_cm\n const = self._unassembleI(np.dot(R, R)*np.eye(3) - np.outer(R, R))\n J['rna_I_TT', 'nac_mass'] = const\n dI_drx = nac_mass*self._unassembleI(2*R[0]*np.eye(3) - np.array([[2*R[0], R[1], R[2]], [R[1], 0.0, 0.0], [R[2], 0.0, 0.0]]))\n dI_dry = nac_mass*self._unassembleI(2*R[1]*np.eye(3) - np.array([[0.0, R[0], 0.0], [R[0], 2*R[1], R[2]], [0.0, R[2], 0.0]]))\n dI_drz = nac_mass*self._unassembleI(2*R[2]*np.eye(3) - np.array([[0.0, 0.0, R[0]], [0.0, 0.0, R[1]], [R[0], R[1], 2*R[2]]]))\n J['rna_I_TT', 'nac_cm'] = np.vstack([dI_drx, dI_dry, dI_drz]).T\n\n J['rna_I_TT', 'blades_I'] = np.eye(6)\n J['rna_I_TT', 'hub_I'] = np.eye(6)\n J['rna_I_TT', 'nac_I'] = np.eye(6)\n\n \n\n\nclass RotorLoads(ExplicitComponent):\n def setup(self):\n\n # variables\n self.add_input('F', np.zeros(3), units='N', desc='forces in hub-aligned coordinate system')\n self.add_input('M', np.zeros(3), units='N*m', desc='moments in hub-aligned coordinate system')\n self.add_input('hub_cm', np.zeros(3), units='m', desc='position of rotor hub relative to tower top in yaw-aligned c.s.')\n self.add_input('rna_mass', 0.0, units='kg', desc='mass of rotor nacelle assembly')\n self.add_input('rna_cm', np.zeros(3), units='m', desc='location of RNA center of mass relative to tower top in yaw-aligned c.s.')\n\n # # These are used for backwards compatibility - do not use\n # T = Float(iotype='in', desc='thrust in hub-aligned coordinate system') # THIS MEANS STILL YAWED THOUGH (Shaft tilt)\n # Q = Float(iotype='in', desc='torque in hub-aligned coordinate system')\n\n # parameters\n self.add_discrete_input('downwind', False)\n self.add_input('tilt', 0.0, units='deg')\n\n # out\n self.add_output('top_F', np.zeros(3), units='N') # in yaw-aligned\n self.add_output('top_M', np.zeros(3), units='N*m')\n\n self.declare_partials('top_F', ['F','M','hub_cm','rna_mass','rna_cm'])\n self.declare_partials('top_M', 
['F','M','hub_cm','rna_mass','rna_cm'])\n\n\n def compute(self, inputs, outputs, discrete_inputs, discrete_outputs):\n\n F = inputs['F']\n M = inputs['M']\n tilt = float(inputs['tilt'])\n \n F = DirectionVector.fromArray(F).hubToYaw(tilt)\n M = DirectionVector.fromArray(M).hubToYaw(tilt)\n\n # change x-direction if downwind\n hub_cm = np.copy(inputs['hub_cm'])\n rna_cm = np.copy(inputs['rna_cm'])\n if discrete_inputs['downwind']:\n hub_cm[0] *= -1\n rna_cm[0] *= -1\n hub_cm = DirectionVector.fromArray(hub_cm)\n rna_cm = DirectionVector.fromArray(rna_cm)\n self.save_rhub = hub_cm\n self.save_rcm = rna_cm\n\n # aerodynamic moments\n M = M + hub_cm.cross(F)\n self.saveF = F\n\n '''\n Removing this permanently gbarter 1/2020 because of too much confusion in TowerSE and Frame3DD\n From now on TowerSE will always add to loading of added mass items, including RNA\n \n # add weight loads\n F_w = DirectionVector(0.0, 0.0, -float(inputs['rna_mass'])*gravity)\n M_w = rna_cm.cross(F_w)\n self.saveF_w = F_w\n\n Fout = F + F_w\n\n if discrete_inputs['rna_weightM']:\n Mout = M + M_w\n else:\n Mout = M\n #REMOVE WEIGHT EFFECT TO ACCOUNT FOR P-Delta Effect\n print(\"!!!! No weight effect on rotor moments -TowerSE !!!!\")\n '''\n Fout = F\n Mout = M\n\n # put back in array\n outputs['top_F'] = np.array([Fout.x, Fout.y, Fout.z])\n outputs['top_M'] = np.array([Mout.x, Mout.y, Mout.z])\n\n def compute_partials(self, inputs, J, discrete_inputs):\n\n dF = DirectionVector.fromArray(inputs['F']).hubToYaw(inputs['tilt'])\n dFx, dFy, dFz = dF.dx, dF.dy, dF.dz\n\n dtopF_dFx = np.array([dFx['dx'], dFy['dx'], dFz['dx']])\n dtopF_dFy = np.array([dFx['dy'], dFy['dy'], dFz['dy']])\n dtopF_dFz = np.array([dFx['dz'], dFy['dz'], dFz['dz']])\n dtopF_dF = hstack([dtopF_dFx, dtopF_dFy, dtopF_dFz])\n dtopF_w_dm = np.array([0.0, 0.0, -gravity])\n\n #dtopF = hstack([dtopF_dF, np.zeros((3, 6)), dtopF_w_dm, np.zeros((3, 3))])\n\n\n dM = DirectionVector.fromArray(inputs['M']).hubToYaw(inputs['tilt'])\n dMx, dMy, dMz = dM.dx, dM.dy, dM.dz\n dMxcross, dMycross, dMzcross = self.save_rhub.cross_deriv(self.saveF, 'dr', 'dF')\n\n dtopM_dMx = np.array([dMx['dx'], dMy['dx'], dMz['dx']])\n dtopM_dMy = np.array([dMx['dy'], dMy['dy'], dMz['dy']])\n dtopM_dMz = np.array([dMx['dz'], dMy['dz'], dMz['dz']])\n dtopM_dM = hstack([dtopM_dMx, dtopM_dMy, dtopM_dMz])\n dM_dF = np.array([dMxcross['dF'], dMycross['dF'], dMzcross['dF']])\n\n dtopM_dFx = np.dot(dM_dF, dtopF_dFx)\n dtopM_dFy = np.dot(dM_dF, dtopF_dFy)\n dtopM_dFz = np.dot(dM_dF, dtopF_dFz)\n dtopM_dF = hstack([dtopM_dFx, dtopM_dFy, dtopM_dFz])\n dtopM_dr = np.array([dMxcross['dr'], dMycross['dr'], dMzcross['dr']])\n\n #dMx_w_cross, dMy_w_cross, dMz_w_cross = self.save_rcm.cross_deriv(self.saveF_w, 'dr', 'dF')\n\n #if discrete_inputs['rna_weightM']:\n # dtopM_drnacm = np.array([dMx_w_cross['dr'], dMy_w_cross['dr'], dMz_w_cross['dr']])\n # dtopM_dF_w = np.array([dMx_w_cross['dF'], dMy_w_cross['dF'], dMz_w_cross['dF']])\n #else:\n # dtopM_drnacm = np.zeros((3, 3))\n # dtopM_dF_w = np.zeros((3, 3))\n dtopM_drnacm = np.zeros((3, 3))\n dtopM_dF_w = np.zeros((3, 3))\n dtopM_dm = np.dot(dtopM_dF_w, dtopF_w_dm)\n\n if discrete_inputs['downwind']:\n dtopM_dr[:, 0] *= -1\n dtopM_drnacm[:, 0] *= -1\n\n #dtopM = hstack([dtopM_dF, dtopM_dM, dtopM_dr, dtopM_dm, dtopM_drnacm])\n\n \n J['top_F', 'F'] = dtopF_dF\n J['top_F', 'M'] = np.zeros((3, 3))\n J['top_F', 'hub_cm'] = np.zeros((3, 3))\n J['top_F', 'rna_mass'] = dtopF_w_dm\n J['top_F', 'rna_cm'] = np.zeros((3, 3))\n\n J['top_M', 'F'] = dtopM_dF\n 
J['top_M', 'M'] = dtopM_dM\n J['top_M', 'hub_cm'] = dtopM_dr\n J['top_M', 'rna_mass'] = dtopM_dm\n J['top_M', 'rna_cm'] = dtopM_drnacm\n\n \n\n\nclass RNA(Group):\n def initialize(self):\n self.options.declare('nLC')\n \n def setup(self):\n nLC = self.options['nLC']\n \n self.add_subsystem('mass', RNAMass(), promotes=['*'])\n for k in range(nLC):\n lc = '' if nLC==1 else str(k+1)\n self.add_subsystem('loads'+lc, RotorLoads(), promotes=['rna_mass','rna_cm','hub_cm','downwind','tilt'])\n\n \n"
] | [
[
"numpy.vstack",
"numpy.eye",
"numpy.zeros",
"numpy.copy",
"numpy.array",
"numpy.dot",
"numpy.outer"
]
] |
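`RNAMass.compute` in the row above shifts each component's inertia tensor from its own centre of mass to the tower top with the parallel-axis theorem, `I_tt = I_cm + m(|R|^2 E - R R^T)`, then flattens the result back to the `(xx, yy, zz, xy, xz, yz)` ordering. A minimal numeric sketch of that shift, with all values purely illustrative:

```python
# Minimal sketch of the parallel-axis shift used in RNAMass.compute.
import numpy as np

m = 1.0e5                                # component mass [kg] (illustrative)
I_cm = np.diag([1.0e6, 2.0e6, 3.0e6])    # inertia about its own CM [kg*m^2]
R = np.array([-5.0, 0.0, 2.0])           # CM offset from tower top [m]

I_tt = I_cm + m * (np.dot(R, R) * np.eye(3) - np.outer(R, R))

# flatten back to (xx, yy, zz, xy, xz, yz), as _unassembleI does
I_flat = np.array([I_tt[0, 0], I_tt[1, 1], I_tt[2, 2],
                   I_tt[0, 1], I_tt[0, 2], I_tt[1, 2]])
```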
baajur/google-research | [
"9049acf9246c1b75170f0c6757e62a8f619a9db6"
] | [
"kws_streaming/layers/stream_test.py"
] | [
"# coding=utf-8\n# Copyright 2020 The Google Research Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Tests for kws_streaming.layers.stream.\"\"\"\n\nimport numpy as np\nfrom kws_streaming.layers import stream\nfrom kws_streaming.layers.compat import tf\nfrom kws_streaming.layers.compat import tf1\nfrom kws_streaming.layers.modes import Modes\nfrom kws_streaming.models import utils\ntf1.disable_eager_execution()\n\n\n# Toy example which require signal processing in time\nclass Sum(tf.keras.layers.Layer):\n \"\"\"Applies Sum on time_dim.\"\"\"\n\n def __init__(self, time_dim=1, **kwargs):\n super(Sum, self).__init__(**kwargs)\n self.time_dim = time_dim\n\n def call(self, inputs):\n return tf.keras.backend.sum(inputs, axis=self.time_dim)\n\n def get_config(self):\n config = {\"time_dim\": self.time_dim}\n base_config = super(Sum, self).get_config()\n return dict(list(base_config.items()) + list(config.items()))\n\n\nclass StreamTest(tf.test.TestCase):\n\n def test_streaming_with_effective_tdim(self):\n time_size = 10\n feature_size = 3\n batch_size = 1\n\n time_dim = 1 # index of time dimensions\n ring_buffer_size_in_time_dim = 3 # effective size of aperture in time dim\n\n inputs = tf.keras.layers.Input(\n shape=(time_size, feature_size),\n batch_size=batch_size,\n name=\"inp_sequence\")\n\n mode = Modes.TRAINING\n\n # in streaming mode it will create a\n # ring buffer with time dim size ring_buffer_size_in_time_dim\n outputs = stream.Stream(\n cell=Sum(time_dim=time_dim),\n mode=mode,\n ring_buffer_size_in_time_dim=ring_buffer_size_in_time_dim)(inputs)\n model_train = tf.keras.Model(inputs, outputs)\n model_train.summary()\n\n mode = Modes.STREAM_EXTERNAL_STATE_INFERENCE\n input_tensors = [\n tf.keras.layers.Input(\n shape=(\n 1, # time dim is size 1 in streaming mode\n feature_size,\n ), batch_size=batch_size, name=\"inp_stream\")\n ]\n # convert non streaming model to streaming one\n model_stream = utils.convert_to_inference_model(model_train,\n input_tensors, mode)\n model_stream.summary()\n\n # second input tostream model is a state, so we can use its shape\n input_state_np = np.zeros(model_stream.inputs[1].shape, dtype=np.float32)\n\n # input test data\n non_stream_input = np.random.randint(\n 1, 10, size=(batch_size, time_size, feature_size))\n\n # run streaming inference\n # iterate over time dim sample by sample\n for i in range(input_state_np.shape[1]):\n input_stream_np = np.expand_dims(non_stream_input[0][i], 0)\n input_stream_np = np.expand_dims(input_stream_np, 1)\n input_stream_np = input_stream_np.astype(np.float32)\n output_stream_np, output_state_np = model_stream.predict(\n [input_stream_np, input_state_np])\n input_state_np = output_state_np # update input state\n\n # emulate sliding window summation\n target = np.sum(\n non_stream_input[:, max(0, i - ring_buffer_size_in_time_dim):i + 1],\n axis=time_dim)\n self.assertAllEqual(target, output_stream_np)\n\n\nif __name__ == \"__main__\":\n tf.test.main()\n"
] | [
[
"numpy.random.randint",
"numpy.expand_dims",
"numpy.zeros"
]
] |
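The streaming test in the row above feeds the converted model one time step at a time and checks each output against a plain sliding-window sum over the original sequence. The reference computation it compares against reduces to a few lines of NumPy; the sketch below uses the same shapes as the test (batch 1, 10 time steps, 3 features, ring buffer of 3) and the same slice expression.

```python
# Minimal sketch of the reference sliding-window summation from the test.
import numpy as np

ring_buffer_size_in_time_dim = 3
x = np.random.randint(1, 10, size=(1, 10, 3))   # (batch, time, features)

targets = []
for i in range(x.shape[1]):
    # same slice the test uses to emulate the ring buffer contents at step i
    window = x[:, max(0, i - ring_buffer_size_in_time_dim):i + 1]
    targets.append(np.sum(window, axis=1))
```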
michaelsilverstein/scikit-bio | [
"876efcf688a8f15e89bb70fa835a2f2a84b534c1"
] | [
"skbio/stats/distance/tests/test_anosim.py"
] | [
"# ----------------------------------------------------------------------------\n# Copyright (c) 2013--, scikit-bio development team.\n#\n# Distributed under the terms of the Modified BSD License.\n#\n# The full license is in the file COPYING.txt, distributed with this software.\n# ----------------------------------------------------------------------------\n\nimport io\nfrom functools import partial\nfrom unittest import TestCase, main\n\nimport numpy as np\nimport pandas as pd\nfrom pandas.util.testing import assert_series_equal\n\nfrom skbio import DistanceMatrix\nfrom skbio.stats.distance import anosim\n\n\nclass TestANOSIM(TestCase):\n \"\"\"All results were verified with R (vegan::anosim).\"\"\"\n\n def setUp(self):\n # Distance matrices with and without ties in the ranks, with 2 groups\n # of equal size.\n dm_ids = ['s1', 's2', 's3', 's4']\n self.grouping_equal = ['Control', 'Control', 'Fast', 'Fast']\n self.df = pd.read_csv(\n io.StringIO('ID,Group\\ns2,Control\\ns3,Fast\\ns4,Fast\\ns5,Control\\n'\n 's1,Control'), index_col=0)\n\n self.dm_ties = DistanceMatrix([[0, 1, 1, 4],\n [1, 0, 3, 2],\n [1, 3, 0, 3],\n [4, 2, 3, 0]], dm_ids)\n\n self.dm_no_ties = DistanceMatrix([[0, 1, 5, 4],\n [1, 0, 3, 2],\n [5, 3, 0, 3],\n [4, 2, 3, 0]], dm_ids)\n\n # Test with 3 groups of unequal size. This data also generates a\n # negative R statistic.\n self.grouping_unequal = ['Control', 'Treatment1', 'Treatment2',\n 'Treatment1', 'Control', 'Control']\n\n # Equivalent grouping but with different labels -- groups should be\n # assigned different integer labels but results should be the same.\n self.grouping_unequal_relabeled = ['z', 42, 'abc', 42, 'z', 'z']\n\n self.dm_unequal = DistanceMatrix(\n [[0.0, 1.0, 0.1, 0.5678, 1.0, 1.0],\n [1.0, 0.0, 0.002, 0.42, 0.998, 0.0],\n [0.1, 0.002, 0.0, 1.0, 0.123, 1.0],\n [0.5678, 0.42, 1.0, 0.0, 0.123, 0.43],\n [1.0, 0.998, 0.123, 0.123, 0.0, 0.5],\n [1.0, 0.0, 1.0, 0.43, 0.5, 0.0]],\n ['s1', 's2', 's3', 's4', 's5', 's6'])\n\n # Expected series index is the same across all tests.\n self.exp_index = ['method name', 'test statistic name', 'sample size',\n 'number of groups', 'test statistic', 'p-value',\n 'number of permutations']\n\n # Stricter series equality testing than the default.\n self.assert_series_equal = partial(assert_series_equal,\n check_index_type=True,\n check_series_type=True)\n\n def test_ties(self):\n # Ensure we get the same results if we rerun the method using the same\n # inputs. 
Also ensure we get the same results if we run the method\n # using a grouping vector or a data frame with equivalent groupings.\n exp = pd.Series(index=self.exp_index,\n data=['ANOSIM', 'R', 4, 2, 0.25, 0.671, 999],\n name='ANOSIM results')\n\n for _ in range(2):\n np.random.seed(0)\n obs = anosim(self.dm_ties, self.grouping_equal)\n self.assert_series_equal(obs, exp)\n\n for _ in range(2):\n np.random.seed(0)\n obs = anosim(self.dm_ties, self.df, column='Group')\n self.assert_series_equal(obs, exp)\n\n def test_no_ties(self):\n exp = pd.Series(index=self.exp_index,\n data=['ANOSIM', 'R', 4, 2, 0.625, 0.332, 999],\n name='ANOSIM results')\n np.random.seed(0)\n obs = anosim(self.dm_no_ties, self.grouping_equal)\n self.assert_series_equal(obs, exp)\n\n def test_no_permutations(self):\n exp = pd.Series(index=self.exp_index,\n data=['ANOSIM', 'R', 4, 2, 0.625, np.nan, 0],\n name='ANOSIM results')\n obs = anosim(self.dm_no_ties, self.grouping_equal, permutations=0)\n self.assert_series_equal(obs, exp)\n\n def test_unequal_group_sizes(self):\n exp = pd.Series(index=self.exp_index,\n data=['ANOSIM', 'R', 6, 3, -0.363636, 0.878, 999],\n name='ANOSIM results')\n\n np.random.seed(0)\n obs = anosim(self.dm_unequal, self.grouping_unequal)\n self.assert_series_equal(obs, exp)\n\n np.random.seed(0)\n obs = anosim(self.dm_unequal, self.grouping_unequal_relabeled)\n self.assert_series_equal(obs, exp)\n\n\nif __name__ == '__main__':\n main()\n"
] | [
[
"pandas.Series",
"numpy.random.seed"
]
] |
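The ANOSIM tests in the row above seed NumPy's RNG before every call so the permutation p-value is reproducible, and pass the grouping either as a plain list or as a DataFrame column. A minimal usage sketch built from the same tied distance matrix used in the tests:

```python
# Minimal usage sketch of skbio.stats.distance.anosim, mirroring the tests above.
import numpy as np
from skbio import DistanceMatrix
from skbio.stats.distance import anosim

dm = DistanceMatrix([[0, 1, 1, 4],
                     [1, 0, 3, 2],
                     [1, 3, 0, 3],
                     [4, 2, 3, 0]], ['s1', 's2', 's3', 's4'])

np.random.seed(0)  # makes the permutation p-value reproducible
result = anosim(dm, ['Control', 'Control', 'Fast', 'Fast'], permutations=999)
print(result['test statistic'], result['p-value'])  # returned as a pandas Series
```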
eldar/acsm | [
"04069e8bb4c12185473dc10c3355e5367fa98968",
"04069e8bb4c12185473dc10c3355e5367fa98968"
] | [
"acsm/benchmark/pck_eval.py",
"acsm/data/base2.py"
] | [
"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nimport json\nimport os.path as osp\nimport numpy as np\nimport pprint\nimport pdb\nfrom . import evaluate_pr\nimport scipy.io as sio\n'''\nintervals : Define thresholds to evaluate pck score \nkpnames : Keypoint names\nbench_stats : stats\n'''\n\n\ndef remove_nans(x):\n return x[~np.isnan(x)]\n\n\ndef pck_at_intervals(intervals, error):\n accuracy = []\n for interval in intervals:\n accuracy.append(float(np.round(np.mean(np.array(error) < interval), 3)))\n return accuracy\n\n\ndef ck_at_interval(intervals, error):\n cks = []\n for interval in intervals:\n cks.append(np.array(error) < interval)\n return cks # len(intervals) x error.shape\n\n\ndef benchmark_all_instances(intervals, kpnames, bench_stats, img_size):\n stats = {}\n plot_intervals = [0.025 * i for i in range(40)]\n kp_error_nan_mask = bench_stats['kps_err'][:, :, 1] * 1\n pdb.set_trace()\n # valid_inds =\n kp_error_nan_mask[kp_error_nan_mask < 0.5] = 'nan'\n bench_stats_kps_err = bench_stats['kps_err'] / img_size\n mean_kp_error = bench_stats_kps_err[:, :, 0] * kp_error_nan_mask\n stats['mean_kp_err'] = [\n float(t) for t in np.round(np.nanmean(mean_kp_error, 0), 4)\n ]\n stats['median_kp_err'] = [\n float(t) for t in np.round(np.nanmedian(mean_kp_error, 0), 4)\n ]\n stats['std_kp_err'] = [\n float(t) for t in np.round(np.nanstd(mean_kp_error, 0), 4)\n ]\n stats['data'] = {}\n stats['pck'] = {}\n stats['interval'] = intervals\n stats['kp_names'] = kpnames\n stats['eval_params'] = {}\n\n for kpx, kp_name in enumerate(kpnames):\n stats['data'][kp_name] = remove_nans(mean_kp_error[:, kpx])\n stats['data'][kp_name].sort()\n stats['data'][kp_name] = [float(t) for t in stats['data'][kp_name]]\n stats['pck'][kp_name] = pck_at_intervals(\n intervals, stats['data'][kp_name]\n )\n stats['eval_params'][kp_name] = {}\n stats['eval_params'][kp_name]['thresh'] = plot_intervals\n stats['eval_params'][kp_name]['acc'] = pck_at_intervals(\n plot_intervals, stats['data'][kp_name]\n )\n\n return stats\n\n\ndef benchmark_all_instances_2(\n intervals, kpnames, bench_stats, img_size, select_kp_ids=None\n):\n stats = {}\n plot_intervals = [0.025 * i for i in range(40)]\n kp_error_nan_mask = bench_stats['kps_err'][:, :, 1] * 1\n\n # valid_inds =\n kp_error_nan_mask[kp_error_nan_mask < 0.5] = 'nan'\n bench_stats_kps_err = bench_stats['kps_err'] / img_size\n mean_kp_error = bench_stats_kps_err[:, :, 0] * kp_error_nan_mask\n stats['mean_kp_err'] = [\n float(t) for t in np.round(np.nanmean(mean_kp_error, 0), 4)\n ]\n stats['median_kp_err'] = [\n float(t) for t in np.round(np.nanmedian(mean_kp_error, 0), 4)\n ]\n stats['std_kp_err'] = [\n float(t) for t in np.round(np.nanstd(mean_kp_error, 0), 4)\n ]\n stats['data'] = {}\n stats['pck'] = {}\n stats['interval'] = intervals\n stats['kp_names'] = kpnames\n stats['eval_params'] = {}\n\n for kpx, kp_name in enumerate(kpnames):\n stats['data'][kp_name] = remove_nans(mean_kp_error[:, kpx])\n stats['data'][kp_name].sort()\n stats['data'][kp_name] = [float(t) for t in stats['data'][kp_name]]\n stats['pck'][kp_name] = pck_at_intervals(\n intervals, stats['data'][kp_name]\n )\n stats['eval_params'][kp_name] = {}\n stats['eval_params'][kp_name]['thresh'] = plot_intervals\n stats['eval_params'][kp_name]['acc'] = pck_at_intervals(\n plot_intervals, stats['data'][kp_name]\n )\n\n # pdb.set_trace()\n if select_kp_ids is not None:\n for group_name in select_kp_ids.keys():\n kp_ids = select_kp_ids[group_name]\n 
select_kp_error = mean_kp_error[:, kp_ids]\n samples = remove_nans(select_kp_error.reshape(-1))\n stats['eval_params'][\n '{}_acc'.format(group_name)\n ] = pck_at_intervals(intervals, samples.tolist())\n\n samples = remove_nans(mean_kp_error.reshape(-1))\n stats['eval_params']['acc'] = pck_at_intervals(intervals, samples.tolist())\n return stats\n\n\ndef benchmark_vis_instances(\n intervals, dist_thresholds, kpnames, bench_stats, img_size\n):\n stats = {}\n stats['data'] = {}\n stats['eval_params'] = {}\n stats['pck'] = {}\n stats['interval'] = intervals\n bench_stats_kps_error = 1 * bench_stats['kps_err']\n bench_stats_kps_error[:, :, 0] = bench_stats_kps_error[:, :, 0] / img_size\n ndata_points, nkps, _ = bench_stats['kps_err'].shape\n\n kps_vis1 = bench_stats['kps1'][:, :, 2] > 200\n kps_vis2 = bench_stats['kps2'][:, :, 2] > 200\n stats['eval_params']['total'] = np.sum(kps_vis1, axis=0) + 1E-10\n for dx, dist_thresh in enumerate(dist_thresholds):\n stats['eval_params'][dx] = {}\n stats['eval_params'][dx]['correct'] = np.zeros(\n (len(kpnames), len(intervals))\n )\n for kpx, kp_name in enumerate(kpnames):\n valid_inds = np.where(\n bench_stats_kps_error[:, kpx, 2] < dist_thresh\n )[0].tolist()\n common_inds = np.where(bench_stats_kps_error[:, kpx, 1] > 0.5\n )[0].tolist()\n valid_inds = set(valid_inds)\n common_inds = set(common_inds)\n ck = ck_at_interval(intervals, bench_stats_kps_error[:, kpx, 0])\n ck = np.stack(ck, axis=1)\n ex = np.array(list(common_inds & valid_inds))\n if len(ex) > 0:\n stats['eval_params'][dx]['correct'][kpx] += np.sum(\n ck[ex, :], axis=0\n )\n\n kps_vis1_ind = np.where(kps_vis1[:, kpx])[0]\n kps_vis2_ind = np.where(kps_vis2[:, kpx])[0]\n ex = np.array(list(set(kps_vis1_ind) - set(kps_vis2_ind))\n ).astype(np.int)\n if len(ex) > 0:\n stats['eval_params'][dx]['correct'][kpx] += np.sum(\n bench_stats_kps_error[ex, kpx, 2] > dist_thresh\n )\n stats['eval_params'][dx]['acc'] = stats['eval_params'][dx]['correct'] / \\\n stats['eval_params']['total'].reshape(-1, 1)\n return stats\n\n\ndef collate_all_instances(intervals, kp_names, bench_stats, img_size):\n bench_stats_kps_error = bench_stats['kps_err'] * 1\n bench_stats_kps_error[:, :, 0] = bench_stats_kps_error[:, :, 0] / img_size\n prediction_error = [] # N x 1\n prediction_score = [] # N x 1\n prediction_label = [] # N x len(intervals)\n gt_label = []\n\n kps_vis1 = bench_stats['kps1'][:, :, 2] > 200\n kps_vis2 = bench_stats['kps2'][:, :, 2] > 200\n\n for kpx, kp_name in enumerate(kp_names):\n common_inds = np.where(bench_stats_kps_error[:, kpx, 1] > 0.5\n )[0].tolist()\n ck = ck_at_interval(intervals, bench_stats_kps_error[:, kpx, 0])\n ck = np.stack(ck, axis=1)\n ex = np.array(list(common_inds))\n if len(ex) > 0:\n prediction_error.append(bench_stats_kps_error[ex, kpx, 0])\n prediction_score.append(bench_stats_kps_error[ex, kpx, 2])\n prediction_label.append(ck[ex, :] * 1)\n gt_label.append(ck[ex, :] * 0 + 1)\n\n kps_vis1_ind = np.where(kps_vis1[:, kpx])[0]\n kps_vis2_ind = np.where(kps_vis2[:, kpx])[0]\n ex = np.array(list(set(kps_vis1_ind) - set(kps_vis2_ind))\n ).astype(np.int)\n if len(ex) > 0:\n prediction_error.append(bench_stats_kps_error[ex, kpx, 0])\n prediction_score.append(bench_stats_kps_error[ex, kpx, 2])\n prediction_label.append(ck[ex, :] * 0)\n gt_label.append(ck[ex, :] * 0)\n\n prediction_error = np.concatenate(prediction_error, axis=0)\n prediction_score = np.concatenate(prediction_score, axis=0)\n prediction_label = np.concatenate(prediction_label, axis=0)\n gt_label = np.concatenate(gt_label, 
axis=0)\n\n stats = {}\n stats['pred_label'] = prediction_label\n stats['gt_label'] = gt_label\n stats['score'] = prediction_score # lower the score better it is.\n return stats\n\n\nkp_eval_thresholds = [0.05, 0.1, 0.2]\n# kp_eval_thresholds = [0.05, 1.0]\n'''\nselect_kp_ids dict is a group of kp points\n'''\n\n\ndef run_evaluation(\n bench_stats, n_iter, results_dir, img_size, kp_names, dist_thresholds,\n select_kp_ids\n):\n json_file = osp.join(results_dir, 'stats_m1_{}.json'.format(n_iter))\n stats_m1 = benchmark_all_instances_2(\n kp_eval_thresholds, kp_names, bench_stats, img_size, select_kp_ids\n )\n stats = stats_m1\n print(' Method 1 | Keypoint | Median Err | Mean Err | STD Err')\n pprint.pprint(\n zip(\n stats['kp_names'], stats['median_kp_err'], stats['mean_kp_err'],\n stats['std_kp_err']\n )\n )\n print('PCK Values')\n pprint.pprint(stats['interval'])\n pprint.pprint(stats['pck'])\n mean_pck = {}\n # pdb.set_trace()\n for i, thresh in enumerate(stats['interval']):\n mean_pck[thresh] = []\n for kp_name in kp_names:\n mean_pck[thresh].append(stats['pck'][kp_name][i])\n\n mean_pck = {k: np.mean(np.array(t)) for k, t in mean_pck.items()}\n pprint.pprint('Mean PCK ')\n pprint.pprint(mean_pck)\n\n print('Instance Average **** ')\n pprint.pprint(stats['eval_params']['acc'])\n for group_name in select_kp_ids.keys():\n print('Instance Average {} **** '.format(group_name))\n pprint.pprint(stats['eval_params']['{}_acc'.format(group_name)])\n\n print('########################## ')\n\n with open(json_file, 'w') as f:\n json.dump(stats, f)\n\n if dist_thresholds is not None:\n stats_m1 = benchmark_vis_instances(\n kp_eval_thresholds, dist_thresholds, kp_names, bench_stats, img_size\n )\n stats = stats_m1\n\n mean_pck = {}\n # points_per_kp = {k: v for k, v in zip(kp_names, stats['eval_params'][0]['npoints'])}\n # points_per_thresh = np.sum(np.array(points_per_kp.values()))\n for dx, thresh in enumerate(dist_thresholds):\n mean_pck[dx] = {}\n for i, thresh in enumerate(stats['interval']):\n mean_pck[dx][thresh] = []\n for kx, kp_name in enumerate(kp_names):\n mean_pck[dx][thresh].append(\n stats['eval_params'][dx]['acc'][kx, i]\n )\n\n mean_pck[dx] = {\n k: np.round(np.mean(np.array(t)), 4)\n for k, t in mean_pck[dx].items()\n }\n\n # pdb.set_trace()\n print('***** Distance Thresholds ***** ')\n pprint.pprint('Mean PCK Acc')\n pprint.pprint(mean_pck)\n # pprint.pprint(points_per_kp)\n\n stats = collate_all_instances(\n kp_eval_thresholds, kp_names, bench_stats, img_size\n )\n pr_stats = evaluate_pr.inst_bench_evaluate(\n stats['pred_label'], stats['gt_label'], stats['score']\n )\n pr_mat_file = osp.join(results_dir, 'pr_{}.mat'.format(n_iter))\n\n sio.savemat(pr_mat_file, pr_stats)\n return stats_m1\n",
"\"\"\"\nCode borrowed from https://github.com/shubhtuls/toe/blob/master/data/base.py\nBase data loading class.\nShould output:\n - img: B X 3 X H X W\n - mask: B X H X W\n - kp (optional): B X nKp X 2\n - sfm_pose (optional): B X 7 (s, tr, q)\n (kp, sfm_pose) correspond to image coordinates in [-1, 1]\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os.path as osp\nimport numpy as np\n\nimport imageio\nimport scipy.linalg\nimport scipy.ndimage.interpolation\nfrom absl import flags, app\nfrom skimage import measure\nimport cv2\nfrom scipy import ndimage\nfrom skimage import measure\nimport torch\nfrom torch.utils.data import Dataset\nfrom torch.utils.data import DataLoader\nfrom torch.utils.data.dataloader import default_collate\nimport pdb\nfrom ..utils import image as image_utils\nfrom ..utils import transformations\n\nflags.DEFINE_boolean('dl_shuffle_inds', False, 'Shuffle inds')\nflags.DEFINE_integer('img_size', 256, 'image size')\nflags.DEFINE_enum(\n 'split', 'train', ['train', 'val', 'all', 'test'], 'eval split'\n)\nflags.DEFINE_float(\n 'padding_frac', 0.05, 'bbox is increased by this fraction of max_dim'\n)\nflags.DEFINE_float(\n 'jitter_frac', 0.05, 'bbox is jittered by this fraction of max_dim'\n)\n\nflags.DEFINE_boolean('tight_crop', False, 'Use Tight crops')\nflags.DEFINE_boolean('flip_train', False, 'Mirror Images while training')\nflags.DEFINE_integer('n_contour', 1000, 'N random samples from the contours')\nflags.DEFINE_boolean(\n 'dl_out_pascal', True, 'Use pascal (implies use keypoints)'\n)\nflags.DEFINE_boolean('dl_out_imnet', True, 'Use iment')\nflags.DEFINE_string('pascal_class', 'horse', 'PASCAL VOC category name/ Cub')\nflags.DEFINE_integer('num_kps', 12, 'Number of keypoints')\n\n\n# -------------- Dataset ------------- #\n# ------------------------------------ #\nclass BaseDataset(Dataset):\n ''' \n img, mask, kp, pose data loader\n '''\n def __init__(self, opts, filter_key=None):\n # Child class should define/load:\n # self.img_dir\n # self.anno\n # self.kp_perm (optional)\n # self.anno_sfm (optional)\n self._out_kp = False\n self._out_pose = False\n self._shuffle_inds = opts.dl_shuffle_inds\n if self._shuffle_inds:\n self._index_perm = None\n\n self.opts = opts\n self.img_size = opts.img_size\n self.jitter_frac = opts.jitter_frac\n self.padding_frac = opts.padding_frac\n self.filter_key = filter_key\n self.n_contour = opts.n_contour\n\n def normalize_kp(self, kp, img_h, img_w):\n vis = kp[:, 2, None] > 0\n new_kp = np.stack(\n [2 * (kp[:, 0] / img_w) - 1, 2 * (kp[:, 1] / img_h) - 1, kp[:, 2]]\n ).T\n new_kp = vis * new_kp\n\n return new_kp\n\n def normalize_pose(self, sfm_pose, img_h, img_w):\n sfm_pose[0] *= (1.0 / img_w + 1.0 / img_h)\n sfm_pose[1][0] = 2.0 * (sfm_pose[1][0] / img_w) - 1\n sfm_pose[1][1] = 2.0 * (sfm_pose[1][1] / img_h) - 1\n return sfm_pose\n\n def crop_image(self, img, mask, bbox, kp=None, sfm_pose=None):\n # crop image and mask and translate kps\n img = image_utils.crop(img, bbox, bgval=1)\n mask = image_utils.crop(mask, bbox, bgval=0)\n if (kp is not None):\n vis = kp[:, 2] > 0\n kp[vis, 0] -= bbox[0]\n kp[vis, 1] -= bbox[1]\n if sfm_pose is not None:\n sfm_pose[1][0] -= bbox[0]\n sfm_pose[1][1] -= bbox[1]\n return img, mask, kp, sfm_pose\n\n def scale_image(self, img, mask, kp=None, sfm_pose=None):\n # Scale image so largest bbox size is img_size\n bwidth = np.shape(img)[0]\n bheight = np.shape(img)[1]\n scale = self.img_size / float(max(bwidth, bheight))\n 
img_scale, _ = image_utils.resize_img(img, scale)\n\n mask_scale, _ = image_utils.resize_img(mask, scale)\n if kp is not None:\n vis = kp[:, 2] > 0\n kp[vis, :2] *= scale\n if sfm_pose is not None:\n sfm_pose[0] *= scale\n sfm_pose[1] *= scale\n\n return img_scale, mask_scale, kp, sfm_pose\n\n def mirror_image(self, img, mask, kp=None, sfm_pose=None):\n if np.random.rand(1) > 0.5:\n # Need copy bc torch collate doesnt like neg strides\n img_flip = img[:, ::-1, :].copy()\n mask_flip = mask[:, ::-1].copy()\n\n if kp is not None:\n kp_perm = self.kp_perm\n # Flip kps.\n new_x = img.shape[1] - kp[:, 0] - 1\n kp_flip = np.hstack((new_x[:, None], kp[:, 1:]))\n if kp_perm is not None:\n kp_flip = kp_flip[kp_perm, :]\n kp = kp_flip\n\n if sfm_pose is not None:\n # Flip sfm_pose Rot.\n R = transformations.quaternion_matrix(sfm_pose[2])\n flip_R = np.diag([-1, 1, 1,\n 1]).dot(R.dot(np.diag([-1, 1, 1, 1])))\n sfm_pose[2] = transformations.quaternion_from_matrix(\n flip_R, isprecise=True\n )\n # Flip tx\n tx = img.shape[1] - sfm_pose[1][0] - 1\n sfm_pose[1][0] = tx\n\n return img_flip, mask_flip, kp, sfm_pose\n else:\n return img, mask, kp, sfm_pose\n\n def __len__(self):\n return self.num_imgs\n\n def forward_img(self, index):\n data = self.anno[index]\n\n img_path = osp.join(self.img_dir, str(data.rel_path))\n img = imageio.imread(img_path) / 255.0\n # Some are grayscale:\n if len(img.shape) == 2:\n img = np.repeat(np.expand_dims(img, 2), 3, axis=2)\n mask = np.expand_dims(data.mask, 2)\n\n # Adjust to 0 indexing\n bbox = np.array(\n [data.bbox.x1, data.bbox.y1, data.bbox.x2, data.bbox.y2], float\n ) - 1\n\n if self._out_pose:\n data_sfm = self.anno_sfm[index]\n # sfm_pose = (sfm_c, sfm_t, sfm_r)\n sfm_pose = [\n np.copy(data_sfm.scale),\n np.copy(data_sfm.trans),\n np.copy(data_sfm.rot)\n ]\n sfm_rot = np.pad(sfm_pose[2], (0, 1), 'constant')\n sfm_rot[3, 3] = 1\n sfm_pose[2] = transformations.quaternion_from_matrix(\n sfm_rot, isprecise=True\n )\n else:\n sfm_pose = None\n\n if self._out_kp:\n parts = data.parts.T.astype(float)\n kp = np.copy(parts)\n vis = kp[:, 2] > 0\n # 0 indexed from 1 indexed\n kp[vis, :2] -= 1\n kp[np.logical_not(vis), :2] = 0\n else:\n kp = None\n\n # print(kp.shape)\n # if len(kp) == 16:\n # pdb.set_trace()\n\n # Peturb bbox\n if self.opts.split == 'train':\n bbox = image_utils.peturb_bbox(\n bbox, pf=self.padding_frac, jf=self.jitter_frac\n )\n else:\n bbox = image_utils.peturb_bbox(bbox, pf=self.padding_frac, jf=0)\n bbox = image_utils.square_bbox(bbox)\n\n # crop image around bbox, translate kps\n img, mask, kp, sfm_pose = self.crop_image(\n img, mask, bbox, kp=kp, sfm_pose=sfm_pose\n )\n\n # scale image, and mask. 
And scale kps.\n img, mask, kp, sfm_pose = self.scale_image(\n img, mask, kp=kp, sfm_pose=sfm_pose\n )\n\n # Mirror image on random.\n if self.opts.split == 'train':\n img, mask, kp, sfm_pose = self.mirror_image(\n img, mask, kp=kp, sfm_pose=sfm_pose\n )\n # Normalize kp to be [-1, 1]\n img_h, img_w = img.shape[:2]\n if self._out_kp:\n kp = self.normalize_kp(kp, img_h, img_w)\n if self._out_pose:\n sfm_pose = self.normalize_pose(sfm_pose, img_h, img_w)\n\n # Finally transpose the image to 3xHxW\n img = np.transpose(img, (2, 0, 1))\n\n return img, mask, kp, sfm_pose\n\n def _filter(self, elem):\n if self.filter_key is not None:\n if self.filter_key not in elem.keys():\n print('Bad filter key %s' % self.filter_key)\n import ipdb\n ipdb.set_trace()\n if self.filter_key == 'sfm_pose':\n # Return both vis and sfm_pose\n vis = elem['kp'][:, 2]\n elem = {\n 'vis': vis,\n 'sfm_pose': elem['sfm_pose'],\n }\n else:\n elem = elem[self.filter_key]\n return elem\n\n def _sample_contour(\n self,\n mask,\n ):\n # indices_y, indices_x = np.where(mask)\n # npoints = len(indices_y)\n contour = measure.find_contours(mask, 0)\n contour = np.concatenate(contour)\n sample_size = self.n_contour\n\n def offset_and_clip_contour(contour, offset, img_size):\n contour = contour + offset\n contour = np.clip(contour, a_min=0, a_max=img_size - 1)\n return contour\n\n offsets = np.array(\n [\n [0, 0],\n [0, 1],\n [0, 2],\n [0, -1],\n [0, -2],\n [1, 0],\n [2, 0],\n [-1, 0],\n [-2, 0],\n [-1, -1],\n [-2, -2],\n [1, 1],\n [2, 2],\n [-1, 1],\n [-2, 2],\n [1, -1],\n [2, -2],\n ]\n )\n\n new_contours = []\n for offset in offsets:\n temp_contour = offset_and_clip_contour(\n contour, offset.reshape(-1, 2), self.img_size\n )\n new_contours.append(temp_contour)\n\n new_contours = np.concatenate(new_contours)\n # contour_mask = mask * 0\n # new_contours = new_contours.astype(np.int)\n # contour_mask[new_contours[:,0], new_contours[:,1]] = 1\n npoints = len(new_contours)\n sample_indices = np.random.choice(\n range(npoints), size=sample_size, replace=False\n )\n\n # swtich x any y.\n\n temp = np.stack(\n [new_contours[sample_indices, 1], new_contours[sample_indices, 0]],\n axis=1\n )\n temp = temp.copy()\n return temp\n\n def mask_truncated_df(self, mask):\n mask_df = ndimage.distance_transform_edt(1 - mask)\n return mask_df\n\n # def _sample_contour(self, mask, n_samples=1000):\n # contour = measure.find_contours(mask, 0)\n # contour = np.concatenate(contour)\n # sample_indices = np.random.choice(\n # range(contour.shape[0]), size=n_samples, replace=True\n # )\n # # swtich x any y.\n # samples = np.stack(\n # [contour[sample_indices, 1], contour[sample_indices, 0]], axis=1\n # )\n # return 2 * (samples / mask.shape[0] - 0.5)\n\n def __getitem__(self, index):\n if self._shuffle_inds:\n if self._index_perm is None:\n self._index_perm = np.random.RandomState(seed=0).permutation(\n self.num_imgs\n )\n index = self._index_perm[index]\n\n img, mask, kp, _ = self.forward_img(index)\n\n mask_df = self.mask_truncated_df(mask)\n contour = self._sample_contour(mask)\n valid = True\n if len(contour) != self.n_contour:\n valid = False\n\n elem = {\n 'valid': valid,\n 'img': img.astype(np.float32),\n 'mask': mask.astype(np.float32),\n 'inds': index,\n 'mask_df': mask_df.astype(np.float32),\n 'contour': contour.astype(np.float32)\n }\n if kp is not None:\n elem['kp'] = kp.astype(np.float32)\n\n if self.opts.flip_train:\n flip_img = img[:, :, ::-1].copy()\n elem['flip_img'] = flip_img\n flip_mask = mask[:, ::-1].copy()\n elem['flip_mask'] = 
flip_mask\n elem['flip_mask_df'] = self.mask_truncated_df(flip_mask)\n elem['flip_contour'] = self._sample_contour(flip_mask)\n\n return self._filter(elem)\n\n\n# --------- Kp + Cam Dataset --------- #\n# ------------------------------------ #\nclass BaseKpCamDataset(BaseDataset):\n ''' \n img, mask, kp, pose data loader\n '''\n def __init__(self, opts, filter_key=None):\n super(BaseKpCamDataset, self).__init__(opts, filter_key=filter_key)\n self._out_kp = True\n self._out_pose = True\n\n\n# ------------ Data Loader ----------- #\n# ------------------------------------ #\ndef base_loader(\n d_set_func,\n batch_size,\n opts,\n filter_key=None,\n shuffle=True,\n pascal_only=False\n):\n dset = d_set_func(opts, filter_key=filter_key, pascal_only=pascal_only)\n return DataLoader(\n dset,\n batch_size=batch_size,\n shuffle=shuffle,\n num_workers=opts.n_data_workers,\n drop_last=True,\n pin_memory=True,\n collate_fn=collate_fn,\n )\n\n\ndef collate_fn(batch):\n '''Globe data collater.\n\n Assumes each instance is a dict.\n Applies different collation rules for each field.\n\n Args:\n batch: List of loaded elements via Dataset.__getitem__\n '''\n collated_batch = {'empty': True}\n # iterate over keys\n new_batch = []\n for t in batch:\n if t['valid']:\n new_batch.append(t)\n else:\n 'Print, found an invalid batch'\n\n # batch = [t for t in batch if t is not None]\n batch = new_batch\n if len(batch) > 0:\n for key in batch[0]:\n collated_batch[key] = default_collate([elem[key] for elem in batch])\n collated_batch['empty'] = False\n return collated_batch"
] | [
[
"numpy.sum",
"numpy.nanmedian",
"numpy.nanmean",
"numpy.nanstd",
"numpy.where",
"scipy.io.savemat",
"numpy.isnan",
"numpy.stack",
"numpy.concatenate",
"numpy.array"
],
[
"torch.utils.data.DataLoader",
"torch.utils.data.dataloader.default_collate",
"numpy.transpose",
"scipy.ndimage.distance_transform_edt",
"numpy.diag",
"numpy.stack",
"numpy.pad",
"numpy.copy",
"numpy.logical_not",
"numpy.random.RandomState",
"numpy.hstack",
"numpy.expand_dims",
"numpy.shape",
"numpy.random.rand",
"numpy.clip",
"numpy.array",
"numpy.concatenate"
]
] |
mthrok/ctcdecode | [
"b1a30d7a65342012e0d2524d9bae1c5412b24a23"
] | [
"example/evalutate_wav2vec2/evaluate_wav2vec2_librispeech.py"
] | [
"#!/usr/bin/env python3\n\"\"\"Generate `trn` files for Librispeech\n\nGiven a Librispeech directory, parse transcript files,\ntranscribe the corresponding audio, and generate hypothesis files.\n\"\"\"\nimport os\nimport time\nimport logging\nimport argparse\nfrom pathlib import Path\n\nimport torch\nimport torchaudio\nimport fairseq\nimport simple_ctc\n\n\n_LG = logging.getLogger(__name__)\n\n\ndef _parse_args():\n def _path(path):\n return Path(os.path.normpath(path))\n\n parser = argparse.ArgumentParser(\n description=__doc__,\n )\n parser.add_argument(\n '--root-dir',\n required=True,\n type=_path,\n help='The root directory on which data are persed.'\n )\n parser.add_argument(\n '--output-dir',\n required=True,\n type=_path,\n help='The output directory where trn files are generated.'\n )\n parser.add_argument(\n '--model-file',\n required=True,\n type=_path,\n help='Path to a finetuned weight file.'\n )\n parser.add_argument(\n '--dict-file',\n required=True,\n type=_path,\n help='Path to `dict.ltr.txt` file.'\n )\n parser.add_argument(\n '--num-threads',\n type=int,\n default=4,\n help='Maximum number of threads .'\n )\n\n args = parser.parse_args()\n for path in [args.root_dir, args.output_dir, args.model_file, args.dict_file]:\n if not os.path.exists(path):\n raise RuntimeError(f'File or directory does not exist: {path}')\n return args\n\n\ndef _parse_transcript(path):\n with open(path) as trans_fileobj:\n for line in trans_fileobj:\n line = line.strip()\n if not line:\n continue\n id, transcription = line.split(' ', maxsplit=1)\n yield id, transcription\n\n\ndef _parse_transcriptions(root_dir, output_dir):\n _LG.info('Parsing transcriptions')\n audios = []\n trn = output_dir / 'ref.trn'\n txt = output_dir / 'ref.trans.txt'\n with open(trn, 'w') as trn_fileobj, open(txt, 'w') as txt_fileobj:\n for trans_file in root_dir.glob('**/*.trans.txt'):\n trans_dir = trans_file.parent\n for id, transcription in _parse_transcript(trans_file):\n trn_fileobj.write(f'{transcription} ({id})\\n')\n txt_fileobj.write(f'{id} {transcription}\\n')\n audio_path = trans_dir / f'{id}.flac'\n audios.append((id, audio_path))\n return audios\n\n\ndef _load_vocab(dict_file):\n tokens = [\"<s>\", \"<pad>\", \"</s>\", \"<unk>\"]\n with open(dict_file, mode='r', encoding='utf-8') as fileobj:\n for line in fileobj:\n tokens.append(line.split()[0])\n return tokens\n\n\ndef _count_params(model):\n return sum(p.numel() for p in model.parameters())\n\n\ndef _load_model(model_file, dict_file):\n _LG.info('Loading the model')\n labels = _load_vocab(dict_file)\n\n overrides = {'data': str(dict_file.parent)}\n\n models, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(\n [str(model_file)], arg_overrides=overrides\n )\n model = models[0].eval()\n\n encoder = model.w2v_encoder\n\n decoder = simple_ctc.BeamSearchDecoder(\n labels,\n cutoff_top_n=40,\n cutoff_prob=0.8,\n beam_size=100,\n num_processes=1,\n blank_id=0,\n is_nll=True,\n )\n _LG.info('#parameters: %s', _count_params(encoder))\n return encoder, decoder\n\n\ndef _decode(audios, encoder, decoder, output_dir):\n trn = output_dir / 'hyp.trn'\n trans = output_dir / 'hyp.trans.txt'\n t_enc, t_dec, num_frames = 0.0, 0.0, 0\n with open(trn, 'w') as trn_fileobj, open(trans, 'w') as txt_fileobj:\n for i, (id, path) in enumerate(audios):\n waveform, _ = torchaudio.load(path)\n mask = torch.zeros_like(waveform)\n\n t0 = time.monotonic()\n ir = encoder(waveform, mask)['encoder_out'].transpose(1, 0)\n t1 = time.monotonic()\n result = decoder.decode(ir)\n t2 = 
time.monotonic()\n trn = ''.join(result.label_sequences[0][0]).replace('|', ' ')\n trn_fileobj.write(f'{trn} ({id})\\n')\n txt_fileobj.write(f'{id} {trn}\\n')\n _LG.info('%d/%d: %s: %s', i, len(audios), id, trn)\n\n num_frames += waveform.size(1)\n t_enc += t1 - t0\n t_dec += t2 - t1\n t_audio = num_frames / 16000\n _LG.info('Audio duration: %s [sec]', t_audio)\n _LG.info('Encoding Time: %s [sec]', t_enc)\n _LG.info('Decoding Time: %s [sec]', t_dec)\n _LG.info('Total Inference Time: %s [sec]', t_enc + t_dec)\n\n\ndef _main():\n args = _parse_args()\n torch.set_num_threads(args.num_threads)\n logging.basicConfig(\n format='%(asctime)s %(levelname)s: %(message)s',\n level=logging.INFO)\n audios = _parse_transcriptions(args.root_dir, args.output_dir)\n encoder, decoder = _load_model(args.model_file, args.dict_file)\n _decode(audios, encoder, decoder, args.output_dir)\n\n\nif __name__ == '__main__':\n _main()\n"
] | [
[
"torch.set_num_threads",
"torch.zeros_like"
]
] |
reuvenperetz/model_optimization | [
"40de02d56750ee4cc20e693da63bc2e70b4d20e6"
] | [
"tests/pytorch_tests/layer_tests/base_pytorch_layer_test.py"
] | [
"# Copyright 2022 Sony Semiconductors Israel, Inc. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\nimport operator\nfrom typing import List, Any, Tuple\nimport numpy as np\nimport torch\nfrom torch.nn import Hardswish, Hardsigmoid, ReLU, Hardtanh, ReLU6, LeakyReLU, PReLU, SiLU, Softmax, \\\n Sigmoid, Softplus, Softsign, Tanh\nfrom torch.nn.functional import hardswish, hardsigmoid, relu, hardtanh, relu6, leaky_relu, prelu, silu, softmax, \\\n softplus, softsign\nfrom torch.nn import UpsamplingBilinear2d, AdaptiveAvgPool2d, AvgPool2d, MaxPool2d\nfrom torch.nn.functional import upsample_bilinear, adaptive_avg_pool2d, avg_pool2d, max_pool2d\nfrom torch.nn import Conv2d, ConvTranspose2d, Linear, BatchNorm2d\nfrom torch.nn import Dropout, Flatten\nfrom torch import add, multiply, mul, sub, flatten, reshape, split, unsqueeze, concat, cat,\\\n mean, dropout, sigmoid, tanh\nfrom torch.fx import symbolic_trace\nfrom torch.nn import Module\n\nfrom model_compression_toolkit import FrameworkInfo, pytorch_post_training_quantization\nfrom model_compression_toolkit.core.common.framework_implementation import FrameworkImplementation\nfrom model_compression_toolkit.core.tpc_models.default_tp_model import get_default_tp_model\nfrom model_compression_toolkit.core.tpc_models.pytorch_tp_models.pytorch_default import generate_pytorch_tpc\nfrom model_compression_toolkit.core.pytorch.constants import CALL_FUNCTION, OUTPUT, CALL_METHOD, PLACEHOLDER\nfrom model_compression_toolkit.core.pytorch.reader.graph_builders import DummyPlaceHolder, ConstantHolder\nfrom model_compression_toolkit.core.pytorch.utils import torch_tensor_to_numpy, to_torch_tensor\nfrom tests.common_tests.base_layer_test import BaseLayerTest, LayerTestMode\nfrom model_compression_toolkit.core.pytorch.default_framework_info import DEFAULT_PYTORCH_INFO\nfrom model_compression_toolkit.core.pytorch.pytorch_implementation import PytorchImplementation\nfrom tests.common_tests.helpers.generate_test_tp_model import generate_test_tp_model\n\n\nPYTORCH_LAYER_TEST_OPS = {\n \"kernel_ops\": [Conv2d, Linear, ConvTranspose2d],\n\n \"no_quantization\": [Dropout, Flatten, ConstantHolder, dropout, flatten, split, operator.getitem, reshape,\n unsqueeze],\n\n \"activation\": [DummyPlaceHolder,\n Hardswish, Hardsigmoid, ReLU, Hardtanh, ReLU6, LeakyReLU, PReLU, SiLU, Softmax,\n Sigmoid, Softplus, Softsign, Tanh, hardswish, hardsigmoid, relu, hardtanh,\n relu6, leaky_relu, prelu,\n silu, softmax, sigmoid, softplus, softsign, tanh, torch.relu,\n UpsamplingBilinear2d, AdaptiveAvgPool2d, AvgPool2d, MaxPool2d,\n upsample_bilinear, adaptive_avg_pool2d, avg_pool2d, max_pool2d,\n add, sub, mul, multiply,\n operator.add, operator.sub, operator.mul,\n BatchNorm2d, concat, cat, mean]\n}\n\n\nclass LayerTestModel(torch.nn.Module):\n def __init__(self, layer):\n super(LayerTestModel, self).__init__()\n self.layer = layer\n\n def forward(self, x):\n return 
self.layer(x)\n\n\nclass OperationTestModel(torch.nn.Module):\n def __init__(self, layer):\n super(OperationTestModel, self).__init__()\n self.layer = layer\n\n def forward(self, x, y):\n return self.layer(x, y)\n\n\ndef is_node_fake_quant(node):\n return node.target == torch.fake_quantize_per_tensor_affine\n\n\ndef get_node_operation(node, model):\n if hasattr(model, str(node.target)):\n op = getattr(model, node.target)\n elif node.op == CALL_FUNCTION:\n op = node.target\n elif node.op == CALL_METHOD:\n op = getattr(torch, node.target)\n elif node.op == PLACEHOLDER:\n op = DummyPlaceHolder\n elif node.op == OUTPUT:\n op = OUTPUT\n else:\n op = None\n return op\n\n\ndef get_layer_weights(layer):\n # extract layer weights and named buffers\n weights = {}\n named_parameters_weights = {name: torch_tensor_to_numpy(parameter) for name, parameter in\n layer.named_parameters()}\n named_buffer_weights = {name: torch_tensor_to_numpy(parameter) for name, parameter in\n layer.named_buffers() if len(parameter.shape) > 0}\n weights.update(named_parameters_weights)\n weights.update(named_buffer_weights)\n return weights\n\n\nclass BasePytorchLayerTest(BaseLayerTest):\n def __init__(self,\n unit_test,\n layers: List[Any],\n val_batch_size: int = 1,\n num_calibration_iter: int = 1,\n num_of_inputs: int = 1,\n input_shape: Tuple[int, int, int] = (3, 8, 8),\n quantization_modes: List[LayerTestMode] = [LayerTestMode.FLOAT, LayerTestMode.QUANTIZED_8_BITS],\n is_inputs_a_list: bool = False,\n use_cpu: bool = False):\n\n super().__init__(unit_test=unit_test,\n layers=layers,\n val_batch_size=val_batch_size,\n num_calibration_iter=num_calibration_iter,\n num_of_inputs=num_of_inputs,\n input_shape=input_shape,\n quantization_modes=quantization_modes,\n is_inputs_a_list=is_inputs_a_list,\n use_cpu=use_cpu)\n\n def get_tpc(self):\n if self.current_mode == LayerTestMode.FLOAT:\n # Disable all features that are enabled by default:\n tp = generate_test_tp_model({'enable_weights_quantization': False,\n 'enable_activation_quantization': False})\n return generate_pytorch_tpc(name=\"base_layer_test\", tp_model=tp)\n elif self.current_mode == LayerTestMode.QUANTIZED_8_BITS:\n tp = generate_test_tp_model({'weights_n_bits': 8,\n 'activation_n_bits': 8})\n return generate_pytorch_tpc(name=\"8bit_layer_test\", tp_model=tp)\n else:\n raise NotImplemented\n\n def get_fw_info(self) -> FrameworkInfo:\n return DEFAULT_PYTORCH_INFO\n\n def get_fw_impl(self) -> FrameworkImplementation:\n return PytorchImplementation()\n\n def get_ptq_facade(self):\n return pytorch_post_training_quantization\n\n def generate_inputs(self):\n return to_torch_tensor([torch.randn(*in_shape) for in_shape in self.get_input_shapes()])\n\n def create_networks(self):\n models = []\n for layer in self.get_layers():\n if self.num_of_inputs > 1:\n models.append(OperationTestModel(layer))\n else:\n models.append(LayerTestModel(layer))\n return models\n\n\n def compare(self, quantized_model: Module, float_model: Module, input_x=None, quantization_info=None):\n quantized_model_fx = symbolic_trace(quantized_model)\n # Assert things that should happen when using FLOAT quantization mode\n if self.current_mode == LayerTestMode.FLOAT:\n self.__compare_float_mode(float_model, quantized_model, quantized_model_fx)\n\n # Assert things that should happen when using QUANTIZED_8_BITS quantization mode\n elif self.current_mode == LayerTestMode.QUANTIZED_8_BITS:\n self.__compare_8bits_quantization_mode(float_model, quantized_model, quantized_model_fx)\n\n # Check inference is 
possible\n input_tensors = self.generate_inputs()\n quantized_model(*input_tensors)\n quantized_model_fx(*input_tensors)\n\n def __compare_8bits_quantization_mode(self, float_model, quantized_model, quantized_model_fx):\n fw_info = self.get_fw_info()\n for node in quantized_model_fx.graph.nodes:\n op = get_node_operation(node, quantized_model)\n if op == OUTPUT or op == operator.getitem or is_node_fake_quant(node):\n continue\n if hasattr(quantized_model, str(node.target)):\n if type(op) in PYTORCH_LAYER_TEST_OPS['kernel_ops']:\n quantized_weights = get_layer_weights(getattr(quantized_model, node.target))\n float_weights = get_layer_weights(getattr(float_model, node.target))\n for k, v in quantized_weights.items():\n if k in fw_info.kernel_ops_attributes_mapping.get(type(op)):\n float_weight = float_weights.get(k)\n self.unit_test.assertFalse(float_weight is None)\n self.unit_test.assertTrue(np.sum(np.abs(v - float_weight)) > 0.0)\n node_next = node.next\n while get_node_operation(node_next, quantized_model) == operator.getitem:\n node_next = node_next.next\n self.unit_test.assertTrue(is_node_fake_quant(node_next))\n\n elif op in PYTORCH_LAYER_TEST_OPS['activation']:\n node_next = node.next\n while get_node_operation(node_next, quantized_model) == operator.getitem:\n node_next = node_next.next\n self.unit_test.assertTrue(is_node_fake_quant(node_next))\n\n elif op in PYTORCH_LAYER_TEST_OPS['no_quantization']:\n node_next = node.next\n while get_node_operation(node_next, quantized_model) == operator.getitem:\n node_next = node_next.next\n self.unit_test.assertFalse(is_node_fake_quant(node_next))\n else:\n raise Exception(f'Layer {op} is not in framework info')\n\n def __compare_float_mode(self, float_model, quantized_model, quantized_model_fx):\n for node in quantized_model_fx.graph.nodes:\n # Check there are no fake-quant layers\n self.unit_test.assertFalse(is_node_fake_quant(node))\n # check unchanged weights\n if hasattr(quantized_model, str(node.target)):\n quantized_weights = get_layer_weights(getattr(quantized_model, node.name))\n float_weights = get_layer_weights(getattr(float_model, node.name))\n for k, v in quantized_weights.items():\n float_weight = float_weights.get(k)\n self.unit_test.assertFalse(float_weight is None)\n self.unit_test.assertTrue(np.sum(np.abs(v - float_weight)) == 0.0)\n input_tensors = self.generate_inputs()\n y = float_model(*input_tensors)\n y_hat = quantized_model(*input_tensors)\n if isinstance(y, (list, tuple)):\n for fo, qo in zip(y, y_hat):\n distance = torch_tensor_to_numpy(torch.sum(torch.abs(fo - qo)))\n self.unit_test.assertTrue(distance == 0,\n msg=f'Outputs should be identical. Observed distance: {distance}')\n\n else:\n distance = torch_tensor_to_numpy(torch.sum(torch.abs(y - y_hat)))\n self.unit_test.assertTrue(distance == 0,\n msg=f'Outputs should be identical. Observed distance: {distance}')\n"
] | [
[
"torch.randn",
"numpy.abs",
"torch.abs",
"torch.fx.symbolic_trace"
]
] |
myu-wp/coveragecalc | [
"e2fac3baba3f240c8d776f7d28331899333a05c2"
] | [
"coveragecalc/fields.py"
] | [
"import numpy as np\n\n\nOUTPUTS = [\n 'primary phone is valid',\n 'primary phone to name',\n 'primary phone to address',\n 'primary phone line type',\n 'primary phone is prepaid',\n 'primary phone is commercial',\n 'primary address is valid',\n 'primary address diagnostic',\n 'primary address to name',\n 'primary address type',\n 'primary address is active',\n 'primary address is commercial',\n 'primary address is forwarder',\n 'secondary phone is valid',\n 'secondary phone to name',\n 'secondary phone to address',\n 'secondary phone line type',\n 'secondary phone is prepaid',\n 'secondary phone is commercial',\n 'secondary address is valid',\n 'secondary address diagnostic',\n 'secondary address to name',\n 'secondary address type',\n 'secondary address is active',\n 'secondary address is commercial',\n 'secondary address is forwarder',\n 'email is valid',\n 'email is disposable',\n 'email is auto-generated',\n 'email to name',\n 'email first seen days binned',\n 'ip is valid',\n 'ip distance from address binned',\n 'ip distance from phone binned',\n 'ip is proxy',\n 'ip connection type',\n 'confidence score binned',\n]\n\nBINS = {\n 'email first seen days': {\n 'labels': ['Never', '< 3 months', '3 months to a year', '1-4 years', '5+ years'],\n 'bins': [0, 1, 180, 365, 1825, np.inf],\n },\n 'ip distance from address': {\n 'labels': ['0-9', '10-99', '100-999', '1000+'],\n 'bins': [0, 10, 100, 1000, np.inf],\n },\n 'ip distance from phone': {\n 'labels': ['0-9', '10-99', '100-999', '1000+'],\n 'bins': [0, 10, 100, 1000, np.inf],\n },\n 'confidence score': {\n 'bins': np.arange(0,525,25),\n 'labels': ['0-25', '25-50', '50-75', '75-100', '100-125', '125-150', \n '150-175', '175-200', '200-225', '225-250', '250-275',\n '275-300', '300-325', '325-350', '350-375', '375-400',\n '400-425', '425-450', '450-475', '475-500',],\n },\n}\n\nto_name = [\n 'Match',\n 'No match',\n 'No name found',\n]\nto_address = [\n 'Match',\n 'Zip+4 match',\n 'Postal match',\n 'City/State match',\n 'No match',\n]\nline_type = [\n 'Mobile',\n 'Landline',\n 'Fixed VOIP',\n 'Non-fixed VOIP',\n 'Premium',\n 'Tollfree',\n 'Voicemail',\n 'Other',\n 'Unknown',\n]\naddress_type = [\n 'Commercial mail drop',\n 'Multi unit',\n 'Single unit',\n 'PO box',\n 'PO box throwback',\n 'Unknown address type',\n]\naddress_diagnostic = [\n 'Validated',\n 'Validated with corrections',\n 'Validated only Street, Postcode, City, Country. Premise not validated',\n 'Validated only Postcode, City, Country',\n 'Validated only City, Country',\n 'Validated only Country',\n]\nCATEGORIES = {\n 'primary phone to name': to_name,\n 'secondary phone to name': to_name,\n 'primary address to name': to_name,\n 'secondary address to name': to_name,\n 'email to name': to_name,\n 'primary phone to address': to_address,\n 'secondary phone to address': to_address,\n 'primary phone line type': line_type,\n 'secondary phone line type': line_type,\n 'primary address type': address_type,\n 'secondary address type': address_type,\n 'primary address diagnostic': address_diagnostic,\n 'secondary address diagnostic': address_diagnostic,\n 'ip connection type': [\n 'Cable/DSL',\n 'Corporate',\n 'Cellular',\n 'Dialup',\n ],\n}"
] | [
[
"numpy.arange"
]
] |
aksingh-fb/glow | [
"c50603a1885c9bffd935fbd1c7c10766b062cef9"
] | [
"torch_glow/tests/nodes/floor_test.py"
] | [
"from __future__ import absolute_import, division, print_function, unicode_literals\n\nimport unittest\n\nimport torch\nfrom tests import utils\n\n\nclass SimpleFloorModule(torch.nn.Module):\n def forward(self, a, b):\n c = a + b\n return torch.floor(c)\n\n\nclass TestFloor(unittest.TestCase):\n def test_floor(self):\n \"\"\"Basic test of the PyTorch floor Node on Glow.\"\"\"\n\n x = torch.randn(3, 4, 5)\n y = torch.randn(3, 4, 5)\n utils.compare_tracing_methods(\n SimpleFloorModule(), x, y, fusible_ops={\"aten::floor\"}\n )\n"
] | [
[
"torch.randn",
"torch.floor"
]
] |
rapirent/DSAI-HW3 | [
"ee83990f511049b8d53be5765040ab2068af6c3f"
] | [
"addition-subtractor.py"
] | [
"\n# coding: utf-8\nfrom keras.models import Sequential\nfrom keras import layers\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom six.moves import range\n\nimport os\nimport argparse\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--data_size', default='45000')\nparser.add_argument('--train_size', default='40000')\nparser.add_argument('--digits', default='3')\nparser.add_argument('--epoch', default='2')\nparser.add_argument('--activation', default='softmax')\nparser.add_argument('--output_name', default='model_1')\nargs = parser.parse_args()\n\n# # Parameters Config\nclass colors:\n ok = '\\033[92m'\n fail = '\\033[91m'\n close = '\\033[0m'\n\nDATA_SIZE = int(args.data_size)\nTRAIN_SIZE = int(args.train_size)\nDIGITS = int(args.digits)\nREVERSE = False\nMAXLEN = DIGITS + 1 + DIGITS\nchars = '0123456789+- '\nRNN = layers.LSTM\nHIDDEN_SIZE = 128\nBATCH_SIZE = 128\nEPOCH_SIZE = int(args.epoch)\nLAYERS = 1\nACTIVATION = args.activation\n\noutput_file = open('./data/as-' + args.output_name, 'w')\nprint('DATA_SIZE = ', DATA_SIZE , file=output_file)\nprint('TRAIN_SIZE = ', TRAIN_SIZE, file=output_file)\nprint('DIGITS = ', DIGITS, file=output_file)\nprint('EPOCH_SIZE = ', EPOCH_SIZE, file=output_file)\nprint('ACTIVATION = ', ACTIVATION, file=output_file)\n\nclass CharacterTable(object):\n def __init__(self, chars):\n self.chars = sorted(set(chars))\n self.char_indices = dict((c, i) for i, c in enumerate(self.chars))\n self.indices_char = dict((i, c) for i, c in enumerate(self.chars))\n\n def encode(self, C, num_rows):\n x = np.zeros((num_rows, len(self.chars)))\n for i, c in enumerate(C):\n x[i, self.char_indices[c]] = 1\n return x\n\n def decode(self, x, calc_argmax=True):\n if calc_argmax:\n x = x.argmax(axis=-1)\n return \"\".join(self.indices_char[i] for i in x)\n\nctable = CharacterTable(chars)\n\nctable.indices_char\n\n\n# # Data Generation\nquestions = []\nexpected = []\nseen = set()\nprint('Generating data...')\n\nwhile len(questions) < DATA_SIZE:\n f = lambda: int(''.join(np.random.choice(list('0123456789')) for i in range(np.random.randint(1, DIGITS + 1))))\n a, b = f(), f()\n if len(questions) % 2 == 0:\n q = '{}-{}'.format(a, b)\n query = q + ' ' * (MAXLEN - len(q))\n ans = str(a - b)\n else:\n q = '{}+{}'.format(a, b)\n query = q + ' ' * (MAXLEN - len(q))\n ans = str(a + b)\n if q in seen:\n continue\n seen.add(q)\n ans += ' ' * (DIGITS + 1 - len(ans))\n if REVERSE:\n query = query[::-1]\n questions.append(query)\n expected.append(ans)\nprint('Total addition questions:', len(questions))\n\nprint(questions[:5], expected[:5])\n\n\n# # Processing\nprint('Vectorization... 
(to the one-hot encoding)')\nx = np.zeros((len(questions), MAXLEN, len(chars)), dtype=np.bool)\ny = np.zeros((len(expected), DIGITS + 1, len(chars)), dtype=np.bool)\nfor i, sentence in enumerate(questions):\n x[i] = ctable.encode(sentence, MAXLEN)\nfor i, sentence in enumerate(expected):\n y[i] = ctable.encode(sentence, DIGITS + 1)\n\nindices = np.arange(len(y))\nnp.random.shuffle(indices)\nprint(indices)\nx = x[indices]\ny = y[indices]\n\n# train_test_split\ntrain_x = x[:TRAIN_SIZE]\ntrain_y = y[:TRAIN_SIZE]\ntest_x = x[TRAIN_SIZE:]\ntest_y = y[TRAIN_SIZE:]\n\nprint('Training Data:')\nprint(train_x.shape)\nprint(train_y.shape)\n\nsplit_at = len(train_x) - len(train_x) // 10\nprint('split_at', split_at)\n(x_train, x_val) = train_x[:split_at], train_x[split_at:]\n(y_train, y_val) = train_y[:split_at], train_y[split_at:]\n\nprint('Training Data:')\nprint(x_train.shape)\nprint(y_train.shape)\n\nprint('Validation Data:')\nprint(x_val.shape)\nprint(y_val.shape)\n\nprint('Testing Data:')\nprint(test_x.shape)\nprint(test_y.shape)\n\nprint(\"input: \", x_train[:3], '\\n\\n', \"label: \", y_train[:3])\n\n\n# # Build Model\nprint('Build model...')\nmodel = Sequential()\nmodel.add(RNN(HIDDEN_SIZE, input_shape=(MAXLEN, len(chars))))\nmodel.add(layers.RepeatVector(DIGITS + 1))\nfor _ in range(LAYERS):\n model.add(RNN(HIDDEN_SIZE, return_sequences=True))\n\nmodel.add(layers.TimeDistributed(layers.Dense(len(chars))))\nmodel.add(layers.Activation(ACTIVATION))\nmodel.compile(loss='categorical_crossentropy',\n optimizer='adam',\n metrics=['accuracy'])\nmodel.summary()\n\nprint('train set = ', x_train.shape, 'validation set = ', x_val.shape, file=output_file)\nacc = []\nval_acc = []\nloss = []\nval_loss = []\n# # Training\nfor loop in range(100):\n print()\n print('-' * 50)\n print('Train Loop Num:', loop)\n history = model.fit(x_train, y_train,\n batch_size=BATCH_SIZE,\n epochs=EPOCH_SIZE,\n validation_data=(x_val, y_val),\n shuffle=True)\n acc += history.history['acc']\n val_acc += history.history['val_acc']\n loss += history.history['loss']\n val_loss += history.history['val_loss']\n print('loop ', loop, file=output_file)\n print('acc = {} '.format(history.history['acc']), end='', file=output_file)\n print('val_acc = {} '.format(history.history['val_acc']), end='', file=output_file)\n print('loss = {} '.format(history.history['loss']), end='', file=output_file)\n print('val_loss = {} '.format(history.history['val_loss']), file=output_file)\n print('-' * 50 , file=output_file)\n\n for i in range(10):\n ind = np.random.randint(0, len(x_val))\n rowx, rowy = x_val[np.array([ind])], y_val[np.array([ind])]\n preds = model.predict_classes(rowx, verbose=0)\n q = ctable.decode(rowx[0])\n correct = ctable.decode(rowy[0])\n guess = ctable.decode(preds[0], calc_argmax=False)\n print('Q', q[::-1] if REVERSE else q, end=' ')\n print('T', correct, end=' ')\n if correct == guess:\n print(colors.ok + '☑' + colors.close, end=' ')\n else:\n print(colors.fail + '☒' + colors.close, end=' ')\n print(guess)\n\n\n# # Testing\nprint(\"MSG : Prediction\")\nprint(\"-\" * 50)\nright = 0\npreds = model.predict_classes(test_x, verbose=0)\nfor i in range(len(preds)):\n q = ctable.decode(test_x[i])\n correct = ctable.decode(test_y[i])\n guess = ctable.decode(preds[i], calc_argmax=False)\n print('Q', q[::-1] if REVERSE else q, end=' ')\n print('T', correct, end=' ')\n if correct == guess:\n print(colors.ok + '☑' + colors.close, end=' ')\n right += 1\n else:\n print(colors.fail + '☒' + colors.close, end=' ')\n print(guess)\nprint(\"MSG : 
Accuracy is {}\".format(right / len(preds)))\nprint(\"MSG : Accuracy is {}\".format(right / len(preds)), file=output_file)\nmodel.save('./models/as-' + args.output_name + '.h5')\nwith open('./corpus/as-' + args.output_name + '-training-corpus.csv', 'w') as corpus:\n print('questions,expected', file=corpus)\n for (x, y) in zip(x_train, y_train):\n print('{},{}'.format(ctable.decode(x), ctable.decode(y)), file=corpus)\nwith open('./corpus/as-' + args.output_name + '-validation-corpus.csv', 'w') as corpus:\n print('questions,expected', file=corpus)\n for (x, y) in zip(x_val, y_val):\n print('{},{}'.format(ctable.decode(x), ctable.decode(y)), file=corpus)\nwith open('./corpus/as-' + args.output_name + '-testing-corpus.csv', 'w') as corpus:\n print('questions,expected', file=corpus)\n for (x, y) in zip(test_x, test_y):\n print('{},{}'.format(ctable.decode(x), ctable.decode(y)), file=corpus)\nplt.plot(acc)\nplt.plot(val_acc)\nplt.title('model accuracy')\nplt.ylabel('accuracy')\nplt.xlabel('epoch')\nplt.legend(['train', 'test'], loc='upper left')\nplt.savefig('./fig/as-accuracy-' + args.output_name + '.png')\nplt.clf()\n# summarize history for loss\nplt.plot(loss)\nplt.plot(val_loss)\nplt.title('model loss')\nplt.ylabel('loss')\nplt.xlabel('epoch')\nplt.legend(['train', 'test'], loc='upper left')\nplt.savefig('./fig/as-loss-' + args.output_name + '.png')\noutput_file.close()\nplt.clf()\n"
] | [
[
"numpy.random.shuffle",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.clf",
"matplotlib.pyplot.title",
"matplotlib.pyplot.ylabel",
"numpy.array",
"matplotlib.pyplot.plot",
"numpy.random.randint",
"matplotlib.pyplot.xlabel"
]
] |
yyong119/EE208-Teamproject | [
"4cfecbf83981d89a98e811fcc7eefa9134036c43"
] | [
"train/pytorch-train/crnn_main.py"
] | [
"# -*- coding: utf-8 -*-\r\nfrom __future__ import print_function\r\nimport argparse\r\nimport random\r\nimport torch\r\nimport torch.backends.cudnn as cudnn\r\nimport torch.optim as optim\r\nimport torch.utils.data\r\nfrom torch.autograd import Variable\r\nimport numpy as np\r\nfrom warpctc_pytorch import CTCLoss\r\nimport os\r\nimport utils\r\nimport dataset\r\nfrom keys import alphabet\r\n#Alphabet = [e.encode('utf-8') for e in alphabet]\r\nimport models.crnn as crnn\r\n#with open('../run/char.txt') as f:\r\n# newChars = f.read().strip().decode('utf-8')\r\n#alphabet += u''.join(list(set(newChars) - set(alphabet)))\r\n\r\nparser = argparse.ArgumentParser()\r\nparser.add_argument('--trainroot', help='path to dataset',default='../data/lmdb/train')\r\nparser.add_argument('--valroot', help='path to dataset',default='../data/lmdb/val')\r\nparser.add_argument('--workers', type=int, help='number of data loading workers', default=4)\r\nparser.add_argument('--batchSize', type=int, default=128, help='input batch size')\r\nparser.add_argument('--imgH', type=int, default=32, help='the height of the input image to network')\r\nparser.add_argument('--imgW', type=int, default=256, help='the width of the input image to network')\r\nparser.add_argument('--nh', type=int, default=256, help='size of the lstm hidden state')\r\nparser.add_argument('--niter', type=int, default=1000000, help='number of epochs to train for')\r\nparser.add_argument('--lr', type=float, default=0.005, help='learning rate for Critic, default=0.00005')\r\nparser.add_argument('--beta1', type=float, default=0.5, help='beta1 for adam. default=0.5')\r\nparser.add_argument('--cuda', action='store_true', help='enables cuda')\r\nparser.add_argument('--ngpu', type=int, default=1, help='number of GPUs to use')\r\nparser.add_argument('--crnn', help=\"path to crnn (to continue training)\",default='../pretrain-models/netCRNN.pth')\r\n#parser.add_argument('--crnn', help=\"path to crnn (to continue training)\",default='')\r\nparser.add_argument('--alphabet', default=alphabet)\r\nparser.add_argument('--experiment', help='Where to store samples and models',default='./save_model')\r\nparser.add_argument('--displayInterval', type=int, default=50, help='Interval to be displayed')\r\nparser.add_argument('--n_test_disp', type=int, default=1000, help='Number of samples to display when test')\r\nparser.add_argument('--valInterval', type=int, default=50, help='Interval to be displayed')\r\nparser.add_argument('--saveInterval', type=int, default=1000, help='Interval to be displayed')\r\nparser.add_argument('--adam', action='store_true', help='Whether to use adam (default is rmsprop)')\r\nparser.add_argument('--adadelta', action='store_true', help='Whether to use adadelta (default is rmsprop)')\r\nparser.add_argument('--keep_ratio', action='store_true', help='whether to keep ratio for image resize')\r\nparser.add_argument('--random_sample', action='store_true', help='whether to sample the dataset with random sampler')\r\nopt = parser.parse_args()\r\nprint(opt)\r\nifUnicode=True\r\nif opt.experiment is None:\r\n opt.experiment = 'expr'\r\nos.system('mkdir {0}'.format(opt.experiment))\r\n\r\nopt.manualSeed = random.randint(1, 10000) # fix seed\r\nprint(\"Random Seed: \", opt.manualSeed)\r\nrandom.seed(opt.manualSeed)\r\nnp.random.seed(opt.manualSeed)\r\ntorch.manual_seed(opt.manualSeed)\r\n\r\ncudnn.benchmark = True\r\n\r\nif torch.cuda.is_available() and not opt.cuda:\r\n print(\"WARNING: You have a CUDA device, so you should probably run with 
--cuda\")\r\n\r\ntrain_dataset = dataset.lmdbDataset(root=opt.trainroot)\r\nassert train_dataset\r\nif not opt.random_sample:\r\n sampler = dataset.randomSequentialSampler(train_dataset, opt.batchSize)\r\nelse:\r\n sampler = None\r\ntrain_loader = torch.utils.data.DataLoader(\r\n train_dataset, batch_size=opt.batchSize,\r\n shuffle=True, sampler=sampler,\r\n num_workers=int(opt.workers),\r\n collate_fn=dataset.alignCollate(imgH=opt.imgH, imgW=opt.imgW, keep_ratio=opt.keep_ratio))\r\ntest_dataset = dataset.lmdbDataset(\r\n root=opt.valroot, transform=dataset.resizeNormalize((256, 32)))\r\n\r\nngpu = int(opt.ngpu)\r\nnh = int(opt.nh)\r\nalphabet = opt.alphabet\r\nnclass = len(alphabet) + 1\r\nnc = 1\r\n\r\nconverter = utils.strLabelConverter(alphabet)\r\ncriterion = CTCLoss()\r\n\r\n\r\n# custom weights initialization called on crnn\r\ndef weights_init(m):\r\n classname = m.__class__.__name__\r\n if classname.find('Conv') != -1:\r\n m.weight.data.normal_(0.0, 0.02)\r\n elif classname.find('BatchNorm') != -1:\r\n m.weight.data.normal_(1.0, 0.02)\r\n m.bias.data.fill_(0)\r\n\r\ncrnn = crnn.CRNN(opt.imgH, nc, nclass, nh, ngpu)\r\ncrnn.apply(weights_init)\r\nif opt.crnn != '':\r\n print('loading pretrained model from %s' % opt.crnn)\r\n crnn.load_state_dict(torch.load(opt.crnn))\r\nprint(crnn)\r\n\r\nimage = torch.FloatTensor(opt.batchSize, 3, opt.imgH, opt.imgH)\r\ntext = torch.IntTensor(opt.batchSize * 5)\r\nlength = torch.IntTensor(opt.batchSize)\r\n\r\nif opt.cuda:\r\n crnn.cuda()\r\n image = image.cuda()\r\n criterion = criterion.cuda()\r\n\r\nimage = Variable(image)\r\ntext = Variable(text)\r\nlength = Variable(length)\r\n\r\n# loss averager\r\nloss_avg = utils.averager()\r\n\r\n# setup optimizer\r\nif opt.adam:\r\n optimizer = optim.Adam(crnn.parameters(), lr=opt.lr,\r\n betas=(opt.beta1, 0.999))\r\nelif opt.adadelta:\r\n optimizer = optim.Adadelta(crnn.parameters(), lr=opt.lr)\r\nelse:\r\n optimizer = optim.RMSprop(crnn.parameters(), lr=opt.lr)\r\n\r\n\r\ndef val(net, dataset, criterion, max_iter=2):\r\n print('Start val')\r\n\r\n for p in crnn.parameters():\r\n p.requires_grad = False\r\n\r\n net.eval()\r\n data_loader = torch.utils.data.DataLoader(\r\n dataset, shuffle=True, batch_size=opt.batchSize, num_workers=int(opt.workers))\r\n val_iter = iter(data_loader)\r\n\r\n i = 0\r\n n_correct = 0\r\n loss_avg = utils.averager()\r\n\r\n max_iter = min(max_iter, len(data_loader))\r\n for i in range(max_iter):\r\n data = val_iter.next()\r\n i += 1\r\n cpu_images, cpu_texts = data\r\n batch_size = cpu_images.size(0)\r\n utils.loadData(image, cpu_images)\r\n if ifUnicode:\r\n cpu_texts = [ clean_txt(tx.decode('utf-8')) for tx in cpu_texts]\r\n t, l = converter.encode(cpu_texts)\r\n utils.loadData(text, t)\r\n utils.loadData(length, l)\r\n\r\n preds = crnn(image)\r\n preds_size = Variable(torch.IntTensor([preds.size(0)] * batch_size))\r\n cost = criterion(preds, text, preds_size, length) / batch_size\r\n loss_avg.add(cost)\r\n\r\n _, preds = preds.max(2)\r\n preds = preds.squeeze(2)\r\n preds = preds.transpose(1, 0).contiguous().view(-1)\r\n sim_preds = converter.decode(preds.data, preds_size.data, raw=False)\r\n for pred, target in zip(sim_preds, cpu_texts):\r\n if pred.strip() == target.strip():\r\n n_correct += 1\r\n\r\n raw_preds = converter.decode(preds.data, preds_size.data, raw=True)[:opt.n_test_disp]\r\n #for raw_pred, pred, gt in zip(raw_preds, sim_preds, cpu_texts):\r\n #print((pred, gt))\r\n #print \r\n accuracy = n_correct / float(max_iter * opt.batchSize)\r\n testLoss = 
loss_avg.val()\r\n #print('Test loss: %f, accuray: %f' % (testLoss, accuracy))\r\n return testLoss,accuracy\r\n\r\ndef clean_txt(txt):\r\n \"\"\"\r\n filter char where not in alphabet with ' '\r\n \"\"\"\r\n newTxt = u''\r\n for t in txt:\r\n if t in alphabet:\r\n newTxt+=t\r\n else:\r\n newTxt+=u' '\r\n return newTxt\r\n \r\ndef trainBatch(net, criterion, optimizer,flage=False):\r\n data = train_iter.next()\r\n cpu_images, cpu_texts = data##decode utf-8 to unicode\r\n if ifUnicode:\r\n cpu_texts = [ clean_txt(tx.decode('utf-8')) for tx in cpu_texts]\r\n \r\n batch_size = cpu_images.size(0)\r\n utils.loadData(image, cpu_images)\r\n t, l = converter.encode(cpu_texts)\r\n utils.loadData(text, t)\r\n utils.loadData(length, l)\r\n\r\n preds = crnn(image)\r\n preds_size = Variable(torch.IntTensor([preds.size(0)] * batch_size))\r\n cost = criterion(preds, text, preds_size, length) / batch_size\r\n crnn.zero_grad()\r\n cost.backward()\r\n if flage:\r\n lr = 0.0001\r\n optimizer = optim.Adadelta(crnn.parameters(), lr=lr)\r\n optimizer.step()\r\n return cost\r\n\r\nnum =0\r\nlasttestLoss = 10000\r\ntestLoss = 10000\r\nimport os\r\n\r\ndef delete(path):\r\n \"\"\"\r\n 删除文件\r\n \"\"\"\r\n import os\r\n import glob\r\n paths = glob.glob(path+'/*.pth')\r\n for p in paths:\r\n os.remove(p)\r\n \r\n \r\n \r\n \r\nnumLoss = 0##判断训练参数是否下降 \r\n \r\nfor epoch in range(opt.niter):\r\n train_iter = iter(train_loader)\r\n i = 0\r\n while i < len(train_loader):\r\n #print('The step{} ........\\n'.format(i))\r\n for p in crnn.parameters():\r\n p.requires_grad = True\r\n crnn.train()\r\n #if numLoss>50:\r\n # cost = trainBatch(crnn, criterion, optimizer,True)\r\n # numLoss = 0\r\n #else:\r\n cost = trainBatch(crnn, criterion, optimizer)\r\n loss_avg.add(cost)\r\n i += 1\r\n\r\n #if i % opt.displayInterval == 0:\r\n # print('[%d/%d][%d/%d] Loss: %f' %\r\n # (epoch, opt.niter, i, len(train_loader), loss_avg.val()))\r\n # loss_avg.reset()\r\n\r\n if i % opt.valInterval == 0:\r\n testLoss,accuracy = val(crnn, test_dataset, criterion)\r\n #print('Test loss: %f, accuray: %f' % (testLoss, accuracy))\r\n print(\"epoch:{},step:{},Test loss:{},accuracy:{},train loss:{}\".format(epoch,num,testLoss,accuracy,loss_avg.val()))\r\n loss_avg.reset()\r\n # do checkpointing\r\n num +=1\r\n #lasttestLoss = min(lasttestLoss,testLoss)\r\n \r\n if lasttestLoss >testLoss:\r\n print(\"The step {},last lost:{}, current: {},save model!\".format(num,lasttestLoss,testLoss))\r\n lasttestLoss = testLoss\r\n #delete(opt.experiment)##删除历史模型\r\n torch.save(crnn.state_dict(), '{}/netCRNN.pth'.format(opt.experiment))\r\n numLoss = 0\r\n else:\r\n numLoss+=1\r\n \r\n"
] | [
[
"torch.FloatTensor",
"torch.load",
"torch.manual_seed",
"torch.autograd.Variable",
"numpy.random.seed",
"torch.cuda.is_available",
"torch.IntTensor"
]
] |
cebarbosa/summer_project_hydra | [
"386a01253d92075ff00396229e83caf44eed07a3"
] | [
"source_extraction.py"
] | [
"# -*- coding: utf-8 -*-\n\"\"\"\n\nCreated on 28/10/2017\n\n@author: Carlos Eduardo Barbosa\n\nDetection of sources in data and separation of bins prior to Voronoi\ntesselation\n\n\"\"\"\nfrom __future__ import division, print_function\nimport os\n\nimport pyregion\nimport numpy as np\nfrom astropy.io import fits\nfrom astropy.convolution import Gaussian2DKernel, convolve\nfrom astropy.table import Table\nimport matplotlib.pyplot as plt\nfrom astropy.stats import SigmaClip\nfrom photutils.background import Background2D, MedianBackground\n\nimport sewpy\n\nimport context\nfrom misc import array_from_header\n\ndef background_removed_data(imgname, redo=False, output=None, hdunum=1):\n \"\"\" Remove background from the image \"\"\"\n data = fits.getdata(imgname, ext=1)\n output = \"detection.fits\"\n if os.path.exists(output) and not redo:\n return output\n sigma_clip = SigmaClip(sigma=3.)\n bkg_estimator = MedianBackground()\n bkg = Background2D(data, (8, 8), filter_size=(5, 5),\n sigma_clip=sigma_clip, bkg_estimator = bkg_estimator)\n outdata = data - bkg.background\n fits.writeto(output, outdata, overwrite=True)\n return output\n\ndef mask_from_regions(imgname, redo=False):\n \"\"\" Mask regions marked in file mask.reg made in ds9. \"\"\"\n data = fits.getdata(imgname)\n filename = \"mask.reg\"\n outfile = \"detection_masked.fits\"\n if os.path.exists(outfile) and not redo:\n mask = fits.getdata(outfile)\n return mask\n r = pyregion.open(filename)\n for i, region in enumerate(r.get_filter()):\n mask = region.mask(data.shape)\n data[mask] = np.nan\n hdu = fits.PrimaryHDU(data)\n hdu.writeto(outfile, overwrite=True)\n return outfile\n\ndef run_sextractor(img, redo=False, outfile=None):\n \"\"\" Produces a catalogue of sources in a given field. \"\"\"\n if outfile is None:\n outfile = \"source-catalog.fits\"\n if os.path.exists(outfile) and not redo:\n return outfile\n params = [\"NUMBER\", \"X_IMAGE\", \"Y_IMAGE\", \"KRON_RADIUS\", \"ELLIPTICITY\",\n \"THETA_IMAGE\", \"A_IMAGE\", \"B_IMAGE\", \"MAG_AUTO\", \"FLUX_RADIUS\"]\n config = {\"CHECKIMAGE_TYPE\": \"BACKGROUND\",\n \"CHECKIMAGE_NAME\": \"background.fits\",\n \"DETECT_THRESH\" : 1.5}\n sew = sewpy.SEW(config=config, sexpath=\"source-extractor\", params=params)\n cat = sew(img)\n cat[\"table\"].write(outfile, format=\"fits\", overwrite=True)\n return outfile\n\ndef mask_sources(img, cat, ignore=None, redo=False, output=None):\n \"\"\" Produces segmentation image with bins for detected sources using\n elliptical regions. \"\"\"\n if output is None:\n output = \"sources_mask.fits\"\n if os.path.exists(output) and not redo:\n return output\n data = fits.getdata(img)\n ydim, xdim = data.shape\n xx, yy = np.meshgrid(np.arange(1, xdim + 1), np.arange(1, ydim + 1))\n table = Table.read(cat, 1)\n if ignore is not None:\n idx = np.array([i for i,x in enumerate(table[\"NUMBER\"]) if x not in\n ignore])\n table = table[idx]\n axratio = table[\"B_IMAGE\"] / table[\"A_IMAGE\"]\n # table = table[axratio > 0.4]\n mask = np.zeros_like(data)\n for source in table:\n R = calc_isophotes(xx, yy, source[\"X_IMAGE\"], source[\"Y_IMAGE\"], \\\n source[\"THETA_IMAGE\"] - 90, source[\"B_IMAGE\"] /\n source[\"A_IMAGE\"])\n Rmax = 1.5 * source[\"KRON_RADIUS\"]\n mask += np.where(R <= Rmax, 1, 0)\n hdu = fits.PrimaryHDU(mask)\n hdu.writeto(output, overwrite=True)\n return output\n\ndef calc_isophotes(x, y, x0, y0, PA, q):\n \"\"\" Calculate isophotes for a given component. 
\"\"\"\n x = np.copy(x) - x0\n y = np.copy(y) - y0\n shape = x.shape\n theta = np.radians(PA)\n c, s = np.cos(theta), np.sin(theta)\n rot = np.array([[s, c], [-c, s]])\n xy = np.dot(np.column_stack((x.flatten(), y.flatten())), rot).T\n x = np.reshape(xy[0], newshape=shape)\n y = np.reshape(xy[1], newshape=shape)\n return np.sqrt(np.power(x, 2) + np.power(y / q, 2))\n\ndef run_ngc3311(redo=False):\n data_dir = os.path.join(context.home_dir, \"data\")\n fields = context.fields\n for field in fields:\n os.chdir(os.path.join(data_dir, field))\n if field == \"fieldA\":\n imgname = \"ellipse_model.fits\"\n else:\n imgname = f\"sn_field{field[-1]}.fits\"\n detimg = background_removed_data(imgname, redo=redo)\n immasked = mask_from_regions(detimg, redo=redo)\n sexcat = run_sextractor(immasked, redo=redo)\n mask_sources(immasked, sexcat, redo=redo)\n\nif __name__ == \"__main__\":\n run_ngc3311(redo=True)\n"
] | [
[
"numpy.zeros_like",
"numpy.reshape",
"numpy.cos",
"numpy.copy",
"numpy.arange",
"numpy.power",
"numpy.array",
"numpy.sin",
"numpy.where",
"numpy.radians"
]
] |
google/init2winit | [
"62ec9fd31bd7b38bb7c220f15d4187bf0706506d"
] | [
"init2winit/mt_eval/main.py"
] | [
"# coding=utf-8\n# Copyright 2021 The init2winit Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nr\"\"\"Used to evaluate MT model (BLEU/cross_entropy_loss/log_perplexity).\n\n\"\"\"\n\nimport json\nimport os\nimport sys\n\nfrom absl import app\nfrom absl import flags\nfrom absl import logging\nfrom init2winit import hyperparameters\nfrom init2winit.dataset_lib import datasets\nfrom init2winit.model_lib import models\nfrom init2winit.mt_eval import bleu_evaluator\nimport jax\nimport tensorflow.compat.v2 as tf\n\n\n\n\n# Enable flax xprof trace labelling.\nos.environ['FLAX_PROFILE'] = 'true'\n\nflags.DEFINE_string('checkpoint_dir', '', 'Path to the checkpoint to evaluate.')\nflags.DEFINE_integer('seed', 0, 'seed used to initialize the computation.')\nflags.DEFINE_integer('worker_id', 1,\n 'Client id for hparam sweeps and tuning studies.')\nflags.DEFINE_string('experiment_config_filename', None,\n 'Path to the config.json file for this experiment.')\nflags.DEFINE_string(\n 'model', '', 'Name of the model used to evaluate (not'\n 'needed if experiment_config_filenmae is provided).')\nflags.DEFINE_string(\n 'dataset', '', 'Name of the dataset used to evaluate (not'\n 'needed if experiment_config_filenmae is provided).')\nflags.DEFINE_string(\n 'hparam_overrides', '', 'json representation of a flattened dict of hparam '\n 'overrides. 
For nested dictionaries, the override key '\n 'should be specified as lr_hparams.initial_value.')\nflags.DEFINE_string(\n 'trial_hparams_filename', None,\n 'Path to the hparams.json file for the trial we want to run inference on.')\nflags.DEFINE_string('mt_eval_config', '',\n 'Json representation of the mt evaluation config.')\n\nFLAGS = flags.FLAGS\n\n\ndef main(unused_argv):\n # Necessary to use the tfds loader.\n tf.enable_v2_behavior()\n\n if jax.process_count() > 1:\n # TODO(ankugarg): Add support for multihost inference.\n raise NotImplementedError('BLEU eval does not support multihost inference.')\n\n rng = jax.random.PRNGKey(FLAGS.seed)\n\n mt_eval_config = json.loads(FLAGS.mt_eval_config)\n\n if FLAGS.experiment_config_filename:\n with tf.io.gfile.GFile(FLAGS.experiment_config_filename) as f:\n experiment_config = json.load(f)\n if jax.process_index() == 0:\n logging.info('experiment_config: %r', experiment_config)\n dataset_name = experiment_config['dataset']\n model_name = experiment_config['model']\n else:\n assert FLAGS.dataset and FLAGS.model\n dataset_name = FLAGS.dataset\n model_name = FLAGS.model\n\n if jax.process_index() == 0:\n logging.info('argv:\\n%s', ' '.join(sys.argv))\n logging.info('device_count: %d', jax.device_count())\n logging.info('num_hosts : %d', jax.host_count())\n logging.info('host_id : %d', jax.host_id())\n\n model_class = models.get_model(model_name)\n dataset_builder = datasets.get_dataset(dataset_name)\n dataset_meta_data = datasets.get_dataset_meta_data(dataset_name)\n\n hparam_overrides = None\n if FLAGS.hparam_overrides:\n if isinstance(FLAGS.hparam_overrides, str):\n hparam_overrides = json.loads(FLAGS.hparam_overrides)\n\n merged_hps = hyperparameters.build_hparams(\n model_name=model_name,\n initializer_name=experiment_config['initializer'],\n dataset_name=dataset_name,\n hparam_file=FLAGS.trial_hparams_filename,\n hparam_overrides=hparam_overrides)\n\n if jax.process_index() == 0:\n logging.info('Merged hps are: %s', json.dumps(merged_hps.to_json()))\n\n evaluator = bleu_evaluator.BLEUEvaluator(FLAGS.checkpoint_dir, merged_hps,\n rng,\n model_class, dataset_builder,\n dataset_meta_data,\n mt_eval_config)\n evaluator.translate_and_calculate_bleu()\n\n\nif __name__ == '__main__':\n app.run(main)\n"
] | [
[
"tensorflow.compat.v2.io.gfile.GFile",
"tensorflow.compat.v2.enable_v2_behavior"
]
] |
Jacks0nJ/Importance-Sampling-Code | [
"f41fac451e9f78ab2130600ca83e1a6406ee43a5"
] | [
"pyfpt/numerics/is_simulation.py"
] | [
"'''\nImportance Sampling Simulation\n------------------------------\nThis is the main module of the PyFPT code, as it runs the simulations, post\nprocesses and exports the data ready for plotting.\n'''\n\nfrom timeit import default_timer as timer\nimport multiprocessing as mp\nfrom multiprocessing import Process, Queue\n\nimport numpy as np\n\nfrom .multi_processing_error import multi_processing_error\nfrom .histogram_data_truncation import histogram_data_truncation\nfrom .save_data_to_file import save_data_to_file\nfrom .data_points_pdf import data_points_pdf\n\nfrom .importance_sampling_cython import\\\n importance_sampling_simulations\n\n\ndef is_simulation(drift, diffusion, x_in, x_end,\n num_runs, bias, time_step, bins=50, min_bin_size=400,\n num_sub_samples=20, estimator='lognormal',\n save_data=False, t_in=0., t_f=100, x_r=None):\n \"\"\"Executes the simulation runs, then returns the histogram bin centres,\n heights and errors.\n\n Parameters\n ----------\n drift : function\n The drift term of the simulated Langevin equation. Must take both x and\n t as arguments in the format ``(x, t)``.\n diffusion : function\n The diffusion term of the simulated Langevin equation. Must take both\n x and t as arguments in the format ``(x, t)``.\n x_in : float\n The initial position value.\n x_end : float\n The end position value, i.e. the threshold which defines the FPT\n problem.\n num_runs : int\n The number of simulation runs.\n bias : scalar or function\n The bias used in the simulated Langevin equation to achieve importance\n sampling\n\n If a scalar (float or int), this the bias amplitude, i.e. a coefficent\n which mutiplies the the diffusion to define the bias.\n\n If a function, this simply defines the bias used. Must take arguments\n for both position and time in the format ``(x, t)``.\n bins : int or sequence, optional\n If bins is an integer, it defines the number equal width bins for the\n first-passage times. If bins is a list or numpy array, it defines the\n bin edges, including the left edge of the first bin and the right edge\n of the last bin. The widths can vary. Defaults to 50 evenly spaced\n bins.\n time_step : float or int, optional\n The time step. This should be at least smaller than the standard\n deviation of the FPTs.\n min_bin_size : int, optional\n The minimum number of runs per bin to included in the data analysis.\n If a bin has less than this number, it is truncated. Defaults to 400.\n estimator : string, optional\n The estimator used to reconstruct the target distribution probability\n density from the importance sample. If ``'lognormal'``, it assumes the\n weights in each bin follow a lognomral distribution. If ``'naive'``, no\n assumption is made but more runs are required for convergance.\n num_sub_samples : int, optional\n The number of subsamples used in jackknife estimation of the errors\n used for the ``'naive'`` estimator. Defaults to 20 when ``estimator``\n is ``'naive'``.\n Save_data : bool, optional\n If ``True``, the first-passage times and the associated weights for\n each run is saved to a file.\n t_in : float, optional\n The initial time value of simulation Defaults to 0.\n t_f : float, optional\n The maxiumum FPT allowed per run. If this is exceded, the\n simulation run ends and returns ``t_f``, which can then be\n truncated. Defaults to 100.\n x_r : float, optional\n The value of the reflective boundary. Must be compatible with the x_in\n and x_end chosen. 
Defaults to unreachable value, effectively no\n boundary.\n Returns\n -------\n bin_centres : list\n The centres of the histogram bins.\n heights : list\n The heights of the normalised histogram bars.\n errors : list\n The errors in estimating the heights.\n \"\"\"\n # Checking drift and diffusion are of the correct format\n if callable(drift) is True:\n if isinstance(drift(x_in, t_in), float) is True:\n pass\n else:\n ValueError('Provided drift is not the format (x, t)')\n else:\n ValueError('Provided drift is not a function')\n if callable(diffusion) is True:\n if isinstance(diffusion(x_in, t_in), float) is True:\n pass\n else:\n ValueError('Provided diffusion is not the format (x, t)')\n else:\n ValueError('Provided diffusion is not a function')\n\n # Make sure provided values are floats for Cython\n if isinstance(x_in, int) is True:\n x_in = 1.0*x_in\n if isinstance(x_end, int) is True:\n x_end = 1.0*x_end\n # Checking bias is of correct form\n if isinstance(bias, float) is True or isinstance(bias, float) is True:\n # If the bias argument is a scalar, use diffusion based bias\n bias_type = 'diffusion'\n if bias == 0:\n estimator = 'naive'\n print('As direct simulation, defaulting to naive estimator')\n elif callable(bias):\n # If a function is provided, check it is of the correct form\n if isinstance(bias(x_in, t_in), float) is True:\n bias_type = 'custom'\n else:\n ValueError('bias function must be of the form bias(x, t)')\n else:\n ValueError('Provided bias is not a number or function')\n\n if isinstance(time_step, float) is not True\\\n and isinstance(time_step, int) is not True:\n raise ValueError('time_step is not a number')\n\n # Check the user has provided a estimator\n if estimator != 'lognormal' and estimator != 'naive':\n print('Invalid estimator argument, defaulting to naive method')\n estimator = 'naive'\n\n # If no x_r argument is provided, default to infinite boundary\n if x_r is None:\n # Set the reflective surface at an arbitrarily large value in the\n # opposite direction to propagation\n x_r = 10000*(x_in-x_end)\n elif isinstance(x_r, float) is False:\n if isinstance(x_r, int) is True:\n if isinstance(x_r, bool) is True:\n raise ValueError('x_r is not a number')\n else:\n pass\n else:\n raise ValueError('x_r is not a number')\n elif (x_r-x_in)*(x_in-x_end) < 0:\n raise ValueError('End and relfective surfaces not compatible with' +\n ' initial value.')\n\n # The number of sims per core, so the total is correct\n num_runs_per_core = int(num_runs/mp.cpu_count())\n # Time how long the simulation runs take\n start = timer()\n\n # Using multiprocessing\n def multi_processing_func(x_in, x_r, x_end, t_in, t_f,\n time_step, bias, num_runs, queue_efolds,\n queue_ws, queue_refs):\n results =\\\n importance_sampling_simulations(x_in, x_r, x_end, t_in,\n t_f, time_step, bias,\n num_runs, drift, diffusion,\n bias_type=bias_type,\n count_refs=False)\n fpt_values = np.array(results[0][:])\n ws = np.array(results[1][:])\n queue_efolds.put(fpt_values)\n queue_ws.put(ws)\n\n queue_efolds = Queue()\n queue_ws = Queue()\n queue_refs = Queue()\n cores = int(mp.cpu_count()/1)\n\n print('Number of cores used: '+str(cores))\n processes = [Process(target=multi_processing_func,\n args=(x_in, x_r, x_end, t_in, t_f,\n time_step, bias, num_runs_per_core,\n queue_efolds, queue_ws, queue_refs))\n for i in range(cores)]\n\n for p in processes:\n p.start()\n\n # More efficient to work with numpy arrays\n fpt_array = np.array([queue_efolds.get() for p in processes])\n ws_array = 
np.array([queue_ws.get() for p in processes])\n\n end = timer()\n print(f'The simulations took: {end - start} seconds')\n\n # Combine into columns into 1\n fpt_values = fpt_array.flatten()\n w_values = ws_array.flatten()\n\n # Sort in order of increasing Ns\n sort_idx = np.argsort(fpt_values)\n fpt_values = fpt_values[sort_idx]\n w_values = w_values[sort_idx]\n\n # Checking if multipprocessing error occured, by looking at correlation\n _ = multi_processing_error(fpt_values, w_values)\n\n # Truncating any data which did not reach x_end\n fpt_values, w_values =\\\n histogram_data_truncation(fpt_values, t_f, weights=w_values,\n num_sub_samples=num_sub_samples)\n # Saving the data\n if save_data is True:\n if bias_type == 'diffusion':\n save_data_to_file(fpt_values, w_values, x_in, num_runs, bias)\n else:\n # Label the file differently if custom bias is used.\n save_data_to_file(fpt_values, w_values, x_in, num_runs,\n bias(x_in, 0), extra_label='_custom_bias')\n\n # Now analysisng the data to creating the histogram/PDF data\n bin_centres, heights, errors, num_runs_used, bin_edges_untruncated =\\\n data_points_pdf(fpt_values, w_values, estimator, bins=bins,\n min_bin_size=min_bin_size,\n num_sub_samples=num_sub_samples)\n # Return data as lists\n return bin_centres.tolist(), heights.tolist(), errors.tolist()\n"
] | [
[
"numpy.array",
"numpy.argsort"
]
] |
HaeckelK/bookkeeping | [
"6f8b62f1322fe1c409f397222653382d302d9754"
] | [
"ledger.py"
] | [
"from abc import ABC, abstractmethod\nfrom typing import List\n\nimport numpy as np\n\n\nclass Ledger(ABC):\n @abstractmethod\n def get_next_batch_id(self) -> int:\n \"\"\"Return next available batch id.\"\"\"\n\n @abstractmethod\n def get_next_transaction_id(self) -> int:\n \"\"\"Return next available transaction id.\"\"\"\n\n\nclass PandasLedger(Ledger):\n def get_next_batch_id(self) -> int:\n try:\n next_id = int(self.df[\"batch_id\"].max()) + 1\n except ValueError:\n return 0\n return next_id\n\n def append(self, df) -> List[int]:\n next_id = self.get_next_transaction_id()\n ids = np.arange(start=next_id, stop=next_id + df.shape[0])\n df[\"transaction_id\"] = ids\n self.df = self.df.append(df[self.columns], ignore_index=True, sort=False)\n return list(ids)\n\n def get_next_transaction_id(self) -> int:\n try:\n next_id = int(self.df[\"transaction_id\"].max()) + 1\n except ValueError:\n return 0\n return next_id\n"
] | [
[
"numpy.arange"
]
] |
lamyiowce/training | [
"498b945dd914573bdbf7a871eaeebd9388b60b76"
] | [
"object_detection/pytorch/tools/test_net.py"
] | [
"# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.\n# Set up custom environment before nearly anything else is imported\n# NOTE: this should be the first import (no not reorder)\nfrom maskrcnn_benchmark.utils.env import setup_environment # noqa F401 isort:skip\n\nimport argparse\nimport os\n\nimport torch\nfrom maskrcnn_benchmark.config import cfg\nfrom maskrcnn_benchmark.data import make_data_loader\nfrom maskrcnn_benchmark.engine.inference import inference\nfrom maskrcnn_benchmark.modeling.detector import build_detection_model\nfrom maskrcnn_benchmark.utils.checkpoint import DetectronCheckpointer\nfrom maskrcnn_benchmark.utils.collect_env import collect_env_info\nfrom maskrcnn_benchmark.utils.comm import synchronize, get_rank\nfrom maskrcnn_benchmark.utils.logger import setup_logger\nfrom maskrcnn_benchmark.utils.miscellaneous import mkdir\n\n\ndef main():\n parser = argparse.ArgumentParser(description=\"PyTorch Object Detection Inference\")\n parser.add_argument(\n \"--config-file\",\n default=\"/private/home/fmassa/github/detectron.pytorch_v2/configs/e2e_faster_rcnn_R_50_C4_1x_caffe2.yaml\",\n metavar=\"FILE\",\n help=\"path to config file\",\n )\n parser.add_argument(\"--local_rank\", type=int, default=0)\n parser.add_argument(\n \"opts\",\n help=\"Modify config options using the command-line\",\n default=None,\n nargs=argparse.REMAINDER,\n )\n\n args = parser.parse_args()\n\n num_gpus = int(os.environ[\"WORLD_SIZE\"]) if \"WORLD_SIZE\" in os.environ else 1\n distributed = num_gpus > 1\n\n if distributed:\n torch.cuda.set_device(args.local_rank)\n torch.distributed.init_process_group(\n backend=\"nccl\", init_method=\"env://\"\n )\n synchronize()\n\n cfg.merge_from_file(args.config_file)\n cfg.merge_from_list(args.opts)\n cfg.freeze()\n\n save_dir = \"\"\n logger = setup_logger(\"maskrcnn_benchmark\", save_dir, get_rank())\n logger.info(\"Using {} GPUs\".format(num_gpus))\n logger.info(cfg)\n\n logger.info(\"Collecting env info (might take some time)\")\n logger.info(\"\\n\" + collect_env_info())\n\n model = build_detection_model(cfg)\n model.to(cfg.MODEL.DEVICE)\n\n output_dir = cfg.OUTPUT_DIR\n checkpointer = DetectronCheckpointer(cfg, model, save_dir=output_dir)\n _ = checkpointer.load(cfg.MODEL.WEIGHT)\n\n iou_types = (\"bbox\",)\n if cfg.MODEL.MASK_ON:\n iou_types = iou_types + (\"segm\",)\n if cfg.MODEL.KEYPOINT_ON:\n iou_types = iou_types + (\"keypoints\",)\n output_folders = [None] * len(cfg.DATASETS.TEST)\n dataset_names = cfg.DATASETS.TEST\n if cfg.OUTPUT_DIR:\n for idx, dataset_name in enumerate(dataset_names):\n output_folder = os.path.join(cfg.OUTPUT_DIR, \"inference\", dataset_name)\n mkdir(output_folder)\n output_folders[idx] = output_folder\n data_loaders_val = make_data_loader(cfg, is_train=False, is_distributed=distributed)\n for output_folder, dataset_name, data_loader_val in zip(output_folders, dataset_names, data_loaders_val):\n 
inference(\n model,\n data_loader_val,\n dataset_name=dataset_name,\n iou_types=iou_types,\n box_only=False if cfg.MODEL.RETINANET_ON else cfg.MODEL.RPN_ONLY,\n device=cfg.MODEL.DEVICE,\n expected_results=cfg.TEST.EXPECTED_RESULTS,\n expected_results_sigma_tol=cfg.TEST.EXPECTED_RESULTS_SIGMA_TOL,\n output_folder=output_folder,\n )\n synchronize()\n\n\nif __name__ == \"__main__\":\n main()\n"
] | [
[
"torch.distributed.init_process_group",
"torch.cuda.set_device"
]
] |
ahmadianlab/tc-gan | [
"06c549e8ae74bc6af62fddeed698565ea1f548c5",
"06c549e8ae74bc6af62fddeed698565ea1f548c5"
] | [
"tc_gan/networks/tests/test_tuning_curve.py",
"tc_gan/loaders/tests/test_gen_moments_records.py"
] | [
"import numpy as np\nimport pytest\n\nfrom ...core import consume_config\nfrom ..cwgan import ConditionalTuningCurveGenerator\nfrom ..ssn import emit_tuning_curve_generator, ssn_type_choices\nfrom ..wgan import DEFAULT_PARAMS\nfrom .test_euler_ssn import JDS\n\nTEST_PARAMS = dict(\n DEFAULT_PARAMS,\n # Stimulator:\n num_tcdom=10,\n num_sites=50,\n # Model / SSN:\n tau_E=2,\n dt=0.1,\n seqlen=240,\n skip_steps=200,\n # Prober:\n probes=[0],\n **JDS # Model / SSN\n)\ndel TEST_PARAMS['bandwidths']\ndel TEST_PARAMS['contrasts']\ndel TEST_PARAMS['sample_sites']\ndel TEST_PARAMS['gen']\ndel TEST_PARAMS['disc']\n\n\ndef emit_tcg_for_test(**kwargs):\n return emit_tuning_curve_generator(**dict(TEST_PARAMS, **kwargs))\n\n\ndef tcg_for_test(config={}, **kwargs):\n tcg, rest = consume_config(emit_tcg_for_test, config, **kwargs)\n assert not rest\n return tcg\n\n\ndef get_param_values(self):\n values = {}\n for p in self.get_all_params():\n values[p.name] = p.get_value()\n return values\n\n\[email protected]('ssn_type, params', [\n ('default', {}),\n # dict(J=0.5), # unsupported (should I?)\n ('default', dict(J=[[1, 2], [3, 4]])),\n ('default', dict(J=np.array([[1, 2], [3, 4]], dtype=int))),\n ('default', dict(J=np.array([[1, 2], [3, 4]], dtype='float32'))),\n ('heteroin', dict(V=[0.3, 0])),\n ('deg-heteroin', dict(V=0.5)),\n])\ndef test_tcg_set_params(ssn_type, params):\n config = dict(ssn_type=ssn_type)\n tcg = tcg_for_test(config)\n keys = set(params)\n tcg.set_params(params)\n assert keys == set(params) # set_params must not modify params\n actual = get_param_values(tcg)\n\n test = {}\n for k in keys:\n test[k] = np.allclose(actual[k], params[k])\n # Manually compare parameters (instead of\n # np.testing.assert_equal) since params[k] may be broadcast to\n # array.\n\n assert all(test.values())\n\n\ndef test_tcg_set_unknown_params():\n tcg = tcg_for_test()\n with pytest.raises(ValueError) as excinfo:\n tcg.set_params(dict(V=[0.3, 0]))\n assert 'Unknown parameters:' in str(excinfo.value)\n\n\nflat_param_names = {\n 'default': [\n 'J_EE', 'J_EI',\n 'J_IE', 'J_II',\n 'D_EE', 'D_EI',\n 'D_IE', 'D_II',\n 'S_EE', 'S_EI',\n 'S_IE', 'S_II',\n ],\n}\nflat_param_names['heteroin'] = ['V_E', 'V_I'] + flat_param_names['default']\nflat_param_names['deg-heteroin'] = ['V'] + flat_param_names['default']\n\n\[email protected]('ssn_type', ssn_type_choices)\[email protected]('conditional', [False, True])\ndef test_tcg_flat_param_names(ssn_type, conditional):\n desired_names = tuple(flat_param_names[ssn_type])\n config = {}\n if conditional:\n config['emit_tcg'] = ConditionalTuningCurveGenerator.consume_kwargs\n tcg = tcg_for_test(config, ssn_type=ssn_type)\n assert tcg.get_flat_param_names() == desired_names\n",
"from types import SimpleNamespace\n\nimport numpy as np\nimport pandas\nimport pytest\n\nfrom ...execution import DataStore\nfrom ...recorders import GenMomentsRecorder\nfrom ..datastore_loader import DataStoreLoader1\n\n\[email protected]('num_mom_conds', [1, 2, 12])\ndef test_record_load(num_mom_conds, tmpdir):\n datastore = DataStore(str(tmpdir))\n recorder = GenMomentsRecorder(datastore, num_mom_conds)\n\n num_steps = 10\n mom_shape = (num_steps, 2 * num_mom_conds)\n desired = pandas.DataFrame(\n np.arange(np.prod(mom_shape)).reshape(mom_shape),\n columns=pandas.MultiIndex.from_product([['mean', 'var'],\n range(num_mom_conds)]),\n dtype='double')\n desired['step'] = np.arange(num_steps, dtype='uint32')\n\n for gen_step in range(num_steps):\n update_result = SimpleNamespace(gen_moments=np.asarray([\n desired.loc[gen_step, 'mean'],\n desired.loc[gen_step, 'var'],\n ]))\n recorder.record(gen_step, update_result)\n\n loader = DataStoreLoader1(str(tmpdir))\n actual = loader.load('gen_moments')\n pandas.testing.assert_frame_equal(actual, desired)\n"
] | [
[
"numpy.array",
"numpy.allclose"
],
[
"numpy.arange",
"pandas.testing.assert_frame_equal",
"numpy.asarray",
"numpy.prod"
]
] |
manivaradarajan/tensorboard | [
"6ba7155a614cf1cfab97f8ec7c561adb0a609b0d"
] | [
"tensorboard/plugins/core/core_plugin_test.py"
] | [
"# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests the TensorBoard core endpoints.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport collections.abc\nimport contextlib\nimport json\nimport os\nimport six\nimport zipfile\n\ntry:\n # python version >= 3.3\n from unittest import mock\nexcept ImportError:\n import mock # pylint: disable=unused-import\n\nimport tensorflow as tf\n\nfrom werkzeug import test as werkzeug_test\nfrom werkzeug import wrappers\n\nfrom tensorboard.backend import application\nfrom tensorboard.backend.event_processing import (\n plugin_event_multiplexer as event_multiplexer,\n)\nfrom tensorboard.data import provider\nfrom tensorboard.plugins import base_plugin\nfrom tensorboard.plugins.core import core_plugin\nfrom tensorboard.util import test_util\n\nFAKE_INDEX_HTML = b\"<!doctype html><title>fake-index</title>\"\n\n\nclass FakeFlags(object):\n def __init__(\n self,\n bind_all=False,\n host=None,\n inspect=False,\n version_tb=False,\n logdir=\"\",\n logdir_spec=\"\",\n event_file=\"\",\n db=\"\",\n path_prefix=\"\",\n generic_data=\"true\",\n ):\n self.bind_all = bind_all\n self.host = host\n self.inspect = inspect\n self.version_tb = version_tb\n self.logdir = logdir\n self.logdir_spec = logdir_spec\n self.event_file = event_file\n self.db = db\n self.path_prefix = path_prefix\n self.generic_data = generic_data\n\n\nclass CorePluginFlagsTest(tf.test.TestCase):\n def testFlag(self):\n loader = core_plugin.CorePluginLoader()\n loader.fix_flags(FakeFlags(version_tb=True))\n loader.fix_flags(FakeFlags(inspect=True, logdir=\"/tmp\"))\n loader.fix_flags(FakeFlags(inspect=True, event_file=\"/tmp/event.out\"))\n loader.fix_flags(FakeFlags(inspect=False, logdir=\"/tmp\"))\n loader.fix_flags(FakeFlags(inspect=False, db=\"sqlite:foo\"))\n # User can pass both, although the behavior is not clearly defined.\n loader.fix_flags(\n FakeFlags(inspect=False, logdir=\"/tmp\", db=\"sqlite:foo\")\n )\n\n logdir_or_db_req = r\"A logdir or db must be specified\"\n one_of_event_or_logdir_req = (\n r\"Must specify either --logdir.*but not both.$\"\n )\n event_or_logdir_req = r\"Must specify either --logdir or --event_file.$\"\n\n with six.assertRaisesRegex(self, ValueError, event_or_logdir_req):\n loader.fix_flags(FakeFlags(inspect=True))\n with six.assertRaisesRegex(\n self, ValueError, one_of_event_or_logdir_req\n ):\n loader.fix_flags(\n FakeFlags(\n inspect=True, logdir=\"/tmp\", event_file=\"/tmp/event.out\"\n )\n )\n with six.assertRaisesRegex(self, ValueError, logdir_or_db_req):\n loader.fix_flags(FakeFlags(inspect=False))\n with six.assertRaisesRegex(self, ValueError, logdir_or_db_req):\n loader.fix_flags(\n FakeFlags(inspect=False, event_file=\"/tmp/event.out\")\n )\n\n def testPathPrefix_stripsTrailingSlashes(self):\n loader = 
core_plugin.CorePluginLoader()\n for path_prefix in (\"/hello\", \"/hello/\", \"/hello//\", \"/hello///\"):\n flag = FakeFlags(\n inspect=False, logdir=\"/tmp\", path_prefix=path_prefix\n )\n loader.fix_flags(flag)\n self.assertEqual(\n flag.path_prefix,\n \"/hello\",\n \"got %r (input %r)\" % (flag.path_prefix, path_prefix),\n )\n\n def testPathPrefix_mustStartWithSlash(self):\n loader = core_plugin.CorePluginLoader()\n flag = FakeFlags(inspect=False, logdir=\"/tmp\", path_prefix=\"noslash\")\n with self.assertRaises(base_plugin.FlagsError) as cm:\n loader.fix_flags(flag)\n msg = str(cm.exception)\n self.assertIn(\"must start with slash\", msg)\n self.assertIn(repr(\"noslash\"), msg)\n\n\nclass CorePluginNoDataTest(tf.test.TestCase):\n def setUp(self):\n super(CorePluginNoDataTest, self).setUp()\n context = base_plugin.TBContext(\n assets_zip_provider=get_test_assets_zip_provider(),\n logdir=self.get_temp_dir(),\n multiplexer=event_multiplexer.EventMultiplexer(),\n window_title=\"title foo\",\n )\n self.plugin = core_plugin.CorePlugin(context)\n app = application.TensorBoardWSGI([self.plugin])\n self.server = werkzeug_test.Client(app, wrappers.BaseResponse)\n\n def _get_json(self, server, path):\n response = server.get(path)\n self.assertEqual(200, response.status_code)\n self.assertEqual(\n \"application/json\", response.headers.get(\"Content-Type\")\n )\n return json.loads(response.get_data().decode(\"utf-8\"))\n\n def testRoutesProvided(self):\n \"\"\"Tests that the plugin offers the correct routes.\"\"\"\n routes = self.plugin.get_plugin_apps()\n self.assertIsInstance(routes[\"/data/logdir\"], collections.abc.Callable)\n self.assertIsInstance(routes[\"/data/runs\"], collections.abc.Callable)\n\n def testIndex_returnsActualHtml(self):\n \"\"\"Test the format of the root / endpoint.\"\"\"\n response = self.server.get(\"/\")\n self.assertEqual(200, response.status_code)\n self.assertStartsWith(response.headers.get(\"Content-Type\"), \"text/html\")\n html = response.get_data()\n self.assertEqual(html, FAKE_INDEX_HTML)\n\n def testDataPaths_disableAllCaching(self):\n \"\"\"Test the format of the /data/runs endpoint.\"\"\"\n for path in (\"/data/runs\", \"/data/logdir\"):\n response = self.server.get(path)\n self.assertEqual(200, response.status_code, msg=path)\n self.assertEqual(\"0\", response.headers.get(\"Expires\"), msg=path)\n\n def testEnvironmentForWindowTitle(self):\n \"\"\"Test that the environment route correctly returns the window\n title.\"\"\"\n parsed_object = self._get_json(self.server, \"/data/environment\")\n self.assertEqual(parsed_object[\"window_title\"], \"title foo\")\n\n def testEnvironmentForLogdir(self):\n \"\"\"Test that the environment route correctly returns the logdir.\"\"\"\n parsed_object = self._get_json(self.server, \"/data/environment\")\n self.assertEqual(parsed_object[\"data_location\"], self.get_temp_dir())\n\n def testLogdir(self):\n \"\"\"Test the format of the data/logdir endpoint.\"\"\"\n parsed_object = self._get_json(self.server, \"/data/logdir\")\n self.assertEqual(parsed_object, {\"logdir\": self.get_temp_dir()})\n\n\nclass CorePluginExperimentMetadataTest(tf.test.TestCase):\n def _get_json(self, server, path):\n response = server.get(path)\n self.assertEqual(200, response.status_code)\n self.assertEqual(\n \"application/json\", response.headers.get(\"Content-Type\")\n )\n return json.loads(response.get_data().decode(\"utf-8\"))\n\n def testGetEnvironmentDataWithExperimentMetadata(self):\n \"\"\"Test environment route returns correct metadata 
about experiment.\"\"\"\n\n class FakeDataProvider(object):\n def data_location(self, ctx, *, experiment_id):\n del experiment_id # Unused.\n return \"\"\n\n def experiment_metadata(self, ctx, *, experiment_id):\n del experiment_id # Unused.\n return provider.ExperimentMetadata(\n experiment_name=\"Experiment #5 (実験#5)\",\n experiment_description=\"Take five (😊)\",\n creation_time=1234.5,\n )\n\n self.context = base_plugin.TBContext(\n flags=FakeFlags(generic_data=\"true\"),\n data_provider=FakeDataProvider(),\n )\n\n self.plugin = core_plugin.CorePlugin(self.context)\n app = application.TensorBoardWSGI([self.plugin])\n self.server = werkzeug_test.Client(app, wrappers.BaseResponse)\n\n parsed_object = self._get_json(self.server, \"/data/environment\")\n self.assertEqual(parsed_object[\"data_location\"], \"\")\n self.assertEqual(parsed_object[\"window_title\"], None)\n self.assertEqual(\n parsed_object[\"experiment_name\"], \"Experiment #5 (実験#5)\"\n )\n self.assertEqual(\n parsed_object[\"experiment_description\"], \"Take five (😊)\"\n )\n self.assertEqual(parsed_object[\"creation_time\"], 1234.5)\n\n def testGetEnvironmentDataWithNoExperimentMetadata(self):\n \"\"\"Test environment route works when no experiment metadata exists.\"\"\"\n\n class FakeDataProvider(object):\n def data_location(self, ctx, *, experiment_id):\n del experiment_id # Unused.\n return \"\"\n\n def experiment_metadata(self, ctx, *, experiment_id):\n del experiment_id # Unused.\n return None\n\n self.context = base_plugin.TBContext(\n flags=FakeFlags(generic_data=\"true\"),\n data_provider=FakeDataProvider(),\n )\n\n self.plugin = core_plugin.CorePlugin(self.context)\n app = application.TensorBoardWSGI([self.plugin])\n self.server = werkzeug_test.Client(app, wrappers.BaseResponse)\n\n parsed_object = self._get_json(self.server, \"/data/environment\")\n self.assertEqual(parsed_object[\"data_location\"], \"\")\n self.assertEqual(parsed_object[\"window_title\"], None)\n self.assertNotIn(\"experiment_name\", parsed_object)\n self.assertNotIn(\"experiment_description\", parsed_object)\n self.assertNotIn(\"creation_time\", parsed_object)\n\n\nclass CorePluginTestBase(object):\n def setUp(self):\n super(CorePluginTestBase, self).setUp()\n self.logdir = self.get_temp_dir()\n self.multiplexer = event_multiplexer.EventMultiplexer()\n context = base_plugin.TBContext(\n assets_zip_provider=get_test_assets_zip_provider(),\n logdir=self.logdir,\n multiplexer=self.multiplexer,\n )\n self.plugin = core_plugin.CorePlugin(context)\n app = application.TensorBoardWSGI([self.plugin])\n self.server = werkzeug_test.Client(app, wrappers.BaseResponse)\n\n def create_multiplexer(self):\n raise NotImplementedError()\n\n def _add_run(self, run_name):\n run_path = os.path.join(self.logdir, run_name)\n with test_util.FileWriter(run_path) as writer:\n writer.add_test_summary(\"foo\")\n self.multiplexer.AddRunsFromDirectory(self.logdir)\n self.multiplexer.Reload()\n\n def _get_json(self, server, path):\n response = server.get(path)\n self.assertEqual(200, response.status_code)\n self.assertEqual(\n \"application/json\", response.headers.get(\"Content-Type\")\n )\n return json.loads(response.get_data().decode(\"utf-8\"))\n\n def testRuns(self):\n \"\"\"Test the format of the /data/runs endpoint.\"\"\"\n self._add_run(\"run1\")\n run_json = self._get_json(self.server, \"/data/runs\")\n self.assertEqual(run_json, [\"run1\"])\n\n def testRunsAppendOnly(self):\n \"\"\"Test that new runs appear after old ones in /data/runs.\"\"\"\n fake_wall_times = {\n 
\"run1\": 1234.0,\n \"avocado\": 2345.0,\n \"zebra\": 3456.0,\n \"ox\": 4567.0,\n \"mysterious\": None,\n \"enigmatic\": None,\n }\n\n def FirstEventTimestamp_stub(run_name):\n matches = [\n candidate_name\n for candidate_name in fake_wall_times\n if run_name.endswith(candidate_name)\n ]\n self.assertEqual(len(matches), 1, \"%s (%s)\" % (matches, run_name))\n wall_time = fake_wall_times[matches[0]]\n if wall_time is None:\n raise ValueError(\"No event timestamp could be found\")\n else:\n return wall_time\n\n with mock.patch.object(\n self.multiplexer, \"FirstEventTimestamp\"\n ) as mock_first_event_timestamp:\n mock_first_event_timestamp.side_effect = FirstEventTimestamp_stub\n # Start with a single run.\n self._add_run(\"run1\")\n\n # Add one run: it should come last.\n self._add_run(\"avocado\")\n self.assertEqual(\n self._get_json(self.server, \"/data/runs\"), [\"run1\", \"avocado\"],\n )\n\n # Add another run: it should come last, too.\n self._add_run(\"zebra\")\n self.assertEqual(\n self._get_json(self.server, \"/data/runs\"),\n [\"run1\", \"avocado\", \"zebra\"],\n )\n\n # And maybe there's a run for which we somehow have no timestamp.\n self._add_run(\"mysterious\")\n self.assertEqual(\n self._get_json(self.server, \"/data/runs\"),\n [\"run1\", \"avocado\", \"zebra\", \"mysterious\"],\n )\n\n # Add another timestamped run: it should come before the timestamp-less one.\n self._add_run(\"ox\")\n self.assertEqual(\n self._get_json(self.server, \"/data/runs\"),\n [\"run1\", \"avocado\", \"zebra\", \"ox\", \"mysterious\"],\n )\n\n # Add another timestamp-less run, lexicographically before the other one:\n # it should come after all timestamped runs but first among timestamp-less.\n self._add_run(\"enigmatic\")\n self.assertEqual(\n self._get_json(self.server, \"/data/runs\"),\n [\"run1\", \"avocado\", \"zebra\", \"ox\", \"enigmatic\", \"mysterious\"],\n )\n\n\ndef get_test_assets_zip_provider():\n memfile = six.BytesIO()\n with zipfile.ZipFile(\n memfile, mode=\"w\", compression=zipfile.ZIP_DEFLATED\n ) as zf:\n zf.writestr(\"index.html\", FAKE_INDEX_HTML)\n return lambda: contextlib.closing(six.BytesIO(memfile.getvalue()))\n\n\nif __name__ == \"__main__\":\n tf.test.main()\n"
] | [
[
"tensorflow.test.main"
]
] |
madcpt/MachineWontLie | [
"992156f3916bafeaa01a3685eae285550391132e"
] | [
"models/PCA.py"
] | [
"import torch\nfrom torch import nn\nfrom torch.nn import init\nfrom torch.utils.data import DataLoader\nfrom overrides import overrides\nimport numpy as np\nimport time\n\nfrom models.BaseModel import BaseModel\n\n\nclass PCAModel(BaseModel):\n def __init__(self, configs: object):\n super().__init__(configs.model.model_name, configs.device)\n from sklearn.decomposition import PCA\n self.pca_cls = PCA(n_components=30)\n\n from sklearn.svm import SVC\n self.svm_cls = SVC(kernel=\"rbf\", probability=True, )\n\n @overrides\n def train_epoch(self, epoch_num: int, train_loader: DataLoader):\n x = torch.flatten(train_loader.dataset.data, 1).numpy()\n y = train_loader.dataset.targets.numpy()\n self.pca_cls.fit(x, y)\n x_pca = self.pca_cls.transform(x)\n # print(x_pca.shape)\n self.svm_cls.fit(x_pca, y)\n\n @overrides\n def test_epoch(self, epoch_num: int, test_loader: DataLoader):\n x = torch.flatten(test_loader.dataset.data, 1).numpy()\n y = test_loader.dataset.targets.numpy()\n pca_result: np.ndarray = self.pca_cls.transform(x)\n predict_score = self.svm_cls.predict(pca_result)\n predict_result = predict_score\n # predict_result = np.argmax(predict_score,axis=1)\n # print(x.shape, predict_score.shape, predict_result.shape, y.shape)\n results: np.ndarray = predict_result == y\n return sum(results) / len(results)\n\n @overrides\n def run_epochs(self, epochs: int, train_loader: DataLoader, test_loader: DataLoader):\n t1 = time.time()\n self.train_epoch(0, train_loader)\n t2 = time.time()\n acc = self.test_epoch(0, test_loader)\n if self.writer:\n self.writer.add_scalar('test_acc', acc, 0)\n print(acc, t2 - t1, time.time() - t2)\n"
] | [
[
"torch.flatten",
"sklearn.svm.SVC",
"sklearn.decomposition.PCA"
]
] |
thentgesMindee/doctr | [
"f97e92ba1b7bcb785a60f2cf549f13f88e510609"
] | [
"doctr/models/recognition/predictor/pytorch.py"
] | [
"# Copyright (C) 2021, Mindee.\n\n# This program is licensed under the Apache License version 2.\n# See LICENSE or go to <https://www.apache.org/licenses/LICENSE-2.0.txt> for full license details.\n\nfrom typing import Any, List, Tuple, Union\n\nimport numpy as np\nimport torch\nfrom torch import nn\n\nfrom doctr.models.preprocessor import PreProcessor\n\nfrom ._utils import remap_preds, split_crops\n\n__all__ = ['RecognitionPredictor']\n\n\nclass RecognitionPredictor(nn.Module):\n \"\"\"Implements an object able to identify character sequences in images\n\n Args:\n pre_processor: transform inputs for easier batched model inference\n model: core detection architecture\n split_wide_crops: wether to use crop splitting for high aspect ratio crops\n \"\"\"\n\n def __init__(\n self,\n pre_processor: PreProcessor,\n model: nn.Module,\n split_wide_crops: bool = True,\n ) -> None:\n\n super().__init__()\n self.pre_processor = pre_processor\n self.model = model.eval()\n self.split_wide_crops = split_wide_crops\n self.critical_ar = 8 # Critical aspect ratio\n self.dil_factor = 1.4 # Dilation factor to overlap the crops\n self.target_ar = 4 # Target aspect ratio\n\n @torch.no_grad()\n def forward(\n self,\n crops: List[Union[np.ndarray, torch.Tensor]],\n **kwargs: Any,\n ) -> List[Tuple[str, float]]:\n\n if len(crops) == 0:\n return []\n # Dimension check\n if any(crop.ndim != 3 for crop in crops):\n raise ValueError(\"incorrect input shape: all crops are expected to be multi-channel 2D images.\")\n\n # Split crops that are too wide\n remapped = False\n if self.split_wide_crops:\n new_crops, crop_map, remapped = split_crops(\n crops,\n self.critical_ar,\n self.target_ar,\n self.dil_factor,\n isinstance(crops[0], np.ndarray)\n )\n if remapped:\n crops = new_crops\n\n # Resize & batch them\n processed_batches = self.pre_processor(crops)\n\n # Forward it\n raw = [\n self.model(batch, return_preds=True, **kwargs)['preds'] # type: ignore[operator]\n for batch in processed_batches\n ]\n\n # Process outputs\n out = [charseq for batch in raw for charseq in batch]\n\n # Remap crops\n if self.split_wide_crops and remapped:\n out = remap_preds(out, crop_map, self.dil_factor)\n\n return out\n"
] | [
[
"torch.no_grad"
]
] |
goodcq/CommPy | [
"af3a9acba32d2f9c6b723705f709fee2cb9352e2"
] | [
"commpy/tests/test_channels.py"
] | [
"# Authors: CommPy contributors\n# License: BSD 3-Clause\n\nfrom __future__ import division, print_function # Python 2 compatibility\n\nfrom math import cos\n\nfrom numpy import ones, inf, sqrt, array, identity, zeros, dot, trace, einsum, absolute, exp, pi, fromiter, kron, \\\n zeros_like, empty\nfrom numpy.random import seed, choice, randn\nfrom numpy.testing import run_module_suite, assert_raises, assert_equal, assert_allclose, \\\n assert_array_equal, dec\n\nfrom commpy.channels import SISOFlatChannel, MIMOFlatChannel\nfrom commpy.utilities import signal_power\n\n\nclass TestSISOFlatChannel:\n msg_length = 100000\n real_mods = array((-1, 1)), array((-3, 3))\n all_mods = array((-1, 1)), array((-3, 3)), \\\n array((-1 - 1j, -1 + 1j, 1 - 1j, 1 + 1j)), array((-3 - 3j, -3 + 3j, 3 - 3j, 3 + 3j))\n\n def test_default_args(self):\n def check(chan):\n assert_equal(chan.noises, None,\n err_msg='Default noises is not None')\n assert_equal(chan.channel_gains, None,\n err_msg='Default channel gains is not None')\n assert_equal(chan.unnoisy_output, None,\n err_msg='Default unnoisy output is not None')\n\n chan = SISOFlatChannel()\n\n # Test output state before any propagation\n check(chan)\n\n # Test that noise standard deviation must be set before propagation\n with assert_raises(AssertionError):\n chan.propagate(array((1, 1)))\n\n # Test output state before any propagation\n check(chan)\n\n assert_equal(chan.nb_rx, 1,\n err_msg='SISO channel as more than 1 Rx')\n assert_equal(chan.nb_tx, 1,\n err_msg='SISO channel as more than 1 Tx')\n\n def test_fading(self):\n # Set seed\n seed(17121996)\n\n def check_chan_gain(mod, chan):\n msg = choice(mod, self.msg_length)\n chan.propagate(msg)\n\n P_msg = signal_power(msg)\n P_unnoisy = signal_power(chan.unnoisy_output)\n\n assert_allclose(P_unnoisy, P_msg, rtol=0.2,\n err_msg='Channel add or remove energy')\n\n # Test value checking in constructor construction\n with assert_raises(ValueError):\n SISOFlatChannel(0, (1, 1))\n\n chan = SISOFlatChannel(0)\n\n # Test on real channel\n for mod in self.real_mods:\n # Test value checking after construction\n with assert_raises(ValueError):\n chan.fading_param = (1, 1)\n\n # Test without fading\n chan.fading_param = (1, 0)\n check_chan_gain(mod, chan)\n assert_array_equal(chan.channel_gains, ones(self.msg_length),\n err_msg='Channel fading while fading is disabled')\n\n # Test with Rayleigh fading\n chan.fading_param = (0, 1)\n check_chan_gain(mod, chan)\n assert_allclose(absolute(chan.channel_gains.mean()), 0, atol=2e-2,\n err_msg='Wrong channel mean with real channel')\n assert_allclose(chan.channel_gains.var(), 1, atol=0.2,\n err_msg='Wrong channel variance with real channel')\n\n # Test with rician fading\n chan.fading_param = (sqrt(2 / 3), 1 / 3)\n check_chan_gain(mod, chan)\n assert_allclose(chan.channel_gains.mean(), sqrt(2 / 3), atol=2e-2,\n err_msg='Wrong channel mean with real channel')\n assert_allclose(chan.channel_gains.var(), 1 / 3, atol=0.2,\n err_msg='Wrong channel variance with real channel')\n\n # Test on complex channel\n for mod in self.all_mods:\n # Test value checking after construction\n with assert_raises(ValueError):\n chan.fading_param = (1, 1)\n\n # Test without fading\n chan.fading_param = (1 + 0j, 0)\n check_chan_gain(mod, chan)\n assert_array_equal(chan.channel_gains, ones(self.msg_length),\n err_msg='Channel fading while fading is disabled')\n\n # Test with Rayleigh fading\n chan.fading_param = (0j, 1)\n check_chan_gain(mod, chan)\n assert_allclose(absolute(chan.channel_gains.mean()), 
0, atol=2e-2,\n err_msg='Wrong channel mean with real channel')\n assert_allclose(chan.channel_gains.var(), 1, atol=0.2,\n err_msg='Wrong channel variance with real channel')\n\n # Test with rician fading\n chan.fading_param = (0.5 + 0.5j, 0.5)\n check_chan_gain(mod, chan)\n assert_allclose(absolute(chan.channel_gains.mean()), sqrt(0.5), atol=2e-2,\n err_msg='Wrong channel mean with real channel')\n assert_allclose(chan.channel_gains.var(), 0.5, atol=0.2,\n err_msg='Wrong channel variance with real channel')\n\n def test_noise_generation(self):\n # Set seed\n seed(17121996)\n\n def check_noise(mod, chan, corrected_SNR_lin):\n msg = choice(mod, self.msg_length)\n chan.propagate(msg)\n\n P_msg = signal_power(msg) # previous test asserted that channel neither add nor remove energy\n P_noise = signal_power(chan.noises)\n\n assert_allclose(absolute(chan.noises.mean()), 0., atol=5e-2,\n err_msg='Noise mean is not 0')\n if corrected_SNR_lin == inf:\n assert_allclose(P_noise, 0, atol=1e-2,\n err_msg='There is noise that should not be here')\n else:\n assert_allclose(P_msg / P_noise, corrected_SNR_lin, atol=0.2,\n err_msg='Wrong SNR')\n\n chan = SISOFlatChannel(fading_param=(1 + 0j, 0))\n for mod in self.all_mods:\n chan.noise_std = 0\n check_noise(mod, chan, inf)\n chan.set_SNR_lin(6, Es=signal_power(mod))\n check_noise(mod, chan, 6)\n chan.set_SNR_lin(6, .5, signal_power(mod))\n check_noise(mod, chan, 3)\n chan.set_SNR_dB(0, Es=signal_power(mod))\n check_noise(mod, chan, 1)\n chan.set_SNR_dB(0, .5, signal_power(mod))\n check_noise(mod, chan, .5)\n\n chan = SISOFlatChannel(fading_param=(1, 0))\n for mod in self.real_mods:\n chan.noise_std = 0\n check_noise(mod, chan, inf)\n chan.set_SNR_lin(6, Es=signal_power(mod))\n check_noise(mod, chan, 6)\n chan.set_SNR_lin(6, .5, signal_power(mod))\n check_noise(mod, chan, 3)\n chan.set_SNR_dB(0, Es=signal_power(mod))\n check_noise(mod, chan, 1)\n chan.set_SNR_dB(0, .5, signal_power(mod))\n check_noise(mod, chan, .5)\n\n def test_type_check(self):\n chan = SISOFlatChannel(0)\n with assert_raises(TypeError):\n chan.propagate(array((1, 1j)))\n\n def test_k_factor(self):\n # Real channel\n chan = SISOFlatChannel()\n assert_allclose(chan.k_factor, inf,\n err_msg='k-factor should be infinite without fading in SISO channels')\n chan.fading_param = 0, 1\n assert_allclose(chan.k_factor, 0,\n err_msg='k-factor should be 0 with Rayleigh fading in SISO channels')\n chan.fading_param = sqrt(0.5), 0.5\n assert_allclose(chan.k_factor, 1,\n err_msg='Wrong k-factor with rician fading in SISO channels')\n\n # Complex channel\n chan.fading_param = 1j, 0\n assert_allclose(chan.k_factor, inf,\n err_msg='k-factor should be infinite without fading in SISO channels')\n chan.fading_param = 0j, 1\n assert_allclose(chan.k_factor, 0,\n err_msg='k-factor should be 0 with Rayleigh fading in SISO channels')\n chan.fading_param = 0.5 + 0.5j, 0.5\n assert_allclose(chan.k_factor, 1,\n err_msg='Wrong k-factor with rician fading in SISO channels')\n\n\nclass MIMOTestCase(object):\n msg_length = 100000\n real_mods = array((-1, 1)), array((-3, 3))\n all_mods = array((-1, 1)), array((-3, 3)), \\\n array((-1 - 1j, -1 + 1j, 1 - 1j, 1 + 1j)), array((-3 - 3j, -3 + 3j, 3 - 3j, 3 + 3j))\n\n @staticmethod\n def random_SDP_matrix(n):\n G = randn(n, n)\n dot(G, G.T, G)\n return G / trace(G)\n\n def test_symetric(self):\n nb_tx = 8\n nb_rx = 8\n self.do(nb_tx, nb_rx)\n\n def test_more_rx(self):\n nb_tx = 4\n nb_rx = 8\n self.do(nb_tx, nb_rx)\n\n def test_more_tx(self):\n nb_tx = 8\n nb_rx = 4\n 
self.do(nb_tx, nb_rx)\n\n def test_SIMO(self):\n nb_tx = 1\n nb_rx = 8\n self.do(nb_tx, nb_rx)\n\n def test_MISO(self):\n nb_tx = 8\n nb_rx = 1\n self.do(nb_tx, nb_rx)\n\n def test_SISO(self):\n nb_tx = 1\n nb_rx = 1\n self.do(nb_tx, nb_rx)\n\n\nclass TestMIMODefaultArgs(MIMOTestCase):\n def __init__(self):\n super(TestMIMODefaultArgs, self).__init__()\n\n def do(self, nb_tx, nb_rx):\n def check(chan):\n assert_equal(chan.noises, None,\n err_msg='Default noises is not None')\n assert_equal(chan.channel_gains, None,\n err_msg='Default channel gains is not None')\n assert_equal(chan.unnoisy_output, None,\n err_msg='Default unnoisy output is not None')\n\n chan = MIMOFlatChannel(nb_tx, nb_rx)\n\n # Test output state before any propagation\n check(chan)\n\n # Test that noise standard deviation must be set before propagation\n with assert_raises(AssertionError):\n chan.propagate(array((1, 1)))\n\n # Test output state before any propagation\n check(chan)\n\n\[email protected]\nclass TestMIMOFading(MIMOTestCase):\n def __init__(self):\n super(TestMIMOFading, self).__init__()\n\n def do(self, nb_tx, nb_rx):\n # Set seed\n seed(17121996)\n\n def check_chan_gain(mod, chan):\n msg = choice(mod, self.msg_length)\n chan.propagate(msg)\n\n P_msg = signal_power(msg)\n P_unnoisy = signal_power(chan.unnoisy_output)\n\n assert_allclose(P_unnoisy, P_msg * chan.nb_tx, rtol=0.2,\n err_msg='Channel add or remove energy')\n\n def expo_correlation(t, r):\n # Construct the exponent matrix\n expo_tx = fromiter((j - i for i in range(chan.nb_tx) for j in range(chan.nb_tx)), int, chan.nb_tx ** 2)\n expo_rx = fromiter((j - i for i in range(chan.nb_rx) for j in range(chan.nb_rx)), int, chan.nb_rx ** 2)\n\n # Reshape\n expo_tx = expo_tx.reshape(chan.nb_tx, chan.nb_tx)\n expo_rx = expo_rx.reshape(chan.nb_rx, chan.nb_rx)\n\n return t ** expo_tx, r ** expo_rx\n\n def check_correlation(chan, Rt, Rr):\n nb_ant = chan.nb_tx * chan.nb_rx\n Rdes = kron(Rt, Rr)\n H = chan.channel_gains\n Ract = zeros_like(Rdes)\n for i in range(len(H)):\n Ract += H[i].T.reshape(nb_ant, 1).dot(H[i].T.reshape(1, nb_ant).conj())\n Ract /= len(H)\n assert_allclose(Rdes, Ract, atol=0.05,\n err_msg='Wrong correlation matrix')\n\n # Test value checking in constructor construction\n with assert_raises(ValueError):\n MIMOFlatChannel(nb_tx, nb_tx, 0, (ones((nb_tx, nb_tx)), ones((nb_tx, nb_tx)), ones((nb_rx, nb_rx))))\n\n chan = MIMOFlatChannel(nb_tx, nb_rx, 0)\n prod_nb = nb_tx * nb_rx\n\n # Test on real channel\n for mod in self.real_mods:\n # Test value checking after construction\n with assert_raises(ValueError):\n chan.fading_param = (ones((nb_tx, nb_tx)), ones((nb_tx, nb_tx)), ones((nb_rx, nb_rx)))\n\n # Test with Rayleigh fading\n chan.fading_param = (zeros((nb_rx, nb_tx)), identity(nb_tx), identity(nb_rx))\n check_chan_gain(mod, chan)\n\n # Test with rician fading\n mean = randn(nb_rx, nb_tx)\n mean *= sqrt(prod_nb * 0.75 / einsum('ij,ij->', absolute(mean), absolute(mean)))\n Rt = self.random_SDP_matrix(nb_tx) * sqrt(prod_nb) * 0.5\n Rr = self.random_SDP_matrix(nb_rx) * sqrt(prod_nb) * 0.5\n chan.fading_param = (mean, Rt, Rr)\n check_chan_gain(mod, chan)\n\n # Test helper functions\n chan.uncorr_rayleigh_fading(float)\n check_chan_gain(mod, chan)\n assert_allclose(chan.k_factor, 0,\n err_msg='Wrong k-factor with uncorrelated Rayleigh fading')\n\n mean = randn(nb_rx, nb_tx)\n chan.uncorr_rician_fading(mean, 10)\n check_chan_gain(mod, chan)\n assert_allclose(chan.k_factor, 10,\n err_msg='Wrong k-factor with uncorrelated rician fading')\n\n # Test on 
complex channel\n for mod in self.all_mods:\n # Test value checking after construction\n with assert_raises(ValueError):\n chan.fading_param = (ones((nb_tx, nb_tx)), ones((nb_tx, nb_tx)), ones((nb_rx, nb_rx)))\n\n # Test with Rayleigh fading\n chan.fading_param = (zeros((nb_rx, nb_tx), complex), identity(nb_tx), identity(nb_rx))\n check_chan_gain(mod, chan)\n assert_allclose(chan.channel_gains.mean(), 0, atol=1e-2,\n err_msg='Wrong channel mean with complex channel')\n assert_allclose(chan.channel_gains.var(), 1, atol=5e-2,\n err_msg='Wrong channel variance with complex channel')\n\n # Test with rician fading\n mean = randn(nb_rx, nb_tx) + 1j * randn(nb_rx, nb_tx)\n mean *= sqrt(prod_nb * 0.75 / einsum('ij,ij->', absolute(mean), absolute(mean)))\n Rt = self.random_SDP_matrix(nb_tx) * sqrt(prod_nb) * 0.5\n Rr = self.random_SDP_matrix(nb_rx) * sqrt(prod_nb) * 0.5\n chan.fading_param = (mean, Rt, Rr)\n check_chan_gain(mod, chan)\n\n assert_allclose(chan.channel_gains.mean(0).real, mean.real, atol=0.1,\n err_msg='Wrong channel mean with complex channel')\n assert_allclose(chan.channel_gains.mean(0).imag, mean.imag, atol=0.1,\n err_msg='Wrong channel mean with complex channel')\n\n # Test helper functions\n chan.uncorr_rayleigh_fading(complex)\n check_chan_gain(mod, chan)\n assert_allclose(chan.k_factor, 0,\n err_msg='Wrong k-factor with uncorrelated Rayleigh fading')\n\n mean = randn(nb_rx, nb_tx) + randn(nb_rx, nb_tx) * 1j\n chan.uncorr_rician_fading(mean, 10)\n check_chan_gain(mod, chan)\n assert_allclose(chan.k_factor, 10,\n err_msg='Wrong k-factor with uncorrelated rician fading')\n\n chan.expo_corr_rayleigh_fading(exp(-0.2j * pi), exp(-0.1j * pi))\n check_chan_gain(mod, chan)\n assert_allclose(chan.k_factor, 0,\n err_msg='Wrong k-factor with correlated Rayleigh fading')\n Rt, Rr = expo_correlation(exp(-0.2j * pi), exp(-0.1j * pi))\n check_correlation(chan, Rt, Rr)\n\n mean = randn(nb_rx, nb_tx) + randn(nb_rx, nb_tx) * 1j\n chan.expo_corr_rician_fading(mean, 10, exp(-0.1j * pi), exp(-0.2j * pi))\n check_chan_gain(mod, chan)\n assert_allclose(chan.k_factor, 10,\n err_msg='Wrong k-factor with correlated rician fading')\n\n # Test with beta > 0\n chan.expo_corr_rayleigh_fading(exp(-0.2j * pi), exp(-0.1j * pi), 1, 0.5)\n check_chan_gain(mod, chan)\n assert_allclose(chan.k_factor, 0,\n err_msg='Wrong k-factor with correlated Rayleigh fading')\n\n mean = randn(nb_rx, nb_tx) + randn(nb_rx, nb_tx) * 1j\n chan.expo_corr_rician_fading(mean, 5, exp(-0.1j * pi), exp(-0.2j * pi), 3, 2)\n check_chan_gain(mod, chan)\n assert_allclose(chan.k_factor, 5,\n err_msg='Wrong k-factor with correlated rician fading')\n\n\nclass TestMIMOSpectular(MIMOTestCase):\n def __init__(self):\n super(TestMIMOSpectular, self).__init__()\n\n def do(self, nb_tx, nb_rx):\n chan = MIMOFlatChannel(nb_tx, nb_rx, 0)\n\n # Test raising of ValueError\n with assert_raises(ValueError):\n chan.specular_compo(0, -1, 0, 1)\n with assert_raises(ValueError):\n chan.specular_compo(0, 1, 0, -1)\n\n # Test the result\n desired = empty((nb_rx, nb_tx), dtype=complex)\n for n in range(nb_rx):\n for m in range(nb_tx):\n desired[n, m] = exp(1j * 2 * pi * (n * 1 * cos(0.5) - m * 0.1 * cos(2)))\n assert_allclose(chan.specular_compo(2, 0.1, 0.5, 1), desired, rtol=0.02,\n err_msg='Wrong specular component')\n\n\[email protected]\nclass TestMIMONoiseGeneration(MIMOTestCase):\n def __init__(self):\n super(TestMIMONoiseGeneration, self).__init__()\n\n def do(self, nb_tx, nb_rx):\n # Set seed\n seed(17121996)\n\n def check_noise(mod, chan, 
corrected_SNR_lin):\n msg = choice(mod, self.msg_length)\n chan.propagate(msg)\n\n P_msg = signal_power(msg) # previous test asserted that channel neither add nor remove energy\n P_noise = signal_power(chan.noises)\n\n assert_allclose(abs(chan.noises.mean()), 0., atol=0.5,\n err_msg='Noise mean is not 0')\n if corrected_SNR_lin == inf:\n assert_allclose(P_noise, 0, atol=1e-2,\n err_msg='There is noise that should not be here')\n else:\n assert_allclose(chan.nb_tx * P_msg / P_noise, corrected_SNR_lin, atol=0.2,\n err_msg='Wrong SNR')\n\n fading_param = zeros((nb_rx, nb_tx), complex), identity(nb_tx), identity(nb_rx)\n chan = MIMOFlatChannel(nb_tx, nb_rx, fading_param=fading_param)\n for mod in self.all_mods:\n chan.noise_std = 0\n check_noise(mod, chan, inf)\n chan.set_SNR_lin(6, Es=signal_power(mod))\n check_noise(mod, chan, 6)\n chan.set_SNR_lin(6, .5, signal_power(mod))\n check_noise(mod, chan, 3)\n chan.set_SNR_dB(0, Es=signal_power(mod))\n check_noise(mod, chan, 1)\n chan.set_SNR_dB(0, .5, signal_power(mod))\n check_noise(mod, chan, .5)\n\n\nclass TestMIMOTypeCheck(MIMOTestCase):\n def __init__(self):\n super(TestMIMOTypeCheck, self).__init__()\n\n def do(self, nb_tx, nb_rx):\n chan = MIMOFlatChannel(nb_tx, nb_rx, 0)\n with assert_raises(TypeError):\n chan.propagate(array((1, 1j)))\n\n\nclass TestMIMOShapes(MIMOTestCase):\n def __init__(self):\n super(TestMIMOShapes, self).__init__()\n\n def do(self, nb_tx, nb_rx):\n # Without padding\n chan = MIMOFlatChannel(nb_tx, nb_rx, 0)\n out = chan.propagate(ones(nb_tx * 2))\n assert_array_equal(chan.channel_gains.shape, (2, nb_rx, nb_tx),\n err_msg='Wrong channel shape without padding')\n assert_array_equal(chan.noises.shape, (2, nb_rx),\n err_msg='Wrong channel shape without padding')\n assert_array_equal(chan.unnoisy_output.shape, (2, nb_rx),\n err_msg='Wrong channel shape without padding')\n assert_array_equal(out.shape, (2, nb_rx),\n err_msg='Wrong channel shape without padding')\n\n # With padding\n chan = MIMOFlatChannel(nb_tx, nb_rx, 0)\n out = chan.propagate(ones(nb_tx * 2 + 1))\n assert_array_equal(chan.channel_gains.shape, (3, nb_rx, nb_tx),\n err_msg='Wrong channel shape with padding')\n assert_array_equal(chan.noises.shape, (3, nb_rx),\n err_msg='Wrong channel shape with padding')\n assert_array_equal(chan.unnoisy_output.shape, (3, nb_rx),\n err_msg='Wrong channel shape with padding')\n assert_array_equal(out.shape, (3, nb_rx),\n err_msg='Wrong channel shape with padding')\n\n\nclass TestMIMOkFactor(MIMOTestCase):\n def __init__(self):\n super(TestMIMOkFactor, self).__init__()\n\n def do(self, nb_tx, nb_rx):\n # Set seed\n seed(17121996)\n\n prod_nb = nb_tx * nb_rx\n\n # Real channel\n chan = MIMOFlatChannel(nb_tx, nb_rx)\n assert_allclose(chan.k_factor, 0,\n err_msg='k-factor should be 0 with Rayleigh fading in SISO channels')\n mean = randn(nb_rx, nb_tx)\n mean *= sqrt(prod_nb * 0.75 / einsum('ij,ij->', absolute(mean), absolute(mean)))\n Rs = self.random_SDP_matrix(nb_tx) * sqrt(prod_nb) * 0.5\n Rr = self.random_SDP_matrix(nb_rx) * sqrt(prod_nb) * 0.5\n chan.fading_param = mean, Rs, Rr\n assert_allclose(chan.k_factor, 3,\n err_msg='Wrong k-factor with rician fading in SISO channels')\n\n # Complex channel\n chan.fading_param = (zeros((nb_rx, nb_tx), complex), identity(nb_tx), identity(nb_rx))\n assert_allclose(chan.k_factor, 0,\n err_msg='k-factor should be 0 with Rayleigh fading in SISO channels')\n mean = randn(nb_rx, nb_tx) + 1j * randn(nb_rx, nb_tx)\n mean *= sqrt(prod_nb * 0.75 / einsum('ij,ij->', absolute(mean), 
absolute(mean)))\n Rs = self.random_SDP_matrix(nb_tx) * sqrt(prod_nb) * 0.5\n Rr = self.random_SDP_matrix(nb_rx) * sqrt(prod_nb) * 0.5\n chan.fading_param = (mean, Rs, Rr)\n assert_allclose(chan.k_factor, 3,\n err_msg='Wrong k-factor with rician fading in SISO channels')\n\n\nif __name__ == \"__main__\":\n run_module_suite()\n"
] | [
[
"numpy.ones",
"numpy.testing.assert_equal",
"numpy.testing.run_module_suite",
"numpy.random.seed",
"numpy.trace",
"numpy.kron",
"numpy.testing.assert_array_equal",
"numpy.random.choice",
"numpy.absolute",
"numpy.identity",
"numpy.zeros",
"numpy.array",
"numpy.testing.assert_raises",
"numpy.zeros_like",
"numpy.empty",
"numpy.random.randn",
"numpy.exp",
"numpy.testing.assert_allclose",
"numpy.sqrt",
"numpy.dot"
]
] |
hovey/py3DViewer | [
"7ae1697aa4860430d0d94b854f8b1f2a4b2d895f"
] | [
"Py3DViewer/structures/Trimesh.py"
] | [
"from .Abstractmesh import AbstractMesh\nimport numpy as np\nfrom ..utils import IO, ObservableArray, deprecated, utilities\nfrom ..utils.load_operations import get_connectivity_info_surface as get_connectivity_info \nfrom ..utils.load_operations import compute_vertex_normals, compute_face_normals\nfrom ..utils.load_operations import _compute_three_vertex_normals as compute_three_normals\nfrom ..utils.metrics import triangle_aspect_ratio, triangle_area\n\n\nclass Trimesh(AbstractMesh):\n \"\"\"\n This class represents a mesh composed of triangles. It is possible to load the mesh from a file or\n from raw geometry and topology data.\n\n Parameters:\n\n filename (string): The name of the file to load \n vertices (Array (Nx3) type=float): The list of vertices of the mesh\n polys (Array (Nx3) type=int): The list of polygons of the mesh\n labels (Array (Nx1) type=int): The list of labels of the mesh (Optional)\n\n\n \"\"\"\n\n def __init__(self, filename=None, vertices=None, polys=None, labels=None, texture=None, mtl=None, smoothness=False):\n\n super(Trimesh, self).__init__()\n \n self.vtx_normals = None # npArray (Nx3)\n self.poly_normals = None # npArray (Nx3)\n self.texture = texture\n self.material = {}\n self.groups = {}\n self.smoothness = smoothness\n\n self.__map_poly_indices = []\n\n\n if mtl is not None:\n self.__load_from_file(mtl)\n\n if filename is not None:\n self.__load_from_file(filename)\n self._AbstractMesh__filename = filename.split('/')[-1]\n\n elif vertices is not None and polys is not None:\n\n vertices = np.array(vertices)\n polys = np.array(polys)\n self.vertices = ObservableArray(vertices.shape)\n self.vertices[:] = vertices\n self.vertices.attach(self)\n self._AbstractMesh__polys = ObservableArray(polys.shape, dtype=np.int64)\n self._AbstractMesh__polys[:] = polys\n self._AbstractMesh__polys.attach(self)\n self.__load_operations()\n\n if labels is not None:\n labels = np.array(labels)\n assert(labels.shape[0] == polys.shape[0])\n self.labels = ObservableArray(labels.shape, dtype=np.int)\n self.labels[:] = labels\n self.labels.attach(self)\n else:\n self.labels = ObservableArray(polys.shape[0], dtype=np.int)\n self.labels[:] = np.zeros(self.labels.shape, dtype=np.int)\n self.labels.attach(self)\n \n self._AbstractMesh__poly_size = 3\n self._AbstractMesh__finished_loading = True\n \n\n # ==================== METHODS ==================== # \n\n\n def __load_operations(self):\n self._dont_update = True\n self._AbstractMesh__boundary_needs_update = True\n self._AbstractMesh__simplex_centroids = None\n\n \n self._AbstractMesh__edges, \\\n self._AbstractMesh__adj_vtx2vtx, \\\n self._AbstractMesh__adj_vtx2edge, \\\n self._AbstractMesh__adj_vtx2poly, \\\n self._AbstractMesh__adj_edge2vtx, \\\n self._AbstractMesh__adj_edge2edge, \\\n self._AbstractMesh__adj_edge2poly, \\\n self._AbstractMesh__adj_poly2vtx, \\\n self._AbstractMesh__adj_poly2edge, \\\n self._AbstractMesh__adj_poly2poly = get_connectivity_info(self.num_vertices, self.polys)\n \n\n self._AbstractMesh__update_bounding_box()\n self.reset_clipping()\n self.poly_normals = compute_face_normals(self.vertices, self.polys)\n self.vtx_normals = compute_vertex_normals(self.poly_normals, self.adj_vtx2poly._NList__list)\n self.__compute_metrics()\n self._AbstractMesh__simplex_centroids = None\n\n self._dont_update = False\n self.update()\n\n def __load_from_file(self, filename):\n\n ext = filename.split('.')[-1]\n\n if ext == 'obj':\n self.vertices, self._AbstractMesh__polys, self.poly_normals, self.uvcoords, self.coor, 
self.groups = IO.read_obj(filename)\n # self.vertices, self.faces, self.face_normals = IO.read_obj(filename)\n self.vertices.attach(self)\n self._AbstractMesh__polys.attach(self)\n self.poly_normals.attach(self)\n self.uvcoords.attach(self)\n self.coor.attach(self)\n elif ext == 'mtl':\n self.material = IO.read_mtl(filename)\n return\n\n elif ext == 'off':\n self.vertices, self._AbstractMesh__polys = IO.read_off(filename)\n self.vertices.attach(self)\n self._AbstractMesh__polys.attach(self)\n\n elif ext == 'mesh':\n self.vertices, self._AbstractMesh__polys, labels = IO.read_mesh(filename)\n self.vertices.attach(self)\n self._AbstractMesh__polys.attach(self)\n\n else:\n raise Exception(\"Only .obj, .off and .mesh files are supported\")\n\n self.labels = ObservableArray(self.num_polys, dtype=np.int)\n self.labels[:] = np.zeros(self.labels.shape, dtype=np.int) if ext != 'mesh' else labels\n self.labels.attach(self)\n\n self.__load_operations()\n\n return self\n\n def save_file(self, filename):\n\n \"\"\"\n Save the current mesh in a file. Currently it supports the .obj extension. \n\n Parameters:\n\n filename (string): The name of the file\n\n \"\"\"\n\n ext = filename.split('.')[-1]\n\n if ext == 'obj':\n IO.save_obj(self, filename)\n elif ext == 'off':\n IO.save_off(self, filename)\n elif ext == 'mesh':\n IO.save_mesh(self, filename)\n else:\n raise Exception(\"Only .obj, .off and .mesh files are supported\")\n\n def __compute_metrics(self):\n\n self.simplex_metrics['area'] = triangle_area(self.vertices, self.polys)\n self.simplex_metrics['aspect_ratio'] = triangle_aspect_ratio(self.vertices, self.polys)\n\n def update_metrics(self):\n self.__compute_metrics()\n\n @property\n def _map_poly_indices(self):\n return self.__map_poly_indices\n\n def boundary(self):\n\n \"\"\"\n Compute the boundary of the current mesh. It only returns the faces that are inside the clipping\n \"\"\"\n if (self._AbstractMesh__boundary_needs_update):\n clipping_range = super(Trimesh, self).boundary()\n self._AbstractMesh__visible_polys = clipping_range \n self._AbstractMesh__boundary_cached = clipping_range\n self._AbstractMesh__boundary_needs_update = False\n\n self.__map_poly_indices = []\n counter = 0\n for c in clipping_range:\n if c:\n self.__map_poly_indices.append(counter)\n else:\n counter = counter + 1\n\n return self.polys[self._AbstractMesh__boundary_cached], self._AbstractMesh__boundary_cached\n\n def as_edges_flat(self):\n # Faces inside the bounding box\n boundaries = self.boundary()[0]\n # Insert into a vertical array all the correspondences between all the vertices collapsed in one dimension\n edges = np.c_[boundaries[:, :2], boundaries[:, 1:], boundaries[:, 2], boundaries[:, 0]].flatten()\n # edges_flat = self.vertices[edges].tolist()\n return edges\n\n def _as_threejs_triangle_soup(self):\n\n tris = self.vertices[self.boundary()[0].flatten()]\n return tris.astype(np.float32), compute_three_normals(tris).astype(np.float32)\n\n def as_triangles(self):\n return self.boundary()[0].flatten().astype(\"uint32\")\n\n def _as_threejs_colors(self, colors=None):\n\n if colors is not None:\n return np.repeat(colors, 3, axis=0)\n return np.repeat(self.boundary()[1], 3)\n \n @property\n def num_triangles(self):\n return self.num_polys\n\n\n \n def vertex_remove(self, vtx_id):\n\n \"\"\"\n Remove a vertex from the current mesh. It affects the mesh geometry. 
\n\n Parameters:\n\n vtx_id (int): The index of the vertex to remove \n\n \"\"\"\n\n self.vertices_remove([vtx_id])\n\n def vertices_remove(self, vtx_ids):\n \"\"\"\n Remove a list of vertices from the current mesh. It affects the mesh geometry. \n\n Parameters:\n\n vtx_ids (Array (Nx1 / 1xN) type=int): List of vertices to remove. Each vertex is in the form [int]\n\n \"\"\"\n self._dont_update = True\n vtx_ids = np.array(vtx_ids)\n\n for v_id in vtx_ids:\n\n self.vertices = np.delete(self.vertices, v_id, 0)\n condition = ((self._AbstractMesh__polys[:, 0] != v_id) &\n (self._AbstractMesh__polys[:, 1] != v_id) &\n (self._AbstractMesh__polys[:, 2] != v_id))\n\n if self.labels is not None:\n self.labels = self.labels[condition]\n\n self._AbstractMesh__polys = self._AbstractMesh__polys[condition]\n\n self._AbstractMesh__polys[(self._AbstractMesh__polys[:, 0] > v_id)] -= np.array([1, 0, 0])\n self._AbstractMesh__polys[(self._AbstractMesh__polys[:, 1] > v_id)] -= np.array([0, 1, 0])\n self._AbstractMesh__polys[(self._AbstractMesh__polys[:, 2] > v_id)] -= np.array([0, 0, 1])\n\n vtx_ids[vtx_ids > v_id] -= 1\n\n self.__load_operations()\n\n def poly_add(self, new_poly):\n \"\"\"\n Add a new face to the current mesh. It affects the mesh topology. \n\n Parameters:\n\n new_poly (Array (Nx1) type=int): Poly to add in the form [int, ..., int]\n\n \"\"\"\n self.polys_add(new_poly)\n\n def polys_add(self, new_polys):\n\n \"\"\"\n Add a list of new faces to the current mesh. It affects the mesh topology. \n\n Parameters:\n\n new_polys (Array (NxM) type=int): List of faces to add. Each face is in the form [int, ..., int]\n \"\"\"\n\n AbstractMesh.polys_add(self, new_polys)\n self.__load_operations()\n \n\n\n def poly_remove(self, poly_id):\n\n \"\"\"\n Remove a poly from the current mesh. It affects the mesh topology. \n\n Parameters:\n\n poly_id (int): The index of the face to remove \n\n \"\"\"\n\n self.polys_remove([poly_id])\n\n \n def polys_remove(self, poly_ids):\n\n \"\"\"\n Remove a list of polys from the current mesh. It affects the mesh topology. \n\n Parameters:\n\n poly_ids (Array (Nx1 / 1xN) type=int): List of polys to remove. 
Each face is in the form [int]\n\n \"\"\"\n AbstractMesh.polys_remove(self, poly_ids)\n self.__load_operations()\n \n def tessellate(self):\n return self.polys\n \n @property\n def edge_is_manifold(self):\n val = self.edge_valence\n return np.logical_and(val > 0, val < 3)\n\n \n @property\n def poly_is_on_boundary(self):\n return np.logical_not(np.all(self.adj_poly2poly != -1, axis = 1))\n \n @property\n def edge_is_on_boundary(self):\n boundary_edges = self.adj_poly2edge[self.poly_is_on_boundary].reshape(-1)\n boundary_edges = [e for e in boundary_edges if len(self.adj_edge2poly[e]) == 1]\n bool_vec = np.zeros((self.num_edges), dtype=np.bool)\n bool_vec[boundary_edges] = True\n return bool_vec\n \n @property\n def vert_is_on_boundary(self):\n boundary_verts = self.edges[self.edge_is_on_boundary].reshape(-1)\n bool_vec = np.zeros((self.num_vertices), dtype=np.bool)\n bool_vec[boundary_verts] = True\n return bool_vec\n\n \n @property\n def area(self):\n return np.sum(self.simplex_metrics['area'][1])\n\n def normalize_area(self):\n scale_factor = 1.0/np.sqrt(self.area)\n self.transform_scale([scale_factor, scale_factor, scale_factor])\n self.simplex_metrics['area'] = triangle_area(self.vertices, self.polys)\n \n def sharp_creases(self, threshold=1.0472):\n e2p = self.adj_edge2poly.array\n indices = np.logical_not(np.all(e2p != -1, axis=1)) \n angles = utilities.angle_between_vectors(self.poly_normals[e2p[:,0]], self.poly_normals[e2p[:,1]], True)[0]\n result = angles > threshold\n result[indices] = True\n return result\n \n def fix_poly_order():\n normals = self.poly_normals\n center = self.mesh_centroid\n a = (normals-center)\n norm = np.linalg.norm(a, axis=1)\n norm.shape = (-1,1)\n a /= norm\n condition = np.einsum(\"ij,ij->i\", a, normals) > 0\n self.polys[condition] = np.flip(mesh.polys[condition], axis=1)\n self.__load_operations()\n \n\n #deprecated\n @property\n @deprecated(\"Use the method adj_poly2poly instead\")\n def face2face(self):\n return self._AbstractMesh__adj_poly2poly\n"
] | [
[
"numpy.sqrt",
"numpy.sum",
"numpy.zeros",
"numpy.einsum",
"numpy.logical_and",
"numpy.repeat",
"numpy.all",
"numpy.delete",
"numpy.flip",
"numpy.array",
"numpy.linalg.norm"
]
] |
tianjiashuo/akg | [
"a9cbf642063fb1086a93e8bc6be6feb145689817"
] | [
"tests/common/test_run/sqrt_run.py"
] | [
"# Copyright 2019-2021 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport akg\nimport numpy as np\nfrom akg.utils import kernel_exec as utils\nfrom akg.ops.math import Sqrt\nfrom tests.common.tensorio import compare_tensor\nfrom tests.common.gen_random import random_gaussian\nfrom akg.utils.result_analysis import target_profiling\nfrom akg.utils.format_transform import to_tvm_nd_array\n\ndef sqrt_run(shape, dtype, attrs):\n if 'tuning' in attrs.keys():\n t = attrs.get(\"tuning\", False)\n kernel_name = attrs.get(\"kernel_name\", False)\n mod = utils.op_build_test(Sqrt, [shape], [dtype], kernel_name=kernel_name, attrs=attrs, tuning=t)\n if t:\n expect, input, output = gen_data(dtype, shape)\n return mod, expect, (input, output)\n else:\n return mod\n else:\n expect, input, output = gen_data(dtype, shape)\n mod = utils.op_build_test(Sqrt, [shape], [dtype], kernel_name='sqrt', attrs=attrs)\n output = utils.mod_launch(mod, (input, output), expect=expect)\n if attrs.get(\"profiling\", False):\n target_name = attrs[\"target\"].split()[0]\n args_list = to_tvm_nd_array([input, output], akg.tvm.context(target_name, 0))\n target_profiling(mod, *args_list, target=target_name, repeat_time=attrs[\"repeat_times\"])\n return input, output, expect, compare_tensor(output, expect, rtol=5e-03, equal_nan=True)\n\n\ndef gen_data(dtype, shape):\n # Generate data for testing the op\n input = random_gaussian(shape, miu=1, sigma=0.1).astype(dtype)\n input = np.abs(input)\n expect = np.sqrt(input)\n output = np.full(expect.shape, np.nan, dtype)\n return expect, input, output\n"
] | [
[
"numpy.sqrt",
"numpy.full",
"numpy.abs"
]
] |
zcdzcdzcd/models | [
"a31b526a7617a152a138a865b5689bf5b59f655d",
"a31b526a7617a152a138a865b5689bf5b59f655d",
"a31b526a7617a152a138a865b5689bf5b59f655d"
] | [
"official/nlp/optimization.py",
"research/attention_ocr/python/datasets/unittest_utils.py",
"official/transformer/utils/tokenizer_test.py"
] | [
"# Copyright 2019 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Functions and classes related to optimization (weight updates).\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport re\n\nimport tensorflow as tf\n\n\nclass WarmUp(tf.keras.optimizers.schedules.LearningRateSchedule):\n \"\"\"Applys a warmup schedule on a given learning rate decay schedule.\"\"\"\n\n def __init__(\n self,\n initial_learning_rate,\n decay_schedule_fn,\n warmup_steps,\n power=1.0,\n name=None):\n super(WarmUp, self).__init__()\n self.initial_learning_rate = initial_learning_rate\n self.warmup_steps = warmup_steps\n self.power = power\n self.decay_schedule_fn = decay_schedule_fn\n self.name = name\n\n def __call__(self, step):\n with tf.name_scope(self.name or 'WarmUp') as name:\n # Implements polynomial warmup. i.e., if global_step < warmup_steps, the\n # learning rate will be `global_step/num_warmup_steps * init_lr`.\n global_step_float = tf.cast(step, tf.float32)\n warmup_steps_float = tf.cast(self.warmup_steps, tf.float32)\n warmup_percent_done = global_step_float / warmup_steps_float\n warmup_learning_rate = (\n self.initial_learning_rate *\n tf.math.pow(warmup_percent_done, self.power))\n return tf.cond(global_step_float < warmup_steps_float,\n lambda: warmup_learning_rate,\n lambda: self.decay_schedule_fn(step),\n name=name)\n\n def get_config(self):\n return {\n 'initial_learning_rate': self.initial_learning_rate,\n 'decay_schedule_fn': self.decay_schedule_fn,\n 'warmup_steps': self.warmup_steps,\n 'power': self.power,\n 'name': self.name\n }\n\n\ndef create_optimizer(init_lr, num_train_steps, num_warmup_steps):\n \"\"\"Creates an optimizer with learning rate schedule.\"\"\"\n # Implements linear decay of the learning rate.\n learning_rate_fn = tf.keras.optimizers.schedules.PolynomialDecay(\n initial_learning_rate=init_lr,\n decay_steps=num_train_steps,\n end_learning_rate=0.0)\n if num_warmup_steps:\n learning_rate_fn = WarmUp(initial_learning_rate=init_lr,\n decay_schedule_fn=learning_rate_fn,\n warmup_steps=num_warmup_steps)\n optimizer = AdamWeightDecay(\n learning_rate=learning_rate_fn,\n weight_decay_rate=0.01,\n beta_1=0.9,\n beta_2=0.999,\n epsilon=1e-6,\n exclude_from_weight_decay=['layer_norm', 'bias'])\n return optimizer\n\n\nclass AdamWeightDecay(tf.keras.optimizers.Adam):\n \"\"\"Adam enables L2 weight decay and clip_by_global_norm on gradients.\n\n Just adding the square of the weights to the loss function is *not* the\n correct way of using L2 regularization/weight decay with Adam, since that will\n interact with the m and v parameters in strange ways.\n\n Instead we want ot decay the weights in a manner that doesn't interact with\n the m/v parameters. 
This is equivalent to adding the square of the weights to\n the loss with plain (non-momentum) SGD.\n \"\"\"\n\n def __init__(self,\n learning_rate=0.001,\n beta_1=0.9,\n beta_2=0.999,\n epsilon=1e-7,\n amsgrad=False,\n weight_decay_rate=0.0,\n include_in_weight_decay=None,\n exclude_from_weight_decay=None,\n name='AdamWeightDecay',\n **kwargs):\n super(AdamWeightDecay, self).__init__(\n learning_rate, beta_1, beta_2, epsilon, amsgrad, name, **kwargs)\n self.weight_decay_rate = weight_decay_rate\n self._include_in_weight_decay = include_in_weight_decay\n self._exclude_from_weight_decay = exclude_from_weight_decay\n\n @classmethod\n def from_config(cls, config):\n \"\"\"Creates an optimizer from its config with WarmUp custom object.\"\"\"\n custom_objects = {'WarmUp': WarmUp}\n return super(AdamWeightDecay, cls).from_config(\n config, custom_objects=custom_objects)\n\n def _prepare_local(self, var_device, var_dtype, apply_state):\n super(AdamWeightDecay, self)._prepare_local(var_device, var_dtype,\n apply_state)\n apply_state['weight_decay_rate'] = tf.constant(\n self.weight_decay_rate, name='adam_weight_decay_rate')\n\n def _decay_weights_op(self, var, learning_rate, apply_state):\n do_decay = self._do_use_weight_decay(var.name)\n if do_decay:\n return var.assign_sub(\n learning_rate * var *\n apply_state['weight_decay_rate'],\n use_locking=self._use_locking)\n return tf.no_op()\n\n def apply_gradients(self, grads_and_vars, name=None):\n grads, tvars = list(zip(*grads_and_vars))\n (grads, _) = tf.clip_by_global_norm(grads, clip_norm=1.0)\n return super(AdamWeightDecay, self).apply_gradients(zip(grads, tvars))\n\n def _get_lr(self, var_device, var_dtype, apply_state):\n \"\"\"Retrieves the learning rate with the given state.\"\"\"\n if apply_state is None:\n return self._decayed_lr_t[var_dtype], {}\n\n apply_state = apply_state or {}\n coefficients = apply_state.get((var_device, var_dtype))\n if coefficients is None:\n coefficients = self._fallback_apply_state(var_device, var_dtype)\n apply_state[(var_device, var_dtype)] = coefficients\n\n return coefficients['lr_t'], dict(apply_state=apply_state)\n\n def _resource_apply_dense(self, grad, var, apply_state=None):\n lr_t, kwargs = self._get_lr(var.device, var.dtype.base_dtype, apply_state)\n decay = self._decay_weights_op(var, lr_t, apply_state)\n with tf.control_dependencies([decay]):\n return super(AdamWeightDecay, self)._resource_apply_dense(\n grad, var, **kwargs)\n\n def _resource_apply_sparse(self, grad, var, indices, apply_state=None):\n lr_t, kwargs = self._get_lr(var.device, var.dtype.base_dtype, apply_state)\n decay = self._decay_weights_op(var, lr_t, apply_state)\n with tf.control_dependencies([decay]):\n return super(AdamWeightDecay, self)._resource_apply_sparse(\n grad, var, indices, **kwargs)\n\n def get_config(self):\n config = super(AdamWeightDecay, self).get_config()\n config.update({\n 'weight_decay_rate': self.weight_decay_rate,\n })\n return config\n\n def _do_use_weight_decay(self, param_name):\n \"\"\"Whether to use L2 weight decay for `param_name`.\"\"\"\n if self.weight_decay_rate == 0:\n return False\n\n if self._include_in_weight_decay:\n for r in self._include_in_weight_decay:\n if re.search(r, param_name) is not None:\n return True\n\n if self._exclude_from_weight_decay:\n for r in self._exclude_from_weight_decay:\n if re.search(r, param_name) is not None:\n return False\n return True\n",
"# Copyright 2017 The TensorFlow Authors All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\n\"\"\"Functions to make unit testing easier.\"\"\"\n\nimport StringIO\nimport numpy as np\nfrom PIL import Image as PILImage\nimport tensorflow as tf\n\n\ndef create_random_image(image_format, shape):\n \"\"\"Creates an image with random values.\n\n Args:\n image_format: An image format (PNG or JPEG).\n shape: A tuple with image shape (including channels).\n\n Returns:\n A tuple (<numpy ndarray>, <a string with encoded image>)\n \"\"\"\n image = np.random.randint(low=0, high=255, size=shape, dtype='uint8')\n io = StringIO.StringIO()\n image_pil = PILImage.fromarray(image)\n image_pil.save(io, image_format, subsampling=0, quality=100)\n return image, io.getvalue()\n\n\ndef create_serialized_example(name_to_values):\n \"\"\"Creates a tf.Example proto using a dictionary.\n\n It automatically detects type of values and define a corresponding feature.\n\n Args:\n name_to_values: A dictionary.\n\n Returns:\n tf.Example proto.\n \"\"\"\n example = tf.train.Example()\n for name, values in name_to_values.items():\n feature = example.features.feature[name]\n if isinstance(values[0], str):\n add = feature.bytes_list.value.extend\n elif isinstance(values[0], float):\n add = feature.float32_list.value.extend\n elif isinstance(values[0], int):\n add = feature.int64_list.value.extend\n else:\n raise AssertionError('Unsupported type: %s' % type(values[0]))\n add(values)\n return example.SerializeToString()\n",
"# Copyright 2018 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Test Subtokenizer and string helper methods.\"\"\"\n\nimport collections\nimport tempfile\n\nimport tensorflow as tf # pylint: disable=g-bad-import-order\n\nfrom official.transformer.utils import tokenizer\n\n\nclass SubtokenizerTest(tf.test.TestCase):\n\n def _init_subtokenizer(self, vocab_list):\n temp_file = tempfile.NamedTemporaryFile(delete=False)\n with tf.io.gfile.GFile(temp_file.name, \"w\") as w:\n for subtoken in vocab_list:\n w.write(\"'%s'\" % subtoken)\n w.write(\"\\n\")\n return tokenizer.Subtokenizer(temp_file.name, reserved_tokens=[])\n\n def test_encode(self):\n vocab_list = [\"123_\", \"test\", \"ing_\"]\n subtokenizer = self._init_subtokenizer(vocab_list)\n s = \"testing 123\"\n encoded_list = subtokenizer.encode(s)\n self.assertEqual([1, 2, 0], encoded_list)\n\n def test_decode(self):\n vocab_list = [\"123_\", \"test\", \"ing_\"]\n subtokenizer = self._init_subtokenizer(vocab_list)\n encoded_list = [1, 2, 0] # testing 123\n decoded_str = subtokenizer.decode(encoded_list)\n self.assertEqual(\"testing 123\", decoded_str)\n\n def test_subtoken_ids_to_tokens(self):\n vocab_list = [\"123_\", \"test\", \"ing_\"]\n subtokenizer = self._init_subtokenizer(vocab_list)\n encoded_list = [1, 2, 0] # testing 123\n token_list = subtokenizer._subtoken_ids_to_tokens(encoded_list)\n self.assertEqual([u\"testing\", u\"123\"], token_list)\n\n\nclass StringHelperTest(tf.test.TestCase):\n\n def test_split_string_to_tokens(self):\n text = \"test? testing 123.\"\n\n tokens = tokenizer._split_string_to_tokens(text)\n self.assertEqual([\"test\", \"? \", \"testing\", \"123\", \".\"], tokens)\n\n def test_join_tokens_to_string(self):\n tokens = [\"test\", \"? \", \"testing\", \"123\", \".\"]\n\n s = tokenizer._join_tokens_to_string(tokens)\n self.assertEqual(\"test? 
testing 123.\", s)\n\n def test_escape_token(self):\n token = u\"abc_\\\\4\"\n alphabet = set(\"abc_\\\\u;\")\n\n escaped_token = tokenizer._escape_token(token, alphabet)\n self.assertEqual(\"abc\\\\u\\\\\\\\\\\\52;_\", escaped_token)\n\n def test_unescape_token(self):\n escaped_token = u\"Underline: \\\\u, Backslash: \\\\\\\\, Unicode: \\\\52;\"\n\n unescaped_token = tokenizer._unescape_token(escaped_token)\n self.assertEqual(\n \"Underline: _, Backslash: \\\\, Unicode: 4\", unescaped_token)\n\n def test_list_to_index_dict(self):\n lst = [\"test\", \"strings\"]\n\n d = tokenizer._list_to_index_dict(lst)\n self.assertDictEqual({\"test\": 0, \"strings\": 1}, d)\n\n def test_split_token_to_subtokens(self):\n token = \"abc\"\n subtoken_dict = {\"a\": 0, \"b\": 1, \"c\": 2, \"ab\": 3}\n max_subtoken_length = 2\n\n subtokens = tokenizer._split_token_to_subtokens(\n token, subtoken_dict, max_subtoken_length)\n self.assertEqual([\"ab\", \"c\"], subtokens)\n\n def test_generate_alphabet_dict(self):\n s = [\"testing\", \"123\"]\n reserved_tokens = [\"???\"]\n\n alphabet = tokenizer._generate_alphabet_dict(s, reserved_tokens)\n self.assertIn(\"?\", alphabet)\n self.assertIn(\"t\", alphabet)\n self.assertIn(\"e\", alphabet)\n self.assertIn(\"s\", alphabet)\n self.assertIn(\"i\", alphabet)\n self.assertIn(\"n\", alphabet)\n self.assertIn(\"g\", alphabet)\n self.assertIn(\"1\", alphabet)\n self.assertIn(\"2\", alphabet)\n self.assertIn(\"3\", alphabet)\n\n def test_count_and_gen_subtokens(self):\n token_counts = {\"abc\": 5}\n alphabet = set(\"abc_\")\n subtoken_dict = {\"a\": 0, \"b\": 1, \"c\": 2, \"_\": 3}\n max_subtoken_length = 2\n\n subtoken_counts = tokenizer._count_and_gen_subtokens(\n token_counts, alphabet, subtoken_dict, max_subtoken_length)\n\n self.assertIsInstance(subtoken_counts, collections.defaultdict)\n self.assertDictEqual(\n {\"a\": 5, \"b\": 5, \"c\": 5, \"_\": 5, \"ab\": 5, \"bc\": 5, \"c_\": 5,\n \"abc\": 5, \"bc_\": 5, \"abc_\": 5}, subtoken_counts)\n\n def test_filter_and_bucket_subtokens(self):\n subtoken_counts = collections.defaultdict(\n int, {\"a\": 2, \"b\": 4, \"c\": 1, \"ab\": 6, \"ac\": 3, \"abbc\": 5})\n min_count = 3\n\n subtoken_buckets = tokenizer._filter_and_bucket_subtokens(\n subtoken_counts, min_count)\n\n self.assertEqual(len(subtoken_buckets[0]), 0)\n self.assertEqual(set(\"b\"), subtoken_buckets[1])\n self.assertEqual(set([\"ab\", \"ac\"]), subtoken_buckets[2])\n self.assertEqual(len(subtoken_buckets[3]), 0)\n self.assertEqual(set([\"abbc\"]), subtoken_buckets[4])\n\n def test_gen_new_subtoken_list(self):\n subtoken_counts = collections.defaultdict(\n int, {\"translate\": 10, \"t\": 40, \"tr\": 16, \"tra\": 12})\n min_count = 5\n alphabet = set(\"translate\")\n reserved_tokens = [\"reserved\", \"tokens\"]\n\n subtoken_list, max_token_length = tokenizer._gen_new_subtoken_list(\n subtoken_counts, min_count, alphabet, reserved_tokens)\n\n # Check that \"tra\" isn\"t in the list (its count should be decremented to 2,\n # so it should not be added to the canddiate list).\n self.assertNotIn(\"tra\", subtoken_list)\n\n self.assertIn(\"tr\", subtoken_list)\n self.assertIn(\"t\", subtoken_list)\n\n self.assertEqual(len(\"translate\"), max_token_length)\n\n def test_generate_subtokens(self):\n token_counts = {\"ab\": 1, \"bc\": 3, \"abc\": 5}\n alphabet = set(\"abc_\")\n min_count = 100\n num_iterations = 1\n reserved_tokens = [\"reserved\", \"tokens\"]\n\n vocab_list = tokenizer._generate_subtokens(\n token_counts, alphabet, min_count, num_iterations, 
reserved_tokens)\n\n # Check that reserved tokens are at the front of the list\n self.assertEqual(vocab_list[:2], reserved_tokens)\n\n # Check that each character in alphabet is in the vocab list\n for c in alphabet:\n self.assertIn(c, vocab_list)\n\n\nif __name__ == \"__main__\":\n tf.test.main()\n"
] | [
[
"tensorflow.no_op",
"tensorflow.clip_by_global_norm",
"tensorflow.keras.optimizers.schedules.PolynomialDecay",
"tensorflow.cast",
"tensorflow.name_scope",
"tensorflow.math.pow",
"tensorflow.constant",
"tensorflow.control_dependencies"
],
[
"numpy.random.randint",
"tensorflow.train.Example"
],
[
"tensorflow.test.main",
"tensorflow.io.gfile.GFile"
]
] |
ffcccc/MachineLearning | [
"78bc9c5df08b14f5d70ad5d6774c74f85a585c7e"
] | [
"KNN.py"
] | [
"\"\"\"\n@Filename: KNN.py\n@Author: Danc1elion\n@Author: ffcccc\n@Create Date: 2019-04-29\n@Update Date: 2019-05-03\n@Description: Implement of KNN\n\"\"\"\n\nimport numpy as np\nimport operator as op\nimport AClassifier\nimport preProcess\n\nclass KNNClassifier(AClassifier.aClassifier):\n def __init__(self, k, norm_type=\"Normalization\"):\n self.k = k\n self.norm_type = \"Normalization\"\n self.x_train = None\n self.y_train = None\n\n '''\n Function: Normalization\n Description: Normalize input data. For vector x, the normalization process is given by\n normalization(x) = (x - min(x))/(max(x) - min(x))\n Input: data dataType: ndarray description: input data\n Output: norm_data dataType: ndarray description: output data after normalization\n '''\n # def Normalization(self, data):\n # # get the max and min value of each column\n # min_value = data.min(axis=0)\n # max_value = data.max(axis=0)\n # diff = max_value - min_value\n # # normalization\n # min_data = np.tile(min_value, (data.shape[0], 1))\n # norm_data = (data - min_data)/np.tile(diff, (data.shape[0], 1))\n # return norm_data\n\n '''\n Function: Standardization\n Description: Standardize input data. For vector x, the normalization process is given by\n Standardization(x) = x - mean(x)/std(x)\n Input: data dataType: ndarray description: input data\n Output: standard_data dataType: ndarray description: output data after standardization\n '''\n # def Standardization(self, data):\n # # get the mean and the variance of each column\n # mean_value = data.mean(axis=0)\n # var_value = data.std(axis=0)\n # standard_data = (data - np.tile(mean_value, (data.shape[0], 1)))/np.tile(var_value, (data.shape[0], 1))\n # return standard_data\n\n '''\n Function: train\n Description: train the model\n Input: train_data dataType: ndarray description: features\n test_data dataType: ndarray description: labels\n Output: self dataType: obj description: \n '''\n def train(self, train_data, train_label):\n if self.norm_type == \"Standardization\":\n train_data = preProcess.Standardization(train_data)\n else:\n train_data = preProcess.Normalization(train_data)\n self.x_train = train_data\n self.y_train = train_label\n return self\n\n '''\n Function: predict\n Description: give the prediction for test data\n Input: test_data dataType: ndarray description: data for testing\n test_abel dataType: ndarray description: labels of train data\n norm_type dataType: string description: type of normalization, default:Normalization\n probability dataType: bool description: if true return label and probability, else return label only\n showResult dataType: bool description: display the prediction result\n Output: results dataType: ndarray description: label or probability\n '''\n def predict(self, test_data):\n # Normalization\n if self.norm_type == \"Standardization\":\n testData = preProcess.Standardization(test_data)\n else:\n testData = preProcess.Normalization(test_data)\n\n test_num = testData.shape[0]\n prediction = np.zeros([test_num, 1])\n probability = np.zeros([test_num, 1])\n # predict each samples in test data\n for i in range(test_num):\n prediction[i], probability[i] = self.calculateDistance(testData[i], self.x_train, self.y_train, self.k)\n\n self.prediction = prediction\n self.probability = probability\n\n return prediction\n\n '''\n Function: calculateDistance\n Description: calcuate the distance between input vector and train data\n Input: input dataType: ndarray description: input vector\n traind_ata dataType: ndarray description: data for training\n 
train_label dataType: ndarray description: labels of train data\n k dataType: int description: select the first k distances\n Output: prob dataType: float description: max probability of prediction \n label dataType: int description: prediction label of input vector\n '''\n def calculateDistance(self, input, train_data, train_label, k):\n train_num = train_data.shape[0]\n # calcuate the distances\n distances = np.tile(input, (train_num, 1)) - train_data\n distances = distances**2\n distances = distances.sum(axis=1)\n distances = distances**0.5\n\n # get the labels of the first k distances\n disIndex = distances.argsort()\n labelCount = {}\n for i in range(k):\n label = train_label[disIndex[i]]\n labelCount[label] = labelCount.get(label, 0) + 1\n\n prediction = sorted(labelCount.items(), key=op.itemgetter(1), reverse=True)\n label = prediction[0][0]\n prob = prediction[0][1]/k\n return label, prob\n\n '''\n Function: showDetectionResult\n Description: show detection result\n Input: test_data dataType: ndarray description: data for test\n test_label dataType: ndarray description: labels of test data\n Output: accuracy dataType: float description: detection accuarcy\n '''\n # def showDetectionResult(self, test_data, test_label):\n # test_label = np.expand_dims(test_label, axis=1)\n # prediction = self.predict(test_data)\n # accuarcy = sum(prediction == test_label)/len(test_label)\n # return accuarcy\n"
] | [
[
"numpy.tile",
"numpy.zeros"
]
] |
EricLi404/tensorflow | [
"23759800d89f7b5362c338d9a3fd72a6810c3e22",
"23759800d89f7b5362c338d9a3fd72a6810c3e22"
] | [
"tensorflow/python/kernel_tests/conv_ops_test.py",
"tensorflow/python/distribute/tpu_values.py"
] | [
"# Copyright 2015 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Functional tests for convolutional operations.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\nimport time\n\nimport numpy as np\n\nfrom six.moves import xrange # pylint: disable=redefined-builtin\nfrom tensorflow.core.protobuf import config_pb2\nfrom tensorflow.core.protobuf import rewriter_config_pb2\nfrom tensorflow.python.client import session as session_lib\nfrom tensorflow.python.eager import backprop\nfrom tensorflow.python.eager import context\nfrom tensorflow.python.framework import constant_op\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import errors_impl\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.framework import test_util\nfrom tensorflow.python.layers import convolutional\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import control_flow_ops\nfrom tensorflow.python.ops import gradient_checker\nfrom tensorflow.python.ops import gradients_impl\nfrom tensorflow.python.ops import nn_impl\nfrom tensorflow.python.ops import nn_ops\nfrom tensorflow.python.ops import random_ops\nfrom tensorflow.python.ops import variables\nimport tensorflow.python.ops.nn_grad # pylint: disable=unused-import\nfrom tensorflow.python.platform import test\nfrom tensorflow.python.platform import tf_logging\nfrom tensorflow.python.util.compat import collections_abc\n\n\ndef GetShrunkInceptionShapes(shrink=10):\n \"\"\"Iterator for smaller versions of convolution shapes in 2015 Inception.\n\n Relative to inception, each depth value is `depth // shrink`.\n\n Args:\n shrink: Factor to shrink each depth value by relative to Inception.\n\n Yields:\n Tuple (input_size, filter_size, out_size, stride, padding), the convolution\n parameters of Inception layers.\n \"\"\"\n input_sizes = [[4, 5, 5, 1248], [4, 8, 8, 384], [4, 8, 8, 384],\n [4, 8, 8, 2048], [4, 8, 8, 448], [4, 8, 8, 2048],\n [4, 8, 8, 2048], [4, 8, 8, 2048], [4, 8, 8, 1760],\n [4, 8, 8, 1760], [4, 8, 8, 1760], [4, 8, 8, 1760],\n [4, 17, 17, 192], [4, 17, 17, 192], [4, 17, 17, 1248],\n [4, 17, 17, 128], [4, 17, 17, 1248], [4, 17, 17, 224],\n [4, 17, 17, 192], [4, 17, 17, 192], [4, 17, 17, 1216],\n [4, 17, 17, 1216], [4, 17, 17, 224], [4, 17, 17, 192],\n [4, 17, 17, 192], [4, 17, 17, 1152], [4, 17, 17, 1152],\n [4, 17, 17, 192], [4, 17, 17, 160], [4, 17, 17, 1152],\n [4, 17, 17, 1024], [4, 17, 17, 128], [4, 17, 17, 1024],\n [4, 17, 17, 128], [4, 17, 17, 1024], [4, 17, 17, 128],\n [4, 17, 17, 768], [4, 17, 17, 128], [4, 17, 17, 128],\n [4, 17, 17, 768], [4, 17, 17, 768], [4, 35, 35, 96],\n [4, 35, 35, 288], [4, 35, 35, 64], [4, 35, 35, 288],\n [4, 35, 35, 256], [4, 35, 35, 48], [4, 35, 35, 256],\n [4, 35, 35, 96], [4, 35, 35, 192], [4, 35, 35, 192],\n [4, 35, 35, 192], [4, 73, 73, 64], 
[4, 73, 73, 64],\n [4, 147, 147, 24]]\n filter_sizes = [[1, 1, 1248, 128], [1, 3, 384, 384], [3, 1, 384, 384],\n [1, 1, 2048, 192], [3, 3, 448, 384], [1, 1, 2048, 320],\n [1, 1, 2048, 448], [1, 1, 2048, 384], [1, 1, 1760, 384],\n [1, 1, 1760, 192], [1, 1, 1760, 448], [1, 1, 1760, 320],\n [3, 3, 192, 192], [3, 3, 192, 192], [1, 1, 1248, 192],\n [3, 3, 128, 320], [1, 1, 1248, 128], [1, 3, 224, 224],\n [3, 1, 192, 256], [1, 3, 192, 256], [1, 1, 1216, 192],\n [1, 1, 1216, 96], [3, 1, 224, 224], [3, 3, 192, 224],\n [1, 3, 192, 192], [1, 1, 1152, 192], [1, 1, 1152, 128],\n [3, 1, 192, 192], [3, 3, 160, 192], [1, 1, 1152, 160],\n [1, 1, 1024, 128], [1, 3, 128, 192], [1, 1, 1024, 160],\n [3, 1, 128, 192], [1, 1, 1024, 256], [3, 1, 128, 128],\n [1, 1, 768, 192], [1, 3, 128, 128], [3, 3, 128, 128],\n [1, 1, 768, 128], [1, 1, 768, 320], [3, 3, 96, 96],\n [3, 3, 288, 384], [3, 3, 64, 96], [1, 1, 288, 64],\n [1, 1, 256, 64], [5, 5, 48, 64], [1, 1, 256, 48],\n [3, 3, 96, 96], [1, 1, 192, 32], [1, 1, 192, 64],\n [1, 1, 192, 48], [3, 3, 64, 192], [1, 1, 64, 64],\n [1, 1, 24, 64]]\n out_sizes = [[4, 5, 5, 128], [4, 8, 8, 384], [4, 8, 8, 384],\n [4, 8, 8, 192], [4, 8, 8, 384], [4, 8, 8, 320],\n [4, 8, 8, 448], [4, 8, 8, 384], [4, 8, 8, 384],\n [4, 8, 8, 192], [4, 8, 8, 448], [4, 8, 8, 320],\n [4, 8, 8, 192], [4, 17, 17, 192], [4, 17, 17, 192],\n [4, 8, 8, 320], [4, 17, 17, 128], [4, 17, 17, 224],\n [4, 17, 17, 256], [4, 17, 17, 256], [4, 17, 17, 192],\n [4, 17, 17, 96], [4, 17, 17, 224], [4, 17, 17, 224],\n [4, 17, 17, 192], [4, 17, 17, 192], [4, 17, 17, 128],\n [4, 17, 17, 192], [4, 17, 17, 192], [4, 17, 17, 160],\n [4, 17, 17, 128], [4, 17, 17, 192], [4, 17, 17, 160],\n [4, 17, 17, 192], [4, 17, 17, 256], [4, 17, 17, 128],\n [4, 17, 17, 192], [4, 17, 17, 128], [4, 17, 17, 128],\n [4, 17, 17, 128], [4, 17, 17, 320], [4, 17, 17, 96],\n [4, 17, 17, 384], [4, 35, 35, 96], [4, 35, 35, 64],\n [4, 35, 35, 64], [4, 35, 35, 64], [4, 35, 35, 48],\n [4, 35, 35, 96], [4, 35, 35, 32], [4, 35, 35, 64],\n [4, 35, 35, 48], [4, 71, 71, 192], [4, 73, 73, 64],\n [4, 147, 147, 64]]\n strides = [\n 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 1, 1, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1,\n 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 1, 1, 1, 1, 1, 1, 1,\n 1, 1, 1, 1, 1\n ]\n # Shrink sizes to make the test faster\n for i in input_sizes:\n i[3] //= shrink\n for f in filter_sizes:\n f[2] //= shrink\n f[3] //= shrink\n for o in out_sizes:\n o[3] //= shrink\n # pylint: disable=invalid-name\n VALID = \"VALID\"\n SAME = \"SAME\"\n # pylint: enable=invalid-name\n paddings = [\n SAME, SAME, SAME, SAME, SAME, SAME, SAME, SAME, SAME, SAME, SAME, SAME,\n VALID, SAME, SAME, VALID, SAME, SAME, SAME, SAME, SAME, SAME, SAME, SAME,\n SAME, SAME, SAME, SAME, SAME, SAME, SAME, SAME, SAME, SAME, SAME, SAME,\n SAME, SAME, SAME, SAME, SAME, VALID, VALID, SAME, SAME, SAME, SAME, SAME,\n SAME, SAME, SAME, SAME, VALID, VALID, VALID\n ]\n for i, f, o, s, p in zip(input_sizes, filter_sizes, out_sizes, strides,\n paddings):\n yield i, f, o, s, p\n\n\ndef GetTestConfigs():\n \"\"\"Get all the valid tests configs to run.\n\n Returns:\n all the valid test configs as tuples of data_format and use_gpu.\n \"\"\"\n test_configs = [(\"NHWC\", False), (\"NHWC\", True)]\n if test.is_gpu_available(cuda_only=True):\n # \"NCHW\" format is only supported on CUDA.\n test_configs += [(\"NCHW\", True)]\n return test_configs\n\n\nclass Conv2DTest(test.TestCase):\n\n def _DtypesToTest(self, use_gpu):\n # double datatype is currently not supported for convolution ops\n # on the 
ROCm platform\n optional_float64 = [] if test.is_built_with_rocm() else [dtypes.float64]\n if use_gpu and not test_util.GpuSupportsHalfMatMulAndConv():\n return [dtypes.float32] + optional_float64\n else:\n # It is important that float32 comes before float16 here,\n # as we will be using its gradients as reference for fp16 gradients.\n return [dtypes.float32, dtypes.float16] + optional_float64\n\n def _CreateNumpyTensor(self, shape):\n total_size = 1\n for s in shape:\n total_size *= s\n return np.arange(1, total_size + 1, dtype=np.float32).reshape(shape)\n\n def _SetupValuesForDevice(self, tensor_in_sizes, filter_in_sizes, dilations,\n strides, padding, data_format, dtype, use_gpu):\n \"\"\"Verifies the output values of the convolution function.\n\n Args:\n tensor_in_sizes: Input tensor dimensions in\n [batch, input_rows, input_cols, input_depth].\n filter_in_sizes: Filter tensor dimensions in\n [kernel_rows, kernel_cols, input_depth, output_depth].\n dilations: Dilated rate: [col_dilation, row_dilation]\n strides: Stride: [col_stride, row_stride]\n padding: Padding type.\n data_format: Format of the data tensors.\n dtype: Data type for inputs and outputs.\n use_gpu: True if the operations should be run on GPU\n Returns:\n Symbolic tensor value that can be used to execute the computation\n \"\"\"\n x1 = self._CreateNumpyTensor(tensor_in_sizes)\n x2 = self._CreateNumpyTensor(filter_in_sizes)\n\n with test_util.device(use_gpu):\n t1 = constant_op.constant(x1, shape=tensor_in_sizes, dtype=dtype)\n t2 = constant_op.constant(x2, shape=filter_in_sizes, dtype=dtype)\n strides = [1] + strides + [1]\n dilations = [1] + dilations + [1]\n if isinstance(padding, (list, tuple)):\n padding = [(0, 0)] + padding + [(0, 0)]\n if data_format == \"NCHW\":\n t1 = test_util.NHWCToNCHW(t1)\n strides = test_util.NHWCToNCHW(strides)\n dilations = test_util.NHWCToNCHW(dilations)\n if isinstance(padding, (list, tuple)):\n padding = test_util.NHWCToNCHW(padding)\n conv = nn_ops.conv2d(\n t1,\n t2,\n dilations=dilations,\n strides=strides,\n padding=padding,\n data_format=data_format)\n self.assertEqual(conv.dtype, dtype)\n if data_format == \"NCHW\":\n conv = test_util.NCHWToNHWC(conv)\n\n return conv\n\n def _CompareFwdValues(self, tensor_in_sizes, filter_in_sizes, conv_strides,\n padding):\n \"\"\"Verifies that CPU and GPU produce the same values.\n\n Args:\n tensor_in_sizes: Input tensor dimensions in\n [batch, input_rows, input_cols, input_depth].\n filter_in_sizes: Filter tensor dimensions in\n [kernel_rows, kernel_cols, input_depth, output_depth].\n conv_strides: [row_stride, col_stride] for the convolution;\n padding: Padding type.\n \"\"\"\n x1 = np.random.rand(*tensor_in_sizes).astype(np.float32)\n x2 = np.random.rand(*filter_in_sizes).astype(np.float32)\n\n def _SetupVal(data_format, use_gpu):\n with test_util.device(use_gpu):\n t1 = constant_op.constant(x1, shape=tensor_in_sizes)\n t2 = constant_op.constant(x2, shape=filter_in_sizes)\n strides = [1] + conv_strides + [1]\n if data_format == \"NCHW\":\n t1 = test_util.NHWCToNCHW(t1)\n strides = test_util.NHWCToNCHW(strides)\n conv = nn_ops.conv2d(\n t1, t2, strides=strides, padding=padding, data_format=data_format)\n if data_format == \"NCHW\":\n conv = test_util.NCHWToNHWC(conv)\n return conv\n\n tensors = []\n for (data_format, use_gpu) in GetTestConfigs():\n tensors.append(_SetupVal(data_format, use_gpu))\n values = self.evaluate(tensors)\n for i in range(1, len(values)):\n self.assertAllClose(values[0], values[i], rtol=1e-3, atol=1e-3)\n\n def 
_ComputeReferenceDilatedConv(self, tensor_in_sizes, filter_in_sizes,\n stride, dilation, padding, data_format,\n use_gpu):\n x1 = self._CreateNumpyTensor(tensor_in_sizes)\n x2 = self._CreateNumpyTensor(filter_in_sizes)\n with test_util.device(use_gpu):\n t1 = constant_op.constant(x1, shape=tensor_in_sizes)\n t2 = constant_op.constant(x2, shape=filter_in_sizes)\n if isinstance(stride, collections_abc.Iterable):\n strides = list(stride)\n else:\n strides = [stride, stride]\n if data_format == \"NCHW\":\n t1 = test_util.NHWCToNCHW(t1)\n full_strides = [1, 1] + strides\n full_dilation = [1, 1] + dilation\n else:\n full_strides = [1] + strides + [1]\n full_dilation = [1] + dilation + [1]\n expected = nn_ops.convolution(\n t1,\n t2,\n padding=padding,\n strides=strides,\n dilation_rate=dilation,\n data_format=data_format)\n computed = nn_ops.conv2d(\n t1,\n t2,\n strides=full_strides,\n dilations=full_dilation,\n padding=padding,\n data_format=data_format)\n if data_format == \"NCHW\":\n expected = test_util.NCHWToNHWC(expected)\n computed = test_util.NCHWToNHWC(computed)\n return expected, computed\n\n def _VerifyDilatedConvValues(self, tensor_in_sizes, filter_in_sizes, strides,\n padding, dilations, rtol=1e-4):\n expected_results = []\n computed_results = []\n for data_format, use_gpu in GetTestConfigs():\n expected, computed = self._ComputeReferenceDilatedConv(\n tensor_in_sizes, filter_in_sizes, strides, dilations, padding,\n data_format, use_gpu)\n expected_results.append(expected)\n computed_results.append(computed)\n tolerance = 1e-2 if use_gpu else 1e-5\n expected_values = self.evaluate(expected_results)\n computed_values = self.evaluate(computed_results)\n for e_value, c_value in zip(expected_values, computed_values):\n tf_logging.debug(\"expected = %s\", e_value)\n tf_logging.debug(\"actual = %s\", c_value)\n self.assertAllClose(\n e_value.flatten(), c_value.flatten(), atol=tolerance, rtol=rtol)\n\n def _VerifyValues(self,\n tensor_in_sizes,\n filter_in_sizes,\n strides,\n padding,\n expected,\n dilations=(1, 1),\n gpu_only=False,\n test_grappler_layout_optimizer=False,\n tol=1e-5,\n fp16_tol=1e-3):\n if gpu_only and not test.is_gpu_available(cuda_only=True):\n return\n tensors = []\n dilations = list(dilations)\n for (data_format, use_gpu) in GetTestConfigs():\n if gpu_only and not use_gpu:\n continue\n dtypes_to_test = self._DtypesToTest(use_gpu)\n if not test_grappler_layout_optimizer and data_format == \"NHWC\":\n dtypes_to_test.append(dtypes.int32)\n for dtype in dtypes_to_test:\n result = self._SetupValuesForDevice(\n tensor_in_sizes,\n filter_in_sizes,\n dilations,\n strides,\n padding,\n data_format,\n dtype,\n use_gpu=use_gpu)\n if test_grappler_layout_optimizer and data_format == \"NHWC\" and use_gpu:\n # Grappler's layout optimizer will not optimize a fetch node, so\n # this identity allows Grappler to optimize the Conv2D node.\n result = array_ops.identity(result)\n tensors.append(result)\n values = self.evaluate(tensors)\n for i in range(len(tensors)):\n conv = tensors[i]\n value = values[i]\n tf_logging.debug(\"expected = %s\", expected)\n tf_logging.debug(\"actual = %s\", value)\n tol_to_use = fp16_tol if value.dtype == np.float16 else tol\n if np.issubdtype(value.dtype, np.integer):\n self.assertAllEqual(np.rint(expected), np.ravel(value))\n else:\n self.assertAllClose(expected, np.ravel(value), atol=tol_to_use,\n rtol=tol_to_use)\n self.assertShapeEqual(value, conv)\n self.assertEqual(value.dtype, conv.dtype.as_numpy_dtype)\n\n def _VerifyExplicitPaddings(self,\n 
tensor_in_sizes,\n filter_in_sizes,\n strides,\n padding,\n dilations=(1, 1),\n test_grappler_layout_optimizer=False,\n tol=1e-5,\n fp16_tol=1e-3):\n \"\"\"Verifies Conv2D with explicit padding generates correct values.\n\n It does this by comparing with Conv2D without explicit padding. This\n function assumes Conv2D without explicit padding works correctly.\n\n Args:\n tensor_in_sizes: Input tensor dimensions in [batch, input_rows,\n input_cols, input_depth].\n filter_in_sizes: Filter tensor dimensions in [kernel_rows, kernel_cols,\n input_depth, output_depth].\n strides: [row_stride, col_stride] for the convolution;\n padding: Explicit padding amounts.\n dilations: Dilation values\n test_grappler_layout_optimizer: If True, allow the Grappler layout\n optimizer to run, which turns NHWC Conv2Ds on the GPU to NCHW Conv2Ds.\n tol: The absolute and relative tolerance for non-fp16 dtypes.\n fp16_tol: The absolute and relative tolerance for fp16.\n \"\"\"\n input_tensor = self._CreateNumpyTensor(tensor_in_sizes)\n filter_tensor = self._CreateNumpyTensor(filter_in_sizes)\n input_tensor = array_ops.pad(input_tensor, [(0, 0)] + padding + [(0, 0)])\n dilations = list(dilations)\n conv2d_result = nn_ops.conv2d(\n input_tensor,\n filter_tensor, [1] + list(strides) + [1],\n \"VALID\",\n dilations=[1] + dilations + [1])\n expected = list(self.evaluate(array_ops.reshape(conv2d_result, [-1])))\n self._VerifyValues(\n tensor_in_sizes,\n filter_in_sizes,\n strides,\n padding,\n expected,\n dilations,\n test_grappler_layout_optimizer=test_grappler_layout_optimizer,\n tol=tol,\n fp16_tol=fp16_tol)\n\n @test_util.run_in_graph_and_eager_modes\n def testConv2D1x1Filter(self):\n expected_output = [\n 30.0, 36.0, 42.0, 66.0, 81.0, 96.0, 102.0, 126.0, 150.0, 138.0, 171.0,\n 204.0, 174.0, 216.0, 258.0, 210.0, 261.0, 312.0\n ]\n self._VerifyValues(\n tensor_in_sizes=[1, 2, 3, 3],\n filter_in_sizes=[1, 1, 3, 3],\n strides=[1, 1],\n padding=\"VALID\",\n expected=expected_output)\n\n @test_util.run_in_graph_and_eager_modes\n def testConv2DExpandedBatch(self):\n tensor_in_sizes_batch = [10, 2, 3, 3]\n tensor_in_sizes_expanded_batch = [2, 5, 2, 3, 3]\n filter_in_sizes = [1, 1, 3, 3]\n filter_in = self._CreateNumpyTensor(filter_in_sizes)\n x1 = self._CreateNumpyTensor(tensor_in_sizes_batch)\n x2 = x1.reshape(tensor_in_sizes_expanded_batch)\n conv1 = nn_ops.conv2d(\n x1,\n filter_in,\n strides=[1, 1],\n padding=\"VALID\")\n conv2 = nn_ops.conv2d(\n x2,\n filter_in,\n strides=[1, 1],\n padding=\"VALID\")\n self.assertEqual(conv1.shape, tensor_in_sizes_batch)\n self.assertEqual(conv2.shape, tensor_in_sizes_expanded_batch)\n self.assertAllEqual(\n conv1,\n self.evaluate(conv2).reshape(conv1.shape))\n\n @test_util.run_in_graph_and_eager_modes\n def testConvolutionClass2DExpandedBatch(self):\n tensor_in_sizes_batch = [10, 2, 3, 3]\n tensor_in_sizes_expanded_batch = [2, 5, 2, 3, 3]\n filter_in_sizes = [1, 1, 3, 3]\n filter_in = self._CreateNumpyTensor(filter_in_sizes)\n x1 = self._CreateNumpyTensor(tensor_in_sizes_batch)\n x2 = x1.reshape(tensor_in_sizes_expanded_batch)\n convolver1 = nn_ops.Convolution(\n input_shape=x1.shape,\n filter_shape=filter_in.shape,\n strides=[1, 1],\n padding=\"VALID\")\n self.assertEqual(convolver1.num_batch_dims, 1)\n convolver2 = nn_ops.Convolution(\n input_shape=x2.shape,\n filter_shape=filter_in.shape,\n strides=[1, 1],\n padding=\"VALID\")\n self.assertEqual(convolver2.num_batch_dims, 2)\n conv1 = convolver1(x1, filter_in)\n conv2 = convolver2(x2, filter_in)\n self.assertEqual(conv1.shape, 
tensor_in_sizes_batch)\n self.assertEqual(conv2.shape, tensor_in_sizes_expanded_batch)\n self.assertAllEqual(\n conv1,\n self.evaluate(conv2).reshape(conv1.shape))\n\n @test_util.run_in_graph_and_eager_modes\n def testConvolutionWith2SpatialDimensionsAndExpandedBatch(self):\n tensor_in_sizes_batch = [10, 2, 3, 3]\n tensor_in_sizes_expanded_batch = [2, 5, 2, 3, 3]\n filter_in_sizes = [1, 1, 3, 3]\n filter_in = self._CreateNumpyTensor(filter_in_sizes)\n x1 = self._CreateNumpyTensor(tensor_in_sizes_batch)\n x2 = x1.reshape(tensor_in_sizes_expanded_batch)\n conv1 = nn_ops.convolution(\n x1,\n filter_in,\n strides=[1, 1],\n padding=\"VALID\")\n conv2 = nn_ops.convolution(\n x2,\n filter_in,\n strides=[1, 1],\n padding=\"VALID\")\n self.assertEqual(conv1.shape, tensor_in_sizes_batch)\n self.assertEqual(conv2.shape, tensor_in_sizes_expanded_batch)\n self.assertAllEqual(\n conv1,\n self.evaluate(conv2).reshape(conv1.shape))\n\n @test_util.run_in_graph_and_eager_modes\n def testConv2D2x2Filter2x1Dilation(self):\n self._VerifyDilatedConvValues(\n tensor_in_sizes=[1, 4, 4, 1],\n filter_in_sizes=[2, 2, 1, 1],\n strides=[1, 1],\n dilations=[2, 1],\n padding=\"VALID\")\n\n @test_util.run_in_graph_and_eager_modes\n def testConv2DEmpty(self):\n expected_output = []\n self._VerifyValues(\n tensor_in_sizes=[0, 2, 3, 3],\n filter_in_sizes=[1, 1, 3, 3],\n strides=[1, 1],\n padding=\"VALID\",\n expected=expected_output)\n\n @test_util.run_in_graph_and_eager_modes\n def testConv2DEmptyDilation(self):\n self._VerifyDilatedConvValues(\n tensor_in_sizes=[0, 2, 3, 3],\n filter_in_sizes=[1, 1, 3, 3],\n strides=[1, 1],\n dilations=[2, 1],\n padding=\"VALID\")\n\n @test_util.run_in_graph_and_eager_modes\n def testConv2D2x2Filter(self):\n # The outputs are computed using third_party/py/IPython/notebook.\n expected_output = [2271.0, 2367.0, 2463.0, 2901.0, 3033.0, 3165.0]\n self._VerifyValues(\n tensor_in_sizes=[1, 2, 3, 3],\n filter_in_sizes=[2, 2, 3, 3],\n strides=[1, 1],\n padding=\"VALID\",\n expected=expected_output)\n\n @test_util.run_in_graph_and_eager_modes\n def testConv2D2x2FilterDilation(self):\n self._VerifyDilatedConvValues(\n tensor_in_sizes=[1, 2, 3, 3],\n filter_in_sizes=[2, 2, 3, 3],\n strides=[1, 1],\n dilations=[1, 2],\n padding=\"VALID\")\n\n @test_util.run_in_graph_and_eager_modes\n def testConv2D1x2Filter(self):\n # The outputs are computed using third_party/py/IPython/notebook.\n expected_output = [\n 231.0, 252.0, 273.0, 384.0, 423.0, 462.0, 690.0, 765.0, 840.0, 843.0,\n 936.0, 1029.0\n ]\n self._VerifyValues(\n tensor_in_sizes=[1, 2, 3, 3],\n filter_in_sizes=[1, 2, 3, 3],\n strides=[1, 1],\n padding=\"VALID\",\n expected=expected_output)\n\n @test_util.run_in_graph_and_eager_modes\n def testConv2D1x2FilterDilation(self):\n self._VerifyDilatedConvValues(\n tensor_in_sizes=[1, 2, 3, 3],\n filter_in_sizes=[1, 2, 3, 3],\n strides=[1, 1],\n dilations=[2, 1],\n padding=\"VALID\")\n\n @test_util.run_in_graph_and_eager_modes\n def testConv2D2x2FilterStride2(self):\n expected_output = [2271.0, 2367.0, 2463.0]\n self._VerifyValues(\n tensor_in_sizes=[1, 2, 3, 3],\n filter_in_sizes=[2, 2, 3, 3],\n strides=[2, 2],\n padding=\"VALID\",\n expected=expected_output)\n\n @test_util.run_in_graph_and_eager_modes\n def testConv2D2x2FilterStride2Same(self):\n expected_output = [2271.0, 2367.0, 2463.0, 1230.0, 1305.0, 1380.0]\n self._VerifyValues(\n tensor_in_sizes=[1, 2, 3, 3],\n filter_in_sizes=[2, 2, 3, 3],\n strides=[2, 2],\n padding=\"SAME\",\n expected=expected_output)\n\n 
@test_util.run_in_graph_and_eager_modes\n def testConv2D2x2FilterStride1x2(self):\n expected_output = [58.0, 78.0, 98.0, 118.0, 138.0, 158.0]\n self._VerifyValues(\n tensor_in_sizes=[1, 3, 6, 1],\n filter_in_sizes=[2, 2, 1, 1],\n strides=[1, 2],\n padding=\"VALID\",\n expected=expected_output)\n\n @test_util.run_in_graph_and_eager_modes\n def testConv2DKernelSmallerThanStrideValid(self):\n expected_output = [65, 95, 275, 305]\n self._VerifyValues(\n tensor_in_sizes=[1, 7, 7, 1],\n filter_in_sizes=[2, 2, 1, 1],\n strides=[3, 3],\n padding=\"VALID\",\n expected=expected_output)\n\n @test_util.run_in_graph_and_eager_modes\n def testConv2DKernelSmallerThanStrideSame(self):\n self._VerifyValues(\n tensor_in_sizes=[1, 3, 3, 1],\n filter_in_sizes=[1, 1, 1, 1],\n strides=[2, 2],\n padding=\"SAME\",\n expected=[1, 3, 7, 9])\n\n self._VerifyValues(\n tensor_in_sizes=[1, 4, 4, 1],\n filter_in_sizes=[1, 1, 1, 1],\n strides=[2, 2],\n padding=\"SAME\",\n expected=[1, 3, 9, 11])\n\n self._VerifyValues(\n tensor_in_sizes=[1, 4, 4, 1],\n filter_in_sizes=[2, 2, 1, 1],\n strides=[3, 3],\n padding=\"SAME\",\n expected=[44, 28, 41, 16])\n\n @test_util.run_in_graph_and_eager_modes\n def testConv2DKernelSizeMatchesInputSize(self):\n self._VerifyValues(\n tensor_in_sizes=[1, 2, 2, 1],\n filter_in_sizes=[2, 2, 1, 2],\n strides=[1, 1],\n padding=\"VALID\",\n expected=[50, 60])\n\n @test_util.run_in_graph_and_eager_modes\n def testConv2DKernelSizeMatchesInputSizeDilation(self):\n self._VerifyDilatedConvValues(\n tensor_in_sizes=[1, 3, 3, 1],\n filter_in_sizes=[2, 2, 1, 2],\n strides=[1, 1],\n dilations=[2, 2],\n padding=\"VALID\")\n\n @test_util.run_in_graph_and_eager_modes()\n def testConv2D0x0Padding(self):\n self._VerifyExplicitPaddings(\n tensor_in_sizes=[1, 2, 3, 3],\n filter_in_sizes=[2, 2, 3, 3],\n strides=[1, 1],\n padding=[[0, 0], [0, 0]])\n\n self._VerifyExplicitPaddings(\n tensor_in_sizes=[3, 4, 3, 2],\n filter_in_sizes=[1, 1, 2, 1],\n strides=[2, 2],\n padding=[[0, 0], [0, 0]])\n\n @test_util.run_in_graph_and_eager_modes()\n def testConv2D1x1Padding(self):\n self._VerifyExplicitPaddings(\n tensor_in_sizes=[1, 2, 3, 2],\n filter_in_sizes=[2, 2, 2, 2],\n strides=[1, 1],\n padding=[[1, 1], [1, 1]])\n\n self._VerifyExplicitPaddings(\n tensor_in_sizes=[1, 2, 2, 1],\n filter_in_sizes=[1, 1, 1, 2],\n strides=[1, 1],\n padding=[[1, 1], [1, 1]])\n\n @test_util.run_in_graph_and_eager_modes()\n def testConv2D2x2Padding(self):\n self._VerifyExplicitPaddings(\n tensor_in_sizes=[1, 2, 1, 2],\n filter_in_sizes=[2, 1, 2, 1],\n strides=[1, 1],\n padding=[[2, 2], [2, 2]])\n\n self._VerifyExplicitPaddings(\n tensor_in_sizes=[1, 2, 1, 2],\n filter_in_sizes=[1, 1, 2, 1],\n strides=[2, 1],\n padding=[[2, 2], [2, 2]])\n\n @test_util.run_in_graph_and_eager_modes()\n def testConv2DOnlyBottomPadding(self):\n self._VerifyExplicitPaddings(\n tensor_in_sizes=[1, 2, 3, 3],\n filter_in_sizes=[2, 2, 3, 2],\n strides=[1, 1],\n padding=[[0, 3], [0, 0]], tol=2e-5)\n\n self._VerifyExplicitPaddings(\n tensor_in_sizes=[2, 2, 4, 3],\n filter_in_sizes=[1, 2, 3, 2],\n strides=[2, 2],\n padding=[[0, 3], [0, 0]])\n\n @test_util.run_in_graph_and_eager_modes()\n def testConv2DOnlyTopRightPadding(self):\n self._VerifyExplicitPaddings(\n tensor_in_sizes=[1, 2, 3, 3],\n filter_in_sizes=[2, 2, 3, 2],\n strides=[1, 1],\n padding=[[1, 0], [0, 2]],\n tol=5e-5)\n\n self._VerifyExplicitPaddings(\n tensor_in_sizes=[1, 2, 4, 2],\n filter_in_sizes=[2, 2, 2, 2],\n strides=[1, 3],\n padding=[[1, 0], [0, 2]])\n\n @test_util.run_in_graph_and_eager_modes()\n def 
testConv2DLotsPadding(self):\n self._VerifyExplicitPaddings(\n tensor_in_sizes=[1, 1, 1, 3],\n filter_in_sizes=[2, 2, 3, 3],\n strides=[1, 1],\n padding=[[3, 4], [4, 2]])\n\n self._VerifyExplicitPaddings(\n tensor_in_sizes=[1, 2, 1, 1],\n filter_in_sizes=[2, 2, 1, 3],\n strides=[2, 1],\n padding=[[3, 4], [4, 2]])\n\n @test_util.run_in_graph_and_eager_modes()\n def testConv2DExplicitPaddingWithDilations(self):\n self._VerifyExplicitPaddings(\n tensor_in_sizes=[1, 3, 2, 1],\n filter_in_sizes=[1, 2, 1, 2],\n strides=[1, 1],\n padding=[[1, 0], [0, 1]],\n dilations=[2, 1])\n\n self._VerifyExplicitPaddings(\n tensor_in_sizes=[1, 2, 3, 2],\n filter_in_sizes=[3, 2, 2, 1],\n strides=[1, 1],\n padding=[[2, 1], [1, 2]],\n dilations=[2, 3])\n\n def testConv2DExplicitPaddingWithLayoutOptimizer(self):\n # Test with Grappler's layout optimizer, to ensure the layout optimizer\n # handles explicit padding correctly.\n self._VerifyExplicitPaddings(\n tensor_in_sizes=[1, 3, 2, 1],\n filter_in_sizes=[1, 2, 1, 2],\n strides=[1, 1],\n padding=[[1, 0], [0, 1]],\n dilations=[2, 1],\n test_grappler_layout_optimizer=True)\n\n self._VerifyExplicitPaddings(\n tensor_in_sizes=[1, 2, 3, 2],\n filter_in_sizes=[3, 2, 2, 1],\n strides=[1, 1],\n padding=[[2, 1], [1, 2]],\n dilations=[2, 3],\n test_grappler_layout_optimizer=True)\n\n def _VerifyGroupConvFwd(self, tensor_in_sizes, filter_in_sizes, dilations,\n strides, padding, data_format, dtype):\n \"\"\"Verify the output of group convolution is equal to a for-loop implementation.\n\n Args:\n tensor_in_sizes: Input tensor dimensions in [batch, input_rows,\n input_cols, input_depth].\n filter_in_sizes: Filter tensor dimensions in [kernel_rows, kernel_cols,\n input_depth, output_depth].\n dilations: Dilated rate: [col_dilation, row_dilation]\n strides: Stride: [col_stride, row_stride]\n padding: Padding type.\n data_format: Format of the data tensors.\n dtype: Data type for inputs and outputs.\n \"\"\"\n tensor_in = self._CreateNumpyTensor(tensor_in_sizes)\n filter_in = self._CreateNumpyTensor(filter_in_sizes)\n num_groups = tensor_in_sizes[3] // filter_in_sizes[2]\n assert num_groups > 1 and \\\n filter_in_sizes[2] * num_groups == tensor_in_sizes[3]\n with test_util.device(True):\n t1 = constant_op.constant(tensor_in, dtype=dtype)\n t2 = constant_op.constant(filter_in, dtype=dtype)\n strides = [1] + strides + [1]\n dilations = [1] + dilations + [1]\n if data_format == \"NCHW\":\n t1 = test_util.NHWCToNCHW(t1)\n strides = test_util.NHWCToNCHW(strides)\n dilations = test_util.NHWCToNCHW(dilations)\n t1_splits = array_ops.split(t1, num_groups, axis=1)\n else:\n t1_splits = array_ops.split(t1, num_groups, axis=3)\n t2_splits = array_ops.split(t2, num_groups, axis=3)\n\n def MakeConv2d(inputs, filters):\n return nn_ops.conv2d(\n inputs,\n filters,\n strides,\n padding,\n dilations=dilations,\n data_format=data_format)\n\n group_conv = MakeConv2d(t1, t2)\n group_conv_loop = array_ops.concat(\n [MakeConv2d(t1s, t2s) for t1s, t2s in zip(t1_splits, t2_splits)],\n axis=1 if data_format == \"NCHW\" else 3)\n\n results = self.evaluate([group_conv, group_conv_loop])\n tol_to_use = 1e-5\n self.assertAllClose(\n results[0], results[1], atol=tol_to_use, rtol=tol_to_use)\n\n @test_util.run_in_graph_and_eager_modes\n @test_util.run_cuda_only\n def testConv2DGroupConvFwd(self):\n for data_format in [\"NHWC\", \"NCHW\"]:\n for dilation in [1, 2]:\n for stride in [1, 2]:\n self._VerifyGroupConvFwd([10, 32, 32, 16], [3, 3, 4, 8],\n dilations=[dilation, dilation],\n strides=[stride, stride],\n 
padding=\"SAME\",\n data_format=data_format,\n dtype=dtypes.float32)\n\n @test_util.deprecated_graph_mode_only\n @test_util.run_cuda_only\n def testInputGradientGroupConv(self):\n for data_format in [\"NCHW\", \"NHWC\"]:\n for test_input in [True, False]:\n self.ConstructAndTestGradient(\n batch=2,\n input_rows=5,\n input_cols=4,\n filter_rows=3,\n filter_cols=3,\n num_groups=2,\n padding=\"VALID\",\n in_depth=4,\n out_depth=6,\n stride_rows=1,\n stride_cols=1,\n test_input=test_input,\n data_format=data_format,\n use_gpu=True,\n max_err=0.005)\n\n @test_util.deprecated_graph_mode_only\n @test_util.run_cuda_only\n def testFilterGradientGroupConv(self):\n for data_format in [\"NCHW\", \"NHWC\"]:\n for test_input in [True, False]:\n self.ConstructAndTestGradient(\n batch=2,\n input_rows=5,\n input_cols=4,\n filter_rows=3,\n filter_cols=3,\n num_groups=2,\n padding=\"VALID\",\n in_depth=4,\n out_depth=6,\n stride_rows=1,\n stride_cols=1,\n test_input=test_input,\n data_format=data_format,\n use_gpu=True,\n max_err=0.005)\n # TODO(yzhwang): this currently fails.\n # self._VerifyValues(tensor_in_sizes=[1, 8, 8, 1],\n # filter_in_sizes=[2, 2, 1, 1],\n # strides=[4, 4], padding=\"SAME\",\n # expected=[72, 112, 392, 432])\n\n # Testing for backprops\n def _RunAndVerifyBackpropInput(self,\n input_sizes,\n filter_sizes,\n output_sizes,\n strides,\n padding,\n expected,\n data_format,\n use_gpu,\n err,\n dilations=(1, 1)):\n if use_gpu and not test.is_gpu_available(cuda_only=True):\n return\n x1 = self._CreateNumpyTensor(filter_sizes)\n x2 = self._CreateNumpyTensor(output_sizes)\n dilations = list(dilations)\n with test_util.device(use_gpu):\n if len(input_sizes) == 4:\n if data_format == \"NCHW\":\n input_sizes = test_util.NHWCToNCHW(input_sizes)\n t0 = constant_op.constant(input_sizes, shape=[len(input_sizes)])\n t1 = constant_op.constant(x1, shape=filter_sizes)\n t2 = constant_op.constant(x2, shape=output_sizes)\n strides = [1] + strides + [1]\n dilations = [1] + dilations + [1]\n if isinstance(padding, (list, tuple)):\n padding = [(0, 0)] + padding + [(0, 0)]\n if data_format == \"NCHW\":\n t2 = test_util.NHWCToNCHW(t2)\n strides = test_util.NHWCToNCHW(strides)\n dilations = test_util.NHWCToNCHW(dilations)\n if isinstance(padding, (list, tuple)):\n padding = test_util.NHWCToNCHW((padding))\n conv = nn_ops.conv2d_backprop_input(\n t0,\n t1,\n t2,\n strides=strides,\n padding=padding,\n data_format=data_format,\n dilations=dilations)\n if data_format == \"NCHW\":\n conv = test_util.NCHWToNHWC(conv)\n # \"values\" consists of two tensors for two backprops\n value = self.evaluate(conv)\n self.assertShapeEqual(value, conv)\n tf_logging.debug(\"expected = %s\", expected)\n tf_logging.debug(\"actual = %s\", value)\n self.assertAllCloseAccordingToType(expected, value.flatten(), atol=1e-5)\n\n def _CompareBackpropInput(self, input_sizes, filter_sizes, output_sizes,\n conv_strides, padding):\n x1 = np.random.rand(*filter_sizes).astype(np.float32)\n x2 = np.random.rand(*output_sizes).astype(np.float32)\n\n def _GetVal(data_format, use_gpu):\n with test_util.device(use_gpu):\n if data_format == \"NCHW\":\n new_input_sizes = test_util.NHWCToNCHW(input_sizes)\n else:\n new_input_sizes = input_sizes\n t0 = constant_op.constant(new_input_sizes, shape=[len(new_input_sizes)])\n t1 = constant_op.constant(x1, shape=filter_sizes)\n t2 = constant_op.constant(x2, shape=output_sizes)\n strides = [1] + conv_strides + [1]\n if data_format == \"NCHW\":\n t2 = test_util.NHWCToNCHW(t2)\n strides = 
test_util.NHWCToNCHW(strides)\n conv = nn_ops.conv2d_backprop_input(\n t0,\n t1,\n t2,\n strides=strides,\n padding=padding,\n data_format=data_format)\n if data_format == \"NCHW\":\n conv = test_util.NCHWToNHWC(conv)\n ret = self.evaluate(conv)\n self.assertShapeEqual(ret, conv)\n return ret\n\n values = []\n for (data_format, use_gpu) in GetTestConfigs():\n values.append(_GetVal(data_format, use_gpu))\n\n for i in range(1, len(values)):\n self.assertAllClose(values[0], values[i], rtol=1e-2, atol=1e-2)\n\n @test_util.run_in_graph_and_eager_modes\n def testConv2D2x2Depth1ValidBackpropInput(self):\n expected_output = [1.0, 4.0, 4.0, 3.0, 10.0, 8.0]\n for (data_format, use_gpu) in GetTestConfigs():\n self._RunAndVerifyBackpropInput(\n input_sizes=[1, 2, 3, 1],\n filter_sizes=[2, 2, 1, 1],\n output_sizes=[1, 1, 2, 1],\n strides=[1, 1],\n padding=\"VALID\",\n expected=expected_output,\n data_format=data_format,\n use_gpu=use_gpu,\n err=1e-5)\n\n @test_util.run_in_graph_and_eager_modes\n def testConv2DEmptyBackpropInput(self):\n expected_output = []\n for (data_format, use_gpu) in GetTestConfigs():\n self._RunAndVerifyBackpropInput(\n input_sizes=[0, 2, 3, 1],\n filter_sizes=[2, 2, 1, 1],\n output_sizes=[0, 1, 2, 1],\n strides=[1, 1],\n padding=\"VALID\",\n expected=expected_output,\n data_format=data_format,\n use_gpu=use_gpu,\n err=1e-5)\n\n @test_util.run_in_graph_and_eager_modes\n def testConv2D2x2Depth3ValidBackpropInput(self):\n expected_output = [\n 14.0, 32.0, 50.0, 100.0, 163.0, 226.0, 167.0, 212.0, 257.0, 122.0,\n 140.0, 158.0, 478.0, 541.0, 604.0, 437.0, 482.0, 527.0\n ]\n for (data_format, use_gpu) in GetTestConfigs():\n # The GPU version of this test is not very stable. So adjusting the\n # error threshold to 1e-4.\n self._RunAndVerifyBackpropInput(\n input_sizes=[1, 2, 3, 3],\n filter_sizes=[2, 2, 3, 3],\n output_sizes=[1, 1, 2, 3],\n strides=[1, 1],\n padding=\"VALID\",\n expected=expected_output,\n data_format=data_format,\n use_gpu=use_gpu,\n err=1e-4)\n\n @test_util.run_in_graph_and_eager_modes\n def testConv2D2x2Depth3ValidBackpropInputStride1x2(self):\n expected_output = [\n 1.0, 2.0, 2.0, 4.0, 3.0, 6.0, 7.0, 12.0, 11.0, 18.0, 15.0, 24.0, 12.0,\n 16.0, 15.0, 20.0, 18.0, 24.0\n ]\n for (data_format, use_gpu) in GetTestConfigs():\n self._RunAndVerifyBackpropInput(\n input_sizes=[1, 3, 6, 1],\n filter_sizes=[2, 2, 1, 1],\n output_sizes=[1, 2, 3, 1],\n strides=[1, 2],\n padding=\"VALID\",\n expected=expected_output,\n data_format=data_format,\n use_gpu=use_gpu,\n err=1e-5)\n\n @test_util.run_in_graph_and_eager_modes\n def testConv2DStrideTwoFilterOneSameBackpropInput(self):\n expected_output = [\n 1.0, 0.0, 2.0, 0.0, 0.0, 0.0, 0.0, 0.0, 3.0, 0.0, 4.0, 0.0, 0.0, 0.0,\n 0.0, 0.0\n ]\n for (data_format, use_gpu) in GetTestConfigs():\n self._RunAndVerifyBackpropInput(\n input_sizes=[1, 4, 4, 1],\n filter_sizes=[1, 1, 1, 1],\n output_sizes=[1, 2, 2, 1],\n strides=[2, 2],\n padding=\"SAME\",\n expected=expected_output,\n data_format=data_format,\n use_gpu=use_gpu,\n err=1e-5)\n\n @test_util.run_in_graph_and_eager_modes\n def testConv2DKernelSizeMatchesInputSizeBackpropInput(self):\n expected_output = [5.0, 11.0, 17.0, 23.0]\n for (data_format, use_gpu) in GetTestConfigs():\n self._RunAndVerifyBackpropInput(\n input_sizes=[1, 2, 2, 1],\n filter_sizes=[2, 2, 1, 2],\n output_sizes=[1, 1, 1, 2],\n strides=[1, 1],\n padding=\"VALID\",\n expected=expected_output,\n data_format=data_format,\n use_gpu=use_gpu,\n err=1e-5)\n\n @test_util.run_in_graph_and_eager_modes\n 
@test_util.disable_xla(\"XLA requires input_sizes to be a 4D shape.\")\n def testConv2DInputSizesContainsOnlySpatialDimensionsBackpropInput(self):\n expected_output = [5.0, 11.0, 17.0, 23.0]\n for (data_format, use_gpu) in GetTestConfigs():\n self._RunAndVerifyBackpropInput(\n input_sizes=[2, 2],\n filter_sizes=[2, 2, 1, 2],\n output_sizes=[1, 1, 1, 2],\n strides=[1, 1],\n padding=\"VALID\",\n expected=expected_output,\n data_format=data_format,\n use_gpu=use_gpu,\n err=1e-5)\n\n # Testing for backprops\n def _RunAndVerifyBackpropFilter(self,\n input_sizes,\n filter_sizes,\n output_sizes,\n strides,\n padding,\n expected,\n data_format,\n use_gpu,\n dilations=(1, 1),\n err=1e-5):\n x0 = self._CreateNumpyTensor(input_sizes)\n x2 = self._CreateNumpyTensor(output_sizes)\n dilations = list(dilations)\n explicit_strides = [1] + strides + [1]\n new_padding = padding\n new_dilations = [1] + dilations + [1]\n if isinstance(new_padding, (list, tuple)):\n new_padding = [(0, 0)] + new_padding + [(0, 0)]\n if data_format == \"NCHW\":\n explicit_strides = test_util.NHWCToNCHW(explicit_strides)\n new_dilations = test_util.NHWCToNCHW(new_dilations)\n if isinstance(padding, (list, tuple)):\n new_padding = test_util.NHWCToNCHW(new_padding)\n for dtype in self._DtypesToTest(use_gpu=use_gpu):\n with test_util.device(use_gpu):\n t0 = constant_op.constant(x0, shape=input_sizes, dtype=dtype)\n t1 = constant_op.constant(filter_sizes, shape=[len(filter_sizes)])\n t2 = constant_op.constant(x2, shape=output_sizes, dtype=dtype)\n if data_format == \"NCHW\":\n t0 = test_util.NHWCToNCHW(t0)\n t2 = test_util.NHWCToNCHW(t2)\n conv = nn_ops.conv2d_backprop_filter(\n t0,\n t1,\n t2,\n strides=explicit_strides,\n padding=new_padding,\n dilations=new_dilations,\n data_format=data_format)\n value = self.evaluate(conv)\n self.assertShapeEqual(value, conv)\n tf_logging.debug(\"expected = %s\", expected)\n tf_logging.debug(\"actual = %s\", value)\n self.assertArrayNear(expected, value.flatten(), err)\n\n def _CompareBackFilter(self, input_sizes, filter_sizes, output_sizes,\n conv_strides, padding):\n x0 = np.random.rand(*input_sizes).astype(np.float32)\n x2 = np.random.rand(*output_sizes).astype(np.float32)\n\n def _GetVal(data_format, use_gpu):\n with test_util.device(use_gpu):\n t0 = constant_op.constant(x0, shape=input_sizes)\n t1 = constant_op.constant(filter_sizes, shape=[len(filter_sizes)])\n t2 = constant_op.constant(x2, shape=output_sizes)\n strides = [1] + conv_strides + [1]\n if data_format == \"NCHW\":\n t0 = test_util.NHWCToNCHW(t0)\n t2 = test_util.NHWCToNCHW(t2)\n strides = test_util.NHWCToNCHW(strides)\n conv = nn_ops.conv2d_backprop_filter(\n t0,\n t1,\n t2,\n strides=strides,\n padding=padding,\n data_format=data_format)\n ret = self.evaluate(conv)\n self.assertShapeEqual(ret, conv)\n return ret\n\n values = []\n for (data_format, use_gpu) in GetTestConfigs():\n values.append(_GetVal(data_format, use_gpu))\n for i in range(1, len(values)):\n self.assertAllClose(values[0], values[i], rtol=1e-4, atol=1e-4)\n\n @test_util.run_in_graph_and_eager_modes\n def testConv2D2x2Depth1ValidBackpropFilter(self):\n expected = [5.0, 8.0, 14.0, 17.0]\n for (data_format, use_gpu) in GetTestConfigs():\n self._RunAndVerifyBackpropFilter(\n input_sizes=[1, 2, 3, 1],\n filter_sizes=[2, 2, 1, 1],\n output_sizes=[1, 1, 2, 1],\n strides=[1, 1],\n padding=\"VALID\",\n expected=expected,\n data_format=data_format,\n use_gpu=use_gpu)\n\n @test_util.run_in_graph_and_eager_modes\n def testConv2DEmptyBackpropFilter(self):\n expected = []\n 
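# out_depth is 0 here, so the filter gradient is empty; the check still runs for every (data_format, use_gpu) configuration.\n 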
for (data_format, use_gpu) in GetTestConfigs():\n self._RunAndVerifyBackpropFilter(\n input_sizes=[1, 2, 3, 1],\n filter_sizes=[2, 2, 1, 0],\n output_sizes=[1, 1, 2, 0],\n strides=[1, 1],\n padding=\"VALID\",\n expected=expected,\n data_format=data_format,\n use_gpu=use_gpu)\n\n @test_util.run_in_graph_and_eager_modes\n def testConv2DBackpropFilterWithEmptyInput(self):\n expected = [0, 0, 0, 0]\n for (data_format, use_gpu) in GetTestConfigs():\n self._RunAndVerifyBackpropFilter(\n input_sizes=[0, 2, 3, 1],\n filter_sizes=[2, 2, 1, 1],\n output_sizes=[0, 1, 2, 1],\n strides=[1, 1],\n padding=\"VALID\",\n expected=expected,\n data_format=data_format,\n use_gpu=use_gpu)\n\n @test_util.run_in_graph_and_eager_modes\n def testConv2D2x2Depth3ValidBackpropFilter(self):\n expected = [\n 17.0, 22.0, 27.0, 22.0, 29.0, 36.0, 27.0, 36.0, 45.0, 32.0, 43.0, 54.0,\n 37.0, 50.0, 63.0, 42.0, 57.0, 72.0, 62.0, 85.0, 108.0, 67.0, 92.0,\n 117.0, 72.0, 99.0, 126.0, 77.0, 106.0, 135.0, 82.0, 113.0, 144.0, 87.0,\n 120.0, 153.0\n ]\n for (data_format, use_gpu) in GetTestConfigs():\n self._RunAndVerifyBackpropFilter(\n input_sizes=[1, 2, 3, 3],\n filter_sizes=[2, 2, 3, 3],\n output_sizes=[1, 1, 2, 3],\n strides=[1, 1],\n padding=\"VALID\",\n expected=expected,\n data_format=data_format,\n use_gpu=use_gpu)\n\n @test_util.run_in_graph_and_eager_modes\n def testConv2D2x2Depth3ValidBackpropFilterStride1x2(self):\n expected = [161.0, 182.0, 287.0, 308.0]\n for (data_format, use_gpu) in GetTestConfigs():\n self._RunAndVerifyBackpropFilter(\n input_sizes=[1, 3, 6, 1],\n filter_sizes=[2, 2, 1, 1],\n output_sizes=[1, 2, 3, 1],\n strides=[1, 2],\n padding=\"VALID\",\n expected=expected,\n data_format=data_format,\n use_gpu=use_gpu)\n\n @test_util.run_in_graph_and_eager_modes\n def testConv2DStrideTwoFilterOneSameBackpropFilter(self):\n expected_output = [78.]\n for (data_format, use_gpu) in GetTestConfigs():\n self._RunAndVerifyBackpropFilter(\n input_sizes=[1, 4, 4, 1],\n filter_sizes=[1, 1, 1, 1],\n output_sizes=[1, 2, 2, 1],\n strides=[2, 2],\n padding=\"SAME\",\n expected=expected_output,\n data_format=data_format,\n use_gpu=use_gpu)\n\n @test_util.run_in_graph_and_eager_modes\n def testConv2DKernelSizeMatchesInputSizeBackpropFilter(self):\n expected_output = [1.0, 2.0, 2.0, 4.0, 3.0, 6.0, 4.0, 8.0]\n for (data_format, use_gpu) in GetTestConfigs():\n self._RunAndVerifyBackpropFilter(\n input_sizes=[1, 2, 2, 1],\n filter_sizes=[2, 2, 1, 2],\n output_sizes=[1, 1, 1, 2],\n strides=[1, 1],\n padding=\"VALID\",\n expected=expected_output,\n data_format=data_format,\n use_gpu=use_gpu)\n\n # Testing for backprops\n def _RunAndVerifyBackpropInputDilation(self, input_sizes, filter_sizes,\n output_sizes, strides, dilations,\n padding, data_format, use_gpu, err):\n x1 = self._CreateNumpyTensor(input_sizes)\n x2 = self._CreateNumpyTensor(filter_sizes)\n default_dilations = (dilations[0] == 1 and dilations[1] == 1)\n if default_dilations or use_gpu:\n with self.cached_session(use_gpu=use_gpu) as sess:\n if data_format == \"NCHW\":\n input_sizes = test_util.NHWCToNCHW(input_sizes)\n t1 = constant_op.constant(x1, shape=input_sizes)\n t2 = constant_op.constant(x2, shape=filter_sizes)\n full_strides = [1] + strides + [1]\n full_dilations = [1] + dilations + [1]\n if data_format == \"NCHW\":\n full_strides = test_util.NHWCToNCHW(full_strides)\n full_dilations = test_util.NHWCToNCHW(full_dilations)\n conv_forward = nn_ops.conv2d(\n t1,\n t2,\n strides=full_strides,\n dilations=full_dilations,\n padding=padding,\n data_format=data_format)\n 
conv_forward_2 = nn_ops.convolution(\n t1,\n t2,\n padding=padding,\n strides=strides,\n dilation_rate=dilations,\n data_format=data_format)\n if data_format == \"NCHW\":\n conv_forward = test_util.NCHWToNHWC(conv_forward)\n conv_forward_2 = test_util.NCHWToNHWC(conv_forward_2)\n conv = gradients_impl.gradients(conv_forward, t1)[0]\n conv_2 = gradients_impl.gradients(conv_forward_2, t1)[0]\n # \"values\" consists of two tensors for two backprops\n value = self.evaluate(conv)\n value_2 = self.evaluate(conv_2)\n self.assertShapeEqual(value, conv)\n self.assertShapeEqual(value_2, conv_2)\n tf_logging.debug(\"expected = %s\", value_2)\n tf_logging.debug(\"actual = %s\", value)\n self.assertArrayNear(value_2.flatten(), value.flatten(), err)\n\n # Testing for backprops\n def _RunAndVerifyBackpropFilterDilation(self, input_sizes, filter_sizes,\n output_sizes, strides, dilations,\n padding, data_format, use_gpu, err):\n x1 = self._CreateNumpyTensor(input_sizes)\n x2 = self._CreateNumpyTensor(filter_sizes)\n default_dilations = (dilations[0] == 1 and dilations[1] == 1)\n if default_dilations or use_gpu:\n with self.cached_session(use_gpu=use_gpu) as sess:\n if data_format == \"NCHW\":\n input_sizes = test_util.NHWCToNCHW(input_sizes)\n t1 = constant_op.constant(x1, shape=input_sizes)\n t2 = constant_op.constant(x2, shape=filter_sizes)\n full_strides = [1] + strides + [1]\n full_dilations = [1] + dilations + [1]\n if data_format == \"NCHW\":\n full_strides = test_util.NHWCToNCHW(full_strides)\n full_dilations = test_util.NHWCToNCHW(full_dilations)\n conv_forward = nn_ops.conv2d(\n t1,\n t2,\n strides=full_strides,\n dilations=full_dilations,\n padding=padding,\n data_format=data_format)\n conv_forward_2 = nn_ops.convolution(\n t1,\n t2,\n padding=padding,\n strides=strides,\n dilation_rate=dilations,\n data_format=data_format)\n if data_format == \"NCHW\":\n conv_forward = test_util.NCHWToNHWC(conv_forward)\n conv_forward_2 = test_util.NCHWToNHWC(conv_forward_2)\n conv = gradients_impl.gradients(conv_forward, t2)[0]\n conv_2 = gradients_impl.gradients(conv_forward_2, t2)[0]\n value = self.evaluate(conv)\n value_2 = self.evaluate(conv_2)\n self.assertShapeEqual(value, conv)\n self.assertShapeEqual(value_2, conv_2)\n tf_logging.debug(\"expected = %s\", value_2)\n tf_logging.debug(\"actual = %s\", value)\n self.assertArrayNear(value_2.flatten(), value.flatten(), err)\n\n @test_util.deprecated_graph_mode_only\n def testConv2D2x2Depth3ValidBackpropFilterStride1x1Dilation2x1(self):\n if test.is_gpu_available(cuda_only=True) or test_util.IsMklEnabled():\n for (data_format, use_gpu) in GetTestConfigs():\n self._RunAndVerifyBackpropFilterDilation(\n input_sizes=[1, 3, 6, 1],\n filter_sizes=[2, 2, 1, 1],\n output_sizes=[1, 1, 5, 1],\n strides=[1, 1],\n dilations=[2, 1],\n padding=\"VALID\",\n data_format=data_format,\n use_gpu=use_gpu,\n err=1e-5)\n\n @test_util.deprecated_graph_mode_only\n def testConv2D2x2Depth1ValidBackpropFilterDilation1x2(self):\n if test.is_gpu_available(cuda_only=True) or test_util.IsMklEnabled():\n for (data_format, use_gpu) in GetTestConfigs():\n self._RunAndVerifyBackpropFilterDilation(\n input_sizes=[1, 2, 3, 1],\n filter_sizes=[2, 2, 1, 1],\n output_sizes=[1, 1, 2, 1],\n strides=[1, 1],\n dilations=[1, 2],\n padding=\"VALID\",\n data_format=data_format,\n use_gpu=use_gpu,\n err=1e-5)\n\n @test_util.deprecated_graph_mode_only\n def testConv2DEmptyBackpropFilterDilation1x2(self):\n if test.is_gpu_available(cuda_only=True) or test_util.IsMklEnabled():\n for (data_format, use_gpu) in 
GetTestConfigs():\n self._RunAndVerifyBackpropFilterDilation(\n input_sizes=[1, 2, 3, 1],\n filter_sizes=[2, 2, 1, 0],\n output_sizes=[1, 1, 2, 0],\n strides=[1, 1],\n dilations=[1, 2],\n padding=\"VALID\",\n data_format=data_format,\n use_gpu=use_gpu,\n err=1e-5)\n\n @test_util.deprecated_graph_mode_only\n def testConv2D2x2Depth3ValidBackpropFilterDilation2x2(self):\n if test.is_gpu_available(cuda_only=True) or test_util.IsMklEnabled():\n for (data_format, use_gpu) in GetTestConfigs():\n self._RunAndVerifyBackpropFilterDilation(\n input_sizes=[1, 3, 4, 3],\n filter_sizes=[2, 2, 3, 3],\n output_sizes=[1, 1, 2, 3],\n strides=[1, 1],\n dilations=[2, 2],\n padding=\"VALID\",\n data_format=data_format,\n use_gpu=use_gpu,\n err=1e-5)\n\n @test_util.deprecated_graph_mode_only\n def testConv2DKernelSizeMatchesInputSizeBackpropFilterDilation2x2(self):\n if test.is_gpu_available(cuda_only=True) or test_util.IsMklEnabled():\n for (data_format, use_gpu) in GetTestConfigs():\n self._RunAndVerifyBackpropFilterDilation(\n input_sizes=[1, 3, 3, 1],\n filter_sizes=[2, 2, 1, 2],\n output_sizes=[1, 1, 1, 2],\n strides=[1, 1],\n dilations=[2, 2],\n padding=\"VALID\",\n data_format=data_format,\n use_gpu=use_gpu,\n err=1e-5)\n\n @test_util.deprecated_graph_mode_only\n def testConv2D2x2Depth3ValidBackpropInputStride1x1Dilation2x1(self):\n if test.is_gpu_available(cuda_only=True) or test_util.IsMklEnabled():\n for (data_format, use_gpu) in GetTestConfigs():\n self._RunAndVerifyBackpropInputDilation(\n input_sizes=[1, 3, 6, 1],\n filter_sizes=[2, 2, 1, 1],\n output_sizes=[1, 1, 5, 1],\n strides=[1, 1],\n dilations=[2, 1],\n padding=\"VALID\",\n data_format=data_format,\n use_gpu=use_gpu,\n err=1e-5)\n\n @test_util.deprecated_graph_mode_only\n def testConv2D2x2Depth1ValidBackpropInputDilation1x2(self):\n if test.is_gpu_available(cuda_only=True) or test_util.IsMklEnabled():\n for (data_format, use_gpu) in GetTestConfigs():\n self._RunAndVerifyBackpropInputDilation(\n input_sizes=[1, 2, 3, 1],\n filter_sizes=[2, 2, 1, 1],\n output_sizes=[1, 1, 2, 1],\n strides=[1, 1],\n dilations=[1, 2],\n padding=\"VALID\",\n data_format=data_format,\n use_gpu=use_gpu,\n err=1e-5)\n\n @test_util.deprecated_graph_mode_only\n def testConv2DEmptyBackpropInputDilation1x2(self):\n if test.is_gpu_available(cuda_only=True) or test_util.IsMklEnabled():\n for (data_format, use_gpu) in GetTestConfigs():\n self._RunAndVerifyBackpropInputDilation(\n input_sizes=[0, 2, 3, 1],\n filter_sizes=[2, 2, 1, 1],\n output_sizes=[0, 1, 2, 1],\n strides=[1, 1],\n dilations=[1, 2],\n padding=\"VALID\",\n data_format=data_format,\n use_gpu=use_gpu,\n err=1e-5)\n\n @test_util.deprecated_graph_mode_only\n def testConv2D2x2Depth3ValidBackpropInputDilation2x1(self):\n if test.is_gpu_available(cuda_only=True) or test_util.IsMklEnabled():\n for (data_format, use_gpu) in GetTestConfigs():\n # The GPU version of this test is not very stable. 
So adjusting the\n # error threshold to 1e-4.\n self._RunAndVerifyBackpropInputDilation(\n input_sizes=[1, 3, 2, 3],\n filter_sizes=[2, 2, 3, 3],\n output_sizes=[1, 1, 2, 3],\n strides=[1, 1],\n dilations=[2, 1],\n padding=\"VALID\",\n data_format=data_format,\n use_gpu=use_gpu,\n err=1e-4)\n\n @test_util.deprecated_graph_mode_only\n def testConv2DKernelSizeMatchesInputSizeBackpropInputDilation2x2(self):\n if test.is_gpu_available(cuda_only=True) or test_util.IsMklEnabled():\n for (data_format, use_gpu) in GetTestConfigs():\n self._RunAndVerifyBackpropInputDilation(\n input_sizes=[1, 3, 3, 1],\n filter_sizes=[2, 2, 1, 2],\n output_sizes=[1, 1, 1, 2],\n strides=[1, 1],\n dilations=[2, 2],\n padding=\"VALID\",\n data_format=data_format,\n use_gpu=use_gpu,\n err=1e-5)\n\n def _RunAndVerifyBackpropInputExplicitPadding(self,\n input_sizes,\n filter_sizes,\n output_sizes,\n strides,\n padding,\n data_format,\n use_gpu,\n dilations=(1, 1),\n err=2e-5):\n if use_gpu and not test.is_gpu_available(cuda_only=True):\n return\n if not use_gpu and dilations != (1, 1):\n return # Non-default dilations is currently not supported on the CPU.\n\n x1 = self._CreateNumpyTensor(filter_sizes)\n x2 = self._CreateNumpyTensor(output_sizes)\n dilations = list(dilations)\n padded_input_sizes = input_sizes[:]\n padded_input_sizes[1] += padding[0][0] + padding[0][1]\n padded_input_sizes[2] += padding[1][0] + padding[1][1]\n c = nn_ops.conv2d_backprop_input(\n padded_input_sizes,\n x1,\n x2,\n strides=[1] + strides + [1],\n padding=\"VALID\",\n dilations=[1] + dilations + [1])\n c = c[:, padding[0][0]:(c.shape[1] - padding[0][1]), padding[1][0]:(\n c.shape[2] - padding[1][1]), :]\n expected = list(self.evaluate(array_ops.reshape(c, [-1])))\n self._RunAndVerifyBackpropInput(\n input_sizes,\n filter_sizes,\n output_sizes,\n strides,\n padding,\n expected,\n data_format,\n use_gpu=use_gpu,\n err=err,\n dilations=dilations)\n\n @test_util.run_in_graph_and_eager_modes()\n def testConv2D2x2Depth1Padding0x0BackpropInput(self):\n for (data_format, use_gpu) in GetTestConfigs():\n self._RunAndVerifyBackpropInputExplicitPadding(\n input_sizes=[1, 2, 3, 1],\n filter_sizes=[2, 2, 1, 1],\n output_sizes=[1, 1, 2, 1],\n strides=[1, 1],\n padding=[[0, 0], [0, 0]],\n data_format=data_format,\n use_gpu=use_gpu)\n\n self._RunAndVerifyBackpropInputExplicitPadding(\n input_sizes=[1, 3, 4, 2],\n filter_sizes=[2, 2, 2, 3],\n output_sizes=[1, 1, 2, 3],\n strides=[2, 2],\n padding=[[0, 0], [0, 0]],\n data_format=data_format,\n use_gpu=use_gpu)\n\n @test_util.run_in_graph_and_eager_modes()\n def testConv2D2x2Depth1Padding1x1BackpropInput(self):\n for (data_format, use_gpu) in GetTestConfigs():\n self._RunAndVerifyBackpropInputExplicitPadding(\n input_sizes=[1, 2, 3, 1],\n filter_sizes=[2, 2, 1, 2],\n output_sizes=[1, 3, 4, 2],\n strides=[1, 1],\n padding=[[1, 1], [1, 1]],\n data_format=data_format,\n use_gpu=use_gpu,\n err=1e-4)\n\n self._RunAndVerifyBackpropInputExplicitPadding(\n input_sizes=[1, 2, 3, 2],\n filter_sizes=[1, 1, 2, 1],\n output_sizes=[1, 4, 3, 1],\n strides=[1, 2],\n padding=[[1, 1], [1, 1]],\n data_format=data_format,\n use_gpu=use_gpu)\n\n self._RunAndVerifyBackpropInputExplicitPadding(\n input_sizes=[1, 4, 3, 1],\n filter_sizes=[2, 2, 1, 1],\n output_sizes=[1, 4, 2, 1],\n strides=[1, 2],\n padding=[[1, 1], [1, 1]],\n data_format=data_format,\n dilations=[2, 2], use_gpu=use_gpu)\n\n @test_util.run_in_graph_and_eager_modes()\n def testConv2D2x2Depth1Padding2x2BackpropInput(self):\n for (data_format, use_gpu) in 
GetTestConfigs():\n self._RunAndVerifyBackpropInputExplicitPadding(\n input_sizes=[2, 3, 1, 1],\n filter_sizes=[2, 1, 1, 1],\n output_sizes=[2, 2, 5, 1],\n strides=[3, 1],\n padding=[[2, 2], [2, 2]],\n data_format=data_format,\n use_gpu=use_gpu)\n\n self._RunAndVerifyBackpropInputExplicitPadding(\n input_sizes=[1, 3, 6, 1],\n filter_sizes=[3, 2, 1, 1],\n output_sizes=[1, 3, 4, 1],\n strides=[1, 2],\n padding=[[2, 2], [2, 2]],\n data_format=data_format,\n dilations=[2, 3],\n use_gpu=use_gpu)\n\n @test_util.run_in_graph_and_eager_modes()\n def testConv2D2x2Depth1Padding_1_8_4_1_BackpropInput(self):\n for (data_format, use_gpu) in GetTestConfigs():\n self._RunAndVerifyBackpropInputExplicitPadding(\n input_sizes=[1, 2, 3, 1],\n filter_sizes=[2, 2, 1, 1],\n output_sizes=[1, 10, 8, 1],\n strides=[1, 1],\n padding=[[1, 8], [4, 2]],\n data_format=data_format,\n use_gpu=use_gpu,\n err=5e-5)\n\n self._RunAndVerifyBackpropInputExplicitPadding(\n input_sizes=[1, 5, 3, 1],\n filter_sizes=[3, 2, 1, 1],\n output_sizes=[1, 4, 8, 1],\n strides=[3, 1],\n padding=[[1, 8], [4, 2]],\n data_format=data_format,\n use_gpu=use_gpu)\n\n @test_util.run_in_graph_and_eager_modes()\n def testConv2D2x2Depth1Padding_5_0_2_2_BackpropInput(self):\n for (data_format, use_gpu) in GetTestConfigs():\n self._RunAndVerifyBackpropInputExplicitPadding(\n input_sizes=[1, 3, 3, 1],\n filter_sizes=[2, 1, 1, 1],\n output_sizes=[1, 7, 7, 1],\n strides=[1, 1],\n padding=[[5, 0], [2, 2]],\n data_format=data_format,\n err=5e-5,\n use_gpu=use_gpu)\n\n self._RunAndVerifyBackpropInputExplicitPadding(\n input_sizes=[1, 4, 2, 1],\n filter_sizes=[3, 3, 1, 1],\n output_sizes=[1, 5, 2, 1],\n strides=[1, 2],\n padding=[[5, 0], [2, 2]],\n data_format=data_format,\n dilations=[2, 1],\n use_gpu=use_gpu)\n\n def _RunAndVerifyBackpropFilterExplicitPadding(self,\n input_sizes,\n filter_sizes,\n output_sizes,\n strides,\n padding,\n data_format,\n use_gpu,\n dilations=(1, 1),\n err=1e-5):\n if use_gpu and not test.is_gpu_available(cuda_only=True):\n return\n if not use_gpu and dilations != (1, 1):\n return # Non-default dilations is currently not supported on the CPU.\n\n x0 = self._CreateNumpyTensor(input_sizes)\n x2 = self._CreateNumpyTensor(output_sizes)\n dilations = list(dilations)\n\n x0 = np.pad(x0, [(0, 0)] + padding + [(0, 0)], \"constant\")\n c = nn_ops.conv2d_backprop_filter(\n x0,\n filter_sizes,\n x2,\n strides=[1] + strides + [1],\n padding=\"VALID\",\n dilations=[1] + dilations + [1])\n expected = list(self.evaluate(array_ops.reshape(c, [-1])))\n self._RunAndVerifyBackpropFilter(\n input_sizes,\n filter_sizes,\n output_sizes,\n strides,\n padding,\n expected,\n data_format,\n use_gpu=use_gpu,\n dilations=dilations,\n err=err)\n\n @test_util.run_in_graph_and_eager_modes()\n def testConv2D2x2Depth1Padding0x0BackpropFilter(self):\n for (data_format, use_gpu) in GetTestConfigs():\n self._RunAndVerifyBackpropFilterExplicitPadding(\n input_sizes=[1, 2, 3, 1],\n filter_sizes=[2, 2, 1, 1],\n output_sizes=[1, 1, 2, 1],\n strides=[1, 1],\n padding=[[0, 0], [0, 0]],\n data_format=data_format, use_gpu=use_gpu)\n\n self._RunAndVerifyBackpropFilterExplicitPadding(\n input_sizes=[1, 3, 4, 2],\n filter_sizes=[2, 2, 2, 3],\n output_sizes=[1, 1, 2, 3],\n strides=[2, 2],\n padding=[[0, 0], [0, 0]],\n data_format=data_format, use_gpu=use_gpu)\n\n @test_util.run_in_graph_and_eager_modes()\n def testConv2D2x2Depth1Padding1x1BackpropFilter(self):\n for (data_format, use_gpu) in GetTestConfigs():\n self._RunAndVerifyBackpropFilterExplicitPadding(\n 
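# The helper above derives the expected values by manually padding the input and running conv2d_backprop_filter with VALID padding.\n 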
input_sizes=[1, 2, 3, 1],\n filter_sizes=[2, 2, 1, 2],\n output_sizes=[1, 3, 4, 2],\n strides=[1, 1],\n padding=[[1, 1], [1, 1]],\n data_format=data_format,\n use_gpu=use_gpu,\n err=5e-5)\n\n self._RunAndVerifyBackpropFilterExplicitPadding(\n input_sizes=[1, 2, 3, 2],\n filter_sizes=[1, 1, 2, 1],\n output_sizes=[1, 4, 3, 1],\n strides=[1, 2],\n padding=[[1, 1], [1, 1]],\n use_gpu=use_gpu,\n data_format=data_format)\n\n self._RunAndVerifyBackpropFilterExplicitPadding(\n input_sizes=[1, 4, 3, 1],\n filter_sizes=[2, 2, 1, 1],\n output_sizes=[1, 4, 2, 1],\n strides=[1, 2],\n padding=[[1, 1], [1, 1]],\n data_format=data_format,\n use_gpu=use_gpu,\n dilations=[2, 2])\n\n @test_util.run_in_graph_and_eager_modes()\n def testConv2D2x2Depth1Padding2x2BackpropFilter(self):\n for (data_format, use_gpu) in GetTestConfigs():\n self._RunAndVerifyBackpropFilterExplicitPadding(\n input_sizes=[2, 3, 1, 1],\n filter_sizes=[2, 1, 1, 1],\n output_sizes=[2, 2, 5, 1],\n strides=[3, 1],\n padding=[[2, 2], [2, 2]],\n data_format=data_format,\n use_gpu=use_gpu)\n\n self._RunAndVerifyBackpropFilterExplicitPadding(\n input_sizes=[1, 3, 6, 1],\n filter_sizes=[3, 2, 1, 1],\n output_sizes=[1, 3, 4, 1],\n strides=[1, 2],\n padding=[[2, 2], [2, 2]],\n data_format=data_format,\n use_gpu=use_gpu,\n dilations=[2, 3])\n\n @test_util.run_in_graph_and_eager_modes()\n def testConv2D2x2Depth1Padding_1_8_4_1_BackpropFilter(self):\n for (data_format, use_gpu) in GetTestConfigs():\n self._RunAndVerifyBackpropFilterExplicitPadding(\n input_sizes=[1, 2, 3, 1],\n filter_sizes=[2, 2, 1, 1],\n output_sizes=[1, 10, 8, 1],\n strides=[1, 1],\n padding=[[1, 8], [4, 2]],\n data_format=data_format,\n use_gpu=use_gpu,\n err=1e-4)\n\n self._RunAndVerifyBackpropFilterExplicitPadding(\n input_sizes=[1, 5, 3, 1],\n filter_sizes=[3, 2, 1, 1],\n output_sizes=[1, 4, 8, 1],\n strides=[3, 1],\n padding=[[1, 8], [4, 2]],\n use_gpu=use_gpu,\n data_format=data_format)\n\n @test_util.run_in_graph_and_eager_modes()\n def testConv2D2x2Depth1Padding_5_0_2_2_BackpropFilter(self):\n for (data_format, use_gpu) in GetTestConfigs():\n self._RunAndVerifyBackpropFilterExplicitPadding(\n input_sizes=[1, 3, 3, 1],\n filter_sizes=[2, 1, 1, 1],\n output_sizes=[1, 7, 7, 1],\n strides=[1, 1],\n padding=[[5, 0], [2, 2]],\n data_format=data_format,\n use_gpu=use_gpu,\n err=1e-4)\n\n self._RunAndVerifyBackpropFilterExplicitPadding(\n input_sizes=[1, 4, 2, 1],\n filter_sizes=[3, 3, 1, 1],\n output_sizes=[1, 5, 2, 1],\n strides=[1, 2],\n padding=[[5, 0], [2, 2]],\n data_format=data_format,\n use_gpu=use_gpu,\n dilations=[2, 1])\n\n # Gradient checkers\n def ConstructAndTestGradient(self,\n batch,\n input_rows,\n input_cols,\n filter_rows,\n filter_cols,\n in_depth,\n out_depth,\n stride_rows,\n stride_cols,\n padding,\n test_input,\n data_format,\n use_gpu,\n num_groups=1,\n max_err=0.003):\n assert in_depth % num_groups == 0 and out_depth % num_groups == 0\n input_shape = [batch, input_rows, input_cols, in_depth]\n filter_shape = [filter_rows, filter_cols, in_depth // num_groups, out_depth]\n # TODO(yangke): re-factor the computation of output shape.\n if padding == \"VALID\":\n output_rows = (input_rows - filter_rows + stride_rows) // stride_rows\n output_cols = (input_cols - filter_cols + stride_cols) // stride_cols\n elif padding == \"SAME\":\n output_rows = (input_rows + stride_rows - 1) // stride_rows\n output_cols = (input_cols + stride_cols - 1) // stride_cols\n else:\n self.assertIsInstance(padding, (list, tuple))\n output_rows = (input_rows + padding[1][0] + 
padding[1][1] - filter_rows +\n stride_rows) // stride_rows\n output_cols = (input_cols + padding[2][0] + padding[2][1] - filter_cols +\n stride_cols) // stride_cols\n output_shape = [batch, output_rows, output_cols, out_depth]\n input_size = 1\n for x in input_shape:\n input_size *= x\n filter_size = 1\n for x in filter_shape:\n filter_size *= x\n input_data = [x * 1.0 / input_size for x in range(0, input_size)]\n filter_data = [x * 1.0 / filter_size for x in range(0, filter_size)]\n # Conv2DGrad functions are not compiled for double due to\n # a problem in the way Eigen's Conv2DGrad works for double.\n # So we disable the DOUBLE path. We should re-enable this\n # when double support returns for CPU and/or GPU.\n for dtype in self._DtypesToTest(use_gpu=use_gpu):\n with self.cached_session(use_gpu=use_gpu):\n input_tensor = constant_op.constant(\n input_data, shape=input_shape, dtype=dtype, name=\"input\")\n filter_tensor = constant_op.constant(\n filter_data, shape=filter_shape, dtype=dtype, name=\"filter\")\n strides = [1, stride_rows, stride_cols, 1]\n new_padding = padding\n if data_format == \"NCHW\":\n new_input_tensor = test_util.NHWCToNCHW(input_tensor)\n strides = test_util.NHWCToNCHW(strides)\n if isinstance(padding, (list, tuple)):\n new_padding = test_util.NHWCToNCHW(padding)\n else:\n new_input_tensor = input_tensor\n conv = nn_ops.conv2d(\n new_input_tensor,\n filter_tensor,\n strides,\n new_padding,\n data_format=data_format,\n name=\"conv\")\n if data_format == \"NCHW\":\n conv = test_util.NCHWToNHWC(conv)\n self.assertEqual(output_shape, conv.get_shape())\n if test_input:\n jacob_t, jacob_n = gradient_checker.compute_gradient(input_tensor,\n input_shape,\n conv,\n output_shape)\n else:\n jacob_t, jacob_n = gradient_checker.compute_gradient(filter_tensor,\n filter_shape,\n conv,\n output_shape)\n if dtype == dtypes.float32:\n reference_jacob_t = jacob_t\n err = np.fabs(jacob_t - jacob_n).max()\n else:\n # Compare fp16 theoretical gradients to fp32 theoretical gradients,\n # since fp16 numerical gradients are too imprecise.\n err = np.fabs(jacob_t - reference_jacob_t).max()\n\n tf_logging.debug(\"conv_2d gradient error = %s\", err)\n self.assertLess(err, max_err)\n\n @test_util.deprecated_graph_mode_only\n def testInputGradientValidPaddingStrideOne(self):\n for (data_format, use_gpu) in GetTestConfigs():\n self.ConstructAndTestGradient(\n batch=2,\n input_rows=5,\n input_cols=4,\n filter_rows=3,\n filter_cols=3,\n in_depth=2,\n out_depth=3,\n stride_rows=1,\n stride_cols=1,\n padding=\"VALID\",\n test_input=True,\n data_format=data_format,\n use_gpu=use_gpu)\n\n @test_util.deprecated_graph_mode_only\n def testFilterGradientValidPaddingStrideOne(self):\n for (data_format, use_gpu) in GetTestConfigs():\n self.ConstructAndTestGradient(\n batch=4,\n input_rows=6,\n input_cols=5,\n filter_rows=2,\n filter_cols=2,\n in_depth=2,\n out_depth=3,\n stride_rows=1,\n stride_cols=1,\n padding=\"VALID\",\n test_input=False,\n data_format=data_format,\n use_gpu=use_gpu)\n\n @test_util.deprecated_graph_mode_only\n def testInputGradientValidPaddingStrideTwo(self):\n for (data_format, use_gpu) in GetTestConfigs():\n self.ConstructAndTestGradient(\n batch=2,\n input_rows=4,\n input_cols=5,\n filter_rows=3,\n filter_cols=3,\n in_depth=2,\n out_depth=3,\n stride_rows=2,\n stride_cols=2,\n padding=\"VALID\",\n test_input=True,\n data_format=data_format,\n use_gpu=use_gpu)\n\n @test_util.deprecated_graph_mode_only\n def testFilterGradientValidPaddingStrideTwo(self):\n for (data_format, use_gpu) in 
GetTestConfigs():\n self.ConstructAndTestGradient(\n batch=4,\n input_rows=6,\n input_cols=5,\n filter_rows=2,\n filter_cols=2,\n in_depth=2,\n out_depth=3,\n stride_rows=2,\n stride_cols=2,\n padding=\"VALID\",\n test_input=False,\n data_format=data_format,\n use_gpu=use_gpu)\n\n @test_util.deprecated_graph_mode_only\n def testInputGradientValidPaddingStrideThree(self):\n for (data_format, use_gpu) in GetTestConfigs():\n self.ConstructAndTestGradient(\n batch=2,\n input_rows=7,\n input_cols=6,\n filter_rows=3,\n filter_cols=3,\n in_depth=4,\n out_depth=5,\n stride_rows=3,\n stride_cols=3,\n padding=\"VALID\",\n test_input=True,\n data_format=data_format,\n use_gpu=use_gpu)\n\n @test_util.deprecated_graph_mode_only\n def testFilterGradientValidPaddingStrideThree(self):\n for (data_format, use_gpu) in GetTestConfigs():\n self.ConstructAndTestGradient(\n batch=2,\n input_rows=8,\n input_cols=7,\n filter_rows=4,\n filter_cols=4,\n in_depth=2,\n out_depth=3,\n stride_rows=3,\n stride_cols=3,\n padding=\"VALID\",\n test_input=False,\n data_format=data_format,\n use_gpu=use_gpu)\n\n @test_util.deprecated_graph_mode_only\n def testInputGradientSamePaddingStrideOne(self):\n for (data_format, use_gpu) in GetTestConfigs():\n self.ConstructAndTestGradient(\n batch=2,\n input_rows=7,\n input_cols=6,\n filter_rows=3,\n filter_cols=3,\n in_depth=2,\n out_depth=3,\n stride_rows=1,\n stride_cols=1,\n padding=\"SAME\",\n test_input=True,\n data_format=data_format,\n use_gpu=use_gpu)\n\n @test_util.deprecated_graph_mode_only\n def testFilterGradientSamePaddingStrideOne(self):\n for (data_format, use_gpu) in GetTestConfigs():\n self.ConstructAndTestGradient(\n batch=4,\n input_rows=6,\n input_cols=5,\n filter_rows=2,\n filter_cols=2,\n in_depth=2,\n out_depth=3,\n stride_rows=1,\n stride_cols=1,\n padding=\"SAME\",\n test_input=False,\n data_format=data_format,\n use_gpu=use_gpu)\n\n @test_util.deprecated_graph_mode_only\n def testInputGradientSamePaddingStrideTwo(self):\n for (data_format, use_gpu) in GetTestConfigs():\n self.ConstructAndTestGradient(\n batch=2,\n input_rows=5,\n input_cols=4,\n filter_rows=3,\n filter_cols=3,\n in_depth=3,\n out_depth=3,\n stride_rows=2,\n stride_cols=2,\n padding=\"SAME\",\n test_input=True,\n data_format=data_format,\n use_gpu=use_gpu)\n\n @test_util.deprecated_graph_mode_only\n def testFilterGradientSamePaddingStrideTwo(self):\n for (data_format, use_gpu) in GetTestConfigs():\n self.ConstructAndTestGradient(\n batch=4,\n input_rows=6,\n input_cols=5,\n filter_rows=2,\n filter_cols=2,\n in_depth=2,\n out_depth=3,\n stride_rows=2,\n stride_cols=2,\n padding=\"SAME\",\n test_input=False,\n data_format=data_format,\n use_gpu=use_gpu)\n\n @test_util.deprecated_graph_mode_only\n def testInputGradientSamePaddingStrideThree(self):\n for (data_format, use_gpu) in GetTestConfigs():\n self.ConstructAndTestGradient(\n batch=2,\n input_rows=7,\n input_cols=6,\n filter_rows=3,\n filter_cols=3,\n in_depth=4,\n out_depth=5,\n stride_rows=3,\n stride_cols=3,\n padding=\"SAME\",\n test_input=True,\n data_format=data_format,\n use_gpu=use_gpu)\n\n @test_util.deprecated_graph_mode_only\n def testFilterGradientSamePaddingStrideThree(self):\n for (data_format, use_gpu) in GetTestConfigs():\n self.ConstructAndTestGradient(\n batch=2,\n input_rows=8,\n input_cols=7,\n filter_rows=4,\n filter_cols=4,\n in_depth=2,\n out_depth=3,\n stride_rows=3,\n stride_cols=3,\n padding=\"SAME\",\n test_input=False,\n data_format=data_format,\n use_gpu=use_gpu)\n\n @test_util.deprecated_graph_mode_only\n def 
testFilterGradientSamePaddingStride2x1(self):\n for (data_format, use_gpu) in GetTestConfigs():\n self.ConstructAndTestGradient(\n batch=2,\n input_rows=8,\n input_cols=7,\n filter_rows=4,\n filter_cols=4,\n in_depth=2,\n out_depth=3,\n stride_rows=2,\n stride_cols=1,\n padding=\"SAME\",\n test_input=False,\n data_format=data_format,\n use_gpu=use_gpu)\n\n @test_util.deprecated_graph_mode_only\n def testInputGradientKernelSizeMatchesInputSize(self):\n for (data_format, use_gpu) in GetTestConfigs():\n self.ConstructAndTestGradient(\n batch=2,\n input_rows=4,\n input_cols=3,\n filter_rows=4,\n filter_cols=3,\n in_depth=2,\n out_depth=3,\n stride_rows=1,\n stride_cols=1,\n padding=\"VALID\",\n test_input=True,\n data_format=data_format,\n use_gpu=use_gpu)\n\n @test_util.deprecated_graph_mode_only\n def testFilterGradientKernelSizeMatchesInputSize(self):\n for (data_format, use_gpu) in GetTestConfigs():\n self.ConstructAndTestGradient(\n batch=2,\n input_rows=4,\n input_cols=3,\n filter_rows=4,\n filter_cols=3,\n in_depth=2,\n out_depth=3,\n stride_rows=1,\n stride_cols=1,\n padding=\"VALID\",\n test_input=False,\n data_format=data_format,\n use_gpu=use_gpu)\n\n @test_util.deprecated_graph_mode_only\n def testInputGradient1x1PaddingStrideOne(self):\n for (data_format, use_gpu) in GetTestConfigs():\n self.ConstructAndTestGradient(\n batch=2,\n input_rows=5,\n input_cols=4,\n filter_rows=3,\n filter_cols=3,\n in_depth=2,\n out_depth=3,\n stride_rows=1,\n stride_cols=1,\n padding=[[0, 0], [1, 1], [1, 1], [0, 0]],\n test_input=True,\n data_format=data_format,\n use_gpu=use_gpu,\n max_err=0.0025)\n\n @test_util.deprecated_graph_mode_only\n def testFilterGradient1x1PaddingStrideOne(self):\n for (data_format, use_gpu) in GetTestConfigs():\n self.ConstructAndTestGradient(\n batch=2,\n input_rows=5,\n input_cols=4,\n filter_rows=3,\n filter_cols=3,\n in_depth=2,\n out_depth=3,\n stride_rows=1,\n stride_cols=1,\n padding=[[0, 0], [1, 1], [1, 1], [0, 0]],\n test_input=False,\n data_format=data_format,\n use_gpu=use_gpu)\n\n @test_util.deprecated_graph_mode_only\n def testInputGradient1x1PaddingStrideTwo(self):\n for (data_format, use_gpu) in GetTestConfigs():\n self.ConstructAndTestGradient(\n batch=2,\n input_rows=4,\n input_cols=5,\n filter_rows=3,\n filter_cols=3,\n in_depth=2,\n out_depth=3,\n stride_rows=2,\n stride_cols=2,\n padding=[[0, 0], [1, 1], [1, 1], [0, 0]],\n test_input=True,\n data_format=data_format,\n use_gpu=use_gpu)\n\n @test_util.deprecated_graph_mode_only\n def testFilterGradient1x1PaddingStrideTwo(self):\n for (data_format, use_gpu) in GetTestConfigs():\n self.ConstructAndTestGradient(\n batch=2,\n input_rows=4,\n input_cols=5,\n filter_rows=3,\n filter_cols=3,\n in_depth=2,\n out_depth=3,\n stride_rows=2,\n stride_cols=2,\n padding=[[0, 0], [1, 1], [1, 1], [0, 0]],\n test_input=False,\n data_format=data_format,\n use_gpu=use_gpu)\n\n @test_util.deprecated_graph_mode_only\n def testInputGradient2x2PaddingStrideOne(self):\n for (data_format, use_gpu) in GetTestConfigs():\n self.ConstructAndTestGradient(\n batch=2,\n input_rows=5,\n input_cols=4,\n filter_rows=3,\n filter_cols=3,\n in_depth=2,\n out_depth=3,\n stride_rows=1,\n stride_cols=1,\n padding=[[0, 0], [2, 2], [2, 2], [0, 0]],\n test_input=True,\n data_format=data_format,\n use_gpu=use_gpu,\n max_err=0.003)\n\n @test_util.deprecated_graph_mode_only\n def testFilterGradient2x2PaddingStrideOne(self):\n for (data_format, use_gpu) in GetTestConfigs():\n self.ConstructAndTestGradient(\n batch=2,\n input_rows=5,\n input_cols=4,\n 
filter_rows=3,\n filter_cols=3,\n in_depth=2,\n out_depth=3,\n stride_rows=1,\n stride_cols=1,\n padding=[[0, 0], [2, 2], [2, 2], [0, 0]],\n test_input=False,\n data_format=data_format,\n use_gpu=use_gpu,\n max_err=0.003)\n\n @test_util.deprecated_graph_mode_only\n def testInputGradient1_2_3_4PaddingStride3x2(self):\n for (data_format, use_gpu) in GetTestConfigs():\n self.ConstructAndTestGradient(\n batch=2,\n input_rows=8,\n input_cols=5,\n filter_rows=4,\n filter_cols=2,\n in_depth=3,\n out_depth=2,\n stride_rows=3,\n stride_cols=2,\n padding=[[0, 0], [1, 2], [3, 4], [0, 0]],\n test_input=True,\n data_format=data_format,\n use_gpu=use_gpu)\n\n @test_util.deprecated_graph_mode_only\n def testFilterGradient1_2_3_4PaddingStride3x2(self):\n for (data_format, use_gpu) in GetTestConfigs():\n self.ConstructAndTestGradient(\n batch=2,\n input_rows=8,\n input_cols=5,\n filter_rows=4,\n filter_cols=2,\n in_depth=3,\n out_depth=2,\n stride_rows=3,\n stride_cols=2,\n padding=[[0, 0], [1, 2], [3, 4], [0, 0]],\n test_input=False,\n data_format=data_format,\n use_gpu=use_gpu)\n\n @test_util.deprecated_graph_mode_only\n def testInputGradient4_3_2_1PaddingStride2x1(self):\n for (data_format, use_gpu) in GetTestConfigs():\n self.ConstructAndTestGradient(\n batch=3,\n input_rows=5,\n input_cols=7,\n filter_rows=3,\n filter_cols=2,\n in_depth=1,\n out_depth=2,\n stride_rows=2,\n stride_cols=1,\n padding=[[0, 0], [4, 3], [2, 1], [0, 0]],\n test_input=True,\n data_format=data_format,\n use_gpu=use_gpu)\n\n @test_util.deprecated_graph_mode_only\n def testFilterGradient4_3_2_1PaddingStride2x1(self):\n for (data_format, use_gpu) in GetTestConfigs():\n self.ConstructAndTestGradient(\n batch=3,\n input_rows=5,\n input_cols=7,\n filter_rows=3,\n filter_cols=2,\n in_depth=1,\n out_depth=2,\n stride_rows=2,\n stride_cols=1,\n padding=[[0, 0], [4, 3], [2, 1], [0, 0]],\n test_input=False,\n data_format=data_format,\n use_gpu=use_gpu)\n\n @test_util.deprecated_graph_mode_only\n def testInputGradient0_0_0_5PaddingStride1x2(self):\n for (data_format, use_gpu) in GetTestConfigs():\n self.ConstructAndTestGradient(\n batch=2,\n input_rows=6,\n input_cols=7,\n filter_rows=3,\n filter_cols=4,\n in_depth=3,\n out_depth=2,\n stride_rows=1,\n stride_cols=2,\n padding=[[0, 0], [0, 0], [0, 5], [0, 0]],\n test_input=True,\n data_format=data_format,\n use_gpu=use_gpu)\n\n @test_util.deprecated_graph_mode_only\n def testFilterGradient0_0_0_5PaddingStride1x2(self):\n for (data_format, use_gpu) in GetTestConfigs():\n self.ConstructAndTestGradient(\n batch=2,\n input_rows=6,\n input_cols=7,\n filter_rows=3,\n filter_cols=4,\n in_depth=3,\n out_depth=2,\n stride_rows=1,\n stride_cols=2,\n padding=[[0, 0], [0, 0], [0, 5], [0, 0]],\n test_input=False,\n data_format=data_format,\n use_gpu=use_gpu)\n\n @test_util.deprecated_graph_mode_only\n def testShapeFunctionEdgeCases(self):\n # All shapes unknown.\n c1 = nn_ops.conv2d(\n array_ops.placeholder(dtypes.float32),\n array_ops.placeholder(dtypes.float32),\n strides=[1, 1, 1, 1],\n padding=\"SAME\")\n self.assertEqual([None, None, None, None], c1.get_shape().as_list())\n\n # Incorrect input shape.\n with self.assertRaises(ValueError):\n nn_ops.conv2d(\n array_ops.placeholder(\n dtypes.float32, shape=[1, 3]),\n array_ops.placeholder(dtypes.float32),\n strides=[1, 1, 1, 1],\n padding=\"SAME\")\n\n # Incorrect filter shape.\n with self.assertRaises(ValueError):\n nn_ops.conv2d(\n array_ops.placeholder(dtypes.float32),\n array_ops.placeholder(\n dtypes.float32, shape=[1, 3]),\n strides=[1, 1, 1, 
1],\n padding=\"SAME\")\n\n # Depth mismatch.\n with self.assertRaises(ValueError):\n nn_ops.conv2d(\n array_ops.placeholder(\n dtypes.float32, shape=[32, 20, 20, 3]),\n array_ops.placeholder(\n dtypes.float32, shape=[4, 4, 2, 2]),\n strides=[1, 1, 1, 1],\n padding=\"SAME\")\n\n # Input depth divisible by filter depth (group convolution).\n # No exceptions should appear.\n nn_ops.conv2d(\n array_ops.placeholder(dtypes.float32, shape=[32, 20, 20, 8]),\n array_ops.placeholder(dtypes.float32, shape=[4, 4, 2, 16]),\n strides=[1, 1, 1, 1],\n padding=\"SAME\")\n\n # Negative padding.\n with self.assertRaises(ValueError):\n nn_ops.conv2d(\n array_ops.placeholder(dtypes.float32),\n array_ops.placeholder(dtypes.float32),\n strides=[1, 1, 1, 1],\n padding=[[0, 0], [0, -1], [1, 2], [0, 0]])\n\n # Nonzero padding in nonspatial dimension.\n with self.assertRaises(ValueError):\n nn_ops.conv2d(\n array_ops.placeholder(dtypes.float32),\n array_ops.placeholder(dtypes.float32),\n strides=[1, 1, 1, 1],\n padding=[[1, 0], [0, 0], [0, 0], [0, 0]])\n\n # Nonzero NCHW padding in nonspatial dimension.\n with self.assertRaises(ValueError):\n nn_ops.conv2d(\n array_ops.placeholder(dtypes.float32),\n array_ops.placeholder(dtypes.float32),\n strides=[1, 1, 1, 1],\n padding=[[0, 0], [0, 1], [0, 0], [0, 0]],\n data_format=\"NCHW\")\n\n # Wrong amount of padding\n with self.assertRaises(ValueError):\n nn_ops.conv2d(\n array_ops.placeholder(dtypes.float32),\n array_ops.placeholder(dtypes.float32),\n strides=[1, 1, 1, 1],\n padding=[[0, 0], [0, 0], [0, 0]])\n\n # Only specify one padding amount per dimension\n with self.assertRaises(ValueError):\n nn_ops.conv2d(\n array_ops.placeholder(dtypes.float32),\n array_ops.placeholder(dtypes.float32),\n strides=[1, 1, 1, 1],\n padding=[[0], [0], [0], [0]])\n\n # Explicit padding elements are not lists\n with self.assertRaises(ValueError):\n nn_ops.conv2d(\n array_ops.placeholder(dtypes.float32),\n array_ops.placeholder(dtypes.float32),\n strides=[1, 1, 1, 1],\n padding=[0, 0, 0, 0])\n\n @test_util.deprecated_graph_mode_only\n def testOpEdgeCases(self):\n with self.cached_session() as sess:\n # Illegal strides.\n with self.assertRaisesRegex(errors_impl.UnimplementedError,\n \"strides in the batch and depth\"):\n input_placeholder = array_ops.placeholder(dtypes.float32)\n input_val = np.ones([10, 10])\n filter_placeholder = array_ops.placeholder(dtypes.float32)\n filter_val = np.ones([10, 10])\n sess.run(\n nn_ops.conv2d(\n input_placeholder,\n filter_placeholder,\n strides=[2, 1, 1, 1],\n padding=\"SAME\"),\n feed_dict={\n input_placeholder: input_val,\n filter_placeholder: filter_val\n })\n with self.assertRaisesRegex(errors_impl.UnimplementedError,\n \"strides in the batch and depth\"):\n input_placeholder = array_ops.placeholder(dtypes.float32)\n filter_placeholder = array_ops.placeholder(dtypes.float32)\n input_val = np.ones([10, 10])\n filter_val = np.ones([10, 10])\n sess.run(\n nn_ops.conv2d(\n input_placeholder,\n filter_placeholder,\n strides=[1, 1, 1, 2],\n padding=\"SAME\"),\n feed_dict={\n input_placeholder: input_val,\n filter_placeholder: filter_val\n })\n\n # Filter larger than input.\n with self.assertRaisesRegex(ValueError, \"Negative dimension size\"):\n input_placeholder = array_ops.placeholder(\n dtypes.float32, shape=[32, 20, 20, 3])\n input_val = np.ones([32, 20, 20, 3])\n filter_placeholder = array_ops.placeholder(\n dtypes.float32, shape=[20, 21, 3, 2])\n filter_val = np.ones([20, 21, 3, 2])\n\n sess.run(\n nn_ops.conv2d(\n input_placeholder,\n 
filter_placeholder,\n strides=[1, 1, 1, 1],\n padding=\"VALID\"),\n feed_dict={\n input_placeholder: input_val,\n filter_placeholder: filter_val\n })\n with self.assertRaisesRegex(ValueError, \"Negative dimension size\"):\n input_placeholder = array_ops.placeholder(\n dtypes.float32, shape=[32, 20, 20, 3])\n input_val = np.ones([32, 20, 20, 3])\n filter_placeholder = array_ops.placeholder(\n dtypes.float32, shape=[21, 20, 3, 2])\n filter_val = np.ones([21, 20, 3, 2])\n sess.run(\n nn_ops.conv2d(\n input_placeholder,\n filter_placeholder,\n strides=[1, 1, 1, 1],\n padding=\"VALID\"),\n feed_dict={\n input_placeholder: input_val,\n filter_placeholder: filter_val\n })\n\n # Filter larger than input + padding.\n with self.assertRaisesRegex(ValueError, \"Negative dimension size\"):\n input_placeholder = array_ops.placeholder(\n dtypes.float32, shape=[32, 20, 20, 3])\n input_val = np.ones([32, 20, 20, 3])\n filter_placeholder = array_ops.placeholder(\n dtypes.float32, shape=[24, 25, 3, 2])\n filter_val = np.ones([24, 25, 3, 2])\n sess.run(\n nn_ops.conv2d(\n input_placeholder,\n filter_placeholder,\n strides=[1, 1, 1, 1],\n padding=[[0, 0], [2, 2], [2, 2], [0, 0]]),\n feed_dict={\n input_placeholder: input_val,\n filter_placeholder: filter_val\n })\n\n # Negative padding during backprop.\n with self.assertRaisesRegex(\n errors_impl.InvalidArgumentError,\n \"All elements of explicit_paddings must be nonnegative\"):\n filter_placeholder = array_ops.placeholder(\n dtypes.float32, shape=[18, 18, 3, 2])\n filter_val = np.ones([18, 18, 3, 2])\n out_backprop = array_ops.placeholder(\n dtypes.float32, shape=[32, 3, 2, 2])\n out_backprop_val = np.ones([32, 3, 2, 2])\n sess.run(\n nn_ops.conv2d_backprop_input([32, 20, 20, 3],\n filter_placeholder,\n out_backprop,\n strides=[1, 1, 1, 1],\n padding=[[0, 0], [-1, 0], [0, 0],\n [0, 0]]),\n feed_dict={\n filter_placeholder: filter_val,\n out_backprop: out_backprop_val\n })\n with self.assertRaisesRegex(\n errors_impl.InvalidArgumentError,\n \"All elements of explicit_paddings must be nonnegative\"):\n input_placeholder = array_ops.placeholder(\n dtypes.float32, shape=[32, 20, 20, 3])\n input_val = np.ones([32, 20, 20, 3])\n out_backprop = array_ops.placeholder(\n dtypes.float32, shape=[32, 3, 2, 2])\n out_backprop_val = np.ones([32, 3, 2, 2])\n sess.run(\n nn_ops.conv2d_backprop_filter(\n input_placeholder, [18, 18, 3, 2],\n out_backprop,\n strides=[1, 1, 1, 1],\n padding=[[0, 0], [-1, 0], [0, 0], [0, 0]]),\n feed_dict={\n input_placeholder: input_val,\n out_backprop: out_backprop_val\n })\n\n\nclass DepthwiseConv2DTest(test.TestCase):\n\n def _VerifyValues(self, tensor_in_sizes, filter_in_sizes, stride, padding,\n expected):\n \"\"\"Verifies the output values of the convolution function.\n\n Args:\n tensor_in_sizes: Input tensor dimensions in\n [batch, input_rows, input_cols, input_depth].\n filter_in_sizes: Filter tensor dimensions in\n [filter_rows, filter_cols, input_depth, depth_multiplier].\n stride: Stride.\n padding: Padding type.\n expected: An array containing the expected operation outputs.\n \"\"\"\n total_size_1 = 1\n total_size_2 = 1\n for s in tensor_in_sizes:\n total_size_1 *= s\n for s in filter_in_sizes:\n total_size_2 *= s\n # Initializes the input tensor with array containing incrementing\n # numbers from 1.\n x1 = [f * 1.0 for f in range(1, total_size_1 + 1)]\n x2 = [f * 1.0 for f in range(1, total_size_2 + 1)]\n with self.cached_session() as sess:\n t1 = constant_op.constant(x1, shape=tensor_in_sizes)\n t1.set_shape(tensor_in_sizes)\n t2 
= constant_op.constant(x2, shape=filter_in_sizes)\n conv = nn_impl.depthwise_conv2d(\n t1, t2, strides=[1, stride, stride, 1], padding=padding)\n value = self.evaluate(conv)\n tf_logging.debug(\"value = %s\", value)\n self.assertArrayNear(expected, np.ravel(value), 1e-5)\n self.assertShapeEqual(value, conv)\n\n def testConv2D2x2Filter(self):\n # The inputs look like this (it's a 3 x 2 matrix, each of depth 2):\n #\n # [ (1.0, 2.0), (3.0, 4.0), ( 5.0, 6.0) ]\n # [ (7.0, 8.0), (9.0, 10.0), (11.0, 12.0) ]\n # We can view this as two inputs\n #\n # input depth 0:\n #\n # [ 1.0, 3.0, 5.0 ]\n # [ 7.0, 9.0, 11.0 ]\n #\n # input depth 1:\n #\n # [ 2.0, 4.0, 6.0 ]\n # [ 8.0, 10.0, 12.0 ]\n #\n # The filter looks like this (it has two 2 x 2 patches, each generating 2\n # depths):\n #\n # filter #0:\n #\n # [ (1.0, 3.0), ( 5.0, 7.0)]\n # [ (9.0, 11.0), (13.0, 15.0)]\n #\n # filter #1:\n #\n # [ ( 2.0, 4.0), ( 6.0, 8.0)]\n # [ (10.0, 12.0), (14.0, 16.0)]\n #\n # So the outputs are:\n #\n # (position 0, 0: in_depth 0, output_depth 0 -- using filter #0)\n # 1.0 * 1.0 + 7.0 * 9.0 + 3.0 * 5.0 + 9.0 * 13.0 = 196\n # (position 0, 0: in_depth 0, output_depth 1 -- using filter #1)\n # 1.0 * 2.0 + 7.0 * 10.0 + 3.0 * 6.0 + 9.0 * 14.0 = 216\n # (position 0, 0: in_depth 1, output_depth 2 -- using filter #0)\n # 2.0 * 3.0 + 8.0 * 11.0 + 4.0 * 7.0 + 10.0 * 15.0 = 272\n # (position 0, 0: in_depth 1, output_depth 3 -- using filter #1)\n # 2.0 * 4.0 + 8.0 * 12.0 + 4.0 * 8.0 + 10.0 * 16.0 = 296\n #\n # (position 1, 0: in_depth 0, output_depth 0 -- using filter #0)\n # 3.0 * 1.0 + 9.0 * 9.0 + 5.0 * 5.0 + 11.0 * 13.0 = 252\n # (position 1, 0: in_depth 0, output_depth 1 -- using filter #1)\n # 3.0 * 2.0 + 9.0 * 10.0 + 5.0 * 6.0 + 11.0 * 14.0 = 280\n # (position 1, 0: in_depth 1, output_depth 2 -- using filter #0)\n # 4.0 * 3.0 + 10.0 * 11.0 + 6.0 * 7.0 + 12.0 * 15.0 = 344\n # (position 1, 0: in_depth 1, output_depth 3 -- using filter #1)\n # 4.0 * 4.0 + 10.0 * 12.0 + 6.0 * 8.0 + 12.0 * 16.0 = 376\n expected_output = [196, 216, 272, 296, 252, 280, 344, 376]\n self._VerifyValues(\n tensor_in_sizes=[1, 2, 3, 2],\n filter_in_sizes=[2, 2, 2, 2],\n stride=1,\n padding=\"VALID\",\n expected=expected_output)\n\n\nclass SeparableConv2DTest(test.TestCase):\n\n def _InitValues(self, sizes):\n \"\"\"Initializes values for input tensors.\n\n Args:\n sizes: Tensor dimensions.\n\n Returns:\n Tensor initialized to values.\n \"\"\"\n total_size = 1\n for s in sizes:\n total_size *= s\n x = [f * 0.5 for f in range(1, total_size + 1)]\n return constant_op.constant(x, shape=sizes)\n\n def _VerifyValues(self,\n tensor_in_sizes,\n depthwise_filter_in_sizes,\n pointwise_filter_in_sizes,\n stride,\n padding,\n expected,\n data_format=\"NHWC\"):\n \"\"\"Verifies the output values of the separable convolution function.\n\n Args:\n tensor_in_sizes: Input tensor dimensions.\n depthwise_filter_in_sizes: Depthwise filter tensor dimensions.\n pointwise_filter_in_sizes: Pointwise filter tensor dimensions.\n stride: Stride.\n padding: Padding type.\n expected: An array containing the expected operation outputs.\n data_format: string data format for input tensor.\n \"\"\"\n with self.cached_session(use_gpu=True) as sess:\n t1 = self._InitValues(tensor_in_sizes)\n f1 = self._InitValues(depthwise_filter_in_sizes)\n f1.set_shape(depthwise_filter_in_sizes)\n f2 = self._InitValues(pointwise_filter_in_sizes)\n\n real_t1 = t1\n strides = [1, stride, stride, 1]\n if data_format == \"NCHW\":\n real_t1 = array_ops.transpose(t1, [0, 3, 1, 2])\n strides = [1, 1, 
stride, stride]\n if isinstance(padding, list):\n padding = [padding[0], padding[3], padding[1], padding[2]]\n\n conv = nn_impl.separable_conv2d(\n real_t1,\n f1,\n f2,\n strides=strides,\n padding=padding,\n data_format=data_format)\n\n if data_format == \"NCHW\":\n conv = array_ops.transpose(conv, [0, 2, 3, 1])\n\n value = self.evaluate(conv)\n tf_logging.debug(\"value = %s\", value)\n self.assertArrayNear(expected, np.ravel(value), 2e-3)\n self.assertShapeEqual(value, conv)\n\n def _testSeparableConv2D(self, data_format):\n # The output is the result of two convolutions:\n # First with tensor_in[1, 4, 4, 2] * filter1[2, 2, 2, 3].\n # Second with intermediate_out[1, 4, 4, 6] * filter2[1, 1, 6, 7].\n # Complexity is O(2*3*2*2 + 6*7*1*1) as opposed to O(2*7*2*2).\n expected_output = [\n 6644.5, 6971.5, 7298.5, 7625.5, 7952.5, 8279.5, 8606.5, 8154.5, 8556.5,\n 8958.5, 9360.5, 9762.5, 10164.5, 10566.5, 9664.5, 10141.5, 10618.5,\n 11095.5, 11572.5, 12049.5, 12526.5, 4145.5, 4346.5, 4547.5, 4748.5,\n 4949.5, 5150.5, 5351.5, 12684.5, 13311.5, 13938.5, 14565.5, 15192.5,\n 15819.5, 16446.5, 14194.5, 14896.5, 15598.5, 16300.5, 17002.5, 17704.5,\n 18406.5, 15704.5, 16481.5, 17258.5, 18035.5, 18812.5, 19589.5, 20366.5,\n 6499.5, 6814.5, 7129.5, 7444.5, 7759.5, 8074.5, 8389.5, 18724.5,\n 19651.5, 20578.5, 21505.5, 22432.5, 23359.5, 24286.5, 20234.5, 21236.5,\n 22238.5, 23240.5, 24242.5, 25244.5, 26246.5, 21744.5, 22821.5, 23898.5,\n 24975.5, 26052.5, 27129.5, 28206.5, 8853.5, 9282.5, 9711.5, 10140.5,\n 10569.5, 10998.5, 11427.5, 5746.75, 6010.75, 6274.75, 6538.75, 6802.75,\n 7066.75, 7330.75, 6168.75, 6452.25, 6735.75, 7019.25, 7302.75, 7586.25,\n 7869.75, 6590.75, 6893.75, 7196.75, 7499.75, 7802.75, 8105.75, 8408.75,\n 2036.25, 2119.5, 2202.75, 2286.0, 2369.25, 2452.5, 2535.75\n ]\n\n self._VerifyValues(\n tensor_in_sizes=[1, 4, 4, 2],\n depthwise_filter_in_sizes=[2, 2, 2, 3],\n pointwise_filter_in_sizes=[1, 1, 6, 7],\n stride=1,\n padding=\"SAME\",\n expected=expected_output,\n data_format=data_format)\n\n def testSeparableConv2D(self):\n self._testSeparableConv2D(\"NHWC\")\n\n def disabledtestSeparableConv2DNCHW(self):\n if not test.is_gpu_available():\n return\n self._testSeparableConv2D(\"NCHW\")\n\n def _testSeparableConv2DEqualInputOutputDepth(self, data_format):\n # The output is the result of two convolutions:\n # First with tensor_in[1, 4, 4, 2] * filter1[2, 2, 3, 3].\n # Second with intermediate_out[1, 4, 4, 6] * filter2[1, 1, 6, 6].\n # Complexity is O(2*3*2*2 + 6*6*1*1) as opposed to O(2*6*2*2).\n expected_output = [\n 5742.0, 6069.0, 6396.0, 6723.0, 7050.0, 7377.0, 7047.0, 7449.0, 7851.0,\n 8253.0, 8655.0, 9057.0, 8352.0, 8829.0, 9306.0, 9783.0, 10260.0,\n 10737.0, 3582.0, 3783.0, 3984.0, 4185.0, 4386.0, 4587.0, 10962.0,\n 11589.0, 12216.0, 12843.0, 13470.0, 14097.0, 12267.0, 12969.0, 13671.0,\n 14373.0, 15075.0, 15777.0, 13572.0, 14349.0, 15126.0, 15903.0, 16680.0,\n 17457.0, 5616.0, 5931.0, 6246.0, 6561.0, 6876.0, 7191.0, 16182.0,\n 17109.0, 18036.0, 18963.0, 19890.0, 20817.0, 17487.0, 18489.0, 19491.0,\n 20493.0, 21495.0, 22497.0, 18792.0, 19869.0, 20946.0, 22023.0, 23100.0,\n 24177.0, 7650.0, 8079.0, 8508.0, 8937.0, 9366.0, 9795.0, 4963.5, 5227.5,\n 5491.5, 5755.5, 6019.5, 6283.5, 5328.0, 5611.5, 5895.0, 6178.5, 6462.0,\n 6745.5, 5692.5, 5995.5, 6298.5, 6601.5, 6904.5, 7207.5, 1757.25, 1840.5,\n 1923.75, 2007.0, 2090.25, 2173.5\n ]\n\n self._VerifyValues(\n tensor_in_sizes=[1, 4, 4, 2],\n depthwise_filter_in_sizes=[2, 2, 2, 3],\n pointwise_filter_in_sizes=[1, 1, 6, 6],\n stride=1,\n 
padding=\"SAME\",\n expected=expected_output,\n data_format=data_format)\n\n @test_util.deprecated_graph_mode_only\n def testSeparableConv2DEqualInputOutputDepth(self):\n self._testSeparableConv2DEqualInputOutputDepth(\"NHWC\")\n\n def testSeparableConv2DEqualInputOutputDepthNCHW(self):\n if not test.is_gpu_available():\n return\n self._testSeparableConv2DEqualInputOutputDepth(\"NCHW\")\n\n def _testSeparableConv2dExplicitPadding(self, data_format):\n tensor_in_sizes = [1, 4, 4, 2]\n depthwise_filter_in_sizes = [2, 2, 2, 3]\n pointwise_filter_in_sizes = [1, 1, 6, 7]\n padding = [[0, 0], [1, 2], [3, 4], [0, 0]]\n with self.cached_session(use_gpu=True):\n # Compute the 'expected' values by manually padding before calling\n # separable_conv2d\n t1 = self._InitValues(tensor_in_sizes)\n t1 = array_ops.pad(t1, padding)\n f1 = self._InitValues(depthwise_filter_in_sizes)\n f1.set_shape(depthwise_filter_in_sizes)\n f2 = self._InitValues(pointwise_filter_in_sizes)\n conv = nn_impl.separable_conv2d(\n t1,\n f1,\n f2,\n strides=[1, 1, 1, 1],\n padding=\"VALID\",\n data_format=\"NHWC\")\n expected = self.evaluate(conv)\n expected = np.ravel(expected)\n self._VerifyValues(\n tensor_in_sizes=tensor_in_sizes,\n depthwise_filter_in_sizes=depthwise_filter_in_sizes,\n pointwise_filter_in_sizes=pointwise_filter_in_sizes,\n stride=1,\n padding=padding,\n expected=expected,\n data_format=data_format)\n\n def testSeparableConv2dExplicitPadding(self):\n self._testSeparableConv2dExplicitPadding(\"NHWC\")\n\n def testSeparableConv2dExplicitPaddingNCHW(self):\n if not test.is_gpu_available():\n return\n self._testSeparableConv2dExplicitPadding(\"NCHW\")\n\n\nclass DeepConv2DTest(test.TestCase):\n\n def _CompareFwdConv2D(self, tensor_in_sizes, filter_in_sizes, conv_strides,\n padding):\n \"\"\"Verifies that DeepConv2D and Conv2D produce the same values.\n\n Args:\n tensor_in_sizes: Input tensor dimensions in\n [batch, input_rows, input_cols, input_depth].\n filter_in_sizes: Filter tensor dimensions in\n [kernel_rows, kernel_cols, input_depth, output_depth].\n conv_strides: [row_stride, col_stride] for the convolution;\n padding: Padding type.\n \"\"\"\n x1 = np.random.rand(*tensor_in_sizes).astype(np.float32)\n x2 = np.random.rand(*filter_in_sizes).astype(np.float32)\n\n with self.cached_session(use_gpu=False) as sess:\n t1 = constant_op.constant(x1, shape=tensor_in_sizes)\n t2 = constant_op.constant(x2, shape=filter_in_sizes)\n strides = [1] + conv_strides + [1]\n\n conv = nn_ops.conv2d(t1, t2, strides=strides, padding=padding)\n\n os.environ[\"TF_USE_DEEP_CONV2D\"] = \"0\"\n values_expect = self.evaluate([conv])\n\n os.environ[\"TF_USE_DEEP_CONV2D\"] = \"1\"\n values_test = self.evaluate([conv])\n\n self.assertAllClose(values_expect, values_test, rtol=1e-5, atol=1e-5)\n\n def _RunTestCases(self, conv_strides, padding):\n input_sizes = [[5, 5, 5, 1248], [3, 17, 17, 192], [2, 35, 35, 288],\n [2, 6, 8, 517], [2, 7, 4, 81], [3, 11, 3, 77]]\n filter_sizes = [[3, 3, 1248, 128], [3, 3, 192, 192], [3, 3, 288, 384],\n [3, 3, 517, 64], [3, 3, 81, 77], [3, 3, 77, 181]]\n for input_shape, filter_shape in zip(input_sizes, filter_sizes):\n self._CompareFwdConv2D(input_shape, filter_shape, conv_strides, padding)\n\n def testConv2D3x3FilterStride1x1Valid(self):\n self._RunTestCases([1, 1], \"VALID\")\n\n def testConv2D3x3FilterStride1x1Same(self):\n self._RunTestCases([1, 1], \"SAME\")\n\n\nclass Conv2DBenchmark(test.Benchmark):\n\n def benchmarkGPUConvStackFirst(self):\n # Benchmark the first iteration of a conv-net with many 
identical conv\n # operations.\n if not test.is_gpu_available():\n return\n\n with ops.Graph().as_default(), session_lib.Session() as session:\n batch_size = 1\n timesteps = 600\n features = 1\n\n inputs = random_ops.random_uniform(\n [batch_size, 1, timesteps, features], seed=1234)\n num_outputs_list = [512] * 40 + [1]\n kernel_w = 3\n x = inputs\n for num_outputs in num_outputs_list:\n x = convolutional.conv2d(x, num_outputs, [1, kernel_w])\n outputs = x\n\n self.evaluate(variables.global_variables_initializer())\n num_iterations = 4\n for iter_index in xrange(num_iterations):\n start = time.time()\n session.run(outputs)\n wall_time = time.time() - start\n self.report_benchmark(\n name=\"conv_stack_iter_%d\" % iter_index, wall_time=wall_time)\n tf_logging.info(\"conv_stack_iter_%d: %.4f\" % (iter_index, wall_time))\n\n def _bench_op(self, name, op, burn_iters, num_iters):\n config = config_pb2.ConfigProto()\n # Prevent Grappler from optimizing away the entire graph.\n config.graph_options.rewrite_options.dependency_optimization = (\n rewriter_config_pb2.RewriterConfig.OFF)\n with session_lib.Session(config=config) as session:\n self.evaluate(variables.global_variables_initializer())\n self.run_op_benchmark(\n session, op, burn_iters=burn_iters, min_iters=num_iters, name=name)\n\n def benchmarkExplicitVsManualPadding(self):\n \"\"\"Compare performance of EXPLICIT padding and calling tf.pad.\n\n A Conv2D op with EXPLICIT padding is benchmarked, and a tf.pad with the same\n padding followed by an equivalent Conv2D op is benchmarked.\n \"\"\"\n if not test.is_gpu_available():\n return\n\n with ops.Graph().as_default():\n burn_iters = 15\n num_iters = 300\n batch_size = 64\n # The input and filter correspond to the first layer of Resnet50.\n input = variables.Variable( # pylint: disable=redefined-builtin\n random_ops.random_uniform([\n batch_size,\n 3,\n 224,\n 224\n ]))\n filter = variables.Variable(random_ops.random_uniform([7, 7, 3, 64])) # pylint: disable=redefined-builtin\n strides = [1, 1, 2, 2]\n padding = [(0, 0), (0, 0), (3, 3), (3, 3)]\n output_explicit_pad = nn_ops.conv2d(\n input, filter, strides, padding=padding, data_format=\"NCHW\")\n input_padded = array_ops.pad(input, padding)\n output_manual_pad = nn_ops.conv2d(\n input_padded, filter, strides, padding=\"VALID\", data_format=\"NCHW\")\n # Benchmark just the forward pass.\n self._bench_op(\"explicit_pad_forward\", output_explicit_pad.op, burn_iters,\n num_iters)\n self._bench_op(\"manual_pad_forward\", output_manual_pad.op, burn_iters,\n num_iters)\n\n # Benchmark both the forward and backwards passes.\n input_grad_explicit_pad, filter_grad_explicit_pad = (\n gradients_impl.gradients(output_explicit_pad, [input, filter]))\n self._bench_op(\n \"explicit_pad_backward\",\n control_flow_ops.group(input_grad_explicit_pad,\n filter_grad_explicit_pad), burn_iters,\n num_iters)\n input_grad_manual_pad, filter_grad_manual_pad = gradients_impl.gradients(\n output_manual_pad, [input, filter])\n self._bench_op(\n \"manual_pad_backward\",\n control_flow_ops.group(input_grad_manual_pad, filter_grad_manual_pad),\n burn_iters, num_iters)\n\n def benchmarkExplicitVsSamePaddingGraph(self):\n \"\"\"Compare performance of EXPLICIT and SAME padding in graph mode.\n\n A Conv2D op with SAME padding is benchmarked, and an equivalent Conv2D op\n with explicit padding is benchmarked, where the padding is the same as in\n the SAME case. 
The purpose is to ensure EXPLICIT padding is just as\n efficient as the SAME case\n \"\"\"\n if not test.is_gpu_available():\n return\n\n with ops.Graph().as_default():\n burn_iters = 15\n num_convs = 20\n num_iters = 50\n batch_size = 64\n # The input and filter correspond to a middle layer of Resnet50.\n input = variables.Variable( # pylint: disable=redefined-builtin\n random_ops.random_uniform([\n batch_size,\n 256,\n 14,\n 14\n ]))\n filter = variables.Variable(random_ops.random_uniform([3, 3, 256, 256])) # pylint: disable=redefined-builtin\n strides = [1, 1, 1, 1]\n padding = [(0, 0), (0, 0), (1, 1), (1, 1)]\n output_explicit_pad = input\n output_same_pad = input\n\n for _ in range(num_convs):\n output_explicit_pad = nn_ops.conv2d(\n output_explicit_pad,\n filter,\n strides,\n padding=padding,\n data_format=\"NCHW\")\n output_same_pad = nn_ops.conv2d(\n output_same_pad,\n filter,\n strides,\n padding=\"SAME\",\n data_format=\"NCHW\")\n grad_explicit_pad, = gradients_impl.gradients(output_explicit_pad, filter)\n grad_same_pad, = gradients_impl.gradients(output_same_pad, filter)\n self._bench_op(\"graph_explicit_pad\", grad_explicit_pad.op, burn_iters,\n num_iters)\n self._bench_op(\"graph_same_pad\", grad_same_pad.op, burn_iters, num_iters)\n\n def benchmarkExplicitVsSamePaddingEager(self):\n \"\"\"Compare performance of EXPLICIT and SAME padding in eager mode.\n\n A Conv2D op with SAME padding is benchmarked, and an equivalent Conv2D op\n with explicit padding is benchmarked, where the padding is the same as in\n the SAME case. Currently, EXPLICIT padding is slightly slower, due to the\n fact the Python padding list must be checked and processed before the Conv2D\n op can run.\n \"\"\"\n # TODO(reedwm): Make EXPLICIT padding as fast as SAME padding.\n if not test.is_gpu_available():\n return\n\n with context.eager_mode():\n burn_iters = 15\n num_convs = 20\n num_iters = 50\n batch_size = 64\n # The input and filter correspond to a middle layer of Resnet50.\n input = variables.Variable( # pylint: disable=redefined-builtin\n random_ops.random_uniform([\n batch_size,\n 256,\n 14,\n 14\n ]))\n filter = variables.Variable(random_ops.random_uniform([3, 3, 256, 256])) # pylint: disable=redefined-builtin\n strides = [1, 1, 1, 1]\n padding = [(0, 0), (0, 0), (1, 1), (1, 1)]\n output_explicit_pad = input\n output_same_pad = input\n for _ in range(burn_iters):\n output_explicit_pad = nn_ops.conv2d(\n output_explicit_pad,\n filter,\n strides,\n padding=padding,\n data_format=\"NCHW\")\n output_same_pad = nn_ops.conv2d(\n output_same_pad,\n filter,\n strides,\n padding=\"SAME\",\n data_format=\"NCHW\")\n\n start = time.time()\n for _ in range(num_iters):\n with backprop.GradientTape() as tape:\n for _ in range(num_convs):\n output_explicit_pad = nn_ops.conv2d(\n output_explicit_pad,\n filter,\n strides,\n padding=padding,\n data_format=\"NCHW\")\n tape.gradient(output_explicit_pad, filter)\n end = time.time()\n self.report_benchmark(\n name=\"eager_explicit_pad\",\n wall_time=(end - start) / num_iters,\n iters=num_iters)\n\n start = time.time()\n for _ in range(num_iters):\n with backprop.GradientTape() as tape:\n for _ in range(num_convs):\n output_same_pad = nn_ops.conv2d(\n output_same_pad,\n filter,\n strides,\n padding=\"SAME\",\n data_format=\"NCHW\")\n tape.gradient(output_same_pad, filter)\n end = time.time()\n self.report_benchmark(\n name=\"eager_same_pad\",\n wall_time=(end - start) / num_iters,\n iters=num_iters)\n\n\ndef GetInceptionFwdTest(input_size, filter_size, stride, padding,\n 
gpu_only=False):\n\n def Test(self):\n if gpu_only and not test.is_gpu_available():\n tf_logging.info(\"Skipping InceptionFwd %s\", (input_size, filter_size,\n stride, padding))\n return\n tf_logging.info(\"Testing InceptionFwd %s\", (input_size, filter_size, stride,\n padding))\n self._CompareFwdValues(input_size, filter_size, [stride, stride], padding)\n\n return Test\n\n\ndef GetInceptionFwdDilatedConvTest(input_size, filter_size, stride, padding):\n\n def Test(self):\n if stride == 1:\n tf_logging.info(\"Testing InceptionFwd with dilations %s\",\n (input_size, filter_size, stride, padding))\n self._VerifyDilatedConvValues(\n tensor_in_sizes=input_size,\n filter_in_sizes=filter_size,\n strides=[stride, stride],\n dilations=[2, 2],\n padding=padding,\n rtol=5e-4)\n\n return Test\n\n\ndef GetInceptionBackInputTest(input_size, filter_size, output_size, stride,\n padding,\n gpu_only=False):\n\n def Test(self):\n if gpu_only and not test.is_gpu_available():\n tf_logging.info(\"Skipping InceptionBackInput %s\",\n (input_size, filter_size, output_size, stride, padding))\n return\n tf_logging.info(\"Testing InceptionBackInput %s\",\n (input_size, filter_size, output_size, stride, padding))\n self._CompareBackpropInput(input_size, filter_size, output_size,\n [stride, stride], padding)\n\n return Test\n\n\ndef GetInceptionBackFilterTest(input_size, filter_size, output_size, strides,\n padding, gpu_only=False):\n\n def Test(self):\n if gpu_only and not test.is_gpu_available():\n tf_logging.info(\"Skipping InceptionBackFilter %s\",\n (input_size, filter_size, output_size, strides, padding))\n return\n tf_logging.info(\"Testing InceptionBackFilter %s\",\n (input_size, filter_size, output_size, strides, padding))\n self._CompareBackFilter(input_size, filter_size, output_size, strides,\n padding)\n\n return Test\n\n\nif __name__ == \"__main__\":\n for index, (input_size_, filter_size_, output_size_, stride_,\n padding_) in enumerate(GetShrunkInceptionShapes()):\n setattr(Conv2DTest, \"testInceptionFwd_\" + str(index),\n test_util.run_in_graph_and_eager_modes(\n GetInceptionFwdTest(input_size_, filter_size_, stride_,\n padding_)))\n setattr(\n Conv2DTest, \"testInceptionFwdDilatedConv_\" + str(index),\n test_util.run_in_graph_and_eager_modes(GetInceptionFwdDilatedConvTest(\n input_size_, filter_size_, stride_, padding_)))\n setattr(Conv2DTest, \"testInceptionBackInput_\" + str(index),\n test_util.run_in_graph_and_eager_modes(\n GetInceptionBackInputTest(input_size_, filter_size_,\n output_size_, stride_, padding_)))\n setattr(Conv2DTest, \"testInceptionBackFilter_\" + str(index),\n test_util.run_in_graph_and_eager_modes(\n GetInceptionBackFilterTest(input_size_, filter_size_,\n output_size_, [stride_, stride_],\n padding_)))\n\n # TODO(b/35359731)\n # Fwd, BckInput, and BackFilter to test that for certain input parameter\n # set, winograd nonfused algorithm will be excluded from conv autotune. 
If\n # in such case, winograd nonfused algorithm is added as one option of the\n # conv autotune, and cuDNN version is smaller than 7, the following tests\n # will fail.\n ishape = [1, 400, 400, 1]\n fshape = [1, 1, 1, 256]\n oshape = [1, 400, 400, 256]\n setattr(Conv2DTest, \"testInceptionFwd_No_Winograd_Nonfused\",\n test_util.run_in_graph_and_eager_modes(\n GetInceptionFwdTest(ishape, fshape, 1, \"SAME\", gpu_only=True)))\n setattr(Conv2DTest, \"testInceptionFwdDilatedConv_No_Winograd_Nonfused\",\n test_util.run_in_graph_and_eager_modes(\n GetInceptionFwdDilatedConvTest(ishape, fshape, 1, \"SAME\")))\n setattr(Conv2DTest, \"testInceptionBackInput_No_Winograd_Nonfused\",\n test_util.run_in_graph_and_eager_modes(\n GetInceptionBackInputTest(ishape, fshape, oshape, 1, \"SAME\",\n gpu_only=True)))\n setattr(Conv2DTest, \"testInceptionBackFilter_No_Winograd_Nonfused\",\n test_util.run_in_graph_and_eager_modes(\n GetInceptionBackFilterTest(ishape, fshape, oshape, [1, 1], \"SAME\",\n gpu_only=True)))\n test.main()\n",
"# Copyright 2020 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Various classes representing TPU distributed values.\n\nNote that the tests are in values_test.py .\n\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport contextlib\n\nfrom tensorflow.python.distribute import packed_distributed_variable as packed\nfrom tensorflow.python.distribute import values\nfrom tensorflow.python.distribute import values_util\nfrom tensorflow.python.eager import context\nfrom tensorflow.python.eager import tape\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.ops import gen_resource_variable_ops\nfrom tensorflow.python.ops import math_ops\nfrom tensorflow.python.ops import variable_scope\nfrom tensorflow.python.tpu import tpu\n\n\[email protected]\ndef _maybe_enter_graph(tensor):\n # Note: might have an eager tensor but not be executing eagerly when\n # building functions.\n if (context.executing_eagerly() or isinstance(tensor, ops.EagerTensor) or\n ops.has_default_graph()):\n yield\n else:\n with tensor.graph.as_default():\n yield\n\n\[email protected]\ndef _maybe_on_device(var):\n # Add a device scope for packed variables.\n if isinstance(var, packed.PackedVarAndDevice):\n with ops.device(var.device):\n yield\n else:\n yield\n\n\ndef _make_raw_assign_fn(raw_assign_fn): # pylint: disable=missing-docstring\n\n def assign_fn(var, value, use_locking=False, name=None, read_value=True): # pylint: disable=missing-docstring\n del use_locking # Unused.\n\n handle = var.handle\n with _maybe_enter_graph(handle), _maybe_on_device(var):\n op = raw_assign_fn(\n handle,\n ops.convert_to_tensor(value, dtype=var.dtype),\n name=name)\n with ops.control_dependencies([op]):\n return var._read_variable_op() if read_value else op # pylint: disable=protected-access\n\n return assign_fn\n\n\nclass TPUVariableMixin(object):\n \"\"\"Mixin for TPU variables.\"\"\"\n\n def __init__(self, *args, **kwargs):\n super(TPUVariableMixin, self).__init__(*args, **kwargs)\n\n # Handle ID is needed for `get_replicated_var_handle` to cache the variables\n # correctly since in eager mode different variables can have the same name.\n if ops.executing_eagerly_outside_functions():\n self._handle_id = self._common_name + \"_\" + str(id(self._primary))\n else:\n self._handle_id = self._common_name\n\n def __getattr__(self, name):\n if enclosing_tpu_context() is None:\n return super(TPUVariableMixin, self).__getattr__(name)\n else:\n raise AttributeError(\n \"'{}' not accessible within a TPU context.\".format(name))\n\n def get(self):\n if enclosing_tpu_context() is None:\n return super(TPUVariableMixin, self).get()\n else:\n raise NotImplementedError(\n \"`TPUVariableMixin.get()` is not supported within a TPU context.\")\n\n def _get_as_operand(self):\n return self.read_value()\n\n def _is_mirrored(self):\n raise 
NotImplementedError(\n \"`TPUVariableMixin._is_mirrored()` must be implemented by subclasses.\")\n\n @property\n def handle(self):\n \"\"\"The handle by which this variable can be accessed.\"\"\"\n # If we're in a tpu.rewrite(), return the replicated handle.\n tpu_context = enclosing_tpu_context()\n if tpu_context is None or context.executing_eagerly():\n return self._get_on_device_or_primary().handle\n else:\n is_packed = self._packed_var is not None\n val = self._values\n if is_packed:\n val = [self._packed_var]\n\n return tpu_context.get_replicated_var_handle(self._handle_id, val,\n self._is_mirrored(),\n is_packed)\n\n @property\n def device(self):\n return self.handle.device\n\n def _read_variable_op(self):\n \"\"\"Reads the value of this variable.\"\"\"\n if self.trainable:\n tape.variable_accessed(self)\n\n handle = self.handle\n if getattr(handle, \"is_packed\", False):\n # Add a device scope for a packed variable handle.\n with ops.device(self._get_on_device_or_primary().device):\n return gen_resource_variable_ops.read_variable_op(handle, self.dtype)\n else:\n return gen_resource_variable_ops.read_variable_op(handle, self.dtype)\n\n def read_value(self):\n if enclosing_tpu_context() is None:\n return super(TPUVariableMixin, self).read_value()\n else:\n return self._read_variable_op()\n\n def value(self):\n if enclosing_tpu_context() is None:\n return super(TPUVariableMixin, self).value()\n else:\n return self._read_variable_op()\n\n def _as_graph_element(self):\n if enclosing_tpu_context() is None:\n return super(TPUVariableMixin, self)._as_graph_element() # pylint: disable=protected-access\n else:\n return None\n\n @property\n def op(self):\n if values_util.is_saving_non_distributed():\n return self._primary.op\n return values.DistributedVarOp(self._primary.op.name,\n self._primary.op.graph,\n self._primary.op.traceback,\n self._primary.op.type)\n\n def _dense_var_to_tensor(self, dtype=None, name=None, as_ref=False):\n \"\"\"Converts a variable to a tensor.\"\"\"\n # pylint: disable=protected-access\n if enclosing_tpu_context() is None:\n return super(TPUVariableMixin, self)._dense_var_to_tensor(\n dtype=dtype, name=name, as_ref=as_ref)\n # pylint: enable=protected-access\n elif dtype is not None and dtype != self.dtype:\n return math_ops.cast(self.read_value(), dtype)\n else:\n return self.handle if as_ref else self.read_value()\n\n\ndef enclosing_tpu_context():\n \"\"\"Returns the TPUReplicateContext, which exists inside a tpu.rewrite().\"\"\"\n graph = ops.get_default_graph()\n while graph is not None:\n # pylint: disable=protected-access\n context_ = graph._get_control_flow_context()\n # pylint: enable=protected-access\n while context_ is not None:\n if isinstance(context_, tpu.TPUReplicateContext):\n return context_\n context_ = context_.outer_context\n # This may be a FuncGraph due to defuns or v2 control flow. 
We need to\n # find the original graph with the XLAControlFlowContext.\n graph = getattr(graph, \"outer_graph\", None)\n return None\n\n\nclass TPUDistributedVariable(TPUVariableMixin, values.DistributedVariable):\n \"\"\"DistributedVariable subclass for TPUStrategy.\"\"\"\n\n def _is_mirrored(self):\n self._policy._is_mirrored() # pylint: disable=protected-access\n\n def assign_sub(self, value, use_locking=False, name=None, read_value=True):\n if values_util.is_saving_non_distributed():\n return self._primary.assign_sub(value, use_locking, name, read_value)\n return self._policy.assign_sub(\n self, value, use_locking=use_locking, name=name, read_value=read_value)\n\n def assign_add(self, value, use_locking=False, name=None, read_value=True):\n if values_util.is_saving_non_distributed():\n return self._primary.assign_add(value, use_locking, name, read_value)\n return self._policy.assign_add(\n self, value, use_locking=use_locking, name=name, read_value=read_value)\n\n def assign(self, value, use_locking=False, name=None, read_value=True):\n if values_util.is_saving_non_distributed():\n return self._primary.assign(value, use_locking, name, read_value)\n return self._policy.assign(\n self, value, use_locking=use_locking, name=name, read_value=read_value)\n\n def scatter_sub(self, sparse_delta, use_locking=False, name=None):\n if values_util.is_saving_non_distributed():\n return self._primary.scatter_sub(sparse_delta, use_locking, name)\n return self._policy.scatter_sub(\n self, sparse_delta, use_locking=use_locking, name=name)\n\n def scatter_add(self, sparse_delta, use_locking=False, name=None):\n if values_util.is_saving_non_distributed():\n return self._primary.scatter_add(sparse_delta, use_locking, name)\n return self._policy.scatter_add(\n self, sparse_delta, use_locking=use_locking, name=name)\n\n def scatter_mul(self, sparse_delta, use_locking=False, name=None):\n if values_util.is_saving_non_distributed():\n return self._primary.scatter_mul(sparse_delta, use_locking, name)\n return self._policy.scatter_mul(\n self, sparse_delta, use_locking=use_locking, name=name)\n\n def scatter_div(self, sparse_delta, use_locking=False, name=None):\n if values_util.is_saving_non_distributed():\n return self._primary.scatter_div(sparse_delta, use_locking, name)\n return self._policy.scatter_div(\n self, sparse_delta, use_locking=use_locking, name=name)\n\n def scatter_min(self, sparse_delta, use_locking=False, name=None):\n if values_util.is_saving_non_distributed():\n return self._primary.scatter_min(sparse_delta, use_locking, name)\n return self._policy.scatter_min(\n self, sparse_delta, use_locking=use_locking, name=name)\n\n def scatter_max(self, sparse_delta, use_locking=False, name=None):\n if values_util.is_saving_non_distributed():\n return self._primary.scatter_max(sparse_delta, use_locking, name)\n return self._policy.scatter_max(\n self, sparse_delta, use_locking=use_locking, name=name)\n\n def scatter_update(self, sparse_delta, use_locking=False, name=None):\n if values_util.is_saving_non_distributed():\n return self._primary.scatter_update(sparse_delta, use_locking, name)\n return self._policy.scatter_update(\n self, sparse_delta, use_locking=use_locking, name=name)\n\n\nclass TPUMirroredVariable(TPUVariableMixin, values.MirroredVariable):\n \"\"\"Holds a map from replica to TPU variables whose values are kept in sync.\"\"\"\n\n def assign_sub(self, value, use_locking=False, name=None,\n read_value=True):\n if (enclosing_tpu_context() and\n self.aggregation == 
variable_scope.VariableAggregation.NONE):\n return _make_raw_assign_fn(\n gen_resource_variable_ops.assign_sub_variable_op)(\n self,\n value=value,\n use_locking=use_locking,\n name=name,\n read_value=read_value)\n return assign_sub(self, value, use_locking=use_locking, name=name,\n read_value=read_value)\n\n def assign_add(self, value, use_locking=False, name=None,\n read_value=True):\n if (enclosing_tpu_context() and\n self.aggregation == variable_scope.VariableAggregation.NONE):\n return _make_raw_assign_fn(\n gen_resource_variable_ops.assign_add_variable_op)(\n self,\n value=value,\n use_locking=use_locking,\n name=name,\n read_value=read_value)\n return assign_add(self, value, use_locking=use_locking, name=name,\n read_value=read_value)\n\n def assign(self, value, use_locking=False, name=None, read_value=True):\n if (enclosing_tpu_context() and\n self.aggregation == variable_scope.VariableAggregation.NONE):\n return _make_raw_assign_fn(\n gen_resource_variable_ops.assign_variable_op)(\n self,\n value=value,\n use_locking=use_locking,\n name=name,\n read_value=read_value)\n return assign(self, value, use_locking=use_locking, name=name,\n read_value=read_value)\n\n def scatter_sub(self, *args, **kwargs):\n if values_util.is_saving_non_distributed():\n return self._primary.scatter_sub(*args, **kwargs)\n raise NotImplementedError\n\n def scatter_add(self, *args, **kwargs):\n if values_util.is_saving_non_distributed():\n return self._primary.scatter_add(*args, **kwargs)\n raise NotImplementedError\n\n def scatter_max(self, *args, **kwargs):\n if values_util.is_saving_non_distributed():\n return self._primary.scatter_max(*args, **kwargs)\n raise NotImplementedError\n\n def scatter_min(self, *args, **kwargs):\n if values_util.is_saving_non_distributed():\n return self._primary.scatter_min(*args, **kwargs)\n raise NotImplementedError\n\n def scatter_mul(self, *args, **kwargs):\n if values_util.is_saving_non_distributed():\n return self._primary.scatter_mul(*args, **kwargs)\n raise NotImplementedError\n\n def scatter_div(self, *args, **kwargs):\n if values_util.is_saving_non_distributed():\n return self._primary.scatter_div(*args, **kwargs)\n raise NotImplementedError\n\n def scatter_update(self, *args, **kwargs):\n if values_util.is_saving_non_distributed():\n return self._primary.scatter_update(*args, **kwargs)\n raise NotImplementedError\n\n def _is_mirrored(self):\n return True\n\n\nclass TPUSyncOnReadVariable(TPUVariableMixin, values.SyncOnReadVariable):\n \"\"\"Holds a map from replica to variables whose values are reduced on save.\"\"\"\n\n def assign_sub(self, *args, **kwargs):\n if enclosing_tpu_context() is None:\n return values.SyncOnReadVariable.assign_sub(self, *args, **kwargs)\n else:\n return _make_raw_assign_fn(\n gen_resource_variable_ops.assign_sub_variable_op)(self, *args,\n **kwargs)\n\n def assign_add(self, *args, **kwargs):\n if enclosing_tpu_context() is None:\n return values.SyncOnReadVariable.assign_add(self, *args, **kwargs)\n else:\n return _make_raw_assign_fn(\n gen_resource_variable_ops.assign_add_variable_op)(self, *args,\n **kwargs)\n\n def assign(self, *args, **kwargs):\n if enclosing_tpu_context() is None:\n return values.SyncOnReadVariable.assign(self, *args, **kwargs)\n else:\n return _make_raw_assign_fn(gen_resource_variable_ops.assign_variable_op)(\n self, *args, **kwargs)\n\n def _is_mirrored(self):\n return False\n\n\n# Common method between AutoPolicy, OnWrite and Mirrored variables.\ndef assign_sub(var, value, use_locking=False, name=None, 
read_value=True):\n assign_sub_fn = _make_raw_assign_fn(\n gen_resource_variable_ops.assign_sub_variable_op)\n return var._update( # pylint: disable=protected-access\n update_fn=assign_sub_fn,\n value=value,\n use_locking=use_locking,\n name=name,\n read_value=read_value)\n\n\ndef assign_add(var, value, use_locking=False, name=None, read_value=True):\n assign_add_fn = _make_raw_assign_fn(\n gen_resource_variable_ops.assign_add_variable_op)\n return var._update( # pylint: disable=protected-access\n update_fn=assign_add_fn,\n value=value,\n use_locking=use_locking,\n name=name,\n read_value=read_value)\n\n\ndef assign(var, value, use_locking=False, name=None, read_value=True):\n assign_fn = _make_raw_assign_fn(\n gen_resource_variable_ops.assign_variable_op)\n return var._update( # pylint: disable=protected-access\n update_fn=assign_fn,\n value=value,\n use_locking=use_locking,\n name=name,\n read_value=read_value)\n\n\nclass TPUAutoPolicy(values.AutoPolicy):\n \"\"\"Policy defined for `tf.VariableSynchronization.AUTO` synchronization.\n\n This policy is created when `synchronization` is set to\n `tf.VariableSynchronization.AUTO` and `aggregation` is set to\n `tf.VariableAggregation.NONE` when creating a `tf.Variable` in `tf.distribute`\n scope.\n \"\"\"\n\n def assign_sub(self, var, value, use_locking=False, name=None,\n read_value=True):\n if enclosing_tpu_context():\n return _make_raw_assign_fn(\n gen_resource_variable_ops.assign_sub_variable_op)(\n var,\n value=value,\n use_locking=use_locking,\n name=name,\n read_value=read_value)\n return assign_sub(var, value, use_locking=use_locking, name=name,\n read_value=read_value)\n\n def assign_add(self, var, value, use_locking=False, name=None,\n read_value=True):\n if enclosing_tpu_context():\n return _make_raw_assign_fn(\n gen_resource_variable_ops.assign_add_variable_op)(\n var,\n value=value,\n use_locking=use_locking,\n name=name,\n read_value=read_value)\n return assign_add(var, value, use_locking=use_locking, name=name,\n read_value=read_value)\n\n def assign(self, var, value, use_locking=False, name=None, read_value=True):\n if enclosing_tpu_context():\n return _make_raw_assign_fn(\n gen_resource_variable_ops.assign_variable_op)(\n var,\n value=value,\n use_locking=use_locking,\n name=name,\n read_value=read_value)\n return assign(var, value, use_locking=use_locking, name=name,\n read_value=read_value)\n\n def scatter_sub(self, *args, **kwargs):\n raise NotImplementedError\n\n def scatter_add(self, *args, **kwargs):\n raise NotImplementedError\n\n def scatter_max(self, *args, **kwargs):\n raise NotImplementedError\n\n def scatter_min(self, *args, **kwargs):\n raise NotImplementedError\n\n def scatter_mul(self, *args, **kwargs):\n raise NotImplementedError\n\n def scatter_div(self, *args, **kwargs):\n raise NotImplementedError\n\n def scatter_update(self, *args, **kwargs):\n raise NotImplementedError\n\n def _is_mirrored(self):\n return True\n\n\nclass TPUOnWritePolicy(values.OnWritePolicy):\n \"\"\"Policy defined for `tf.VariableSynchronization.ON_WRITE` synchronization.\n\n This policy is created when the following `synchronization` and\n `aggregation` parameters are specified when creating a `tf.Variable` in\n `tf.distribute` scope:\n * `synchronization` is equal to `tf.VariableSynchronization.AUTO` and\n aggregation can be any of the following `tf.VariableAggregation` enum\n values such as `SUM`, `MEAN` or `ONLY_FIRST_REPLICA`.\n * `synchronization` is equal to `tf.VariableSynchronization.ON_WRITE` and\n aggregation can be any of 
the following `tf.VariableAggregation` enum\n values such as `NONE`, `SUM`, `MEAN` or `ONLY_FIRST_REPLICA`.\n \"\"\"\n\n def assign_sub(self, var, value, use_locking=False, name=None,\n read_value=True):\n return assign_sub(var, value, use_locking=use_locking, name=name,\n read_value=read_value)\n\n def assign_add(self, var, value, use_locking=False, name=None,\n read_value=True):\n return assign_add(var, value, use_locking=use_locking, name=name,\n read_value=read_value)\n\n def assign(self, var, value, use_locking=False, name=None, read_value=True):\n return assign(var, value, use_locking=use_locking, name=name,\n read_value=read_value)\n\n def scatter_sub(self, *args, **kwargs):\n raise NotImplementedError\n\n def scatter_add(self, *args, **kwargs):\n raise NotImplementedError\n\n def scatter_max(self, *args, **kwargs):\n raise NotImplementedError\n\n def scatter_min(self, *args, **kwargs):\n raise NotImplementedError\n\n def scatter_mul(self, *args, **kwargs):\n raise NotImplementedError\n\n def scatter_div(self, *args, **kwargs):\n raise NotImplementedError\n\n def scatter_update(self, *args, **kwargs):\n raise NotImplementedError\n\n def _is_mirrored(self):\n return True\n\n\nclass TPUOnReadPolicy(values.OnReadPolicy):\n \"\"\"Policy defined for `tf.VariableSynchronization.ON_READ` synchronization.\n\n This policy is created when `synchronization` is set to\n `tf.VariableSynchronization.ON_READ` and `aggregation` is set to any of the\n values allowed by the `tf.VariableAggregation` enum such as `NONE`, `SUM`,\n `MEAN` or `ONLY_FIRST_REPLICA`when creating a `tf.Variable` in `tf.distribute`\n scope.\n \"\"\"\n\n def assign_sub(self, var, *args, **kwargs):\n if enclosing_tpu_context() is None:\n return super(TPUOnReadPolicy, self).assign_sub(var, *args, **kwargs)\n else:\n return _make_raw_assign_fn(\n gen_resource_variable_ops.assign_sub_variable_op)(var, *args,\n **kwargs)\n\n def assign_add(self, var, *args, **kwargs):\n if enclosing_tpu_context() is None:\n return super(TPUOnReadPolicy, self).assign_add(var, *args, **kwargs)\n else:\n return _make_raw_assign_fn(\n gen_resource_variable_ops.assign_add_variable_op)(var, *args,\n **kwargs)\n\n def assign(self, var, *args, **kwargs):\n if enclosing_tpu_context() is None:\n return super(TPUOnReadPolicy, self).assign(var, *args, **kwargs)\n else:\n return _make_raw_assign_fn(gen_resource_variable_ops.assign_variable_op)(\n var, *args, **kwargs)\n\n def _is_mirrored(self):\n return False\n\n def scatter_sub(self, *args, **kwargs):\n raise NotImplementedError\n\n def scatter_add(self, *args, **kwargs):\n raise NotImplementedError\n\n def scatter_max(self, *args, **kwargs):\n raise NotImplementedError\n\n def scatter_min(self, *args, **kwargs):\n raise NotImplementedError\n\n def scatter_mul(self, *args, **kwargs):\n raise NotImplementedError\n\n def scatter_div(self, *args, **kwargs):\n raise NotImplementedError\n\n def scatter_update(self, *args, **kwargs):\n raise NotImplementedError\n"
] | [
[
"numpy.ones",
"tensorflow.python.ops.nn_ops.conv2d",
"numpy.issubdtype",
"tensorflow.python.platform.test.is_gpu_available",
"tensorflow.python.ops.array_ops.pad",
"tensorflow.python.ops.nn_impl.depthwise_conv2d",
"tensorflow.python.platform.tf_logging.debug",
"tensorflow.python.ops.gradients_impl.gradients",
"tensorflow.python.framework.test_util.GpuSupportsHalfMatMulAndConv",
"tensorflow.python.ops.gradient_checker.compute_gradient",
"tensorflow.python.framework.constant_op.constant",
"tensorflow.python.ops.nn_ops.conv2d_backprop_input",
"numpy.fabs",
"tensorflow.python.framework.test_util.NCHWToNHWC",
"tensorflow.python.platform.test.is_built_with_rocm",
"tensorflow.python.framework.ops.Graph",
"tensorflow.python.ops.array_ops.placeholder",
"tensorflow.python.eager.context.eager_mode",
"tensorflow.python.eager.backprop.GradientTape",
"tensorflow.python.framework.test_util.disable_xla",
"tensorflow.python.client.session.Session",
"numpy.random.rand",
"tensorflow.python.ops.nn_ops.Convolution",
"tensorflow.python.ops.control_flow_ops.group",
"tensorflow.python.ops.random_ops.random_uniform",
"tensorflow.python.platform.tf_logging.info",
"tensorflow.python.framework.test_util.device",
"tensorflow.python.ops.array_ops.identity",
"tensorflow.python.ops.variables.global_variables_initializer",
"numpy.arange",
"numpy.pad",
"numpy.rint",
"tensorflow.core.protobuf.config_pb2.ConfigProto",
"tensorflow.python.framework.test_util.run_in_graph_and_eager_modes",
"tensorflow.python.ops.nn_ops.conv2d_backprop_filter",
"tensorflow.python.ops.array_ops.split",
"tensorflow.python.platform.test.main",
"tensorflow.python.framework.test_util.NHWCToNCHW",
"tensorflow.python.framework.test_util.IsMklEnabled",
"numpy.ravel",
"tensorflow.python.layers.convolutional.conv2d",
"tensorflow.python.ops.array_ops.reshape",
"tensorflow.python.ops.nn_impl.separable_conv2d",
"tensorflow.python.ops.nn_ops.convolution",
"tensorflow.python.ops.array_ops.transpose"
],
[
"tensorflow.python.distribute.values.SyncOnReadVariable.assign_sub",
"tensorflow.python.framework.ops.has_default_graph",
"tensorflow.python.framework.ops.get_default_graph",
"tensorflow.python.eager.tape.variable_accessed",
"tensorflow.python.distribute.values.SyncOnReadVariable.assign_add",
"tensorflow.python.distribute.values_util.is_saving_non_distributed",
"tensorflow.python.framework.ops.control_dependencies",
"tensorflow.python.distribute.values.SyncOnReadVariable.assign",
"tensorflow.python.eager.context.executing_eagerly",
"tensorflow.python.framework.ops.convert_to_tensor",
"tensorflow.python.distribute.values.DistributedVarOp",
"tensorflow.python.framework.ops.device",
"tensorflow.python.ops.gen_resource_variable_ops.read_variable_op",
"tensorflow.python.framework.ops.executing_eagerly_outside_functions"
]
] |
vikranth22446/PyHessian | [
"e8b1fbadb24349eef8f3a137ecfd27dfc6e3bb53"
] | [
"density_plot.py"
] | [
"#*\n# @file Different utility functions\n# Copyright (c) Zhewei Yao, Amir Gholami\n# All rights reserved.\n# This file is part of PyHessian library.\n#\n# PyHessian is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# PyHessian is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with PyHessian. If not, see <http://www.gnu.org/licenses/>.\n#*\n\nimport math\nimport numpy as np\nimport matplotlib as mpl\nmpl.use('Agg')\nimport matplotlib.pyplot as plt\n\n\ndef get_esd_plot(eigenvalues, weights):\n density, grids = density_generate(eigenvalues, weights)\n plt.semilogy(grids, density + 1.0e-7)\n plt.ylabel('Density (Log Scale)', fontsize=14, labelpad=10)\n plt.xlabel('Eigenvlaue', fontsize=14, labelpad=10)\n plt.xticks(fontsize=12)\n plt.yticks(fontsize=12)\n plt.axis([np.min(eigenvalues) - 1, np.max(eigenvalues) + 1, None, None])\n plt.tight_layout()\n plt.savefig('example.pdf')\n\n\ndef density_generate(eigenvalues,\n weights,\n num_bins=10000,\n sigma_squared=1e-5,\n overhead=0.01):\n\n eigenvalues = np.array(eigenvalues)\n weights = np.array(weights)\n\n lambda_max = np.mean(np.max(eigenvalues, axis=1), axis=0) + overhead\n lambda_min = np.mean(np.min(eigenvalues, axis=1), axis=0) - overhead\n\n grids = np.linspace(lambda_min, lambda_max, num=num_bins)\n sigma = sigma_squared * max(1, (lambda_max - lambda_min))\n\n num_runs = eigenvalues.shape[0]\n density_output = np.zeros((num_runs, num_bins))\n\n for i in range(num_runs):\n for j in range(num_bins):\n x = grids[j]\n tmp_result = gaussian(eigenvalues[i, :], x, sigma)\n density_output[i, j] = np.sum(tmp_result * weights[i, :])\n density = np.mean(density_output, axis=0)\n normalization = np.sum(density) * (grids[1] - grids[0])\n density = density / normalization\n return density, grids\n\n\ndef gaussian(x, x0, sigma_squared):\n return np.exp(-(x0 - x)**2 /\n (2.0 * sigma_squared)) / np.sqrt(2 * np.pi * sigma_squared)\n"
] | [
[
"numpy.sqrt",
"numpy.sum",
"matplotlib.pyplot.semilogy",
"numpy.array",
"numpy.zeros",
"matplotlib.pyplot.xticks",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.tight_layout",
"numpy.exp",
"numpy.max",
"matplotlib.pyplot.ylabel",
"numpy.min",
"matplotlib.use",
"numpy.linspace",
"matplotlib.pyplot.yticks",
"matplotlib.pyplot.xlabel",
"numpy.mean"
]
] |
nokia/integratedimputation | [
"ca72bda54cb66e99d79ff0b174cf8f99ccb554ba"
] | [
"evals/gain/gain_fixm.py"
] | [
"#!/usr/bin/env python3\n\n# © 2021 Nokia\n#\n# Licensed under the BSD 3 Clause license\n# SPDX-License-Identifier: BSD-3-Clause\n\n# http://proceedings.mlr.press/v80/yoon18a/yoon18a.pdf\n\nimport sys\n\nsys.path.append('../../common/')\nfrom defaults import *\nfrom gain_ import train\nfrom data_mobile import loadData, normData, foldData\nfrom eval_ import EvalACC\nimport utils\n\nsys.path.append('../../common/nets/')\nfrom net_ae import NetAEConvTrans\nfrom net_disc import NetDiscConvTrans\n\nimport numpy as np\n\nimport torch\nimport torch.utils.data\n\nimport argparse\n\n# ==============================================================================\n# Settings =====================================================================\nparser = argparse.ArgumentParser()\nparser.add_argument('--out_folder', default = './out_test')\nparser.add_argument('--missing_type', default = 'ran')\nparser.add_argument('--gpu_id', default = None, type = int)\nparser.add_argument('--missing_rate_train', default = 0.5, type = float)\nparser.add_argument('--fold', default = 0, type = int)\nargs = parser.parse_args()\n\nout_folder = args.out_folder\nmissing_type = args.missing_type\ngpu_id = args.gpu_id\nmissing_rate_train = args.missing_rate_train\nfold = args.fold\n\nlr_ae = 0.0001\nwd_ae = 1e-05\n\nlr_disc = 0.0001\nwd_disc = 1e-05\n\nalpha = 10\niter_disc = 5\n\n\n# ==============================================================================\n# Data =========================================================================\nutils.makeFolders(out_folder)\n\nvalues_np, labels_np = loadData()\nvalues_np = normData(values_np)\n\nvalues_np_train, values_np_test, labels_np_train, labels_np_test = foldData(values_np, labels_np, fold)\n\n\n# ==============================================================================\n# Data loaders =================================================================\ndataset_train = torch.utils.data.TensorDataset(\n torch.tensor(values_np_train, dtype = torch.float),\n torch.tensor(labels_np_train, dtype = torch.long)\n)\n\ndataloader_train = torch.utils.data.DataLoader(\n dataset_train,\n batch_size = batch_size,\n shuffle = True,\n pin_memory = True,\n num_workers = 3\n)\n\ndataset_test = torch.utils.data.TensorDataset(\n torch.tensor(values_np_test, dtype = torch.float),\n torch.tensor(labels_np_test, dtype = torch.long)\n)\n\ndataloader_test = torch.utils.data.DataLoader(\n dataset_test,\n batch_size = batch_size,\n shuffle = False,\n pin_memory = True,\n num_workers = 3\n)\n\n# ==============================================================================\n# Definitions ==================================================================\nif missing_type == 'seq':\n introduceMissingTrain = utils.IntroduceMissingSeq(missing_rate_train)\nelse:\n introduceMissingTrain = utils.IntroduceMissing(missing_rate_train)\n \n# ==============================================================================\n# Instantiation ================================================================\nnet_ae = NetAEConvTrans(values_np.shape[1] * 2, values_np.shape[1])\nnet_disc = NetDiscConvTrans(values_np.shape[1], values_np.shape[1])\neval_acc = EvalACC(values_np.shape[1] * 2, out_folder, fold, epochs_gain_fixm[0], eval_acc_every)\n\nnet_dict = {\n \"net_ae\": net_ae,\n \"net_disc\": net_disc\n}\n\n\n# ==============================================================================\n# Move to GPU ==================================================================\ndevice = torch.device(\"cuda:%d\" % 
utils.gpuAssign(gpu_id))\n\nnet_ae.to(device)\nnet_disc.to(device)\n\neval_acc.to(device)\n\n\n# ==============================================================================\n# Opts =========================================================================\nopt_ae = torch.optim.Adam(\n net_ae.parameters(),\n lr = lr_ae,\n weight_decay = wd_ae\n)\n\nopt_disc = torch.optim.Adam(\n net_disc.parameters(),\n lr = lr_disc,\n weight_decay = wd_disc\n)\n\nopt_dict = {\n \"opt_ae\": opt_ae,\n \"opt_disc\": opt_disc\n}\n\n\n# ==============================================================================\n# Calls ========================================================================\ntrain(\n alpha,\n iter_disc,\n introduceMissingTrain,\n net_dict,\n opt_dict,\n dataloader_train,\n dataloader_test,\n device,\n eval_every,\n out_folder,\n eval_acc,\n epochs_end = epochs_gain_fixm[1],\n epochs_start = epochs_gain_fixm[0]\n)\n"
] | [
[
"torch.utils.data.DataLoader",
"torch.tensor"
]
] |
aimimi2015/LDA_patent | [
"e5df0b8e1b741c19352485b5b2dca560e1a961f1"
] | [
"matplotlib/line.py"
] | [
"# coding: utf-8\nfrom __future__ import print_function\nfrom __future__ import print_function\nfrom __future__ import print_function\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pickle\nimport pprint\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pymysql\nimport pickle\nfrom sympy import *\n\n#x1 = np.arange(1, 23, 1)\n# y = np.array([4.00, 6.40, 8.00, 8.80, 9.22, 9.50, 9.70, 9.86, 10.00, 10.20, 10.32, 11.42, 12.00, 12.42, 13.00, 15.00, 16.20, 17.32, 19.42, 21.00])\n#y1 = np.array([0.145, 0.046, 0.044, 0.040, 0.18, 0.047, 0.048 ,0.13, 0.035, 0.035, 0.032,0.145, 0.046, 0.044, 0.040, 0.18, 0.047, 0.048 ,0.13, 0.035, 0.035, 0.032])\n\n\npkl_file = open('../领域预测/topiclist/list22.pkl', 'rb')\n\nlist1 = pickle.load(pkl_file)\n\n\n#print(json.dumps(list1 , encoding='UTF-8', ensure_ascii=False))\n\n#print len(list1)\n\nnewlist=[]\nsumlist=0\ni=0\nh=0\nj=1 #这是间隔,如果 =1,就是一个月一个月的\nwhile i<len(list1):\n while h<j:\n sumlist = sumlist+list1[i+h]\n h=h+1\n newlist.append(sumlist)\n sumlist=0\n h=0\n i=i+j\n\nprint (len(newlist))\n\nx = np.arange(1, len(newlist)+1, 1)\n#y = np.array([4.00, 6.40, 8.00, 8.80, 9.22, 9.50, 9.70, 9.86, 10.00, 10.20, 10.32, 11.42, 12.00, 12.42, 13.00, 15.00, 16.20, 17.32, 19.42, 21.00])\ny = np.array(newlist)\n\n\nz1 = np.polyfit(x, y, 2) # 用3次多项式拟合\np1 = np.poly1d(z1)\n\nyvals = p1(x)\np2 = abs(yvals - y)\nsigma = np.std(p2)\nprint(sigma)\nprint(p2)\n\n'''\n具体来说,三西格玛规则是建立在数据服从正态分布的基础之上的,其阈值为\n正态分布平均值与三倍标准差之和。在正态分布中标准差为𝜎,均值为𝜇,对于全部\n的数据来说,数值分布在(𝜇 − 𝜎,𝜇 + 𝜎)中的概率为 0.655,布在(𝜇 − 2𝜎,𝜇 + 2𝜎)中的\n概率为 0.954,分布在(𝜇 − 3𝜎,𝜇 + 3𝜎)中的概率大致为 0.997。规则规定任何大于三\n西格玛阈值的值都极有可能是异常值。因此我们以图 4.3 中程序移除异常值,并进行\n临近数据点平均值替换。\n\n'''\nprint (\"p1:\"),\nprint(p1) # 在屏幕上打印拟合多项式\nyvals = p1(x) # 也可以使用yvals=np.polyval(z1,x)\nybar = np.sum(y) / len(y)\n\n#print(type(np.mean(p2)))\nout = p2>sigma*3\n#print(type(out))\nprint (out)\n\nssreg = np.sum((yvals - ybar) ** 2) #拟合数据方差\nsstot = np.sum((y - ybar) ** 2) #原始数据方差\nprint (ssreg / sstot) # 准确率\n\nplot1 = plt.plot(x, y, '*', label='original values')\nplot2 = plt.plot(x, yvals, 'r', label='polyfit values')\nplt.xlabel('year(05-15)')\nplt.ylabel('Proportion')\nplt.legend(loc=4) # 指定legend的位置,读者可以自己help它的用法\nplt.title('topic1')\nplt.show()\nplt.savefig('p1.png')\n\n\ny_new = y.tolist() #准备修改 这就是之后被替换的新的y分布\nyvals1 = yvals.tolist() #准备修改\n\n#\n# def quzao(sigma,y_new,yvals1):\n# i=0\n# while i < len(y_new):\n# if abs(y_new[i] - yvals1[i]) >= sigma * 3:\n# print(y_new[i])\n# if i != 0 and i != len(y) - 1:\n# y_new[i] = (y_new[i - 1] + y_new[i - 2]) * 0.5\n#\n# elif i == len(y) - 1:\n# y_new[i] = (y_new[len(y) - 2] + y_new[len(y) - 3]) * 0.5\n#\n# z1 = np.polyfit(x, y_new, 2) # 用3次多项式拟合\n# p1 = np.poly1d(z1)\n#\n# i = i + 1\n\n\nwhile True:\n i = 0\n while i < len(y):\n if abs(y_new[i]-yvals1[i])>=sigma*3:\n print (y_new[i])\n if i!=0 and i!=len(y)-1:\n y_new[i] = (y_new[i - 1] + y_new[i-2]) * 0.5\n elif i==1:\n y_new[i] = (y_new[0] + y_new[2]) * 0.5\n #z1 = np.polyfit(x, y_new, 2) # 用3次多项式拟合\n #p1 = np.poly1d(z1)\n\n\n # yvals_new = p1(x1)\n # plot_new1 = plt.plot(x1, y_new, '*', label='original values')\n # plot_new12 = plt.plot(x1, yvals_new, 'r', label='polyfit values')\n # plt.xlabel('x axis')\n # plt.ylabel('y axis')\n # plt.legend(loc=4) # 指定legend的位置\n # plt.title('polyfitting')\n # plt.show()\n # print('========')\n\n i=i+1\n z1 = np.polyfit(x, y_new, 2) # 用3次多项式拟合\n p1 = np.poly1d(z1)\n\n yvals = p1(x)\n p2 = abs(yvals - y_new)\n sigma1 = np.std(p2)\n print(sigma1)\n if(sigma==sigma1):\n break\n else:\n 
sigma=sigma1\n\n\nprint(y_new)\n\nz_new = np.polyfit(x, y_new, 2) # 用3次多项式拟合\np_new = np.poly1d(z_new)\nyvals_new = p_new(x)\nybar_new = np.sum(y_new) / len(y)\nssreg = np.sum((yvals_new - ybar_new) ** 2)\nsstot = np.sum((y_new - ybar_new) ** 2)\nsstot_old = np.sum((y - ybar) ** 2) #原始数据方差\n\nprint (ssreg / sstot_old) # 准确率\n\n\nplot_new1 = plt.plot(x, y_new, '*', label='original values')\nplot_new12 = plt.plot(x, yvals_new, 'r', label='polyfit values')\nplt.xlabel('year(05-15)')\nplt.ylabel('Proportion')\nplt.legend(loc=4) # 指定legend的位置\nplt.title('topic10')\nplt.show()\nplt.savefig('p1.png')\n\n\nprint(p_new)\n# # 定义函数变量x\n# x=Symbol(\"x\")\n#\n# # 对函数sin(x)求导,并且显示\n# print(diff(p_new, x))"
] | [
[
"numpy.sum",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.savefig",
"numpy.std",
"matplotlib.pyplot.title",
"numpy.poly1d",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.show",
"numpy.array",
"numpy.polyfit",
"matplotlib.pyplot.xlabel"
]
] |
Ceyron/Lattice-Boltzmann-Method-JAX | [
"f18e136e6e12fa575104053818c53b1689e50948"
] | [
"lattice_boltzmann_method_python_jax.py"
] | [
"r\"\"\"\nSolves the incompressible Navier Stokes equations using the Lattice-Boltzmann\nMethod¹. The scenario is the flow around a cylinder in 2D which yields a van\nKarman vortex street.\n\n\n periodic\n +-------------------------------------------------------------+\n | |\n | ---> |\n | |\n | ---> **** |\n | ******** | \ninflow | ---> ********** | outflow\n | ******** |\n | ---> **** |\n | |\n | ---> |\n | |\n +-------------------------------------------------------------+\n periodic\n\n-> uniform inflow profile with only horizontal velocities at left boundary\n-> outflow boundary at the right\n-> top and bottom boundary connected by periodicity\n-> the circle in the center (representing a slice from the 3d cylinder)\n uses a no-slip Boundary Condition\n-> initially, fluid is NOT at rest and has the horizontal velocity profile\n all over the domain\n\n¹ To be fully correct, LBM considers the compressible Navier-Stokes Equations.\nThis can also be seen by the fact that we have a changing macroscopic density over\nthe domain and that we actively use it throughout the computations. However, our\nflow speeds are below the 0.3 Mach limit which results in only minor density\nfluctuations. Hence, the fluid behaves almost incompressible. \n\n------\n\nSolution strategy:\n\nDiscretize the domain into a Cartesian mesh. Each grid vertex is associated\nwith 9 discrete velocities (D2Q9) and 2 macroscopic velocities. Then iterate\nover time.\n\n\n1. Apply outflow boundary condition on the right boundary\n\n2. Compute Macroscopic Quantities (density and velocities)\n\n3. Apply Inflow Profile by Zou/He Dirichlet Boundary Condition\n on the left boundary\n\n4. Compute the discrete equilibria velocities\n\n5. Perform a Collision step according to BGK (Bhatnagar–Gross–Krook)\n\n6. Apply Bounce-Back Boundary Conditions on the cylinder obstacle\n\n7. Stream alongside the lattice velocities\n\n8. Advance in time (repeat the loop)\n\n\nThe 7th step implicitly yields the periodic Boundary Conditions at\nthe top and bottom boundary.\n\n------\n\nEmployed Discretization:\n\nD2Q9 grid, i.e. 2-dim space with 9 discrete\nvelocities per node. In Other words the 2d space is discretized into\nN_x by N_y by 9 points.\n\n 6 2 5\n \\ | /\n 3 - 0 - 1\n / | \\\n 7 4 8 \n\nTherefore we have the shapes:\n\n- macroscopic velocity : (N_x, N_y, 2)\n- discrete velocity : (N_x, N_y, 9)\n- density : (N_x, N_y)\n\n\n------\n\nLattice Boltzmann Computations\n\nDensity:\n\nρ = ∑ᵢ fᵢ\n\n\nVelocities:\n\nu = 1/ρ ∑ᵢ fᵢ cᵢ\n\n\nEquilibrium:\n\nfᵢᵉ = ρ Wᵢ (1 + 3 cᵢ ⋅ u + 9/2 (cᵢ ⋅ u)² − 3/2 ||u||₂²)\n\n\nBGK Collision:\n\nfᵢ ← fᵢ − ω (fᵢ − fᵢᵉ)\n\n\nwith the following quantities:\n\nfᵢ : Discrete velocities\nfᵢᵉ : Equilibrium discrete velocities\nρ : Density\n∑ᵢ : Summation over all discrete velocities\ncᵢ : Lattice Velocities\nWᵢ : Lattice Weights\nω : Relaxation factor\n\n------\n\nThe flow configuration is defined using the Reynolds Number\n\nRe = (U R) / ν\n\nwith:\n\nRe : Reynolds Number\nU : Inflow Velocity\nR : Cylinder Radius\nν : Kinematic Viscosity\n\nCan be re-arranged in terms of the kinematic viscosity\n\nν = (U R) / Re\n\nThen the relaxation factor is computed according to\n\nω = 1 / (3 ν + 0.5)\n\n------\n\nNote that this scheme can become unstable for Reynoldsnumbers >~ 350 ²\n\n² Note that the stability of the D2Q9 scheme is mathematically not\nlinked to the Reynoldsnumber. Just use this as a reference. 
Stability\nfor this scheme is realted to the velocity magnitude.\nConsequentially, the actual limiting factor is the Mach number (the\nratio between velocity magnitude and the speed of sound).\n\n\"\"\"\nimport jax\nimport jax.numpy as jnp\nimport matplotlib.pyplot as plt\nimport cmasher as cmr\nfrom tqdm import tqdm\n\nN_ITERATIONS = 15_000\nREYNOLDS_NUMBER = 80\n\nN_POINTS_X = 300\nN_POINTS_Y = 50\n\nCYLINDER_CENTER_INDEX_X = N_POINTS_X // 5\nCYLINDER_CENTER_INDEX_Y = N_POINTS_Y // 2\nCYLINDER_RADIUS_INDICES = N_POINTS_Y // 9\n\nMAX_HORIZONTAL_INFLOW_VELOCITY = 0.04\n\nVISUALIZE = True\nPLOT_EVERY_N_STEPS = 100\nSKIP_FIRST_N_ITERATIONS = 5000\n\n\nr\"\"\"\nLBM Grid: D2Q9\n 6 2 5\n \\ | /\n 3 - 0 - 1\n / | \\\n 7 4 8 \n\"\"\"\n\nN_DISCRETE_VELOCITIES = 9\n\nLATTICE_VELOCITIES = jnp.array([\n [ 0, 1, 0, -1, 0, 1, -1, -1, 1,],\n [ 0, 0, 1, 0, -1, 1, 1, -1, -1,]\n])\n\nLATTICE_INDICES = jnp.array([\n 0, 1, 2, 3, 4, 5, 6, 7, 8,\n])\n\nOPPOSITE_LATTICE_INDICES = jnp.array([\n 0, 3, 4, 1, 2, 7, 8, 5, 6,\n])\n\nLATTICE_WEIGHTS = jnp.array([\n 4/9, # Center Velocity [0,]\n 1/9, 1/9, 1/9, 1/9, # Axis-Aligned Velocities [1, 2, 3, 4]\n 1/36, 1/36, 1/36, 1/36, # 45 ° Velocities [5, 6, 7, 8]\n])\n\nRIGHT_VELOCITIES = jnp.array([1, 5, 8])\nUP_VELOCITIES = jnp.array([2, 5, 6])\nLEFT_VELOCITIES = jnp.array([3, 6, 7])\nDOWN_VELOCITIES = jnp.array([4, 7, 8])\nPURE_VERTICAL_VELOCITIES = jnp.array([0, 2, 4])\nPURE_HORIZONTAL_VELOCITIES = jnp.array([0, 1, 3])\n\n\ndef get_density(discrete_velocities):\n density = jnp.sum(discrete_velocities, axis=-1)\n\n return density\n\ndef get_macroscopic_velocities(discrete_velocities, density):\n macroscopic_velocities = jnp.einsum(\n \"NMQ,dQ->NMd\",\n discrete_velocities,\n LATTICE_VELOCITIES,\n ) / density[..., jnp.newaxis]\n\n return macroscopic_velocities\n\ndef get_equilibrium_discrete_velocities(macroscopic_velocities, density):\n projected_discrete_velocities = jnp.einsum(\n \"dQ,NMd->NMQ\",\n LATTICE_VELOCITIES,\n macroscopic_velocities,\n )\n macroscopic_velocity_magnitude = jnp.linalg.norm(\n macroscopic_velocities,\n axis=-1,\n ord=2,\n )\n equilibrium_discrete_velocities = (\n density[..., jnp.newaxis]\n *\n LATTICE_WEIGHTS[jnp.newaxis, jnp.newaxis, :]\n *\n (\n 1\n +\n 3 * projected_discrete_velocities\n +\n 9/2 * projected_discrete_velocities**2\n -\n 3/2 * macroscopic_velocity_magnitude[..., jnp.newaxis]**2\n )\n )\n\n return equilibrium_discrete_velocities\n\ndef main():\n jax.config.update(\"jax_enable_x64\", True)\n\n kinematic_viscosity = (\n (\n MAX_HORIZONTAL_INFLOW_VELOCITY\n *\n CYLINDER_RADIUS_INDICES\n ) / (\n REYNOLDS_NUMBER\n )\n )\n relaxation_omega = (\n (\n 1.0\n ) / (\n 3.0\n *\n kinematic_viscosity\n +\n 0.5\n )\n )\n\n # Define a mesh\n x = jnp.arange(N_POINTS_X)\n y = jnp.arange(N_POINTS_Y)\n X, Y = jnp.meshgrid(x, y, indexing=\"ij\")\n\n # Obstacle Mask: An array of the shape like X or Y, but contains True if the\n # point belongs to the obstacle and False if not\n obstacle_mask = (\n jnp.sqrt(\n (\n X\n -\n CYLINDER_CENTER_INDEX_X\n )**2\n +\n (\n Y\n -\n CYLINDER_CENTER_INDEX_Y\n )**2\n )\n <\n CYLINDER_RADIUS_INDICES\n )\n\n velocity_profile = jnp.zeros((N_POINTS_X, N_POINTS_Y, 2))\n velocity_profile = velocity_profile.at[:, :, 0].set(MAX_HORIZONTAL_INFLOW_VELOCITY)\n\n @jax.jit\n def update(discrete_velocities_prev):\n # (1) Prescribe the outflow BC on the right boundary\n discrete_velocities_prev = discrete_velocities_prev.at[-1, :, LEFT_VELOCITIES].set(\n discrete_velocities_prev[-2, :, LEFT_VELOCITIES]\n )\n\n # (2) Macroscopic 
Velocities\n density_prev = get_density(discrete_velocities_prev)\n macroscopic_velocities_prev = get_macroscopic_velocities(\n discrete_velocities_prev,\n density_prev,\n )\n\n # (3) Prescribe Inflow Dirichlet BC using Zou/He scheme\n macroscopic_velocities_prev =\\\n macroscopic_velocities_prev.at[0, 1:-1, :].set(\n velocity_profile[0, 1:-1, :]\n )\n density_prev = density_prev.at[0, :].set(\n (\n get_density(discrete_velocities_prev[0, :, PURE_VERTICAL_VELOCITIES].T)\n +\n 2 *\n get_density(discrete_velocities_prev[0, :, LEFT_VELOCITIES].T)\n ) / (\n 1 - macroscopic_velocities_prev[0, :, 0]\n )\n )\n\n # (4) Compute discrete Equilibria velocities\n equilibrium_discrete_velocities = get_equilibrium_discrete_velocities(\n macroscopic_velocities_prev,\n density_prev,\n )\n\n # (3) Belongs to the Zou/He scheme\n discrete_velocities_prev = \\\n discrete_velocities_prev.at[0, :, RIGHT_VELOCITIES].set(\n equilibrium_discrete_velocities[0, :, RIGHT_VELOCITIES]\n )\n \n # (5) Collide according to BGK\n discrete_velocities_post_collision = (\n discrete_velocities_prev\n -\n relaxation_omega\n *\n (\n discrete_velocities_prev\n -\n equilibrium_discrete_velocities\n )\n )\n\n # (6) Bounce-Back Boundary Conditions to enfore the no-slip\n for i in range(N_DISCRETE_VELOCITIES):\n discrete_velocities_post_collision =\\\n discrete_velocities_post_collision.at[obstacle_mask, LATTICE_INDICES[i]].set(\n discrete_velocities_prev[obstacle_mask, OPPOSITE_LATTICE_INDICES[i]]\n )\n \n # (7) Stream alongside lattice velocities\n discrete_velocities_streamed = discrete_velocities_post_collision\n for i in range(N_DISCRETE_VELOCITIES):\n discrete_velocities_streamed = discrete_velocities_streamed.at[:, :, i].set(\n jnp.roll(\n jnp.roll(\n discrete_velocities_post_collision[:, :, i],\n LATTICE_VELOCITIES[0, i],\n axis=0,\n ),\n LATTICE_VELOCITIES[1, i],\n axis=1,\n )\n )\n \n return discrete_velocities_streamed\n\n\n discrete_velocities_prev = get_equilibrium_discrete_velocities(\n velocity_profile,\n jnp.ones((N_POINTS_X, N_POINTS_Y)),\n )\n\n plt.style.use(\"dark_background\")\n plt.figure(figsize=(15, 6), dpi=100)\n\n for iteration_index in tqdm(range(N_ITERATIONS)):\n discrete_velocities_next = update(discrete_velocities_prev)\n\n discrete_velocities_prev = discrete_velocities_next\n\n if iteration_index % PLOT_EVERY_N_STEPS == 0 and VISUALIZE and iteration_index > SKIP_FIRST_N_ITERATIONS:\n density = get_density(discrete_velocities_next)\n macroscopic_velocities = get_macroscopic_velocities(\n discrete_velocities_next,\n density,\n )\n velocity_magnitude = jnp.linalg.norm(\n macroscopic_velocities,\n axis=-1,\n ord=2,\n )\n d_u__d_x, d_u__d_y = jnp.gradient(macroscopic_velocities[..., 0])\n d_v__d_x, d_v__d_y = jnp.gradient(macroscopic_velocities[..., 1])\n curl = (d_u__d_y - d_v__d_x)\n\n # Velocity Magnitude Contour Plot in the top\n plt.subplot(211)\n plt.contourf(\n X,\n Y,\n velocity_magnitude,\n levels=50,\n cmap=cmr.amber,\n )\n plt.colorbar().set_label(\"Velocity Magnitude\")\n plt.gca().add_patch(plt.Circle(\n (CYLINDER_CENTER_INDEX_X, CYLINDER_CENTER_INDEX_Y),\n CYLINDER_RADIUS_INDICES,\n color=\"darkgreen\",\n ))\n\n # Vorticity Magnitude Contour PLot in the bottom\n plt.subplot(212)\n plt.contourf(\n X,\n Y, \n curl,\n levels=50,\n cmap=cmr.redshift,\n vmin=-0.02,\n vmax= 0.02,\n )\n plt.colorbar().set_label(\"Vorticity Magnitude\")\n plt.gca().add_patch(plt.Circle(\n (CYLINDER_CENTER_INDEX_X, CYLINDER_CENTER_INDEX_Y),\n CYLINDER_RADIUS_INDICES,\n color=\"darkgreen\",\n ))\n\n plt.draw()\n 
plt.pause(0.0001)\n plt.clf()\n \n if VISUALIZE:\n plt.show()\n\n\n\nif __name__ == \"__main__\":\n main()\n"
] | [
[
"matplotlib.pyplot.style.use",
"matplotlib.pyplot.pause",
"matplotlib.pyplot.draw",
"matplotlib.pyplot.Circle",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.gca",
"matplotlib.pyplot.clf",
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.show",
"matplotlib.pyplot.colorbar",
"matplotlib.pyplot.contourf"
]
] |
take2rohit/monk_v1 | [
"9c567bf2c8b571021b120d879ba9edf7751b9f92"
] | [
"monk/system_unit_tests/keras/test_block_squeezenet_fire.py"
] | [
"import os\nimport sys\nsys.path.append(\"../../../../monk_v1/\");\nsys.path.append(\"../../../monk/\");\nimport psutil\n\nfrom keras_prototype import prototype\nfrom compare_prototype import compare\nfrom common import print_start\nfrom common import print_status\n\nimport tensorflow as tf\nif(tf.__version__[0] == '2'):\n import tensorflow.compat.v1 as tf\n tf.disable_v2_behavior()\nimport numpy as np\n\n\ndef test_block_squeezenet_fire(system_dict):\n forward = True;\n\n test = \"test_block_squeezenet_fire\";\n system_dict[\"total_tests\"] += 1;\n print_start(test, system_dict[\"total_tests\"])\n if(forward):\n try:\n gtf = prototype(verbose=0);\n gtf.Prototype(\"sample-project-1\", \"sample-experiment-1\");\n\n\n network = [];\n network.append(gtf.squeezenet_fire_block(squeeze_channels=16, expand_channels_1x1=32, expand_channels_3x3=64));\n gtf.Compile_Network(network, data_shape=(1, 64, 64), use_gpu=False);\n\n x = tf.placeholder(tf.float32, shape=(1, 64, 64, 1))\n y = gtf.system_dict[\"local\"][\"model\"](x); \n\n system_dict[\"successful_tests\"] += 1;\n print_status(\"Pass\");\n\n except Exception as e:\n system_dict[\"failed_tests_exceptions\"].append(e);\n system_dict[\"failed_tests_lists\"].append(test);\n forward = False;\n print_status(\"Fail\");\n else:\n system_dict[\"skipped_tests_lists\"].append(test);\n print_status(\"Skipped\");\n\n return system_dict\n"
] | [
[
"tensorflow.compat.v1.placeholder",
"tensorflow.compat.v1.disable_v2_behavior"
]
] |
wangqingyu985/OpenStereo | [
"91d605357d65281b99b0d8cf45e3f15f0543c9fa"
] | [
"models/CFPNet/submodule.py"
] | [
"from __future__ import print_function\nimport torch\nimport torch.nn as nn\nimport torch.utils.data\nfrom torch.autograd import Variable\nimport torch.nn.functional as F\nimport numpy as np\n\n\ndef convbn(in_planes, out_planes, kernel_size, stride, pad, dilation):\n return nn.Sequential(nn.Conv2d(in_planes, out_planes, kernel_size=kernel_size, stride=stride, padding=dilation if dilation > 1 else pad, dilation=dilation, bias=False),\n nn.BatchNorm2d(out_planes)\n )\n\n\ndef convbn_3d(in_planes, out_planes, kernel_size, stride, pad):\n return nn.Sequential(nn.Conv3d(in_planes, out_planes, kernel_size=kernel_size, padding=pad, stride=stride, bias=False),\n nn.BatchNorm3d(out_planes)\n )\n\n\nclass BasicBlock(nn.Module):\n expansion = 1\n\n def __init__(self, inplanes, planes, kernel_size, stride, downsample, pad, dilation):\n super(BasicBlock, self).__init__()\n\n self.conv1 = nn.Sequential(convbn(inplanes, planes, kernel_size, stride, pad, dilation),\n nn.ReLU(inplace=True)\n )\n self.conv2 = convbn(planes, planes, kernel_size, 1, pad, dilation)\n self.stride = stride\n self.downsample = downsample\n\n def forward(self, x):\n out = self.conv1(x)\n out = self.conv2(out)\n if self.downsample is not None:\n x = self.downsample(x)\n out += x\n return out\n\n\nclass matchshifted(nn.Module):\n def __init__(self):\n super(matchshifted, self).__init__()\n\n def forward(self, left, right, shift):\n batch, filters, height, width = left.size()\n shifted_left = F.pad(torch.index_select(left, 3, Variable(torch.LongTensor([i for i in range(shift, width)])).cuda()), (shift, 0, 0, 0))\n shifted_right = F.pad(torch.index_select(right, 3, Variable(torch.LongTensor([i for i in range(width-shift)])).cuda()), (shift, 0, 0, 0))\n out = torch.cat((shifted_left, shifted_right), 1).view(batch, filters*2, 1, height, width)\n return out\n\n\nclass disparityregression(nn.Module):\n def __init__(self, maxdisp):\n super(disparityregression, self).__init__()\n self.disp = Variable(torch.Tensor(np.reshape(np.array(range(maxdisp)), [1, maxdisp, 1, 1])).cuda(), requires_grad=False)\n\n def forward(self, x):\n disp = self.disp.repeat(x.size()[0], 1, x.size()[2], x.size()[3])\n out = torch.sum(x*disp, 1)\n return out\n\n\nclass feature_extraction(nn.Module):\n def __init__(self):\n super(feature_extraction, self).__init__()\n self.inplanes = 32\n self.layer0 = nn.Sequential(convbn(in_planes=3, out_planes=32, kernel_size=3, stride=1, pad=1, dilation=1),\n nn.ReLU(inplace=True)\n )\n self.layer1 = self._make_layer(block=BasicBlock, planes=32, blocks=3, kernel_size=3, stride=2, pad=1, dilation=1, order=1)\n self.layer2 = self._make_layer(BasicBlock, 64, 8, 3, 2, 1, 1, 1)\n self.layer3 = self._make_layer(BasicBlock, 128, 3, 3, 2, 1, 1, 2)\n\n self.layer1_after = nn.Sequential(convbn(32, 32, 3, 2, 1, 1),\n nn.ReLU(inplace=True))\n self.layer2_after = nn.Sequential(convbn(32, 64, 3, 2, 1, 1),\n nn.ReLU(inplace=True))\n self.layer3_after = nn.Sequential(convbn(64, 128, 3, 2, 1, 1),\n nn.ReLU(inplace=True))\n self.layer1_final = nn.Sequential(convbn(32, 32, 3, 2, 1, 1),\n nn.ReLU(inplace=True))\n\n self.dilat1 = nn.Sequential(convbn(128, 32, 3, 1, 1, 32),\n nn.ReLU(inplace=True),\n convbn(32, 32, 3, 1, 1, 1),\n nn.ReLU(inplace=True))\n\n self.dilat2 = nn.Sequential(convbn(128, 32, 3, 1, 1, 16),\n nn.ReLU(inplace=True),\n convbn(32, 32, 3, 1, 1, 1),\n nn.ReLU(inplace=True))\n\n self.dilat3 = nn.Sequential(convbn(128, 32, 3, 1, 1, 8),\n nn.ReLU(inplace=True),\n convbn(32, 32, 3, 1, 1, 4),\n nn.ReLU(inplace=True))\n\n self.dilat4 = 
nn.Sequential(convbn(128, 32, 3, 1, 1, 6),\n nn.ReLU(inplace=True),\n convbn(32, 32, 3, 1, 1, 1),\n nn.ReLU(inplace=True))\n\n self.branch1 = nn.Sequential(nn.AvgPool2d((64, 64), stride=(64, 64)),\n convbn(128, 32, 1, 1, 0, 1),\n nn.ReLU(inplace=True))\n\n self.branch2 = nn.Sequential(nn.AvgPool2d((32, 32), stride=(32, 32)),\n convbn(128, 32, 1, 1, 0, 1),\n nn.ReLU(inplace=True))\n\n self.branch3 = nn.Sequential(nn.AvgPool2d((16, 16), stride=(16, 16)),\n convbn(128, 32, 1, 1, 0, 1),\n nn.ReLU(inplace=True))\n\n self.branch4 = nn.Sequential(nn.AvgPool2d((8, 8), stride=(8, 8)),\n convbn(128, 32, 1, 1, 0, 1),\n nn.ReLU(inplace=True))\n\n self.concat_dilate_pool = nn.Sequential(convbn(64, 32, 3, 1, 1, 1),\n nn.ReLU(inplace=True),\n nn.Conv2d(32, 32, kernel_size=1, padding=0, stride=1, bias=False))\n\n self.lastconv = nn.Sequential(convbn(352, 128, 3, 1, 1, 1),\n nn.ReLU(inplace=True),\n nn.Conv2d(128, 32, kernel_size=1, padding=0, stride=1, bias=False))\n\n def _make_layer(self, block, planes, blocks, kernel_size, stride, pad, dilation, order):\n downsample = None\n if stride != 1:\n downsample = nn.Sequential(\n nn.Conv2d(self.inplanes * order, planes * block.expansion,\n kernel_size=1, stride=stride, bias=False),\n nn.BatchNorm2d(planes * block.expansion),)\n layers = []\n layers.append(block(self.inplanes*order, planes, kernel_size, stride, downsample, pad, dilation))\n if blocks != 1:\n for i in range(1, blocks):\n layers.append(block(planes, planes, kernel_size, 1, None, pad, dilation))\n return nn.Sequential(*layers)\n\n def forward(self, x):\n out_0 = self.layer0(x)\n out_1 = self.layer1(out_0)\n out_1_a = self.layer1_after(out_0)\n out_1 = out_1 + out_1_a\n out_2 = self.layer2(out_1)\n out_2_a = self.layer2_after(out_1)\n out_2 = out_2 + out_2_a\n out_3 = self.layer3(out_2)\n out_3_a = self.layer3_after(out_2)\n out_3 = out_3 + out_3_a\n out_1 = self.layer1_final(out_1)\n inPooling = F.upsample(out_3, (out_2.size()[2], out_2.size()[3]), mode='bilinear')\n #Pooling \n output_dilate1 = self.dilat1(inPooling)\n output_dilate2 = self.dilat2(inPooling)\n output_dilate3 = self.dilat3(inPooling)\n output_dilate4 = self.dilat4(inPooling)\n\n output_branch1 = self.branch1(inPooling)\n output_branch1 = F.upsample(output_branch1, (inPooling.size()[2], inPooling.size()[3]), mode='bilinear')\n\n output_branch2 = self.branch2(inPooling)\n output_branch2 = F.upsample(output_branch2, (inPooling.size()[2], inPooling.size()[3]), mode='bilinear')\n\n output_branch3 = self.branch3(inPooling)\n output_branch3 = F.upsample(output_branch3, (inPooling.size()[2], inPooling.size()[3]), mode='bilinear')\n\n output_branch4 = self.branch4(inPooling)\n output_branch4 = F.upsample(output_branch4, (inPooling.size()[2], inPooling.size()[3]), mode='bilinear')\n\n #concat dilate and avgpool\n out_fusion1 = torch.cat((output_dilate1, output_branch1), 1)\n out_fusion1 = self.concat_dilate_pool(out_fusion1)\n\n out_fusion2 = torch.cat((output_dilate2, output_branch2), 1)\n out_fusion2 = self.concat_dilate_pool(out_fusion2)\n\n out_fusion3 = torch.cat((output_dilate3, output_branch3), 1)\n out_fusion3 = self.concat_dilate_pool(out_fusion3)\n\n out_fusion4 = torch.cat((output_dilate4, output_branch4), 1)\n out_fusion4 = self.concat_dilate_pool(out_fusion4)\n\n output_feature = torch.cat((out_1, out_2, inPooling, out_fusion1, out_fusion2, out_fusion3, out_fusion4), 1)\n output_feature = self.lastconv(output_feature)\n\n return output_feature\n"
] | [
[
"torch.sum",
"torch.nn.BatchNorm2d",
"torch.nn.BatchNorm3d",
"torch.nn.ReLU",
"torch.nn.Conv2d",
"torch.nn.Sequential",
"torch.nn.AvgPool2d",
"torch.cat",
"torch.nn.Conv3d"
]
] |
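The `code` field of the record above appears to define a stereo-matching feature extractor built from a small `convbn` helper, and the `apis` field lists the torch calls extracted from that string. The sketch below is not part of the dataset record; it restates the `convbn` helper in isolation so the extracted calls (`nn.Conv2d`, `nn.BatchNorm2d`, `nn.Sequential`, `nn.ReLU`) can be seen in a runnable form. The input shape is an illustrative assumption.

```python
# Minimal sketch, not from the dataset: the record's convbn helper in isolation.
import torch
import torch.nn as nn

def convbn(in_planes, out_planes, kernel_size, stride, pad, dilation):
    # 2D convolution + batch norm; padding is widened when dilation > 1, as in the record.
    return nn.Sequential(
        nn.Conv2d(in_planes, out_planes, kernel_size=kernel_size, stride=stride,
                  padding=dilation if dilation > 1 else pad, dilation=dilation, bias=False),
        nn.BatchNorm2d(out_planes),
    )

x = torch.randn(1, 3, 64, 128)                                   # dummy image batch (assumed shape)
layer0 = nn.Sequential(convbn(3, 32, 3, 1, 1, 1), nn.ReLU(inplace=True))
print(layer0(x).shape)                                           # torch.Size([1, 32, 64, 128])
```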
vincentadam87/SVGPs | [
"0de1194bf0f24997148dfce0cd6fbffae16fb3bc"
] | [
"SVGPs/functions.py"
] | [
"# Copyright 2016 James Hensman, alexggmatthews\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# ------------------------------------------\n# Modification notice:\n# This file was modified by Vincent ADAM\n# ------------------------------------------\n\nimport tensorflow as tf\nfrom settings import float_type\nfrom quadrature import hermgauss\nimport numpy as np\n\n\ndef eye(N):\n \"\"\"\n An identitiy matrix\n \"\"\"\n return tf.diag(tf.ones(tf.stack([N, ]), dtype=float_type))\n\n\ndef variational_expectations( Fmu, Fvar, phi, num_gauss_hermite_points=20):\n \"\"\"\n Compute the expected value of a function phi, given a Gaussian\n distribution for the input values.\n if\n q(f) = N(Fmu, Fvar)\n then this method computes\n \\int phi(f) q(f) df.\n Here, we implement a default Gauss-Hermite quadrature routine\n \"\"\"\n gh_x, gh_w = hermgauss(num_gauss_hermite_points)\n gh_x = gh_x.reshape(1, -1)\n gh_w = gh_w.reshape(-1, 1) / np.sqrt(np.pi)\n shape = tf.shape(Fmu)\n Fmu, Fvar = [tf.reshape(e, (-1, 1)) for e in (Fmu, Fvar)]\n X = gh_x * tf.sqrt(2.0 * Fvar) + Fmu\n logp = phi(X)\n return tf.reshape(tf.matmul(logp, gh_w), shape)\n\n\n\nimport tensorflow as tf\n\ndef block_diagonal(matrices, dtype=tf.float32):\n \"\"\"Constructs block-diagonal matrices from a list of batched 2D tensors.\n Args:\n matrices: A list of Tensors with shape [..., N_i, M_i] (i.e. a list of\n matrices with the same batch dimension).\n dtype: Data type to use. The Tensors in `matrices` must match this dtype.\n Returns:\n A matrix with the input matrices stacked along its main diagonal, having\n shape [..., \\sum_i N_i, \\sum_i M_i].\n \n \"\"\"\n matrices = [tf.convert_to_tensor(matrix, dtype=dtype) for matrix in matrices]\n blocked_rows = tf.Dimension(0)\n blocked_cols = tf.Dimension(0)\n batch_shape = tf.TensorShape(None)\n for matrix in matrices:\n full_matrix_shape = matrix.get_shape().with_rank_at_least(2)\n batch_shape = batch_shape.merge_with(full_matrix_shape[:-2])\n blocked_rows += full_matrix_shape[-2]\n blocked_cols += full_matrix_shape[-1]\n ret_columns_list = []\n for matrix in matrices:\n matrix_shape = tf.shape(matrix)\n ret_columns_list.append(matrix_shape[-1])\n ret_columns = tf.add_n(ret_columns_list)\n row_blocks = []\n current_column = 0\n for matrix in matrices:\n matrix_shape = tf.shape(matrix)\n row_before_length = current_column\n current_column += matrix_shape[-1]\n row_after_length = ret_columns - current_column\n row_blocks.append(tf.pad(\n tensor=matrix,\n paddings=tf.concat(\n [tf.zeros([tf.rank(matrix) - 1, 2], dtype=tf.int32),\n [(row_before_length, row_after_length)]],\n axis=0)))\n blocked = tf.concat(row_blocks, -2)\n blocked.set_shape(batch_shape.concatenate((blocked_rows, blocked_cols)))\n return blocked"
] | [
[
"tensorflow.stack",
"tensorflow.add_n",
"tensorflow.shape",
"tensorflow.reshape",
"tensorflow.sqrt",
"tensorflow.Dimension",
"tensorflow.matmul",
"tensorflow.TensorShape",
"tensorflow.convert_to_tensor",
"tensorflow.concat",
"numpy.sqrt",
"tensorflow.rank"
]
] |
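The record above implements `variational_expectations` with Gauss-Hermite quadrature: nodes are shifted by the change of variables f = mu + sqrt(2·var)·x and the weights are divided by sqrt(pi). The NumPy sketch below is an illustration added for clarity, not taken from the file; it applies the same rule and checks it against the closed form E[f^2] = mu^2 + var.

```python
# NumPy sketch of the Gauss-Hermite rule used by variational_expectations in the
# record above; the function name and test values are illustrative assumptions.
import numpy as np

def variational_expectations_np(Fmu, Fvar, phi, num_gauss_hermite_points=20):
    gh_x, gh_w = np.polynomial.hermite.hermgauss(num_gauss_hermite_points)
    gh_w = gh_w / np.sqrt(np.pi)                                  # absorb the Gaussian normaliser
    X = Fmu[..., None] + np.sqrt(2.0 * Fvar)[..., None] * gh_x    # f = mu + sqrt(2 var) x
    return (phi(X) * gh_w).sum(axis=-1)                           # \int phi(f) N(f; mu, var) df

Fmu, Fvar = np.array([0.0, 1.0]), np.array([1.0, 0.5])
print(variational_expectations_np(Fmu, Fvar, np.square))          # ~[1.0, 1.5] = Fvar, Fmu**2 + Fvar
```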
ttung/napari | [
"fa97a05b763dacc71d4c47e6b4b2a97c208e3551"
] | [
"napari/components/add_layers_mixin.py"
] | [
"import itertools\nimport numpy as np\n\nfrom .. import layers\nfrom ..utils import colormaps\nfrom ..utils.misc import ensure_iterable, is_iterable\nfrom ..utils import io\n\n\nclass AddLayersMixin:\n \"\"\"A mixin that adds add_* methods for adding layers to the ViewerModel.\n\n Each method corresponds to adding one or more layers to the viewer.\n Methods that just add a single layer contain the keyword arguments and\n copies of the documentation from that the layer. These are copied and\n pasted instead of being autogenerated because IDEs like PyCharm parse the\n source code for docs instead of pulling it up dynamically.\n\n These methods are separated into a mixin to keep the ViewerModel class\n easier to read and make these methods easier to maintain.\n \"\"\"\n\n def add_layer(self, layer):\n \"\"\"Add a layer to the viewer.\n\n Parameters\n ----------\n layer : napari.layers.Layer\n Layer to add.\n \"\"\"\n layer.events.select.connect(self._update_active_layer)\n layer.events.deselect.connect(self._update_active_layer)\n layer.events.status.connect(self._update_status)\n layer.events.help.connect(self._update_help)\n layer.events.interactive.connect(self._update_interactive)\n layer.events.cursor.connect(self._update_cursor)\n layer.events.cursor_size.connect(self._update_cursor_size)\n layer.events.data.connect(self._on_layers_change)\n layer.dims.events.ndisplay.connect(self._on_layers_change)\n layer.dims.events.order.connect(self._on_layers_change)\n layer.dims.events.range.connect(self._on_layers_change)\n self.layers.append(layer)\n self._update_layers(layers=[layer])\n\n if len(self.layers) == 1:\n self.reset_view()\n\n def add_image(\n self,\n data=None,\n *,\n channel_axis=None,\n rgb=None,\n is_pyramid=None,\n colormap=None,\n contrast_limits=None,\n gamma=1,\n interpolation='nearest',\n rendering='mip',\n iso_threshold=0.5,\n attenuation=0.5,\n name=None,\n metadata=None,\n scale=None,\n translate=None,\n opacity=1,\n blending=None,\n visible=True,\n path=None,\n ):\n \"\"\"Add an image layer to the layers list.\n\n Parameters\n ----------\n data : array or list of array\n Image data. Can be N dimensional. If the last dimension has length\n 3 or 4 can be interpreted as RGB or RGBA if rgb is `True`. If a\n list and arrays are decreasing in shape then the data is treated as\n an image pyramid.\n channel_axis : int, optional\n Axis to expand image along.\n rgb : bool\n Whether the image is rgb RGB or RGBA. If not specified by user and\n the last dimension of the data has length 3 or 4 it will be set as\n `True`. If `False` the image is interpreted as a luminance image.\n is_pyramid : bool\n Whether the data is an image pyramid or not. Pyramid data is\n represented by a list of array like image data. If not specified by\n the user and if the data is a list of arrays that decrease in shape\n then it will be taken to be a pyramid. The first image in the list\n should be the largest.\n colormap : str, vispy.Color.Colormap, tuple, dict, list\n Colormaps to use for luminance images. If a string must be the name\n of a supported colormap from vispy or matplotlib. If a tuple the\n first value must be a string to assign as a name to a colormap and\n the second item must be a Colormap. If a dict the key must be a\n string to assign as a name to a colormap and the value must be a\n Colormap. 
If a list then must be same length as the axis that is\n being expanded as channels, and each colormap is applied to each\n new image layer.\n contrast_limits : list (2,)\n Color limits to be used for determining the colormap bounds for\n luminance images. If not passed is calculated as the min and max of\n the image. If list of lists then must be same length as the axis\n that is being expanded and then each colormap is applied to each\n image.\n gamma : list, float\n Gamma correction for determining colormap linearity. Defaults to 1.\n If a list then must be same length as the axis that is being\n expanded and then each entry in the list is applied to each image.\n interpolation : str\n Interpolation mode used by vispy. Must be one of our supported\n modes.\n rendering : str\n Rendering mode used by vispy. Must be one of our supported\n modes.\n iso_threshold : float\n Threshold for isosurface.\n attenuation : float\n Attenuation rate for attenuated maximum intensity projection.\n name : str\n Name of the layer.\n metadata : dict\n Layer metadata.\n scale : tuple of float\n Scale factors for the layer.\n translate : tuple of float\n Translation values for the layer.\n opacity : float\n Opacity of the layer visual, between 0.0 and 1.0.\n blending : str\n One of a list of preset blending modes that determines how RGB and\n alpha values of the layer visual get mixed. Allowed values are\n {'opaque', 'translucent', and 'additive'}.\n visible : bool\n Whether the layer visual is currently being displayed.\n path : str or list of str\n Path or list of paths to image data. Paths can be passed as strings\n or `pathlib.Path` instances.\n\n Returns\n -------\n layer : :class:`napari.layers.Image` or list\n The newly-created image layer or list of image layers.\n \"\"\"\n if data is None and path is None:\n raise ValueError(\"One of either data or path must be provided\")\n elif data is not None and path is not None:\n raise ValueError(\"Only one of data or path can be provided\")\n elif data is None:\n data = io.magic_imread(path)\n\n if channel_axis is None:\n if colormap is None:\n colormap = 'gray'\n if blending is None:\n blending = 'translucent'\n layer = layers.Image(\n data,\n rgb=rgb,\n is_pyramid=is_pyramid,\n colormap=colormap,\n contrast_limits=contrast_limits,\n gamma=gamma,\n interpolation=interpolation,\n rendering=rendering,\n iso_threshold=iso_threshold,\n attenuation=attenuation,\n name=name,\n metadata=metadata,\n scale=scale,\n translate=translate,\n opacity=opacity,\n blending=blending,\n visible=visible,\n )\n self.add_layer(layer)\n return layer\n else:\n if is_pyramid:\n n_channels = data[0].shape[channel_axis]\n else:\n n_channels = data.shape[channel_axis]\n\n name = ensure_iterable(name)\n\n if blending is None:\n blending = 'additive'\n\n if colormap is None:\n if n_channels < 3:\n colormap = colormaps.MAGENTA_GREEN\n else:\n colormap = itertools.cycle(colormaps.CYMRGB)\n else:\n colormap = ensure_iterable(colormap)\n\n # If one pair of clim values is passed then need to iterate them to\n # all layers.\n if contrast_limits is not None and not is_iterable(\n contrast_limits[0]\n ):\n contrast_limits = itertools.repeat(contrast_limits)\n else:\n contrast_limits = ensure_iterable(contrast_limits)\n\n gamma = ensure_iterable(gamma)\n\n layer_list = []\n zipped_args = zip(\n range(n_channels), colormap, contrast_limits, gamma, name\n )\n for i, cmap, clims, _gamma, name in zipped_args:\n if is_pyramid:\n image = [\n np.take(data[j], i, axis=channel_axis)\n for j in 
range(len(data))\n ]\n else:\n image = np.take(data, i, axis=channel_axis)\n layer = layers.Image(\n image,\n rgb=rgb,\n colormap=cmap,\n contrast_limits=clims,\n gamma=_gamma,\n interpolation=interpolation,\n rendering=rendering,\n name=name,\n metadata=metadata,\n scale=scale,\n translate=translate,\n opacity=opacity,\n blending=blending,\n visible=visible,\n )\n self.add_layer(layer)\n layer_list.append(layer)\n return layer_list\n\n def add_points(\n self,\n data=None,\n *,\n properties=None,\n symbol='o',\n size=10,\n edge_width=1,\n edge_color='black',\n edge_color_cycle=None,\n edge_colormap='viridis',\n edge_contrast_limits=None,\n face_color='white',\n face_color_cycle=None,\n face_colormap='viridis',\n face_contrast_limits=None,\n n_dimensional=False,\n name=None,\n metadata=None,\n scale=None,\n translate=None,\n opacity=1,\n blending='translucent',\n visible=True,\n ):\n \"\"\"Add a points layer to the layers list.\n\n Parameters\n ----------\n data : array (N, D)\n Coordinates for N points in D dimensions.\n properties : dict {str: array (N,)}, DataFrame\n Properties for each point. Each property should be an array of length N,\n where N is the number of points.\n symbol : str\n Symbol to be used for the point markers. Must be one of the\n following: arrow, clobber, cross, diamond, disc, hbar, ring,\n square, star, tailed_arrow, triangle_down, triangle_up, vbar, x.\n size : float, array\n Size of the point marker. If given as a scalar, all points are made\n the same size. If given as an array, size must be the same\n broadcastable to the same shape as the data.\n edge_width : float\n Width of the symbol edge in pixels.\n edge_color : str, array-like\n Color of the point marker border. Numeric color values should be RGB(A).\n edge_color_cycle : np.ndarray, list, cycle\n Cycle of colors (provided as RGBA) to map to edge_color if a\n categorical attribute is used to set face_color.\n edge_colormap : str, vispy.color.colormap.Colormap\n Colormap to set edge_color if a continuous attribute is used to set face_color.\n See vispy docs for details: http://vispy.org/color.html#vispy.color.Colormap\n edge_contrast_limits : None, (float, float)\n clims for mapping the property to a color map. These are the min and max value\n of the specified property that are mapped to 0 and 1, respectively.\n The default value is None. If set the none, the clims will be set to\n (property.min(), property.max())\n face_color : str, array-like\n Color of the point marker body. Numeric color values should be RGB(A).\n face_color_cycle : np.ndarray, list, cycle\n Cycle of colors (provided as RGBA) to map to face_color if a\n categorical attribute is used to set face_color.\n face_colormap : str, vispy.color.colormap.Colormap\n Colormap to set face_color if a continuous attribute is used to set face_color.\n See vispy docs for details: http://vispy.org/color.html#vispy.color.Colormap\n face_contrast_limits : None, (float, float)\n clims for mapping the property to a color map. These are the min and max value\n of the specified property that are mapped to 0 and 1, respectively.\n The default value is None. 
If set the none, the clims will be set to\n (property.min(), property.max())\n n_dimensional : bool\n If True, renders points not just in central plane but also in all\n n-dimensions according to specified point marker size.\n name : str\n Name of the layer.\n metadata : dict\n Layer metadata.\n scale : tuple of float\n Scale factors for the layer.\n translate : tuple of float\n Translation values for the layer.\n opacity : float\n Opacity of the layer visual, between 0.0 and 1.0.\n blending : str\n One of a list of preset blending modes that determines how RGB and\n alpha values of the layer visual get mixed. Allowed values are\n {'opaque', 'translucent', and 'additive'}.\n visible : bool\n Whether the layer visual is currently being displayed.\n\n Returns\n -------\n layer : :class:`napari.layers.Points`\n The newly-created points layer.\n\n Notes\n -----\n See vispy's marker visual docs for more details:\n http://api.vispy.org/en/latest/visuals.html#vispy.visuals.MarkersVisual\n \"\"\"\n if data is None:\n ndim = max(self.dims.ndim, 2)\n data = np.empty([0, ndim])\n\n layer = layers.Points(\n data=data,\n properties=properties,\n symbol=symbol,\n size=size,\n edge_width=edge_width,\n edge_color=edge_color,\n edge_color_cycle=edge_color_cycle,\n edge_colormap=edge_colormap,\n edge_contrast_limits=edge_contrast_limits,\n face_color=face_color,\n face_color_cycle=face_color_cycle,\n face_colormap=face_colormap,\n face_contrast_limits=face_contrast_limits,\n n_dimensional=n_dimensional,\n name=name,\n metadata=metadata,\n scale=scale,\n translate=translate,\n opacity=opacity,\n blending=blending,\n visible=visible,\n )\n self.add_layer(layer)\n return layer\n\n def add_labels(\n self,\n data=None,\n *,\n is_pyramid=None,\n num_colors=50,\n seed=0.5,\n name=None,\n metadata=None,\n scale=None,\n translate=None,\n opacity=0.7,\n blending='translucent',\n visible=True,\n path=None,\n ):\n \"\"\"Add a labels (or segmentation) layer to the layers list.\n\n An image-like layer where every pixel contains an integer ID\n corresponding to the region it belongs to.\n\n Parameters\n ----------\n data : array or list of array\n Labels data as an array or pyramid.\n is_pyramid : bool\n Whether the data is an image pyramid or not. Pyramid data is\n represented by a list of array like image data. If not specified by\n the user and if the data is a list of arrays that decrease in shape\n then it will be taken to be a pyramid. The first image in the list\n should be the largest.\n num_colors : int\n Number of unique colors to use in colormap.\n seed : float\n Seed for colormap random generator.\n name : str\n Name of the layer.\n metadata : dict\n Layer metadata.\n scale : tuple of float\n Scale factors for the layer.\n translate : tuple of float\n Translation values for the layer.\n opacity : float\n Opacity of the layer visual, between 0.0 and 1.0.\n blending : str\n One of a list of preset blending modes that determines how RGB and\n alpha values of the layer visual get mixed. Allowed values are\n {'opaque', 'translucent', and 'additive'}.\n visible : bool\n Whether the layer visual is currently being displayed.\n path : str or list of str\n Path or list of paths to image data. 
Paths can be passed as strings\n or `pathlib.Path` instances.\n\n Returns\n -------\n layer : :class:`napari.layers.Labels`\n The newly-created labels layer.\n \"\"\"\n if data is None and path is None:\n raise ValueError(\"One of either data or path must be provided\")\n elif data is not None and path is not None:\n raise ValueError(\"Only one of data or path can be provided\")\n elif data is None:\n data = io.magic_imread(path)\n\n layer = layers.Labels(\n data,\n is_pyramid=is_pyramid,\n num_colors=num_colors,\n seed=seed,\n name=name,\n metadata=metadata,\n scale=scale,\n translate=translate,\n opacity=opacity,\n blending=blending,\n visible=visible,\n )\n self.add_layer(layer)\n return layer\n\n def add_shapes(\n self,\n data=None,\n *,\n shape_type='rectangle',\n edge_width=1,\n edge_color='black',\n face_color='white',\n z_index=0,\n name=None,\n metadata=None,\n scale=None,\n translate=None,\n opacity=0.7,\n blending='translucent',\n visible=True,\n ):\n \"\"\"Add a shapes layer to the layers list.\n\n Parameters\n ----------\n data : list or array\n List of shape data, where each element is an (N, D) array of the\n N vertices of a shape in D dimensions. Can be an 3-dimensional\n array if each shape has the same number of vertices.\n shape_type : string or list\n String of shape shape_type, must be one of \"{'line', 'rectangle',\n 'ellipse', 'path', 'polygon'}\". If a list is supplied it must be\n the same length as the length of `data` and each element will be\n applied to each shape otherwise the same value will be used for all\n shapes.\n edge_width : float or list\n Thickness of lines and edges. If a list is supplied it must be the\n same length as the length of `data` and each element will be\n applied to each shape otherwise the same value will be used for all\n shapes.\n edge_color : str or list\n If string can be any color name recognized by vispy or hex value if\n starting with `#`. If array-like must be 1-dimensional array with 3\n or 4 elements. If a list is supplied it must be the same length as\n the length of `data` and each element will be applied to each shape\n otherwise the same value will be used for all shapes.\n face_color : str or list\n If string can be any color name recognized by vispy or hex value if\n starting with `#`. If array-like must be 1-dimensional array with 3\n or 4 elements. If a list is supplied it must be the same length as\n the length of `data` and each element will be applied to each shape\n otherwise the same value will be used for all shapes.\n z_index : int or list\n Specifier of z order priority. Shapes with higher z order are\n displayed ontop of others. If a list is supplied it must be the\n same length as the length of `data` and each element will be\n applied to each shape otherwise the same value will be used for all\n shapes.\n name : str\n Name of the layer.\n metadata : dict\n Layer metadata.\n scale : tuple of float\n Scale factors for the layer.\n translate : tuple of float\n Translation values for the layer.\n opacity : float or list\n Opacity of the layer visual, between 0.0 and 1.0.\n blending : str\n One of a list of preset blending modes that determines how RGB and\n alpha values of the layer visual get mixed. 
Allowed values are\n {'opaque', 'translucent', and 'additive'}.\n visible : bool\n Whether the layer visual is currently being displayed.\n\n Returns\n -------\n layer : :class:`napari.layers.Shapes`\n The newly-created shapes layer.\n \"\"\"\n if data is None:\n ndim = max(self.dims.ndim, 2)\n data = np.empty((0, 0, ndim))\n\n layer = layers.Shapes(\n data=data,\n shape_type=shape_type,\n edge_width=edge_width,\n edge_color=edge_color,\n face_color=face_color,\n z_index=z_index,\n name=name,\n metadata=metadata,\n scale=scale,\n translate=translate,\n opacity=opacity,\n blending=blending,\n visible=visible,\n )\n self.add_layer(layer)\n return layer\n\n def add_surface(\n self,\n data,\n *,\n colormap='gray',\n contrast_limits=None,\n gamma=1,\n name=None,\n metadata=None,\n scale=None,\n translate=None,\n opacity=1,\n blending='translucent',\n visible=True,\n ):\n \"\"\"Add a surface layer to the layers list.\n\n Parameters\n ----------\n data : 3-tuple of array\n The first element of the tuple is an (N, D) array of vertices of\n mesh triangles. The second is an (M, 3) array of int of indices\n of the mesh triangles. The third element is the (K0, ..., KL, N)\n array of values used to color vertices where the additional L\n dimensions are used to color the same mesh with different values.\n colormap : str, vispy.Color.Colormap, tuple, dict\n Colormap to use for luminance images. If a string must be the name\n of a supported colormap from vispy or matplotlib. If a tuple the\n first value must be a string to assign as a name to a colormap and\n the second item must be a Colormap. If a dict the key must be a\n string to assign as a name to a colormap and the value must be a\n Colormap.\n contrast_limits : list (2,)\n Color limits to be used for determining the colormap bounds for\n luminance images. If not passed is calculated as the min and max of\n the image.\n gamma : float\n Gamma correction for determining colormap linearity. Defaults to 1.\n name : str\n Name of the layer.\n metadata : dict\n Layer metadata.\n scale : tuple of float\n Scale factors for the layer.\n translate : tuple of float\n Translation values for the layer.\n opacity : float\n Opacity of the layer visual, between 0.0 and 1.0.\n blending : str\n One of a list of preset blending modes that determines how RGB and\n alpha values of the layer visual get mixed. Allowed values are\n {'opaque', 'translucent', and 'additive'}.\n visible : bool\n Whether the layer visual is currently being displayed.\n\n Returns\n -------\n layer : :class:`napari.layers.Surface`\n The newly-created surface layer.\n \"\"\"\n layer = layers.Surface(\n data,\n colormap=colormap,\n contrast_limits=contrast_limits,\n gamma=gamma,\n name=name,\n metadata=metadata,\n scale=scale,\n translate=translate,\n opacity=opacity,\n blending=blending,\n visible=visible,\n )\n self.add_layer(layer)\n return layer\n\n def add_vectors(\n self,\n data,\n *,\n edge_width=1,\n edge_color='red',\n length=1,\n name=None,\n metadata=None,\n scale=None,\n translate=None,\n opacity=0.7,\n blending='translucent',\n visible=True,\n ):\n \"\"\"Add a vectors layer to the layers list.\n\n Parameters\n ----------\n data : (N, 2, D) or (N1, N2, ..., ND, D) array\n An (N, 2, D) array is interpreted as \"coordinate-like\" data and a\n list of N vectors with start point and projections of the vector in\n D dimensions. 
An (N1, N2, ..., ND, D) array is interpreted as\n \"image-like\" data where there is a length D vector of the\n projections at each pixel.\n edge_width : float\n Width for all vectors in pixels.\n length : float\n Multiplicative factor on projections for length of all vectors.\n edge_color : str\n Edge color of all the vectors.\n name : str\n Name of the layer.\n metadata : dict\n Layer metadata.\n scale : tuple of float\n Scale factors for the layer.\n translate : tuple of float\n Translation values for the layer.\n opacity : float\n Opacity of the layer visual, between 0.0 and 1.0.\n blending : str\n One of a list of preset blending modes that determines how RGB and\n alpha values of the layer visual get mixed. Allowed values are\n {'opaque', 'translucent', and 'additive'}.\n visible : bool\n Whether the layer visual is currently being displayed.\n\n Returns\n -------\n layer : :class:`napari.layers.Vectors`\n The newly-created vectors layer.\n \"\"\"\n layer = layers.Vectors(\n data,\n edge_width=edge_width,\n edge_color=edge_color,\n length=length,\n name=name,\n metadata=metadata,\n scale=scale,\n translate=translate,\n opacity=opacity,\n blending=blending,\n visible=visible,\n )\n self.add_layer(layer)\n return layer\n\n def _add_layer_from_data(\n self, data, meta: dict = None, layer_type: str = 'image'\n ):\n \"\"\"Add arbitrary layer data to the viewer.\n\n Primarily intended for usage by reader plugin hooks.\n\n Parameters\n ----------\n data : Any\n Data in a format that is valid for the corresponding `add_*` method\n of the specified ``layer_type``.\n meta : dict, optional\n Dict of keyword arguments that will be passed to the corresponding\n `add_*` method. MUST NOT contain any keyword arguments that are\n not valid for the corresponding method.\n layer_type : str\n Type of layer to add. MUST have a corresponding add_* method on\n on the viewer instance.\n\n Raises\n ------\n ValueError\n If ``layer_type`` is not one of the recognized layer types.\n TypeError\n If any keyword arguments in ``meta`` are unexpected for the\n corresponding `add_*` method for this layer_type.\n\n Examples\n --------\n A typical use case might be to upack a tuple of layer data with a\n specified layer_type.\n\n >>> viewer = napari.Viewer()\n >>> data = (\n ... np.random.random((10, 2)) * 20,\n ... {'face_color': 'blue'},\n ... 'points',\n ... )\n >>> viewer._add_layer_from_data(*data)\n\n \"\"\"\n\n layer_type = layer_type.lower()\n if layer_type not in layers.NAMES:\n raise ValueError(\n f\"Unrecognized layer_type: '{layer_type}'. \"\n f\"Must be one of: {layers.NAMES}.\"\n )\n\n try:\n add_method = getattr(self, 'add_' + layer_type)\n except AttributeError:\n raise NotImplementedError(\n f\"Sorry! {layer_type} is a valid layer type, but there is no \"\n f\"viewer.add_{layer_type} available yet.\"\n )\n\n try:\n add_method(data, **(meta or {}))\n except TypeError as exc:\n if 'unexpected keyword argument' in str(exc):\n bad_key = str(exc).split('keyword argument ')[-1]\n raise TypeError(\n \"_add_layer_from_data received an unexpected keyword \"\n f\"argument ({bad_key}) for layer type {layer_type}\"\n ) from exc\n"
] | [
[
"numpy.empty",
"numpy.take"
]
] |
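The record above is napari's `AddLayersMixin`, whose `_add_layer_from_data` method dispatches on `layer_type` via `getattr(self, 'add_' + layer_type)`. The sketch below only mirrors that dispatch pattern in a standalone form; `MiniViewer` is a hypothetical stand-in, not napari's API.

```python
# Standalone sketch of the _add_layer_from_data dispatch pattern from the record above;
# MiniViewer and its add_points signature are made up for illustration.
import numpy as np

class MiniViewer:
    def add_points(self, data, **meta):
        print(f"points layer: {len(data)} points, meta={meta}")

    def _add_layer_from_data(self, data, meta=None, layer_type='image'):
        # Look up the add_<layer_type> method by name, as in the record.
        try:
            add_method = getattr(self, 'add_' + layer_type.lower())
        except AttributeError:
            raise NotImplementedError(f"no add_{layer_type} method on this viewer")
        add_method(data, **(meta or {}))

MiniViewer()._add_layer_from_data(
    np.random.random((10, 2)) * 20, {'face_color': 'blue'}, 'points')
```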
askerlee/craft | [
"921a47a4e81017e5baf49c2823958cf86a0c1fc2"
] | [
"core/gma.py"
] | [
"import torch\nfrom torch import nn, einsum\nfrom einops import rearrange\n\n# max_pos_size = 160\nclass RelPosEmb(nn.Module):\n def __init__(\n self,\n max_pos_size,\n dim_head\n ):\n super().__init__()\n self.rel_height = nn.Embedding(2 * max_pos_size - 1, dim_head)\n self.rel_width = nn.Embedding(2 * max_pos_size - 1, dim_head)\n\n deltas = torch.arange(max_pos_size).view(1, -1) - torch.arange(max_pos_size).view(-1, 1)\n # rel_ind[i, j] = j - i + 159.\n rel_ind = deltas + max_pos_size - 1\n self.register_buffer('rel_ind', rel_ind)\n\n def forward(self, q):\n # q: [8, 1, 46, 62, 128]\n batch, heads, h, w, c = q.shape\n # self.rel_ind[:h, :h]: [46, 46]\n # self.rel_ind[:w, :w]: [62, 62]\n # rel_ind[i,j] = j - i + 159, precomputed distance between i, j. \n # This assumes the input x (from which q is derived) is precisely on the grid.\n # This is fine when we do self-attention on x.\n # However, it will be somewhat limiting if we use RelPosEmb on cross-attention between two frames, \n # particularly when we use flow_init != 0 (on sintel), \n # we better get the positional encodings of x according to flow_init, instead of the grid of x.\n # However, an accurate computation of the relative distances between all input units is expensive.\n # Since values in flow_init are usually small, this inaccuracy may be negligible.\n height_emb = self.rel_height(self.rel_ind[:h, :h].reshape(-1))\n width_emb = self.rel_width( self.rel_ind[:w, :w].reshape(-1))\n\n # height_emb: [46*46, 128] => [46, 46, 1, 128]\n # width_emb: [62*62, 128] => [62, 1, 62, 128]\n # height_emb[i, j]: the embedding of element at (i,j) as a function of the height difference (i-j).\n # width_emb[i, j]: the embedding of element at (i,j) as a function of the width difference (i-j).\n height_emb = rearrange(height_emb, '(x u) d -> x u () d', x=h)\n width_emb = rearrange(width_emb, '(y v) d -> y () v d', y=w)\n \n # outer product? y, uv -> y u v b h x y d x u v d\n # height_score: [8, 1, 46, 62, 46, 1] <= [8, 1, 46, 62, 128] * [46, 46, 1, 128]\n # width_score: [8, 1, 46, 62, 1, 62]\n height_score = einsum('b h x y d, x u v d -> b h x y u v', q, height_emb)\n width_score = einsum('b h x y d, y u v d -> b h x y u v', q, width_emb)\n # height_score + width_score: [8, 1, 46, 62, 46, 62], 65071232 elements.\n return height_score + width_score\n\n\nclass Attention(nn.Module):\n def __init__(\n self,\n *,\n args,\n dim,\n max_pos_size = 100,\n heads = 4,\n dim_head = 128,\n ):\n super().__init__()\n self.args = args\n self.heads = heads\n self.scale = dim_head ** -0.5\n inner_dim = heads * dim_head\n\n self.to_qk = nn.Conv2d(dim, inner_dim * 2, 1, bias=False)\n\n self.pos_emb = RelPosEmb(max_pos_size, dim_head)\n self.pos_embed_weight = 1.0\n \n def forward(self, fmap):\n heads, b, c, h, w = self.heads, *fmap.shape\n\n # q, k: [8, 128, 46, 62]\n q, k = self.to_qk(fmap).chunk(2, dim=1)\n\n # q, k: [8, 1, 46, 62, 128]\n q, k = map(lambda t: rearrange(t, 'b (h d) x y -> b h x y d', h=heads), (q, k))\n # Why not scale k?\n q = self.scale * q\n \n if self.args.position_only:\n sim = self.pos_emb(q)\n\n elif self.args.position_and_content:\n # [..., 46, 62, ...] . [..., 46, 62, ...] 
=> [..., 46, 62, 46, 62]\n sim_content = einsum('b h x y d, b h u v d -> b h x y u v', q, k)\n sim_pos = self.pos_emb(q)\n sim = sim_content + self.pos_embed_weight * sim_pos\n \n else:\n # q, k: [B, 1, 46, 62, 128]\n # sim: [B, 1, 46, 62, 46, 62]\n sim = einsum('b h x y d, b h u v d -> b h x y u v', q, k)\n\n sim = rearrange(sim, 'b h x y u v -> b h (x y) (u v)')\n attn = sim.softmax(dim=-1)\n\n return attn\n\n# Aggregate output is dim-dimensional, same as the input. No FFN is used.\nclass Aggregate(nn.Module):\n def __init__(\n self,\n args,\n dim,\n heads = 4,\n dim_head = 128,\n ):\n super().__init__()\n self.args = args\n self.heads = heads\n self.scale = dim_head ** -0.5\n inner_dim = heads * dim_head\n\n self.to_v = nn.Conv2d(dim, inner_dim, 1, bias=False)\n\n self.gamma = nn.Parameter(torch.zeros(1))\n\n if dim != inner_dim:\n self.project = nn.Conv2d(inner_dim, dim, 1, bias=False)\n else:\n self.project = None\n\n def forward(self, attn, fmap):\n heads, b, c, h, w = self.heads, *fmap.shape\n\n v = self.to_v(fmap)\n v = rearrange(v, 'b (h d) x y -> b h (x y) d', h=heads)\n out = einsum('b h i j, b h j d -> b h i d', attn, v)\n out = rearrange(out, 'b h (x y) d -> b (h d) x y', x=h, y=w)\n\n # project is None for GMA. \n if self.project is not None:\n out = self.project(out)\n\n out = fmap + self.gamma * out\n\n return out\n\n\nif __name__ == \"__main__\":\n att = Attention(dim=128, heads=1)\n fmap = torch.randn(2, 128, 40, 90)\n out = att(fmap)\n\n print(out.shape)\n"
] | [
[
"torch.randn",
"torch.nn.Embedding",
"torch.arange",
"torch.nn.Conv2d",
"torch.zeros",
"torch.einsum"
]
] |
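In the record above, `RelPosEmb` precomputes `rel_ind[i, j] = j - i + max_pos_size - 1` so that every pairwise offset indexes a row of an `nn.Embedding` table. The toy-sized sketch below reproduces only that index construction; the sizes (`max_pos_size`, `dim_head`, the crop height `h`) are illustrative assumptions, not the values used in the record.

```python
# Toy-sized sketch of the relative-index table built by RelPosEmb in the record above.
import torch
import torch.nn as nn

max_pos_size, dim_head = 5, 4
deltas = torch.arange(max_pos_size).view(1, -1) - torch.arange(max_pos_size).view(-1, 1)
rel_ind = deltas + max_pos_size - 1          # rel_ind[i, j] = j - i + (max_pos_size - 1), in [0, 2*max_pos_size - 2]
rel_height = nn.Embedding(2 * max_pos_size - 1, dim_head)

h = 3
height_emb = rel_height(rel_ind[:h, :h].reshape(-1)).view(h, h, dim_head)
print(rel_ind[:h, :h])                       # offset table for a height-3 crop
print(height_emb.shape)                      # torch.Size([3, 3, 4])
```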
ThomasGmeinder/incubator-mxnet | [
"0f3c5da37bf1647e18fce26beb9f06f5d6183846"
] | [
"python/mxnet/ndarray/numpy/_op.py"
] | [
"# pylint: disable=C0302\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n\n# pylint: disable=unused-argument\n\"\"\"Namespace for numpy operators used in Gluon dispatched by F=ndarray.\"\"\"\n\nimport numpy as _np\nfrom ...base import numeric_types, integer_types\nfrom ...util import _sanity_check_params, set_module\nfrom ...util import wrap_np_unary_func, wrap_np_binary_func\nfrom ...context import current_context\nfrom . import _internal as _npi\nfrom ..ndarray import NDArray\n\n__all__ = ['shape', 'zeros', 'zeros_like', 'ones', 'ones_like', 'full', 'full_like', 'empty_like', 'invert', 'delete',\n 'add', 'broadcast_to', 'subtract', 'multiply', 'divide', 'mod', 'remainder', 'power', 'bitwise_not',\n 'arctan2', 'sin', 'cos', 'tan', 'sinh', 'cosh', 'tanh', 'log10', 'sqrt', 'cbrt', 'abs', 'insert',\n 'absolute', 'exp', 'expm1', 'arcsin', 'arccos', 'arctan', 'sign', 'log', 'degrees', 'log2', 'matmul',\n 'log1p', 'rint', 'radians', 'reciprocal', 'square', 'negative', 'fix', 'ceil', 'floor', 'histogram',\n 'trunc', 'logical_not', 'arcsinh', 'arccosh', 'arctanh', 'argsort', 'sort',\n 'tensordot', 'eye', 'linspace',\n 'logspace', 'expand_dims', 'tile', 'arange', 'array_split', 'split', 'hsplit', 'vsplit', 'dsplit',\n 'concatenate', 'append', 'stack', 'vstack', 'row_stack', 'column_stack', 'hstack', 'dstack',\n 'average', 'mean', 'maximum', 'minimum',\n 'swapaxes', 'clip', 'argmax', 'argmin', 'std', 'var', 'indices', 'copysign', 'ravel', 'unravel_index',\n 'diag_indices_from', 'hanning', 'hamming', 'blackman', 'flip', 'flipud', 'fliplr', 'around', 'round',\n 'hypot', 'bitwise_and', 'bitwise_xor', 'bitwise_or', 'rad2deg', 'deg2rad', 'unique', 'lcm',\n 'tril', 'identity', 'take', 'ldexp', 'vdot', 'inner', 'outer',\n 'equal', 'not_equal', 'greater', 'less', 'greater_equal', 'less_equal', 'rot90', 'einsum',\n 'true_divide', 'nonzero', 'quantile', 'percentile', 'shares_memory', 'may_share_memory',\n 'diff', 'resize', 'polyval', 'nan_to_num', 'isnan', 'isinf', 'isposinf', 'isneginf', 'isfinite',\n 'where', 'bincount']\n\n\n@set_module('mxnet.ndarray.numpy')\ndef shape(a):\n \"\"\"\n Return the shape of an array.\n\n Parameters\n ----------\n a : array_like\n Input array.\n\n Returns\n -------\n shape : tuple of ints\n The elements of the shape tuple give the lengths of the\n corresponding array dimensions.\n\n See Also\n --------\n ndarray.shape : Equivalent array method.\n\n Examples\n --------\n >>> np.shape(np.eye(3))\n (3, 3)\n >>> np.shape([[1, 2]])\n (1, 2)\n >>> np.shape([0])\n (1,)\n >>> np.shape(0)\n ()\n \"\"\"\n return a.shape\n\n\n@set_module('mxnet.ndarray.numpy')\ndef zeros(shape, dtype=_np.float32, order='C', ctx=None): # pylint: disable=redefined-outer-name\n \"\"\"Return a new array of given shape and type, filled with zeros.\n This function 
currently only supports storing multi-dimensional data\n in row-major (C-style).\n\n Parameters\n ----------\n shape : int or tuple of int\n The shape of the empty array.\n dtype : str or numpy.dtype, optional\n An optional value type. Default is `numpy.float32`. Note that this\n behavior is different from NumPy's `zeros` function where `float64`\n is the default value, because `float32` is considered as the default\n data type in deep learning.\n order : {'C'}, optional, default: 'C'\n How to store multi-dimensional data in memory, currently only row-major\n (C-style) is supported.\n ctx : Context, optional\n An optional device context (default is the current default context).\n\n Returns\n -------\n out : ndarray\n Array of zeros with the given shape, dtype, and ctx.\n \"\"\"\n if order != 'C':\n raise NotImplementedError\n if ctx is None:\n ctx = current_context()\n dtype = _np.float32 if dtype is None else dtype\n return _npi.zeros(shape=shape, ctx=ctx, dtype=dtype)\n\n\n@set_module('mxnet.ndarray.numpy')\ndef ones(shape, dtype=_np.float32, order='C', ctx=None): # pylint: disable=redefined-outer-name\n \"\"\"Return a new array of given shape and type, filled with ones.\n This function currently only supports storing multi-dimensional data\n in row-major (C-style).\n\n Parameters\n ----------\n shape : int or tuple of int\n The shape of the empty array.\n dtype : str or numpy.dtype, optional\n An optional value type. Default is `numpy.float32`. Note that this\n behavior is different from NumPy's `ones` function where `float64`\n is the default value, because `float32` is considered as the default\n data type in deep learning.\n order : {'C'}, optional, default: 'C'\n How to store multi-dimensional data in memory, currently only row-major\n (C-style) is supported.\n ctx : Context, optional\n An optional device context (default is the current default context).\n\n Returns\n -------\n out : ndarray\n Array of ones with the given shape, dtype, and ctx.\n \"\"\"\n if order != 'C':\n raise NotImplementedError\n if ctx is None:\n ctx = current_context()\n dtype = _np.float32 if dtype is None else dtype\n return _npi.ones(shape=shape, ctx=ctx, dtype=dtype)\n\n\n# pylint: disable=too-many-arguments, redefined-outer-name\n@set_module('mxnet.ndarray.numpy')\ndef zeros_like(a, dtype=None, order='C', ctx=None, out=None):\n \"\"\"\n Return an array of zeros with the same shape and type as a given array.\n\n Parameters\n ----------\n a : ndarray\n The shape and data-type of `a` define these same attributes of\n the returned array.\n dtype : data-type, optional\n Overrides the data type of the result.\n Temporarily do not support boolean type.\n order : {'C'}, optional\n Whether to store multidimensional data in C- or Fortran-contiguous\n (row- or column-wise) order in memory. Currently only supports C order.\n ctx: to specify the device, e.g. 
the i-th GPU.\n out : ndarray or None, optional\n A location into which the result is stored.\n If provided, it must have the same shape and dtype as input ndarray.\n If not provided or `None`, a freshly-allocated array is returned.\n\n Returns\n -------\n out : ndarray\n Array of zeros with the same shape and type as a.\n\n See Also\n --------\n empty_like : Return an empty array with shape and type of input.\n ones_like : Return an array of ones with shape and type of input.\n zeros_like : Return an array of zeros with shape and type of input.\n full : Return a new array of given shape filled with value.\n\n Examples\n --------\n >>> x = np.arange(6)\n >>> x = x.reshape((2, 3))\n >>> x\n array([[0., 1., 2.],\n [3., 4., 5.]])\n >>> np.zeros_like(x)\n array([[0., 0., 0.],\n [0., 0., 0.]])\n >>> np.zeros_like(x, int)\n array([[0, 0, 0],\n [0, 0, 0]], dtype=int64)\n >>> y = np.arange(3, dtype=float)\n >>> y\n array([0., 1., 2.], dtype=float64)\n >>> np.zeros_like(y)\n array([0., 0., 0.], dtype=float64)\n \"\"\"\n if order != 'C':\n raise NotImplementedError\n if ctx is None:\n ctx = current_context()\n return _npi.full_like(a, fill_value=0, dtype=dtype, ctx=ctx, out=out)\n\n\n@set_module('mxnet.ndarray.numpy')\ndef ones_like(a, dtype=None, order='C', ctx=None, out=None):\n \"\"\"\n Return an array of ones with the same shape and type as a given array.\n\n Parameters\n ----------\n a : ndarray\n The shape and data-type of `a` define these same attributes of\n the returned array.\n dtype : data-type, optional\n Overrides the data type of the result.\n Temporarily do not support boolean type.\n order : {'C'}, optional\n Whether to store multidimensional data in C- or Fortran-contiguous\n (row- or column-wise) order in memory. Currently only supports C order.\n ctx: to specify the device, e.g. the i-th GPU.\n out : ndarray or None, optional\n A location into which the result is stored.\n If provided, it must have the same shape and dtype as input ndarray.\n If not provided or `None`, a freshly-allocated array is returned.\n\n Returns\n -------\n out : ndarray\n Array of ones with the same shape and type as a.\n\n See Also\n --------\n empty_like : Return an empty array with shape and type of input.\n zeros_like : Return an array of zeros with shape and type of input.\n full_like : Return a new array with shape of input filled with value.\n ones : Return a new array setting values to one.\n\n Examples\n --------\n >>> x = np.arange(6)\n >>> x = x.reshape((2, 3))\n >>> x\n array([[0., 1., 2.],\n [3., 4., 5.]])\n >>> np.ones_like(x)\n array([[1., 1., 1.],\n [1., 1., 1.]])\n >>> np.ones_like(x, int)\n array([[1, 1, 1],\n [1, 1, 1]], dtype=int64)\n >>> y = np.arange(3, dtype=float)\n >>> y\n array([0., 1., 2.], dtype=float64)\n >>> np.ones_like(y)\n array([1., 1., 1.], dtype=float64)\n \"\"\"\n if order != 'C':\n raise NotImplementedError\n if ctx is None:\n ctx = current_context()\n return _npi.full_like(a, fill_value=1, dtype=dtype, ctx=ctx, out=out)\n\n\n@set_module('mxnet.ndarray.numpy')\ndef broadcast_to(array, shape):\n \"\"\"\n Broadcast an array to a new shape.\n\n Parameters\n ----------\n array : ndarray or scalar\n The array to broadcast.\n shape : tuple\n The shape of the desired array.\n\n Returns\n -------\n broadcast : array\n A readonly view on the original array with the given shape. It is\n typically not contiguous. 
Furthermore, more than one element of a\n broadcasted array may refer to a single memory location.\n\n Raises\n ------\n MXNetError\n If the array is not compatible with the new shape according to NumPy's\n broadcasting rules.\n \"\"\"\n if _np.isscalar(array):\n return full(shape, array)\n return _npi.broadcast_to(array, shape)\n\n\n@set_module('mxnet.ndarray.numpy')\ndef full(shape, fill_value, dtype=None, order='C', ctx=None, out=None): # pylint: disable=too-many-arguments\n \"\"\"\n Return a new array of given shape and type, filled with `fill_value`.\n\n Parameters\n ----------\n shape : int or sequence of ints\n Shape of the new array, e.g., ``(2, 3)`` or ``2``.\n fill_value : scalar or ndarray\n Fill value.\n dtype : data-type, optional\n The desired data-type for the array. The default, `None`, means\n `np.array(fill_value).dtype`.\n order : {'C'}, optional\n Whether to store multidimensional data in C- or Fortran-contiguous\n (row- or column-wise) order in memory. Currently only supports C order.\n ctx: to specify the device, e.g. the i-th GPU.\n out : ndarray or None, optional\n A location into which the result is stored.\n If provided, it must have the same shape and dtype as input ndarray.\n If not provided or `None`, a freshly-allocated array is returned.\n\n Returns\n -------\n out : ndarray\n Array of `fill_value` with the given shape, dtype, and order.\n If `fill_value` is an ndarray, out will have the same context as `fill_value`\n regardless of the provided `ctx`.\n\n Notes\n -----\n This function differs from the original `numpy.full\n https://docs.scipy.org/doc/numpy/reference/generated/numpy.full.html`_ in\n the following way(s):\n - Have an additional `ctx` argument to specify the device\n - Have an additional `out` argument\n - Currently does not support `order` selection\n\n See Also\n --------\n empty : Return a new uninitialized array.\n ones : Return a new array setting values to one.\n zeros : Return a new array setting values to zero.\n\n Examples\n --------\n >>> np.full((2, 2), 10)\n array([[10., 10.],\n [10., 10.]])\n >>> np.full((2, 2), 2, dtype=np.int32, ctx=mx.cpu(0))\n array([[2, 2],\n [2, 2]], dtype=int32)\n\n \"\"\"\n if order != 'C':\n raise NotImplementedError\n if ctx is None:\n ctx = current_context()\n if isinstance(fill_value, NDArray):\n if dtype is None:\n ret = broadcast_to(fill_value, shape)\n else:\n ret = broadcast_to(fill_value, shape).astype(dtype)\n return ret\n dtype = _np.float32 if dtype is None else dtype\n return _npi.full(shape=shape, value=fill_value, ctx=ctx, dtype=dtype, out=out)\n# pylint: enable=too-many-arguments, redefined-outer-name\n\n\n@set_module('mxnet.ndarray.numpy')\ndef full_like(a, fill_value, dtype=None, order='C', ctx=None, out=None): # pylint: disable=too-many-arguments\n \"\"\"\n Return a full array with the same shape and type as a given array.\n\n Parameters\n ----------\n a : ndarray\n The shape and data-type of `a` define these same attributes of\n the returned array.\n fill_value : scalar\n Fill value.\n dtype : data-type, optional\n Overrides the data type of the result.\n Temporarily do not support boolean type.\n order : {'C'}, optional\n Whether to store multidimensional data in C- or Fortran-contiguous\n (row- or column-wise) order in memory. Currently only supports C order.\n ctx: to specify the device, e.g. 
the i-th GPU.\n out : ndarray or None, optional\n A location into which the result is stored.\n If provided, it must have the same shape and dtype as input ndarray.\n If not provided or `None`, a freshly-allocated array is returned.\n\n Returns\n -------\n out : ndarray\n Array of `fill_value` with the same shape and type as `a`.\n\n See Also\n --------\n empty_like : Return an empty array with shape and type of input.\n ones_like : Return an array of ones with shape and type of input.\n zeros_like : Return an array of zeros with shape and type of input.\n full : Return a new array of given shape filled with value.\n\n Examples\n --------\n >>> x = np.arange(6, dtype=int)\n >>> np.full_like(x, 1)\n array([1, 1, 1, 1, 1, 1], dtype=int64)\n >>> np.full_like(x, 0.1)\n array([0, 0, 0, 0, 0, 0], dtype=int64)\n >>> np.full_like(x, 0.1, dtype=np.float64)\n array([0.1, 0.1, 0.1, 0.1, 0.1, 0.1], dtype=float64)\n >>> np.full_like(x, np.nan, dtype=np.double)\n array([nan, nan, nan, nan, nan, nan], dtype=float64)\n >>> y = np.arange(6, dtype=np.float32)\n >>> np.full_like(y, 0.1)\n array([0.1, 0.1, 0.1, 0.1, 0.1, 0.1])\n \"\"\"\n if order != 'C':\n raise NotImplementedError\n if ctx is None:\n ctx = current_context()\n return _npi.full_like(a, fill_value=fill_value, dtype=dtype, ctx=ctx, out=out)\n\n\n@set_module('mxnet.ndarray.numpy')\ndef empty_like(prototype, dtype=None, order='C', subok=False, shape=None): # pylint: disable=W0621\n \"\"\"\n Return a new array with the same shape and type as a given array.\n\n Parameters\n ----------\n prototype : ndarray\n The shape and data-type of `prototype` define these same attributes\n of the returned array.\n dtype : data-type, optional\n Overrides the data type of the result.\n order : {'C'}, optional\n Whether to store multidimensional data in C- or Fortran-contiguous\n (row- or column-wise) order in memory. Currently only supports C order.\n subok : {False}, optional\n If True, then the newly created array will use the sub-class\n type of 'a', otherwise it will be a base-class array. Defaults\n to False.\n (Only support False at this moment)\n shape : int or sequence of ints, optional.\n Overrides the shape of the result. If order='K' and the number of\n dimensions is unchanged, will try to keep order, otherwise,\n order='C' is implied.\n (Not supported at this moment)\n\n Returns\n -------\n out : ndarray\n Array of uninitialized (arbitrary) data with the same\n shape and type as `prototype`.\n\n See Also\n --------\n ones_like : Return an array of ones with shape and type of input.\n zeros_like : Return an array of zeros with shape and type of input.\n full_like : Return a new array with shape of input filled with value.\n empty : Return a new uninitialized array.\n\n Notes\n -----\n This function does *not* initialize the returned array; to do that use\n `zeros_like` or `ones_like` instead. 
It may be marginally faster than\n the functions that do set the array values.\n\n Examples\n --------\n >>> a = np.array([[1,2,3], [4,5,6]])\n >>> np.empty_like(a)\n array([[-5764607523034234880, -2305834244544065442, 4563075075], # uninitialized\n [ 4567052944, -5764607523034234880, 844424930131968]])\n >>> a = np.array([[1., 2., 3.],[4.,5.,6.]])\n >>> np.empty_like(a)\n array([[4.9e-324, 9.9e-324, 1.5e-323], # uninitialized\n [2.0e-323, 2.5e-323, 3.0e-323]])\n \"\"\"\n dtype_list = {None:'None', _np.int8:'int8', _np.uint8:'uint8', _np.int32:'int32',\n _np.int64:'int64', _np.float16:'float16', _np.float32:'float32',\n _np.float64:'float64', _np.bool_:'bool_', bool:'bool', int:'int64', float:'float64'}\n if order != 'C':\n raise NotImplementedError(\"Only support C-order at this moment\")\n if subok:\n raise NotImplementedError(\"Creating array by using sub-class is not supported at this moment\")\n if shape is not None:\n raise NotImplementedError(\"Assigning new shape is not supported at this moment\")\n try:\n dtype = dtype if isinstance(dtype, str) else dtype_list[dtype]\n except:\n raise NotImplementedError(\"Do not support this dtype at this moment\")\n return _npi.empty_like_fallback(prototype, dtype=dtype, order=order, subok=subok, shape=shape)\n\n\n@set_module('mxnet.ndarray.numpy')\ndef arange(start, stop=None, step=1, dtype=None, ctx=None):\n \"\"\"Return evenly spaced values within a given interval.\n\n Values are generated within the half-open interval ``[start, stop)``\n (in other words, the interval including `start` but excluding `stop`).\n For integer arguments the function is equivalent to the Python built-in\n `range` function, but returns an ndarray rather than a list.\n\n Parameters\n ----------\n start : number, optional\n Start of interval. The interval includes this value. The default\n start value is 0.\n stop : number\n End of interval. The interval does not include this value, except\n in some cases where `step` is not an integer and floating point\n round-off affects the length of `out`.\n step : number, optional\n Spacing between values. For any output `out`, this is the distance\n between two adjacent values, ``out[i+1] - out[i]``. The default\n step size is 1. If `step` is specified as a position argument,\n `start` must also be given.\n dtype : dtype\n The type of the output array. The default is `float32`.\n\n Returns\n -------\n arange : ndarray\n Array of evenly spaced values.\n\n For floating point arguments, the length of the result is\n ``ceil((stop - start)/step)``. Because of floating point overflow,\n this rule may result in the last element of `out` being greater\n than `stop`.\n \"\"\"\n if dtype is None:\n dtype = 'float32'\n if ctx is None:\n ctx = current_context()\n if stop is None:\n stop = start\n start = 0\n if step is None:\n step = 1\n if start is None and stop is None:\n raise ValueError('start and stop cannot be both None')\n if step == 0:\n raise ZeroDivisionError('step cannot be 0')\n return _npi.arange(start=start, stop=stop, step=step, dtype=dtype, ctx=ctx)\n\n\n@set_module('mxnet.ndarray.numpy')\ndef identity(n, dtype=None, ctx=None):\n \"\"\"\n Return the identity array.\n\n The identity array is a square array with ones on\n the main diagonal.\n\n Parameters\n ----------\n n : int\n Number of rows (and columns) in `n` x `n` output.\n dtype : data-type, optional\n Data-type of the output. 
Defaults to ``numpy.float32``.\n ctx : Context, optional\n An optional device context (default is the current default context).\n\n Returns\n -------\n out : ndarray\n `n` x `n` array with its main diagonal set to one,\n and all other elements 0.\n\n Examples\n --------\n >>> np.identity(3)\n >>> np.identity(3)\n array([[1., 0., 0.],\n [0., 1., 0.],\n [0., 0., 1.]])\n \"\"\"\n if not isinstance(n, int):\n raise TypeError(\"Input 'n' should be an integer\")\n if n < 0:\n raise ValueError(\"Input 'n' cannot be negative\")\n if ctx is None:\n ctx = current_context()\n dtype = _np.float32 if dtype is None else dtype\n return _npi.identity(shape=(n, n), ctx=ctx, dtype=dtype)\n\n\n# pylint: disable=redefined-outer-name\n@set_module('mxnet.ndarray.numpy')\ndef take(a, indices, axis=None, mode='raise', out=None):\n r\"\"\"\n Take elements from an array along an axis.\n\n When axis is not None, this function does the same thing as \"fancy\"\n indexing (indexing arrays using arrays); however, it can be easier to use\n if you need elements along a given axis. A call such as\n ``np.take(arr, indices, axis=3)`` is equivalent to\n ``arr[:,:,:,indices,...]``.\n\n Explained without fancy indexing, this is equivalent to the following use\n of `ndindex`, which sets each of ``ii``, ``jj``, and ``kk`` to a tuple of\n indices::\n\n Ni, Nk = a.shape[:axis], a.shape[axis+1:]\n Nj = indices.shape\n for ii in ndindex(Ni):\n for jj in ndindex(Nj):\n for kk in ndindex(Nk):\n out[ii + jj + kk] = a[ii + (indices[jj],) + kk]\n\n Parameters\n ----------\n a : ndarray\n The source array.\n indices : ndarray\n The indices of the values to extract. Also allow scalars for indices.\n axis : int, optional\n The axis over which to select values. By default, the flattened\n input array is used.\n out : ndarray, optional\n If provided, the result will be placed in this array. It should\n be of the appropriate shape and dtype.\n mode : {'clip', 'wrap'}, optional\n Specifies how out-of-bounds indices will behave.\n\n * 'clip' -- clip to the range (default)\n * 'wrap' -- wrap around\n\n 'clip' mode means that all indices that are too large are replaced\n by the index that addresses the last element along that axis. 
Note\n that this disables indexing with negative numbers.\n\n Returns\n -------\n out : ndarray\n The returned array has the same type as `a`.\n\n Notes\n -----\n\n This function differs from the original `numpy.take\n <https://docs.scipy.org/doc/numpy/reference/generated/numpy.take.html>`_ in\n the following way(s):\n\n - Only ndarray or scalar ndarray is accepted as valid input.\n\n Examples\n --------\n >>> a = np.array([4, 3, 5, 7, 6, 8])\n >>> indices = np.array([0, 1, 4])\n >>> np.take(a, indices)\n array([4., 3., 6.])\n\n In this example for `a` is an ndarray, \"fancy\" indexing can be used.\n\n >>> a[indices]\n array([4., 3., 6.])\n\n If `indices` is not one dimensional, the output also has these dimensions.\n\n >>> np.take(a, np.array([[0, 1], [2, 3]]))\n array([[4., 3.],\n [5., 7.]])\n \"\"\"\n if mode not in ('wrap', 'clip', 'raise'):\n raise NotImplementedError(\n \"function take does not support mode '{}'\".format(mode))\n if axis is None:\n return _npi.take(_npi.reshape(a, -1), indices, 0, mode, out)\n else:\n return _npi.take(a, indices, axis, mode, out)\n# pylint: enable=redefined-outer-name\n\n\n@set_module('mxnet.ndarray.numpy')\ndef insert(arr, obj, values, axis=None):\n \"\"\"\n Insert values along the given axis before the given indices.\n\n Parameters\n ----------\n arr : ndarray\n Input array.\n obj : int, slice or ndarray of int64\n Object that defines the index or indices before which `values` is\n inserted.\n Support for multiple insertions when `obj` is a single scalar or a\n sequence with one element (only support int32 and int64 element).\n values : ndarray\n Values to insert into `arr`.\n If the type of values is different from that of arr, values is converted\n to the type of arr.\n axis : int, optional\n Axis along which to insert `values`. If `axis` is None then `arr`\n is flattened first.\n\n Returns\n -------\n out : ndarray\n A copy of `arr` with `values` inserted. Note that `insert`\n does not occur in-place: a new array is returned. 
If\n `axis` is None, `out` is a flattened array.\n\n Notes\n -----\n - Note that for higher dimensional inserts `obj=0` behaves very different\n from `obj=[0]` just like `arr[:,0,:] = values` is different from\n `arr[:,[0],:] = values`.\n - If obj is a ndarray, it's dtype only supports int64\n\n Examples\n --------\n >>> a = np.array([[1, 1], [2, 2], [3, 3]])\n >>> a\n array([[1., 1.],\n [2., 2.],\n [3., 3.]])\n >>> np.insert(a, 1, np.array(5))\n array([1., 5., 1., 2., 2., 3., 3.])\n >>> np.insert(a, 1, np.array(5), axis=1)\n array([[1., 5., 1.],\n [2., 5., 2.],\n [3., 5., 3.]])\n\n Difference between sequence and scalars:\n\n >>> np.insert(a, np.array([1], dtype=np.int64), np.array([[1],[2],[3]]), axis=1)\n array([[1., 1., 1.],\n [2., 2., 2.],\n [3., 3., 3.]])\n >>> np.insert(a, 1, np.array([1, 2, 3]), axis=1)\n array([[1., 1., 1.],\n [2., 2., 2.],\n [3., 3., 3.]])\n\n >>> b = a.flatten()\n >>> b\n array([1., 1., 2., 2., 3., 3.])\n >>> np.insert(b, np.array([2, 2], dtype=np.int64), np.array([5, 6]))\n array([1., 1., 5., 6., 2., 2., 3., 3.])\n\n >>> np.insert(b, slice(2, 4), np.array([5, 6]))\n array([1., 1., 5., 2., 6., 2., 3., 3.])\n\n # type casting\n >>> np.insert(b.astype(np.int32), np.array([2, 2],dtype='int64'), np.array([7.13, False]))\n array([1, 1, 7, 0, 2, 2, 3, 3], dtype=int32)\n\n >>> x = np.arange(8).reshape(2, 4)\n >>> idx = np.array([1, 3], dtype=np.int64)\n >>> np.insert(x, idx, np.array([999]), axis=1)\n array([[ 0., 999., 1., 2., 999., 3.],\n [ 4., 999., 5., 6., 999., 7.]])\n \"\"\"\n if isinstance(values, numeric_types):\n if isinstance(obj, slice):\n start = obj.start\n stop = obj.stop\n step = 1 if obj.step is None else obj.step\n return _npi.insert_slice(arr, val=values, start=start, stop=stop, step=step, axis=axis)\n elif isinstance(obj, integer_types):\n return _npi.insert_scalar(arr, val=values, int_ind=obj, axis=axis)\n elif isinstance(obj, NDArray):\n return _npi.insert_tensor(arr, obj, val=values, axis=axis)\n\n if not isinstance(arr, NDArray):\n raise TypeError(\"'arr' can not support type {}\".format(str(type(arr))))\n if not isinstance(values, NDArray):\n raise TypeError(\"'values' can not support type {}\".format(str(type(values))))\n if isinstance(obj, slice):\n start = obj.start\n stop = obj.stop\n step = 1 if obj.step is None else obj.step\n return _npi.insert_slice(arr, values, start=start, stop=stop, step=step, axis=axis)\n elif isinstance(obj, integer_types):\n return _npi.insert_scalar(arr, values, int_ind=obj, axis=axis)\n elif isinstance(obj, NDArray):\n return _npi.insert_tensor(arr, values, obj, axis=axis)\n else:\n raise TypeError(\"'obj' can not support type {}\".format(str(type(obj))))\n\n\n#pylint: disable= too-many-arguments, no-member, protected-access\ndef _ufunc_helper(lhs, rhs, fn_array, fn_scalar, lfn_scalar, rfn_scalar=None, out=None):\n \"\"\" Helper function for element-wise operation.\n The function will perform numpy-like broadcasting if needed and call different functions.\n\n Parameters\n --------\n lhs : ndarray or numeric value\n Left-hand side operand.\n\n rhs : ndarray or numeric value\n Right-hand operand,\n\n fn_array : function\n Function to be called if both lhs and rhs are of ``ndarray`` type.\n\n fn_scalar : function\n Function to be called if both lhs and rhs are numeric values.\n\n lfn_scalar : function\n Function to be called if lhs is ``ndarray`` while rhs is numeric value\n\n rfn_scalar : function\n Function to be called if lhs is numeric value while rhs is ``ndarray``;\n if none is provided, then the function is 
commutative, so rfn_scalar is equal to lfn_scalar\n\n Returns\n --------\n mxnet.numpy.ndarray or scalar\n result array or scalar\n \"\"\"\n from ...numpy import ndarray\n if isinstance(lhs, numeric_types):\n if isinstance(rhs, numeric_types):\n return fn_scalar(lhs, rhs, out=out)\n else:\n if rfn_scalar is None:\n # commutative function\n return lfn_scalar(rhs, float(lhs), out=out)\n else:\n return rfn_scalar(rhs, float(lhs), out=out)\n elif isinstance(rhs, numeric_types):\n return lfn_scalar(lhs, float(rhs), out=out)\n elif isinstance(rhs, ndarray):\n return fn_array(lhs, rhs, out=out)\n else:\n raise TypeError('type {} not supported'.format(str(type(rhs))))\n#pylint: enable= too-many-arguments, no-member, protected-access\n\n\n@set_module('mxnet.ndarray.numpy')\ndef unique(ar, return_index=False, return_inverse=False, return_counts=False, axis=None):\n \"\"\"\n Find the unique elements of an array.\n\n Returns the sorted unique elements of an array. There are three optional\n outputs in addition to the unique elements:\n\n * the indices of the input array that give the unique values\n * the indices of the unique array that reconstruct the input array\n * the number of times each unique value comes up in the input array\n\n Parameters\n ----------\n ar : ndarray\n Input array. Unless `axis` is specified, this will be flattened if it\n is not already 1-D.\n return_index : bool, optional\n If True, also return the indices of `ar` (along the specified axis,\n if provided, or in the flattened array) that result in the unique array.\n return_inverse : bool, optional\n If True, also return the indices of the unique array (for the specified\n axis, if provided) that can be used to reconstruct `ar`.\n return_counts : bool, optional\n If True, also return the number of times each unique item appears\n in `ar`.\n axis : int or None, optional\n The axis to operate on. If None, `ar` will be flattened. If an integer,\n the subarrays indexed by the given axis will be flattened and treated\n as the elements of a 1-D array with the dimension of the given axis,\n see the notes for more details. The default is None.\n\n Returns\n -------\n unique : ndarray\n The sorted unique values.\n unique_indices : ndarray, optional\n The indices of the first occurrences of the unique values in the\n original array. Only provided if `return_index` is True.\n unique_inverse : ndarray, optional\n The indices to reconstruct the original array from the\n unique array. Only provided if `return_inverse` is True.\n unique_counts : ndarray, optional\n The number of times each of the unique values comes up in the\n original array. Only provided if `return_counts` is True.\n\n Notes\n -----\n When an axis is specified the subarrays indexed by the axis are sorted.\n This is done by making the specified axis the first dimension of the array\n and then flattening the subarrays in C order. The flattened subarrays are\n then viewed as a structured type with each element given a label, with the\n effect that we end up with a 1-D array of structured types that can be\n treated in the same way as any other 1-D array. 
The result is that the\n flattened subarrays are sorted in lexicographic order starting with the\n first element.\n\n This function differs from the original `numpy.unique\n <https://docs.scipy.org/doc/numpy/reference/generated/numpy.unique.html>`_ in\n the following aspects:\n\n - Only support ndarray as input.\n - Object arrays or structured arrays are not supported.\n\n Examples\n --------\n >>> np.unique(np.array([1, 1, 2, 2, 3, 3]))\n array([1., 2., 3.])\n >>> a = np.array([[1, 1], [2, 3]])\n >>> np.unique(a)\n array([1., 2., 3.])\n\n Return the unique rows of a 2D array\n\n >>> a = np.array([[1, 0, 0], [1, 0, 0], [2, 3, 4]])\n >>> np.unique(a, axis=0)\n array([[1., 0., 0.],\n [2., 3., 4.]])\n\n Return the indices of the original array that give the unique values:\n\n >>> a = np.array([1, 2, 6, 4, 2, 3, 2])\n >>> u, indices = np.unique(a, return_index=True)\n >>> u\n array([1., 2., 3., 4., 6.])\n >>> indices\n array([0, 1, 5, 3, 2], dtype=int64)\n >>> a[indices]\n array([1., 2., 3., 4., 6.])\n\n Reconstruct the input array from the unique values:\n\n >>> a = np.array([1, 2, 6, 4, 2, 3, 2])\n >>> u, indices = np.unique(a, return_inverse=True)\n >>> u\n array([1., 2., 3., 4., 6.])\n >>> indices\n array([0, 1, 4, 3, 1, 2, 1], dtype=int64)\n >>> u[indices]\n array([1., 2., 6., 4., 2., 3., 2.])\n \"\"\"\n ret = _npi.unique(ar, return_index, return_inverse, return_counts, axis)\n if isinstance(ret, list):\n return tuple(ret)\n else:\n return ret\n\n\n@set_module('mxnet.ndarray.numpy')\n@wrap_np_binary_func\ndef add(x1, x2, out=None, **kwargs):\n \"\"\"\n Add arguments element-wise.\n\n Parameters\n ----------\n x1, x2 : ndarrays or scalar values\n The arrays to be added. If x1.shape != x2.shape, they must be broadcastable to\n a common shape (which may be the shape of one or the other).\n\n out : ndarray\n A location into which the result is stored. If provided, it must have a shape\n that the inputs broadcast to. If not provided or None, a freshly-allocated array\n is returned.\n\n Returns\n -------\n add : ndarray or scalar\n The sum of x1 and x2, element-wise. This is a scalar if both x1 and x2 are scalars.\n\n Notes\n -----\n This operator now supports automatic type promotion. The resulting type will be determined\n according to the following rules:\n * If both inputs are of floating number types, the output is the more precise type.\n * If only one of the inputs is floating number type, the result is that type.\n * If both inputs are of integer types (including boolean), not supported yet.\n \"\"\"\n return _ufunc_helper(x1, x2, _npi.add, _np.add, _npi.add_scalar, None, out)\n\n\n@set_module('mxnet.ndarray.numpy')\n@wrap_np_binary_func\ndef subtract(x1, x2, out=None, **kwargs):\n \"\"\"\n Subtract arguments element-wise.\n\n Parameters\n ----------\n x1, x2 : ndarrays or scalar values\n The arrays to be subtracted from each other. If x1.shape != x2.shape,\n they must be broadcastable to a common shape (which may be the shape\n of one or the other).\n\n out : ndarray\n A location into which the result is stored. If provided, it must have a shape\n that the inputs broadcast to. If not provided or None, a freshly-allocated array\n is returned.\n\n Returns\n -------\n subtract : ndarray or scalar\n The difference of x1 and x2, element-wise. This is a scalar if both x1 and x2 are scalars.\n\n Notes\n -----\n This operator now supports automatic type promotion. 
The resulting type will be determined\n according to the following rules:\n * If both inputs are of floating number types, the output is the more precise type.\n * If only one of the inputs is floating number type, the result is that type.\n * If both inputs are of integer types (including boolean), not supported yet.\n \"\"\"\n return _ufunc_helper(x1, x2, _npi.subtract, _np.subtract, _npi.subtract_scalar,\n _npi.rsubtract_scalar, out)\n\n\n@set_module('mxnet.ndarray.numpy')\n@wrap_np_binary_func\ndef multiply(x1, x2, out=None, **kwargs):\n \"\"\"\n Multiply arguments element-wise.\n\n Parameters\n ----------\n x1, x2 : ndarrays or scalar values\n The arrays to be multiplied. If x1.shape != x2.shape, they must be broadcastable to\n a common shape (which may be the shape of one or the other).\n\n out : ndarray\n A location into which the result is stored. If provided, it must have a shape\n that the inputs broadcast to. If not provided or None, a freshly-allocated array\n is returned.\n\n Returns\n -------\n out : ndarray or scalar\n The multiplication of x1 and x2, element-wise. This is a scalar if both x1 and x2\n are scalars.\n\n Notes\n -----\n This operator now supports automatic type promotion. The resulting type will be determined\n according to the following rules:\n * If both inputs are of floating number types, the output is the more precise type.\n * If only one of the inputs is floating number type, the result is that type.\n * If both inputs are of integer types (including boolean), not supported yet.\n \"\"\"\n return _ufunc_helper(x1, x2, _npi.multiply, _np.multiply, _npi.multiply_scalar, None, out)\n\n\n@set_module('mxnet.ndarray.numpy')\n@wrap_np_binary_func\ndef divide(x1, x2, out=None, **kwargs):\n \"\"\"\n Returns a true division of the inputs, element-wise.\n\n Parameters\n ----------\n x1 : ndarray or scalar\n Dividend array.\n\n x2 : ndarray or scalar\n Divisor array.\n\n out : ndarray\n A location into which the result is stored. If provided, it must have a shape\n that the inputs broadcast to. If not provided or None, a freshly-allocated array\n is returned.\n\n Returns\n -------\n out : ndarray or scalar\n This is a scalar if both x1 and x2 are scalars.\n\n Notes\n -----\n This operator now supports automatic type promotion. The resulting type will be determined\n according to the following rules:\n * If both inputs are of floating number types, the output is the more precise type.\n * If only one of the inputs is floating number type, the result is that type.\n * If both inputs are of integer types (including boolean), the output is of float32 type.\n \"\"\"\n return _ufunc_helper(x1, x2, _npi.true_divide, _np.divide, _npi.true_divide_scalar,\n _npi.rtrue_divide_scalar, out)\n\n\n@set_module('mxnet.ndarray.numpy')\ndef true_divide(x1, x2, out=None):\n \"\"\"Returns a true division of the inputs, element-wise.\n\n Instead of the Python traditional 'floor division', this returns a true\n division. True division adjusts the output type to present the best\n answer, regardless of input types.\n\n Parameters\n ----------\n x1 : ndarray or scalar\n Dividend array.\n\n x2 : ndarray or scalar\n Divisor array.\n\n out : ndarray\n A location into which the result is stored. If provided, it must have a shape\n that the inputs broadcast to. If not provided or None, a freshly-allocated array\n is returned.\n\n Returns\n -------\n out : ndarray or scalar\n This is a scalar if both x1 and x2 are scalars.\n\n Notes\n -----\n This operator now supports automatic type promotion. 
The resulting type will be determined\n according to the following rules:\n * If both inputs are of floating number types, the output is the more precise type.\n * If only one of the inputs is floating number type, the result is that type.\n * If both inputs are of integer types (including boolean), the output is of float32 type.\n \"\"\"\n return _ufunc_helper(x1, x2, _npi.true_divide, _np.divide, _npi.true_divide_scalar,\n _npi.rtrue_divide_scalar, out)\n\n\n@set_module('mxnet.ndarray.numpy')\n@wrap_np_binary_func\ndef mod(x1, x2, out=None, **kwargs):\n \"\"\"\n Return element-wise remainder of division.\n\n Parameters\n ----------\n x1 : ndarray or scalar\n Dividend array.\n\n x2 : ndarray or scalar\n Divisor array.\n\n out : ndarray\n A location into which the result is stored. If provided, it must have a shape\n that the inputs broadcast to. If not provided or None, a freshly-allocated array\n is returned.\n\n Returns\n -------\n out : ndarray or scalar\n This is a scalar if both x1 and x2 are scalars.\n \"\"\"\n return _ufunc_helper(x1, x2, _npi.mod, _np.mod, _npi.mod_scalar, _npi.rmod_scalar, out)\n\n\n@set_module('mxnet.ndarray.numpy')\ndef delete(arr, obj, axis=None):\n \"\"\"\n Return a new array with sub-arrays along an axis deleted. For a one\n dimensional array, this returns those entries not returned by\n `arr[obj]`.\n\n Parameters\n ----------\n arr : ndarray\n Input array.\n obj : slice, int or ndarray of ints\n Indicate indices of sub-arrays to remove along the specified axis.\n axis : int, optional\n The axis along which to delete the subarray defined by `obj`.\n If `axis` is None, `obj` is applied to the flattened array.\n\n Returns\n -------\n out : ndarray\n A copy of `arr` with the elements specified by `obj` removed. Note\n that `delete` does not occur in-place. 
If `axis` is None, `out` is\n a flattened array.\n\n Examples\n --------\n >>> arr = np.array([[1,2,3,4], [5,6,7,8], [9,10,11,12]])\n >>> arr\n array([[ 1., 2., 3., 4.],\n [ 5., 6., 7., 8.],\n [ 9., 10., 11., 12.]])\n\n >>> np.delete(arr, 1, 0)\n array([[ 1., 2., 3., 4.],\n [ 9., 10., 11., 12.]])\n\n >>> np.delete(arr, slice(None, None, 2), 1)\n array([[ 2., 4.],\n [ 6., 8.],\n [10., 12.]])\n\n >>> np.delete(arr, np.array([1,3,5]), None)\n array([ 1., 3., 5., 7., 8., 9., 10., 11., 12.])\n >>> np.delete(arr, np.array([1,1,5]), None)\n array([ 1., 3., 4., 5., 7., 8., 9., 10., 11., 12.])\n \"\"\"\n if not isinstance(arr, NDArray):\n raise TypeError(\"'arr' can not support type {}\".format(str(type(arr))))\n if isinstance(obj, slice):\n start = obj.start\n stop = obj.stop\n step = 1 if obj.step is None else obj.step\n return _npi.delete(arr, start=start, stop=stop, step=step, axis=axis)\n elif isinstance(obj, integer_types):\n return _npi.delete(arr, int_ind=obj, axis=axis)\n elif isinstance(obj, NDArray):\n return _npi.delete(arr, obj, axis=axis)\n else:\n raise TypeError(\"'obj' can not support type {}\".format(str(type(obj))))\n\n\n@set_module('mxnet.ndarray.numpy')\n@wrap_np_binary_func\ndef matmul(a, b, out=None):\n \"\"\"\n Matrix product of two arrays.\n\n Parameters\n ----------\n a, b : ndarray\n Input arrays, scalars not allowed.\n out : ndarray, optional\n A location into which the result is stored.\n If provided, it must have a shape that matches the signature (n,k),(k,m)->(n,m).\n If not provided or None, a freshly-allocated array is returned.\n\n Returns\n -------\n y : ndarray\n The matrix product of the inputs.\n This is a scalar only when both x1, x2 are 1-d vectors.\n\n Raises\n ------\n MXNetError\n If the last dimension of a is not the same size as the second-to-last dimension of b.\n If a scalar value is passed in.\n\n See Also\n --------\n tensordot :\n Sum products over arbitrary axes.\n dot :\n alternative matrix product with different broadcasting rules.\n einsum :\n Einstein summation convention.\n\n Notes\n -----\n The behavior depends on the arguments in the following way.\n\n - If both arguments are 2-D they are multiplied like conventional matrices.\n - If either argument is N-D, N > 2, it is treated as a stack of matrices\n residing in the last two indexes and broadcast accordingly.\n - If the first argument is 1-D, it is promoted to a matrix by prepending\n a 1 to its dimensions. After matrix multiplication the prepended 1 is removed.\n - If the second argument is 1-D, it is promoted to a matrix by appending a 1\n to its dimensions. After matrix multiplication the appended 1 is removed.\n\n matmul differs from dot in two important ways:\n\n - Multiplication by scalars is not allowed, use multiply instead.\n - Stacks of matrices are broadcast together as if the matrices were elements,\n respecting the signature (n,k),(k,m)->(n,m):\n >>> a = np.ones([9, 5, 7, 4])\n >>> c = np.ones([9, 5, 4, 3])\n >>> np.dot(a, c).shape\n (9, 5, 7, 9, 5, 3)\n >>> np.matmul(a, c).shape\n (9, 5, 7, 3)\n >>> # n is 7, k is 4, m is 3\n\n Examples\n --------\n For 2-D arrays it is the matrix product:\n >>> a = np.array([[1, 0],\n ... [0, 1]])\n >>> b = np.array([[4, 1],\n ... [2, 2]])\n >>> np.matmul(a, b)\n array([[4., 1.],\n [2., 2.]])\n\n For 2-D mixed with 1-D, the result is the usual.\n >>> a = np.array([[1, 0],\n ... 
[0, 1]])\n >>> b = np.array([1, 2])\n >>> np.matmul(a, b)\n array([1., 2.])\n >>> np.matmul(b, a)\n array([1., 2.])\n\n Broadcasting is conventional for stacks of arrays\n >>> a = np.arange(2 * 2 * 4).reshape((2, 2, 4))\n >>> b = np.arange(2 * 2 * 4).reshape((2, 4, 2))\n >>> np.matmul(a, b).shape\n (2, 2, 2)\n >>> np.matmul(a, b)[0, 1, 1]\n array(98.)\n >>> sum(a[0, 1, :] * b[0, :, 1])\n array(98.)\n\n Scalar multiplication raises an error.\n >>> np.matmul([1, 2], 3)\n Traceback (most recent call last):\n ...\n mxnet.base.MXNetError: ... : Multiplication by scalars is not allowed.\n \"\"\"\n return _npi.matmul(a, b, out=out)\n\n\n@set_module('mxnet.ndarray.numpy')\n@wrap_np_binary_func\ndef remainder(x1, x2, out=None):\n \"\"\"\n Return element-wise remainder of division.\n\n Parameters\n ----------\n x1 : ndarray or scalar\n Dividend array.\n\n x2 : ndarray or scalar\n Divisor array.\n\n out : ndarray\n A location into which the result is stored. If provided, it must have a shape\n that the inputs broadcast to. If not provided or None, a freshly-allocated array\n is returned.\n\n Returns\n -------\n out : ndarray or scalar\n This is a scalar if both x1 and x2 are scalars.\n \"\"\"\n return _ufunc_helper(x1, x2, _npi.mod, _np.mod, _npi.mod_scalar, _npi.rmod_scalar, out)\n\n\n@set_module('mxnet.ndarray.numpy')\n@wrap_np_binary_func\ndef power(x1, x2, out=None, **kwargs):\n \"\"\"\n First array elements raised to powers from second array, element-wise.\n\n Parameters\n ----------\n x1 : ndarray or scalar\n The bases.\n\n x2 : ndarray or scalar\n The exponent.\n\n out : ndarray\n A location into which the result is stored. If provided, it must have a shape\n that the inputs broadcast to. If not provided or None, a freshly-allocated array\n is returned.\n\n Returns\n -------\n out : ndarray or scalar\n The bases in x1 raised to the exponents in x2.\n This is a scalar if both x1 and x2 are scalars.\n \"\"\"\n return _ufunc_helper(x1, x2, _npi.power, _np.power, _npi.power_scalar, _npi.rpower_scalar, out)\n\n\n@set_module('mxnet.ndarray.numpy')\ndef argsort(a, axis=-1, kind=None, order=None):\n \"\"\"\n Returns the indices that would sort an array.\n Perform an indirect sort along the given axis using the algorithm specified\n by the `kind` keyword. It returns an array of indices of the same shape as\n `a` that index data along the given axis in sorted order.\n\n Parameters\n ----------\n a : ndarray\n Array to sort.\n axis : int or None, optional\n Axis along which to sort. The default is -1 (the last axis). 
If None,\n the flattened array is used.\n kind : string, optional\n This argument can take any string, but it does not have any effect on the\n final result.\n order : str or list of str, optional\n Not supported yet, will raise NotImplementedError if not None.\n\n Returns\n -------\n index_array : ndarray, int\n Array of indices that sort `a` along the specified `axis`.\n If `a` is one-dimensional, ``a[index_array]`` yields a sorted `a`.\n More generally, ``np.take_along_axis(a, index_array, axis=axis)``\n always yields the sorted `a`, irrespective of dimensionality.\n\n Notes\n -----\n This operator does not support different sorting algorithms.\n\n Examples\n --------\n One dimensional array:\n\n >>> x = np.array([3, 1, 2])\n >>> np.argsort(x)\n array([1, 2, 0])\n\n Two-dimensional array:\n\n >>> x = np.array([[0, 3], [2, 2]])\n >>> x\n array([[0, 3],\n [2, 2]])\n >>> ind = np.argsort(x, axis=0) # sorts along first axis (down)\n >>> ind\n array([[0, 1],\n [1, 0]])\n >>> np.take_along_axis(x, ind, axis=0) # same as np.sort(x, axis=0)\n array([[0, 2],\n [2, 3]])\n >>> ind = np.argsort(x, axis=1) # sorts along last axis (across)\n >>> ind\n array([[0, 1],\n [0, 1]])\n >>> np.take_along_axis(x, ind, axis=1) # same as np.sort(x, axis=1)\n array([[0, 3],\n [2, 2]])\n\n Indices of the sorted elements of a N-dimensional array:\n\n >>> ind = np.unravel_index(np.argsort(x, axis=None), x.shape)\n >>> ind\n (array([0, 1, 1, 0]), array([0, 0, 1, 1]))\n >>> x[ind] # same as np.sort(x, axis=None)\n array([0, 2, 2, 3])\n \"\"\"\n if order is not None:\n raise NotImplementedError(\"order not supported here\")\n\n return _npi.argsort(data=a, axis=axis, is_ascend=True, dtype='int64')\n\n\n@set_module('mxnet.ndarray.numpy')\ndef sort(a, axis=-1, kind=None, order=None):\n \"\"\"\n Return a sorted copy of an array.\n\n Parameters\n ----------\n a : ndarray\n Array to be sorted.\n axis : int or None, optional\n Axis along which to sort. The default is -1 (the last axis). If None,\n the flattened array is used.\n kind : string, optional\n This argument can take any string, but it does not have any effect on the\n final result.\n order : str or list of str, optional\n Not supported yet, will raise NotImplementedError if not None.\n\n Returns\n -------\n sorted_array : ndarray\n Array of the same type and shape as `a`.\n\n Notes\n -----\n This operator does not support different sorting algorithms.\n\n Examples\n --------\n >>> a = np.array([[1,4],[3,1]])\n >>> np.sort(a) # sort along the last axis\n array([[1, 4],\n [1, 3]])\n >>> np.sort(a, axis=None) # sort the flattened array\n array([1, 1, 3, 4])\n >>> np.sort(a, axis=0) # sort along the first axis\n array([[1, 1],\n [3, 4]])\n \"\"\"\n if order is not None:\n raise NotImplementedError(\"order not supported here\")\n return _npi.sort(data=a, axis=axis, is_ascend=True)\n\n\n@set_module('mxnet.ndarray.numpy')\ndef tensordot(a, b, axes=2):\n r\"\"\"\n tensordot(a, b, axes=2)\n Compute tensor dot product along specified axes for arrays >= 1-D.\n Given two tensors (arrays of dimension greater than or equal to one),\n `a` and `b`, and an ndarray object containing two ndarray\n objects, ``(a_axes, b_axes)``, sum the products of `a`'s and `b`'s\n elements (components) over the axes specified by ``a_axes`` and\n ``b_axes``. 
The third argument can be a single non-negative\n integer_like scalar, ``N``; if it is such, then the last ``N``\n dimensions of `a` and the first ``N`` dimensions of `b` are summed\n over.\n Parameters\n ----------\n a, b : ndarray, len(shape) >= 1\n Tensors to \"dot\".\n axes : int or (2,) ndarray\n * integer_like\n If an int N, sum over the last N axes of `a` and the first N axes\n of `b` in order. The sizes of the corresponding axes must match.\n * (2,) ndarray\n Or, a list of axes to be summed over, first sequence applying to `a`,\n second to `b`. Both elements ndarray must be of the same length.\n See Also\n --------\n dot, einsum\n Notes\n -----\n Three common use cases are:\n * ``axes = 0`` : tensor product :math:`a\\otimes b`\n * ``axes = 1`` : tensor dot product :math:`a\\cdot b`\n * ``axes = 2`` : (default) tensor double contraction :math:`a:b`\n When `axes` is integer_like, the sequence for evaluation will be: first\n the -Nth axis in `a` and 0th axis in `b`, and the -1th axis in `a` and\n Nth axis in `b` last.\n When there is more than one axis to sum over - and they are not the last\n (first) axes of `a` (`b`) - the argument `axes` should consist of\n two sequences of the same length, with the first axis to sum over given\n first in both sequences, the second axis second, and so forth.\n Examples\n --------\n >>> a = np.arange(60.).reshape(3,4,5)\n >>> b = np.arange(24.).reshape(4,3,2)\n >>> c = np.tensordot(a,b, axes=([1,0],[0,1]))\n >>> c.shape\n (5, 2)\n >>> c\n array([[ 4400., 4730.],\n [ 4532., 4874.],\n [ 4664., 5018.],\n [ 4796., 5162.],\n [ 4928., 5306.]])\n \"\"\"\n if _np.isscalar(axes):\n return _npi.tensordot_int_axes(a, b, axes)\n\n if len(axes) != 2:\n raise ValueError('Axes must consist of two arrays.')\n a_axes_summed, b_axes_summed = axes\n if _np.isscalar(a_axes_summed):\n a_axes_summed = (a_axes_summed,)\n if _np.isscalar(b_axes_summed):\n b_axes_summed = (b_axes_summed,)\n\n if len(a_axes_summed) != len(b_axes_summed):\n raise ValueError('Axes length mismatch')\n\n return _npi.tensordot(a, b, a_axes_summed, b_axes_summed)\n\n\n@set_module('mxnet.ndarray.numpy')\ndef histogram(a, bins=10, range=None, normed=None, weights=None, density=None): # pylint: disable=too-many-arguments\n \"\"\"\n Compute the histogram of a set of data.\n\n Parameters\n ----------\n a : ndarray\n Input data. The histogram is computed over the flattened array.\n bins : int or NDArray\n If `bins` is an int, it defines the number of equal-width\n bins in the given range (10, by default). If `bins` is a\n sequence, it defines a monotonically increasing array of bin edges,\n including the rightmost edge, allowing for non-uniform bin widths.\n .. versionadded:: 1.11.0\n If `bins` is a string, it defines the method used to calculate the\n optimal bin width, as defined by `histogram_bin_edges`.\n range : (float, float)\n The lower and upper range of the bins. Required when `bins` is an integer.\n Values outside the range are ignored. 
The first element of the range must\n be less than or equal to the second.\n normed : bool, optional\n Not supported yet, coming soon.\n weights : array_like, optional\n Not supported yet, coming soon.\n density : bool, optional\n Not supported yet, coming soon.\n \"\"\"\n if normed is True:\n raise NotImplementedError(\"normed is not supported yet...\")\n if weights is not None:\n raise NotImplementedError(\"weights is not supported yet...\")\n if density is True:\n raise NotImplementedError(\"density is not supported yet...\")\n if isinstance(bins, numeric_types):\n if range is None:\n raise NotImplementedError(\"automatic range is not supported yet...\")\n return _npi.histogram(a, bin_cnt=bins, range=range)\n if isinstance(bins, (list, tuple)):\n raise NotImplementedError(\"array_like bins is not supported yet...\")\n if isinstance(bins, str):\n raise NotImplementedError(\"string bins is not supported yet...\")\n if isinstance(bins, NDArray):\n return _npi.histogram(a, bins=bins)\n raise ValueError(\"np.histogram fails with\", locals())\n\n\n@set_module('mxnet.ndarray.numpy')\ndef eye(N, M=None, k=0, dtype=_np.float32, **kwargs):\n \"\"\"\n Return a 2-D array with ones on the diagonal and zeros elsewhere.\n\n Parameters\n ----------\n N : int\n Number of rows in the output.\n M : int, optional\n Number of columns in the output. If None, defaults to N.\n k : int, optional\n Index of the diagonal: 0 (the default) refers to the main diagonal,\n a positive value refers to an upper diagonal,\n and a negative value to a lower diagonal.\n dtype : data-type, optional\n Data-type of the returned array.\n\n Returns\n -------\n I : ndarray of shape (N,M)\n An array where all elements are equal to zero,\n except for the k-th diagonal, whose values are equal to one.\n \"\"\"\n _sanity_check_params('eye', ['order'], kwargs)\n ctx = kwargs.pop('ctx', current_context())\n if ctx is None:\n ctx = current_context()\n return _npi.eye(N, M, k, ctx, dtype)\n\n\n@set_module('mxnet.ndarray.numpy')\ndef linspace(start, stop, num=50, endpoint=True, retstep=False, dtype=None, axis=0, ctx=None): # pylint: disable=too-many-arguments\n r\"\"\"\n Return evenly spaced numbers over a specified interval.\n Returns num evenly spaced samples, calculated over the interval [start, stop].\n The endpoint of the interval can optionally be excluded.\n\n Parameters\n ----------\n start : real number\n The starting value of the sequence.\n stop : real number\n The end value of the sequence, unless endpoint is set to False. In\n that case, the sequence consists of all but the last of num + 1\n evenly spaced samples, so that stop is excluded. Note that the step\n size changes when endpoint is False.\n num : int, optional\n Number of samples to generate. Default is 50. Must be non-negative.\n endpoint : bool, optional\n If True, stop is the last sample. Otherwise, it is not included.\n Default is True.\n retstep : bool, optional\n If True, return (samples, step), where step is the spacing between samples.\n dtype : dtype, optional\n The type of the output array. If dtype is not given, infer the data\n type from the other input arguments.\n axis : int, optional\n The axis in the result to store the samples. Relevant only if start or\n stop are array-like. By default (0), the samples will be along a new\n axis inserted at the beginning. 
Use -1 to get an axis at the end.\n\n Returns\n -------\n samples : ndarray\n There are num equally spaced samples in the closed interval\n `[start, stop]` or the half-open interval `[start, stop)`\n (depending on whether endpoint is True or False).\n step : float, optional\n Only returned if retstep is True\n Size of spacing between samples.\n\n\n See Also\n --------\n arange : Similar to `linspace`, but uses a step size (instead of the\n number of samples).\n\n Examples\n --------\n >>> np.linspace(2.0, 3.0, num=5)\n array([2. , 2.25, 2.5 , 2.75, 3. ])\n >>> np.linspace(2.0, 3.0, num=5, endpoint=False)\n array([2. , 2.2, 2.4, 2.6, 2.8])\n >>> np.linspace(2.0, 3.0, num=5, retstep=True)\n (array([2. , 2.25, 2.5 , 2.75, 3. ]), 0.25)\n\n Graphical illustration:\n\n >>> import matplotlib.pyplot as plt\n >>> N = 8\n >>> y = np.zeros(N)\n >>> x1 = np.linspace(0, 10, N, endpoint=True)\n >>> x2 = np.linspace(0, 10, N, endpoint=False)\n >>> plt.plot(x1.asnumpy(), y.asnumpy(), 'o')\n [<matplotlib.lines.Line2D object at 0x...>]\n >>> plt.plot(x2.asnumpy(), (y + 0.5).asnumpy(), 'o')\n [<matplotlib.lines.Line2D object at 0x...>]\n >>> plt.ylim([-0.5, 1])\n (-0.5, 1)\n >>> plt.show()\n\n Notes\n -----\n\n This function differs from the original `numpy.linspace\n <https://docs.scipy.org/doc/numpy/reference/generated/numpy.linspace.html>`_ in\n the following aspects:\n\n - `start` and `stop` do not support list, numpy ndarray and mxnet ndarray\n - axis could only be 0\n - There could be an additional `ctx` argument to specify the device, e.g. the i-th\n GPU.\n \"\"\"\n if isinstance(start, (list, _np.ndarray, NDArray)) or \\\n isinstance(stop, (list, _np.ndarray, NDArray)):\n raise NotImplementedError('start and stop only support int')\n if axis != 0:\n raise NotImplementedError(\"the function only support axis 0\")\n if ctx is None:\n ctx = current_context()\n if retstep:\n step = (stop - start) / (num - 1)\n return _npi.linspace(start=start, stop=stop, num=num, endpoint=endpoint, ctx=ctx, dtype=dtype), step\n else:\n return _npi.linspace(start=start, stop=stop, num=num, endpoint=endpoint, ctx=ctx, dtype=dtype)\n\n\n@set_module('mxnet.ndarray.numpy')\ndef logspace(start, stop, num=50, endpoint=True, base=10.0, dtype=None, axis=0, ctx=None): # pylint: disable=too-many-arguments\n r\"\"\"Return numbers spaced evenly on a log scale.\n\n In linear space, the sequence starts at ``base ** start``\n (`base` to the power of `start`) and ends with ``base ** stop``\n (see `endpoint` below).\n\n Non-scalar `start` and `stop` are now supported.\n\n Parameters\n ----------\n start : int or float\n ``base ** start`` is the starting value of the sequence.\n stop : int or float\n ``base ** stop`` is the final value of the sequence, unless `endpoint`\n is False. In that case, ``num + 1`` values are spaced over the\n interval in log-space, of which all but the last (a sequence of\n length `num`) are returned.\n num : integer, optional\n Number of samples to generate. Default is 50.\n endpoint : boolean, optional\n If true, `stop` is the last sample. Otherwise, it is not included.\n Default is True.\n base : float, optional\n The base of the log space. The step size between the elements in\n ``ln(samples) / ln(base)`` (or ``log_base(samples)``) is uniform.\n Default is 10.0.\n dtype : dtype\n The type of the output array. If `dtype` is not given, infer the data\n type from the other input arguments.\n axis : int, optional\n The axis in the result to store the samples. Relevant only if start\n or stop are array-like. 
By default (0), the samples will be along a\n new axis inserted at the beginning. Now, axis only support axis = 0.\n ctx : Context, optional\n An optional device context (default is the current default context).\n\n Returns\n -------\n samples : ndarray\n `num` samples, equally spaced on a log scale.\n\n See Also\n --------\n arange : Similar to linspace, with the step size specified instead of the\n number of samples. Note that, when used with a float endpoint, the\n endpoint may or may not be included.\n linspace : Similar to logspace, but with the samples uniformly distributed\n in linear space, instead of log space.\n\n Notes\n -----\n Logspace is equivalent to the code. Now wo only support axis = 0.\n\n >>> y = np.linspace(start, stop, num=num, endpoint=endpoint)\n ...\n >>> power(base, y).astype(dtype)\n ...\n\n Examples\n --------\n >>> np.logspace(2.0, 3.0, num=4)\n array([ 100. , 215.44347, 464.15887, 1000. ])\n >>> np.logspace(2.0, 3.0, num=4, endpoint=False)\n array([100. , 177.82794, 316.22775, 562.3413 ])\n >>> np.logspace(2.0, 3.0, num=4, base=2.0)\n array([4. , 5.0396843, 6.349604 , 8. ])\n >>> np.logspace(2.0, 3.0, num=4, base=2.0, dtype=np.int32)\n array([4, 5, 6, 8], dtype=int32)\n >>> np.logspace(2.0, 3.0, num=4, ctx=npx.gpu(0))\n array([ 100. , 215.44347, 464.15887, 1000. ], ctx=gpu(0))\n \"\"\"\n if isinstance(start, (list, tuple, _np.ndarray, NDArray)) or \\\n isinstance(stop, (list, tuple, _np.ndarray, NDArray)):\n raise NotImplementedError('start and stop only support int and float')\n if axis != 0:\n raise NotImplementedError(\"the function only support axis 0\")\n if ctx is None:\n ctx = current_context()\n return _npi.logspace(start=start, stop=stop, num=num, endpoint=endpoint, base=base, ctx=ctx, dtype=dtype)\n\n\n@set_module('mxnet.ndarray.numpy')\ndef expand_dims(a, axis):\n \"\"\"Expand the shape of an array.\n\n Insert a new axis that will appear at the `axis` position in the expanded\n\n Parameters\n ----------\n a : ndarray\n Input array.\n axis : int\n Position in the expanded axes where the new axis is placed.\n\n Returns\n -------\n res : ndarray\n Output array. The number of dimensions is one greater than that of\n the input array.\n \"\"\"\n return _npi.expand_dims(a, axis)\n\n\n@set_module('mxnet.ndarray.numpy')\n@wrap_np_binary_func\ndef lcm(x1, x2, out=None, **kwargs):\n \"\"\"\n Returns the lowest common multiple of ``|x1|`` and ``|x2|``\n\n Parameters\n ----------\n x1, x2 : ndarrays or scalar values\n The arrays for computing lowest common multiple. If x1.shape != x2.shape,\n they must be broadcastable to a common shape (which may be the shape of\n one or the other).\n\n out : ndarray or None, optional\n A location into which the result is stored. If provided, it must have a shape\n that the inputs broadcast to. 
If not provided or None, a freshly-allocated array\n is returned.\n\n Returns\n -------\n y : ndarray or scalar\n The lowest common multiple of the absolute value of the inputs\n This is a scalar if both `x1` and `x2` are scalars.\n\n See Also\n --------\n gcd : The greatest common divisor\n\n Examples\n --------\n >>> np.lcm(12, 20)\n 60\n >>> np.lcm(np.arange(6, dtype=int), 20)\n array([ 0, 20, 20, 60, 20, 20], dtype=int64)\n \"\"\"\n return _ufunc_helper(x1, x2, _npi.lcm, _np.lcm, _npi.lcm_scalar, None, out)\n\n\n@set_module('mxnet.ndarray.numpy')\ndef tril(m, k=0):\n r\"\"\"\n Lower triangle of an array.\n\n Return a copy of an array with elements above the `k`-th diagonal zeroed.\n\n Parameters\n ----------\n m : ndarray, shape (M, N)\n Input array.\n k : int, optional\n Diagonal above which to zero elements. `k = 0` (the default) is the\n main diagonal, `k < 0` is below it and `k > 0` is above.\n\n Returns\n -------\n tril : ndarray, shape (M, N)\n Lower triangle of `m`, of same shape and data-type as `m`.\n\n See Also\n --------\n triu : same thing, only for the upper triangle\n\n Examples\n --------\n >>> a = np.array([[1,2,3],[4,5,6],[7,8,9],[10,11,12]])\n >>> np.tril(a, -1)\n array([[ 0., 0., 0.],\n [ 4., 0., 0.],\n [ 7., 8., 0.],\n [10., 11., 12.]])\n \"\"\"\n return _npi.tril(m, k)\n\n\ndef _unary_func_helper(x, fn_array, fn_scalar, out=None, **kwargs):\n \"\"\"Helper function for unary operators.\n\n Parameters\n ----------\n x : ndarray or scalar\n Input of the unary operator.\n fn_array : function\n Function to be called if x is of ``ndarray`` type.\n fn_scalar : function\n Function to be called if x is a Python scalar.\n out : ndarray\n The buffer ndarray for storing the result of the unary function.\n\n Returns\n -------\n out : mxnet.numpy.ndarray or scalar\n Result array or scalar.\n \"\"\"\n if isinstance(x, numeric_types):\n return fn_scalar(x, **kwargs)\n elif isinstance(x, NDArray):\n return fn_array(x, out=out, **kwargs)\n else:\n raise TypeError('type {} not supported'.format(str(type(x))))\n\n\n@set_module('mxnet.ndarray.numpy')\n@wrap_np_unary_func\ndef sin(x, out=None, **kwargs):\n r\"\"\"\n Trigonometric sine, element-wise.\n\n Parameters\n ----------\n x : ndarray or scalar\n Angle, in radians (:math:`2 \\pi` rad equals 360 degrees).\n out : ndarray or None\n A location into which the result is stored. If provided, it\n must have a shape that the inputs broadcast to. If not provided\n or None, a freshly-allocated array is returned. The dtype of the\n output is the same as that of the input if the input is an ndarray.\n\n Returns\n -------\n y : ndarray or scalar\n The sine of each element of x. This is a scalar if `x` is a scalar.\n\n Notes\n ----\n This function only supports input type of float.\n\n Examples\n --------\n >>> np.sin(np.pi/2.)\n 1.0\n >>> np.sin(np.array((0., 30., 45., 60., 90.)) * np.pi / 180.)\n array([0. , 0.5 , 0.70710677, 0.86602545, 1. ])\n \"\"\"\n return _unary_func_helper(x, _npi.sin, _np.sin, out=out, **kwargs)\n\n\n@set_module('mxnet.ndarray.numpy')\n@wrap_np_unary_func\ndef cos(x, out=None, **kwargs):\n r\"\"\"\n Cosine, element-wise.\n\n Parameters\n ----------\n x : ndarray or scalar\n Angle, in radians (:math:`2 \\pi` rad equals 360 degrees).\n out : ndarray or None\n A location into which the result is stored. If provided, it\n must have a shape that the inputs broadcast to. If not provided\n or None, a freshly-allocated array is returned. 
The dtype of the\n output is the same as that of the input if the input is an ndarray.\n\n Returns\n -------\n y : ndarray or scalar\n The corresponding cosine values. This is a scalar if x is a scalar.\n\n Notes\n ----\n This function only supports input type of float.\n\n Examples\n --------\n >>> np.cos(np.array([0, np.pi/2, np.pi]))\n array([ 1.000000e+00, -4.371139e-08, -1.000000e+00])\n >>> # Example of providing the optional output parameter\n >>> out1 = np.array([0], dtype='f')\n >>> out2 = np.cos(np.array([0.1]), out1)\n >>> out2 is out1\n True\n \"\"\"\n return _unary_func_helper(x, _npi.cos, _np.cos, out=out, **kwargs)\n\n\n@set_module('mxnet.ndarray.numpy')\n@wrap_np_unary_func\ndef sinh(x, out=None, **kwargs):\n \"\"\"\n Hyperbolic sine, element-wise.\n Equivalent to ``1/2 * (np.exp(x) - np.exp(-x))`` or ``-1j * np.sin(1j*x)``.\n\n Parameters\n ----------\n x : ndarray or scalar\n Input array or scalar.\n out : ndarray or None\n A location into which the result is stored. If provided, it\n must have a shape that the inputs broadcast to. If not provided\n or None, a freshly-allocated array is returned. The dtype of the\n output is the same as that of the input if the input is an ndarray.\n\n Returns\n -------\n y : ndarray or scalar\n The corresponding hyperbolic sine values. This is a scalar if `x` is a scalar.\n\n Notes\n ----\n This function only supports input type of float.\n\n Examples\n --------\n >>> np.sinh(0)\n 0.0\n >>> # Example of providing the optional output parameter\n >>> out1 = np.array([0], dtype='f')\n >>> out2 = np.sinh(np.array([0.1]), out1)\n >>> out2 is out1\n True\n \"\"\"\n return _unary_func_helper(x, _npi.sinh, _np.sinh, out=out, **kwargs)\n\n\n@set_module('mxnet.ndarray.numpy')\n@wrap_np_unary_func\ndef cosh(x, out=None, **kwargs):\n \"\"\"\n Hyperbolic cosine, element-wise.\n Equivalent to ``1/2 * (np.exp(x) + np.exp(-x))`` and ``np.cos(1j*x)``.\n\n Parameters\n ----------\n x : ndarray or scalar\n Input array or scalar.\n out : ndarray or None\n A location into which the result is stored. If provided, it\n must have a shape that the inputs broadcast to. If not provided\n or None, a freshly-allocated array is returned. The dtype of the\n output is the same as that of the input if the input is an ndarray.\n\n Returns\n -------\n y : ndarray or scalar\n The corresponding hyperbolic cosine values. This is a scalar if `x` is a scalar.\n\n Notes\n ----\n This function only supports input type of float.\n\n Examples\n --------\n >>> np.cosh(0)\n 1.0\n \"\"\"\n return _unary_func_helper(x, _npi.cosh, _np.cosh, out=out, **kwargs)\n\n\n@set_module('mxnet.ndarray.numpy')\n@wrap_np_unary_func\ndef tanh(x, out=None, **kwargs):\n \"\"\"\n Compute hyperbolic tangent element-wise.\n Equivalent to ``np.sinh(x)/np.cosh(x)``.\n\n Parameters\n ----------\n x : ndarray or scalar.\n Input array.\n out : ndarray or None\n A location into which the result is stored. If provided, it\n must have a shape that the inputs fill into. If not provided\n or None, a freshly-allocated array is returned. The dtype of the\n output and input must be the same.\n\n Returns\n -------\n y : ndarray or scalar\n The corresponding hyperbolic tangent values.\n\n Notes\n -----\n If `out` is provided, the function writes the result into it,\n and returns a reference to `out`. 
(See Examples)\n - input x does not support complex computation (like imaginary number)\n >>> np.tanh(np.pi*1j)\n TypeError: type <type 'complex'> not supported\n\n Examples\n --------\n >>> np.tanh(np.array[0, np.pi]))\n array([0. , 0.9962721])\n >>> np.tanh(np.pi)\n 0.99627207622075\n >>> # Example of providing the optional output parameter illustrating\n >>> # that what is returned is a reference to said parameter\n >>> out1 = np.array(1)\n >>> out2 = np.tanh(np.array(0.1), out1)\n >>> out2 is out1\n True\n \"\"\"\n return _unary_func_helper(x, _npi.tanh, _np.tanh, out=out, **kwargs)\n\n\n@set_module('mxnet.ndarray.numpy')\n@wrap_np_unary_func\ndef log10(x, out=None, **kwargs):\n \"\"\"\n Return the base 10 logarithm of the input array, element-wise.\n\n Parameters\n ----------\n x : ndarray or scalar\n Input array or scalar.\n out : ndarray or None\n A location into which t'absolute', he result is stored. If provided, it\n must have a shape that the inputs broadcast to. If not provided\n or None, a freshly-allocated array is returned. The dtype of the\n output is the same as that of the input if the input is an ndarray.\n\n Returns\n -------\n y : ndarray or scalar\n The logarithm to the base 10 of `x`, element-wise. NaNs are\n returned where x is negative. This is a scalar if `x` is a scalar.\n\n Notes\n ----\n This function only supports input type of float.\n\n Examples\n --------\n >>> np.log10(np.array([1e-15, -3.]))\n array([-15., nan])\n \"\"\"\n return _unary_func_helper(x, _npi.log10, _np.log10, out=out, **kwargs)\n\n\n@set_module('mxnet.ndarray.numpy')\n@wrap_np_unary_func\ndef sqrt(x, out=None, **kwargs):\n \"\"\"\n Return the non-negative square-root of an array, element-wise.\n\n Parameters\n ----------\n x : ndarray or scalar\n The values whose square-roots are required.\n out : ndarray, or None, optional\n A location into which the result is stored. If provided, it must have\n a shape that the inputs broadcast to. If not provided or `None`,\n a freshly-allocated array is returned.\n\n Returns\n -------\n y : ndarray or scalar\n An array of the same shape as `x`, containing the positive\n square-root of each element in `x`. This is a scalar if `x` is a scalar.\n\n Notes\n ----\n This function only supports input type of float.\n\n Examples\n --------\n >>> np.sqrt(np.array([1,4,9]))\n array([1., 2., 3.])\n >>> np.sqrt(np.array([4, -1, _np.inf]))\n array([ 2., nan, inf])\n \"\"\"\n return _unary_func_helper(x, _npi.sqrt, _np.sqrt, out=out, **kwargs)\n\n\n@set_module('mxnet.ndarray.numpy')\n@wrap_np_unary_func\ndef cbrt(x, out=None, **kwargs):\n r\"\"\"\n Return the cube-root of an array, element-wise.\n\n Parameters\n ----------\n x : ndarray\n The values whose cube-roots are required.\n out : ndarray, optional\n A location into which the result is stored. If provided, it must have a shape that the\n inputs broadcast to. If not provided or None, a freshly-allocated array is returned.\n A tuple (possible only as a keyword argument) must have length equal to the number of outputs.\n\n Returns\n ----------\n y : ndarray\n An array of the same shape as x, containing the cube cube-root of each element in x.\n If out was provided, y is a reference to it. 
This is a scalar if x is a scalar.\n\n Examples\n ----------\n >>> np.cbrt([1,8,27])\n array([ 1., 2., 3.])\n \"\"\"\n return _unary_func_helper(x, _npi.cbrt, _np.cbrt, out=out, **kwargs)\n\n\n@set_module('mxnet.ndarray.numpy')\n@wrap_np_unary_func\ndef abs(x, out=None, **kwargs):\n r\"\"\"\n Calculate the absolute value element-wise.\n\n Parameters\n ----------\n x : ndarray or scalar\n Input array.\n out : ndarray or None, optional\n A location into which the result is stored. If provided, it must have\n a shape that the inputs broadcast to. If not provided or `None`,\n a freshly-allocated array is returned.\n\n Returns\n -------\n absolute : ndarray\n An ndarray containing the absolute value of\n each element in `x`. This is a scalar if `x` is a scalar.\n\n Examples\n --------\n >>> x = np.array([-1.2, 1.2])\n >>> np.abs(x)\n array([1.2, 1.2])\n \"\"\"\n return _unary_func_helper(x, _npi.abs, _np.abs, out=out, **kwargs)\n\n\n@set_module('mxnet.ndarray.numpy')\n@wrap_np_unary_func\ndef absolute(x, out=None, **kwargs):\n r\"\"\"\n Calculate the absolute value element-wise.\n np.abs is a shorthand for this function.\n\n Parameters\n ----------\n x : ndarray\n Input array.\n out : ndarray, optional\n A location into which the result is stored. If provided, it must have a shape\n that the inputs broadcast to. If not provided or None, a freshly-allocated array is returned.\n A tuple (possible only as a keyword argument) must have length equal to the number of outputs.\n\n Returns\n ----------\n absolute : ndarray\n An ndarray containing the absolute value of each element in x.\n\n Examples\n ----------\n >>> x = np.array([-1.2, 1.2])\n >>> np.absolute(x)\n array([ 1.2, 1.2])\n \"\"\"\n return _unary_func_helper(x, _npi.absolute, _np.absolute, out=out, **kwargs)\n\n\n@set_module('mxnet.ndarray.numpy')\n@wrap_np_unary_func\ndef sign(x, out=None, **kwargs):\n r\"\"\"\n Returns an element-wise indication of the sign of a number.\n The `sign` function returns ``-1 if x < 0, 0 if x==0, 1 if x > 0``. Only supports real number.\n\n Parameters\n ----------\n x : ndarray or a scalar\n Input values.\n out : ndarray or None, optional\n A location into which the result is stored.\n If provided, it must have the same shape and dtype as input ndarray.\n If not provided or `None`, a freshly-allocated array is returned.\n\n Returns\n -------\n y : ndarray\n The sign of `x`.\n This is a scalar if `x` is a scalar.\n\n Note\n -------\n - Only supports real number as input elements.\n - Input type does not support Python native iterables(list, tuple, ...).\n - ``out`` param: cannot perform auto broadcasting. ``out`` ndarray's shape must be the same as the expected output.\n - ``out`` param: cannot perform auto type cast. 
``out`` ndarray's dtype must be the same as the expected output.\n - ``out`` param does not support scalar input case.\n\n Examples\n --------\n >>> a = np.array([-5., 4.5])\n >>> np.sign(a)\n array([-1., 1.])\n >>> # Use scalars as inputs:\n >>> np.sign(4.0)\n 1.0\n >>> np.sign(0)\n 0\n >>> # Use ``out`` parameter:\n >>> b = np.zeros((2, ))\n >>> np.sign(a, out=b)\n array([-1., 1.])\n >>> b\n array([-1., 1.])\n \"\"\"\n return _unary_func_helper(x, _npi.sign, _np.sign, out=out, **kwargs)\n\n\n@set_module('mxnet.ndarray.numpy')\n@wrap_np_unary_func\ndef exp(x, out=None, **kwargs):\n r\"\"\"\n Calculate the exponential of all elements in the input array.\n\n Parameters\n ----------\n x : ndarray or scalar\n Input values.\n out : ndarray or None, optional\n A location into which the result is stored. If provided, it must have\n a shape that the inputs broadcast to. If not provided or `None`,\n a freshly-allocated array is returned.\n\n Returns\n -------\n out : ndarray or scalar\n Output array, element-wise exponential of `x`.\n This is a scalar if `x` is a scalar.\n\n Examples\n --------\n >>> np.exp(1)\n 2.718281828459045\n >>> x = np.array([-1, 1, -2, 2])\n >>> np.exp(x)\n array([0.36787945, 2.7182817 , 0.13533528, 7.389056 ])\n \"\"\"\n return _unary_func_helper(x, _npi.exp, _np.exp, out=out, **kwargs)\n\n\n@set_module('mxnet.ndarray.numpy')\n@wrap_np_unary_func\ndef expm1(x, out=None, **kwargs):\n r\"\"\"\n Calculate `exp(x) - 1` of all elements in the input array.\n\n Parameters\n ----------\n x : ndarray or scalar\n Input values.\n out : ndarray or None, optional\n A location into which the result is stored. If provided, it must have\n a shape that the inputs broadcast to. If not provided or `None`,\n a freshly-allocated array is returned.\n\n Returns\n -------\n out : ndarray or scalar\n Output array, element-wise exponential minus one: `out = exp(x) - 1`.\n This is a scalar if `x` is a scalar.\n\n Examples\n --------\n >>> np.expm1(1)\n 1.718281828459045\n >>> x = np.array([-1, 1, -2, 2])\n >>> np.expm1(x)\n array([-0.63212056, 1.71828183, -0.86466472, 6.3890561])\n \"\"\"\n return _unary_func_helper(x, _npi.expm1, _np.expm1, out=out, **kwargs)\n\n\n@set_module('mxnet.ndarray.numpy')\n@wrap_np_unary_func\ndef arcsin(x, out=None, **kwargs):\n r\"\"\"\n Inverse sine, element-wise.\n\n Parameters\n ----------\n x : ndarray or scalar\n `y`-coordinate on the unit circle.\n out : ndarray or None, optional\n A location into which the result is stored.\n If provided, it must have the same shape as the input.\n If not provided or None, a freshly-allocated array is returned.\n\n Returns\n -------\n angle : ndarray or scalar\n Output array is same shape and type as x. This is a scalar if x is a scalar.\n The inverse sine of each element in `x`, in radians and in the\n closed interval ``[-pi/2, pi/2]``.\n\n Examples\n --------\n >>> np.arcsin(1) # pi/2\n 1.5707963267948966\n >>> np.arcsin(-1) # -pi/2\n -1.5707963267948966\n >>> np.arcsin(0)\n 0.0\n\n Notes\n -----\n `arcsin` is a multivalued function: for each `x` there are infinitely\n many numbers `z` such that :math:`sin(z) = x`. 
The convention is to\n return the angle `z` whose real part lies in [-pi/2, pi/2].\n For real-valued input data types, *arcsin* always returns real output.\n For each value that cannot be expressed as a real number or infinity,\n it yields ``nan`` and sets the `invalid` floating point error flag.\n The inverse sine is also known as `asin` or sin^{-1}.\n The output `ndarray` has the same `ctx` as the input `ndarray`.\n This function differs from the original `numpy.arcsin\n <https://docs.scipy.org/doc/numpy/reference/generated/numpy.arcsin.html>`_ in\n the following aspects:\n - Only support ndarray or scalar now.\n - `where` argument is not supported.\n - Complex input is not supported.\n\n References\n ----------\n Abramowitz, M. and Stegun, I. A., *Handbook of Mathematical Functions*,\n 10th printing, New York: Dover, 1964, pp. 79ff.\n http://www.math.sfu.ca/~cbm/aands/\n \"\"\"\n return _unary_func_helper(x, _npi.arcsin, _np.arcsin, out=out, **kwargs)\n\n\n@set_module('mxnet.ndarray.numpy')\n@wrap_np_unary_func\ndef arccos(x, out=None, **kwargs):\n r\"\"\"\n Trigonometric inverse cosine, element-wise.\n The inverse of cos so that, if y = cos(x), then x = arccos(y).\n\n Parameters\n ----------\n x : ndarray\n x-coordinate on the unit circle. For real arguments, the domain is [-1, 1].\n out : ndarray, optional\n A location into which the result is stored. If provided, it must have a shape that\n the inputs broadcast to. If not provided or None, a freshly-allocated array is returned.\n A tuple (possible only as a keyword argument) must have length equal to the number of outputs.\n\n Returns\n ----------\n angle : ndarray\n The angle of the ray intersecting the unit circle at the given x-coordinate in radians [0, pi].\n This is a scalar if x is a scalar.\n\n See also\n ----------\n cos, arctan, arcsin\n\n Notes\n ----------\n arccos is a multivalued function: for each x there are infinitely many numbers z such that\n cos(z) = x. The convention is to return the angle z whose real part lies in [0, pi].\n For real-valued input data types, arccos always returns real output.\n For each value that cannot be expressed as a real number or infinity, it yields nan and sets\n the invalid floating point error flag.\n The inverse cos is also known as acos or cos^-1.\n\n Examples\n ----------\n >>> np.arccos([1, -1])\n array([ 0. , 3.14159265])\n \"\"\"\n return _unary_func_helper(x, _npi.arccos, _np.arccos, out=out, **kwargs)\n\n\n@set_module('mxnet.ndarray.numpy')\n@wrap_np_unary_func\ndef arctan(x, out=None, **kwargs):\n r\"\"\"\n Trigonometric inverse tangent, element-wise.\n The inverse of tan, so that if ``y = tan(x)`` then ``x = arctan(y)``.\n\n Parameters\n ----------\n x : ndarray or scalar\n Input values.\n out : ndarray or None, optional\n A location into which the result is stored. If provided, it must have\n a shape that the inputs broadcast to. If not provided or `None`,\n a freshly-allocated array is returned.\n\n Returns\n -------\n out : ndarray or scalar\n Out has the same shape as `x`. It lies is in\n ``[-pi/2, pi/2]`` (``arctan(+/-inf)`` returns ``+/-pi/2``).\n This is a scalar if `x` is a scalar.\n\n Notes\n -----\n `arctan` is a multi-valued function: for each `x` there are infinitely\n many numbers `z` such that tan(`z`) = `x`. 
The convention is to return\n the angle `z` whose real part lies in [-pi/2, pi/2].\n For real-valued input data types, `arctan` always returns real output.\n For each value that cannot be expressed as a real number or infinity,\n it yields ``nan`` and sets the `invalid` floating point error flag.\n For complex-valued input, we do not have support for them yet.\n The inverse tangent is also known as `atan` or tan^{-1}.\n\n Examples\n --------\n >>> x = np.array([0, 1])\n >>> np.arctan(x)\n array([0. , 0.7853982])\n >>> np.pi/4\n 0.7853981633974483\n \"\"\"\n return _unary_func_helper(x, _npi.arctan, _np.arctan, out=out, **kwargs)\n\n\n@set_module('mxnet.ndarray.numpy')\n@wrap_np_unary_func\ndef log(x, out=None, **kwargs):\n \"\"\"\n Natural logarithm, element-wise.\n The natural logarithm `log` is the inverse of the exponential function,\n so that `log(exp(x)) = x`. The natural logarithm is logarithm in base\n `e`.\n\n Parameters\n ----------\n x : ndarray\n Input value. Elements must be of real value.\n out : ndarray or None, optional\n A location into which the result is stored.\n If provided, it must have the same shape and dtype as input ndarray.\n If not provided or `None`, a freshly-allocated array is returned.\n\n Returns\n -------\n y : ndarray\n The natural logarithm of `x`, element-wise.\n This is a scalar if `x` is a scalar.\n\n Notes\n -----\n Currently only supports data of real values and ``inf`` as input. Returns data of real value, ``inf``, ``-inf`` and\n ``nan`` according to the input.\n This function differs from the original `numpy.log\n <https://docs.scipy.org/doc/numpy/reference/generated/numpy.log.html>`_ in\n the following aspects:\n - Does not support complex number for now\n - Input type does not support Python native iterables(list, tuple, ...).\n - ``out`` param: cannot perform auto broadcasting. ``out`` ndarray's shape must be the same as the expected output.\n - ``out`` param: cannot perform auto type cast. ``out`` ndarray's dtype must be the same as the expected output.\n - ``out`` param does not support scalar input case.\n\n Examples\n --------\n >>> a = np.array([1, np.exp(1), np.exp(2), 0], dtype=np.float64)\n >>> np.log(a)\n array([ 0., 1., 2., -inf], dtype=float64)\n >>> # Using default float32 dtype may lead to slightly different behavior:\n >>> a = np.array([1, np.exp(1), np.exp(2), 0], dtype=np.float32)\n >>> np.log(a)\n array([ 0., 0.99999994, 2., -inf])\n >>> np.log(1)\n 0.0\n \"\"\"\n return _unary_func_helper(x, _npi.log, _np.log, out=out, **kwargs)\n\n\n@set_module('mxnet.ndarray.numpy')\n@wrap_np_unary_func\ndef degrees(x, out=None, **kwargs):\n \"\"\"\n Convert angles from radians to degrees.\n\n Parameters\n ----------\n x : ndarray\n Input value. Elements must be of real value.\n out : ndarray or None, optional\n A location into which the result is stored.\n If provided, it must have the same shape and dtype as input ndarray.\n If not provided or `None`, a freshly-allocated array is returned.\n\n Returns\n -------\n y : ndarray\n The corresponding degree values; if `out` was supplied this is a\n reference to it.\n This is a scalar if `x` is a scalar.\n\n Notes\n -------\n This function differs from the original `numpy.degrees\n <https://docs.scipy.org/doc/numpy/reference/generated/numpy.degrees.html>`_ in\n the following aspects:\n - Input type does not support Python native iterables(list, tuple, ...). Only ndarray is supported.\n - ``out`` param: cannot perform auto broadcasting. 
``out`` ndarray's shape must be the same as the expected output.\n    - ``out`` param: cannot perform auto type cast. ``out`` ndarray's dtype must be the same as the expected output.\n    - ``out`` param does not support scalar input case.\n\n    Examples\n    --------\n    >>> rad = np.arange(12.) * np.pi / 6\n    >>> np.degrees(rad)\n    array([  0.,  30.,  60.,  90., 120., 150., 180., 210., 240., 270., 300., 330.])\n    >>> # Use specified ``out`` ndarray:\n    >>> out = np.zeros((rad.shape))\n    >>> np.degrees(rad, out)\n    array([  0.,  30.,  60.,  90., 120., 150., 180., 210., 240., 270., 300., 330.])\n    >>> out\n    array([  0.,  30.,  60.,  90., 120., 150., 180., 210., 240., 270., 300., 330.])\n    \"\"\"\n    return _unary_func_helper(x, _npi.degrees, _np.degrees, out=out, **kwargs)\n\n\n@set_module('mxnet.ndarray.numpy')\n@wrap_np_unary_func\ndef rad2deg(x, out=None, **kwargs):\n    r\"\"\"\n    Convert angles from radians to degrees.\n\n    Parameters\n    ----------\n    x : ndarray or scalar\n        Angles in radians.\n    out : ndarray or None, optional\n        A location into which the result is stored. If not provided or `None`,\n        a freshly-allocated array is returned.\n\n    Returns\n    -------\n    y : ndarray or scalar\n        The corresponding angle in degrees.\n        This is a scalar if `x` is a scalar.\n\n    Notes\n    -----\n    \"rad2deg(x)\" is \"x * 180 / pi\".\n\n    This function differs from the original numpy.rad2deg in the following aspects:\n    - Only float32 and float64 are supported.\n    - `out` must be the same size as the input.\n\n    Examples\n    --------\n    >>> np.rad2deg(np.pi/2)\n    90.0\n    \"\"\"\n    return _unary_func_helper(x, _npi.rad2deg, _np.rad2deg, out=out)\n\n\n@set_module('mxnet.ndarray.numpy')\n@wrap_np_unary_func\ndef rint(x, out=None, **kwargs):\n    \"\"\"\n    Round elements of the array to the nearest integer.\n\n    Parameters\n    ----------\n    x : ndarray or scalar\n        Input array.\n    out : ndarray or None\n        A location into which the result is stored.\n        If provided, it must have the same shape and type as the input.\n        If not provided or None, a freshly-allocated array is returned.\n\n    Returns\n    -------\n    out : ndarray or scalar\n        Output array is same shape and type as x. 
This is a scalar if x is a scalar.\n\n Notes\n -----\n This function differs from the original `numpy.rint\n <https://docs.scipy.org/doc/numpy/reference/generated/numpy.rint.html>`_ in\n the following way(s):\n - only ndarray or scalar is accpted as valid input, tuple of ndarray is not supported\n - broadcasting to `out` of different shape is currently not supported\n - when input is plain python numerics, the result will not be stored in the `out` param\n\n Examples\n --------\n >>> a = np.array([-1.7, -1.5, -0.2, 0.2, 1.5, 1.7, 2.0])\n >>> np.rint(a)\n array([-2., -2., -0., 0., 1., 2., 2.])\n \"\"\"\n return _unary_func_helper(x, _npi.rint, _np.rint, out=out, **kwargs)\n\n\n@set_module('mxnet.ndarray.numpy')\n@wrap_np_unary_func\ndef log2(x, out=None, **kwargs):\n \"\"\"\n Base-2 logarithm of x.\n\n Parameters\n ----------\n x : ndarray or scalar\n Input values.\n out : ndarray or None\n A location into which the result is stored.\n If provided, it must have the same shape and type as the input.\n If not provided or None, a freshly-allocated array is returned.\n\n Returns\n -------\n y : ndarray\n The logarithm base two of `x`, element-wise.\n This is a scalar if `x` is a scalar.\n\n Notes\n -----\n This function differs from the original `numpy.log2\n <https://www.google.com/search?q=numpy+log2>`_ in\n the following way(s):\n - only ndarray or scalar is accpted as valid input, tuple of ndarray is not supported\n - broadcasting to `out` of different shape is currently not supported\n - when input is plain python numerics, the result will not be stored in the `out` param\n\n Examples\n --------\n >>> x = np.array([0, 1, 2, 2**4])\n >>> np.log2(x)\n array([-inf, 0., 1., 4.])\n \"\"\"\n return _unary_func_helper(x, _npi.log2, _np.log2, out=out, **kwargs)\n\n\n@set_module('mxnet.ndarray.numpy')\n@wrap_np_unary_func\ndef log1p(x, out=None, **kwargs):\n \"\"\"\n Return the natural logarithm of one plus the input array, element-wise.\n Calculates ``log(1 + x)``.\n\n Parameters\n ----------\n x : ndarray or scalar\n Input array.\n out : ndarray or None\n A location into which the result is stored. If provided, it\n must have a shape that the inputs fill into. If not provided\n or None, a freshly-allocated array is returned. The dtype of the\n output and input must be the same.\n\n Returns\n -------\n y : ndarray or scalar\n Natural logarithm of 1 + x, element-wise. This is a scalar\n if x is a scalar.\n\n Notes\n -----\n For real-valued input, `log1p` is accurate also for `x` so small\n that `1 + x == 1` in floating-point accuracy.\n Logarithm is a multivalued function: for each `x` there is an infinite\n number of `z` such that `exp(z) = 1 + x`. 
The convention is to return\n the `z` whose imaginary part lies in `[-pi, pi]`.\n For real-valued input data types, `log1p` always returns real output.\n For each value that cannot be expressed as a real number or infinity,\n it yields ``nan`` and sets the `invalid` floating point error flag.\n cannot support complex-valued input.\n\n Examples\n --------\n >>> np.log1p(1e-99)\n 1e-99\n >>> a = np.array([3, 4, 5])\n >>> np.log1p(a)\n array([1.3862944, 1.609438 , 1.7917595])\n \"\"\"\n return _unary_func_helper(x, _npi.log1p, _np.log1p, out=out, **kwargs)\n\n\n@set_module('mxnet.ndarray.numpy')\n@wrap_np_unary_func\ndef radians(x, out=None, **kwargs):\n \"\"\"\n Convert angles from degrees to radians.\n\n Parameters\n ----------\n x : ndarray or scalar\n Input array in degrees.\n out : ndarray or None\n A location into which the result is stored.\n If provided, it must have the same shape and type as the input.\n If not provided or None, a freshly-allocated array is returned.\n\n Returns\n -------\n y : ndarray\n The corresponding radian values. This is a scalar if x is a scalar.\n\n Notes\n -----\n This function differs from the original `numpy.radians\n <https://docs.scipy.org/doc/numpy/reference/generated/numpy.radians.html>`_ in\n the following way(s):\n - only ndarray or scalar is accpted as valid input, tuple of ndarray is not supported\n - broadcasting to `out` of different shape is currently not supported\n - when input is plain python numerics, the result will not be stored in the `out` param\n\n Examples\n --------\n >>> deg = np.arange(12.) * 30.\n >>> np.radians(deg)\n array([0. , 0.5235988, 1.0471976, 1.5707964, 2.0943952, 2.6179938,\n 3.1415927, 3.6651914, 4.1887903, 4.712389 , 5.2359877, 5.7595863],\n dtype=float32)\n \"\"\"\n return _unary_func_helper(x, _npi.radians, _np.radians, out=out, **kwargs)\n\n\n@set_module('mxnet.ndarray.numpy')\n@wrap_np_unary_func\ndef deg2rad(x, out=None, **kwargs):\n r\"\"\"\n Convert angles from degrees to radians.\n\n Parameters\n ----------\n x : ndarray or scalar\n Angles in degrees.\n out : ndarray or None, optional\n A location into which the result is stored. If not provided or `None`,\n a freshly-allocated array is returned.\n\n Returns\n -------\n y : ndarray or scalar\n The corresponding angle in radians.\n This is a scalar if `x` is a scalar.\n\n Notes\n -----\n \"deg2rad(x)\" is \"x * pi / 180\".\n\n This function differs from the original numpy.arange in the following aspects:\n - Only support float32 and float64.\n - `out` must be in the same size of input.\n\n Examples\n --------\n >>> np.deg2rad(180)\n 3.1415927\n \"\"\"\n return _unary_func_helper(x, _npi.deg2rad, _np.deg2rad, out=out)\n\n\n@set_module('mxnet.ndarray.numpy')\n@wrap_np_unary_func\ndef reciprocal(x, out=None, **kwargs):\n r\"\"\"\n Return the reciprocal of the argument, element-wise.\n Calculates ``1/x``.\n\n Parameters\n ----------\n x : ndarray or scalar\n The values whose reciprocals are required.\n out : ndarray or None, optional\n A location into which the result is stored.\n If provided, it must have the same shape as the input.\n If not provided or None, a freshly-allocated array is returned.\n\n Returns\n -------\n y : ndarray or scalar\n Output array is same shape and type as x. This is a scalar if x is a scalar.\n\n Examples\n --------\n >>> np.reciprocal(2.)\n 0.5\n >>> x = np.array([1, 2., 3.33])\n >>> np.reciprocal(x)\n array([1. , 0.5 , 0.3003003])\n\n Notes\n -----\n .. 
note::\n This function is not designed to work with integers.\n For integer arguments with absolute value larger than 1 the result is\n always zero because of the way Python handles integer division. For\n integer zero the result is an overflow.\n The output `ndarray` has the same `ctx` as the input `ndarray`.\n This function differs from the original `numpy.reciprocal\n <https://docs.scipy.org/doc/numpy/reference/generated/numpy.reciprocal.html>`_ in\n the following aspects:\n - Only support ndarray and scalar now.\n - `where` argument is not supported.\n \"\"\"\n return _unary_func_helper(x, _npi.reciprocal, _np.reciprocal, out=out, **kwargs)\n\n\n@set_module('mxnet.ndarray.numpy')\n@wrap_np_unary_func\ndef square(x, out=None, **kwargs):\n r\"\"\"\n Return the element-wise square of the input.\n\n Parameters\n ----------\n x : ndarray or scalar\n The values whose squares are required.\n out : ndarray or None, optional\n A location into which the result is stored.\n If provided, it must have the same shape as the input.\n If not provided or None, a freshly-allocated array is returned.\n\n Returns\n -------\n y : ndarray or scalar\n Output array is same shape and type as x. This is a scalar if x is a scalar.\n\n Examples\n --------\n >>> np.square(2.)\n 4.0\n >>> x = np.array([1, 2., -1])\n >>> np.square(x)\n array([1., 4., 1.])\n\n Notes\n -----\n The output `ndarray` has the same `ctx` as the input `ndarray`.\n This function differs from the original `numpy.square\n <https://docs.scipy.org/doc/numpy/reference/generated/numpy.square.html>`_ in\n the following aspects:\n - Only support ndarray and scalar now.\n - `where` argument is not supported.\n - Complex input is not supported.\n \"\"\"\n return _unary_func_helper(x, _npi.square, _np.square, out=out, **kwargs)\n\n\n@set_module('mxnet.ndarray.numpy')\n@wrap_np_unary_func\ndef negative(x, out=None, **kwargs):\n r\"\"\"\n Numerical negative, element-wise.\n\n Parameters:\n ------------\n x : ndarray or scalar\n Input array.\n out : ndarray, None, or tuple of ndarray and None, optional\n A location into which the result is stored.\n\n Returns:\n ---------\n y : ndarray or scalar\n Returned array or scalar: y = -x. This is a scalar if x is a scalar.\n\n Examples:\n ---------\n >>> np.negative(1)\n -1\n \"\"\"\n return _unary_func_helper(x, _npi.negative, _np.negative, out=out)\n\n\n@set_module('mxnet.ndarray.numpy')\n@wrap_np_unary_func\ndef fix(x, out=None, **kwargs):\n r\"\"\"\n Round an array of floats element-wise to nearest integer towards zero.\n The rounded values are returned as floats.\n\n Parameters:\n ----------\n x : ndarray\n An array of floats to be rounded\n out : ndarray, optional\n Output array\n\n Returns:\n -------\n y : ndarray of floats\n\n Examples\n ---------\n >>> np.fix(3.14)\n 3\n \"\"\"\n return _unary_func_helper(x, _npi.fix, _np.fix, out=out)\n\n\n@set_module('mxnet.ndarray.numpy')\n@wrap_np_unary_func\ndef tan(x, out=None, **kwargs):\n r\"\"\"\n Compute tangent element-wise.\n Equivalent to np.sin(x)/np.cos(x) element-wise.\n\n Parameters:\n ----------\n x : ndarray\n Input array.\n out : ndarray, None, or tuple of ndarray and None, optional\n A location into which the result is stored. If provided,\n it must have a shape that the inputs broadcast to. If not provided or None,\n a freshly-allocated array is returned. 
A tuple (possible only as a keyword argument)\n must have length equal to the number of outputs.\n where : ndarray, optional\n Values of True indicate to calculate the ufunc at that position,\n values of False indicate to leave the value in the output alone.\n\n Returns:\n -------\n y : ndarray\n The corresponding tangent values. This is a scalar if x is a scalar.\n\n Examples:\n ---------\n >>> np.tan(0.5)\n 0.5463024898437905\n \"\"\"\n\n return _unary_func_helper(x, _npi.tan, _np.tan, out=out, **kwargs)\n\n\n@set_module('mxnet.ndarray.numpy')\n@wrap_np_unary_func\ndef ceil(x, out=None, **kwargs):\n r\"\"\"\n Return the ceiling of the input, element-wise.\n The ceil of the ndarray `x` is the smallest integer `i`, such that\n `i >= x`. It is often denoted as :math:`\\lceil x \\rceil`.\n\n Parameters\n ----------\n x : ndarray or scalar\n Input array.\n out : ndarray or None\n A location into which the result is stored. If provided, it\n must have a same shape that the inputs fill into. If not provided\n or None, a freshly-allocated array is returned. The dtype of the\n output and input must be the same.\n\n Returns\n -------\n y : ndarray or scalar\n The ceiling of each element in `x`, with `float` dtype.\n This is a scalar if `x` is a scalar.\n\n Examples\n --------\n >>> a = np.array([-1.7, -1.5, -0.2, 0.2, 1.5, 1.7, 2.0])\n >>> np.ceil(a)\n array([-1., -1., -0., 1., 2., 2., 2.])\n >>> #if you use parameter out, x and out must be ndarray.\n >>> a = np.array(1)\n >>> np.ceil(np.array(3.5), a)\n array(4.)\n >>> a\n array(4.)\n \"\"\"\n return _unary_func_helper(x, _npi.ceil, _np.ceil, out=out, **kwargs)\n\n\n@set_module('mxnet.ndarray.numpy')\n@wrap_np_unary_func\ndef floor(x, out=None, **kwargs):\n r\"\"\"\n Return the floor of the input, element-wise.\n The floor of the ndarray `x` is the largest integer `i`, such that\n `i <= x`. It is often denoted as :math:`\\lfloor x \\rfloor`.\n\n Parameters\n ----------\n x : ndarray or scalar\n Input array.\n out : ndarray or None\n A location into which the result is stored. If provided, it\n must have a same shape that the inputs fill into. If not provided\n or None, a freshly-allocated array is returned. The dtype of the\n output and input must be the same.\n\n Returns\n -------\n y : ndarray or scalar\n The floor of each element in `x`, with `float` dtype.\n This is a scalar if `x` is a scalar.\n\n Examples\n --------\n >>> a = np.array([-1.7, -1.5, -0.2, 0.2, 1.5, 1.7, 2.0])\n >>> np.floor(a)\n array([-2., -2., -1., 0., 1., 1., 2.])\n >>> #if you use parameter out, x and out must be ndarray.\n >>> a = np.array(1)\n >>> np.floor(np.array(3.5), a)\n array(3.)\n >>> a\n array(3.)\n \"\"\"\n return _unary_func_helper(x, _npi.floor, _np.floor, out=out, **kwargs)\n\n\n@set_module('mxnet.ndarray.numpy')\n@wrap_np_unary_func\ndef bitwise_not(x, out=None, **kwargs):\n r\"\"\"\n Compute bit-wise inversion, or bit-wise NOT, element-wise.\n Computes the bit-wise NOT of the underlying binary representation of\n the integers in the input arrays. This ufunc implements the C/Python\n operator ``~``.\n\n Parameters\n ----------\n x : array_like\n Only integer and boolean types are handled.\n out : ndarray, None, or tuple of ndarray and None, optional\n A location into which the result is stored. If provided, it must have\n a shape that the inputs broadcast to. If not provided or `None`,\n a freshly-allocated array is returned. 
A tuple (possible only as a\n keyword argument) must have length equal to the number of outputs.\n\n Returns\n -------\n out : ndarray or scalar\n Result.\n This is a scalar if `x` is a scalar.\n\n See Also\n --------\n bitwise_and, bitwise_or, bitwise_xor\n logical_not\n binary_repr :\n Return the binary representation of the input number as a string.\n\n Examples\n --------\n We've seen that 13 is represented by ``00001101``.\n The invert or bit-wise NOT of 13 is then:\n\n >>> x = np.invert(np.array(13, dtype=np.uint8))\n >>> x\n 242\n >>> np.binary_repr(x, width=8)\n '11110010'\n\n Notes\n -----\n `bitwise_not` is an alias for `invert`:\n\n >>> np.bitwise_not is np.invert\n True\n \"\"\"\n return _unary_func_helper(x, _npi.bitwise_not, _np.bitwise_not, out=out, **kwargs)\n\n\n@set_module('mxnet.ndarray.numpy')\n@wrap_np_unary_func\ndef invert(x, out=None, **kwargs):\n r\"\"\"\n Compute bit-wise inversion, or bit-wise NOT, element-wise.\n Computes the bit-wise NOT of the underlying binary representation of\n the integers in the input arrays. This ufunc implements the C/Python\n operator ``~``.\n\n Parameters\n ----------\n x : array_like\n Only integer and boolean types are handled.\n out : ndarray, None, or tuple of ndarray and None, optional\n A location into which the result is stored. If provided, it must have\n a shape that the inputs broadcast to. If not provided or `None`,\n a freshly-allocated array is returned. A tuple (possible only as a\n keyword argument) must have length equal to the number of outputs.\n\n Returns\n -------\n out : ndarray or scalar\n Result.\n This is a scalar if `x` is a scalar.\n\n See Also\n --------\n bitwise_and, bitwise_or, bitwise_xor\n logical_not\n binary_repr :\n Return the binary representation of the input number as a string.\n\n Examples\n --------\n We've seen that 13 is represented by ``00001101``.\n The invert or bit-wise NOT of 13 is then:\n\n >>> x = np.invert(np.array(13, dtype=np.uint8))\n >>> x\n 242\n >>> np.binary_repr(x, width=8)\n '11110010'\n\n Notes\n -----\n `bitwise_not` is an alias for `invert`:\n\n >>> np.bitwise_not is np.invert\n True\n \"\"\"\n return _unary_func_helper(x, _npi.bitwise_not, _np.bitwise_not, out=out, **kwargs)\n\n\n@set_module('mxnet.ndarray.numpy')\n@wrap_np_unary_func\ndef trunc(x, out=None, **kwargs):\n r\"\"\"\n Return the truncated value of the input, element-wise.\n The truncated value of the scalar `x` is the nearest integer `i` which\n is closer to zero than `x` is. In short, the fractional part of the\n signed number `x` is discarded.\n\n Parameters\n ----------\n x : ndarray or scalar\n Input data.\n out : ndarray or None, optional\n A location into which the result is stored.\n\n Returns\n -------\n y : ndarray or scalar\n The truncated value of each element in `x`.\n This is a scalar if `x` is a scalar.\n\n Notes\n -----\n This function differs from the original numpy.trunc in the following aspects:\n - Do not support `where`, a parameter in numpy which indicates where to calculate.\n - Cannot cast type automatically. Dtype of `out` must be same as the expected one.\n - Cannot broadcast automatically. 
Shape of `out` must be same as the expected one.\n - If `x` is plain python numeric, the result won't be stored in out.\n\n Examples\n --------\n >>> a = np.array([-1.7, -1.5, -0.2, 0.2, 1.5, 1.7, 2.0])\n >>> np.trunc(a)\n array([-1., -1., -0., 0., 1., 1., 2.])\n \"\"\"\n return _unary_func_helper(x, _npi.trunc, _np.trunc, out=out, **kwargs)\n\n\n@set_module('mxnet.ndarray.numpy')\n@wrap_np_unary_func\ndef logical_not(x, out=None, **kwargs):\n r\"\"\"\n Compute the truth value of NOT x element-wise.\n\n Parameters\n ----------\n x : ndarray or scalar\n Logical NOT is applied to the elements of `x`.\n out : ndarray or None, optional\n A location into which the result is stored.\n\n Returns\n -------\n y : bool or ndarray of bool\n Boolean result with the same shape as `x` of the NOT operation\n on elements of `x`.\n This is a scalar if `x` is a scalar.\n\n Notes\n -----\n This function differs from the original numpy.logical_not in the following aspects:\n - Do not support `where`, a parameter in numpy which indicates where to calculate.\n - Cannot cast type automatically. Dtype of `out` must be same as the expected one.\n - Cannot broadcast automatically. Shape of `out` must be same as the expected one.\n - If `x` is plain python numeric, the result won't be stored in out.\n\n Examples\n --------\n >>> x= np.array([True, False, 0, 1])\n >>> np.logical_not(x)\n array([False, True, True, False])\n\n >>> x = np.arange(5)\n >>> np.logical_not(x<3)\n array([False, False, False, True, True])\n \"\"\"\n return _unary_func_helper(x, _npi.logical_not, _np.logical_not, out=out, **kwargs)\n\n\n@set_module('mxnet.ndarray.numpy')\n@wrap_np_unary_func\ndef arcsinh(x, out=None, **kwargs):\n r\"\"\"\n Inverse hyperbolic sine, element-wise.\n\n Parameters\n ----------\n x : ndarray or scalar\n Input array.\n out : ndarray or None, optional\n A location into which the result is stored.\n\n Returns\n -------\n arcsinh : ndarray\n Array of the same shape as `x`.\n This is a scalar if `x` is a scalar.\n\n Notes\n -----\n `arcsinh` is a multivalued function: for each `x` there are infinitely\n many numbers `z` such that `sinh(z) = x`.\n\n For real-valued input data types, `arcsinh` always returns real output.\n For each value that cannot be expressed as a real number or infinity, it\n yields ``nan`` and sets the `invalid` floating point error flag.\n\n This function differs from the original numpy.arcsinh in the following aspects:\n - Do not support `where`, a parameter in numpy which indicates where to calculate.\n - Do not support complex-valued input.\n - Cannot cast type automatically. DType of `out` must be same as the expected one.\n - Cannot broadcast automatically. 
Shape of `out` must be same as the expected one.\n - If `x` is plain python numeric, the result won't be stored in out.\n\n Examples\n --------\n >>> a = np.array([3.2, 5.0])\n >>> np.arcsinh(a)\n array([1.8309381, 2.2924316])\n >>> np.arcsinh(1)\n 0.0\n \"\"\"\n return _unary_func_helper(x, _npi.arcsinh, _np.arcsinh, out=out, **kwargs)\n\n\n@set_module('mxnet.ndarray.numpy')\n@wrap_np_unary_func\ndef arccosh(x, out=None, **kwargs):\n r\"\"\"\n Inverse hyperbolic cosine, element-wise.\n\n Parameters\n ----------\n x : ndarray or scalar\n Input array.\n out : ndarray or None, optional\n A location into which the result is stored.\n\n Returns\n -------\n arccosh : ndarray\n Array of the same shape as `x`.\n This is a scalar if `x` is a scalar.\n\n Notes\n -----\n `arccosh` is a multivalued function: for each `x` there are infinitely\n many numbers `z` such that `cosh(z) = x`.\n\n For real-valued input data types, `arccosh` always returns real output.\n For each value that cannot be expressed as a real number or infinity, it\n yields ``nan`` and sets the `invalid` floating point error flag.\n\n This function differs from the original numpy.arccosh in the following aspects:\n - Do not support `where`, a parameter in numpy which indicates where to calculate.\n - Do not support complex-valued input.\n - Cannot cast type automatically. Dtype of `out` must be same as the expected one.\n - Cannot broadcast automatically. Shape of `out` must be same as the expected one.\n - If `x` is plain python numeric, the result won't be stored in out.\n\n Examples\n --------\n >>> a = np.array([3.2, 5.0])\n >>> np.arccosh(a)\n array([1.8309381, 2.2924316])\n >>> np.arccosh(1)\n 0.0\n \"\"\"\n return _unary_func_helper(x, _npi.arccosh, _np.arccosh, out=out, **kwargs)\n\n\n@set_module('mxnet.ndarray.numpy')\n@wrap_np_unary_func\ndef arctanh(x, out=None, **kwargs):\n r\"\"\"\n Inverse hyperbolic tangent, element-wise.\n\n Parameters\n ----------\n x : ndarray or scalar\n Input array.\n out : ndarray or None, optional\n A location into which the result is stored.\n\n Returns\n -------\n arctanh : ndarray\n Array of the same shape as `x`.\n This is a scalar if `x` is a scalar.\n\n Notes\n -----\n `arctanh` is a multivalued function: for each `x` there are infinitely\n many numbers `z` such that `tanh(z) = x`.\n\n For real-valued input data types, `arctanh` always returns real output.\n For each value that cannot be expressed as a real number or infinity, it\n yields ``nan`` and sets the `invalid` floating point error flag.\n\n This function differs from the original numpy.arctanh in the following aspects:\n - Do not support `where`, a parameter in numpy which indicates where to calculate.\n - Do not support complex-valued input.\n - Cannot cast type automatically. Dtype of `out` must be same as the expected one.\n - Cannot broadcast automatically. Shape of `out` must be same as the expected one.\n - If `x` is plain python numeric, the result won't be stored in out.\n\n Examples\n --------\n >>> a = np.array([0.0, -0.5])\n >>> np.arctanh(a)\n array([0., -0.54930615])\n >>> np.arctanh(0.0)\n 0.0\n \"\"\"\n return _unary_func_helper(x, _npi.arctanh, _np.arctanh, out=out, **kwargs)\n\n\n@set_module('mxnet.ndarray.numpy')\ndef tile(A, reps):\n r\"\"\"\n Construct an array by repeating A the number of times given by reps.\n\n If `reps` has length ``d``, the result will have dimension of\n ``max(d, A.ndim)``.\n\n If ``A.ndim < d``, `A` is promoted to be d-dimensional by prepending new\n axes. 
So a shape (3,) array is promoted to (1, 3) for 2-D replication,\n    or shape (1, 1, 3) for 3-D replication. If this is not the desired\n    behavior, promote `A` to d-dimensions manually before calling this\n    function.\n\n    If ``A.ndim > d``, `reps` is promoted to `A`.ndim by prepending 1's to it.\n    Thus for an `A` of shape (2, 3, 4, 5), a `reps` of (2, 2) is treated as\n    (1, 1, 2, 2).\n\n    Parameters\n    ----------\n    A : ndarray or scalar\n        An input array or a scalar to repeat.\n    reps : a single integer or tuple of integers\n        The number of repetitions of `A` along each axis.\n\n    Returns\n    -------\n    c : ndarray\n        The tiled output array.\n\n    Examples\n    --------\n    >>> a = np.array([0, 1, 2])\n    >>> np.tile(a, 2)\n    array([0., 1., 2., 0., 1., 2.])\n    >>> np.tile(a, (2, 2))\n    array([[0., 1., 2., 0., 1., 2.],\n           [0., 1., 2., 0., 1., 2.]])\n    >>> np.tile(a, (2, 1, 2))\n    array([[[0., 1., 2., 0., 1., 2.]],\n           [[0., 1., 2., 0., 1., 2.]]])\n\n    >>> b = np.array([[1, 2], [3, 4]])\n    >>> np.tile(b, 2)\n    array([[1., 2., 1., 2.],\n           [3., 4., 3., 4.]])\n    >>> np.tile(b, (2, 1))\n    array([[1., 2.],\n           [3., 4.],\n           [1., 2.],\n           [3., 4.]])\n\n    >>> c = np.array([1,2,3,4])\n    >>> np.tile(c,(4,1))\n    array([[1., 2., 3., 4.],\n           [1., 2., 3., 4.],\n           [1., 2., 3., 4.],\n           [1., 2., 3., 4.]])\n\n    Scalar as input:\n\n    >>> np.tile(2, 3)\n    array([2, 2, 2]) # repeating integer `2`\n\n    \"\"\"\n    return _unary_func_helper(A, _npi.tile, _np.tile, reps=reps)\n\n# pylint: disable=redefined-outer-name\n@set_module('mxnet.ndarray.numpy')\ndef split(ary, indices_or_sections, axis=0):\n    \"\"\"\n    Split an array into multiple sub-arrays.\n\n    Parameters\n    ----------\n    ary : ndarray\n        Array to be divided into sub-arrays.\n    indices_or_sections : int or 1-D python tuple, list or set.\n        If `indices_or_sections` is an integer, N, the array will be divided\n        into N equal arrays along `axis`. If such a split is not possible,\n        an error is raised.\n        If `indices_or_sections` is a 1-D array of sorted integers, the entries\n        indicate where along `axis` the array is split. For example,\n        ``[2, 3]`` would, for ``axis=0``, result in\n        - ary[:2]\n        - ary[2:3]\n        - ary[3:]\n        If an index exceeds the dimension of the array along `axis`,\n        an empty sub-array is returned correspondingly.\n    axis : int, optional\n        The axis along which to split, default is 0.\n\n    Returns\n    -------\n    sub-arrays : list of ndarrays\n        A list of sub-arrays.\n\n    Raises\n    ------\n    ValueError\n        If `indices_or_sections` is given as an integer, but\n        a split does not result in equal division.\n
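\n    Examples\n    --------\n    A minimal usage sketch of both calling conventions (outputs written for the\n    default float32 dtype, in the same style as the ``array_split`` examples\n    below):\n\n    >>> x = np.arange(9.0)\n    >>> np.split(x, 3)\n    [array([0., 1., 2.]), array([3., 4., 5.]), array([6., 7., 8.])]\n    >>> np.split(x, [3, 5, 6, 8])\n    [array([0., 1., 2.]), array([3., 4.]), array([5.]), array([6., 7.]), array([8.])]\n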
    \"\"\"\n    axis_size = ary.shape[axis]\n    if isinstance(indices_or_sections, integer_types):\n        sections = indices_or_sections\n        if axis_size % sections:\n            raise ValueError('array split does not result in an equal division')\n        section_size = int(axis_size / sections)\n        indices = [i * section_size for i in range(sections)]\n    elif isinstance(indices_or_sections, (list, set, tuple)):\n        indices = [0] + list(indices_or_sections)\n    else:\n        raise ValueError('indices_or_sections must be either int, or tuple / list / set of ints')\n    ret = _npi.split(ary, indices, axis, False)\n    assert isinstance(ret, list), 'Output of split should be list,' \\\n        ' got a return type {}'.format(type(ret))\n    return ret\n# pylint: enable=redefined-outer-name\n\n\n# pylint: disable=redefined-outer-name\n@set_module('mxnet.ndarray.numpy')\ndef array_split(ary, indices_or_sections, axis=0):\n    \"\"\"Split an array into multiple sub-arrays.\n\n    If `indices_or_sections` is an integer, N, the array will be divided\n    into N equal arrays along `axis`. If such a split is not possible, then for\n    an array of length l that should be split into n sections, it returns\n    l % n sub-arrays of size l//n + 1 and the rest of size l//n.\n\n    If `indices_or_sections` is a 1-D array of sorted integers, the entries\n    indicate where along `axis` the array is split. For example,\n    ``[2, 3]`` would, for ``axis=0``, result in\n    - ary[:2]\n    - ary[2:3]\n    - ary[3:]\n    If an index exceeds the dimension of the array along `axis`,\n    an empty sub-array is returned correspondingly.\n\n    Parameters\n    ----------\n    ary : ndarray\n        Array to be divided into sub-arrays.\n    indices_or_sections : int or 1-D Python tuple, list or set.\n        Parameter used to determine the number and size of the sub-arrays.\n    axis : int, optional\n        The axis along which to split, default is 0.\n\n    Returns\n    -------\n    sub-arrays : list of ndarrays\n        A list of sub-arrays.\n\n    Examples\n    --------\n    >>> x = np.arange(9.0)\n    >>> np.array_split(x, 3)\n    [array([0., 1., 2.]), array([3., 4., 5.]), array([6., 7., 8.])]\n\n    >>> np.array_split(x, [3, 5, 6, 8])\n    [array([0., 1., 2.]), array([3., 4.]), array([5.]), array([6., 7.]), array([8.])]\n\n    >>> x = np.arange(8.0)\n    >>> np.array_split(x, 3)\n    [array([0., 1., 2.]), array([3., 4., 5.]), array([6., 7.])]\n\n    >>> x = np.arange(7.0)\n    >>> np.array_split(x, 3)\n    [array([0., 1., 2.]), array([3., 4.]), array([5., 6.])]\n    \"\"\"\n    indices = []\n    sections = 0\n    if isinstance(indices_or_sections, integer_types):\n        sections = indices_or_sections\n    elif isinstance(indices_or_sections, (list, set, tuple)):\n        indices = [0] + list(indices_or_sections)\n    else:\n        raise ValueError('indices_or_sections must be either int, or tuple / list / set of ints')\n    ret = _npi.split(ary, indices, axis, False, sections)\n    if not isinstance(ret, list):\n        return [ret]\n    return ret\n# pylint: enable=redefined-outer-name\n\n\n# pylint: disable=redefined-outer-name\n@set_module('mxnet.ndarray.numpy')\ndef hsplit(ary, indices_or_sections):\n    \"\"\"Split an array into multiple sub-arrays horizontally (column-wise).\n\n    This is equivalent to ``split`` with ``axis=0`` if ``ary`` has one\n    dimension, and with ``axis=1`` otherwise.\n\n    Parameters\n    ----------\n    ary : ndarray\n        Array to be divided into sub-arrays.\n    indices_or_sections : int, list of ints or tuple of ints.\n        If `indices_or_sections` is an integer, N, the array will be divided\n        into N equal arrays along `axis`. If such a split is not possible,\n        an error is raised.\n\n        If `indices_or_sections` is a list of sorted integers, the entries\n        indicate where along `axis` the array is split.\n\n        If an index exceeds the dimension of the array along `axis`,\n        an error is raised, so each index must be less than or equal to\n        the dimension of the array along that axis.\n\n    Returns\n    -------\n    sub-arrays : list of ndarrays\n        A list of sub-arrays.\n\n    Notes\n    ------\n    - If `indices_or_sections` is given as an integer, but a split\n      does not result in equal division, a ValueError is raised.\n\n    - If indices_or_sections is an integer, and the number is 1, it will\n      raise an error. 
Because single output from split is not supported yet...\n\n See Also\n --------\n split : Split an array into multiple sub-arrays of equal size.\n\n Examples\n --------\n >>> x = np.arange(16.0).reshape(4, 4)\n >>> x\n array([[ 0., 1., 2., 3.],\n [ 4., 5., 6., 7.],\n [ 8., 9., 10., 11.],\n [12., 13., 14., 15.]])\n >>> np.hsplit(x, 2)\n [array([[ 0., 1.],\n [ 4., 5.],\n [ 8., 9.],\n [12., 13.]]),\n array([[ 2., 3.],\n [ 6., 7.],\n [10., 11.],\n [14., 15.]])]\n >>> np.hsplit(x, [3, 6])\n [array([[ 0., 1., 2.],\n [ 4., 5., 6.],\n [ 8., 9., 10.],\n [12., 13., 14.]]),\n array([[ 3.],\n [ 7.],\n [11.],\n [15.]]),\n array([], shape=(4, 0), dtype=float32)]\n\n With a higher dimensional array the split is still along the second axis.\n\n >>> x = np.arange(8.0).reshape(2, 2, 2)\n >>> x\n array([[[ 0., 1.],\n [ 2., 3.]],\n [[ 4., 5.],\n [ 6., 7.]]])\n >>> np.hsplit(x, 2)\n [array([[[ 0., 1.]],\n [[ 4., 5.]]]),\n array([[[ 2., 3.]],\n [[ 6., 7.]]])]\n\n If ``ary`` has one dimension, 'axis' = 0.\n >>> x = np.arange(4)\n array([0., 1., 2., 3.])\n >>> np.hsplit(x, 2)\n [array([0., 1.]), array([2., 3.])]\n\n If you want to produce an empty sub-array, you can see an example.\n >>> np.hsplit(x, [2, 2])\n [array([0., 1.]), array([], dtype=float32), array([2., 3.])]\n \"\"\"\n if len(ary.shape) < 1:\n raise ValueError('hsplit only works on arrays of 1 or more dimensions')\n indices = []\n sections = 0\n if isinstance(indices_or_sections, integer_types):\n sections = indices_or_sections\n elif isinstance(indices_or_sections, (list, set, tuple)):\n indices = [0] + list(indices_or_sections)\n else:\n raise ValueError('indices_or_sections must be either int, or tuple / list / set of ints')\n ret = _npi.hsplit(ary, indices, 1, False, sections)\n if not isinstance(ret, list):\n return [ret]\n return ret\n# pylint: enable=redefined-outer-name\n\n\n@set_module('mxnet.ndarray.numpy')\ndef vsplit(ary, indices_or_sections):\n r\"\"\"\n vsplit(ary, indices_or_sections)\n\n Split an array into multiple sub-arrays vertically (row-wise).\n\n ``vsplit`` is equivalent to ``split`` with `axis=0` (default): the array is always split\n along the first axis regardless of the array dimension.\n\n Parameters\n ----------\n ary : ndarray\n Array to be divided into sub-arrays.\n indices_or_sections : int or 1 - D Python tuple, list or set.\n If `indices_or_sections` is an integer, N, the array will be divided into N equal arrays\n along axis 0. If such a split is not possible, an error is raised.\n\n If `indices_or_sections` is a 1-D array of sorted integers, the entries indicate where\n along axis 0 the array is split. 
For example, ``[2, 3]`` would result in\n\n - ary[:2]\n - ary[2:3]\n - ary[3:]\n\n If an index exceeds the dimension of the array along axis 0, an error will be thrown.\n\n Returns\n -------\n sub-arrays : list of ndarrays\n A list of sub-arrays.\n\n See Also\n --------\n split : Split an array into multiple sub-arrays of equal size.\n\n Notes\n -------\n This function differs from the original `numpy.degrees\n <https://docs.scipy.org/doc/numpy/reference/generated/numpy.degrees.html>`_ in\n the following aspects:\n\n - Currently parameter ``indices_or_sections`` does not support ndarray, but supports scalar,\n tuple and list.\n - In ``indices_or_sections``, if an index exceeds the dimension of the array along axis 0,\n an error will be thrown.\n\n Examples\n --------\n >>> x = np.arange(16.0).reshape(4, 4)\n >>> x\n array([[ 0., 1., 2., 3.],\n [ 4., 5., 6., 7.],\n [ 8., 9., 10., 11.],\n [ 12., 13., 14., 15.]])\n >>> np.vsplit(x, 2)\n [array([[0., 1., 2., 3.],\n [4., 5., 6., 7.]]), array([[ 8., 9., 10., 11.],\n [12., 13., 14., 15.]])]\n\n With a higher dimensional array the split is still along the first axis.\n\n >>> x = np.arange(8.0).reshape(2, 2, 2)\n >>> x\n array([[[ 0., 1.],\n [ 2., 3.]],\n [[ 4., 5.],\n [ 6., 7.]]])\n >>> np.vsplit(x, 2)\n [array([[[0., 1.],\n [2., 3.]]]), array([[[4., 5.],\n [6., 7.]]])]\n\n \"\"\"\n if len(ary.shape) < 2:\n raise ValueError(\"vsplit only works on arrays of 2 or more dimensions\")\n return split(ary, indices_or_sections, 0)\n\n\n# pylint: disable=redefined-outer-name\n@set_module('mxnet.ndarray.numpy')\ndef dsplit(ary, indices_or_sections):\n \"\"\"\n Split array into multiple sub-arrays along the 3rd axis (depth).\n\n Please refer to the `split` documentation. `dsplit` is equivalent\n to `split` with ``axis=2``, the array is always split along the third\n axis provided the array dimension is greater than or equal to 3.\n\n Parameters\n ----------\n ary : ndarray\n Array to be divided into sub-arrays.\n indices_or_sections : int or 1 - D Python tuple, list or set.\n If `indices_or_sections` is an integer, N, the array will be divided into N equal arrays\n along axis 2. If such a split is not possible, an error is raised.\n\n If `indices_or_sections` is a 1-D array of sorted integers, the entries indicate where\n along axis 2 the array is split. For example, ``[2, 3]`` would result in\n\n - ary[:, :, :2]\n - ary[:, :, 2:3]\n - ary[:, :, 3:]\n\n If an index exceeds the dimension of the array along axis 2, an error will be thrown.\n\n Examples\n --------\n >>> x = np.arange(16.0).reshape(2, 2, 4)\n >>> x\n array([[[ 0., 1., 2., 3.],\n [ 4., 5., 6., 7.]],\n [[ 8., 9., 10., 11.],\n [12., 13., 14., 15.]]])\n >>> np.dsplit(x, 2)\n [array([[[ 0., 1.],\n [ 4., 5.]],\n [[ 8., 9.],\n [12., 13.]]]), array([[[ 2., 3.],\n [ 6., 7.]],\n [[10., 11.],\n [14., 15.]]])]\n >>> np.dsplit(x, np.array([3, 6]))\n [array([[[ 0., 1., 2.],\n [ 4., 5., 6.]],\n [[ 8., 9., 10.],\n [12., 13., 14.]]]),\n array([[[ 3.],\n [ 7.]],\n [[11.],\n [15.]]]),\n array([], shape=(2, 2, 0), dtype=float64)]\n \"\"\"\n if len(ary.shape) < 3:\n raise ValueError('dsplit only works on arrays of 3 or more dimensions')\n return split(ary, indices_or_sections, 2)\n# pylint: enable=redefined-outer-name\n\n\n@set_module('mxnet.ndarray.numpy')\ndef concatenate(seq, axis=0, out=None):\n \"\"\"\n Join a sequence of arrays along an existing axis.\n\n Parameters\n ----------\n a1, a2, ... 
: sequence of ndarray\n The arrays must have the same shape, except in the dimension\n corresponding to `axis` (the first, by default).\n axis : int, optional\n The axis along which the arrays will be joined. If axis is None,\n arrays are flattened before use. Default is 0.\n out : ndarray, optional\n If provided, the destination to place the result. The shape must be\n correct, matching that of what concatenate would have returned if no\n out argument were specified.\n\n Returns\n -------\n res : ndarray\n The concatenated array.\n\n Examples\n --------\n >>> a = np.array([[1, 2], [3, 4]])\n >>> b = np.array([[5, 6]])\n >>> np.concatenate((a, b), axis=0)\n array([[1., 2.],\n [3., 4.],\n [5., 6.]])\n\n >>> np.concatenate((a, b), axis=None)\n array([1., 2., 3., 4., 5., 6.])\n\n >>> np.concatenate((a, b.T), axis=1)\n array([[1., 2., 5.],\n [3., 4., 6.]])\n \"\"\"\n return _npi.concatenate(*seq, axis=axis, out=out)\n\n\n@set_module('mxnet.ndarray.numpy')\ndef append(arr, values, axis=None): # pylint: disable=redefined-outer-name\n \"\"\"\n Append values to the end of an array.\n\n Parameters\n ----------\n arr : ndarray\n Values are appended to a copy of this array.\n values : ndarray\n These values are appended to a copy of `arr`. It must be of the\n correct shape (the same shape as `arr`, excluding `axis`). If\n `axis` is not specified, `values` can be any shape and will be\n flattened before use.\n axis : int, optional\n The axis along which `values` are appended. If `axis` is not\n given, both `arr` and `values` are flattened before use.\n\n Returns\n -------\n append : ndarray\n A copy of `arr` with `values` appended to `axis`. Note that\n `append` does not occur in-place: a new array is allocated and\n filled. If `axis` is None, `out` is a flattened array.\n\n Examples\n --------\n >>> np.append(np.array([1, 2, 3]), np.array([[4, 5, 6],[7, 8, 9]]))\n array([1., 2., 3., 4., 5., 6., 7., 8., 9.])\n\n When `axis` is specified, `values` must have the correct shape.\n\n >>> np.append(np.array([[1, 2, 3], [4, 5, 6]]), np.array([[7, 8, 9]]), axis=0)\n array([[1., 2., 3.],\n [4., 5., 6.],\n [7., 8., 9.]])\n \"\"\"\n return _npi.concatenate(arr, values, axis=axis, out=None)\n\n\n@set_module('mxnet.ndarray.numpy')\ndef stack(arrays, axis=0, out=None):\n \"\"\"Join a sequence of arrays along a new axis.\n The axis parameter specifies the index of the new axis in the dimensions of the result.\n For example, if `axis=0` it will be the first dimension and if `axis=-1` it will be the last dimension.\n\n Parameters\n ----------\n arrays : sequence of ndarray\n Each array must have the same shape.\n axis : int, optional\n The axis in the result array along which the input arrays are stacked.\n out : ndarray, optional\n If provided, the destination to place the result. 
The shape must be correct,\n matching that of what stack would have returned if no out argument were specified.\n\n Returns\n -------\n stacked : ndarray\n The stacked array has one more dimension than the input arrays.\"\"\"\n def get_list(arrays):\n if not hasattr(arrays, '__getitem__') and hasattr(arrays, '__iter__'):\n raise ValueError(\"expected iterable for arrays but got {}\".format(type(arrays)))\n return [arr for arr in arrays]\n\n arrays = get_list(arrays)\n return _npi.stack(*arrays, axis=axis, out=out)\n\n\n@set_module('mxnet.ndarray.numpy')\ndef vstack(arrays, out=None):\n r\"\"\"Stack arrays in sequence vertically (row wise).\n\n This is equivalent to concatenation along the first axis after 1-D arrays\n of shape `(N,)` have been reshaped to `(1,N)`. Rebuilds arrays divided by\n `vsplit`.\n\n This function makes most sense for arrays with up to 3 dimensions. For\n instance, for pixel-data with a height (first axis), width (second axis),\n and r/g/b channels (third axis). The functions `concatenate` and `stack`\n provide more general stacking and concatenation operations.\n\n Parameters\n ----------\n tup : sequence of ndarrays\n The arrays must have the same shape along all but the first axis.\n 1-D arrays must have the same length.\n\n Returns\n -------\n stacked : ndarray\n The array formed by stacking the given arrays, will be at least 2-D.\n\n Examples\n --------\n >>> a = np.array([1, 2, 3])\n >>> b = np.array([2, 3, 4])\n >>> np.vstack((a, b))\n array([[1., 2., 3.],\n [2., 3., 4.]])\n\n >>> a = np.array([[1], [2], [3]])\n >>> b = np.array([[2], [3], [4]])\n >>> np.vstack((a, b))\n array([[1.],\n [2.],\n [3.],\n [2.],\n [3.],\n [4.]])\n \"\"\"\n def get_list(arrays):\n if not hasattr(arrays, '__getitem__') and hasattr(arrays, '__iter__'):\n raise ValueError(\"expected iterable for arrays but got {}\".format(type(arrays)))\n return [arr for arr in arrays]\n\n arrays = get_list(arrays)\n return _npi.vstack(*arrays)\n\n\n@set_module('mxnet.ndarray.numpy')\ndef row_stack(arrays):\n r\"\"\"Stack arrays in sequence vertically (row wise).\n This is equivalent to concatenation along the first axis after 1-D arrays\n of shape `(N,)` have been reshaped to `(1,N)`. Rebuilds arrays divided by\n `vsplit`.\n This function makes most sense for arrays with up to 3 dimensions. For\n instance, for pixel-data with a height (first axis), width (second axis),\n and r/g/b channels (third axis). 
The functions `concatenate` and `stack`\n provide more general stacking and concatenation operations.\n Parameters\n ----------\n tup : sequence of ndarrays\n The arrays must have the same shape along all but the first axis.\n 1-D arrays must have the same length.\n Returns\n -------\n stacked : ndarray\n The array formed by stacking the given arrays, will be at least 2-D.\n Examples\n --------\n >>> a = np.array([1, 2, 3])\n >>> b = np.array([2, 3, 4])\n >>> np.vstack((a, b))\n array([[1., 2., 3.],\n [2., 3., 4.]])\n >>> a = np.array([[1], [2], [3]])\n >>> b = np.array([[2], [3], [4]])\n >>> np.vstack((a, b))\n array([[1.],\n [2.],\n [3.],\n [2.],\n [3.],\n [4.]])\n \"\"\"\n def get_list(arrays):\n if not hasattr(arrays, '__getitem__') and hasattr(arrays, '__iter__'):\n raise ValueError(\"expected iterable for arrays but got {}\".format(type(arrays)))\n return [arr for arr in arrays]\n\n arrays = get_list(arrays)\n return _npi.vstack(*arrays)\n\n\n@set_module('mxnet.ndarray.numpy')\ndef column_stack(tup):\n \"\"\"\n Stack 1-D arrays as columns into a 2-D array.\n Take a sequence of 1-D arrays and stack them as columns\n to make a single 2-D array. 2-D arrays are stacked as-is,\n just like with `hstack`. 1-D arrays are turned into 2-D columns\n first.\n\n Returns\n --------\n stacked : 2-D array\n The array formed by stacking the given arrays.\n\n See Also\n --------\n stack, hstack, vstack, concatenate\n\n Examples\n --------\n >>> a = np.array((1,2,3))\n >>> b = np.array((2,3,4))\n >>> np.column_stack((a,b))\n array([[1., 2.],\n [2., 3.],\n [3., 4.]])\n \"\"\"\n return _npi.column_stack(*tup)\n\n\n@set_module('mxnet.ndarray.numpy')\ndef hstack(arrays):\n \"\"\"\n Stack arrays in sequence horizontally (column wise).\n This is equivalent to concatenation along the second axis,\n except for 1-D arrays where it concatenates along the first axis.\n Rebuilds arrays divided by hsplit.\n This function makes most sense for arrays with up to 3 dimensions.\n For instance, for pixel-data with a height (first axis), width (second axis),\n and r/g/b channels (third axis). The functions concatenate,\n stack and block provide more general stacking and concatenation operations.\n\n Parameters\n ----------\n tup : sequence of ndarrays\n The arrays must have the same shape along all but the second axis, except 1-D arrays which can be any length.\n\n Returns\n -------\n stacked : ndarray\n The array formed by stacking the given arrays.\n\n Examples\n --------\n >>> from mxnet import np,npx\n >>> a = np.array((1,2,3))\n >>> b = np.array((2,3,4))\n >>> np.hstack((a,b))\n array([1., 2., 3., 2., 3., 4.])\n >>> a = np.array([[1],[2],[3]])\n >>> b = np.array([[2],[3],[4]])\n >>> np.hstack((a,b))\n array([[1., 2.],\n [2., 3.],\n [3., 4.]])\n \"\"\"\n return _npi.hstack(*arrays)\n\n\n@set_module('mxnet.ndarray.numpy')\ndef dstack(arrays):\n \"\"\"\n Stack arrays in sequence depth wise (along third axis).\n This is equivalent to concatenation along the third axis after 2-D arrays\n of shape `(M,N)` have been reshaped to `(M,N,1)` and 1-D arrays of shape\n `(N,)` have been reshaped to `(1,N,1)`. Rebuilds arrays divided by\n `dsplit`.\n This function makes most sense for arrays with up to 3 dimensions. For\n instance, for pixel-data with a height (first axis), width (second axis),\n and r/g/b channels (third axis). 
The functions `concatenate`, `stack` and\n `block` provide more general stacking and concatenation operations.\n\n Parameters\n ----------\n tup : sequence of arrays\n The arrays must have the same shape along all but the third axis.\n 1-D or 2-D arrays must have the same shape.\n\n Returns\n -------\n stacked : ndarray\n The array formed by stacking the given arrays, will be at least 3-D.\n\n Examples\n --------\n >>> a = np.array((1,2,3))\n >>> b = np.array((2,3,4))\n >>> np.dstack((a,b))\n array([[[1, 2],\n [2, 3],\n [3, 4]]])\n >>> a = np.array([[1],[2],[3]])\n >>> b = np.array([[2],[3],[4]])\n >>> np.dstack((a,b))\n array([[[1, 2]],\n [[2, 3]],\n [[3, 4]]])\n \"\"\"\n return _npi.dstack(*arrays)\n\n\n@set_module('mxnet.ndarray.numpy')\n@wrap_np_binary_func\ndef maximum(x1, x2, out=None, **kwargs):\n \"\"\"\n Returns element-wise maximum of the input arrays with broadcasting.\n\n Parameters\n ----------\n x1, x2 : scalar or mxnet.numpy.ndarray\n The arrays holding the elements to be compared. They must have the same shape,\n or shapes that can be broadcast to a single shape.\n\n Returns\n -------\n out : mxnet.numpy.ndarray or scalar\n The maximum of x1 and x2, element-wise. This is a scalar if both x1 and x2 are scalars.\"\"\"\n return _ufunc_helper(x1, x2, _npi.maximum, _np.maximum, _npi.maximum_scalar, None, out)\n\n\n@set_module('mxnet.ndarray.numpy')\n@wrap_np_binary_func\ndef minimum(x1, x2, out=None, **kwargs):\n \"\"\"\n Returns element-wise minimum of the input arrays with broadcasting.\n\n Parameters\n ----------\n x1, x2 : scalar or mxnet.numpy.ndarray\n The arrays holding the elements to be compared. They must have the same shape,\n or shapes that can be broadcast to a single shape.\n\n Returns\n -------\n out : mxnet.numpy.ndarray or scalar\n The minimum of x1 and x2, element-wise. This is a scalar if both x1 and x2 are scalars.\"\"\"\n return _ufunc_helper(x1, x2, _npi.minimum, _np.minimum, _npi.minimum_scalar, None, out)\n\n\n@set_module('mxnet.ndarray.numpy')\ndef swapaxes(a, axis1, axis2):\n \"\"\"Interchange two axes of an array.\n\n Parameters\n ----------\n a : ndarray\n Input array.\n axis1 : int\n First axis.\n axis2 : int\n Second axis.\n\n Returns\n -------\n a_swapped : ndarray\n Swapped array. This is always a copy of the input array.\n \"\"\"\n return _npi.swapaxes(a, dim1=axis1, dim2=axis2)\n\n\n@set_module('mxnet.ndarray.numpy')\ndef clip(a, a_min, a_max, out=None):\n \"\"\"clip(a, a_min, a_max, out=None)\n\n Clip (limit) the values in an array.\n Given an interval, values outside the interval are clipped to\n the interval edges. For example, if an interval of ``[0, 1]``\n is specified, values smaller than 0 become 0, and values larger\n than 1 become 1.\n\n Parameters\n ----------\n a : ndarray\n Array containing elements to clip.\n a_min : scalar or `None`\n Minimum value. If `None`, clipping is not performed on lower\n interval edge. Not more than one of `a_min` and `a_max` may be\n `None`.\n a_max : scalar or `None`\n Maximum value. If `None`, clipping is not performed on upper\n interval edge. Not more than one of `a_min` and `a_max` may be\n `None`.\n out : ndarray, optional\n The results will be placed in this array. It may be the input\n array for in-place clipping. `out` must be of the right shape\n to hold the output. 
Its type is preserved.\n\n Returns\n -------\n clipped_array : ndarray\n An array with the elements of `a`, but where values\n < `a_min` are replaced with `a_min`, and those > `a_max`\n with `a_max`.\n\n Notes\n -----\n ndarray `a_min` and `a_max` are not supported.\n\n Examples\n --------\n >>> a = np.arange(10)\n >>> np.clip(a, 1, 8)\n array([1., 1., 2., 3., 4., 5., 6., 7., 8., 8.], dtype=float32)\n >>> a\n array([0., 1., 2., 3., 4., 5., 6., 7., 8., 9.], dtype=float32)\n >>> np.clip(a, 3, 6, out=a)\n array([3., 3., 3., 3., 4., 5., 6., 6., 6., 6.], dtype=float32)\n \"\"\"\n if a_min is None and a_max is None:\n raise ValueError('array_clip: must set either max or min')\n if a_min is None:\n a_min = float('-inf')\n if a_max is None:\n a_max = float('inf')\n return _npi.clip(a, a_min, a_max, out=out)\n\n\n@set_module('mxnet.ndarray.numpy')\ndef argmax(a, axis=None, out=None):\n r\"\"\"\n Returns the indices of the maximum values along an axis.\n\n Parameters\n ----------\n a : ndarray\n Input array. Only support ndarrays of dtype `float16`, `float32`, and `float64`.\n axis : int, optional\n By default, the index is into the flattened array, otherwise\n along the specified axis.\n out : ndarray or None, optional\n A location into which the result is stored.\n If provided, it must have the same shape and dtype as input ndarray.\n If not provided or `None`, a freshly-allocated array is returned.\n\n Returns\n -------\n index_array : ndarray of indices whose dtype is same as the input ndarray.\n Array of indices into the array. It has the same shape as `a.shape`\n with the dimension along `axis` removed.\n\n Notes\n -----\n In case of multiple occurrences of the maximum values, the indices\n corresponding to the first occurrence are returned.\n\n This function differs from the original `numpy.argmax\n <https://docs.scipy.org/doc/numpy/reference/generated/numpy.argmax.html>`_ in\n the following aspects:\n\n - Input type does not support Python native iterables(list, tuple, ...).\n - ``out`` param: cannot perform auto broadcasting. ``out`` ndarray's shape must be the same as the expected output.\n - ``out`` param: cannot perform auto type cast. ``out`` ndarray's dtype must be the same as the expected output.\n - ``out`` param does not support scalar input case.\n\n Examples\n --------\n >>> a = np.arange(6).reshape(2,3) + 10\n >>> a\n array([[10., 11., 12.],\n [13., 14., 15.]])\n >>> np.argmax(a)\n array(5.)\n >>> np.argmax(a, axis=0)\n array([1., 1., 1.])\n >>> np.argmax(a, axis=1)\n array([2., 2.])\n\n >>> b = np.arange(6)\n >>> b[1] = 5\n >>> b\n array([0., 5., 2., 3., 4., 5.])\n >>> np.argmax(b) # Only the first occurrence is returned.\n array(1.)\n\n Specify ``out`` ndarray:\n\n >>> a = np.arange(6).reshape(2,3) + 10\n >>> b = np.zeros((2,))\n >>> np.argmax(a, axis=1, out=b)\n array([2., 2.])\n >>> b\n array([2., 2.])\n \"\"\"\n return _npi.argmax(a, axis=axis, keepdims=False, out=out)\n\n\n@set_module('mxnet.ndarray.numpy')\ndef argmin(a, axis=None, out=None):\n r\"\"\"\n Returns the indices of the maximum values along an axis.\n\n Parameters\n ----------\n a : ndarray\n Input array. Only support ndarrays of dtype `float16`, `float32`, and `float64`.\n axis : int, optional\n By default, the index is into the flattened array, otherwise\n along the specified axis.\n out : ndarray or None, optional\n If provided, the result will be inserted into this array. 
It should\n be of the appropriate shape and dtype.\n\n Returns\n -------\n index_array : ndarray of indices whose dtype is same as the input ndarray.\n Array of indices into the array. It has the same shape as `a.shape`\n with the dimension along `axis` removed.\n\n Notes\n -----\n In case of multiple occurrences of the maximum values, the indices\n corresponding to the first occurrence are returned.\n\n This function differs from the original `numpy.argmax\n <https://docs.scipy.org/doc/numpy/reference/generated/numpy.argmax.html>`_ in\n the following aspects:\n\n - Input type does not support Python native iterables(list, tuple, ...).\n - ``out`` param: cannot perform auto broadcasting. ``out`` ndarray's shape must be the same as the expected output.\n - ``out`` param: cannot perform auto type cast. ``out`` ndarray's dtype must be the same as the expected output.\n - ``out`` param does not support scalar input case.\n\n Examples\n --------\n >>> a = np.arange(6).reshape(2,3) + 10\n >>> a\n array([[10., 11., 12.],\n [13., 14., 15.]])\n >>> np.argmin(a)\n array(0.)\n >>> np.argmin(a, axis=0)\n array([0., 0., 0.])\n >>> np.argmin(a, axis=1)\n array([0., 0.])\n\n >>> b = np.arange(6)\n >>> b[2] = 0\n >>> b\n array([0., 1., 0., 3., 4., 5.])\n >>> np.argmax(b) # Only the first occurrence is returned.\n array(0.)\n\n Specify ``out`` ndarray:\n\n >>> a = np.arange(6).reshape(2,3) + 10\n >>> b = np.zeros((2,))\n >>> np.argmin(a, axis=1, out=b)\n array([0., 0.])\n >>> b\n array([0., 0.])\n \"\"\"\n return _npi.argmin(a, axis=axis, keepdims=False, out=out)\n\n\n@set_module('mxnet.ndarray.numpy')\ndef average(a, axis=None, weights=None, returned=False, out=None):\n \"\"\"\n Compute the weighted average along the specified axis.\n\n Parameters\n --------\n a : ndarray\n Array containing data to be averaged.\n axis : None or int or tuple of ints, optional\n Axis or axes along which to average a.\n The default, axis=None, will average over\n all of the elements of the input array.\n If axis is negative it counts from the last to the first axis.\n New in version 1.7.0.\n If axis is a tuple of ints, averaging is\n performed on all of the axes specified in the tuple\n instead of a single axis or all the axes as before.\n weights : ndarray, optional\n An array of weights associated with the values in a, must be the same dtype with a.\n Each value in a contributes to the average according to its associated weight.\n The weights array can either be 1-D (in which case its length must be\n the size of a along the given axis) or of the same shape as a.\n If weights=None, then all data in a are assumed to have a weight equal to one.\n The 1-D calculation is: avg = sum(a * weights) / sum(weights)\n The only constraint on weights is that sum(weights) must not be 0.\n returned : bool, optional\n Default is False.\n If True, the tuple (average, sum_of_weights) is returned,\n otherwise only the average is returned.\n If weights=None, sum_of_weights is equivalent to\n the number of elements over which the average is taken.\n out : ndarray, optional\n If provided, the calculation is done into this array.\n\n Returns\n --------\n retval, [sum_of_weights] : ndarray\n Return the average along the specified axis.\n When returned is True, return a tuple with the average as the first element\n and the sum of the weights as the second element. 
sum_of_weights is of the same type as retval.\n If a is integral, the result dtype will be float32, otherwise it will be the same as dtype of a.\n\n Raises\n --------\n MXNetError\n - When all weights along axis sum to zero.\n - When the length of 1D weights is not the same as the shape of a along axis.\n - When given 1D weights, the axis is not specified or is not int.\n - When the shape of weights and a differ, but weights are not 1D.\n\n See also\n --------\n mean\n\n Notes\n --------\n This function differs from the original `numpy.average`\n <https://numpy.org/devdocs/reference/generated/numpy.average.html>`_ in\n the following way(s):\n\n - Does not guarantee the same behavior with numpy when given float16 dtype and overflow happens\n - Does not support complex dtype\n - The dtypes of a and weights must be the same\n - Integral a results in float32 returned dtype, not float64\n\n Examples\n --------\n >>> data = np.arange(1, 5)\n >>> data\n array([1., 2., 3., 4.])\n >>> np.average(data)\n array(2.5)\n >>> np.average(np.arange(1, 11), weights=np.arange(10, 0, -1))\n array(4.)\n >>> data = np.arange(6).reshape((3,2))\n >>> data\n array([[0., 1.],\n [2., 3.],\n [4., 5.]])\n >>> weights = np.array([0.25, 0.75])\n array([0.25, 0.75])\n >>> np.average(data, axis=1, weights=weights)\n array([0.75, 2.75, 4.75])\n \"\"\"\n if weights is None:\n return _npi.average(a, axis=axis, weights=None, returned=returned, weighted=False, out=out)\n else:\n return _npi.average(a, axis=axis, weights=weights, returned=returned, out=out)\n\n\n@set_module('mxnet.ndarray.numpy')\ndef mean(a, axis=None, dtype=None, out=None, keepdims=False): # pylint: disable=arguments-differ\n \"\"\"\n mean(a, axis=None, dtype=None, out=None, keepdims=None)\n Compute the arithmetic mean along the specified axis.\n Returns the average of the array elements.\n The average is taken over the flattened array by default, otherwise over the specified axis.\n Parameters\n ----------\n a : ndarray\n ndarray containing numbers whose mean is desired.\n axis : None or int or tuple of ints, optional\n Axis or axes along which the means are computed. The default is to compute the mean of the flattened array.\n If this is a tuple of ints, a mean is performed over multiple axes,\n instead of a single axis or all the axes as before.\n dtype : data-type, optional\n Type to use in computing the mean. For integer inputs, the default is float32;\n for floating point inputs, it is the same as the input dtype.\n out : ndarray, optional\n Alternate output array in which to place the result. The default is None; if provided,\n it must have the same shape and type as the expected output\n keepdims : bool, optional\n If this is set to True, the axes which are reduced are left in the result\n as dimensions with size one. With this option, the result will broadcast correctly\n against the input array.\n If the default value is passed, then keepdims will not be passed through to the mean\n method of sub-classes of ndarray, however any non-default value will be. 
If the sub-class\n method does not implement keepdims any exceptions will be raised.\n Returns\n -------\n m : ndarray, see dtype parameter above\n If out=None, returns a new array containing the mean values,\n otherwise a reference to the output array is returned.\n Notes\n -----\n This function differs from the original `numpy.mean\n <https://docs.scipy.org/doc/numpy/reference/generated/numpy.mean.html>`_ in\n the following way(s):\n - only ndarray is accepted as valid input, python iterables or scalar is not supported\n - default data type for integer input is float32\n Examples\n --------\n >>> a = np.array([[1, 2], [3, 4]])\n >>> np.mean(a)\n array(2.5)\n >>> a = np.zeros((2, 512*512), dtype=np.float32)\n >>> a[0,:] = 1.0\n >>> a[1,:] = 0.1\n >>> np.mean(a)\n array(0.55)\n >>> np.mean(a, dtype=np.float64)\n array(0.55)\n \"\"\"\n return _npi.mean(a, axis=axis, dtype=dtype, keepdims=keepdims, out=out)\n\n\n@set_module('mxnet.ndarray.numpy')\ndef std(a, axis=None, dtype=None, out=None, ddof=0, keepdims=False): # pylint: disable=too-many-arguments\n \"\"\"\n Compute the standard deviation along the specified axis.\n Returns the standard deviation, a measure of the spread of a distribution,\n of the array elements. The standard deviation is computed for the\n flattened array by default, otherwise over the specified axis.\n\n Parameters\n ----------\n a : ndarray\n Calculate the standard deviation of these values.\n axis : None or int or tuple of ints, optional\n Axis or axes along which the standard deviation is computed. The\n default is to compute the standard deviation of the flattened array.\n .. versionadded:: 1.7.0\n If this is a tuple of ints, a standard deviation is performed over\n multiple axes, instead of a single axis or all the axes as before.\n dtype : dtype, optional\n Type to use in computing the standard deviation. For arrays of\n integer type the default is float64, for arrays of float types it is\n the same as the array type.\n out : ndarray, optional\n Alternative output array in which to place the result. It must have\n the same shape as the expected output but the type (of the calculated\n values) will be cast if necessary.\n ddof : int, optional\n Means Delta Degrees of Freedom. The divisor used in calculations\n is ``N - ddof``, where ``N`` represents the number of elements.\n By default `ddof` is zero.\n keepdims : bool, optional\n If this is set to True, the axes which are reduced are left\n in the result as dimensions with size one. With this option,\n the result will broadcast correctly against the input array.\n If the default value is passed, then `keepdims` will not be\n passed through to the `std` method of sub-classes of\n `ndarray`, however any non-default value will be. 
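# --- Illustrative sketch, not part of the library source ---
# Why keepdims matters: keeping the reduced axis as a size-1 dimension lets the
# result broadcast back against the input, e.g. for per-row centering. Plain
# NumPy is used here to illustrate the documented, NumPy-compatible behaviour.
import numpy as np
x = np.arange(6, dtype=np.float32).reshape(2, 3)
row_mean = np.mean(x, axis=1, keepdims=True)   # shape (2, 1) instead of (2,)
centered = x - row_mean                        # broadcasts cleanly over (2, 3)
assert centered.shape == (2, 3)
assert np.allclose(centered.mean(axis=1), 0.0)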
If the\n sub-class' method does not implement `keepdims` any\n exceptions will be raised.\n\n Returns\n -------\n standard_deviation : ndarray, see dtype parameter above.\n If `out` is None, return a new array containing the standard deviation,\n otherwise return a reference to the output array.\n\n Examples\n --------\n >>> a = np.array([[1, 2], [3, 4]])\n >>> np.std(a)\n 1.1180339887498949 # may vary\n >>> np.std(a, axis=0)\n array([1., 1.])\n >>> np.std(a, axis=1)\n array([0.5, 0.5])\n In single precision, std() can be inaccurate:\n >>> a = np.zeros((2, 512*512), dtype=np.float32)\n >>> a[0, :] = 1.0\n >>> a[1, :] = 0.1\n >>> np.std(a)\n array(0.45)\n >>> np.std(a, dtype=np.float64)\n array(0.45, dtype=float64)\n \"\"\"\n return _npi.std(a, axis=axis, dtype=dtype, ddof=ddof, keepdims=keepdims, out=out)\n\n\n@set_module('mxnet.ndarray.numpy')\ndef var(a, axis=None, dtype=None, out=None, ddof=0, keepdims=False): # pylint: disable=too-many-arguments\n \"\"\"\n Compute the variance along the specified axis.\n Returns the variance of the array elements, a measure of the spread of a\n distribution. The variance is computed for the flattened array by\n default, otherwise over the specified axis.\n\n Parameters\n ----------\n a : ndarray\n Array containing numbers whose variance is desired. If `a` is not an\n array, a conversion is attempted.\n axis : None or int or tuple of ints, optional\n Axis or axes along which the variance is computed. The default is to\n compute the variance of the flattened array.\n .. versionadded:: 1.7.0\n If this is a tuple of ints, a variance is performed over multiple axes,\n instead of a single axis or all the axes as before.\n dtype : data-type, optional\n Type to use in computing the variance. For arrays of integer type\n the default is `float32`; for arrays of float types it is the same as\n the array type.\n out : ndarray, optional\n Alternate output array in which to place the result. It must have\n the same shape as the expected output, but the type is cast if\n necessary.\n ddof : int, optional\n \"Delta Degrees of Freedom\": the divisor used in the calculation is\n ``N - ddof``, where ``N`` represents the number of elements. By\n default `ddof` is zero.\n keepdims : bool, optional\n If this is set to True, the axes which are reduced are left\n in the result as dimensions with size one. With this option,\n the result will broadcast correctly against the input array.\n If the default value is passed, then `keepdims` will not be\n passed through to the `var` method of sub-classes of\n `ndarray`, however any non-default value will be. 
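# --- Illustrative sketch, not part of the library source ---
# The ddof parameter described above switches the divisor from N (population
# estimate) to N - ddof (e.g. the unbiased sample estimate with ddof=1).
# Hypothetical data, plain NumPy for illustration.
import numpy as np
x = np.array([1.0, 2.0, 3.0, 4.0], dtype=np.float32)
population = np.std(x, ddof=0)   # divides the squared deviations by N
sample = np.std(x, ddof=1)       # divides by N - 1, giving a slightly larger value
assert sample > population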
If the\n sub-class' method does not implement `keepdims` any\n exceptions will be raised.\n\n Returns\n -------\n variance : ndarray, see dtype parameter above\n If ``out=None``, returns a new array containing the variance;\n otherwise, a reference to the output array is returned.\n\n Examples\n --------\n >>> a = np.array([[1, 2], [3, 4]])\n >>> np.var(a)\n array(1.25)\n >>> np.var(a, axis=0)\n array([1., 1.])\n >>> np.var(a, axis=1)\n array([0.25, 0.25])\n\n >>> a = np.zeros((2, 512*512), dtype=np.float32)\n >>> a[0, :] = 1.0\n >>> a[1, :] = 0.1\n >>> np.var(a)\n array(0.2025)\n >>> np.var(a, dtype=np.float64)\n array(0.2025, dtype=float64)\n >>> ((1-0.55)**2 + (0.1-0.55)**2)/2\n 0.2025\n \"\"\"\n return _npi.var(a, axis=axis, dtype=dtype, ddof=ddof, keepdims=keepdims, out=out)\n\n\n# pylint: disable=redefined-outer-name\n@set_module('mxnet.ndarray.numpy')\ndef indices(dimensions, dtype=_np.int32, ctx=None):\n \"\"\"Return an array representing the indices of a grid.\n\n Compute an array where the subarrays contain index values 0,1,...\n varying only along the corresponding axis.\n\n Parameters\n ----------\n dimensions : sequence of ints\n The shape of the grid.\n dtype : data-type, optional\n The desired data-type for the array. Default is `int32`.\n ctx : device context, optional\n Device context on which the memory is allocated. Default is\n `mxnet.context.current_context()`.\n\n Returns\n -------\n grid : ndarray\n The array of grid indices,\n ``grid.shape = (len(dimensions),) + tuple(dimensions)``.\n\n Notes\n -----\n The output shape is obtained by prepending the number of dimensions\n in front of the tuple of dimensions, i.e. if `dimensions` is a tuple\n ``(r0, ..., rN-1)`` of length ``N``, the output shape is\n ``(N,r0,...,rN-1)``.\n\n The subarrays ``grid[k]`` contain the N-D array of indices along the\n ``k-th`` axis. Explicitly::\n\n grid[k,i0,i1,...,iN-1] = ik\n\n Examples\n --------\n >>> grid = np.indices((2, 3))\n >>> grid.shape\n (2, 2, 3)\n >>> grid[0] # row indices\n array([[0, 0, 0],\n [1, 1, 1]], dtype=int32)\n >>> grid[1] # column indices\n array([[0, 1, 2],\n [0, 1, 2]], dtype=int32)\n\n The indices can be used as an index into an array.\n\n >>> x = np.arange(20).reshape(5, 4)\n >>> row, col = np.indices((2, 3))\n >>> x[row, col]\n array([[0., 1., 2.],\n [4., 5., 6.]])\n\n Note that it would be more straightforward in the above example to\n extract the required elements directly with ``x[:2, :3]``.\n \"\"\"\n if isinstance(dimensions, (tuple, list)):\n if ctx is None:\n ctx = current_context()\n return _npi.indices(dimensions=dimensions, dtype=dtype, ctx=ctx)\n else:\n raise ValueError(\"The dimensions must be sequence of ints\")\n# pylint: enable=redefined-outer-name\n\n\n@set_module('mxnet.ndarray.numpy')\n@wrap_np_binary_func\ndef copysign(x1, x2, out=None, **kwargs):\n r\"\"\"\n Change the sign of x1 to that of x2, element-wise.\n\n If `x2` is a scalar, its sign will be copied to all elements of `x1`.\n\n Parameters\n ----------\n x1 : ndarray or scalar\n Values to change the sign of.\n x2 : ndarray or scalar\n The sign of `x2` is copied to `x1`.\n out : ndarray or None, optional\n A location into which the result is stored. It must be of the\n right shape and right type to hold the output. 
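# --- Illustrative sketch, not part of the library source ---
# As the indices() docstring notes, advanced indexing with the per-axis grids
# reproduces a plain slice; checked here with classic NumPy for illustration.
import numpy as np
x = np.arange(20).reshape(5, 4)
row, col = np.indices((2, 3))
assert (x[row, col] == x[:2, :3]).all()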
If not provided\n or `None`,a freshly-allocated array is returned.\n\n Returns\n -------\n out : ndarray or scalar\n The values of `x1` with the sign of `x2`.\n This is a scalar if both `x1` and `x2` are scalars.\n\n Notes\n -------\n This function differs from the original `numpy.copysign\n <https://docs.scipy.org/doc/numpy/reference/generated/numpy.copysign.html>`_ in\n the following aspects:\n\n - ``where`` param is not supported.\n\n Examples\n --------\n >>> np.copysign(1.3, -1)\n -1.3\n >>> 1/np.copysign(0, 1)\n inf\n >>> 1/np.copysign(0, -1)\n -inf\n\n >>> a = np.array([-1, 0, 1])\n >>> np.copysign(a, -1.1)\n array([-1., -0., -1.])\n >>> np.copysign(a, np.arange(3)-1)\n array([-1., 0., 1.])\n \"\"\"\n return _ufunc_helper(x1, x2, _npi.copysign, _np.copysign, _npi.copysign_scalar, _npi.rcopysign_scalar, out)\n\n\n@set_module('mxnet.ndarray.numpy')\ndef ravel(x, order='C'):\n r\"\"\"\n ravel(x)\n\n Return a contiguous flattened array.\n A 1-D array, containing the elements of the input, is returned. A copy is\n made only if needed.\n\n Parameters\n ----------\n x : ndarray\n Input array. The elements in `x` are read in row-major, C-style order and\n packed as a 1-D array.\n order : `C`, optional\n Only support row-major, C-style order.\n\n Returns\n -------\n y : ndarray\n y is an array of the same subtype as `x`, with shape ``(x.size,)``.\n Note that matrices are special cased for backward compatibility, if `x`\n is a matrix, then y is a 1-D ndarray.\n\n Notes\n -----\n This function differs from the original numpy.arange in the following aspects:\n - Only support row-major, C-style order.\n\n Examples\n --------\n It is equivalent to ``reshape(x, -1)``.\n\n >>> x = np.array([[1, 2, 3], [4, 5, 6]])\n >>> print(np.ravel(x))\n [1. 2. 3. 4. 5. 6.]\n\n >>> print(x.reshape(-1))\n [1. 2. 3. 4. 5. 6.]\n\n >>> print(np.ravel(x.T))\n [1. 4. 2. 5. 3. 6.]\n \"\"\"\n if order != 'C':\n raise NotImplementedError('order {} is not supported'.format(order))\n if isinstance(x, numeric_types):\n return _np.reshape(x, -1)\n elif isinstance(x, NDArray):\n return _npi.reshape(x, -1)\n else:\n raise TypeError('type {} not supported'.format(str(type(x))))\n\n\ndef unravel_index(indices, shape, order='C'): # pylint: disable=redefined-outer-name\n \"\"\"\n Converts a flat index or array of flat indices into a tuple of coordinate arrays.\n\n Parameters:\n -------------\n indices : array_like\n An integer array whose elements are indices into the flattened version of an array of dimensions shape.\n Before version 1.6.0, this function accepted just one index value.\n shape : tuple of ints\n The shape of the array to use for unraveling indices.\n\n Returns:\n -------------\n unraveled_coords : ndarray\n Each row in the ndarray has the same shape as the indices array.\n Each column in the ndarray represents the unravelled index\n\n Examples:\n -------------\n >>> np.unravel_index([22, 41, 37], (7,6))\n ([3. 6. 6.]\n [4. 5. 
1.])\n >>> np.unravel_index(1621, (6,7,8,9))\n (3, 1, 4, 1)\n \"\"\"\n if order == 'C':\n if isinstance(indices, numeric_types):\n return _np.unravel_index(indices, shape)\n ret = _npi.unravel_index_fallback(indices, shape=shape)\n ret_list = []\n for item in ret:\n ret_list += [item]\n return tuple(ret_list)\n else:\n raise NotImplementedError('Do not support column-major (Fortran-style) order at this moment')\n\n\ndef diag_indices_from(arr):\n \"\"\"\n This returns a tuple of indices that can be used to access the main diagonal of an array\n a with a.ndim >= 2 dimensions and shape (n, n, ..., n). For a.ndim = 2 this is\n the usual diagonal, for a.ndim > 2 this is the set of indices to access\n a[i, i, ..., i] for i = [0..n-1].\n\n Parameters:\n -------------\n arr : ndarray\n Input array for acessing the main diagonal. All dimensions\n should have equal length.\n\n Return:\n -------------\n diag: tuple of ndarray\n indices of the main diagonal.\n\n Examples:\n -------------\n >>> a = np.arange(16).reshape(4, 4)\n >>> a\n array([[ 0, 1, 2, 3],\n [ 4, 5, 6, 7],\n [ 8, 9, 10, 11],\n [12, 13, 14, 15]])\n >>> idx = np.diag_indices_from(a)\n >>> idx\n (array([0, 1, 2, 3]), array([0, 1, 2, 3]))\n >>> a[idx] = 100\n >>> a\n array([[100, 1, 2, 3],\n [ 4, 100, 6, 7],\n [ 8, 9, 100, 11],\n [ 12, 13, 14, 100]])\n \"\"\"\n return tuple(_npi.diag_indices_from(arr))\n\n\n@set_module('mxnet.ndarray.numpy')\ndef hanning(M, dtype=_np.float32, ctx=None):\n r\"\"\"Return the Hanning window.\n\n The Hanning window is a taper formed by using a weighted cosine.\n\n Parameters\n ----------\n M : int\n Number of points in the output window. If zero or less, an\n empty array is returned.\n dtype : str or numpy.dtype, optional\n An optional value type. Default is `float32`. Note that you need\n select numpy.float32 or float64 in this operator.\n ctx : Context, optional\n An optional device context (default is the current default context).\n\n Returns\n -------\n out : ndarray, shape(M,)\n The window, with the maximum value normalized to one (the value\n one appears only if `M` is odd).\n\n See Also\n --------\n blackman, hamming\n\n Notes\n -----\n The Hanning window is defined as\n\n .. math:: w(n) = 0.5 - 0.5cos\\left(\\frac{2\\pi{n}}{M-1}\\right)\n \\qquad 0 \\leq n \\leq M-1\n\n The Hanning was named for Julius von Hann, an Austrian meteorologist.\n It is also known as the Cosine Bell. Some authors prefer that it be\n called a Hann window, to help avoid confusion with the very similar\n Hamming window.\n\n Most references to the Hanning window come from the signal processing\n literature, where it is used as one of many windowing functions for\n smoothing values. It is also known as an apodization (which means\n \"removing the foot\", i.e. smoothing discontinuities at the beginning\n and end of the sampled signal) or tapering function.\n\n References\n ----------\n .. [1] Blackman, R.B. and Tukey, J.W., (1958) The measurement of power\n spectra, Dover Publications, New York.\n .. [2] E.R. Kanasewich, \"Time Sequence Analysis in Geophysics\",\n The University of Alberta Press, 1975, pp. 106-108.\n .. [3] Wikipedia, \"Window function\",\n http://en.wikipedia.org/wiki/Window_function\n .. [4] W.H. Press, B.P. Flannery, S.A. Teukolsky, and W.T. Vetterling,\n \"Numerical Recipes\", Cambridge University Press, 1986, page 425.\n\n Examples\n --------\n >>> np.hanning(12)\n array([0. , 0.07937324, 0.29229254, 0.5711574 , 0.8274304 ,\n 0.9797465 , 0.97974646, 0.82743025, 0.5711573 , 0.29229245,\n 0.07937312, 0. 
])\n\n Plot the window and its frequency response:\n\n >>> import matplotlib.pyplot as plt\n >>> window = np.hanning(51)\n >>> plt.plot(window.asnumpy())\n [<matplotlib.lines.Line2D object at 0x...>]\n >>> plt.title(\"Hann window\")\n Text(0.5, 1.0, 'Hann window')\n >>> plt.ylabel(\"Amplitude\")\n Text(0, 0.5, 'Amplitude')\n >>> plt.xlabel(\"Sample\")\n Text(0.5, 0, 'Sample')\n >>> plt.show()\n \"\"\"\n if ctx is None:\n ctx = current_context()\n return _npi.hanning(M, dtype=dtype, ctx=ctx)\n\n\n@set_module('mxnet.ndarray.numpy')\ndef hamming(M, dtype=_np.float32, ctx=None):\n r\"\"\"Return the hamming window.\n\n The hamming window is a taper formed by using a weighted cosine.\n\n Parameters\n ----------\n M : int\n Number of points in the output window. If zero or less, an\n empty array is returned.\n dtype : str or numpy.dtype, optional\n An optional value type. Default is `float32`. Note that you need\n select numpy.float32 or float64 in this operator.\n ctx : Context, optional\n An optional device context (default is the current default context).\n\n Returns\n -------\n out : ndarray, shape(M,)\n The window, with the maximum value normalized to one (the value\n one appears only if `M` is odd).\n\n See Also\n --------\n blackman, hanning\n\n Notes\n -----\n The Hamming window is defined as\n\n .. math:: w(n) = 0.54 - 0.46cos\\left(\\frac{2\\pi{n}}{M-1}\\right)\n \\qquad 0 \\leq n \\leq M-1\n\n The Hamming was named for R. W. Hamming, an associate of J. W. Tukey\n and is described in Blackman and Tukey. It was recommended for\n smoothing the truncated autocovariance function in the time domain.\n Most references to the Hamming window come from the signal processing\n literature, where it is used as one of many windowing functions for\n smoothing values. It is also known as an apodization (which means\n \"removing the foot\", i.e. smoothing discontinuities at the beginning\n and end of the sampled signal) or tapering function.\n\n References\n ----------\n .. [1] Blackman, R.B. and Tukey, J.W., (1958) The measurement of power\n spectra, Dover Publications, New York.\n .. [2] E.R. Kanasewich, \"Time Sequence Analysis in Geophysics\", The\n University of Alberta Press, 1975, pp. 109-110.\n .. [3] Wikipedia, \"Window function\",\n https://en.wikipedia.org/wiki/Window_function\n .. [4] W.H. Press, B.P. Flannery, S.A. Teukolsky, and W.T. Vetterling,\n \"Numerical Recipes\", Cambridge University Press, 1986, page 425.\n\n Examples\n --------\n >>> np.hamming(12)\n array([0.08000001, 0.15302339, 0.34890914, 0.6054648 , 0.841236 ,\n 0.9813669 , 0.9813668 , 0.8412359 , 0.6054647 , 0.34890908,\n 0.15302327, 0.08000001])\n\n Plot the window and its frequency response:\n\n >>> import matplotlib.pyplot as plt\n >>> window = np.hamming(51)\n >>> plt.plot(window.asnumpy())\n [<matplotlib.lines.Line2D object at 0x...>]\n >>> plt.title(\"hamming window\")\n Text(0.5, 1.0, 'hamming window')\n >>> plt.ylabel(\"Amplitude\")\n Text(0, 0.5, 'Amplitude')\n >>> plt.xlabel(\"Sample\")\n Text(0.5, 0, 'Sample')\n >>> plt.show()\n \"\"\"\n if ctx is None:\n ctx = current_context()\n return _npi.hamming(M, dtype=dtype, ctx=ctx)\n\n\n@set_module('mxnet.ndarray.numpy')\ndef blackman(M, dtype=_np.float32, ctx=None):\n r\"\"\"Return the Blackman window.\n\n The Blackman window is a taper formed by using the first three\n terms of a summation of cosines. It was designed to have close to the\n minimal leakage possible. 
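# --- Illustrative sketch, not part of the library source ---
# The taper formulas above imply the endpoint/midpoint values checked below:
# Hanning falls to 0 at the edges, Hamming keeps a 0.08 pedestal, and both
# peak at 1 for an odd number of points. Plain NumPy is used for illustration.
import numpy as np
hann, hamm = np.hanning(11), np.hamming(11)
assert np.isclose(hann[0], 0.0) and np.isclose(hann[5], 1.0)
assert np.isclose(hamm[0], 0.08) and np.isclose(hamm[5], 1.0)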
It is close to optimal, only slightly worse\n than a Kaiser window.\n\n Parameters\n ----------\n M : int\n Number of points in the output window. If zero or less, an\n empty array is returned.\n dtype : str or numpy.dtype, optional\n An optional value type. Default is `float32`. Note that you need\n select numpy.float32 or float64 in this operator.\n ctx : Context, optional\n An optional device context (default is the current default context).\n\n Returns\n -------\n out : ndarray\n The window, with the maximum value normalized to one (the value one\n appears only if the number of samples is odd).\n\n See Also\n --------\n hamming, hanning\n\n Notes\n -----\n The Blackman window is defined as\n\n .. math:: w(n) = 0.42 - 0.5 \\cos(2\\pi n/{M-1}) + 0.08 \\cos(4\\pi n/{M-1})\n\n Most references to the Blackman window come from the signal processing\n literature, where it is used as one of many windowing functions for\n smoothing values. It is also known as an apodization (which means\n \"removing the foot\", i.e. smoothing discontinuities at the beginning\n and end of the sampled signal) or tapering function. It is known as a\n \"near optimal\" tapering function, almost as good (by some measures)\n as the kaiser window.\n\n References\n ----------\n Blackman, R.B. and Tukey, J.W., (1958) The measurement of power spectra,\n Dover Publications, New York.\n\n Oppenheim, A.V., and R.W. Schafer. Discrete-Time Signal Processing.\n Upper Saddle River, NJ: Prentice-Hall, 1999, pp. 468-471.\n\n Examples\n --------\n >>> np.blackman(12)\n array([-1.4901161e-08, 3.2606423e-02, 1.5990365e-01, 4.1439798e-01,\n 7.3604530e-01, 9.6704686e-01, 9.6704674e-01, 7.3604506e-01,\n 4.1439781e-01, 1.5990359e-01, 3.2606363e-02, -1.4901161e-08])\n\n Plot the window and its frequency response:\n\n >>> import matplotlib.pyplot as plt\n >>> window = np.blackman(51)\n >>> plt.plot(window.asnumpy())\n [<matplotlib.lines.Line2D object at 0x...>]\n >>> plt.title(\"blackman window\")\n Text(0.5, 1.0, 'blackman window')\n >>> plt.ylabel(\"Amplitude\")\n Text(0, 0.5, 'Amplitude')\n >>> plt.xlabel(\"Sample\")\n Text(0.5, 0, 'Sample')\n >>> plt.show()\n \"\"\"\n if ctx is None:\n ctx = current_context()\n return _npi.blackman(M, dtype=dtype, ctx=ctx)\n\n\n@set_module('mxnet.ndarray.numpy')\ndef flip(m, axis=None, out=None):\n r\"\"\"\n flip(m, axis=None, out=None)\n\n Reverse the order of elements in an array along the given axis.\n\n The shape of the array is preserved, but the elements are reordered.\n\n Parameters\n ----------\n m : ndarray or scalar\n Input array.\n axis : None or int or tuple of ints, optional\n Axis or axes along which to flip over. The default,\n axis=None, will flip over all of the axes of the input array.\n If axis is negative it counts from the last to the first axis.\n\n If axis is a tuple of ints, flipping is performed on all of the axes\n specified in the tuple.\n out : ndarray or scalar, optional\n Alternative output array in which to place the result. It must have\n the same shape and type as the expected output.\n\n Returns\n -------\n out : ndarray or scalar\n A view of `m` with the entries of axis reversed. 
Since a view is\n returned, this operation is done in constant time.\n\n Examples\n --------\n >>> A = np.arange(8).reshape((2,2,2))\n >>> A\n array([[[0, 1],\n [2, 3]],\n [[4, 5],\n [6, 7]]])\n >>> np.flip(A, 0)\n array([[[4, 5],\n [6, 7]],\n [[0, 1],\n [2, 3]]])\n >>> np.flip(A, 1)\n array([[[2, 3],\n [0, 1]],\n [[6, 7],\n [4, 5]]])\n >>> np.flip(A)\n array([[[7, 6],\n [5, 4]],\n [[3, 2],\n [1, 0]]])\n >>> np.flip(A, (0, 2))\n array([[[5, 4],\n [7, 6]],\n [[1, 0],\n [3, 2]]])\n \"\"\"\n from ...numpy import ndarray\n if isinstance(m, numeric_types):\n return _np.flip(m, axis)\n elif isinstance(m, ndarray):\n return _npi.flip(m, axis, out=out)\n else:\n raise TypeError('type {} not supported'.format(str(type(m))))\n\n\n@set_module('mxnet.ndarray.numpy')\ndef flipud(m):\n r\"\"\"\n flipud(*args, **kwargs)\n\n Flip array in the up/down direction.\n\n Flip the entries in each column in the up/down direction.\n Rows are preserved, but appear in a different order than before.\n\n Parameters\n ----------\n m : array_like\n Input array.\n\n Returns\n -------\n out : array_like\n A view of `m` with the rows reversed. Since a view is\n returned, this operation is :math:`\\mathcal O(1)`.\n\n See Also\n --------\n fliplr : Flip array in the left/right direction.\n rot90 : Rotate array counterclockwise.\n\n Notes\n -----\n Equivalent to ``m[::-1,...]``.\n Does not require the array to be two-dimensional.\n\n Examples\n --------\n >>> A = np.diag(np.array([1.0, 2, 3]))\n >>> A\n array([[1., 0., 0.],\n [0., 2., 0.],\n [0., 0., 3.]])\n >>> np.flipud(A)\n array([[0., 0., 3.],\n [0., 2., 0.],\n [1., 0., 0.]])\n\n >>> A = np.random.randn(2,3,5)\n >>> np.all(np.flipud(A) == A[::-1,...])\n array(True)\n\n >>> np.flipud(np.array([1,2]))\n array([2., 1.])\n \"\"\"\n return flip(m, 0)\n\n\n@set_module('mxnet.ndarray.numpy')\ndef fliplr(m):\n r\"\"\"\n fliplr(*args, **kwargs)\n\n Flip array in the left/right direction.\n\n Flip the entries in each row in the left/right direction.\n Columns are preserved, but appear in a different order than before.\n\n Parameters\n ----------\n m : array_like\n Input array, must be at least 2-D.\n\n Returns\n -------\n f : ndarray\n A view of `m` with the columns reversed. Since a view\n is returned, this operation is :math:`\\mathcal O(1)`.\n\n See Also\n --------\n flipud : Flip array in the up/down direction.\n rot90 : Rotate array counterclockwise.\n\n Notes\n -----\n Equivalent to m[:,::-1]. Requires the array to be at least 2-D.\n\n Examples\n --------\n >>> A = np.diag(np.array([1.,2.,3.]))\n >>> A\n array([[1., 0., 0.],\n [0., 2., 0.],\n [0., 0., 3.]])\n >>> np.fliplr(A)\n array([[0., 0., 1.],\n [0., 2., 0.],\n [3., 0., 0.]])\n\n >>> A = np.random.randn(2,3,5)\n >>> np.all(np.fliplr(A) == A[:,::-1,...])\n array(True)\n \"\"\"\n return flip(m, 1)\n\n\n@set_module('mxnet.ndarray.numpy')\ndef around(x, decimals=0, out=None, **kwargs):\n r\"\"\"\n around(x, decimals=0, out=None)\n\n Evenly round to the given number of decimals.\n Parameters\n ----------\n x : ndarray or scalar\n Input data.\n decimals : int, optional\n Number of decimal places to round to (default: 0). If\n decimals is negative, it specifies the number of positions to\n the left of the decimal point.\n out : ndarray, optional\n Alternative output array in which to place the result. 
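# --- Illustrative sketch, not part of the library source ---
# flipud/fliplr are just flips over axis 0 and axis 1, i.e. the slices
# m[::-1, ...] and m[:, ::-1] mentioned in the notes above (plain NumPy shown).
import numpy as np
m = np.arange(6).reshape(2, 3)
assert (np.flipud(m) == m[::-1, :]).all()
assert (np.fliplr(m) == m[:, ::-1]).all()
assert (np.flip(m, axis=(0, 1)) == m[::-1, ::-1]).all()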
It must have\n the same shape and type as the expected output.\n\n Returns\n -------\n rounded_array : ndarray or scalar\n An array of the same type as `x`, containing the rounded values.\n A reference to the result is returned.\n\n Notes\n -----\n For values exactly halfway between rounded decimal values, NumPy\n rounds to the nearest even value. Thus 1.5 and 2.5 round to 2.0,\n -0.5 and 0.5 round to 0.0, etc.\n\n This function differs from the original numpy.prod in the following aspects:\n\n - Cannot cast type automatically. Dtype of `out` must be same as the expected one.\n - Cannot support complex-valued number.\n\n Examples\n --------\n >>> np.around([0.37, 1.64])\n array([ 0., 2.])\n >>> np.around([0.37, 1.64], decimals=1)\n array([ 0.4, 1.6])\n >>> np.around([.5, 1.5, 2.5, 3.5, 4.5]) # rounds to nearest even value\n array([ 0., 2., 2., 4., 4.])\n >>> np.around([1, 2, 3, 11], decimals=1) # ndarray of ints is returned\n array([ 1, 2, 3, 11])\n >>> np.around([1, 2, 3, 11], decimals=-1)\n array([ 0, 0, 0, 10])\n \"\"\"\n from ...numpy import ndarray\n if isinstance(x, numeric_types):\n return _np.around(x, decimals, **kwargs)\n elif isinstance(x, ndarray):\n return _npi.around(x, decimals, out=out, **kwargs)\n else:\n raise TypeError('type {} not supported'.format(str(type(x))))\n\n\n@set_module('mxnet.ndarray.numpy')\ndef round(x, decimals=0, out=None, **kwargs):\n r\"\"\"\n round_(a, decimals=0, out=None)\n Round an array to the given number of decimals.\n\n See Also\n --------\n around : equivalent function; see for details.\n \"\"\"\n from ...numpy import ndarray\n if isinstance(x, numeric_types):\n return _np.around(x, decimals, **kwargs)\n elif isinstance(x, ndarray):\n return _npi.around(x, decimals, out=out, **kwargs)\n else:\n raise TypeError('type {} not supported'.format(str(type(x))))\n\n\n@set_module('mxnet.ndarray.numpy')\n@wrap_np_binary_func\ndef arctan2(x1, x2, out=None, **kwargs):\n r\"\"\"\n Element-wise arc tangent of ``x1/x2`` choosing the quadrant correctly.\n\n The quadrant (i.e., branch) is chosen so that ``arctan2(x1, x2)`` is\n the signed angle in radians between the ray ending at the origin and\n passing through the point (1,0), and the ray ending at the origin and\n passing through the point (`x2`, `x1`). (Note the role reversal: the\n \"`y`-coordinate\" is the first function parameter, the \"`x`-coordinate\"\n is the second.) By IEEE convention, this function is defined for\n `x2` = +/-0 and for either or both of `x1` and `x2` = +/-inf (see\n Notes for specific values).\n\n This function is not defined for complex-valued arguments; for the\n so-called argument of complex values, use `angle`.\n\n Parameters\n ----------\n x1 : ndarray or scalar\n `y`-coordinates.\n x2 : ndarray or scalar\n `x`-coordinates. `x2` must be broadcastable to match the shape of\n `x1` or vice versa.\n out : ndarray or None, optional\n A location into which the result is stored. If provided, it must have\n a shape that the inputs broadcast to. If not provided or `None`,\n a freshly-allocated array is returned.\n\n Returns\n -------\n out : ndarray or scalar\n Array of angles in radians, in the range ``[-pi, pi]``. This is a scalar if\n `x1` and `x2` are scalars.\n\n Notes\n -----\n *arctan2* is identical to the `atan2` function of the underlying\n C library. 
The following special values are defined in the C\n standard: [1]_\n\n ====== ====== ================\n `x1` `x2` `arctan2(x1,x2)`\n ====== ====== ================\n +/- 0 +0 +/- 0\n +/- 0 -0 +/- pi\n > 0 +/-inf +0 / +pi\n < 0 +/-inf -0 / -pi\n +/-inf +inf +/- (pi/4)\n +/-inf -inf +/- (3*pi/4)\n ====== ====== ================\n\n Note that +0 and -0 are distinct floating point numbers, as are +inf\n and -inf.\n\n This function differs from the original numpy.arctan2 in the following aspects:\n - Only support float16, float32 and float64.\n\n References\n ----------\n .. [1] ISO/IEC standard 9899:1999, \"Programming language C.\"\n\n Examples\n --------\n Consider four points in different quadrants:\n\n >>> x = np.array([-1, +1, +1, -1])\n >>> y = np.array([-1, -1, +1, +1])\n >>> np.arctan2(y, x) * 180 / np.pi\n array([-135., -45., 45., 135.])\n\n Note the order of the parameters. `arctan2` is defined also when `x2` = 0\n and at several other special points, obtaining values in\n the range ``[-pi, pi]``:\n\n >>> x = np.array([1, -1])\n >>> y = np.array([0, 0])\n >>> np.arctan2(x, y)\n array([ 1.5707964, -1.5707964])\n \"\"\"\n return _ufunc_helper(x1, x2, _npi.arctan2, _np.arctan2,\n _npi.arctan2_scalar, _npi.rarctan2_scalar, out=out)\n\n\n@set_module('mxnet.ndarray.numpy')\n@wrap_np_binary_func\ndef hypot(x1, x2, out=None, **kwargs):\n r\"\"\"\n Given the \"legs\" of a right triangle, return its hypotenuse.\n\n Equivalent to ``sqrt(x1**2 + x2**2)``, element-wise. If `x1` or\n `x2` is scalar_like (i.e., unambiguously cast-able to a scalar type),\n it is broadcast for use with each element of the other argument.\n\n Parameters\n ----------\n x1, x2 : ndarray\n Leg of the triangle(s).\n out : ndarray, None, or tuple of ndarray and None, optional\n A location into which the result is stored. If provided, it must have\n a shape that the inputs broadcast to. If not provided or `None`,\n a freshly-allocated array is returned. A tuple (possible only as a\n keyword argument) must have length equal to the number of outputs.\n\n Returns\n -------\n z : ndarray\n The hypotenuse of the triangle(s).\n This is a scalar if both `x1` and `x2` are scalars.\n\n Notes\n -----\n This function differs from the original numpy.hypot in the following aspects:\n - Only support float16, float32 and float64.\n\n Examples\n --------\n >>> np.hypot(3*np.ones((3, 3)), 4*np.ones((3, 3)))\n array([[ 5., 5., 5.],\n [ 5., 5., 5.],\n [ 5., 5., 5.]])\n\n Example showing broadcast of scalar_like argument:\n\n >>> np.hypot(3*np.ones((3, 3)), [4])\n array([[ 5., 5., 5.],\n [ 5., 5., 5.],\n [ 5., 5., 5.]])\n \"\"\"\n return _ufunc_helper(x1, x2, _npi.hypot, _np.hypot, _npi.hypot_scalar, None, out)\n\n\n@set_module('mxnet.ndarray.numpy')\n@wrap_np_binary_func\ndef bitwise_and(x1, x2, out=None, **kwargs):\n r\"\"\"\n Compute the bit-wise AND of two arrays element-wise.\n\n Parameters\n ----------\n x1, x2 : ndarray or scalar\n Only integer and boolean types are handled. If x1.shape != x2.shape,\n they must be broadcastable to a common shape (which becomes the shape of the output).\n out : ndarray, optional\n A location into which the result is stored. If provided, it must have a shape that the\n inputs broadcast to. 
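# --- Illustrative sketch, not part of the library source ---
# Unlike arctan(y/x), arctan2(y, x) keeps the quadrant information, so the
# point (x, y) = (-1, -1) maps to -135 degrees rather than +45; hypot gives
# the 3-4-5 triangle. Plain NumPy for illustration.
import numpy as np
angle_deg = np.arctan2(-1.0, -1.0) * 180.0 / np.pi
assert np.isclose(angle_deg, -135.0)
assert np.isclose(np.hypot(3.0, 4.0), 5.0)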
If not provided or None, a freshly-allocated array is returned.\n\n Returns\n -------\n out : ndarray\n Result.\n\n Examples\n --------\n >>> np.bitwise_and(13, 17)\n 1\n\n >>> np.bitwise_and(14, 13)\n 12\n >>> np.bitwise_and(np.array([14,3], dtype='int32'), 13)\n array([12, 1], dtype=int32)\n\n >>> np.bitwise_and(np.array([11,7], dtype='int32'), np.array([4,25], dtype='int32'))\n array([0, 1], dtype=int32)\n >>> np.bitwise_and(np.array([2,5,255], dtype='int32'), np.array([3,14,16], dtype='int32'))\n array([ 2, 4, 16], dtype=int32)\n >>> np.bitwise_and(np.array([True, True], dtype='bool'), np.array([False, True], dtype='bool'))\n array([False, True])\n \"\"\"\n return _ufunc_helper(x1, x2, _npi.bitwise_and, _np.bitwise_and, _npi.bitwise_and_scalar, None, out)\n\n\n@set_module('mxnet.ndarray.numpy')\n@wrap_np_binary_func\ndef bitwise_xor(x1, x2, out=None, **kwargs):\n r\"\"\"\n Compute the bit-wise XOR of two arrays element-wise.\n\n Parameters\n ----------\n x1, x2 : ndarray or scalar\n Only integer and boolean types are handled. If x1.shape != x2.shape,\n they must be broadcastable to a common shape (which becomes the shape of the output).\n out : ndarray, optional\n A location into which the result is stored. If provided, it must have a shape that the\n inputs broadcast to. If not provided or None, a freshly-allocated array is returned.\n\n Returns\n -------\n out : ndarray\n Result.\n\n Examples\n --------\n >>> np.bitwise_xor(13, 17)\n 28\n\n >>> np.bitwise_xor(31, 5)\n 26\n >>> np.bitwise_xor(np.array([31,3], dtype='int32'), 5)\n array([26, 6])\n\n >>> np.bitwise_xor(np.array([31,3], dtype='int32'), np.array([5,6], dtype='int32'))\n array([26, 5])\n >>> np.bitwise_xor(np.array([True, True], dtype='bool'), np.array([False, True], dtype='bool'))\n array([ True, False])\n \"\"\"\n return _ufunc_helper(x1, x2, _npi.bitwise_xor, _np.bitwise_xor, _npi.bitwise_xor_scalar, None, out)\n\n\n@set_module('mxnet.ndarray.numpy')\n@wrap_np_binary_func\ndef bitwise_or(x1, x2, out=None, **kwargs):\n r\"\"\"\n Compute the bit-wise OR of two arrays element-wise.\n\n Parameters\n ----------\n x1, x2 : ndarray or scalar\n Only integer and boolean types are handled. If x1.shape != x2.shape,\n they must be broadcastable to a common shape (which becomes the shape of the output).\n out : ndarray, optional\n A location into which the result is stored. If provided, it must have a shape that the\n inputs broadcast to. If not provided or None, a freshly-allocated array is returned.\n\n Returns\n -------\n out : ndarray\n Result.\n\n Examples\n --------\n >>> np.bitwise_or(13, 17)\n 29\n\n >>> np.bitwise_or(31, 5)\n 31\n >>> np.bitwise_or(np.array([31,3], dtype='int32'), 5)\n array([31, 7])\n\n >>> np.bitwise_or(np.array([31,3], dtype='int32'), np.array([5,6], dtype='int32'))\n array([31, 7])\n >>> np.bitwise_or(np.array([True, True], dtype='bool'), np.array([False, True], dtype='bool'))\n array([ True, True])\n \"\"\"\n return _ufunc_helper(x1, x2, _npi.bitwise_or, _np.bitwise_or, _npi.bitwise_or_scalar, None, out)\n\n\n@set_module('mxnet.ndarray.numpy')\n@wrap_np_binary_func\ndef ldexp(x1, x2, out=None, **kwargs):\n \"\"\"\n Returns x1 * 2**x2, element-wise.\n The mantissas `x1` and twos exponents `x2` are used to construct\n floating point numbers ``x1 * 2**x2``.\n\n Parameters\n ----------\n x1 : ndarray or scalar\n Array of multipliers.\n x2 : ndarray or scalar, int\n Array of twos exponents.\n out : ndarray, optional\n A location into which the result is stored. 
If provided, it must have\n a shape that the inputs broadcast to. If not, a freshly-allocated array is returned.\n\n Returns\n -------\n y : ndarray or scalar\n The result of ``x1 * 2**x2``.\n This is a scalar if both `x1` and `x2` are scalars.\n\n Notes\n -----\n Complex dtypes are not supported, they will raise a TypeError.\n Different from numpy, we allow x2 to be float besides int.\n `ldexp` is useful as the inverse of `frexp`, if used by itself it is\n more clear to simply use the expression ``x1 * 2**x2``.\n\n Examples\n --------\n >>> np.ldexp(5, np.arange(4))\n array([ 5., 10., 20., 40.])\n \"\"\"\n return _ufunc_helper(x1, x2, _npi.ldexp, _np.ldexp, _npi.ldexp_scalar, _npi.rldexp_scalar, out)\n\n\n@set_module('mxnet.ndarray.numpy')\ndef inner(a, b):\n r\"\"\"\n Inner product of two arrays.\n Ordinary inner product of vectors for 1-D arrays (without complex\n conjugation), in higher dimensions a sum product over the last axes.\n\n Parameters\n ----------\n a, b : ndarray\n If `a` and `b` are nonscalar, their last dimensions must match.\n\n Returns\n -------\n out : ndarray\n `out.shape = a.shape[:-1] + b.shape[:-1]`\n\n Raises\n ------\n ValueError\n If the last dimension of `a` and `b` has different size.\n\n See Also\n --------\n tensordot : Sum products over arbitrary axes.\n dot : Generalised matrix product, using second last dimension of `b`.\n einsum : Einstein summation convention.\n\n Notes\n -----\n For vectors (1-D arrays) it computes the ordinary inner-product::\n np.inner(a, b) = sum(a[:]*b[:])\n More generally, if `ndim(a) = r > 0` and `ndim(b) = s > 0`::\n np.inner(a, b) = np.tensordot(a, b, axes=(-1,-1))\n or explicitly::\n np.inner(a, b)[i0,...,ir-1,j0,...,js-1]\n = sum(a[i0,...,ir-1,:]*b[j0,...,js-1,:])\n In addition `a` or `b` may be scalars, in which case::\n np.inner(a,b) = a*b\n\n Examples\n --------\n Ordinary inner product for vectors:\n >>> a = np.array([1,2,3])\n >>> b = np.array([0,1,0])\n >>> np.inner(a, b)\n 2\n A multidimensional example:\n >>> a = np.arange(24).reshape((2,3,4))\n >>> b = np.arange(4)\n >>> np.inner(a, b)\n array([[ 14, 38, 62],\n [ 86, 110, 134]])\n \"\"\"\n return tensordot(a, b, [-1, -1])\n\n\n@set_module('mxnet.ndarray.numpy')\ndef outer(a, b):\n r\"\"\"\n Compute the outer product of two vectors.\n Given two vectors, ``a = [a0, a1, ..., aM]`` and\n ``b = [b0, b1, ..., bN]``,\n the outer product [1]_ is::\n [[a0*b0 a0*b1 ... a0*bN ]\n [a1*b0 .\n [ ... .\n [aM*b0 aM*bN ]]\n\n Parameters\n ----------\n a : (M,) ndarray\n First input vector. Input is flattened if\n not already 1-dimensional.\n b : (N,) ndarray\n Second input vector. Input is flattened if\n not already 1-dimensional.\n\n Returns\n -------\n out : (M, N) ndarray\n ``out[i, j] = a[i] * b[j]``\n See also\n --------\n inner\n einsum : ``einsum('i,j->ij', a.ravel(), b.ravel())`` is the equivalent.\n ufunc.outer : A generalization to N dimensions and other operations.\n ``np.multiply.outer(a.ravel(), b.ravel())`` is the equivalent.\n References\n ----------\n .. [1] : G. H. Golub and C. F. Van Loan, *Matrix Computations*, 3rd\n ed., Baltimore, MD, Johns Hopkins University Press, 1996,\n pg. 
8.\n Examples\n --------\n Make a (*very* coarse) grid for computing a Mandelbrot set:\n >>> rl = np.outer(np.ones((5,)), np.linspace(-2, 2, 5))\n >>> rl\n array([[-2., -1., 0., 1., 2.],\n [-2., -1., 0., 1., 2.],\n [-2., -1., 0., 1., 2.],\n [-2., -1., 0., 1., 2.],\n [-2., -1., 0., 1., 2.]])\n \"\"\"\n return tensordot(a.flatten(), b.flatten(), 0)\n\n\n@set_module('mxnet.ndarray.numpy')\ndef vdot(a, b):\n r\"\"\"\n Return the dot product of two vectors.\n Note that `vdot` handles multidimensional arrays differently than `dot`:\n it does *not* perform a matrix product, but flattens input arguments\n to 1-D vectors first. Consequently, it should only be used for vectors.\n\n Parameters\n ----------\n a : ndarray\n First argument to the dot product.\n b : ndarray\n Second argument to the dot product.\n\n Returns\n -------\n output : ndarray\n Dot product of `a` and `b`.\n\n See Also\n --------\n dot : Return the dot product without using the complex conjugate of the\n first argument.\n\n Examples\n --------\n Note that higher-dimensional arrays are flattened!\n >>> a = np.array([[1, 4], [5, 6]])\n >>> b = np.array([[4, 1], [2, 2]])\n >>> np.vdot(a, b)\n 30\n >>> np.vdot(b, a)\n 30\n >>> 1*4 + 4*1 + 5*2 + 6*2\n 30\n \"\"\"\n return tensordot(a.flatten(), b.flatten(), 1)\n\n\n@set_module('mxnet.ndarray.numpy')\ndef equal(x1, x2, out=None):\n \"\"\"\n Return (x1 == x2) element-wise.\n Parameters\n ----------\n x1, x2 : ndarrays or scalars\n Input arrays. If ``x1.shape != x2.shape``, they must be broadcastable to\n a common shape (which becomes the shape of the output).\n out : ndarray, None, or tuple of ndarray and None, optional\n A location into which the result is stored. If provided, it must have\n a shape that the inputs broadcast to. If not provided or `None`,\n a freshly-allocated array is returned.\n Returns\n -------\n out : ndarray or scalar\n Output array of type bool, element-wise comparison of `x1` and `x2`.\n This is a scalar if both `x1` and `x2` are scalars.\n See Also\n --------\n not_equal, greater_equal, less_equal, greater, less\n Examples\n --------\n >>> np.equal(np.ones(2, 1)), np.zeros(1, 3))\n array([[False, False, False],\n [False, False, False]])\n >>> np.equal(1, np.ones(1))\n array([ True])\n \"\"\"\n return _ufunc_helper(x1, x2, _npi.equal, _np.equal, _npi.equal_scalar, None, out)\n\n\n@set_module('mxnet.ndarray.numpy')\ndef not_equal(x1, x2, out=None):\n \"\"\"\n Return (x1 != x2) element-wise.\n Parameters\n ----------\n x1, x2 : ndarrays or scalars\n Input arrays. If ``x1.shape != x2.shape``, they must be broadcastable to\n a common shape (which becomes the shape of the output).\n out : ndarray, None, or tuple of ndarray and None, optional\n A location into which the result is stored. If provided, it must have\n a shape that the inputs broadcast to. 
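# --- Illustrative sketch, not part of the library source ---
# The implementations above reduce outer() and vdot() to tensordot() over
# flattened inputs with 0 and 1 contracted axes; the same identity holds in
# classic NumPy, shown here with hypothetical vectors.
import numpy as np
a, b = np.array([1.0, 2.0, 3.0]), np.array([0.0, 1.0, 2.0])
assert (np.outer(a, b) == np.tensordot(a, b, axes=0)).all()
assert np.vdot(a, b) == np.tensordot(a, b, axes=1)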
If not provided or `None`,\n a freshly-allocated array is returned.\n Returns\n -------\n out : ndarray or scalar\n Output array of type bool, element-wise comparison of `x1` and `x2`.\n This is a scalar if both `x1` and `x2` are scalars.\n See Also\n --------\n equal, greater, greater_equal, less, less_equal\n Examples\n --------\n >>> np.not_equal(np.ones((2, 1)), np.zeros((1, 3)))\n array([[ True, True, True],\n [ True, True, True]])\n >>> np.not_equal(1, np.ones(1))\n array([False])\n \"\"\"\n return _ufunc_helper(x1, x2, _npi.not_equal, _np.not_equal, _npi.not_equal_scalar, None, out)\n\n\n@set_module('mxnet.ndarray.numpy')\ndef greater(x1, x2, out=None):\n \"\"\"\n Return the truth value of (x1 > x2) element-wise.\n Parameters\n ----------\n x1, x2 : ndarrays or scalars\n Input arrays. If ``x1.shape != x2.shape``, they must be broadcastable to\n a common shape (which becomes the shape of the output).\n out : ndarray, None, or tuple of ndarray and None, optional\n A location into which the result is stored. If provided, it must have\n a shape that the inputs broadcast to. If not provided or `None`,\n a freshly-allocated array is returned.\n Returns\n -------\n out : ndarray or scalar\n Output array of type bool, element-wise comparison of `x1` and `x2`.\n This is a scalar if both `x1` and `x2` are scalars.\n See Also\n --------\n equal, greater, greater_equal, less, less_equal\n Examples\n --------\n >>> np.greater(np.ones((2, 1)), np.zeros((1, 3)))\n array([[ True, True, True],\n [ True, True, True]])\n >>> np.greater(1, np.ones(1))\n array([False])\n \"\"\"\n return _ufunc_helper(x1, x2, _npi.greater, _np.greater, _npi.greater_scalar,\n _npi.less_scalar, out)\n\n\n@set_module('mxnet.ndarray.numpy')\ndef less(x1, x2, out=None):\n \"\"\"\n Return the truth value of (x1 < x2) element-wise.\n Parameters\n ----------\n x1, x2 : ndarrays or scalars\n Input arrays. If ``x1.shape != x2.shape``, they must be broadcastable to\n a common shape (which becomes the shape of the output).\n out : ndarray, None, or tuple of ndarray and None, optional\n A location into which the result is stored. If provided, it must have\n a shape that the inputs broadcast to. If not provided or `None`,\n a freshly-allocated array is returned.\n Returns\n -------\n out : ndarray or scalar\n Output array of type bool, element-wise comparison of `x1` and `x2`.\n This is a scalar if both `x1` and `x2` are scalars.\n See Also\n --------\n equal, greater, greater_equal, less, less_equal\n Examples\n --------\n >>> np.less(np.ones((2, 1)), np.zeros((1, 3)))\n array([[False, False, False],\n [False, False, False]])\n >>> np.less(1, np.ones(1))\n array([False])\n \"\"\"\n return _ufunc_helper(x1, x2, _npi.less, _np.less, _npi.less_scalar, _npi.greater_scalar, out)\n\n\n@set_module('mxnet.ndarray.numpy')\ndef greater_equal(x1, x2, out=None):\n \"\"\"\n Return the truth value of (x1 >= x2) element-wise.\n Parameters\n ----------\n x1, x2 : ndarrays or scalars\n Input arrays. If ``x1.shape != x2.shape``, they must be broadcastable to\n a common shape (which becomes the shape of the output).\n out : ndarray, None, or tuple of ndarray and None, optional\n A location into which the result is stored. If provided, it must have\n a shape that the inputs broadcast to. 
If not provided or `None`,\n a freshly-allocated array is returned.\n Returns\n -------\n out : ndarray or scalar\n Output array of type bool, element-wise comparison of `x1` and `x2`.\n This is a scalar if both `x1` and `x2` are scalars.\n See Also\n --------\n equal, greater, greater_equal, less, less_equal\n Examples\n --------\n >>> np.greater_equal(np.ones(2, 1)), np.zeros(1, 3))\n array([[ True, True, True],\n [ True, True, True]])\n >>> np.greater_equal(1, np.ones(1))\n array([True])\n \"\"\"\n return _ufunc_helper(x1, x2, _npi.greater_equal, _np.greater_equal, _npi.greater_equal_scalar,\n _npi.less_equal_scalar, out)\n\n\n@set_module('mxnet.ndarray.numpy')\ndef less_equal(x1, x2, out=None):\n \"\"\"\n Return the truth value of (x1 <= x2) element-wise.\n Parameters\n ----------\n x1, x2 : ndarrays or scalars\n Input arrays. If ``x1.shape != x2.shape``, they must be broadcastable to\n a common shape (which becomes the shape of the output).\n out : ndarray, None, or tuple of ndarray and None, optional\n A location into which the result is stored. If provided, it must have\n a shape that the inputs broadcast to. If not provided or `None`,\n a freshly-allocated array is returned.\n Returns\n -------\n out : ndarray or scalar\n Output array of type bool, element-wise comparison of `x1` and `x2`.\n This is a scalar if both `x1` and `x2` are scalars.\n See Also\n --------\n equal, greater, greater_equal, less, less_equal\n Examples\n --------\n >>> np.less_equal(np.ones(2, 1)), np.zeros(1, 3))\n array([[False, False, False],\n [False, False, False]])\n >>> np.less_equal(1, np.ones(1))\n array([True])\n \"\"\"\n return _ufunc_helper(x1, x2, _npi.less_equal, _np.less_equal, _npi.less_equal_scalar,\n _npi.greater_equal_scalar, out)\n\n\n@set_module('mxnet.ndarray.numpy')\ndef rot90(m, k=1, axes=(0, 1)):\n \"\"\"\n Rotate an array by 90 degrees in the plane specified by axes.\n Rotation direction is from the first towards the second axis.\n Parameters\n ----------\n m : ndarray\n Array of two or more dimensions.\n k : integer\n Number of times the array is rotated by 90 degrees.\n axes: (2,) array_like\n The array is rotated in the plane defined by the axes.\n Axes must be different.\n\n Returns\n -------\n y : ndarray\n A rotated view of `m`.\n\n -----\n rot90(m, k=1, axes=(1,0)) is the reverse of rot90(m, k=1, axes=(0,1))\n rot90(m, k=1, axes=(1,0)) is equivalent to rot90(m, k=-1, axes=(0,1))\n Examples\n --------\n >>> m = np.array([[1,2],[3,4]], 'int')\n >>> m\n array([[1, 2],\n [3, 4]], dtype=int64)\n >>> np.rot90(m)\n array([[2, 4],\n [1, 3]], dtype=int64)\n >>> np.rot90(m, 2)\n array([[4, 3],\n [2, 1]], dtype=int64)\n >>> m = np.arange(8).reshape((2,2,2))\n >>> np.rot90(m, 1, (1,2))\n array([[[1., 3.],\n [0., 2.]],\n\n [[5., 7.],\n [4., 6.]]])\n \"\"\"\n return _npi.rot90(m, k=k, axes=axes)\n\n\n@set_module('mxnet.ndarray.numpy')\ndef einsum(*operands, **kwargs):\n r\"\"\"\n einsum(subscripts, *operands, out=None, optimize=False)\n\n Evaluates the Einstein summation convention on the operands.\n\n Using the Einstein summation convention, many common multi-dimensional,\n linear algebraic array operations can be represented in a simple fashion.\n In *implicit* mode `einsum` computes these values.\n\n In *explicit* mode, `einsum` provides further flexibility to compute\n other array operations that might not be considered classical Einstein\n summation operations, by disabling, or forcing summation over specified\n subscript labels.\n\n See the notes and examples for clarification.\n\n 
Parameters\n ----------\n subscripts : str\n Specifies the subscripts for summation as comma separated list of\n subscript labels. An implicit (classical Einstein summation)\n calculation is performed unless the explicit indicator '->' is\n included as well as subscript labels of the precise output form.\n operands : list of ndarray\n These are the arrays for the operation.\n out : ndarray, optional\n If provided, the calculation is done into this array.\n optimize : {False, True}, optional\n Controls if intermediate optimization should occur. No optimization\n will occur if False. Defaults to False.\n\n Returns\n -------\n output : ndarray\n The calculation based on the Einstein summation convention.\n\n Notes\n -----\n The Einstein summation convention can be used to compute\n many multi-dimensional, linear algebraic array operations. `einsum`\n provides a succinct way of representing these.\n\n A non-exhaustive list of these operations,\n which can be computed by `einsum`, is shown below along with examples:\n\n * Trace of an array, :py:func:`np.trace`.\n * Return a diagonal, :py:func:`np.diag`.\n * Array axis summations, :py:func:`np.sum`.\n * Transpositions and permutations, :py:func:`np.transpose`.\n * Matrix multiplication and dot product, :py:func:`np.matmul` :py:func:`np.dot`.\n * Vector inner and outer products, :py:func:`np.inner` :py:func:`np.outer`.\n * Broadcasting, element-wise and scalar multiplication, :py:func:`np.multiply`.\n * Tensor contractions, :py:func:`np.tensordot`.\n\n The subscripts string is a comma-separated list of subscript labels,\n where each label refers to a dimension of the corresponding operand.\n Whenever a label is repeated it is summed, so ``np.einsum('i,i', a, b)``\n is equivalent to :py:func:`np.inner(a,b) <np.inner>`. If a label\n appears only once, it is not summed, so ``np.einsum('i', a)`` produces a\n view of ``a`` with no changes. A further example ``np.einsum('ij,jk', a, b)``\n describes traditional matrix multiplication and is equivalent to\n :py:func:`np.matmul(a,b) <np.matmul>`. Repeated subscript labels in one\n operand take the diagonal. For example, ``np.einsum('ii', a)`` is equivalent\n to :py:func:`np.trace(a) <np.trace>`.\n\n In *implicit mode*, the chosen subscripts are important\n since the axes of the output are reordered alphabetically. This\n means that ``np.einsum('ij', a)`` doesn't affect a 2D array, while\n ``np.einsum('ji', a)`` takes its transpose. Additionally,\n ``np.einsum('ij,jk', a, b)`` returns a matrix multiplication, while,\n ``np.einsum('ij,jh', a, b)`` returns the transpose of the\n multiplication since subscript 'h' precedes subscript 'i'.\n\n In *explicit mode* the output can be directly controlled by\n specifying output subscript labels. This requires the\n identifier '->' as well as the list of output subscript labels.\n This feature increases the flexibility of the function since\n summing can be disabled or forced when required. The call\n ``np.einsum('i->', a)`` is like :py:func:`np.sum(a, axis=-1) <np.sum>`,\n and ``np.einsum('ii->i', a)`` is like :py:func:`np.diag(a) <np.diag>`.\n The difference is that `einsum` does not allow broadcasting by default.\n Additionally ``np.einsum('ij,jh->ih', a, b)`` directly specifies the\n order of the output subscript labels and therefore returns matrix\n multiplication, unlike the example above in implicit mode.\n\n To enable and control broadcasting, use an ellipsis. 
Default\n NumPy-style broadcasting is done by adding an ellipsis\n to the left of each term, like ``np.einsum('...ii->...i', a)``.\n To take the trace along the first and last axes,\n you can do ``np.einsum('i...i', a)``, or to do a matrix-matrix\n product with the left-most indices instead of rightmost, one can do\n ``np.einsum('ij...,jk...->ik...', a, b)``.\n\n When there is only one operand, no axes are summed, and no output\n parameter is provided, a view into the operand is returned instead\n of a new array. Thus, taking the diagonal as ``np.einsum('ii->i', a)``\n produces a view.\n\n The ``optimize`` argument which will optimize the contraction order\n of an einsum expression. For a contraction with three or more operands this\n can greatly increase the computational efficiency at the cost of a larger\n memory footprint during computation.\n\n Typically a 'greedy' algorithm is applied which empirical tests have shown\n returns the optimal path in the majority of cases. 'optimal' is not supported\n for now.\n\n This function differs from the original `numpy.einsum\n <https://docs.scipy.org/doc/numpy/reference/generated/numpy.einsum.html>`_ in\n the following way(s):\n\n - Does not support 'optimal' strategy\n - Does not support the alternative subscript like\n `einsum(op0, sublist0, op1, sublist1, ..., [sublistout])`\n - Does not produce view in any cases\n\n Examples\n --------\n >>> a = np.arange(25).reshape(5,5)\n >>> b = np.arange(5)\n >>> c = np.arange(6).reshape(2,3)\n\n Trace of a matrix:\n\n >>> np.einsum('ii', a)\n array(60.)\n\n Extract the diagonal (requires explicit form):\n\n >>> np.einsum('ii->i', a)\n array([ 0., 6., 12., 18., 24.])\n\n Sum over an axis (requires explicit form):\n\n >>> np.einsum('ij->i', a)\n array([ 10., 35., 60., 85., 110.])\n >>> np.sum(a, axis=1)\n array([ 10., 35., 60., 85., 110.])\n\n For higher dimensional arrays summing a single axis can be done with ellipsis:\n\n >>> np.einsum('...j->...', a)\n array([ 10., 35., 60., 85., 110.])\n\n Compute a matrix transpose, or reorder any number of axes:\n\n >>> np.einsum('ji', c)\n array([[0., 3.],\n [1., 4.],\n [2., 5.]])\n >>> np.einsum('ij->ji', c)\n array([[0., 3.],\n [1., 4.],\n [2., 5.]])\n >>> np.transpose(c)\n array([[0., 3.],\n [1., 4.],\n [2., 5.]])\n\n Vector inner products:\n\n >>> np.einsum('i,i', b, b)\n array(30.)\n\n Matrix vector multiplication:\n\n >>> np.einsum('ij,j', a, b)\n array([ 30., 80., 130., 180., 230.])\n >>> np.dot(a, b)\n array([ 30., 80., 130., 180., 230.])\n >>> np.einsum('...j,j', a, b)\n array([ 30., 80., 130., 180., 230.])\n\n Broadcasting and scalar multiplication:\n\n >>> np.einsum('..., ...', np.array(3), c)\n array([[ 0., 3., 6.],\n [ 9., 12., 15.]])\n >>> np.einsum(',ij', np.array(3), c)\n array([[ 0., 3., 6.],\n [ 9., 12., 15.]])\n >>> np.multiply(3, c)\n array([[ 0., 3., 6.],\n [ 9., 12., 15.]])\n\n Vector outer product:\n\n >>> np.einsum('i,j', np.arange(2)+1, b)\n array([[0., 1., 2., 3., 4.],\n [0., 2., 4., 6., 8.]])\n\n Tensor contraction:\n\n >>> a = np.arange(60.).reshape(3,4,5)\n >>> b = np.arange(24.).reshape(4,3,2)\n >>> np.einsum('ijk,jil->kl', a, b)\n array([[4400., 4730.],\n [4532., 4874.],\n [4664., 5018.],\n [4796., 5162.],\n [4928., 5306.]])\n\n Example of ellipsis use:\n\n >>> a = np.arange(6).reshape((3,2))\n >>> b = np.arange(12).reshape((4,3))\n >>> np.einsum('ki,jk->ij', a, b)\n array([[10., 28., 46., 64.],\n [13., 40., 67., 94.]])\n >>> np.einsum('ki,...k->i...', a, b)\n array([[10., 28., 46., 64.],\n [13., 40., 67., 94.]])\n >>> 
np.einsum('k...,jk', a, b)\n array([[10., 28., 46., 64.],\n [13., 40., 67., 94.]])\n\n Chained array operations. For more complicated contractions, speed ups\n might be achieved by repeatedly computing a 'greedy' path. Performance\n improvements can be particularly significant with larger arrays:\n\n >>> a = np.ones(64).reshape(2,4,8)\n # Basic `einsum`: ~42.22ms (benchmarked on 3.4GHz Intel Xeon.)\n >>> for iteration in range(500):\n ... np.einsum('ijk,ilm,njm,nlk,abc->',a,a,a,a,a)\n # Greedy `einsum` (faster optimal path approximation): ~0.117ms\n >>> for iteration in range(500):\n ... np.einsum('ijk,ilm,njm,nlk,abc->',a,a,a,a,a, optimize=True)\n \"\"\"\n # Grab non-einsum kwargs; do not optimize by default.\n optimize_arg = kwargs.pop('optimize', False)\n out = kwargs.pop('out', None)\n\n subscripts = operands[0]\n operands = operands[1:]\n return _npi.einsum(*operands, subscripts=subscripts, out=out, optimize=int(optimize_arg))\n\n\n@set_module('mxnet.ndarray.numpy')\ndef nonzero(a):\n \"\"\"\n Return the indices of the elements that are non-zero.\n\n Returns a tuple of arrays, one for each dimension of `a`,\n containing the indices of the non-zero elements in that\n dimension. The values in `a` are always returned in\n row-major, C-style order.\n\n To group the indices by element, rather than dimension, use `argwhere`,\n which returns a row for each non-zero element.\n\n Parameters\n ----------\n a : ndarray\n Input array.\n\n Returns\n -------\n tuple_of_arrays : tuple\n Indices of elements that are non-zero.\n\n See Also\n --------\n ndarray.nonzero :\n Equivalent ndarray method.\n\n Notes\n -----\n While the nonzero values can be obtained with ``a[nonzero(a)]``, it is\n recommended to use ``x[x.astype(bool)]`` or ``x[x != 0]`` instead, which\n will correctly handle 0-d arrays.\n\n Examples\n --------\n >>> x = np.array([[3, 0, 0], [0, 4, 0], [5, 6, 0]])\n >>> x\n array([[3, 0, 0],\n [0, 4, 0],\n [5, 6, 0]], dtype=int32)\n >>> np.nonzero(x)\n (array([0, 1, 2, 2], dtype=int64), array([0, 1, 0, 1], dtype=int64))\n\n >>> x[np.nonzero(x)]\n array([3, 4, 5, 6])\n >>> np.transpose(np.stack(np.nonzero(x)))\n array([[0, 0],\n [1, 1],\n [2, 0],\n [2, 1]], dtype=int64)\n\n A common use for ``nonzero`` is to find the indices of an array, where\n a condition is True. 
Given an array `a`, the condition `a` > 3 is a\n boolean array and since False is interpreted as 0, np.nonzero(a > 3)\n yields the indices of the `a` where the condition is true.\n\n >>> a = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=np.int32)\n >>> a > 3\n array([[False, False, False],\n [ True, True, True],\n [ True, True, True]])\n >>> np.nonzero(a > 3)\n (array([1, 1, 1, 2, 2, 2], dtype=int64), array([0, 1, 2, 0, 1, 2], dtype=int64))\n\n Using this result to index `a` is equivalent to using the mask directly:\n\n >>> a[np.nonzero(a > 3)]\n array([4, 5, 6, 7, 8, 9], dtype=int32)\n >>> a[a > 3]\n array([4, 5, 6, 7, 8, 9], dtype=int32)\n\n ``nonzero`` can also be called as a method of the array.\n\n >>> (a > 3).nonzero()\n (array([1, 1, 1, 2, 2, 2], dtype=int64), array([0, 1, 2, 0, 1, 2], dtype=int64))\n \"\"\"\n out = _npi.nonzero(a).transpose()\n return tuple([out[i] for i in range(len(out))])\n\n\n@set_module('mxnet.ndarray.numpy')\ndef percentile(a, q, axis=None, out=None, overwrite_input=None, interpolation='linear', keepdims=False): # pylint: disable=too-many-arguments\n \"\"\"\n Compute the q-th percentile of the data along the specified axis.\n Returns the q-th percentile(s) of the array elements.\n\n Parameters\n ----------\n a : ndarray\n Input array\n q : ndarray\n Percentile or sequence of percentiles to compute.\n axis : {int, tuple of int, None}, optional\n Axis or axes along which the percentiles are computed. The default is to\n compute the percentile(s) along a flattened version of the array.\n out : ndarray, optional\n Alternative output array in which to place the result. It must have the same\n shape and buffer length as the expected output, but the type (of the output)\n will be cast if necessary.\n overwrite_input : bool, optional (Not supported yet)\n If True, then allow the input array a to be modified by intermediate calculations,\n to save memory. In this case, the contents of the input a after this function\n completes is undefined.\n interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'}\n This optional parameter specifies the interpolation method to use when the\n desired percentile lies between two data points i < j:\n 'linear': i + (j - i) * fraction, where fraction is the fractional part of the\n index surrounded by i and j.\n 'lower': i.\n 'higher': j.\n 'nearest': i or j, whichever is nearest.\n 'midpoint': (i + j) / 2.\n keepdims : bool, optional\n If this is set to True, the axes which are reduced are left in the result as\n dimensions with size one. 
With this option, the result will broadcast\n correctly against the original array a.\n\n Returns\n -------\n percentile : scalar or ndarray\n Output array.\n\n Examples\n --------\n >>> a = np.array([[10, 7, 4], [3, 2, 1]])\n >>> a\n array([[10, 7, 4],\n [ 3, 2, 1]])\n >>> np.percentile(a, np.array(50))\n array(3.5)\n >>> np.percentile(a, np.array(50), axis=0)\n array([6.5, 4.5, 2.5])\n >>> np.percentile(a, np.array(50), axis=1)\n array([7., 2.])\n >>> np.percentile(a, np.array(50), axis=1, keepdims=True)\n array([[7.],\n [2.]])\n\n >>> m = np.percentile(a, np.array(50), axis=0)\n >>> out = np.zeros_like(m)\n >>> np.percentile(a, np.array(50), axis=0, out=out)\n array([6.5, 4.5, 2.5])\n >>> m\n array([6.5, 4.5, 2.5])\n \"\"\"\n if overwrite_input is not None:\n raise NotImplementedError('overwrite_input is not supported yet')\n if isinstance(q, numeric_types):\n return _npi.percentile(a, axis=axis, interpolation=interpolation,\n keepdims=keepdims, q_scalar=q, out=out)\n return _npi.percentile(a, q, axis=axis, interpolation=interpolation,\n keepdims=keepdims, q_scalar=None, out=out)\n\n\n@set_module('mxnet.ndarray.numpy')\ndef quantile(a, q, axis=None, out=None, overwrite_input=None, interpolation='linear', keepdims=False): # pylint: disable=too-many-arguments\n \"\"\"\n Compute the q-th quantile of the data along the specified axis.\n New in version 1.15.0.\n Parameters\n ----------\n a : ndarray\n Input array or object that can be converted to an array.\n q : ndarray\n Quantile or sequence of quantiles to compute, which must be between 0 and 1 inclusive.\n axis : {int, tuple of int, None}, optional\n Axis or axes along which the quantiles are computed.\n The default is to compute the quantile(s) along a flattened version of the array.\n out : ndarray, optional\n Alternative output array in which to place the result.\n It must have the same shape and buffer length as the expected output,\n but the type (of the output) will be cast if necessary.\n interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'}\n This optional parameter specifies the interpolation method to use\n when the desired quantile lies between two data points i < j:\n linear: i + (j - i) * fraction, where fraction is the fractional part of the index surrounded by i and j.\n lower: i.\n higher: j.\n nearest: i or j, whichever is nearest.\n midpoint: (i + j) / 2.\n keepdims : bool, optional\n If this is set to True, the axes which are reduced are left in the result as dimensions with size one.\n With this option, the result will broadcast correctly against the original array a.\n Returns\n -------\n quantile : ndarray\n If q is a single quantile and axis=None, then the result is a scalar.\n If multiple quantiles are given, first axis of the result corresponds to the quantiles.\n The other axes are the axes that remain after the reduction of a.\n If out is specified, that array is returned instead.\n See also\n --------\n mean\n Notes\n -----\n Given a vector V of length N, the q-th quantile of V is the value q of the way from the minimum\n to the maximum in a sorted copy of V. The values and distances of the two nearest neighbors\n as well as the interpolation parameter will determine the quantile if the normalized ranking\n does not match the location of q exactly. 
This function is the same as the median if q=0.5,\n the same as the minimum if q=0.0 and the same as the maximum if q=1.0.\n This function differs from the original `numpy.quantile\n <https://numpy.org/devdocs/reference/generated/numpy.quantile.html>`_ in\n the following aspects:\n - q must be ndarray type even if it is a scalar\n - do not support overwrite_input\n Examples\n --------\n >>> a = np.array([[10, 7, 4], [3, 2, 1]])\n >>> a\n array([[10., 7., 4.],\n [3., 2., 1.]])\n >>> q = np.array(0.5)\n >>> q\n array(0.5)\n >>> np.quantile(a, q)\n array(3.5)\n >>> np.quantile(a, q, axis=0)\n array([6.5, 4.5, 2.5])\n >>> np.quantile(a, q, axis=1)\n array([7., 2.])\n >>> np.quantile(a, q, axis=1, keepdims=True)\n array([[7.],\n [2.]])\n >>> m = np.quantile(a, q, axis=0)\n >>> out = np.zeros_like(m)\n >>> np.quantile(a, q, axis=0, out=out)\n array([6.5, 4.5, 2.5])\n >>> out\n array([6.5, 4.5, 2.5])\n \"\"\"\n if overwrite_input is not None:\n raise NotImplementedError('overwrite_input is not supported yet')\n if isinstance(q, numeric_types):\n return _npi.percentile(a, axis=axis, interpolation=interpolation,\n keepdims=keepdims, q_scalar=q * 100, out=out)\n return _npi.percentile(a, q * 100, axis=axis, interpolation=interpolation,\n keepdims=keepdims, q_scalar=None, out=out)\n\n\n@set_module('mxnet.ndarray.numpy')\ndef shares_memory(a, b, max_work=None):\n \"\"\"\n Determine if two arrays share memory\n\n Parameters\n ----------\n a, b : ndarray\n Input arrays\n\n Returns\n -------\n out : bool\n\n See Also\n --------\n may_share_memory\n\n Examples\n --------\n >>> np.may_share_memory(np.array([1,2]), np.array([5,8,9]))\n False\n\n This function differs from the original `numpy.shares_memory\n <https://docs.scipy.org/doc/numpy/reference/generated/numpy.shares_memory.html>`_ in\n the following way(s):\n\n - Does not support `max_work`, it is a dummy argument\n - Actually it is same as `may_share_memory` in MXNet DeepNumPy\n \"\"\"\n return _npi.share_memory(a, b).item()\n\n\n@set_module('mxnet.ndarray.numpy')\ndef may_share_memory(a, b, max_work=None):\n \"\"\"\n Determine if two arrays might share memory\n\n A return of True does not necessarily mean that the two arrays\n share any element. It just means that they *might*.\n\n Only the memory bounds of a and b are checked by default.\n\n Parameters\n ----------\n a, b : ndarray\n Input arrays\n\n Returns\n -------\n out : bool\n\n See Also\n --------\n shares_memory\n\n Examples\n --------\n >>> np.may_share_memory(np.array([1,2]), np.array([5,8,9]))\n False\n >>> x = np.zeros([3, 4])\n >>> np.may_share_memory(x[:,0], x[:,1])\n True\n\n This function differs from the original `numpy.may_share_memory\n <https://docs.scipy.org/doc/numpy/reference/generated/numpy.may_share_memory.html>`_ in\n the following way(s):\n\n - Does not support `max_work`, it is a dummy argument\n - Actually it is same as `shares_memory` in MXNet DeepNumPy\n \"\"\"\n return _npi.share_memory(a, b).item()\n\n\n@set_module('mxnet.ndarray.numpy')\ndef diff(a, n=1, axis=-1, prepend=None, append=None): # pylint: disable=redefined-outer-name\n r\"\"\"\n Calculate the n-th discrete difference along the given axis.\n\n Parameters\n ----------\n a : ndarray\n Input array\n n : int, optional\n The number of times values are differenced. 
If zero, the input is returned as-is.\n axis : int, optional\n The axis along which the difference is taken, default is the last axis.\n prepend, append : ndarray, optional\n Not supported yet\n\n Returns\n -------\n diff : ndarray\n The n-th differences.\n The shape of the output is the same as a except along axis where the dimension is smaller by n.\n The type of the output is the same as the type of the difference between any two elements of a.\n\n Examples\n --------\n >>> x = np.array([1, 2, 4, 7, 0])\n >>> np.diff(x)\n array([ 1, 2, 3, -7])\n >>> np.diff(x, n=2)\n array([ 1, 1, -10])\n\n >>> x = np.array([[1, 3, 6, 10], [0, 5, 6, 8]])\n >>> np.diff(x)\n array([[2, 3, 4],\n [5, 1, 2]])\n >>> np.diff(x, axis=0)\n array([[-1, 2, 0, -2]])\n\n Notes\n -----\n Optional inputs `prepend` and `append` are not supported yet\n \"\"\"\n if (prepend or append):\n raise NotImplementedError('prepend and append options are not supported yet')\n return _npi.diff(a, n=n, axis=axis)\n\n\n@set_module('mxnet.ndarray.numpy')\ndef resize(a, new_shape):\n \"\"\"\n Return a new array with the specified shape.\n If the new array is larger than the original array, then the new\n array is filled with repeated copies of `a`. Note that this behavior\n is different from a.resize(new_shape) which fills with zeros instead\n of repeated copies of `a`.\n\n Parameters\n ----------\n a : ndarray\n Array to be resized.\n new_shape : int or tuple of int\n Shape of resized array.\n\n Returns\n -------\n reshaped_array : ndarray\n The new array is formed from the data in the old array, repeated\n if necessary to fill out the required number of elements. The\n data are repeated in the order that they are stored in memory.\n\n See Also\n --------\n ndarray.resize : resize an array in-place.\n\n Notes\n -----\n Warning: This functionality does **not** consider axes separately,\n i.e. it does not apply interpolation/extrapolation.\n It fills the return array with the required number of elements, taken\n from `a` as they are laid out in memory, disregarding strides and axes.\n (This is in case the new shape is smaller. 
For larger, see above.)\n This functionality is therefore not suitable to resize images,\n or data where each axis represents a separate and distinct entity.\n\n Examples\n --------\n >>> a = np.array([[0, 1], [2, 3]])\n >>> np.resize(a, (2, 3))\n array([[0., 1., 2.],\n [3., 0., 1.]])\n >>> np.resize(a, (1, 4))\n array([[0., 1., 2., 3.]])\n >>> np.resize(a,(2, 4))\n array([[0., 1., 2., 3.],\n [0., 1., 2., 3.]])\n \"\"\"\n return _npi.resize_fallback(a, new_shape=new_shape)\n\n\n@set_module('mxnet.ndarray.numpy')\ndef nan_to_num(x, copy=True, nan=0.0, posinf=None, neginf=None, **kwargs):\n \"\"\"\n Replace NaN with zero and infinity with large finite numbers (default\n behaviour) or with the numbers defined by the user using the `nan`,\n `posinf` and/or `neginf` keywords.\n\n If `x` is inexact, NaN is replaced by zero or by the user defined value in\n `nan` keyword, infinity is replaced by the largest finite floating point\n values representable by ``x.dtype`` or by the user defined value in\n `posinf` keyword and -infinity is replaced by the most negative finite\n floating point values representable by ``x.dtype`` or by the user defined\n value in `neginf` keyword.\n\n For complex dtypes, the above is applied to each of the real and\n imaginary components of `x` separately.\n\n If `x` is not inexact, then no replacements are made.\n\n Parameters\n ----------\n x : ndarray\n Input data.\n copy : bool, optional\n Whether to create a copy of `x` (True) or to replace values\n in-place (False). The in-place operation only occurs if\n casting to an array does not require a copy.\n Default is True.\n nan : int, float, optional\n Value to be used to fill NaN values. If no value is passed\n then NaN values will be replaced with 0.0.\n posinf : int, float, optional\n Value to be used to fill positive infinity values. If no value is\n passed then positive infinity values will be replaced with a very\n large number.\n neginf : int, float, optional\n Value to be used to fill negative infinity values. If no value is\n passed then negative infinity values will be replaced with a very\n small (or negative) number.\n\n .. versionadded:: 1.13\n\n Returns\n -------\n out : ndarray\n `x`, with the non-finite values replaced. If `copy` is False, this may\n be `x` itself.\n\n Notes\n -----\n NumPy uses the IEEE Standard for Binary Floating-Point for Arithmetic\n (IEEE 754). 
This means that Not a Number is not equivalent to infinity.\n\n Examples\n --------\n >>> np.nan_to_num(np.inf)\n 1.7976931348623157e+308\n >>> np.nan_to_num(-np.inf)\n -1.7976931348623157e+308\n >>> np.nan_to_num(np.nan)\n 0.0\n >>> x = np.array([np.inf, -np.inf, np.nan, -128, 128])\n >>> np.nan_to_num(x)\n array([ 3.4028235e+38, -3.4028235e+38, 0.0000000e+00, -1.2800000e+02,\n 1.2800000e+02])\n >>> np.nan_to_num(x, nan=-9999, posinf=33333333, neginf=33333333)\n array([ 3.3333332e+07, 3.3333332e+07, -9.9990000e+03, -1.2800000e+02,\n 1.2800000e+02])\n >>> y = np.array([[-1, 0, 1],[9999,234,-14222]],dtype=\"float64\")/0\n array([[-inf, nan, inf],\n [ inf, inf, -inf]], dtype=float64)\n >>> np.nan_to_num(y)\n array([[-1.79769313e+308, 0.00000000e+000, 1.79769313e+308],\n [ 1.79769313e+308, 1.79769313e+308, -1.79769313e+308]], dtype=float64)\n >>> np.nan_to_num(y, nan=111111, posinf=222222)\n array([[-1.79769313e+308, 1.11111000e+005, 2.22222000e+005],\n [ 2.22222000e+005, 2.22222000e+005, -1.79769313e+308]], dtype=float64)\n >>> y\n array([[-inf, nan, inf],\n [ inf, inf, -inf]], dtype=float64)\n >>> np.nan_to_num(y, copy=False, nan=111111, posinf=222222)\n array([[-1.79769313e+308, 1.11111000e+005, 2.22222000e+005],\n [ 2.22222000e+005, 2.22222000e+005, -1.79769313e+308]], dtype=float64)\n >>> y\n array([[-1.79769313e+308, 1.11111000e+005, 2.22222000e+005],\n [ 2.22222000e+005, 2.22222000e+005, -1.79769313e+308]], dtype=float64)\n \"\"\"\n if isinstance(x, numeric_types):\n return _np.nan_to_num(x, copy, nan, posinf, neginf)\n elif isinstance(x, NDArray):\n if x.dtype in ['int8', 'uint8', 'int32', 'int64']:\n return x\n if not copy:\n return _npi.nan_to_num(x, copy=copy, nan=nan, posinf=posinf, neginf=neginf, out=x)\n return _npi.nan_to_num(x, copy=copy, nan=nan, posinf=posinf, neginf=neginf, out=None)\n else:\n raise TypeError('type {} not supported'.format(str(type(x))))\n\n\n@set_module('mxnet.ndarray.numpy')\n@wrap_np_unary_func\ndef isnan(x, out=None, **kwargs):\n \"\"\"\n Test element-wise for NaN and return result as a boolean array.\n\n Parameters\n ----------\n x : ndarray\n Input array.\n out : ndarray or None, optional\n A location into which the result is stored.\n If provided, it must have the same shape and dtype as input ndarray.\n If not provided or `None`, a freshly-allocated array is returned.\n\n Returns\n -------\n y : ndarray or bool\n True where x is NaN, false otherwise.\n This is a scalar if x is a scalar.\n\n Notes\n -----\n NumPy uses the IEEE Standard for Binary Floating-Point for Arithmetic (IEEE 754).\n\n This function differs from the original `numpy.isinf\n <https://docs.scipy.org/doc/numpy/reference/generated/numpy.isnan.html>`_ in\n the following aspects:\n - Does not support complex number for now\n - Input type does not support Python native iterables(list, tuple, ...).\n - ``out`` param: cannot perform auto broadcasting. ``out`` ndarray's shape must be the same as the expected output.\n - ``out`` param: cannot perform auto type cast. 
``out`` ndarray's dtype must be the same as the expected output.\n - ``out`` param does not support scalar input case.\n\n Examples\n --------\n >>> np.isnan(np.nan)\n True\n >>> np.isnan(np.inf)\n False\n >>> np.isnan(np.array([np.log(-1.),1.,np.log(0)]))\n array([ True, False, False])\n \"\"\"\n return _unary_func_helper(x, _npi.isnan, _np.isnan, out=out, **kwargs)\n\n\n@set_module('mxnet.ndarray.numpy')\n@wrap_np_unary_func\ndef isinf(x, out=None, **kwargs):\n \"\"\"\n Test element-wise for positive or negative infinity.\n\n Parameters\n ----------\n x : ndarray\n Input array.\n out : ndarray or None, optional\n A location into which the result is stored.\n If provided, it must have the same shape and dtype as input ndarray.\n If not provided or `None`, a freshly-allocated array is returned.\n\n Returns\n -------\n y : ndarray or bool\n True where x is positive or negative infinity, false otherwise.\n This is a scalar if x is a scalar.\n\n Notes\n -----\n NumPy uses the IEEE Standard for Binary Floating-Point for Arithmetic (IEEE 754).\n This means that Not a Number is not equivalent to infinity.\n\n This function differs from the original `numpy.isnan\n <https://docs.scipy.org/doc/numpy/reference/generated/numpy.isnan.html>`_ in\n the following aspects:\n - Does not support complex number for now\n - Input type does not support Python native iterables(list, tuple, ...).\n - ``out`` param: cannot perform auto broadcasting. ``out`` ndarray's shape must be the same as the expected output.\n - ``out`` param: cannot perform auto type cast. ``out`` ndarray's dtype must be the same as the expected output.\n - ``out`` param does not support scalar input case.\n\n Examples\n --------\n >>> np.isinf(np.inf)\n True\n >>> np.isinf(np.nan)\n False\n >>> np.isinf(np.array([np.inf, -np.inf, 1.0, np.nan]))\n array([ True, True, False, False])\n >>> x = np.array([-np.inf, 0., np.inf])\n >>> y = np.array([True, True, True], dtype=np.bool_)\n >>> np.isinf(x, y)\n array([ True, False, True])\n >>> y\n array([ True, False, True])\n \"\"\"\n return _unary_func_helper(x, _npi.isinf, _np.isinf, out=out, **kwargs)\n\n\n@wrap_np_unary_func\ndef isposinf(x, out=None, **kwargs):\n \"\"\"\n Test element-wise for positive infinity, return result as bool array.\n\n Parameters\n ----------\n x : ndarray\n Input array.\n out : ndarray or None, optional\n A location into which the result is stored.\n If provided, it must have the same shape and dtype as input ndarray.\n If not provided or `None`, a freshly-allocated array is returned.\n\n Returns\n -------\n y : ndarray or bool\n True where x is positive infinity, false otherwise.\n This is a scalar if x is a scalar.\n\n Notes\n -----\n NumPy uses the IEEE Standard for Binary Floating-Point for Arithmetic (IEEE 754).\n This means that Not a Number is not equivalent to infinity.\n\n Examples\n --------\n >>> np.isposinf(np.inf)\n True\n >>> np.isposinf(-np.inf)\n False\n >>> np.isposinf(np.nan)\n False\n >>> np.isposinf(np.array([-np.inf, 0., np.inf]))\n array([False, False, True])\n >>> x = np.array([-np.inf, 0., np.inf])\n >>> y = np.array([True, True, True], dtype=np.bool)\n >>> np.isposinf(x, y)\n array([False, False, True])\n >>> y\n array([False, False, True])\n \"\"\"\n return _unary_func_helper(x, _npi.isposinf, _np.isposinf, out=out, **kwargs)\n\n\n@set_module('mxnet.ndarray.numpy')\n@wrap_np_unary_func\ndef isneginf(x, out=None, **kwargs):\n \"\"\"\n Test element-wise for negative infinity, return result as bool array.\n\n Parameters\n ----------\n x : 
ndarray\n Input array.\n out : ndarray or None, optional\n A location into which the result is stored.\n If provided, it must have the same shape and dtype as input ndarray.\n If not provided or `None`, a freshly-allocated array is returned.\n\n Returns\n -------\n y : ndarray or bool\n True where x is negative infinity, false otherwise.\n This is a scalar if x is a scalar.\n\n Notes\n -----\n NumPy uses the IEEE Standard for Binary Floating-Point for Arithmetic (IEEE 754).\n This means that Not a Number is not equivalent to infinity.\n\n Examples\n --------\n >>> np.isneginf(-np.inf)\n True\n >>> np.isneginf(np.inf)\n False\n >>> np.isneginf(float('-inf'))\n True\n >>> np.isneginf(np.array([-np.inf, 0., np.inf]))\n array([ True, False, False])\n >>> x = np.array([-np.inf, 0., np.inf])\n >>> y = np.array([True, True, True], dtype=np.bool)\n >>> np.isneginf(x, y)\n array([ True, False, False])\n >>> y\n array([ True, False, False])\n \"\"\"\n return _unary_func_helper(x, _npi.isneginf, _np.isneginf, out=out, **kwargs)\n\n\n@set_module('mxnet.ndarray.numpy')\n@wrap_np_unary_func\ndef isfinite(x, out=None, **kwargs):\n \"\"\"\n Test element-wise for finiteness (not infinity or not Not a Number).\n\n Parameters\n ----------\n x : ndarray\n Input array.\n out : ndarray or None, optional\n A location into which the result is stored.\n If provided, it must have the same shape and dtype as input ndarray.\n If not provided or `None`, a freshly-allocated array is returned.\n\n Returns\n -------\n y : ndarray or bool\n True where x is negative infinity, false otherwise.\n This is a scalar if x is a scalar.\n\n Notes\n -----\n Not a Number, positive infinity and negative infinity are considered to be non-finite.\n\n NumPy uses the IEEE Standard for Binary Floating-Point for Arithmetic (IEEE 754).\n This means that Not a Number is not equivalent to infinity.\n Also that positive infinity is not equivalent to negative infinity.\n But infinity is equivalent to positive infinity. Errors result if the second argument\n is also supplied when x is a scalar input, or if first and second arguments have different shapes.\n\n Examples\n --------\n >>> np.isfinite(1)\n True\n >>> np.isfinite(0)\n True\n >>> np.isfinite(np.nan)\n False\n >>> np.isfinite(np.inf)\n False\n >>> np.isfinite(-np.inf)\n False\n >>> np.isfinite(np.array([np.log(-1.),1.,np.log(0)]))\n array([False, True, False])\n >>> x = np.array([-np.inf, 0., np.inf])\n >>> y = np.array([True, True, True], dtype=np.bool)\n >>> np.isfinite(x, y)\n array([False, True, False])\n >>> y\n array([False, True, False])\n \"\"\"\n return _unary_func_helper(x, _npi.isfinite, _np.isfinite, out=out, **kwargs)\n\n\n@set_module('mxnet.ndarray.numpy')\ndef where(condition, x=None, y=None): # pylint: disable=too-many-return-statements\n \"\"\"where(condition, [x, y])\n Return elements chosen from `x` or `y` depending on `condition`.\n\n .. note::\n When only `condition` is provided, this function is a shorthand for\n ``np.asarray(condition).nonzero()``. The rest of this documentation\n covers only the case where all three arguments are provided.\n\n Parameters\n ----------\n condition : ndarray\n Where True, yield `x`, otherwise yield `y`.\n x, y : ndarray\n Values from which to choose. `x`, `y` and `condition` need to be\n broadcastable to some shape. 
`x` and `y` must have the same dtype.\n\n Returns\n -------\n out : ndarray\n An array with elements from `x` where `condition` is True, and elements\n from `y` elsewhere.\n\n Notes\n -----\n If all the arrays are 1-D, `where` is equivalent to::\n\n [xv if c else yv\n for c, xv, yv in zip(condition, x, y)]\n\n This function differs from the original `numpy.where\n <https://docs.scipy.org/doc/numpy/reference/generated/numpy.where.html>`_ in\n the following way(s):\n\n - If `condition` is a scalar, this operator returns x or y directly without broadcasting.\n - If `condition` is ndarray, while both `x` and `y` are scalars,\n the output dtype will be `float32`.\n\n Examples\n --------\n >>> a = np.arange(10)\n >>> a\n array([0., 1., 2., 3., 4., 5., 6., 7., 8., 9.])\n >>> np.where(a < 5, a, 10*a)\n array([ 0., 1., 2., 3., 4., 50., 60., 70., 80., 90.])\n\n This can be used on multidimensional arrays too:\n\n >>> cond = np.array([[True, False], [True, True]])\n >>> x = np.array([[1, 2], [3, 4]])\n >>> y = np.array([[9, 8], [7, 6]])\n >>> np.where(cond, x, y)\n array([[1., 8.],\n [3., 4.]])\n\n The shapes of x, y, and the condition are broadcast together:\n\n >>> x, y = onp.ogrid[:3, :4]\n >>> x = np.array(x)\n >>> y = np.array(y)\n >>> np.where(x < y, x, 10 + y) # both x and 10+y are broadcast\n array([[10, 0, 0, 0],\n [10, 11, 1, 1],\n [10, 11, 12, 2]], dtype=int64)\n\n >>> a = np.array([[0, 1, 2],\n ... [0, 2, 4],\n ... [0, 3, 6]])\n >>> np.where(a < 4, a, -1) # -1 is broadcast\n array([[ 0., 1., 2.],\n [ 0., 2., -1.],\n [ 0., 3., -1.]])\n \"\"\"\n if x is None and y is None:\n return nonzero(condition)\n else:\n if isinstance(condition, numeric_types):\n if condition != 0:\n return x\n else:\n return y\n else:\n if isinstance(x, numeric_types) and isinstance(y, numeric_types):\n return _npi.where_scalar2(condition, float(x), float(y), out=None)\n elif isinstance(x, NDArray) and isinstance(y, NDArray):\n return _npi.where(condition, x, y, out=None)\n elif isinstance(y, NDArray):\n return _npi.where_lscalar(condition, y, float(x), out=None)\n elif isinstance(x, NDArray):\n return _npi.where_rscalar(condition, x, float(y), out=None)\n else:\n raise TypeError('type {0} and {1} not supported'.format(str(type(x)), str(type(y))))\n\n\n@set_module('mxnet.ndarray.numpy')\ndef polyval(p, x):\n \"\"\"\n Evaluate a polynomial at specific values.\n If p is of length N, this function returns the value:\n p[0]*x**(N-1) + p[1]*x**(N-2) + ... 
+ p[N-2]*x + p[N-1]\n If x is a sequence, then p(x) is returned for each element of x.\n If x is another polynomial then the composite polynomial p(x(t)) is returned.\n\n Parameters\n ----------\n p : ndarray\n 1D array of polynomial coefficients (including coefficients equal to zero)\n from highest degree to the constant term.\n x : ndarray\n An array of numbers, at which to evaluate p.\n\n Returns\n -------\n values : ndarray\n Result array of polynomials\n\n Notes\n -----\n This function differs from the original `numpy.polyval\n <https://numpy.org/devdocs/reference/generated/numpy.polyval.html>`_ in\n the following way(s):\n - Does not support poly1d.\n - X should be ndarray type even if it contains only one element.\n\n Examples\n --------\n >>> p = np.array([3, 0, 1])\n array([3., 0., 1.])\n >>> x = np.array([5])\n array([5.])\n >>> np.polyval(p, x) # 3 * 5**2 + 0 * 5**1 + 1\n array([76.])\n >>> x = np.array([5, 4])\n array([5., 4.])\n >>> np.polyval(p, x)\n array([76., 49.])\n \"\"\"\n from ...numpy import ndarray\n if isinstance(p, ndarray) and isinstance(x, ndarray):\n return _npi.polyval(p, x)\n elif not isinstance(p, ndarray) and not isinstance(x, ndarray):\n return _np.polyval(p, x)\n else:\n raise TypeError('type not supported')\n\n\n@set_module('mxnet.ndarray.numpy')\ndef bincount(x, weights=None, minlength=0):\n \"\"\"\n Count number of occurrences of each value in array of non-negative ints.\n\n Parameters\n ----------\n x : ndarray\n input array, 1 dimension, nonnegative ints.\n weights: ndarray\n input weigths same shape as x. (Optional)\n minlength: int\n A minimum number of bins for the output. (Optional)\n\n Returns\n --------\n out : ndarray\n the result of binning the input array. The length of out is equal to amax(x)+1.\n\n Raises\n --------\n Value Error\n If the input is not 1-dimensional, or contains elements with negative values,\n or if minlength is negative\n TypeError\n If the type of the input is float or complex.\n\n Examples\n --------\n >>> np.bincount(np.arange(5))\n array([1, 1, 1, 1, 1])\n >>> np.bincount(np.array([0, 1, 1, 3, 2, 1, 7]))\n array([1, 3, 1, 1, 0, 0, 0, 1])\n\n >>> x = np.array([0, 1, 1, 3, 2, 1, 7, 23])\n >>> np.bincount(x).size == np.amax(x)+1\n True\n\n >>> np.bincount(np.arange(5, dtype=float))\n Traceback (most recent call last):\n File \"<stdin>\", line 1, in <module>\n TypeError: array cannot be safely cast to required type\n\n >>> w = np.array([0.3, 0.5, 0.2, 0.7, 1., -0.6]) # weights\n >>> x = np.array([0, 1, 1, 2, 2, 2])\n >>> np.bincount(x, weights=w)\n array([ 0.3, 0.7, 1.1])\n \"\"\"\n if not isinstance(x, NDArray):\n raise TypeError(\"Input data should be NDarray\")\n if minlength < 0:\n raise ValueError(\"Minlength value should greater than 0\")\n if weights is None:\n return _npi.bincount(x, minlength=minlength, has_weights=False)\n return _npi.bincount(x, weights=weights, minlength=minlength, has_weights=True)\n"
] | [
[
"numpy.polyval",
"numpy.reshape",
"numpy.isscalar",
"numpy.flip",
"numpy.around",
"numpy.nan_to_num",
"numpy.unravel_index"
]
] |
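A note on the einsum docstring in the row above: it distinguishes implicit from explicit subscripts and describes the `optimize` flag. The following is a minimal sketch using standard NumPy (not the MXNet `_npi` backend recorded in this row) that checks a few of the documented equivalences; the array values are arbitrary illustration:

import numpy as np

a = np.arange(25).reshape(5, 5).astype(float)
b = np.arange(5).astype(float)

# implicit mode: the repeated label 'i' is summed, giving the trace
assert np.einsum('ii', a) == np.trace(a)

# explicit mode: '->' fixes the output labels, giving a matrix-vector product
assert np.allclose(np.einsum('ij,j->i', a, b), a.dot(b))

# optimize=True lets einsum choose a cheaper contraction order for chained operands
c = np.random.rand(8, 8)
assert np.allclose(np.einsum('ij,jk,kl->il', c, c, c, optimize=True), c @ c @ c)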
michigg/web-simultaneous-recording-tool | [
"67db83f6e34d9cb726c69b4e448fed3604a43618"
] | [
"analyser/analysis/pen_calculation_deviation_box_plots.py"
] | [
"\"\"\"\n\n\"\"\"\nimport sys\n\nimport numpy as np\nimport pandas as pd\nfrom scipy.signal import argrelextrema\n\nfrom analysis.frog_click_mean_calculation import calc_click_mean_quantil_based\nfrom utils import dataframe_index, audio_calcs\nfrom utils.data_exporter import Exporter\nfrom utils.data_loader import Loader\n\nimport logging\n\nfrom utils.output import Output\n\n# INPUT_DEVICES = '/home/michigg/GIT/uni/2021-ma-michael-goetz-data/PensCalibration/Test2/Converted/devices-1-aggregated-dbas-distance_0m-device.pkl'\nINPUT_DEVICES = '/home/michigg/GIT/uni/2021-ma-michael-goetz-data/PensCalibration/Test2/Converted/devices-1-aggregated-dbas.pkl'\nOUTPUT_DIR = '/home/michigg/GIT/uni/2021-ma-michael-goetz-data/PensCalibration/Test2/Graphs/Calculations/BoxPlots'\n\nlogger = logging.getLogger(__name__)\nlogging.basicConfig(\n level=logging.INFO,\n format=\"%(asctime)s [%(levelname)s]:\\n %(message)s\",\n handlers=[\n logging.FileHandler(f\"{OUTPUT_DIR}/analyse.log\", mode='w'),\n logging.StreamHandler(sys.stdout)\n ]\n)\n\npd.set_option('display.max_rows', None)\npd.set_option('display.max_columns', None)\npd.set_option('display.width', None)\npd.set_option('display.max_colwidth', None)\n\n\ndef main():\n devices = Loader.load_analysis_from_pickle(INPUT_DEVICES)\n sample_rate = dataframe_index.get_sample_rate(devices)\n buffer_size = dataframe_index.get_buffer_size(devices)\n dataframes = []\n\n # global max\n result = audio_calcs.calculate_global_max(devices)\n result = result.unstack('PenId')\n result = result.std(axis=1).to_frame()\n result.columns = ['global max']\n dataframes.append(result)\n\n # quantil based deviations\n result = devices.apply(\n calc_click_mean_quantil_based,\n axis=1,\n sample_rate=sample_rate,\n buffer_size=buffer_size,\n db_only=True\n ).to_frame()\n result = result.unstack('PenId')\n result = result.std(axis=1).to_frame()\n result.columns = ['quantile based']\n dataframes.append(result)\n\n # global max based using db_range\n for db_range in [10, 15, 20]:\n result = devices.apply(\n audio_calcs.calc_series_click_mean,\n axis=1,\n sample_rate=sample_rate,\n buffer_size=buffer_size,\n db_range=db_range,\n return_maxima=False\n ).to_frame()\n result = result.unstack('PenId')\n result = result.std(axis=1).to_frame()\n result.columns = [f'{db_range} dB(A) range global max']\n dataframes.append(result)\n\n results = pd.concat(dataframes, axis=1)\n logger.info(results)\n Output.box_plot(\n '',\n # f'Deviations In dB(A) Between Frogs By Calculation Method',\n results,\n file_path=f'{OUTPUT_DIR}',\n file_name=f'box-plot-calculation-methods',\n ignore_clean=True,\n hide_outliers=True\n )\n\n\nif __name__ == '__main__':\n main()\n"
] | [
[
"pandas.concat",
"pandas.set_option"
]
] |
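The box-plot script in the row above repeats one pattern for every calculation method: compute a single value per (measurement, PenId), unstack the PenId level into columns, and take the row-wise standard deviation as the between-pen deviation. A self-contained pandas sketch of that pattern (index and column names here are invented for illustration, not taken from the recorded data):

import pandas as pd

idx = pd.MultiIndex.from_product([[0, 1, 2], ['pen_a', 'pen_b', 'pen_c']],
                                 names=['MeasurementId', 'PenId'])
# one dB(A)-style value per measurement and pen
values = pd.Series([60.1, 61.0, 59.8, 62.3, 62.1, 61.9, 58.7, 59.5, 58.9], index=idx)

wide = values.unstack('PenId')           # one column per pen
deviation = wide.std(axis=1).to_frame()  # spread between pens per measurement
deviation.columns = ['global max']       # same renaming style as the script
print(deviation)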
Ali-Tahir/spaCy | [
"9e210fa7fdb8e376655e7a7ab7debd3ffd718a63"
] | [
"spacy/cli/pretrain.py"
] | [
"# coding: utf8\nfrom __future__ import print_function, unicode_literals\n\nimport plac\nimport random\nimport numpy\nimport time\nimport re\nfrom collections import Counter\nfrom pathlib import Path\nfrom thinc.v2v import Affine, Maxout\nfrom thinc.misc import LayerNorm as LN\nfrom thinc.neural.util import prefer_gpu\nfrom wasabi import Printer\nimport srsly\n\nfrom ..errors import Errors\nfrom ..tokens import Doc\nfrom ..attrs import ID, HEAD\nfrom .._ml import Tok2Vec, flatten, chain, create_default_optimizer\nfrom .._ml import masked_language_model, get_cossim_loss\nfrom .. import util\nfrom .train import _load_pretrained_tok2vec\n\n\[email protected](\n texts_loc=(\n \"Path to JSONL file with raw texts to learn from, with text provided as the key 'text' or tokens as the \"\n \"key 'tokens'\",\n \"positional\",\n None,\n str,\n ),\n vectors_model=(\"Name or path to spaCy model with vectors to learn from\"),\n output_dir=(\"Directory to write models to on each epoch\", \"positional\", None, str),\n width=(\"Width of CNN layers\", \"option\", \"cw\", int),\n depth=(\"Depth of CNN layers\", \"option\", \"cd\", int),\n cnn_window=(\"Window size for CNN layers\", \"option\", \"cW\", int),\n cnn_pieces=(\"Maxout size for CNN layers. 1 for Mish\", \"option\", \"cP\", int),\n use_chars=(\"Whether to use character-based embedding\", \"flag\", \"chr\", bool),\n sa_depth=(\"Depth of self-attention layers\", \"option\", \"sa\", int),\n bilstm_depth=(\"Depth of BiLSTM layers (requires PyTorch)\", \"option\", \"lstm\", int),\n embed_rows=(\"Number of embedding rows\", \"option\", \"er\", int),\n loss_func=(\n \"Loss function to use for the objective. Either 'L2' or 'cosine'\",\n \"option\",\n \"L\",\n str,\n ),\n use_vectors=(\"Whether to use the static vectors as input features\", \"flag\", \"uv\"),\n dropout=(\"Dropout rate\", \"option\", \"d\", float),\n batch_size=(\"Number of words per training batch\", \"option\", \"bs\", int),\n max_length=(\n \"Max words per example. Longer examples are discarded\",\n \"option\",\n \"xw\",\n int,\n ),\n min_length=(\n \"Min words per example. Shorter examples are discarded\",\n \"option\",\n \"nw\",\n int,\n ),\n seed=(\"Seed for random number generators\", \"option\", \"s\", int),\n n_iter=(\"Number of iterations to pretrain\", \"option\", \"i\", int),\n n_save_every=(\"Save model every X batches.\", \"option\", \"se\", int),\n init_tok2vec=(\n \"Path to pretrained weights for the token-to-vector parts of the models. See 'spacy pretrain'. Experimental.\",\n \"option\",\n \"t2v\",\n Path,\n ),\n epoch_start=(\n \"The epoch to start counting at. Only relevant when using '--init-tok2vec' and the given weight file has been \"\n \"renamed. Prevents unintended overwriting of existing weight files.\",\n \"option\",\n \"es\",\n int,\n ),\n)\ndef pretrain(\n texts_loc,\n vectors_model,\n output_dir,\n width=96,\n depth=4,\n bilstm_depth=0,\n cnn_pieces=3,\n sa_depth=0,\n use_chars=False,\n cnn_window=1,\n embed_rows=2000,\n loss_func=\"cosine\",\n use_vectors=False,\n dropout=0.2,\n n_iter=1000,\n batch_size=3000,\n max_length=500,\n min_length=5,\n seed=0,\n n_save_every=None,\n init_tok2vec=None,\n epoch_start=None,\n):\n \"\"\"\n Pre-train the 'token-to-vector' (tok2vec) layer of pipeline components,\n using an approximate language-modelling objective. Specifically, we load\n pretrained vectors, and train a component like a CNN, BiLSTM, etc to predict\n vectors which match the pretrained ones. The weights are saved to a directory\n after each epoch. 
You can then pass a path to one of these pretrained weights\n files to the 'spacy train' command.\n\n This technique may be especially helpful if you have little labelled data.\n However, it's still quite experimental, so your mileage may vary.\n\n To load the weights back in during 'spacy train', you need to ensure\n all settings are the same between pretraining and training. The API and\n errors around this need some improvement.\n \"\"\"\n config = dict(locals())\n for key in config:\n if isinstance(config[key], Path):\n config[key] = str(config[key])\n msg = Printer()\n util.fix_random_seed(seed)\n\n has_gpu = prefer_gpu()\n if has_gpu:\n import torch\n\n torch.set_default_tensor_type(\"torch.cuda.FloatTensor\")\n msg.info(\"Using GPU\" if has_gpu else \"Not using GPU\")\n\n output_dir = Path(output_dir)\n if not output_dir.exists():\n output_dir.mkdir()\n msg.good(\"Created output directory\")\n srsly.write_json(output_dir / \"config.json\", config)\n msg.good(\"Saved settings to config.json\")\n\n # Load texts from file or stdin\n if texts_loc != \"-\": # reading from a file\n texts_loc = Path(texts_loc)\n if not texts_loc.exists():\n msg.fail(\"Input text file doesn't exist\", texts_loc, exits=1)\n with msg.loading(\"Loading input texts...\"):\n texts = list(srsly.read_jsonl(texts_loc))\n if not texts:\n msg.fail(\"Input file is empty\", texts_loc, exits=1)\n msg.good(\"Loaded input texts\")\n random.shuffle(texts)\n else: # reading from stdin\n msg.text(\"Reading input text from stdin...\")\n texts = srsly.read_jsonl(\"-\")\n\n with msg.loading(\"Loading model '{}'...\".format(vectors_model)):\n nlp = util.load_model(vectors_model)\n msg.good(\"Loaded model '{}'\".format(vectors_model))\n pretrained_vectors = None if not use_vectors else nlp.vocab.vectors.name\n model = create_pretraining_model(\n nlp,\n Tok2Vec(\n width,\n embed_rows,\n conv_depth=depth,\n pretrained_vectors=pretrained_vectors,\n bilstm_depth=bilstm_depth, # Requires PyTorch. Experimental.\n subword_features=not use_chars, # Set to False for Chinese etc\n cnn_maxout_pieces=cnn_pieces, # If set to 1, use Mish activation.\n ),\n )\n # Load in pretrained weights\n if init_tok2vec is not None:\n components = _load_pretrained_tok2vec(nlp, init_tok2vec)\n msg.text(\"Loaded pretrained tok2vec for: {}\".format(components))\n # Parse the epoch number from the given weight file\n model_name = re.search(r\"model\\d+\\.bin\", str(init_tok2vec))\n if model_name:\n # Default weight file name so read epoch_start from it by cutting off 'model' and '.bin'\n epoch_start = int(model_name.group(0)[5:][:-4]) + 1\n else:\n if not epoch_start:\n msg.fail(\n \"You have to use the '--epoch-start' argument when using a renamed weight file for \"\n \"'--init-tok2vec'\",\n exits=True,\n )\n elif epoch_start < 0:\n msg.fail(\n \"The argument '--epoch-start' has to be greater or equal to 0. 
'%d' is invalid\"\n % epoch_start,\n exits=True,\n )\n else:\n # Without '--init-tok2vec' the '--epoch-start' argument is ignored\n epoch_start = 0\n\n optimizer = create_default_optimizer(model.ops)\n tracker = ProgressTracker(frequency=10000)\n msg.divider(\"Pre-training tok2vec layer - starting at epoch %d\" % epoch_start)\n row_settings = {\"widths\": (3, 10, 10, 6, 4), \"aligns\": (\"r\", \"r\", \"r\", \"r\", \"r\")}\n msg.row((\"#\", \"# Words\", \"Total Loss\", \"Loss\", \"w/s\"), **row_settings)\n\n def _save_model(epoch, is_temp=False):\n is_temp_str = \".temp\" if is_temp else \"\"\n with model.use_params(optimizer.averages):\n with (output_dir / (\"model%d%s.bin\" % (epoch, is_temp_str))).open(\n \"wb\"\n ) as file_:\n file_.write(model.tok2vec.to_bytes())\n log = {\n \"nr_word\": tracker.nr_word,\n \"loss\": tracker.loss,\n \"epoch_loss\": tracker.epoch_loss,\n \"epoch\": epoch,\n }\n with (output_dir / \"log.jsonl\").open(\"a\") as file_:\n file_.write(srsly.json_dumps(log) + \"\\n\")\n\n skip_counter = 0\n for epoch in range(epoch_start, n_iter + epoch_start):\n for batch_id, batch in enumerate(\n util.minibatch_by_words(((text, None) for text in texts), size=batch_size)\n ):\n docs, count = make_docs(\n nlp,\n [text for (text, _) in batch],\n max_length=max_length,\n min_length=min_length,\n )\n skip_counter += count\n loss = make_update(\n model, docs, optimizer, objective=loss_func, drop=dropout\n )\n progress = tracker.update(epoch, loss, docs)\n if progress:\n msg.row(progress, **row_settings)\n if texts_loc == \"-\" and tracker.words_per_epoch[epoch] >= 10 ** 7:\n break\n if n_save_every and (batch_id % n_save_every == 0):\n _save_model(epoch, is_temp=True)\n _save_model(epoch)\n tracker.epoch_loss = 0.0\n if texts_loc != \"-\":\n # Reshuffle the texts if texts were loaded from a file\n random.shuffle(texts)\n if skip_counter > 0:\n msg.warn(\"Skipped {count} empty values\".format(count=str(skip_counter)))\n msg.good(\"Successfully finished pretrain\")\n\n\ndef make_update(model, docs, optimizer, drop=0.0, objective=\"L2\"):\n \"\"\"Perform an update over a single batch of documents.\n\n docs (iterable): A batch of `Doc` objects.\n drop (float): The dropout rate.\n optimizer (callable): An optimizer.\n RETURNS loss: A float for the loss.\n \"\"\"\n predictions, backprop = model.begin_update(docs, drop=drop)\n loss, gradients = get_vectors_loss(model.ops, docs, predictions, objective)\n backprop(gradients, sgd=optimizer)\n # Don't want to return a cupy object here\n # The gradients are modified in-place by the BERT MLM,\n # so we get an accurate loss\n return float(loss)\n\n\ndef make_docs(nlp, batch, min_length, max_length):\n docs = []\n skip_count = 0\n for record in batch:\n if not isinstance(record, dict):\n raise TypeError(Errors.E137.format(type=type(record), line=record))\n if \"tokens\" in record:\n words = record[\"tokens\"]\n if not words:\n skip_count += 1\n continue\n doc = Doc(nlp.vocab, words=words)\n elif \"text\" in record:\n text = record[\"text\"]\n if not text:\n skip_count += 1\n continue\n doc = nlp.make_doc(text)\n else:\n raise ValueError(Errors.E138.format(text=record))\n if \"heads\" in record:\n heads = record[\"heads\"]\n heads = numpy.asarray(heads, dtype=\"uint64\")\n heads = heads.reshape((len(doc), 1))\n doc = doc.from_array([HEAD], heads)\n if len(doc) >= min_length and len(doc) < max_length:\n docs.append(doc)\n return docs, skip_count\n\n\ndef get_vectors_loss(ops, docs, prediction, objective=\"L2\"):\n \"\"\"Compute a mean-squared 
error loss between the documents' vectors and\n the prediction.\n\n Note that this is ripe for customization! We could compute the vectors\n in some other word, e.g. with an LSTM language model, or use some other\n type of objective.\n \"\"\"\n # The simplest way to implement this would be to vstack the\n # token.vector values, but that's a bit inefficient, especially on GPU.\n # Instead we fetch the index into the vectors table for each of our tokens,\n # and look them up all at once. This prevents data copying.\n ids = ops.flatten([doc.to_array(ID).ravel() for doc in docs])\n target = docs[0].vocab.vectors.data[ids]\n if objective == \"L2\":\n d_target = prediction - target\n loss = (d_target ** 2).sum()\n elif objective == \"cosine\":\n loss, d_target = get_cossim_loss(prediction, target)\n else:\n raise ValueError(Errors.E142.format(loss_func=objective))\n return loss, d_target\n\n\ndef create_pretraining_model(nlp, tok2vec):\n \"\"\"Define a network for the pretraining. We simply add an output layer onto\n the tok2vec input model. The tok2vec input model needs to be a model that\n takes a batch of Doc objects (as a list), and returns a list of arrays.\n Each array in the output needs to have one row per token in the doc.\n \"\"\"\n output_size = nlp.vocab.vectors.data.shape[1]\n output_layer = chain(\n LN(Maxout(300, pieces=3)), Affine(output_size, drop_factor=0.0)\n )\n # This is annoying, but the parser etc have the flatten step after\n # the tok2vec. To load the weights in cleanly, we need to match\n # the shape of the models' components exactly. So what we cann\n # \"tok2vec\" has to be the same set of processes as what the components do.\n tok2vec = chain(tok2vec, flatten)\n model = chain(tok2vec, output_layer)\n model = masked_language_model(nlp.vocab, model)\n model.tok2vec = tok2vec\n model.output_layer = output_layer\n model.begin_training([nlp.make_doc(\"Give it a doc to infer shapes\")])\n return model\n\n\nclass ProgressTracker(object):\n def __init__(self, frequency=1000000):\n self.loss = 0.0\n self.prev_loss = 0.0\n self.nr_word = 0\n self.words_per_epoch = Counter()\n self.frequency = frequency\n self.last_time = time.time()\n self.last_update = 0\n self.epoch_loss = 0.0\n\n def update(self, epoch, loss, docs):\n self.loss += loss\n self.epoch_loss += loss\n words_in_batch = sum(len(doc) for doc in docs)\n self.words_per_epoch[epoch] += words_in_batch\n self.nr_word += words_in_batch\n words_since_update = self.nr_word - self.last_update\n if words_since_update >= self.frequency:\n wps = words_since_update / (time.time() - self.last_time)\n self.last_update = self.nr_word\n self.last_time = time.time()\n loss_per_word = self.loss - self.prev_loss\n status = (\n epoch,\n self.nr_word,\n _smart_round(self.loss, width=10),\n _smart_round(loss_per_word, width=6),\n int(wps),\n )\n self.prev_loss = float(self.loss)\n return status\n else:\n return None\n\n\ndef _smart_round(figure, width=10, max_decimal=4):\n \"\"\"Round large numbers as integers, smaller numbers as decimals.\"\"\"\n n_digits = len(str(int(figure)))\n n_decimal = width - (n_digits + 1)\n if n_decimal <= 1:\n return str(int(figure))\n else:\n n_decimal = min(n_decimal, max_decimal)\n format_str = \"%.\" + str(n_decimal) + \"f\"\n return format_str % figure\n"
] | [
[
"torch.set_default_tensor_type",
"numpy.asarray"
]
] |
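`get_vectors_loss` in the pretraining script above offers an 'L2' and a 'cosine' objective between predicted vectors and the pretrained target vectors. A rough NumPy sketch of the L2 branch only (the cosine branch relies on spaCy's `get_cossim_loss` and is not reproduced here); shapes are arbitrary:

import numpy as np

def l2_vectors_loss(prediction, target):
    # mirrors the objective == 'L2' branch above: the squared-error sum is the
    # reported loss, and (prediction - target) is the quantity handed to backprop
    d_target = prediction - target
    loss = (d_target ** 2).sum()
    return loss, d_target

prediction = np.random.rand(4, 300)  # 4 tokens, 300-dim predicted vectors
target = np.random.rand(4, 300)      # rows looked up from the static vectors table
loss, d_target = l2_vectors_loss(prediction, target)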
k-stacke/ssl-pathology | [
"d440ce11712a5c1b6631d698dc3cafe7c04e2786"
] | [
"simclr/model.py"
] | [
"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.cuda import amp\nfrom torchvision.models import resnet50\n\nclass Identity(nn.Module):\n def __init__(self):\n super(Identity, self).__init__()\n\n def forward(self, x):\n return x\n\nclass Model(nn.Module):\n def __init__(self, feature_dim=128, pretrained=False):\n super(Model, self).__init__()\n\n self.f = resnet50(pretrained=pretrained)\n self.f.fc = Identity()\n # projection head\n self.g = nn.Sequential(nn.Linear(2048, 512, bias=False),\n nn.BatchNorm1d(512),\n nn.ReLU(inplace=True),\n nn.Linear(512, feature_dim, bias=True))\n\n @amp.autocast()\n def forward(self, x):\n x = self.f(x)\n feature = torch.flatten(x, start_dim=1)\n out = self.g(feature)\n return F.normalize(feature, dim=-1), F.normalize(out, dim=-1)\n"
] | [
[
"torch.nn.Linear",
"torch.nn.BatchNorm1d",
"torch.flatten",
"torch.nn.functional.normalize",
"torch.cuda.amp.autocast",
"torch.nn.ReLU"
]
] |
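The SimCLR `Model` in the row above returns an L2-normalised backbone feature and an L2-normalised projection. Below is a CPU-only sketch of the same two-headed structure with a tiny stand-in backbone, so it runs without downloading ResNet-50 weights and without the `amp.autocast` decorator; all sizes are illustrative:

import torch
import torch.nn as nn
import torch.nn.functional as F

backbone = nn.Sequential(nn.Flatten(), nn.Linear(3 * 8 * 8, 32))  # stand-in for resnet50 with fc = Identity()
projection = nn.Sequential(nn.Linear(32, 16, bias=False),
                           nn.BatchNorm1d(16),
                           nn.ReLU(inplace=True),
                           nn.Linear(16, 8, bias=True))

x = torch.randn(4, 3, 8, 8)
feature = torch.flatten(backbone(x), start_dim=1)
out = projection(feature)
feature, out = F.normalize(feature, dim=-1), F.normalize(out, dim=-1)
print(feature.shape, out.shape)  # torch.Size([4, 32]) torch.Size([4, 8])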
dina-fouad/pyccel | [
"f4d919e673b400442b9c7b81212b6fbef749c7b7"
] | [
"tests/codegen/ccode/scripts/arrays_pointers.py"
] | [
"# pylint: disable=missing-function-docstring, missing-module-docstring/\n#==============================================================================\n\ndef allocatable_to_pointer():\n\n from numpy import array\n\n a = array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10])\n c = a #pylint:disable=unused-variable\n\ndef pointer_to_pointer():\n\n from numpy import array\n\n a = array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10])\n b = a\n c = b #pylint:disable=unused-variable\n\ndef reassign_pointers():\n\n from numpy import array\n\n a = array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10])\n b = a #pylint:disable=unused-variable\n b = a[1:]\n"
] | [
[
"numpy.array"
]
] |
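The pyccel test in the row above exercises aliasing: `c = a` binds another name to the same array and `a[1:]` produces a view. The same relationships can be checked directly in plain NumPy; a short sketch:

import numpy as np

a = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10])
b = a        # alias: no copy is made
c = a[1:]    # slice: a view onto the same buffer

assert np.shares_memory(a, b)
assert np.shares_memory(a, c)
b[0] = 99    # the change is visible through `a` as well
assert a[0] == 99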
MaxCodeXTC/symmetrynet | [
"f42810be95ecaa85a32a836213cb8d0687184574"
] | [
"train.py"
] | [
"#!/usr/bin/env python3\n\"\"\"Training and Evaluate the Neural Network\nUsage:\n train.py [options] <yaml-config>\n train.py (-h | --help )\n\nArguments:\n yaml-config Path to the yaml hyper-parameter file\n\nOptions:\n -h --help Show this screen.\n -d --devices <devices> Comma seperated GPU devices [default: 0]\n -i --identifier <identifier> Folder name [default: default-identifier]\n --from <checkpoint> Path to a checkpoint\n --ignore-optim Ignore optim when restoring from a checkpoint\n\"\"\"\n\nimport datetime\nimport glob\nimport os\nimport os.path as osp\nimport platform\nimport pprint\nimport random\nimport shlex\nimport shutil\nimport signal\nimport subprocess\nimport sys\nimport threading\n\nimport numpy as np\nimport torch\nimport yaml\nfrom docopt import docopt\n\nimport sym\nfrom sym.config import CI, CM, CO, C, load_config\nfrom sym.datasets import ShapeNetDataset\n\n\ndef git_hash():\n cmd = 'git log -n 1 --pretty=\"%h\"'\n ret = subprocess.check_output(shlex.split(cmd)).strip()\n if isinstance(ret, bytes):\n ret = ret.decode()\n return ret\n\n\ndef get_outdir(identifier):\n # load config\n name = str(datetime.datetime.now().strftime(\"%y%m%d-%H%M%S\"))\n name += \"-%s\" % git_hash()\n name += \"-%s\" % identifier\n outdir = osp.join(osp.expanduser(CI.logdir), name)\n if not osp.exists(outdir):\n os.makedirs(outdir)\n C.to_yaml(osp.join(outdir, \"config.yaml\"))\n os.system(f\"git diff HEAD > {outdir}/gitdiff.patch\")\n os.system(f\"find -name '*.py' -print0 | tar -cJf {outdir}/src.tar.xz --null -T -\")\n return outdir\n\n\ndef count_parameters(model):\n return sum(p.numel() for p in model.parameters() if p.requires_grad)\n\n\ndef main():\n args = docopt(__doc__)\n config_file = args[\"<yaml-config>\"] or \"config/shapenet.yaml\"\n C.update(C.from_yaml(filename=config_file))\n if args[\"--from\"]:\n C.io.resume_from = args[\"--from\"]\n CI.update(C.io)\n CM.update(C.model)\n CO.update(C.optim)\n pprint.pprint(C, indent=4)\n resume_from = CI.resume_from\n\n # WARNING: still not deterministic\n random.seed(0)\n np.random.seed(0)\n torch.manual_seed(0)\n\n device_name = \"cpu\"\n num_gpus = args[\"--devices\"].count(\",\") + 1\n os.environ[\"CUDA_VISIBLE_DEVICES\"] = args[\"--devices\"]\n if torch.cuda.is_available():\n device_name = \"cuda\"\n torch.backends.cudnn.benchmark = True\n torch.backends.cudnn.deterministic = True\n torch.cuda.manual_seed(0)\n print(\"Let's use\", torch.cuda.device_count(), \"GPU(s)!\")\n else:\n print(\"CUDA is not available\")\n device = torch.device(device_name)\n\n # 1. dataset\n batch_size = CM.batch_size * num_gpus\n datadir = CI.datadir\n kwargs = {\n \"batch_size\": batch_size,\n \"num_workers\": CI.num_workers,\n \"pin_memory\": True,\n }\n if CI.dataset == \"ShapeNet\":\n Dataset = ShapeNetDataset\n else:\n raise NotImplementedError\n\n train_loader = torch.utils.data.DataLoader(\n Dataset(datadir, split=\"train\"), shuffle=True, **kwargs\n )\n val_loader = torch.utils.data.DataLoader(\n Dataset(datadir, split=\"valid\"), shuffle=False, **kwargs\n )\n\n if resume_from:\n print(\"Restoring from\", resume_from)\n checkpoint = torch.load(resume_from)\n\n # 2. 
model\n model = sym.models.SymmetryNet().to(device)\n print(\"# of params:\", count_parameters(model))\n model = sym.utils.MyDataParallel(\n model, device_ids=list(range(args[\"--devices\"].count(\",\") + 1))\n )\n if resume_from:\n for module_name in list(checkpoint[\"model_state_dict\"].keys()):\n if module_name.startswith(\"module.backbone.volume_network.fc\"):\n del checkpoint[\"model_state_dict\"][module_name]\n model.load_state_dict(checkpoint[\"model_state_dict\"], strict=False)\n\n # 3. optimizer\n if CO.name == \"Adam\":\n optim = torch.optim.Adam(model.parameters(), **CO.params)\n elif CO.name == \"SGD\":\n optim = torch.optim.SGD(model.parameters(), **CO.params)\n else:\n raise NotImplementedError\n\n if resume_from and not args[\"--ignore-optim\"]:\n optim.load_state_dict(checkpoint[\"optim_state_dict\"])\n outdir = get_outdir(args[\"--identifier\"])\n shutil.copyfile(config_file, osp.join(outdir, \"config_origin.yaml\"))\n print(\"outdir:\", outdir)\n\n try:\n trainer = sym.trainer.Trainer(\n device=device,\n model=model,\n optimizer=optim,\n train_loader=train_loader,\n val_loader=val_loader,\n batch_size=batch_size,\n out=outdir,\n )\n trainer.train()\n except BaseException:\n if len(glob.glob(f\"{outdir}/viz/*\")) <= 1:\n shutil.rmtree(outdir)\n raise\n\n\nif __name__ == \"__main__\":\n main()\n"
] | [
[
"torch.load",
"torch.cuda.manual_seed",
"torch.manual_seed",
"numpy.random.seed",
"torch.cuda.device_count",
"torch.cuda.is_available",
"torch.device"
]
] |
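When restoring a checkpoint, the training script above deletes the saved weights of one sub-module (`module.backbone.volume_network.fc...`) and then calls `load_state_dict(..., strict=False)` so the remaining parameters still load. A small sketch of that key-filtering pattern with a toy model (the layer names here are hypothetical, not taken from SymmetryNet):

import torch.nn as nn

model = nn.Sequential()
model.add_module('backbone', nn.Linear(4, 4))
model.add_module('fc', nn.Linear(4, 2))

state_dict = {k: v.clone() for k, v in model.state_dict().items()}
for name in list(state_dict.keys()):
    if name.startswith('fc'):      # drop the head we want to re-initialise
        del state_dict[name]

result = model.load_state_dict(state_dict, strict=False)
print(result.missing_keys)         # ['fc.weight', 'fc.bias']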
williamscales/pytopocomplexity | [
"f739b7695066f5da40a9610d21579983a12e76ad"
] | [
"tests/test_entropy.py"
] | [
"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"Tests for `pytopocomplexity.entropy`\"\"\"\n\nfrom __future__ import (absolute_import, division, print_function,\n unicode_literals)\nfrom future.builtins import (ascii, bytes, chr, dict, filter, hex, input, int,\n map, next, oct, open, pow, range, round, str,\n super, zip)\n\nimport numpy as np\nfrom numpy.random import random_sample\n\nfrom pytopocomplexity.entropy import estimate_entropy\n\n\ndef test_entropy_is_zero_for_unimodal_function():\n \"\"\"Test that the entropy of a function with one extremum is zero.\"\"\"\n def func_one_min(x):\n \"\"\"Objective function with global minimum at ``x == 0``.\"\"\"\n return x**2\n #initial_models = 2*random_sample((100,100)) - 1\n initial_models = 2*random_sample(100) - 1\n entropy = estimate_entropy(func_one_min, initial_models, 1e-8, 1e5)\n assert entropy == 0\n"
] | [
[
"numpy.random.random_sample"
]
] |
jkomyno/lattice-submodular-maximization | [
"e03c8bcc5fcf5bf79a6ae81f145757cf3fdff7cb"
] | [
"python/benchmark/utils/powerset.py"
] | [
"import numpy as np\nimport itertools\nfrom nptyping import NDArray\nfrom typing import Iterator\nfrom ..objective import Objective\n\n\ndef powerset(f: Objective) -> Iterator[NDArray[int]]:\n \"\"\"\n Inumerate b^n possible vectors in the integer lattice.\n :param f: integer-lattice submodular function objective\n \"\"\"\n return map(lambda t: np.array([*t]),\n itertools.product(range(f.b + 1), repeat=f.n))\n"
] | [
[
"numpy.array"
]
] |
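The `powerset` helper in the row above enumerates every vector in {0, ..., b}^n with `itertools.product`, i.e. (b + 1)^n vectors in total. A standalone sketch of the same enumeration without the project's `Objective` class (b and n are chosen arbitrarily):

import itertools
import numpy as np

b, n = 2, 3   # each component ranges over {0, 1, 2}, three components
lattice = [np.array(t) for t in itertools.product(range(b + 1), repeat=n)]

assert len(lattice) == (b + 1) ** n   # 27 vectors, not b ** n
print(lattice[0], lattice[-1])        # [0 0 0] [2 2 2]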
JingqingZ/tensorlayer2 | [
"289a0402bd64f6a423aa574f10ac8ad8efcb7b66"
] | [
"examples/basic_tutorials/tutorial_cifar10_placeholder.py"
] | [
"#! /usr/bin/python\n# -*- coding: utf-8 -*-\n\nimport time\n\nimport numpy as np\nimport tensorflow as tf\n\nimport tensorlayer as tl\nfrom tensorlayer.layers import *\n\ntf.logging.set_verbosity(tf.logging.DEBUG)\ntl.logging.set_verbosity(tl.logging.DEBUG)\n\nsess = tf.InteractiveSession()\n\nX_train, y_train, X_test, y_test = tl.files.load_cifar10_dataset(shape=(-1, 32, 32, 3), plotable=False)\n\n\ndef model(x, y_, reuse):\n W_init = tf.truncated_normal_initializer(stddev=5e-2)\n W_init2 = tf.truncated_normal_initializer(stddev=0.04)\n b_init2 = tf.constant_initializer(value=0.1)\n with tf.variable_scope(\"model\", reuse=reuse):\n net = InputLayer(x, name='input')\n net = Conv2d(net, 64, (5, 5), (1, 1), act=tf.nn.relu, padding='SAME', W_init=W_init, name='cnn1')\n net = MaxPool2d(net, (3, 3), (2, 2), padding='SAME', name='pool1')\n net = LocalResponseNormLayer(net, depth_radius=4, bias=1.0, alpha=0.001 / 9.0, beta=0.75, name='norm1')\n\n net = Conv2d(net, 64, (5, 5), (1, 1), act=tf.nn.relu, padding='SAME', W_init=W_init, name='cnn2')\n net = LocalResponseNormLayer(net, depth_radius=4, bias=1.0, alpha=0.001 / 9.0, beta=0.75, name='norm2')\n net = MaxPool2d(net, (3, 3), (2, 2), padding='SAME', name='pool2')\n\n net = FlattenLayer(net, name='flatten')\n net = DenseLayer(net, 384, act=tf.nn.relu, W_init=W_init2, b_init=b_init2, name='d1relu')\n net = DenseLayer(net, 192, act=tf.nn.relu, W_init=W_init2, b_init=b_init2, name='d2relu')\n net = DenseLayer(net, 10, act=None, W_init=W_init2, name='output')\n y = net.outputs\n\n ce = tl.cost.cross_entropy(y, y_, name='cost')\n # L2 for the MLP, without this, the accuracy will be reduced by 15%.\n L2 = 0\n for p in tl.layers.get_variables_with_name('relu/W', True, True):\n L2 += tf.contrib.layers.l2_regularizer(0.004)(p)\n cost = ce + L2\n\n correct_prediction = tf.equal(tf.argmax(y, 1), y_)\n acc = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))\n\n return net, cost, acc\n\n\ndef model_batch_norm(x, y_, reuse, is_train):\n \"\"\"Batch normalization should be placed before rectifier.\"\"\"\n W_init = tf.truncated_normal_initializer(stddev=5e-2)\n W_init2 = tf.truncated_normal_initializer(stddev=0.04)\n b_init2 = tf.constant_initializer(value=0.1)\n with tf.variable_scope(\"model\", reuse=reuse):\n net = InputLayer(x, name='input')\n net = Conv2d(net, 64, (5, 5), (1, 1), padding='SAME', W_init=W_init, b_init=None, name='cnn1')\n net = BatchNormLayer(net, decay=0.99, is_train=is_train, act=tf.nn.relu, name='batch1')\n net = MaxPool2d(net, (3, 3), (2, 2), padding='SAME', name='pool1')\n\n net = Conv2d(net, 64, (5, 5), (1, 1), padding='SAME', W_init=W_init, b_init=None, name='cnn2')\n net = BatchNormLayer(net, decay=0.99, is_train=is_train, act=tf.nn.relu, name='batch2')\n net = MaxPool2d(net, (3, 3), (2, 2), padding='SAME', name='pool2')\n\n net = FlattenLayer(net, name='flatten') # output: (batch_size, 2304)\n net = DenseLayer(net, 384, act=tf.nn.relu, W_init=W_init2, b_init=b_init2, name='d1relu')\n net = DenseLayer(net, 192, act=tf.nn.relu, W_init=W_init2, b_init=b_init2, name='d2relu')\n net = DenseLayer(net, 10, act=None, W_init=W_init2, name='output')\n y = net.outputs\n\n ce = tl.cost.cross_entropy(y, y_, name='cost')\n # L2 for the MLP, without this, the accuracy will be reduced by 15%.\n L2 = 0\n for p in tl.layers.get_variables_with_name('relu/W', True, True):\n L2 += tf.contrib.layers.l2_regularizer(0.004)(p)\n cost = ce + L2\n\n correct_prediction = tf.equal(tf.argmax(y, 1), y_)\n acc = tf.reduce_mean(tf.cast(correct_prediction, 
tf.float32))\n\n return net, cost, acc\n\n\ndef distort_fn(x, is_train=False):\n \"\"\"\n The images are processed as follows:\n .. They are cropped to 24 x 24 pixels, centrally for evaluation or randomly for training.\n .. They are approximately whitened to make the model insensitive to dynamic range.\n For training, we additionally apply a series of random distortions to\n artificially increase the data set size:\n .. Randomly flip the image from left to right.\n .. Randomly distort the image brightness.\n \"\"\"\n # print('begin',x.shape, np.min(x), np.max(x))\n x = tl.prepro.crop(x, 24, 24, is_random=is_train)\n # print('after crop',x.shape, np.min(x), np.max(x))\n if is_train:\n # x = tl.prepro.zoom(x, zoom_range=(0.9, 1.0), is_random=True)\n # print('after zoom', x.shape, np.min(x), np.max(x))\n x = tl.prepro.flip_axis(x, axis=1, is_random=True)\n # print('after flip',x.shape, np.min(x), np.max(x))\n x = tl.prepro.brightness(x, gamma=0.1, gain=1, is_random=True)\n # print('after brightness',x.shape, np.min(x), np.max(x))\n # tmp = np.max(x)\n # x += np.random.uniform(-20, 20)\n # x /= tmp\n # normalize the image\n x = (x - np.mean(x)) / max(np.std(x), 1e-5) # avoid values divided by 0\n # print('after norm', x.shape, np.min(x), np.max(x), np.mean(x))\n return x\n\n\nx = tf.placeholder(dtype=tf.float32, shape=[None, 24, 24, 3], name='x')\ny_ = tf.placeholder(dtype=tf.int64, shape=[None], name='y_')\n\n# using local response normalization\n# network, cost, _ = model(x, y_, False)\n# _, cost_test, acc = model(x, y_, True)\n# you may want to try batch normalization\nnetwork, cost, _ = model_batch_norm(x, y_, False, is_train=True)\n_, cost_test, acc = model_batch_norm(x, y_, True, is_train=False)\n\n# train\nn_epoch = 50000\nlearning_rate = 0.0001\nprint_freq = 1\nbatch_size = 128\n\ntrain_params = network.all_params\ntrain_op = tf.train.AdamOptimizer(learning_rate, beta1=0.9, beta2=0.999, epsilon=1e-08,\n use_locking=False).minimize(cost, var_list=train_params)\n\nsess.run(tf.global_variables_initializer())\n\nnetwork.print_params(False)\nnetwork.print_layers()\n\nprint(' learning_rate: %f' % learning_rate)\nprint(' batch_size: %d' % batch_size)\n\nfor epoch in range(n_epoch):\n start_time = time.time()\n for X_train_a, y_train_a in tl.iterate.minibatches(X_train, y_train, batch_size, shuffle=True):\n X_train_a = tl.prepro.threading_data(X_train_a, fn=distort_fn, is_train=True) # data augmentation for training\n sess.run(train_op, feed_dict={x: X_train_a, y_: y_train_a})\n\n if epoch + 1 == 1 or (epoch + 1) % print_freq == 0:\n print(\"Epoch %d of %d took %fs\" % (epoch + 1, n_epoch, time.time() - start_time))\n # train_loss, train_acc, n_batch = 0, 0, 0\n # for X_train_a, y_train_a in tl.iterate.minibatches(\n # X_train, y_train, batch_size, shuffle=True):\n # X_train_a = tl.prepro.threading_data(X_train_a, fn=distort_fn, is_train=False) # central crop\n # err, ac = sess.run([cost_test, acc], feed_dict={x: X_train_a, y_: y_train_a})\n # train_loss += err; train_acc += ac; n_batch += 1\n # print(\" train loss: %f\" % (train_loss/ n_batch))\n # print(\" train acc: %f\" % (train_acc/ n_batch))\n test_loss, test_acc, n_batch = 0, 0, 0\n for X_test_a, y_test_a in tl.iterate.minibatches(X_test, y_test, batch_size, shuffle=False):\n X_test_a = tl.prepro.threading_data(X_test_a, fn=distort_fn, is_train=False) # central crop\n err, ac = sess.run([cost_test, acc], feed_dict={x: X_test_a, y_: y_test_a})\n test_loss += err\n test_acc += ac\n n_batch += 1\n print(\" test loss: %f\" % (test_loss / 
n_batch))\n print(\" test acc: %f\" % (test_acc / n_batch))\n"
] | [
[
"tensorflow.placeholder",
"tensorflow.constant_initializer",
"tensorflow.truncated_normal_initializer",
"tensorflow.global_variables_initializer",
"tensorflow.logging.set_verbosity",
"tensorflow.train.AdamOptimizer",
"tensorflow.variable_scope",
"tensorflow.InteractiveSession",
"tensorflow.cast",
"tensorflow.contrib.layers.l2_regularizer",
"tensorflow.argmax",
"numpy.std",
"numpy.mean"
]
] |
hotchilianalytics/zipline-broker | [
"fb475cf89ec8886db4ee6420bd9aca70c1821eab"
] | [
"tests/test_finance.py"
] | [
"#\n# Copyright 2013 Quantopian, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\nTests for the zipline.finance package\n\"\"\"\nfrom datetime import datetime, timedelta\nimport os\n\nfrom nose.tools import timed\nfrom nose.tools import nottest\n\nimport numpy as np\nimport pandas as pd\nimport pytz\nfrom six import iteritems\nfrom six.moves import range\nfrom testfixtures import TempDirectory\n\nfrom zipline.finance.blotter.simulation_blotter import SimulationBlotter\nfrom zipline.finance.execution import MarketOrder, LimitOrder\nfrom zipline.finance.metrics import MetricsTracker, load as load_metrics_set\nfrom zipline.finance.trading import SimulationParameters\nfrom zipline.data.us_equity_pricing import BcolzDailyBarReader\nfrom zipline.data.minute_bars import BcolzMinuteBarReader\nfrom zipline.data.data_portal import DataPortal\nfrom zipline.data.us_equity_pricing import BcolzDailyBarWriter\nfrom zipline.finance.slippage import FixedSlippage, FixedBasisPointsSlippage\nfrom zipline.finance.asset_restrictions import NoRestrictions\nfrom zipline.protocol import BarData\nfrom zipline.testing import write_bcolz_minute_data\nimport zipline.testing.fixtures as zf\nimport zipline.utils.factory as factory\n\nDEFAULT_TIMEOUT = 15 # seconds\nEXTENDED_TIMEOUT = 90\n\n_multiprocess_can_split_ = False\n\n\nclass FinanceTestCase(zf.WithAssetFinder,\n zf.WithTradingCalendars,\n zf.ZiplineTestCase):\n ASSET_FINDER_EQUITY_SIDS = 1, 2, 133\n start = START_DATE = pd.Timestamp('2006-01-01', tz='utc')\n end = END_DATE = pd.Timestamp('2006-12-31', tz='utc')\n\n def init_instance_fixtures(self):\n super(FinanceTestCase, self).init_instance_fixtures()\n self.zipline_test_config = {'sid': 133}\n\n # TODO: write tests for short sales\n # TODO: write a test to do massive buying or shorting.\n\n @timed(DEFAULT_TIMEOUT)\n @nottest\n def test_partially_filled_orders(self):\n\n # create a scenario where order size and trade size are equal\n # so that orders must be spread out over several trades.\n params = {\n 'trade_count': 360,\n 'trade_interval': timedelta(minutes=1),\n 'order_count': 2,\n 'order_amount': 100,\n 'order_interval': timedelta(minutes=1),\n # because we placed two orders for 100 shares each, and the volume\n # of each trade is 100, and by default you can take up 10% of the\n # bar's volume (per FixedBasisPointsSlippage, the default slippage\n # model), the simulator should spread the order into 20 trades of\n # 10 shares per order.\n 'expected_txn_count': 20,\n 'expected_txn_volume': 2 * 100,\n 'default_slippage': True\n }\n\n self.transaction_sim(**params)\n\n # same scenario, but with short sales\n params2 = {\n 'trade_count': 360,\n 'trade_interval': timedelta(minutes=1),\n 'order_count': 2,\n 'order_amount': -100,\n 'order_interval': timedelta(minutes=1),\n 'expected_txn_count': 20,\n 'expected_txn_volume': 2 * -100,\n 'default_slippage': True\n }\n\n self.transaction_sim(**params2)\n\n @timed(DEFAULT_TIMEOUT)\n @nottest\n def test_collapsing_orders(self):\n # create a scenario 
where order.amount <<< trade.volume\n # to test that several orders can be covered properly by one trade,\n # but are represented by multiple transactions.\n params1 = {\n 'trade_count': 6,\n 'trade_interval': timedelta(hours=1),\n 'order_count': 24,\n 'order_amount': 1,\n 'order_interval': timedelta(minutes=1),\n # because we placed an orders totaling less than 25% of one trade\n # the simulator should produce just one transaction.\n 'expected_txn_count': 24,\n 'expected_txn_volume': 24\n }\n self.transaction_sim(**params1)\n\n # second verse, same as the first. except short!\n params2 = {\n 'trade_count': 6,\n 'trade_interval': timedelta(hours=1),\n 'order_count': 24,\n 'order_amount': -1,\n 'order_interval': timedelta(minutes=1),\n 'expected_txn_count': 24,\n 'expected_txn_volume': -24\n }\n self.transaction_sim(**params2)\n\n # Runs the collapsed trades over daily trade intervals.\n # Ensuring that our delay works for daily intervals as well.\n params3 = {\n 'trade_count': 6,\n 'trade_interval': timedelta(days=1),\n 'order_count': 24,\n 'order_amount': 1,\n 'order_interval': timedelta(minutes=1),\n 'expected_txn_count': 24,\n 'expected_txn_volume': 24\n }\n self.transaction_sim(**params3)\n\n @timed(DEFAULT_TIMEOUT)\n @nottest\n def test_alternating_long_short(self):\n # create a scenario where we alternate buys and sells\n params1 = {\n 'trade_count': int(6.5 * 60 * 4),\n 'trade_interval': timedelta(minutes=1),\n 'order_count': 4,\n 'order_amount': 10,\n 'order_interval': timedelta(hours=24),\n 'alternate': True,\n 'complete_fill': True,\n 'expected_txn_count': 4,\n 'expected_txn_volume': 0 # equal buys and sells\n }\n self.transaction_sim(**params1)\n\n def transaction_sim(self, **params):\n \"\"\"This is a utility method that asserts expected\n results for conversion of orders to transactions given a\n trade history\n \"\"\"\n trade_count = params['trade_count']\n trade_interval = params['trade_interval']\n order_count = params['order_count']\n order_amount = params['order_amount']\n order_interval = params['order_interval']\n expected_txn_count = params['expected_txn_count']\n expected_txn_volume = params['expected_txn_volume']\n\n # optional parameters\n # ---------------------\n # if present, alternate between long and short sales\n alternate = params.get('alternate')\n\n # if present, expect transaction amounts to match orders exactly.\n complete_fill = params.get('complete_fill')\n\n asset1 = self.asset_finder.retrieve_asset(1)\n with TempDirectory() as tempdir:\n\n if trade_interval < timedelta(days=1):\n sim_params = factory.create_simulation_parameters(\n start=self.start,\n end=self.end,\n data_frequency=\"minute\"\n )\n\n minutes = self.trading_calendar.minutes_window(\n sim_params.first_open,\n int((trade_interval.total_seconds() / 60) * trade_count)\n + 100)\n\n price_data = np.array([10.1] * len(minutes))\n assets = {\n asset1.sid: pd.DataFrame({\n \"open\": price_data,\n \"high\": price_data,\n \"low\": price_data,\n \"close\": price_data,\n \"volume\": np.array([100] * len(minutes)),\n \"dt\": minutes\n }).set_index(\"dt\")\n }\n\n write_bcolz_minute_data(\n self.trading_calendar,\n self.trading_calendar.sessions_in_range(\n self.trading_calendar.minute_to_session_label(\n minutes[0]\n ),\n self.trading_calendar.minute_to_session_label(\n minutes[-1]\n )\n ),\n tempdir.path,\n iteritems(assets),\n )\n\n equity_minute_reader = BcolzMinuteBarReader(tempdir.path)\n\n data_portal = DataPortal(\n self.asset_finder, self.trading_calendar,\n 
first_trading_day=equity_minute_reader.first_trading_day,\n equity_minute_reader=equity_minute_reader,\n )\n else:\n sim_params = factory.create_simulation_parameters(\n data_frequency=\"daily\"\n )\n\n days = sim_params.sessions\n\n assets = {\n 1: pd.DataFrame({\n \"open\": [10.1] * len(days),\n \"high\": [10.1] * len(days),\n \"low\": [10.1] * len(days),\n \"close\": [10.1] * len(days),\n \"volume\": [100] * len(days),\n \"day\": [day.value for day in days]\n }, index=days)\n }\n\n path = os.path.join(tempdir.path, \"testdata.bcolz\")\n BcolzDailyBarWriter(path, self.trading_calendar, days[0],\n days[-1]).write(\n assets.items()\n )\n\n equity_daily_reader = BcolzDailyBarReader(path)\n\n data_portal = DataPortal(\n self.asset_finder, self.trading_calendar,\n first_trading_day=equity_daily_reader.first_trading_day,\n equity_daily_reader=equity_daily_reader,\n )\n\n if \"default_slippage\" not in params or \\\n not params[\"default_slippage\"]:\n slippage_func = FixedBasisPointsSlippage()\n else:\n slippage_func = None\n\n blotter = SimulationBlotter(slippage_func)\n\n start_date = sim_params.first_open\n\n if alternate:\n alternator = -1\n else:\n alternator = 1\n\n tracker = MetricsTracker(\n trading_calendar=self.trading_calendar,\n first_session=sim_params.start_session,\n last_session=sim_params.end_session,\n capital_base=sim_params.capital_base,\n emission_rate=sim_params.emission_rate,\n data_frequency=sim_params.data_frequency,\n asset_finder=self.asset_finder,\n metrics=load_metrics_set('none'),\n )\n\n # replicate what tradesim does by going through every minute or day\n # of the simulation and processing open orders each time\n if sim_params.data_frequency == \"minute\":\n ticks = minutes\n else:\n ticks = days\n\n transactions = []\n\n order_list = []\n order_date = start_date\n for tick in ticks:\n blotter.current_dt = tick\n if tick >= order_date and len(order_list) < order_count:\n # place an order\n direction = alternator ** len(order_list)\n order_id = blotter.order(\n asset1,\n order_amount * direction,\n MarketOrder(),\n )\n order_list.append(blotter.orders[order_id])\n order_date = order_date + order_interval\n # move after market orders to just after market next\n # market open.\n if order_date.hour >= 21:\n if order_date.minute >= 00:\n order_date = order_date + timedelta(days=1)\n order_date = order_date.replace(hour=14, minute=30)\n else:\n bar_data = BarData(\n data_portal=data_portal,\n simulation_dt_func=lambda: tick,\n data_frequency=sim_params.data_frequency,\n trading_calendar=self.trading_calendar,\n restrictions=NoRestrictions(),\n )\n txns, _, closed_orders = blotter.get_transactions(bar_data)\n for txn in txns:\n tracker.process_transaction(txn)\n transactions.append(txn)\n\n blotter.prune_orders(closed_orders)\n\n for i in range(order_count):\n order = order_list[i]\n self.assertEqual(order.asset, asset1)\n self.assertEqual(order.amount, order_amount * alternator ** i)\n\n if complete_fill:\n self.assertEqual(len(transactions), len(order_list))\n\n total_volume = 0\n for i in range(len(transactions)):\n txn = transactions[i]\n total_volume += txn.amount\n if complete_fill:\n order = order_list[i]\n self.assertEqual(order.amount, txn.amount)\n\n self.assertEqual(total_volume, expected_txn_volume)\n\n self.assertEqual(len(transactions), expected_txn_count)\n\n if total_volume == 0:\n self.assertRaises(KeyError, lambda: tracker.positions[asset1])\n else:\n cumulative_pos = tracker.positions[asset1]\n self.assertEqual(total_volume, 
cumulative_pos.amount)\n\n # the open orders should not contain the asset.\n oo = blotter.open_orders\n self.assertNotIn(\n asset1,\n oo,\n \"Entry is removed when no open orders\"\n )\n\n def test_blotter_processes_splits(self):\n blotter = SimulationBlotter(equity_slippage=FixedSlippage())\n\n # set up two open limit orders with very low limit prices,\n # one for sid 1 and one for sid 2\n asset1 = self.asset_finder.retrieve_asset(1)\n asset2 = self.asset_finder.retrieve_asset(2)\n asset133 = self.asset_finder.retrieve_asset(133)\n\n blotter.order(asset1, 100, LimitOrder(10, asset=asset1))\n blotter.order(asset2, 100, LimitOrder(10, asset=asset2))\n\n # send in splits for assets 133 and 2. We have no open orders for\n # asset 133 so it should be ignored.\n blotter.process_splits([(asset133, 0.5), (asset2, 0.3333)])\n\n for asset in [asset1, asset2]:\n order_lists = blotter.open_orders[asset]\n self.assertIsNotNone(order_lists)\n self.assertEqual(1, len(order_lists))\n\n asset1_order = blotter.open_orders[1][0]\n asset2_order = blotter.open_orders[2][0]\n\n # make sure the asset1 order didn't change\n self.assertEqual(100, asset1_order.amount)\n self.assertEqual(10, asset1_order.limit)\n self.assertEqual(1, asset1_order.asset)\n\n # make sure the asset2 order did change\n # to 300 shares at 3.33\n self.assertEqual(300, asset2_order.amount)\n self.assertEqual(3.33, asset2_order.limit)\n self.assertEqual(2, asset2_order.asset)\n\n\nclass SimParamsTestCase(zf.WithTradingCalendars, zf.ZiplineTestCase):\n \"\"\"\n Tests for date management utilities in zipline.finance.trading.\n \"\"\"\n def test_simulation_parameters(self):\n sp = SimulationParameters(\n start_session=pd.Timestamp(\"2008-01-01\", tz='UTC'),\n end_session=pd.Timestamp(\"2008-12-31\", tz='UTC'),\n capital_base=100000,\n trading_calendar=self.trading_calendar,\n )\n\n self.assertTrue(sp.last_close.month == 12)\n self.assertTrue(sp.last_close.day == 31)\n\n @timed(DEFAULT_TIMEOUT)\n def test_sim_params_days_in_period(self):\n\n # January 2008\n # Su Mo Tu We Th Fr Sa\n # 1 2 3 4 5\n # 6 7 8 9 10 11 12\n # 13 14 15 16 17 18 19\n # 20 21 22 23 24 25 26\n # 27 28 29 30 31\n\n params = SimulationParameters(\n start_session=pd.Timestamp(\"2007-12-31\", tz='UTC'),\n end_session=pd.Timestamp(\"2008-01-07\", tz='UTC'),\n capital_base=100000,\n trading_calendar=self.trading_calendar,\n )\n\n expected_trading_days = (\n datetime(2007, 12, 31, tzinfo=pytz.utc),\n # Skip new years\n # holidays taken from: http://www.nyse.com/press/1191407641943.html\n datetime(2008, 1, 2, tzinfo=pytz.utc),\n datetime(2008, 1, 3, tzinfo=pytz.utc),\n datetime(2008, 1, 4, tzinfo=pytz.utc),\n # Skip Saturday\n # Skip Sunday\n datetime(2008, 1, 7, tzinfo=pytz.utc)\n )\n\n num_expected_trading_days = 5\n self.assertEquals(\n num_expected_trading_days,\n len(params.sessions)\n )\n np.testing.assert_array_equal(expected_trading_days,\n params.sessions.tolist())\n"
] | [
[
"pandas.Timestamp"
]
] |
anonymous29387491/iclr2022 | [
"60c5727f8519e64610b632d074510587fb7ff692"
] | [
"Tests/attribution_calculation/ShapleyExcess/iterate_drug.py"
] | [
"from torchvision import datasets, transforms\r\nimport torch\r\nimport torch.nn as nn\r\nimport torch.nn.functional as F\r\nfrom torch.utils.data import DataLoader\r\nfrom argparse import ArgumentParser \r\nfrom tqdm import tqdm\r\nimport time\r\nimport numpy as np\r\n\r\n\r\n###########\r\n# file imports / path issues\r\nimport os\r\nimport sys\r\nfrom pathlib import Path\r\n\r\npath = Path(os.path.abspath(__file__)).parents[3]\r\nos.chdir(path)\r\nsys.path.append('./BivariateShapley')\r\n\r\nfrom utils_shapley import *\r\nfrom shapley_kernel import Bivariate_KernelExplainer\r\n\r\nimport pickle\r\nimport os\r\n\r\n\r\nimport shap\r\n\r\n############################################\r\n# Define Test Parameters\r\n############################################\r\n\r\n\r\nparser = ArgumentParser(description='get phi plus matrices')\r\n\r\nparser.add_argument('--dataset_min_index', type = int,default=0,\r\n help='iterate over dataset starting from min_index')\r\n\r\nparser.add_argument('--dataset_samples', type = int,default=500,\r\n help='number of samples, starting from min_index')\r\nparser.add_argument('--verbose', action='store_true', default=False,\r\n help='boolean, use tqdm')\r\n\r\nargs = parser.parse_args()\r\n\r\nmin_index = args.dataset_min_index\r\nmax_index = min_index + args.dataset_samples\r\n\r\nbaseline = 'excess'\r\nsave_path = './Files/results_attribution/drug_%s' % (baseline)\r\nmake_dir(save_path)\r\nmodel_path = './Files/trained_bb_models/model_drug.pkl'\r\ndata_path = './Files/Data/drug.h5'\r\n\r\n\r\n\r\nfrom shapley_value_functions import *\r\n# load model\r\nimport pickle\r\nwith open(model_path, 'rb') as fid:\r\n model = pickle.load(fid)\r\nmodel_eval = eval_RF_binary(model)\r\n\r\n# Data Sample\r\nfrom shapley_datasets import drug\r\ndataset = drug(data_path = data_path, train = False)\r\ndataloader = DataLoader(dataset, batch_size = 1, shuffle = False, num_workers = 0)\r\n\r\ndataset_train = drug(data_path = data_path, train = True)\r\ndataloader_train = DataLoader(dataset_train, batch_size = 10, shuffle = True, num_workers = 0)\r\ndata_iterator = iter(dataloader_train)\r\n\r\n#######################\r\n# Explainer\r\n#######################\r\n\r\n# initialize variables\r\nx_list = []\r\nlabel_list = []\r\nunary_list = []\r\nmatrix_list = []\r\ntime_list = []\r\n\r\ndb_ind = {}\r\n\r\ntime1 = time.time()\r\nif args.verbose:\r\n batch_iterator = tqdm(enumerate(dataloader), total = max_index)\r\nelse:\r\n batch_iterator = enumerate(dataloader)\r\n\r\nfor idx, (x, label) in batch_iterator:\r\n\r\n # advance batch iterator\r\n if idx < min_index:\r\n continue\r\n elif idx == max_index:\r\n break\r\n\r\n time_start = time.time()\r\n label = label[0].item()\r\n #######################################\r\n # Calculate Shapley\r\n #######################################\r\n baseline_value = 0\r\n ########################################\r\n x = tensor2numpy(x) \r\n x_train = np.zeros_like(x)\r\n n_feat = x.reshape(-1).shape[0]\r\n matrix = np.zeros((n_feat, n_feat))\r\n\r\n model_eval.init_baseline(x, baseline_value = baseline_value)\r\n explainer = shap.KernelExplainer(model_eval, x_train) \r\n shapley_values = explainer.shap_values(x, silent = True, l1_reg = False)\r\n\r\n for i in range(n_feat):\r\n for j in range(i+1, n_feat):\r\n model_eval.init_baseline(x, j = j, i = i, baseline_value = baseline_value)\r\n x_ = np_collapse(x, index = j) # remove column j from x\r\n explainer = shap.KernelExplainer(model_eval, np.zeros_like(x_)+baseline_value)\r\n shapley_coalition = 
explainer.shap_values(x_, silent = True, l1_reg = False)\r\n shapley_coalition = np_insert(shapley_coalition, np.zeros((x.shape[0], 1)), index = j)\r\n\r\n matrix[i, j] = 0.5 * (shapley_coalition[0,i] - shapley_values[0,i] - shapley_values[0,j])\r\n matrix[j, i] = matrix[i,j]\r\n\r\n #######################################\r\n\r\n\r\n # save individual shapley\r\n time_list.append(time.time() - time_start)\r\n x_list.append(x)\r\n label_list.append(label)\r\n unary_list.append(shapley_values)\r\n matrix_list.append(matrix)\r\n\r\n\r\n\r\n if idx % 5 == 0:\r\n if not args.verbose:\r\n print('=====================')\r\n print('samples:' + str(idx+1))\r\n print('time per sample: ' + str(np.array(time_list).mean()))\r\n '''\r\n db_ind['x_list'] = x_list\r\n db_ind['label_list'] = label_list\r\n db_ind['unary_list'] = unary_list\r\n db_ind['matrix_list'] = matrix_list\r\n db_ind['time'] = time_list\r\n save_dict(db_ind, os.path.join(save_path, '%s-%s_checkpoint.pkl' % (str(min_index), str(max_index-1))))\r\n '''\r\n\r\ndb_ind['x_list'] = x_list\r\ndb_ind['label_list'] = label_list\r\ndb_ind['unary_list'] = unary_list\r\ndb_ind['matrix_list'] = matrix_list\r\ndb_ind['time_list'] = time_list\r\nsave_dict(db_ind, os.path.join(save_path, '%s-%s.pkl' % (str(min_index), str(max_index-1))))\r\n#os.remove(os.path.join(save_path, '%s-%s_checkpoint.pkl' % (str(min_index), str(max_index-1))))\r\nprint('done!')\r\n\r\n"
] | [
[
"numpy.array",
"torch.utils.data.DataLoader",
"numpy.zeros_like",
"numpy.zeros"
]
] |
zhangyuejoslin/Recurrent-VLN-BERT | [
"f9bc81c297d6ad04b6b846b4d702a8f7bb4544ab"
] | [
"r2r_src_update/train.py"
] | [
"import torch\n\nimport os\nimport time\nimport json\nimport random\nimport numpy as np\nfrom collections import defaultdict\n\nfrom utils import read_vocab, write_vocab, build_vocab, padding_idx, timeSince, read_img_features, print_progress, roi_img_features\nimport utils\nfrom env import R2RBatch\nfrom agent import Seq2SeqAgent\nfrom eval import Evaluation\nfrom param import args\n\nimport warnings\nwarnings.filterwarnings(\"ignore\")\nfrom tensorboardX import SummaryWriter\n\nfrom vlnbert.vlnbert_init import get_tokenizer\n\nlog_dir = '/home/joslin/Recurrent-VLN-BERT/snap/%s' % args.name\nif not os.path.exists(log_dir):\n os.makedirs(log_dir)\n\nIMAGENET_FEATURES = 'img_features/ResNet-152-imagenet.tsv'\nPLACE365_FEATURES = '/home/hlr/shared/data/joslin/img_features/ResNet-152-places365.tsv'\n#PLACE365_FEATURES = '/home/hlr/shared/data/joslin/img_features/CLIP-ViT-B-32-views.tsv'\nresult_path = \"/home/joslin/Recurrent-VLN-BERT/result/\"\nexperiment_time = time.strftime(\"%Y%m%d-%H%M%S\", time.gmtime())\n\nif args.features == 'imagenet':\n features = IMAGENET_FEATURES\nelif args.features == 'places365':\n features = PLACE365_FEATURES\n\nfeedback_method = args.feedback # teacher or sample\n\nprint(args); print('')\n\n\n''' train the listener '''\ndef train(train_env, tok, n_iters, log_every=2000, val_envs={}, aug_env=None):\n writer = SummaryWriter(log_dir=log_dir)\n listner = Seq2SeqAgent(train_env, \"\", tok, args.maxAction)\n\n record_file = open('./logs/' + args.name + '.txt', 'a')\n record_file.write(str(args) + '\\n\\n')\n record_file.close()\n\n start_iter = 0\n if args.load is not None:\n if args.aug is None:\n start_iter = listner.load(os.path.join(args.load))\n print(\"\\nLOAD the model from {}, iteration \".format(args.load, start_iter))\n else:\n load_iter = listner.load(os.path.join(args.load))\n print(\"\\nLOAD the model from {}, iteration \".format(args.load, load_iter))\n\n start = time.time()\n print('\\nListener training starts, start iteration: %s' % str(start_iter))\n\n best_val = {'val_unseen': {\"spl\": 0., \"sr\": 0., \"state\":\"\", 'update':False}}\n\n for idx in range(start_iter, start_iter+n_iters, log_every):\n listner.logs = defaultdict(list)\n interval = min(log_every, n_iters-idx)\n iter = idx + interval\n\n # Train for log_every interval\n if aug_env is None:\n listner.env = train_env\n listner.train(interval, feedback=feedback_method) # Train interval iters\n else:\n jdx_length = len(range(interval // 2))\n for jdx in range(interval // 2):\n # Train with GT data\n listner.env = train_env\n args.ml_weight = 0.2\n listner.train(1, feedback=feedback_method)\n\n # Train with Augmented data\n listner.env = aug_env\n args.ml_weight = 0.2\n listner.train(1, feedback=feedback_method)\n\n print_progress(jdx, jdx_length, prefix='Progress:', suffix='Complete', bar_length=50)\n\n # Log the training stats to tensorboard\n total = max(sum(listner.logs['total']), 1)\n length = max(len(listner.logs['critic_loss']), 1)\n critic_loss = sum(listner.logs['critic_loss']) / total\n RL_loss = sum(listner.logs['RL_loss']) / max(len(listner.logs['RL_loss']), 1)\n IL_loss = sum(listner.logs['IL_loss']) / max(len(listner.logs['IL_loss']), 1)\n entropy = sum(listner.logs['entropy']) / total\n writer.add_scalar(\"loss/critic\", critic_loss, idx)\n writer.add_scalar(\"policy_entropy\", entropy, idx)\n writer.add_scalar(\"loss/RL_loss\", RL_loss, idx)\n writer.add_scalar(\"loss/IL_loss\", IL_loss, idx)\n writer.add_scalar(\"total_actions\", total, idx)\n 
writer.add_scalar(\"max_length\", length, idx)\n # print(\"total_actions\", total, \", max_length\", length)\n\n # Run validation\n loss_str = \"iter {}\".format(iter)\n for env_name, (env, evaluator) in val_envs.items():\n listner.env = env\n\n # Get validation distance from goal under test evaluation conditions\n listner.test(use_dropout=False, feedback='argmax', iters=None)\n result = listner.get_results()\n score_summary, _ = evaluator.score(result)\n loss_str += \", %s \" % env_name\n for metric, val in score_summary.items():\n if metric in ['spl']:\n writer.add_scalar(\"spl/%s\" % env_name, val, idx)\n if env_name in best_val:\n if val > best_val[env_name]['spl']:\n best_val[env_name]['spl'] = val\n best_val[env_name]['update'] = True\n elif (val == best_val[env_name]['spl']) and (score_summary['success_rate'] > best_val[env_name]['sr']):\n best_val[env_name]['spl'] = val\n best_val[env_name]['update'] = True\n loss_str += ', %s: %.4f' % (metric, val)\n\n record_file = open('./logs/' + args.name + '.txt', 'a')\n record_file.write(loss_str + '\\n')\n record_file.close()\n\n for env_name in best_val:\n if best_val[env_name]['update']:\n best_val[env_name]['state'] = 'Iter %d %s' % (iter, loss_str)\n best_val[env_name]['update'] = False\n listner.save(idx, os.path.join(\"snap\", args.name, \"state_dict\", \"best_%s\" % (env_name)))\n else:\n listner.save(idx, os.path.join(\"snap\", args.name, \"state_dict\", \"latest_dict\"))\n\n print(('%s (%d %d%%) %s' % (timeSince(start, float(iter)/n_iters),\n iter, float(iter)/n_iters*100, loss_str)))\n\n with open(result_path+str(experiment_time)+\".txt\", \"a\") as f_result:\n f_result.write(('%s (%d %d%%) %s' % (timeSince(start, float(iter)/n_iters),\n iter, float(iter)/n_iters*100, loss_str)))\n f_result.write('\\n')\n\n if iter % 1000 == 0:\n print(\"BEST RESULT TILL NOW\")\n for env_name in best_val:\n print(env_name, best_val[env_name]['state'])\n\n record_file = open('./logs/' + args.name + '.txt', 'a')\n record_file.write('BEST RESULT TILL NOW: ' + env_name + ' | ' + best_val[env_name]['state'] + '\\n')\n record_file.close()\n\n listner.save(idx, os.path.join(\"snap\", args.name, \"state_dict\", \"LAST_iter%d\" % (idx)))\n\n\ndef valid(train_env, tok, val_envs={}):\n agent = Seq2SeqAgent(train_env, \"\", tok, args.maxAction)\n\n print(\"Loaded the listener model at iter %d from %s\" % (agent.load(args.load), args.load))\n\n for env_name, (env, evaluator) in val_envs.items():\n agent.logs = defaultdict(list)\n agent.env = env\n\n iters = None\n agent.test(use_dropout=False, feedback='argmax', iters=iters)\n result = agent.get_results()\n\n if env_name != '':\n score_summary, _ = evaluator.score(result)\n loss_str = \"Env name: %s\" % env_name\n for metric,val in score_summary.items():\n loss_str += ', %s: %.4f' % (metric, val)\n print(loss_str)\n\n # if args.submit:\n json.dump(\n result,\n open(os.path.join(log_dir, \"submit_%s.json\" % env_name), 'w'),\n sort_keys=True, indent=4, separators=(',', ': ')\n )\n \n # YZ: print the sorrted tokens\n '''\n json.dump(\n agent.sort_tokens,\n open(os.path.join(log_dir, \"instr_%s.json\" % env_name), 'w'),\n sort_keys=True, indent=4, separators=(',', ': ')\n )\n '''\n # YZ: output the heatmap of transformer attention\n #np.save(\"/VL/space/zhan1624/Recurrent-VLN-BERT/attent_heatmap/mean/third_steps.npy\", agent.atten_heat, allow_pickle=True)\n # if env_name == \"val_seen\":\n # np.save(\"/VL/space/zhan1624/Recurrent-VLN-BERT/attent_heatmap/all/first_step_original.npy\", agent.obj_token_attn, 
allow_pickle=True)\n \n\ndef setup():\n torch.manual_seed(1)\n torch.cuda.manual_seed(1)\n random.seed(0)\n np.random.seed(0)\n\ndef train_val(test_only=False):\n ''' Train on the training set, and validate on seen and unseen splits. '''\n setup()\n tok = get_tokenizer(args)\n\n feat_dict = read_img_features(features, test_only=test_only)\n \n if args.using_obj:\n obj_dict = np.load(args.obj_img_feat_path, allow_pickle=True).item()\n else:\n obj_dict = None\n\n if test_only:\n featurized_scans = None\n val_env_names = ['val_train_seen']\n else:\n featurized_scans = set([key.split(\"_\")[0] for key in list(feat_dict.keys())])\n #val_env_names = ['val_train_seen', 'val_seen', 'val_unseen']\n val_env_names = ['val_seen', 'val_unseen']\n\n train_env = R2RBatch(feat_dict, batch_size=args.batchSize, splits=['train'], tokenizer=tok, obj_store=obj_dict)\n from collections import OrderedDict\n\n if args.submit:\n val_env_names.append('test')\n else:\n pass\n\n val_envs = OrderedDict(\n ((split,\n (R2RBatch(feat_dict, batch_size=args.batchSize, splits=[split], tokenizer=tok, obj_store=obj_dict),\n Evaluation([split], featurized_scans, tok))\n )\n for split in val_env_names\n )\n )\n\n if args.train == 'listener':\n train(train_env, tok, args.iters, val_envs=val_envs)\n elif args.train == 'validlistener':\n valid(train_env, tok, val_envs=val_envs)\n else:\n assert False\n\ndef train_val_augment(test_only=False):\n \"\"\"\n Train the listener with the augmented data\n \"\"\"\n setup()\n\n # Create a batch training environment that will also preprocess text\n tok_bert = get_tokenizer(args)\n\n # Load the env img features\n feat_dict = read_img_features(features, test_only=test_only)\n #feat_dict = roi_img_features(features)\n if test_only:\n featurized_scans = None\n val_env_names = ['val_train_seen']\n else:\n featurized_scans = set([key.split(\"_\")[0] for key in list(feat_dict.keys())])\n val_env_names = ['val_seen', 'val_unseen']\n\n # Load the augmentation data\n aug_path = args.aug\n # Create the training environment\n train_env = R2RBatch(feat_dict, batch_size=args.batchSize, splits=['train'], tokenizer=tok_bert)\n aug_env = R2RBatch(feat_dict, batch_size=args.batchSize, splits=[aug_path], tokenizer=tok_bert, name='aug')\n\n # Setup the validation data\n val_envs = {split: (R2RBatch(feat_dict, batch_size=args.batchSize, splits=[split], tokenizer=tok_bert),\n Evaluation([split], featurized_scans, tok_bert))\n for split in val_env_names}\n\n # Start training\n train(train_env, tok_bert, args.iters, val_envs=val_envs, aug_env=aug_env)\n\n\nif __name__ == \"__main__\":\n if args.train in ['listener', 'validlistener']:\n train_val(test_only=args.test_only)\n elif args.train == 'auglistener':\n train_val_augment(test_only=args.test_only)\n else:\n assert False\n"
] | [
[
"torch.cuda.manual_seed",
"numpy.load",
"torch.manual_seed",
"numpy.random.seed"
]
] |
alexjercan/unsupervised-segmentation | [
"172273fef52df3771d8de7c167fb0910f4079733"
] | [
"fcntest.py"
] | [
"from metrics import MetricFunctionNYUv2, print_single_error\nfrom model import SupervisedLossFunction\nfrom torch.utils.data import DataLoader\nfrom torchvision import transforms\nfrom nyuv2 import NYUv2\nfrom tqdm import tqdm\nfrom general import generate_layers, load_checkpoint, tensors_to_device\nimport torch\nfrom torchvision.models.segmentation.segmentation import fcn_resnet50\n\nnum_layers = 3\n\n\ndef runmodel(model, imgs, depths):\n layers = generate_layers(imgs, depths, num_layers)\n x = [model(x)['out'] for x in layers]\n return torch.stack(x, dim=-1)\n\n\ndef run_test_nyuv2(model, dataloader, loss_fn, metric_fn):\n loop = tqdm(dataloader, position=0, leave=True)\n\n for i, tensors in enumerate(loop):\n imgs, seg13, normals, depths = tensors_to_device(tensors, DEVICE)\n with torch.no_grad():\n predictions = runmodel(model, imgs, depths)\n\n loss_fn(predictions, (normals, depths))\n metric_fn.evaluate(predictions, (seg13, normals, depths))\n loop.close()\n\n\nDEVICE = \"cuda\" if torch.cuda.is_available() else \"cpu\"\n\nmodel = fcn_resnet50(pretrained=False, num_classes=14)\nmodel = model.to(DEVICE)\nepoch_idx, model = load_checkpoint(model, \"fcnmodel.pth\", DEVICE)\n\nt = transforms.Compose([transforms.Resize((256, 256)), transforms.ToTensor()])\ntest_dataset = NYUv2(root=\"../NYUv2\", download=True, rgb_transform=t, seg_transform=t, sn_transform=t, depth_transform=t, train=False)\ndataloader = DataLoader(test_dataset, batch_size=2, shuffle=True)\n\nloss_fn = SupervisedLossFunction()\nmetric_fn = MetricFunctionNYUv2(2)\n\nmodel.eval()\nrun_test_nyuv2(model, dataloader, loss_fn, metric_fn)\nprint_single_error(epoch_idx, loss_fn.show(), metric_fn.show())"
] | [
[
"torch.utils.data.DataLoader",
"torch.stack",
"torch.cuda.is_available",
"torch.no_grad"
]
] |
dccastro/NDFlow | [
"1e46cf00e78068d3c78281b42aa8aaed310e53c9"
] | [
"ndflow/util.py"
] | [
"import os\n\nimport numpy as np\n\nimport ndflow\nfrom ndflow.models.mixture import MixtureModel\n\n\ndef list_images(imgs_dir):\n import SimpleITK as sitk\n\n for filename in os.listdir(imgs_dir):\n path = os.path.join(imgs_dir, filename)\n reader = sitk.ImageFileReader()\n reader.SetFileName(path)\n try:\n reader.ReadImageInformation()\n yield filename\n except RuntimeError:\n continue # Probably not an image file, skip\n\n\ndef list_gmms(gmms_dir):\n return (filename for filename in os.listdir(gmms_dir)\n if filename.endswith(ndflow.GMM_FILENAME_SUFFIX))\n\n\ndef list_matches(matches_dir):\n return (filename for filename in os.listdir(matches_dir)\n if filename.endswith(ndflow.MATCH_FILENAME_SUFFIX))\n\n\ndef quantise(data, levels: int = None):\n \"\"\"Quantise data into discrete values, similarly to a histogram.\n\n Parameters\n ----------\n data : array_like\n Input data array.\n levels : int or None, optional\n Number of levels at which to quantise the data. If `None`, data will be cast to `int` and\n integer values in the data range will be used.\n\n Returns\n -------\n values : np.ndarray\n Values to which `data` was quantised.\n weights : np.ndarray\n Array of counts of items collapsed into each of the `values`.\n \"\"\"\n data = np.asarray(data).flatten()\n if levels is None:\n data = data.astype(int)\n data_min = data.min()\n weights = np.bincount(data - data_min)\n values = np.arange(len(weights), dtype=int) + data_min\n else:\n weights, bins = np.histogram(data, bins=levels, density=False)\n values = .5 * (bins[:-1] + bins[1:]) # Bin centres\n return values, weights\n\n\ndef plot_gmm(gmm: MixtureModel, x, values=None, weights=None, ax=None, **kwargs):\n \"\"\"Plot a Gaussian mixture model (GMM) density.\n\n Parameters\n ----------\n gmm : ndflow.models.mixture.MixtureModel\n x : array_like\n Values at which to evaluate the GMM likelihood.\n values, weights : np.ndarray, optional\n Quantised data distribution as computed by `quantise()`. If given, will plot a histogram\n alongside the GMM density.\n ax : matplotlib.axes.Axes, optional\n Axes onto which to draw. Defaults to the current axes.\n kwargs\n Keyword arguments passed through to the `plot()` call.\n \"\"\"\n import matplotlib.pyplot as plt\n\n if ax is None:\n ax = plt.gca()\n\n if values is not None and weights is not None:\n # Compute histogram bars' parameters in case values are not evenly spaced\n widths = np.empty(values.shape[0])\n widths[1:] = values[1:] - values[:-1]\n widths[0] = widths[1]\n edges = values - .5 * widths\n heights = weights / (weights.sum() * widths)\n\n ax.bar(edges, heights, widths, align='edge', linewidth=0, alpha=.5)\n\n ax.plot(x, gmm.marginal_likelihood(x), **kwargs)\n"
] | [
[
"numpy.bincount",
"numpy.empty",
"numpy.histogram",
"matplotlib.pyplot.gca",
"numpy.asarray"
]
] |
AIVIS-inc/mmsegmentation | [
"e2b13de52e970215be566067cab7bd880010f062"
] | [
"mmseg/models/backbones/resnet.py"
] | [
"import warnings\n\nimport torch.nn as nn\nimport torch.utils.checkpoint as cp\nfrom mmcv.cnn import build_conv_layer, build_norm_layer, build_plugin_layer\nfrom mmcv.runner import BaseModule\nfrom mmcv.utils.parrots_wrapper import _BatchNorm\n\nfrom ..builder import BACKBONES\nfrom ..utils import ResLayer\n\n\nclass BasicBlock(BaseModule):\n \"\"\"Basic block for ResNet.\"\"\"\n\n expansion = 1\n\n def __init__(self,\n inplanes,\n planes,\n stride=1,\n dilation=1,\n downsample=None,\n style='pytorch',\n with_cp=False,\n conv_cfg=None,\n norm_cfg=dict(type='BN'),\n dcn=None,\n plugins=None,\n init_cfg=None):\n super(BasicBlock, self).__init__(init_cfg)\n assert dcn is None, 'Not implemented yet.'\n assert plugins is None, 'Not implemented yet.'\n\n self.norm1_name, norm1 = build_norm_layer(norm_cfg, planes, postfix=1)\n self.norm2_name, norm2 = build_norm_layer(norm_cfg, planes, postfix=2)\n\n self.conv1 = build_conv_layer(\n conv_cfg,\n inplanes,\n planes,\n 3,\n stride=stride,\n padding=dilation,\n dilation=dilation,\n bias=False)\n self.add_module(self.norm1_name, norm1)\n self.conv2 = build_conv_layer(\n conv_cfg, planes, planes, 3, padding=1, bias=False)\n self.add_module(self.norm2_name, norm2)\n\n self.relu = nn.ReLU(inplace=True)\n self.downsample = downsample\n self.stride = stride\n self.dilation = dilation\n self.with_cp = with_cp\n\n @property\n def norm1(self):\n \"\"\"nn.Module: normalization layer after the first convolution layer\"\"\"\n return getattr(self, self.norm1_name)\n\n @property\n def norm2(self):\n \"\"\"nn.Module: normalization layer after the second convolution layer\"\"\"\n return getattr(self, self.norm2_name)\n\n def forward(self, x):\n \"\"\"Forward function.\"\"\"\n\n def _inner_forward(x):\n identity = x\n\n out = self.conv1(x)\n out = self.norm1(out)\n out = self.relu(out)\n\n out = self.conv2(out)\n out = self.norm2(out)\n\n if self.downsample is not None:\n identity = self.downsample(x)\n\n out += identity\n\n return out\n\n if self.with_cp and x.requires_grad:\n out = cp.checkpoint(_inner_forward, x)\n else:\n out = _inner_forward(x)\n\n out = self.relu(out)\n\n return out\n\n\nclass Bottleneck(BaseModule):\n \"\"\"Bottleneck block for ResNet.\n\n If style is \"pytorch\", the stride-two layer is the 3x3 conv layer, if it is\n \"caffe\", the stride-two layer is the first 1x1 conv layer.\n \"\"\"\n\n expansion = 4\n\n def __init__(self,\n inplanes,\n planes,\n stride=1,\n dilation=1,\n downsample=None,\n style='pytorch',\n with_cp=False,\n conv_cfg=None,\n norm_cfg=dict(type='BN'),\n dcn=None,\n plugins=None,\n init_cfg=None):\n super(Bottleneck, self).__init__(init_cfg)\n assert style in ['pytorch', 'caffe']\n assert dcn is None or isinstance(dcn, dict)\n assert plugins is None or isinstance(plugins, list)\n if plugins is not None:\n allowed_position = ['after_conv1', 'after_conv2', 'after_conv3']\n assert all(p['position'] in allowed_position for p in plugins)\n\n self.inplanes = inplanes\n self.planes = planes\n self.stride = stride\n self.dilation = dilation\n self.style = style\n self.with_cp = with_cp\n self.conv_cfg = conv_cfg\n self.norm_cfg = norm_cfg\n self.dcn = dcn\n self.with_dcn = dcn is not None\n self.plugins = plugins\n self.with_plugins = plugins is not None\n\n if self.with_plugins:\n # collect plugins for conv1/conv2/conv3\n self.after_conv1_plugins = [\n plugin['cfg'] for plugin in plugins\n if plugin['position'] == 'after_conv1'\n ]\n self.after_conv2_plugins = [\n plugin['cfg'] for plugin in plugins\n if plugin['position'] == 
'after_conv2'\n ]\n self.after_conv3_plugins = [\n plugin['cfg'] for plugin in plugins\n if plugin['position'] == 'after_conv3'\n ]\n\n if self.style == 'pytorch':\n self.conv1_stride = 1\n self.conv2_stride = stride\n else:\n self.conv1_stride = stride\n self.conv2_stride = 1\n\n self.norm1_name, norm1 = build_norm_layer(norm_cfg, planes, postfix=1)\n self.norm2_name, norm2 = build_norm_layer(norm_cfg, planes, postfix=2)\n self.norm3_name, norm3 = build_norm_layer(\n norm_cfg, planes * self.expansion, postfix=3)\n\n self.conv1 = build_conv_layer(\n conv_cfg,\n inplanes,\n planes,\n kernel_size=1,\n stride=self.conv1_stride,\n bias=False)\n self.add_module(self.norm1_name, norm1)\n fallback_on_stride = False\n if self.with_dcn:\n fallback_on_stride = dcn.pop('fallback_on_stride', False)\n if not self.with_dcn or fallback_on_stride:\n self.conv2 = build_conv_layer(\n conv_cfg,\n planes,\n planes,\n kernel_size=3,\n stride=self.conv2_stride,\n padding=dilation,\n dilation=dilation,\n bias=False)\n else:\n assert self.conv_cfg is None, 'conv_cfg must be None for DCN'\n self.conv2 = build_conv_layer(\n dcn,\n planes,\n planes,\n kernel_size=3,\n stride=self.conv2_stride,\n padding=dilation,\n dilation=dilation,\n bias=False)\n\n self.add_module(self.norm2_name, norm2)\n self.conv3 = build_conv_layer(\n conv_cfg,\n planes,\n planes * self.expansion,\n kernel_size=1,\n bias=False)\n self.add_module(self.norm3_name, norm3)\n\n self.relu = nn.ReLU(inplace=True)\n self.downsample = downsample\n\n if self.with_plugins:\n self.after_conv1_plugin_names = self.make_block_plugins(\n planes, self.after_conv1_plugins)\n self.after_conv2_plugin_names = self.make_block_plugins(\n planes, self.after_conv2_plugins)\n self.after_conv3_plugin_names = self.make_block_plugins(\n planes * self.expansion, self.after_conv3_plugins)\n\n def make_block_plugins(self, in_channels, plugins):\n \"\"\"make plugins for block.\n\n Args:\n in_channels (int): Input channels of plugin.\n plugins (list[dict]): List of plugins cfg to build.\n\n Returns:\n list[str]: List of the names of plugin.\n \"\"\"\n assert isinstance(plugins, list)\n plugin_names = []\n for plugin in plugins:\n plugin = plugin.copy()\n name, layer = build_plugin_layer(\n plugin,\n in_channels=in_channels,\n postfix=plugin.pop('postfix', ''))\n assert not hasattr(self, name), f'duplicate plugin {name}'\n self.add_module(name, layer)\n plugin_names.append(name)\n return plugin_names\n\n def forward_plugin(self, x, plugin_names):\n \"\"\"Forward function for plugins.\"\"\"\n out = x\n for name in plugin_names:\n out = getattr(self, name)(x)\n return out\n\n @property\n def norm1(self):\n \"\"\"nn.Module: normalization layer after the first convolution layer\"\"\"\n return getattr(self, self.norm1_name)\n\n @property\n def norm2(self):\n \"\"\"nn.Module: normalization layer after the second convolution layer\"\"\"\n return getattr(self, self.norm2_name)\n\n @property\n def norm3(self):\n \"\"\"nn.Module: normalization layer after the third convolution layer\"\"\"\n return getattr(self, self.norm3_name)\n\n def forward(self, x):\n \"\"\"Forward function.\"\"\"\n\n def _inner_forward(x):\n identity = x\n\n out = self.conv1(x)\n out = self.norm1(out)\n out = self.relu(out)\n\n if self.with_plugins:\n out = self.forward_plugin(out, self.after_conv1_plugin_names)\n\n out = self.conv2(out)\n out = self.norm2(out)\n out = self.relu(out)\n\n if self.with_plugins:\n out = self.forward_plugin(out, self.after_conv2_plugin_names)\n\n out = self.conv3(out)\n out = 
self.norm3(out)\n\n if self.with_plugins:\n out = self.forward_plugin(out, self.after_conv3_plugin_names)\n\n if self.downsample is not None:\n identity = self.downsample(x)\n\n out += identity\n\n return out\n\n if self.with_cp and x.requires_grad:\n out = cp.checkpoint(_inner_forward, x)\n else:\n out = _inner_forward(x)\n\n out = self.relu(out)\n\n return out\n\n\[email protected]_module()\nclass ResNet(BaseModule):\n \"\"\"ResNet backbone.\n\n Args:\n depth (int): Depth of resnet, from {18, 34, 50, 101, 152}.\n in_channels (int): Number of input image channels. Default: 3.\n stem_channels (int): Number of stem channels. Default: 64.\n base_channels (int): Number of base channels of res layer. Default: 64.\n num_stages (int): Resnet stages, normally 4. Default: 4.\n strides (Sequence[int]): Strides of the first block of each stage.\n Default: (1, 2, 2, 2).\n dilations (Sequence[int]): Dilation of each stage.\n Default: (1, 1, 1, 1).\n out_indices (Sequence[int]): Output from which stages.\n Default: (0, 1, 2, 3).\n style (str): `pytorch` or `caffe`. If set to \"pytorch\", the stride-two\n layer is the 3x3 conv layer, otherwise the stride-two layer is\n the first 1x1 conv layer. Default: 'pytorch'.\n deep_stem (bool): Replace 7x7 conv in input stem with 3 3x3 conv.\n Default: False.\n avg_down (bool): Use AvgPool instead of stride conv when\n downsampling in the bottleneck. Default: False.\n frozen_stages (int): Stages to be frozen (stop grad and set eval mode).\n -1 means not freezing any parameters. Default: -1.\n conv_cfg (dict | None): Dictionary to construct and config conv layer.\n When conv_cfg is None, cfg will be set to dict(type='Conv2d').\n Default: None.\n norm_cfg (dict): Dictionary to construct and config norm layer.\n Default: dict(type='BN', requires_grad=True).\n norm_eval (bool): Whether to set norm layers to eval mode, namely,\n freeze running stats (mean and var). Note: Effect on Batch Norm\n and its variants only. Default: False.\n dcn (dict | None): Dictionary to construct and config DCN conv layer.\n When dcn is not None, conv_cfg must be None. Default: None.\n stage_with_dcn (Sequence[bool]): Whether to set DCN conv for each\n stage. The length of stage_with_dcn is equal to num_stages.\n Default: (False, False, False, False).\n plugins (list[dict]): List of plugins for stages, each dict contains:\n\n - cfg (dict, required): Cfg dict to build plugin.\n\n - position (str, required): Position inside block to insert plugin,\n options: 'after_conv1', 'after_conv2', 'after_conv3'.\n\n - stages (tuple[bool], optional): Stages to apply plugin, length\n should be same as 'num_stages'.\n Default: None.\n multi_grid (Sequence[int]|None): Multi grid dilation rates of last\n stage. Default: None.\n contract_dilation (bool): Whether contract first dilation of each layer\n Default: False.\n with_cp (bool): Use checkpoint or not. Using checkpoint will save some\n memory while slowing down the training speed. Default: False.\n zero_init_residual (bool): Whether to use zero init for last norm layer\n in resblocks to let them behave as identity. Default: True.\n pretrained (str, optional): model pretrained path. Default: None.\n init_cfg (dict or list[dict], optional): Initialization config dict.\n Default: None.\n\n Example:\n >>> from mmseg.models import ResNet\n >>> import torch\n >>> self = ResNet(depth=18)\n >>> self.eval()\n >>> inputs = torch.rand(1, 3, 32, 32)\n >>> level_outputs = self.forward(inputs)\n >>> for level_out in level_outputs:\n ... 
print(tuple(level_out.shape))\n (1, 64, 8, 8)\n (1, 128, 4, 4)\n (1, 256, 2, 2)\n (1, 512, 1, 1)\n \"\"\"\n\n arch_settings = {\n 18: (BasicBlock, (2, 2, 2, 2)),\n 34: (BasicBlock, (3, 4, 6, 3)),\n 50: (Bottleneck, (3, 4, 6, 3)),\n 101: (Bottleneck, (3, 4, 23, 3)),\n 152: (Bottleneck, (3, 8, 36, 3))\n }\n\n def __init__(self,\n depth,\n in_channels=3,\n stem_channels=64,\n base_channels=64,\n num_stages=4,\n strides=(1, 2, 2, 2),\n dilations=(1, 1, 1, 1),\n out_indices=(0, 1, 2, 3),\n style='pytorch',\n deep_stem=False,\n avg_down=False,\n frozen_stages=-1,\n conv_cfg=None,\n norm_cfg=dict(type='BN', requires_grad=True),\n norm_eval=False,\n dcn=None,\n stage_with_dcn=(False, False, False, False),\n plugins=None,\n multi_grid=None,\n contract_dilation=False,\n with_cp=False,\n zero_init_residual=True,\n pretrained=None,\n init_cfg=None):\n super(ResNet, self).__init__(init_cfg)\n if depth not in self.arch_settings:\n raise KeyError(f'invalid depth {depth} for resnet')\n\n self.pretrained = pretrained\n self.zero_init_residual = zero_init_residual\n block_init_cfg = None\n assert not (init_cfg and pretrained), \\\n 'init_cfg and pretrained cannot be setting at the same time'\n if isinstance(pretrained, str):\n warnings.warn('DeprecationWarning: pretrained is a deprecated, '\n 'please use \"init_cfg\" instead')\n self.init_cfg = dict(type='Pretrained', checkpoint=pretrained)\n elif pretrained is None:\n if init_cfg is None:\n self.init_cfg = [\n dict(type='Kaiming', layer='Conv2d'),\n dict(\n type='Constant',\n val=1,\n layer=['_BatchNorm', 'GroupNorm'])\n ]\n block = self.arch_settings[depth][0]\n if self.zero_init_residual:\n if block is BasicBlock:\n block_init_cfg = dict(\n type='Constant',\n val=0,\n override=dict(name='norm2'))\n elif block is Bottleneck:\n block_init_cfg = dict(\n type='Constant',\n val=0,\n override=dict(name='norm3'))\n else:\n raise TypeError('pretrained must be a str or None')\n\n self.depth = depth\n self.stem_channels = stem_channels\n self.base_channels = base_channels\n self.num_stages = num_stages\n assert num_stages >= 1 and num_stages <= 4\n self.strides = strides\n self.dilations = dilations\n assert len(strides) == len(dilations) == num_stages\n self.out_indices = out_indices\n assert max(out_indices) < num_stages\n self.style = style\n self.deep_stem = deep_stem\n self.avg_down = avg_down\n self.frozen_stages = frozen_stages\n self.conv_cfg = conv_cfg\n self.norm_cfg = norm_cfg\n self.with_cp = with_cp\n self.norm_eval = norm_eval\n self.dcn = dcn\n self.stage_with_dcn = stage_with_dcn\n if dcn is not None:\n assert len(stage_with_dcn) == num_stages\n self.plugins = plugins\n self.multi_grid = multi_grid\n self.contract_dilation = contract_dilation\n self.block, stage_blocks = self.arch_settings[depth]\n self.stage_blocks = stage_blocks[:num_stages]\n self.inplanes = stem_channels\n\n self._make_stem_layer(in_channels, stem_channels)\n\n self.res_layers = []\n for i, num_blocks in enumerate(self.stage_blocks):\n stride = strides[i]\n dilation = dilations[i]\n dcn = self.dcn if self.stage_with_dcn[i] else None\n if plugins is not None:\n stage_plugins = self.make_stage_plugins(plugins, i)\n else:\n stage_plugins = None\n # multi grid is applied to last layer only\n stage_multi_grid = multi_grid if i == len(\n self.stage_blocks) - 1 else None\n planes = base_channels * 2**i\n res_layer = self.make_res_layer(\n block=self.block,\n inplanes=self.inplanes,\n planes=planes,\n num_blocks=num_blocks,\n stride=stride,\n dilation=dilation,\n style=self.style,\n 
avg_down=self.avg_down,\n with_cp=with_cp,\n conv_cfg=conv_cfg,\n norm_cfg=norm_cfg,\n dcn=dcn,\n plugins=stage_plugins,\n multi_grid=stage_multi_grid,\n contract_dilation=contract_dilation,\n init_cfg=block_init_cfg)\n self.inplanes = planes * self.block.expansion\n layer_name = f'layer{i+1}'\n self.add_module(layer_name, res_layer)\n self.res_layers.append(layer_name)\n\n self._freeze_stages()\n\n self.feat_dim = self.block.expansion * base_channels * 2**(\n len(self.stage_blocks) - 1)\n\n def make_stage_plugins(self, plugins, stage_idx):\n \"\"\"make plugins for ResNet 'stage_idx'th stage .\n\n Currently we support to insert 'context_block',\n 'empirical_attention_block', 'nonlocal_block' into the backbone like\n ResNet/ResNeXt. They could be inserted after conv1/conv2/conv3 of\n Bottleneck.\n\n An example of plugins format could be :\n >>> plugins=[\n ... dict(cfg=dict(type='xxx', arg1='xxx'),\n ... stages=(False, True, True, True),\n ... position='after_conv2'),\n ... dict(cfg=dict(type='yyy'),\n ... stages=(True, True, True, True),\n ... position='after_conv3'),\n ... dict(cfg=dict(type='zzz', postfix='1'),\n ... stages=(True, True, True, True),\n ... position='after_conv3'),\n ... dict(cfg=dict(type='zzz', postfix='2'),\n ... stages=(True, True, True, True),\n ... position='after_conv3')\n ... ]\n >>> self = ResNet(depth=18)\n >>> stage_plugins = self.make_stage_plugins(plugins, 0)\n >>> assert len(stage_plugins) == 3\n\n Suppose 'stage_idx=0', the structure of blocks in the stage would be:\n conv1-> conv2->conv3->yyy->zzz1->zzz2\n Suppose 'stage_idx=1', the structure of blocks in the stage would be:\n conv1-> conv2->xxx->conv3->yyy->zzz1->zzz2\n\n If stages is missing, the plugin would be applied to all stages.\n\n Args:\n plugins (list[dict]): List of plugins cfg to build. 
The postfix is\n required if multiple same type plugins are inserted.\n stage_idx (int): Index of stage to build\n\n Returns:\n list[dict]: Plugins for current stage\n \"\"\"\n stage_plugins = []\n for plugin in plugins:\n plugin = plugin.copy()\n stages = plugin.pop('stages', None)\n assert stages is None or len(stages) == self.num_stages\n # whether to insert plugin into current stage\n if stages is None or stages[stage_idx]:\n stage_plugins.append(plugin)\n\n return stage_plugins\n\n def make_res_layer(self, **kwargs):\n \"\"\"Pack all blocks in a stage into a ``ResLayer``.\"\"\"\n return ResLayer(**kwargs)\n\n @property\n def norm1(self):\n \"\"\"nn.Module: the normalization layer named \"norm1\" \"\"\"\n return getattr(self, self.norm1_name)\n\n def _make_stem_layer(self, in_channels, stem_channels):\n \"\"\"Make stem layer for ResNet.\"\"\"\n if self.deep_stem:\n self.stem = nn.Sequential(\n build_conv_layer(\n self.conv_cfg,\n in_channels,\n stem_channels // 2,\n kernel_size=3,\n stride=2,\n padding=1,\n bias=False),\n build_norm_layer(self.norm_cfg, stem_channels // 2)[1],\n nn.ReLU(inplace=True),\n build_conv_layer(\n self.conv_cfg,\n stem_channels // 2,\n stem_channels // 2,\n kernel_size=3,\n stride=1,\n padding=1,\n bias=False),\n build_norm_layer(self.norm_cfg, stem_channels // 2)[1],\n nn.ReLU(inplace=True),\n build_conv_layer(\n self.conv_cfg,\n stem_channels // 2,\n stem_channels,\n kernel_size=3,\n stride=1,\n padding=1,\n bias=False),\n build_norm_layer(self.norm_cfg, stem_channels)[1],\n nn.ReLU(inplace=True))\n else:\n self.conv1 = build_conv_layer(\n self.conv_cfg,\n in_channels,\n stem_channels,\n kernel_size=7,\n stride=2,\n padding=3,\n bias=False)\n self.norm1_name, norm1 = build_norm_layer(\n self.norm_cfg, stem_channels, postfix=1)\n self.add_module(self.norm1_name, norm1)\n self.relu = nn.ReLU(inplace=True)\n self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)\n\n def _freeze_stages(self):\n \"\"\"Freeze stages param and norm stats.\"\"\"\n if self.frozen_stages >= 0:\n if self.deep_stem:\n self.stem.eval()\n for param in self.stem.parameters():\n param.requires_grad = False\n else:\n self.norm1.eval()\n for m in [self.conv1, self.norm1]:\n for param in m.parameters():\n param.requires_grad = False\n\n for i in range(1, self.frozen_stages + 1):\n m = getattr(self, f'layer{i}')\n m.eval()\n for param in m.parameters():\n param.requires_grad = False\n\n def forward(self, x):\n \"\"\"Forward function.\"\"\"\n if self.deep_stem:\n x = self.stem(x)\n else:\n x = self.conv1(x)\n x = self.norm1(x)\n x = self.relu(x)\n x = self.maxpool(x)\n outs = []\n for i, layer_name in enumerate(self.res_layers):\n res_layer = getattr(self, layer_name)\n x = res_layer(x)\n if i in self.out_indices:\n outs.append(x)\n return tuple(outs)\n\n def train(self, mode=True):\n \"\"\"Convert the model into training mode while keep normalization layer\n freezed.\"\"\"\n super(ResNet, self).train(mode)\n self._freeze_stages()\n if mode and self.norm_eval:\n for m in self.modules():\n # trick: eval have effect on BatchNorm only\n if isinstance(m, _BatchNorm):\n m.eval()\n\n\[email protected]_module()\nclass ResNetV1c(ResNet):\n \"\"\"ResNetV1c variant described in [1]_.\n\n Compared with default ResNet(ResNetV1b), ResNetV1c replaces the 7x7 conv\n in the input stem with three 3x3 convs.\n\n References:\n .. 
[1] https://arxiv.org/pdf/1812.01187.pdf\n \"\"\"\n\n def __init__(self, **kwargs):\n super(ResNetV1c, self).__init__(\n deep_stem=True, avg_down=False, **kwargs)\n\n\[email protected]_module()\nclass ResNetV1d(ResNet):\n \"\"\"ResNetV1d variant described in [1]_.\n\n Compared with default ResNet(ResNetV1b), ResNetV1d replaces the 7x7 conv in\n the input stem with three 3x3 convs. And in the downsampling block, a 2x2\n avg_pool with stride 2 is added before conv, whose stride is changed to 1.\n \"\"\"\n\n def __init__(self, **kwargs):\n super(ResNetV1d, self).__init__(\n deep_stem=True, avg_down=True, **kwargs)\n"
] | [
[
"torch.nn.MaxPool2d",
"torch.utils.checkpoint.checkpoint",
"torch.nn.ReLU"
]
] |
edawson/parliament2 | [
"2632aa3484ef64c9539c4885026b705b737f6d1e"
] | [
"resources/usr/local/lib/python2.7/dist-packages/sklearn/cluster/bicluster/spectral.py"
] | [
"\"\"\"Implements spectral biclustering algorithms.\n\nAuthors : Kemal Eren\nLicense: BSD 3 clause\n\n\"\"\"\nfrom abc import ABCMeta, abstractmethod\n\nimport numpy as np\n\nfrom scipy.sparse import dia_matrix\nfrom scipy.sparse import issparse\n\nfrom sklearn.base import BaseEstimator, BiclusterMixin\nfrom sklearn.externals import six\nfrom sklearn.utils.arpack import svds\nfrom sklearn.utils.arpack import eigsh\nfrom sklearn.cluster import KMeans\nfrom sklearn.cluster import MiniBatchKMeans\n\nfrom sklearn.utils.extmath import randomized_svd\nfrom sklearn.utils.extmath import safe_sparse_dot\nfrom sklearn.utils.extmath import make_nonnegative\nfrom sklearn.utils.extmath import norm\n\nfrom sklearn.utils.validation import assert_all_finite\nfrom sklearn.utils.validation import check_arrays\n\nfrom .utils import check_array_ndim\n\n\ndef _scale_normalize(X):\n \"\"\"Normalize ``X`` by scaling rows and columns independently.\n\n Returns the normalized matrix and the row and column scaling\n factors.\n\n \"\"\"\n X = make_nonnegative(X)\n row_diag = np.asarray(1.0 / np.sqrt(X.sum(axis=1))).squeeze()\n col_diag = np.asarray(1.0 / np.sqrt(X.sum(axis=0))).squeeze()\n row_diag = np.where(np.isnan(row_diag), 0, row_diag)\n col_diag = np.where(np.isnan(col_diag), 0, col_diag)\n if issparse(X):\n n_rows, n_cols = X.shape\n r = dia_matrix((row_diag, [0]), shape=(n_rows, n_rows))\n c = dia_matrix((col_diag, [0]), shape=(n_cols, n_cols))\n an = r * X * c\n else:\n an = row_diag[:, np.newaxis] * X * col_diag\n return an, row_diag, col_diag\n\n\ndef _bistochastic_normalize(X, max_iter=1000, tol=1e-5):\n \"\"\"Normalize rows and columns of ``X`` simultaneously so that all\n rows sum to one constant and all columns sum to a different\n constant.\n\n \"\"\"\n # According to paper, this can also be done more efficiently with\n # deviation reduction and balancing algorithms.\n X = make_nonnegative(X)\n X_scaled = X\n dist = None\n for _ in range(max_iter):\n X_new, _, _ = _scale_normalize(X_scaled)\n if issparse(X):\n dist = norm(X_scaled.data - X.data)\n else:\n dist = norm(X_scaled - X_new)\n X_scaled = X_new\n if dist is not None and dist < tol:\n break\n return X_scaled\n\n\ndef _log_normalize(X):\n \"\"\"Normalize ``X`` according to Kluger's log-interactions scheme.\"\"\"\n X = make_nonnegative(X, min_value=1)\n if issparse(X):\n raise ValueError(\"Cannot compute log of a sparse matrix,\"\n \" because log(x) diverges to -infinity as x\"\n \" goes to 0.\")\n L = np.log(X)\n row_avg = L.mean(axis=1)[:, np.newaxis]\n col_avg = L.mean(axis=0)\n avg = L.mean()\n return L - row_avg - col_avg + avg\n\n\nclass BaseSpectral(six.with_metaclass(ABCMeta, BaseEstimator,\n BiclusterMixin)):\n \"\"\"Base class for spectral biclustering.\"\"\"\n\n @abstractmethod\n def __init__(self, n_clusters=3, svd_method=\"randomized\",\n n_svd_vecs=None, mini_batch=False, init=\"k-means++\",\n n_init=10, n_jobs=1, random_state=None):\n self.n_clusters = n_clusters\n self.svd_method = svd_method\n self.n_svd_vecs = n_svd_vecs\n self.mini_batch = mini_batch\n self.init = init\n self.n_init = n_init\n self.n_jobs = n_jobs\n self.random_state = random_state\n\n def _check_parameters(self):\n legal_svd_methods = ('randomized', 'arpack')\n if self.svd_method not in legal_svd_methods:\n raise ValueError(\"Unknown SVD method: '{}'. 
svd_method must be\"\n \" one of {}.\".format(self.svd_method,\n legal_svd_methods))\n\n def fit(self, X):\n \"\"\"Creates a biclustering for X.\n\n Parameters\n ----------\n X : array-like, shape (n_samples, n_features)\n\n \"\"\"\n X, = check_arrays(X, sparse_format='csr', dtype=np.float64)\n check_array_ndim(X)\n self._check_parameters()\n self._fit(X)\n\n def _svd(self, array, n_components, n_discard):\n \"\"\"Returns first `n_components` left and right singular\n vectors u and v, discarding the first `n_discard`.\n\n \"\"\"\n if self.svd_method == 'randomized':\n kwargs = {}\n if self.n_svd_vecs is not None:\n kwargs['n_oversamples'] = self.n_svd_vecs\n u, _, vt = randomized_svd(array, n_components,\n random_state=self.random_state,\n **kwargs)\n\n elif self.svd_method == 'arpack':\n u, _, vt = svds(array, k=n_components, ncv=self.n_svd_vecs)\n if np.any(np.isnan(vt)):\n # some eigenvalues of A * A.T are negative, causing\n # sqrt() to be np.nan. This causes some vectors in vt\n # to be np.nan.\n _, v = eigsh(safe_sparse_dot(array.T, array),\n ncv=self.n_svd_vecs)\n vt = v.T\n if np.any(np.isnan(u)):\n _, u = eigsh(safe_sparse_dot(array, array.T),\n ncv=self.n_svd_vecs)\n\n assert_all_finite(u)\n assert_all_finite(vt)\n u = u[:, n_discard:]\n vt = vt[n_discard:]\n return u, vt.T\n\n def _k_means(self, data, n_clusters):\n if self.mini_batch:\n model = MiniBatchKMeans(n_clusters,\n init=self.init,\n n_init=self.n_init,\n random_state=self.random_state)\n else:\n model = KMeans(n_clusters, init=self.init,\n n_init=self.n_init, n_jobs=self.n_jobs,\n random_state=self.random_state)\n model.fit(data)\n centroid = model.cluster_centers_\n labels = model.labels_\n return centroid, labels\n\n\nclass SpectralCoclustering(BaseSpectral):\n \"\"\"Spectral Co-Clustering algorithm (Dhillon, 2001).\n\n Clusters rows and columns of an array `X` to solve the relaxed\n normalized cut of the bipartite graph created from `X` as follows:\n the edge between row vertex `i` and column vertex `j` has weight\n `X[i, j]`.\n\n The resulting bicluster structure is block-diagonal, since each\n row and each column belongs to exactly one bicluster.\n\n Supports sparse matrices, as long as they are nonnegative.\n\n Parameters\n ----------\n n_clusters : integer, optional, default: 3\n The number of biclusters to find.\n\n svd_method : string, optional, default: 'randomized'\n Selects the algorithm for finding singular vectors. May be\n 'randomized' or 'arpack'. If 'randomized', use\n :func:`sklearn.utils.extmath.randomized_svd`, which may be faster\n for large matrices. If 'arpack', use\n :func:`sklearn.utils.arpack.svds`, which is more accurate, but\n possibly slower in some cases.\n\n n_svd_vecs : int, optional, default: None\n Number of vectors to use in calculating the SVD. Corresponds\n to `ncv` when `svd_method=arpack` and `n_oversamples` when\n `svd_method` is 'randomized`.\n\n mini_batch : bool, optional, default: False\n Whether to use mini-batch k-means, which is faster but may get\n different results.\n\n init : {'k-means++', 'random' or an ndarray}\n Method for initialization of k-means algorithm; defaults to\n 'k-means++'.\n\n n_init : int, optional, default: 10\n Number of random initializations that are tried with the\n k-means algorithm.\n\n If mini-batch k-means is used, the best initialization is\n chosen and the algorithm runs once. 
Otherwise, the algorithm\n is run for each initialization and the best solution chosen.\n\n n_jobs : int, optional, default: 1\n The number of jobs to use for the computation. This works by breaking\n down the pairwise matrix into n_jobs even slices and computing them in\n parallel.\n\n If -1 all CPUs are used. If 1 is given, no parallel computing code is\n used at all, which is useful for debuging. For n_jobs below -1,\n (n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one\n are used.\n\n random_state : int seed, RandomState instance, or None (default)\n A pseudo random number generator used by the K-Means\n initialization.\n\n Attributes\n ----------\n `rows_` : array-like, shape (n_row_clusters, n_rows)\n Results of the clustering. `rows[i, r]` is True if cluster `i`\n contains row `r`. Available only after calling ``fit``.\n\n `columns_` : array-like, shape (n_column_clusters, n_columns)\n Results of the clustering, like `rows`.\n\n `row_labels_` : array-like, shape (n_rows,)\n The bicluster label of each row.\n\n `column_labels_` : array-like, shape (n_cols,)\n The bicluster label of each column.\n\n References\n ----------\n\n * Dhillon, Inderjit S, 2001. `Co-clustering documents and words using\n bipartite spectral graph partitioning\n <http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.140.3011>`__.\n\n \"\"\"\n def __init__(self, n_clusters=3, svd_method='randomized',\n n_svd_vecs=None, mini_batch=False, init='k-means++',\n n_init=10, n_jobs=1, random_state=None):\n super(SpectralCoclustering, self).__init__(n_clusters,\n svd_method,\n n_svd_vecs,\n mini_batch,\n init,\n n_init,\n n_jobs,\n random_state)\n\n def _fit(self, X):\n normalized_data, row_diag, col_diag = _scale_normalize(X)\n n_sv = 1 + int(np.ceil(np.log2(self.n_clusters)))\n u, v = self._svd(normalized_data, n_sv, n_discard=1)\n z = np.vstack((row_diag[:, np.newaxis] * u,\n col_diag[:, np.newaxis] * v))\n\n _, labels = self._k_means(z, self.n_clusters)\n\n n_rows = X.shape[0]\n self.row_labels_ = labels[:n_rows]\n self.column_labels_ = labels[n_rows:]\n\n self.rows_ = np.vstack(self.row_labels_ == c\n for c in range(self.n_clusters))\n self.columns_ = np.vstack(self.column_labels_ == c\n for c in range(self.n_clusters))\n\n\nclass SpectralBiclustering(BaseSpectral):\n \"\"\"Spectral biclustering (Kluger, 2003).\n\n Partitions rows and columns under the assumption that the data has\n an underlying checkerboard structure. For instance, if there are\n two row partitions and three column partitions, each row will\n belong to three biclusters, and each column will belong to two\n biclusters. The outer product of the corresponding row and column\n label vectors gives this checkerboard structure.\n\n Parameters\n ----------\n n_clusters : integer or tuple (n_row_clusters, n_column_clusters)\n The number of row and column clusters in the checkerboard\n structure.\n\n method : string, optional, default: 'bistochastic'\n Method of normalizing and converting singular vectors into\n biclusters. May be one of 'scale', 'bistochastic', or 'log'.\n The authors recommend using 'log'. If the data is sparse,\n however, log normalization will not work, which is why the\n default is 'bistochastic'. 
CAUTION: if `method='log'`, the\n data must not be sparse.\n\n n_components : integer, optional, default: 6\n Number of singular vectors to check.\n\n n_best : integer, optional, default: 3\n Number of best singular vectors to which to project the data\n for clustering.\n\n svd_method : string, optional, default: 'randomized'\n Selects the algorithm for finding singular vectors. May be\n 'randomized' or 'arpack'. If 'randomized', uses\n `sklearn.utils.extmath.randomized_svd`, which may be faster\n for large matrices. If 'arpack', uses\n `sklearn.utils.arpack.svds`, which is more accurate, but\n possibly slower in some cases.\n\n n_svd_vecs : int, optional, default: None\n Number of vectors to use in calculating the SVD. Corresponds\n to `ncv` when `svd_method=arpack` and `n_oversamples` when\n `svd_method` is 'randomized`.\n\n mini_batch : bool, optional, default: False\n Whether to use mini-batch k-means, which is faster but may get\n different results.\n\n init : {'k-means++', 'random' or an ndarray}\n Method for initialization of k-means algorithm; defaults to\n 'k-means++'.\n\n n_init : int, optional, default: 10\n Number of random initializations that are tried with the\n k-means algorithm.\n\n If mini-batch k-means is used, the best initialization is\n chosen and the algorithm runs once. Otherwise, the algorithm\n is run for each initialization and the best solution chosen.\n\n n_jobs : int, optional, default: 1\n The number of jobs to use for the computation. This works by breaking\n down the pairwise matrix into n_jobs even slices and computing them in\n parallel.\n\n If -1 all CPUs are used. If 1 is given, no parallel computing code is\n used at all, which is useful for debuging. For n_jobs below -1,\n (n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one\n are used.\n\n random_state : int seed, RandomState instance, or None (default)\n A pseudo random number generator used by the K-Means\n initialization.\n\n Attributes\n ----------\n `rows_` : array-like, shape (n_row_clusters, n_rows)\n Results of the clustering. `rows[i, r]` is True if cluster `i`\n contains row `r`. Available only after calling ``fit``.\n\n `columns_` : array-like, shape (n_column_clusters, n_columns)\n Results of the clustering, like `rows`.\n\n `row_labels_` : array-like, shape (n_rows,)\n Row partition labels.\n\n `column_labels_` : array-like, shape (n_cols,)\n Column partition labels.\n\n References\n ----------\n\n * Kluger, Yuval, et. al., 2003. `Spectral biclustering of microarray\n data: coclustering genes and conditions\n <http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.135.1608>`__.\n\n \"\"\"\n def __init__(self, n_clusters=3, method='bistochastic',\n n_components=6, n_best=3, svd_method='randomized',\n n_svd_vecs=None, mini_batch=False, init='k-means++',\n n_init=10, n_jobs=1, random_state=None):\n super(SpectralBiclustering, self).__init__(n_clusters,\n svd_method,\n n_svd_vecs,\n mini_batch,\n init,\n n_init,\n n_jobs,\n random_state)\n self.method = method\n self.n_components = n_components\n self.n_best = n_best\n\n def _check_parameters(self):\n super(SpectralBiclustering, self)._check_parameters()\n legal_methods = ('bistochastic', 'scale', 'log')\n if self.method not in legal_methods:\n raise ValueError(\"Unknown method: '{}'. 
method must be\"\n \" one of {}.\".format(self.method, legal_methods))\n try:\n int(self.n_clusters)\n except TypeError:\n try:\n r, c = self.n_clusters\n int(r)\n int(c)\n except (ValueError, TypeError):\n raise ValueError(\"Incorrect parameter n_clusters has value:\"\n \" {}. It should either be a single integer\"\n \" or an iterable with two integers:\"\n \" (n_row_clusters, n_column_clusters)\")\n if self.n_components < 1:\n raise ValueError(\"Parameter n_components must be greater than 0,\"\n \" but its value is {}\".format(self.n_components))\n if self.n_best < 1:\n raise ValueError(\"Parameter n_best must be greater than 0,\"\n \" but its value is {}\".format(self.n_best))\n if self.n_best > self.n_components:\n raise ValueError(\"n_best cannot be larger than\"\n \" n_components, but {} > {}\"\n \"\".format(self.n_best, self.n_components))\n\n def _fit(self, X):\n n_sv = self.n_components\n if self.method == 'bistochastic':\n normalized_data = _bistochastic_normalize(X)\n n_sv += 1\n elif self.method == 'scale':\n normalized_data, _, _ = _scale_normalize(X)\n n_sv += 1\n elif self.method == 'log':\n normalized_data = _log_normalize(X)\n n_discard = 0 if self.method == 'log' else 1\n u, v = self._svd(normalized_data, n_sv, n_discard)\n ut = u.T\n vt = v.T\n\n try:\n n_row_clusters, n_col_clusters = self.n_clusters\n except TypeError:\n n_row_clusters = n_col_clusters = self.n_clusters\n\n best_ut = self._fit_best_piecewise(ut, self.n_best,\n n_row_clusters)\n\n best_vt = self._fit_best_piecewise(vt, self.n_best,\n n_col_clusters)\n\n self.row_labels_ = self._project_and_cluster(X, best_vt.T,\n n_row_clusters)\n\n self.column_labels_ = self._project_and_cluster(X.T, best_ut.T,\n n_col_clusters)\n\n self.rows_ = np.vstack(self.row_labels_ == label\n for label in range(n_row_clusters)\n for _ in range(n_col_clusters))\n self.columns_ = np.vstack(self.column_labels_ == label\n for _ in range(n_row_clusters)\n for label in range(n_col_clusters))\n\n def _fit_best_piecewise(self, vectors, n_best, n_clusters):\n \"\"\"Find the ``n_best`` vectors that are best approximated by piecewise\n constant vectors.\n\n The piecewise vectors are found by k-means; the best is chosen\n according to Euclidean distance.\n\n \"\"\"\n def make_piecewise(v):\n centroid, labels = self._k_means(v.reshape(-1, 1), n_clusters)\n return centroid[labels].ravel()\n piecewise_vectors = np.apply_along_axis(make_piecewise,\n axis=1, arr=vectors)\n dists = np.apply_along_axis(norm, axis=1,\n arr=(vectors - piecewise_vectors))\n result = vectors[np.argsort(dists)[:n_best]]\n return result\n\n def _project_and_cluster(self, data, vectors, n_clusters):\n \"\"\"Project ``data`` to ``vectors`` and cluster the result.\"\"\"\n projected = safe_sparse_dot(data, vectors)\n _, labels = self._k_means(projected, n_clusters)\n return labels\n"
] | [
[
"numpy.vstack",
"numpy.log2",
"scipy.sparse.dia_matrix",
"sklearn.utils.validation.check_arrays",
"scipy.sparse.issparse",
"numpy.apply_along_axis",
"numpy.argsort",
"sklearn.externals.six.with_metaclass",
"sklearn.cluster.KMeans",
"sklearn.utils.arpack.svds",
"numpy.log",
"sklearn.utils.extmath.norm",
"sklearn.cluster.MiniBatchKMeans",
"numpy.isnan",
"sklearn.utils.extmath.safe_sparse_dot",
"sklearn.utils.extmath.make_nonnegative",
"sklearn.utils.validation.assert_all_finite",
"sklearn.utils.extmath.randomized_svd"
]
] |
sailxjx/DI-engine | [
"c6763f8e2ba885a2a02f611195a1b5f8b50bff00",
"c6763f8e2ba885a2a02f611195a1b5f8b50bff00"
] | [
"ding/utils/time_helper.py",
"ding/hpc_rl/tests/test_lstm.py"
] | [
"import signal\nimport time\nfrom typing import Any, Callable\n\nimport torch\nfrom easydict import EasyDict\nfrom .time_helper_base import TimeWrapper\nfrom .time_helper_cuda import get_cuda_time_wrapper\n\n\ndef build_time_helper(cfg: EasyDict = None, wrapper_type: str = None) -> Callable[[], 'TimeWrapper']:\n r\"\"\"\n Overview:\n Build the timehelper\n\n Arguments:\n - cfg (:obj:`dict`):\n The config file, which is a multilevel dict, have large domain like\n evaluate, common, model, train etc, and each large domain\n has it's smaller domain.\n - wrapper_type (:obj:`str`): The type of wrapper returned, support ``['time', 'cuda']``\n\n Returns:\n - time_wrapper (:obj:`TimeWrapper`):\n Return the corresponding timewrapper, Reference: ``ding.utils.timehelper.TimeWrapperTime``\n and ``ding.utils.timehelper.get_cuda_time_wrapper``.\n \"\"\"\n # Note: wrapper_type has higher priority\n if wrapper_type is not None:\n time_wrapper_type = wrapper_type\n elif cfg is not None:\n time_wrapper_type = cfg.common.time_wrapper_type\n else:\n raise RuntimeError('Either wrapper_type or cfg should be provided.')\n\n if time_wrapper_type == 'time':\n return TimeWrapperTime\n elif time_wrapper_type == 'cuda':\n if torch.cuda.is_available():\n # lazy initialize to make code runnable locally\n return get_cuda_time_wrapper()\n else:\n return TimeWrapperTime\n else:\n raise KeyError('invalid time_wrapper_type: {}'.format(time_wrapper_type))\n\n\nclass EasyTimer:\n r\"\"\"\n Overview:\n A decent timer wrapper that can be used easily.\n\n Interface:\n ``__init__``, ``__enter__``, ``__exit__``\n\n Example:\n >>> wait_timer = EasyTimer()\n >>> with wait_timer:\n >>> func(...)\n >>> time_ = wait_timer.value # in second\n \"\"\"\n\n def __init__(self, cuda=True):\n r\"\"\"\n Overview:\n Init class EasyTimer\n\n Arguments:\n - cuda (:obj:`bool`): Whether to build timer with cuda type\n \"\"\"\n if torch.cuda.is_available() and cuda:\n time_wrapper_type = \"cuda\"\n else:\n time_wrapper_type = \"time\"\n self._timer = build_time_helper(wrapper_type=time_wrapper_type)\n self.value = 0.0\n\n def __enter__(self):\n r\"\"\"\n Overview:\n Enter timer, start timing\n \"\"\"\n self.value = 0.0\n self._timer.start_time()\n\n def __exit__(self, *args):\n r\"\"\"\n Overview:\n Exit timer, stop timing\n \"\"\"\n self.value = self._timer.end_time()\n\n\nclass TimeWrapperTime(TimeWrapper):\n r\"\"\"\n Overview:\n A class method that inherit from ``TimeWrapper`` class\n\n Interface:\n ``start_time``, ``end_time``\n \"\"\"\n\n # overwrite\n @classmethod\n def start_time(cls):\n r\"\"\"\n Overview:\n Implement and overide the ``start_time`` method in ``TimeWrapper`` class\n \"\"\"\n cls.start = time.time()\n\n # overwrite\n @classmethod\n def end_time(cls):\n r\"\"\"\n Overview:\n Implement and overide the end_time method in ``TimeWrapper`` class\n\n Returns:\n - time(:obj:`float`): The time between ``start_time`` and end_time\n \"\"\"\n cls.end = time.time()\n return cls.end - cls.start\n\n\nclass WatchDog(object):\n \"\"\"\n Overview:\n Simple watchdog timer to detect timeouts\n\n Arguments:\n - timeout (:obj:`int`): Timeout value of the ``watchdog [seconds]``.\n\n .. note::\n If it is not reset before exceeding this value, ``TimeourError`` raised.\n\n Interface:\n ``start``, ``stop``\n\n Examples:\n >>> watchdog = WatchDog(x) # x is a timeout value\n >>> ...\n >>> watchdog.start()\n >>> ... 
# Some function\n\n \"\"\"\n\n def __init__(self, timeout: int = 1):\n self._timeout = timeout + 1\n self._failed = False\n\n def start(self):\n r\"\"\"\n Overview:\n Start watchdog.\n \"\"\"\n signal.signal(signal.SIGALRM, self._event)\n signal.alarm(self._timeout)\n\n @staticmethod\n def _event(signum: Any, frame: Any):\n raise TimeoutError()\n\n def stop(self):\n r\"\"\"\n Overview:\n Stop watchdog with ``alarm(0)``, ``SIGALRM``, and ``SIG_DFL`` signals.\n \"\"\"\n signal.alarm(0)\n signal.signal(signal.SIGALRM, signal.SIG_DFL)\n",
"import time\nimport torch\nfrom hpc_rll.origin.rnn import get_lstm\nfrom hpc_rll.torch_utils.network.rnn import LSTM\nfrom testbase import mean_relative_error, times\n\nassert torch.cuda.is_available()\nuse_cuda = True\n\nseq_len = 64\nbatch_size = 3\ninput_size = 1792\nhidden_size = 384\nnum_layers = 3\nnorm_type = 'LN'\ndropout = 0 # 0.1\n\n\n# Note: need open load_params for hpc_lstm to validation\n# Note: only used to case of num_layers = 3\ndef lstm_val():\n ori_lstm = get_lstm('normal', input_size, hidden_size, num_layers, norm_type, dropout)\n hpc_lstm = LSTM(seq_len, batch_size, input_size, hidden_size, num_layers, norm_type, dropout)\n\n ori_x = torch.randn(seq_len, batch_size, input_size)\n ori_h0 = torch.randn(num_layers, batch_size, hidden_size)\n ori_c0 = torch.randn(num_layers, batch_size, hidden_size)\n\n if use_cuda:\n ori_x = ori_x.cuda()\n ori_h0 = ori_h0.cuda()\n ori_c0 = ori_c0.cuda()\n ori_lstm = ori_lstm.cuda()\n hpc_lstm = hpc_lstm.cuda()\n\n ori_x.requires_grad_(True)\n ori_output, ori_next_state = ori_lstm(ori_x, [ori_h0, ori_c0])\n ori_loss = ori_output.mean()\n ori_loss.backward()\n\n hpc_x = ori_x.clone().detach()\n hpc_h0 = ori_h0.clone().detach()\n hpc_c0 = ori_c0.clone().detach()\n hpc_x.requires_grad_(True)\n hpc_output, hpc_next_state = hpc_lstm(hpc_x, [hpc_h0, hpc_c0])\n hpc_loss = hpc_output.mean()\n hpc_loss.backward()\n torch.cuda.synchronize()\n\n mre = mean_relative_error(\n torch.flatten(ori_loss).cpu().detach().numpy(),\n torch.flatten(hpc_loss).cpu().detach().numpy()\n )\n print(\"lstm fp mean_relative_error: \" + str(mre))\n mre = mean_relative_error(\n torch.flatten(ori_x.grad).cpu().detach().numpy(),\n torch.flatten(hpc_x.grad).cpu().detach().numpy()\n )\n print(\"lstm bp mean_relative_error: \" + str(mre))\n\n ori_wx_grad = torch.cat((ori_lstm.wx[0].grad, ori_lstm.wx[1].grad, ori_lstm.wx[2].grad))\n hpc_wx_grad = hpc_lstm.wx.grad\n mre = mean_relative_error(torch.flatten(ori_wx_grad).cpu().numpy(), torch.flatten(hpc_wx_grad).cpu().numpy())\n print(\"wx grad mean_relative_error: \" + str(mre))\n\n ori_wh_grad = torch.cat((ori_lstm.wh[0].grad, ori_lstm.wh[1].grad, ori_lstm.wh[2].grad))\n hpc_wh_grad = hpc_lstm.wh.grad\n mre = mean_relative_error(torch.flatten(ori_wh_grad).cpu().numpy(), torch.flatten(hpc_wh_grad).cpu().numpy())\n print(\"wh grad mean_relative_error: \" + str(mre))\n\n ori_bias_grad = ori_lstm.bias.grad\n hpc_bias_grad = hpc_lstm.bias.grad\n mre = mean_relative_error(torch.flatten(ori_bias_grad).cpu().numpy(), torch.flatten(hpc_bias_grad).cpu().numpy())\n print(\"bias grad mean_relative_error: \" + str(mre))\n\n params = list(ori_lstm.parameters())\n gamma_0_x = params[1]\n beta_0_x = params[2]\n gamma_0_h = params[3]\n beta_0_h = params[4]\n gamma_1_x = params[5]\n beta_1_x = params[6]\n gamma_1_h = params[7]\n beta_1_h = params[8]\n gamma_2_x = params[9]\n beta_2_x = params[10]\n gamma_2_h = params[11]\n beta_2_h = params[12]\n ori_gamma_grad = torch.cat(\n (gamma_0_x.grad, gamma_0_h.grad, gamma_1_x.grad, gamma_1_h.grad, gamma_2_x.grad, gamma_2_h.grad)\n )\n ori_beta_grad = torch.cat(\n (beta_0_x.grad, beta_0_h.grad, beta_1_x.grad, beta_1_h.grad, beta_2_x.grad, beta_2_h.grad)\n )\n hpc_gamma_grad = hpc_lstm.ln_gamma.grad\n hpc_beta_grad = hpc_lstm.ln_beta.grad\n mre = mean_relative_error(torch.flatten(ori_gamma_grad).cpu().numpy(), torch.flatten(hpc_gamma_grad).cpu().numpy())\n print(\"ln gamma grad mean_relative_error: \" + str(mre))\n mre = mean_relative_error(torch.flatten(ori_beta_grad).cpu().numpy(), 
torch.flatten(hpc_beta_grad).cpu().numpy())\n print(\"ln beta grad mean_relative_error: \" + str(mre))\n\n\ndef lstm_perf():\n ori_lstm = get_lstm('normal', input_size, hidden_size, num_layers, norm_type, dropout)\n hpc_lstm = LSTM(seq_len, batch_size, input_size, hidden_size, num_layers, norm_type, dropout)\n\n lstms = {'normal': ori_lstm, 'hpc': hpc_lstm}\n\n for lstm_type, lstm in lstms.items():\n x = torch.rand(seq_len, batch_size, input_size)\n h0 = torch.randn(num_layers, batch_size, hidden_size)\n c0 = torch.randn(num_layers, batch_size, hidden_size)\n if use_cuda:\n x = x.cuda()\n h0 = h0.cuda()\n c0 = c0.cuda()\n lstm = lstm.cuda()\n\n prev_state = [h0, c0]\n x.requires_grad_(True)\n for i in range(times):\n t = time.time()\n output, _ = lstm(x, prev_state)\n loss = output.mean()\n loss.backward()\n if use_cuda:\n torch.cuda.synchronize()\n print('epoch: {}, {} lstm cost time: {}'.format(i, lstm_type, time.time() - t))\n\n\nif __name__ == '__main__':\n print(\n \"target problem: seq_len = {}, batch_size = {}, input_size = {}, hidden_size = {}, num_layers = {}, norm_type = {}, dropout = {}\" # noqa\n .format(seq_len, batch_size, input_size, hidden_size, num_layers, norm_type, dropout)\n )\n print(\"==============lstm has no validation test================\")\n #print(\"===============run lstm validation test==================\")\n #lstm_val()\n print(\"===============run lstm performance test=================\")\n lstm_perf()\n"
] | [
[
"torch.cuda.is_available"
],
[
"torch.randn",
"torch.flatten",
"torch.rand",
"torch.cuda.synchronize",
"torch.cuda.is_available",
"torch.cat"
]
] |
Shivakoreddi/CryptoDataApplication | [
"ad620231a0614ed6f4f587dfcfb83249d1d16689"
] | [
"connectingPipelines/coins_ld.py"
] | [
"from apiWrapper import coinAPI\nfrom sqlalchemy import create_engine\nfrom sqlalchemy import Table,Column,Integer,String,MetaData,ForeignKey\nimport sqlite3\nfrom sqlite3 import Error\nimport pandas as pd\nimport os\n\n\ndef main():\n path = \"/CryptoDataApplication/\"\n for filename in os.listdir(path):\n if filename.startswith('valid_coin'):\n file = filename\n\n coin_df = pd.read_csv(file,sep=',')\n conn = sqlite3.connect('/CryptoDataApplication/transactionDB/tradingSchema.db')\n cursor = conn.cursor()\n query = []\n ##for index,row in coin_df.iterrows():\n ##query = \"\"\"INSERT OR REPLACE INTO coins(id,symbol,name,image) VALUES('{0}','{1}','{2}','{3}')\"\"\".format(row['id'],row['symbol'],row['name'],row['image'])\n #print(query[1])\n ##cursor.execute(query)\n ##conn.commit()\n\n cursor.execute(\"select * from coins\")\n rows = cursor.fetchall()\n for row in rows:\n print(row)\n\n\nif __name__==\"__main__\":\n main()\n\n"
] | [
[
"pandas.read_csv"
]
] |
Nikolay-Lysenko/gpn | [
"a59f43e90536f85f8b0051c5ce6d0497081a5a8f"
] | [
"tests/test_graph.py"
] | [
"\"\"\"\nTest `graph.py` module.\n\nAuthor: Nikolay Lysenko\n\"\"\"\n\n\nfrom typing import List, Tuple\n\nimport pytest\nimport tensorflow as tf\nimport numpy as np\n\nfrom gpn.graph import sample_multiple_fragments\n\n\[email protected](\n \"images, corners, fragment_size, frame_size, n_channels, expected\",\n [\n (\n # `images`\n np.array([\n [\n [[1, 0, 1, 0],\n [0, 1, 0, 1],\n [1, 0, 1, 0],\n [0, 1, 0, 1]],\n [[1, 1, 1, 1],\n [0, 0, 0, 0],\n [0, 0, 0, 0],\n [0, 0, 0, 0]],\n [[0, 0, 0, 0],\n [0, 0, 0, 0],\n [0, 0, 0, 0],\n [1, 1, 1, 1]]\n ],\n [\n [[1, 1, 0, 0],\n [1, 1, 0, 0],\n [0, 0, 0, 0],\n [0, 0, 0, 0]],\n [[0, 0, 0, 0],\n [0, 1, 1, 0],\n [0, 1, 1, 0],\n [0, 0, 0, 0]],\n [[0, 0, 0, 0],\n [0, 0, 0, 0],\n [0, 0, 1, 1],\n [0, 0, 1, 1]]\n ]\n ]).swapaxes(1, 3),\n # `corners`\n [(1, 1), (0, 2)],\n # `fragment_size`\n 4,\n # `frame_size`\n 1,\n # `n_channels`\n 3,\n # `expected`\n np.array([\n [\n [[1, 0, 1, 0],\n [0, 1, 0, 1],\n [1, 0, 1, 0],\n [0, 1, 0, 1]],\n [[1, 1, 1, 1],\n [0, 0, 0, 0],\n [0, 0, 0, 0],\n [0, 0, 0, 0]],\n [[0, 0, 0, 0],\n [0, 0, 0, 0],\n [0, 0, 0, 0],\n [1, 1, 1, 1]]\n ],\n [\n [[1, 1, 0, 0],\n [1, 1, 0, 0],\n [0, 0, 0, 0],\n [0, 0, 0, 0]],\n [[0, 0, 0, 0],\n [0, 1, 1, 0],\n [0, 1, 1, 0],\n [0, 0, 0, 0]],\n [[0, 0, 0, 0],\n [0, 0, 0, 0],\n [0, 0, 1, 1],\n [0, 0, 1, 1]]\n ],\n [\n [[0, 0, 1, 0],\n [0, 1, 0, 1],\n [0, 0, 1, 0],\n [0, 0, 0, 0]],\n [[0, 0, 0, 0],\n [0, 0, 0, 0],\n [0, 0, 0, 0],\n [0, 0, 0, 0]],\n [[0, 0, 0, 0],\n [0, 0, 0, 0],\n [0, 1, 1, 1],\n [0, 0, 0, 0]]\n ],\n [\n [[0, 1, 1, 0],\n [0, 0, 0, 0],\n [0, 0, 0, 0],\n [0, 0, 0, 0]],\n [[0, 0, 1, 1],\n [0, 0, 1, 1],\n [0, 0, 0, 0],\n [0, 0, 0, 0]],\n [[0, 0, 0, 0],\n [0, 0, 0, 1],\n [0, 0, 0, 1],\n [0, 0, 0, 0]]\n ],\n ]).swapaxes(1, 3)\n )\n ]\n)\ndef test_sample_multiple_fragments(\n images: np.ndarray, corners: List[Tuple[int, int]],\n fragment_size: int, frame_size: int, n_channels: int,\n expected: np.ndarray\n) -> None:\n \"\"\"Test `sample_multiple_fragments` function.\"\"\"\n graph = tf.Graph()\n with graph.as_default():\n tensor_images = tf.placeholder(tf.float32, images.shape)\n tensor_corners = [\n tf.placeholder(tf.int32, (2,), name=f'corner_{i}')\n for i, _ in enumerate(corners)\n ]\n tensor_fragments = sample_multiple_fragments(\n tensor_images, tensor_corners,\n fragment_size, frame_size, n_channels\n )\n with tf.Session(graph=graph) as sess:\n feed_dict = {\n tensor_images: images,\n **{k: v for k, v in zip(tensor_corners, corners)}\n }\n fragments = tensor_fragments.eval(feed_dict, sess)\n np.testing.assert_array_equal(fragments, expected)\n"
] | [
[
"tensorflow.placeholder",
"numpy.testing.assert_array_equal",
"tensorflow.Graph",
"tensorflow.Session",
"numpy.array"
]
] |
caserwin/daily-learning-python | [
"01fea4c5d4e86cbea2dbef8817146f018b5f1479"
] | [
"demo_sklearn/model/model_test.py"
] | [
"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# @Time : 2019/5/19 下午2:08\n# @Author : Erwin\nfrom common.pickle_helper import read_model\nimport numpy as np\n# noinspection PyUnresolvedReferences\nfrom sklearn.neighbors import LocalOutlierFactor\n# noinspection PyUnresolvedReferences\nfrom sklearn.ensemble import IsolationForest\n\nlof_model = read_model(\"./sklearn_LOF_demo1.pkl\")\nif_model = read_model(\"./sklearn_IsolationForest_demo1.pkl\")\n\nuser_define = np.array([(2, 3), (5, 6), (2.3, 1.8)])\n# -1表示异常点,1表示正常点。\nprint(lof_model.predict(user_define))\nprint(if_model.predict(user_define))"
] | [
[
"numpy.array"
]
] |
doffing81/lambdata-AshleyBrooks213 | [
"9c5d4b5f49094e1b2d43f51e7e42ece2e98e3bb6"
] | [
"lambdata/helper_functions.py"
] | [
"\"\"\"A collection of Data Science helper functions\"\"\"\n\nimport pandas as pd \nimport numpy as np \nimport random\n\ndef df_cleaner(df):\n \"\"\"Clean a df of nulls\"\"\"\n return df.dropna()\n\n\n\"\"\"Check to make sure that code works\"\"\"\nprint(\"df_cleaner is working!\") \n\n\ndef null_count(df):\n \"\"\"Check a dataframe for nulls and return the \n number of missing values\"\"\"\n return df.isnull().sum().sum()\n\n\n\"\"\"Check to make sure that code works\"\"\"\nprint(\"null_count is working!\")\n\n\ndef train_test_split(df, frac):\n \"\"\"\n Create a Train/Test split function for a dataframe and return both \n the Training and Testing sets.\n Frac refers to the percent of data you would like to set aside\n for training.\n \"\"\"\n frac = round(len(df)*frac)\n train = df[:frac]\n test = df[frac:]\n\n return train, test\n\n\n\"\"\"Check to make sure that code works\"\"\"\nprint(\"train_test_split is working!\")\n\n\ndef randomize(df, seed):\n \"\"\"\n Testing randomize(df) function: Develop a \n randomization function that randomizes all of \n a dataframes cells then returns that randomized dataframe\n \"\"\"\n \"\"\"NOTE: I am not sure about the seed part.\"\"\"\n #seed = np.random.seed(0)\n \"\"\"Randomly sample 100% of your df\"\"\"\n df = df.sample(frac=1, random_state=seed)#.reset_index(drop=True)\n return df\n\n\n\"\"\"Check to make sure that code works\"\"\"\nprint(\"randomize is working!\")\n\n \ndef addy_split(add_series):\n cities = []\n states = []\n zipcodes = []\n for row in add_series.iterrows():\n alist = row.split()\n #if statements to find city\n city = [word for word in alist if word[-1] == ',']\n cities.append(city)\n #if statements to find state\n state = [piece for piece in alist if len(piece) == 2 and piece[:2].isupper() == True]\n states.append(state)\n # if statements to zipcode\n zipcode = [n for n in alist if len(n) == 5 and n.isdigit() == True]\n zipcodes.append(zipcode)\n df = pd.DataFrame({'city': cities, 'state': states, 'zip': zipcodes})\n return df\n\n\n\n\"\"\"Check to make sure that code works\"\"\"\nprint(\"addy_split is working!\")\n\n\ndef abbr_2_st(state_series, abbr_2_st=True):\n \"\"\"\n Return a new column with the full name from a State\n abbreviation column -> An input of FL would return Florida.\n This function should also take a boolean (abbr_2_state)\n and when False takes full state names and return state abbreviations.\n -> An input of Florida would return Fl.\n \"\"\"\n us_state_abbrev = {\n 'Alabama': 'AL',\n 'Alaska': 'AK',\n 'American Samoa': 'AS',\n 'Arizona': 'AZ',\n 'Arkansas': 'AR',\n 'California': 'CA',\n 'Colorado': 'CO',\n 'Connecticut': 'CT',\n 'Delaware': 'DE',\n 'District of Columbia': 'DC',\n 'Florida': 'FL',\n 'Georgia': 'GA',\n 'Guam': 'GU',\n 'Hawaii': 'HI',\n 'Idaho': 'ID',\n 'Illinois': 'IL',\n 'Indiana': 'IN',\n 'Iowa': 'IA',\n 'Kansas': 'KS',\n 'Kentucky': 'KY',\n 'Louisiana': 'LA',\n 'Maine': 'ME',\n 'Maryland': 'MD',\n 'Massachusetts': 'MA',\n 'Michigan': 'MI',\n 'Minnesota': 'MN',\n 'Mississippi': 'MS',\n 'Missouri': 'MO',\n 'Montana': 'MT',\n 'Nebraska': 'NE',\n 'Nevada': 'NV',\n 'New Hampshire': 'NH',\n 'New Jersey': 'NJ',\n 'New Mexico': 'NM',\n 'New York': 'NY',\n 'North Carolina': 'NC',\n 'North Dakota': 'ND',\n 'Northern Mariana Islands':'MP',\n 'Ohio': 'OH',\n 'Oklahoma': 'OK',\n 'Oregon': 'OR',\n 'Pennsylvania': 'PA',\n 'Puerto Rico': 'PR',\n 'Rhode Island': 'RI',\n 'South Carolina': 'SC',\n 'South Dakota': 'SD',\n 'Tennessee': 'TN',\n 'Texas': 'TX',\n 'Utah': 'UT',\n 'Vermont': 'VT',\n 'Virgin 
Islands': 'VI',\n 'Virginia': 'VA',\n 'Washington': 'WA',\n 'West Virginia': 'WV',\n 'Wisconsin': 'WI',\n 'Wyoming': 'WY'\n}\n if abbr_2_st == True:\n inv_map = {v: k for k, v in us_state_abbrev.items()}\n full_names = []\n for abbv in state_series:\n full_names.append(inv_map[abbv])\n return full_names\n else:\n # Return Abbreviation\n abbvs = []\n for full_name in state_series:\n abbvs.append(us_state_abbrev[full_name])\n return abbvs\n\n\n\nFAVORITE_ANIMALS = ['dolphin', 'whale', 'seadragon', 'wolf', 'tiger']\nFAVORITE_COLORS = ['pink', 'blue', 'purple', 'green']\n\ndef add(x1, x2):\n return x1 + x2\n \n\ndef increment(x):\n return x + 1\n\n\"\"\"Check to make sure code works all the way through\"\"\"\nprint(\"it worked!\")"
] | [
[
"pandas.DataFrame"
]
] |
tianjuxue/AmorFEA | [
"5ddf6c1c9d4489e74a207d5d63ca00af57911ab0"
] | [
"src/opt/optimizer_robot.py"
] | [
"import numpy as np\nimport torch\nimport scipy.optimize as opt\nimport time\nfrom .optimizer import Optimizer\nfrom ..ml.trainer_robot import TrainerRobot\nfrom ..ml.models import RobotNetwork, RobotSolver\nfrom .. import arguments\nfrom ..graph.visualization import scalar_field_paraview\n\n\nclass OptimizerRobot(Optimizer):\n\n def __init__(self, args):\n super(OptimizerRobot, self).__init__(args)\n self.tip_x1_index = 6\n self.tip_x2_index = 7\n self.trainer = TrainerRobot(args, opt=True)\n self.path = self.args.root_path + '/' + self.args.model_path + '/' + \\\n self.trainer.poisson.name + '/model_s'\n self.model = RobotNetwork(self.args, self.trainer.graph_info)\n self.model.load_state_dict(torch.load(self.path))\n\n\nclass OptimizerRobotTrajectory(OptimizerRobot):\n\n def __init__(self, args):\n super(OptimizerRobotTrajectory, self).__init__(args)\n self.target_coos = heart_shape()\n self.n_pts = self.target_coos.shape[1]\n\n def optimize(self):\n x_initial = np.zeros(self.args.input_size * self.n_pts)\n options = {'eps': 1e-15, 'maxiter': 1000,\n 'disp': True}\n res = opt.minimize(fun=self._objective,\n x0=x_initial,\n method='CG',\n jac=self._derivative,\n callback=None,\n options=options)\n x_opt = res.x.reshape(-1, self.args.input_size)\n source = torch.tensor(x_opt, dtype=torch.float)\n solution = self.model(source)\n print(\"NN surrogate, loss is\", self.trainer.loss_function(\n source, solution).data.numpy())\n for i in range(31):\n scalar_field_paraview(self.args, solution.data.numpy()[\n i], self.trainer.poisson, \"/robot/time_series_nn/u\" + str(i))\n\n for i in range(31):\n gt_sol = self.trainer.forward_prediction(x_opt[i], self.model)\n scalar_field_paraview(\n self.args, gt_sol, self.trainer.poisson, \"/robot/time_series_gt/u\" + str(i))\n\n return res.x\n\n def _obj(self, source):\n source = source.reshape(-1, self.args.input_size)\n solution = self.model(source)\n sol_tip = solution[:, [self.tip_x1_index, self.tip_x2_index]]\n tar_tip = torch.tensor(self.target_coos.transpose(), dtype=torch.float)\n L_dist = ((sol_tip - tar_tip)**2).sum()\n L_reg = ((source[1:, :] - source[:-1, :])**2).sum()\n alpha = 0 * 1e-3\n L = L_dist + alpha * L_reg\n return L\n\n\nclass OptimizerRobotPoint(OptimizerRobot):\n\n def __init__(self, args):\n super(OptimizerRobotPoint, self).__init__(args)\n self.target_point = np.array([0, -2])\n self.para_data = None\n\n def _opt(self, alpha=1e-2, x_initial=None, maxiter=200, log_interval=20):\n if x_initial is None:\n x_initial = np.zeros(self.args.input_size)\n\n x = x_initial\n start = time.time()\n wall_time = [0]\n objective = []\n source = [x]\n for i in range(maxiter):\n obj = self._objective(x)\n der = self._derivative(x)\n x = x - alpha * der\n if i % log_interval == 0:\n print(\"loop {} obj {}\".format(i, obj))\n wall_time.append(time.time() - start)\n objective.append(obj)\n source.append(x)\n x_opt = x\n objective.append(self._objective(x))\n return x_opt, np.asarray(wall_time), np.asarray(objective), np.asarray(source)\n\n def L_dist(self, solution):\n L = (solution[0][self.tip_x1_index] - self.target_point[0])**2 \\\n + (solution[0][self.tip_x2_index] - self.target_point[1])**2\n return L\n\n def evaluate(self, source):\n solution, _ = self.trainer.forward_prediction(source, model=self.model)\n L = self.L_dist(np.expand_dims(solution, axis=0))\n return L, solution\n\n def batch_evaluate(self, source):\n Ls = []\n sols = []\n for s in source:\n L, sol = self.evaluate(s)\n Ls.append(L)\n sols.append(sol)\n print(\"Evaluated L\", L)\n 
return np.asarray(Ls), np.asarray(sols)\n\n\nclass OptimizerRobotPointFree(OptimizerRobotPoint):\n\n def __init__(self, args):\n super(OptimizerRobotPointFree, self).__init__(args)\n\n def optimize(self, x_initial=None):\n if x_initial is None:\n x_initial = 0.1 * np.ones(self.args.input_size)\n\n x = x_initial\n self._obj(x)\n options = {'maxiter': 100, 'disp': True,\n 'adaptive': True}\n res = opt.minimize(fun=self._obj,\n x0=x_initial,\n method='Nelder-Mead',\n options=options)\n x_opt = x\n return x_opt\n\n def _obj(self, source):\n solution, _ = self.trainer.forward_prediction(\n source, model=None, para_data=self.para_data)\n L = self.L_dist(torch.tensor(solution, dtype=torch.float).unsqueeze(0))\n print(L)\n return L.item()\n\n\nclass OptimizerRobotPointSurrogate(OptimizerRobotPoint):\n\n def __init__(self, args):\n super(OptimizerRobotPointSurrogate, self).__init__(args)\n\n def optimize(self, alpha=1e-2, x_initial=None, maxiter=100, log_interval=100):\n return self._opt(alpha=alpha, x_initial=x_initial, maxiter=maxiter, log_interval=log_interval)\n\n def _obj(self, source):\n source = source.unsqueeze(0)\n solution = self.model(source)\n L = self.L_dist(solution)\n return L\n\n\nclass OptimizerRobotPointAdjoint(OptimizerRobotPoint):\n\n def __init__(self, args):\n super(OptimizerRobotPointAdjoint, self).__init__(args)\n\n def optimize(self, alpha=2 * 1e-2, x_initial=None, maxiter=20, log_interval=1):\n return self._opt(alpha=alpha, x_initial=x_initial, maxiter=maxiter, log_interval=log_interval)\n\n def _objective(self, source):\n _, self.para_data = self.trainer.forward_prediction(\n source, model=None, para_data=self.para_data)\n _, _, L = self._objective_partials(source, self.para_data)\n return L\n\n def _derivative(self, source):\n dcdx, dcdy = self._constraint_partials(source, self.para_data)\n dLdx, dLdy, _ = self._objective_partials(source, self.para_data)\n J = self._adjoint_derivative(dcdx, dcdy, dLdx, dLdy)\n return J\n\n def _adjoint_derivative(self, dcdx, dcdy, dLdx, dLdy):\n dcdx_T = dcdx.transpose()\n adjoint_sol = np.linalg.solve(dcdx_T, dLdx)\n total_derivative = -np.matmul(adjoint_sol, dcdy) + dLdy\n return total_derivative\n\n def _objective_partials(self, source, para_data):\n solver = RobotSolver(self.args, self.trainer.graph_info)\n solver.reset_parameters_data(para_data)\n\n source = torch.tensor(source, requires_grad=True, dtype=torch.float)\n source_input = source.unsqueeze(0)\n\n solution = solver(source_input)\n L = self.L_dist(solution)\n\n dLdx = torch.autograd.grad(\n L, solver.para, create_graph=True, retain_graph=True)[0]\n dLdy = torch.autograd.grad(\n L, source, create_graph=True, retain_graph=True)[0]\n\n return dLdx.data.numpy(), dLdy.data.numpy(), L.data.numpy()\n\n def _constraint_partials(self, source, para_data):\n solver = RobotSolver(self.args, self.trainer.graph_info)\n solver.reset_parameters_data(para_data)\n\n source = torch.tensor(source, requires_grad=True, dtype=torch.float)\n source_input = source.unsqueeze(0)\n\n solution = solver(source_input)\n L = self.trainer.loss_function(source_input, solution)\n c = torch.autograd.grad(\n L, solver.para, create_graph=True, retain_graph=True)[0]\n\n dcdx = torch.stack([torch.autograd.grad(\n c[i], solver.para, create_graph=True, retain_graph=True)[0] for i in range(len(c))])\n dcdy = torch.stack([torch.autograd.grad(\n c[i], source, create_graph=True, retain_graph=True)[0] for i in range(len(c))])\n\n return dcdx.data.numpy(), dcdy.data.numpy()\n\n\n'''Helpers'''\n\n\ndef heart_shape():\n 
def x_para(t):\n return 16 * np.sin(t)**3\n\n def y_para(t):\n return 13 * np.cos(t) - 5 * np.cos(2 * t) - 2 * np.cos(3 * t) - np.cos(4 * t) - 5\n vertical_dist = 2\n norm_factor = vertical_dist / (y_para(0) - y_para(np.pi))\n t = np.linspace(0, 2 * np.pi, 31)\n x = norm_factor * x_para(t)\n y = norm_factor * y_para(t)\n return np.asarray([x, y])\n\n\ndef circle_shape():\n t = np.linspace(0, np.pi, 4)\n x = 2 * np.cos(t - np.pi / 2.)\n y = 2 * np.sin(t - np.pi / 2.)\n return np.asarray([x, y])\n\n\ndef run_mixed_opt(alpha_nn,\n alpha_ad,\n maxiter_nn,\n maxiter_ad,\n log_interval_nn,\n log_interval_ad,\n optimizer_nn,\n optimizer_ad\n ):\n x_opt, wall_time_nn, objective_nn, source_nn = optimizer_nn.optimize(alpha=alpha_nn,\n x_initial=None,\n maxiter=maxiter_nn,\n log_interval=log_interval_nn)\n solver = RobotSolver(optimizer_nn.args, optimizer_nn.trainer.graph_info)\n solver.reset_parameters_network(torch.tensor(\n x_opt, dtype=torch.float).unsqueeze(0), optimizer_nn.model)\n para_data = solver.para.data\n optimizer_ad.para_data = para_data\n x_opt, wall_time_ad, objective_ad, source_ad = optimizer_ad.optimize(alpha=alpha_ad,\n x_initial=x_opt,\n maxiter=maxiter_ad,\n log_interval=log_interval_ad)\n\n wall_time_mix = np.concatenate(\n (wall_time_nn, wall_time_ad[1:] + wall_time_nn[-1]))\n objective_mix = np.concatenate((objective_nn, objective_ad[1:]))\n source_mix = np.concatenate((source_nn, source_ad[1:]))\n return x_opt, wall_time_mix, objective_mix, source_mix\n\n\ndef run_single_opt(alpha,\n maxiter,\n log_interval,\n optimizer\n ):\n x_opt, wall_time, objective, source = optimizer.optimize(alpha=alpha,\n x_initial=None,\n maxiter=maxiter,\n log_interval=log_interval)\n\n return x_opt, wall_time, objective, source\n\n\ndef run_one_case(args,\n alpha_nn,\n alpha_ad1,\n alpha_ad2,\n maxiter_nn,\n maxiter_ad1,\n maxiter_ad2,\n log_interval_nn,\n log_interval_ad1,\n log_interval_ad2,\n target_point,\n case_number\n ):\n\n print(\"\\ncase number {}\".format(case_number))\n\n optimizer_nn = OptimizerRobotPointSurrogate(args)\n optimizer_nn.target_point = target_point\n optimizer_ad = OptimizerRobotPointAdjoint(args)\n optimizer_ad.target_point = target_point\n\n _, wall_time_ad, objective_ad, source_ad = run_single_opt(\n alpha_ad1, maxiter_ad1, log_interval_ad1, optimizer_ad)\n print(\"\\n\")\n _, wall_time_mix, objective_mix, source_mix = run_mixed_opt(alpha_nn,\n alpha_ad2,\n maxiter_nn,\n maxiter_ad2,\n log_interval_nn,\n log_interval_ad2,\n optimizer_nn,\n optimizer_ad)\n\n nn_number = maxiter_nn // log_interval_nn\n objective_mix[\n :nn_number + 1], _ = optimizer_nn.batch_evaluate(source_mix[:nn_number + 1])\n _, optimal_solution = optimizer_nn.evaluate(source_mix[-1])\n\n print(\"true error ad\", objective_ad[-1])\n print(\"true error mix\", objective_mix[-1])\n\n np.savez(args.root_path + '/' + args.numpy_path\n + '/robot/deploy/case' + str(case_number) + '.npz',\n wall_time_ad=wall_time_ad,\n objective_ad=objective_ad,\n nn_number=nn_number,\n wall_time_mix=wall_time_mix,\n objective_mix=objective_mix,\n target_point=target_point\n )\n scalar_field_paraview(args, optimal_solution,\n optimizer_nn.trainer.poisson, \"/robot/deploy/u\" + str(case_number))\n\n\ndef run_walltime(args):\n target_coos = circle_shape()\n\n alpha_ad1_list = [1e-2, 1e-2, 2 * 1e-3, 2 * 1e-3]\n alpha_nn_list = [1e-2, 1e-2, 2 * 1e-3, 2 * 1e-3]\n alpha_ad2_list = [1e-2, 1e-2, 2 * 1e-3, 2 * 1e-3]\n\n maxiter_ad1_list = [20, 20, 20, 20]\n maxiter_nn_list = [400, 400, 4000, 6000]\n maxiter_ad2_list = [20, 20, 20, 
20]\n\n log_interval_ad1_list = [1, 1, 1, 1]\n log_interval_nn_list = [40, 40, 400, 600]\n log_interval_ad2_list = [1, 1, 1, 1]\n\n for i in range(3, 4):\n run_one_case(args,\n alpha_nn_list[i],\n alpha_ad1_list[i],\n alpha_ad2_list[i],\n maxiter_nn_list[i],\n maxiter_ad1_list[i],\n maxiter_ad2_list[i],\n log_interval_nn_list[i],\n log_interval_ad1_list[i],\n log_interval_ad2_list[i],\n target_coos[:, i],\n i)\n\n\ndef run_step(args):\n target_coos = circle_shape()\n alpha_list = [1e-2, 1e-2, 2 * 1e-3, 2 * 1e-3]\n for case_number in range(2, 4):\n optimizer_nn = OptimizerRobotPointSurrogate(args)\n optimizer_ad = OptimizerRobotPointAdjoint(args)\n print(\"case_number\", case_number)\n target_point = target_coos[:, case_number]\n optimizer_nn.target_point = target_point\n optimizer_ad.target_point = target_point\n\n _, wall_time_ad, objective_ad, source_ad = run_single_opt(\n alpha_list[case_number], 100, 1, optimizer_ad)\n _, wall_time_nn, objective_nn, source_nn = run_single_opt(\n alpha_list[case_number], 100, 1, optimizer_nn)\n objective_nn, _ = optimizer_nn.batch_evaluate(source_nn)\n np.savez(args.root_path + '/' + args.numpy_path\n + '/robot/deploy/case_step' + str(case_number) + '.npz',\n objective_ad=objective_ad,\n objective_nn=objective_nn,\n target_point=target_point,\n wall_time_ad=wall_time_ad,\n wall_time_nn=wall_time_nn\n )\n\n\ndef run_gradient_free(args):\n target_coos = circle_shape()\n target_point = target_coos[:, 1]\n optimizer_fr = OptimizerRobotPointFree(args)\n optimizer_fr.target_point = target_point\n optimizer_fr.optimize()\n\n\nif __name__ == '__main__':\n args = arguments.args\n run_walltime(args)\n # run_step(args)\n"
] | [
[
"numpy.ones",
"numpy.matmul",
"numpy.linalg.solve",
"torch.autograd.grad",
"torch.load",
"numpy.zeros",
"scipy.optimize.minimize",
"torch.tensor",
"numpy.asarray",
"numpy.cos",
"numpy.expand_dims",
"numpy.array",
"numpy.sin",
"numpy.concatenate",
"numpy.linspace"
]
] |
alfrunesiq/SemanticSegmentationActiveLearning | [
"3f953a22c8fd95828c9bd4c5ce52a53e991391e4"
] | [
"active_learning.py"
] | [
"# Python standard libraries\nimport argparse\nimport glob\nimport json\nimport logging\nimport logging.config\nimport os\nimport sys\n\n# Non-standard includes\nimport numpy as np\nimport tensorflow as tf\n# Maybe import tqdm\nshow_progress = False\ntry:\n import tqdm\n show_progress = True\nexcept ImportError:\n pass\n\ntry:\n import tkinter\n tkinter.Tk().withdraw()\nexcept ImportError:\n if args.unlabelled == None:\n pass\n else:\n raise ImportError(\"Could not import tkinter, make sukre Tk \"\n \"dependencies are installed\")\nexcept Exception as e:\n print(e)\n pass\n\n# User includes\nimport models\nimport datasets\nimport tensortools as tt\n\n# Lowest representable float32\nEPSILON = np.finfo(np.float32).tiny\n\ndef main(args, logger):\n # Retrieve training parameters for convenience\n params = args.params # All parameters\n hparams = params[\"hyperparams\"] # Hyperparamters\n alparams = params[\"active_learning\"] # Active learning parameters\n state = None # State dict\n # Define state and config filenames\n state_filename = os.path.join(args.log_dir, \"state.json\")\n config_filename = os.path.join(args.log_dir, \"config.json\")\n if not os.path.exists(args.log_dir):\n os.makedirs(args.log_dir)\n # Dump parameter config\n with open(config_filename, \"w+\") as f:\n json.dump(params, f, indent=4)\n\n # Retrieve dataset specific object\n if args.dataset == \"cityscapes\":\n dataset = datasets.Cityscapes(coarse=args.coarse)\n test_examples_glob = os.path.join(args.data_dir, \"val\", \"*.tfrecord\")\n elif args.dataset == \"freiburg\":\n dataset = datasets.Freiburg()\n test_examples_glob = os.path.join(args.data_dir, \"test\", \"*.tfrecord\")\n elif args.dataset == \"vistas\":\n dataset = datasets.Vistas()\n test_examples_glob = os.path.join(args.data_dir, \"val\", \"*.tfrecord\")\n else:\n raise NotImplementedError(\"Dataset \\\"%s\\\" not supported\" % args.dataset)\n\n # Prepare dataset example file paths.\n train_examples_glob = os.path.join(args.data_dir, \"train\", \"*.tfrecord\")\n\n if not os.path.exists(state_filename):\n # Initialize state\n # Resolve example filenames\n train_val_examples = np.sort(np.array(glob.glob(train_examples_glob)))\n # Pick examples from training set to use for validation\n val_examples = train_val_examples[:alparams[\"num_validation\"]]\n # Use the rest as training examples\n train_examples = train_val_examples[alparams[\"num_validation\"]:]\n\n # Use annotated test set, NOTE: cityscapes validation set\n test_examples = np.array(glob.glob(test_examples_glob))\n\n # Draw random train examples and mark as annotated\n train_indices = np.arange(len(train_examples), dtype=np.int32)\n np.random.shuffle(train_indices)\n\n initially_labelled = alparams[\"num_initially_labelled\"]\n if initially_labelled < 0:\n # Use rest of labelled examples\n initially_labelled = len(train_examples)\n\n # Possibly add actually unlabelled examples\n no_label_indices = np.empty(0, dtype=str)\n if args.unlabelled is not None:\n no_label_glob = os.path.join(args.unlabelled, \"*.tfrecord\")\n no_label_examples = glob.glob(no_label_glob)\n no_label_indices = np.arange(\n len(train_indices), len(train_indices)+len(no_label_examples)\n )\n train_examples = np.concatenate(train_examples,\n no_label_examples)\n train_indices = np.concatenate((train_indices, no_label_indices))\n\n labelled = train_indices[:initially_labelled]\n unlabelled = train_indices[initially_labelled:]\n del train_indices\n\n # Setup initial state\n state = {\n \"checkpoint\" : None, # Keep track of latest 
checkpoint.\n \"iteration\" : 0,\n \"dataset\" : {\n \"train\" : {\n \"filenames\" : list(train_examples),\n \"labelled\" : labelled.tolist(),\n \"unlabelled\" : unlabelled.tolist(),\n \"no_label\" : no_label_indices.tolist()\n },\n \"val\" : {\n \"filenames\" : list(val_examples)\n },\n \"test\" : {\n \"filenames\" : list(test_examples)\n }\n }\n }\n with open(state_filename, \"w+\") as f:\n json.dump(state, f, indent=2)\n\n else:\n # Load state\n with open(state_filename, \"r\") as f:\n state = json.load(f)\n # Extract filename properties\n train_examples = np.array(state[\"dataset\"][\"train\"][\"filenames\"])\n val_examples = np.array(state[\"dataset\"][\"val\"][\"filenames\"])\n test_examples = np.array(state[\"dataset\"][\"test\"][\"filenames\"])\n labelled = np.array(state[\"dataset\"][\"train\"][\"labelled\"])\n unlabelled = np.array(state[\"dataset\"][\"train\"][\"unlabelled\"])\n no_label_indices = np.array(state[\"dataset\"][\"train\"][\"no_label\"])\n\n train_input_labelled = np.full_like(train_examples, False, dtype=bool)\n train_input_labelled[labelled] = True\n train_input_indices = np.arange(len(train_examples))\n\n with tf.device(\"/device:CPU:0\"):\n with tf.name_scope(\"Datasets\"):\n # Create input placeholders\n train_input = tt.input.NumpyCapsule()\n train_input.filenames = train_examples\n train_input.labelled = train_input_labelled\n train_input.indices = train_input_indices\n\n val_input = tt.input.NumpyCapsule()\n val_input.filenames = val_examples\n test_input = tt.input.NumpyCapsule()\n test_input.filenames = test_examples\n\n # Setup input pipelines\n train_input_stage = tt.input.InputStage(\n input_shape=[params[\"network\"][\"input\"][\"height\"],\n params[\"network\"][\"input\"][\"width\"]])\n # Validation AND Test input stage\n val_input_stage = tt.input.InputStage(\n input_shape=[params[\"network\"][\"input\"][\"height\"],\n params[\"network\"][\"input\"][\"width\"]])\n\n # Add datasets\n train_input_stage.add_dataset_from_placeholders(\n \"train\", train_input.filenames,\n train_input.labelled, train_input.indices,\n batch_size=params[\"batch_size\"],\n augment=True)\n # Validation set\n val_input_stage.add_dataset_from_placeholders(\n \"val\", val_input.filenames,\n batch_size=params[\"batch_size\"])\n # Test set\n val_input_stage.add_dataset_from_placeholders(\n \"test\", test_input.filenames,\n batch_size=params[\"batch_size\"])\n # Calculate number of batches in each iterator\n val_batches = (len(val_examples) - 1)//params[\"batch_size\"] + 1\n test_batches = (len(test_examples) - 1)//params[\"batch_size\"] + 1\n\n # Get iterator outputs\n train_image_raw, train_image, train_label, train_mask, \\\n train_labelled, train_index = train_input_stage.get_output()\n val_image, val_label, val_mask = val_input_stage.get_output()\n\n # Create step variables\n with tf.variable_scope(\"StepCounters\"):\n global_step = tf.Variable(0, dtype=tf.int64,\n trainable=False, name=\"GlobalStep\")\n local_step = tf.Variable(0, dtype=tf.int64,\n trainable=False, name=\"LocalStep\")\n global_step_op = tf.assign_add(global_step, local_step)\n epoch_step = tf.Variable(0, trainable=False, name=\"EpochStep\")\n epoch_step_inc = tf.assign_add(epoch_step, 1)\n\n # Build training- and validation network\n regularization = {\"drop_rates\": hparams[\"dropout_rates\"]}\n if hparams[\"weight_reg\"][\"L2\"] > 0.0 \\\n or hparams[\"weight_reg\"][\"L1\"] > 0.0:\n regularization = {\n \"weight_regularization\" : tf.keras.regularizers.l1_l2(\n l1=hparams[\"weight_reg\"][\"L1\"],\n 
l2=hparams[\"weight_reg\"][\"L2\"]),\n \"regularization_scaling\" : hparams[\"weight_reg\"][\"glorot_scaling\"],\n }\n\n # Initialize networks\n train_net = models.ENet(\n dataset.num_classes,\n **regularization\n )\n val_net = models.ENet(dataset.num_classes)\n\n with tf.device(\"/device:GPU:0\"):\n # Build graph for training\n train_logits = train_net(train_image, training=True)\n # Compute predictions: use @train_pred for metrics and\n # @pseudo_label for pseudo_annotation process.\n train_pred = tf.math.argmax(train_logits, axis=-1,\n name=\"TrainPredictions\")\n\n with tf.name_scope(\"PseudoAnnotation\"):\n # Build ops one more time without dropout.\n pseudo_logits = train_net(train_image_raw, training=False)\n # Just make sure not to propagate gradients a second time.\n pseudo_logits = tf.stop_gradient(pseudo_logits)\n pseudo_label = tf.math.argmax(pseudo_logits, axis=-1,\n name=\"TrainPredictions\")\n pseudo_label = tf.cast(pseudo_label, tf.uint8)\n\n # Configure on-line high confidence pseudo labeling.\n pseudo_prob = tf.nn.softmax(pseudo_logits, axis=-1, name=\"TrainProb\")\n if alparams[\"measure\"] == \"entropy\":\n # Reduce entropy over last dimension.\n # Compute prediction entropy\n entropy = - pseudo_prob * tf.math.log(pseudo_prob+EPSILON)\n entropy = tf.math.reduce_sum(entropy, axis=-1)\n # Convert logarithm base to units of number of classes\n # NOTE this will make the metric independent of number of\n # classes as well the range in [0,1]\n log_base = tf.math.log(np.float32(dataset.num_classes))\n entropy = entropy / log_base\n # Convert entropy to confidence\n pseudo_confidence = 1.0 - entropy\n elif alparams[\"measure\"] == \"margin\":\n # Difference between the two largest entries in last dimension.\n values, indices = tf.math.top_k(pseudo_prob, k=2)\n pseudo_confidence = values[:,:,:,0] - values[:,:,:,1]\n elif alparams[\"measure\"] == \"confidence\":\n # Reduce max over last dimension.\n pseudo_confidence = tf.math.reduce_max(pseudo_prob, axis=-1)\n else:\n raise NotImplementedError(\"Uncertainty function not implemented.\")\n pseudo_mean_confidence = tf.reduce_mean(\n tf.cast(pseudo_confidence, tf.float64),\n axis=(1,2))\n # Pseudo annotate high-confidence unlabeled example pixels\n pseudo_mask = tf.where(tf.math.less(pseudo_confidence, alparams[\"threshold\"]),\n tf.zeros_like(pseudo_label,\n dtype=train_label.dtype),\n tf.ones_like(pseudo_label,\n dtype=train_label.dtype))\n # Pseudo annotation logic (think of it as @tf.cond maped \n # over batch dimension)\n train_label = tf.where(train_labelled, train_label,\n pseudo_label, name=\"MaybeGenLabel\")\n train_mask = tf.where(train_labelled, train_mask,\n pseudo_mask, name=\"MaybeGenMask\")\n\n with tf.device(\"/device:GPU:1\"):\n # Build validation network.\n val_logits = val_net(val_image, training=False)\n val_pred = tf.math.argmax(val_logits, axis=-1,\n name=\"ValidationPredictions\")\n\n # Build cost function\n with tf.name_scope(\"Cost\"):\n with tf.device(\"/device:GPU:0\"):\n # Establish loss function\n if hparams[\"softmax\"][\"multiscale\"]:\n loss, loss_weights = \\\n tt.losses.multiscale_masked_softmax_cross_entropy(\n train_label,\n train_net.endpoint_outputs[0],\n train_mask, dataset.num_classes,\n weight=hparams[\"softmax\"][\"loginverse_scaling\"],\n label_smoothing=hparams[\"softmax\"][\"label_smoothing\"],\n scope=\"XEntropy\")\n # NOTE: this will make @loss_weights checkpointed\n train_net.loss_scale_weights = loss_weights\n else:\n loss = tt.losses.masked_softmax_cross_entropy(\n train_label,\n 
train_logits,\n train_mask, dataset.num_classes,\n weight=hparams[\"softmax\"][\"loginverse_scaling\"],\n label_smoothing=hparams[\"softmax\"][\"label_smoothing\"],\n scope=\"XEntropy\")\n cost = loss\n # Add regularization to cost function\n if len(train_net.losses) > 0:\n regularization_loss = tf.math.add_n(train_net.losses, name=\"Regularization\")\n cost += tf.cast(regularization_loss, dtype=tf.float64)\n\n # Setup learning rate\n learning_rate = hparams[\"learning_rate\"]\n if hparams[\"learning_rate_decay\"] > 0.0:\n # Inverse time learning_rate if lr_decay specified\n learning_rate = tf.train.inverse_time_decay(\n learning_rate, local_step,\n decay_steps=train_batches,\n decay_rate=hparams[\"learning_rate_decay\"])\n\n # Create optimization procedure\n optimizer = tf.train.AdamOptimizer(learning_rate, **hparams[\"optimizer\"][\"kwargs\"])\n\n # Create training op\n train_op = optimizer.minimize(cost, global_step=local_step,\n name=\"TrainOp\")\n # END tf.device(\"/device:GPU:0\")\n # END tf.name_scope(\"Cost\")\n\n # Create summary operations for training and validation network\n with tf.name_scope(\"Summary\"):\n # Create colormap for image summaries\n colormap = tf.constant(dataset.colormap, dtype=tf.uint8,\n name=\"Colormap\")\n # Create metric evaluation and summaries\n with tf.device(\"/device:GPU:0\"):\n with tf.name_scope(\"TrainMetrics\"):\n # Create metrics object for training network.\n train_metrics = tt.metrics.Metrics(train_pred, train_label,\n dataset.num_classes, train_mask)\n # Get Tensorflow update op.\n metric_update_op = train_metrics.get_update_op()\n # Get Tensorflow summary operations.\n metric_summaries = train_metrics.get_summaries()\n\n train_summary_iter = tf.summary.merge(\n [\n # Summaries run at each iteration.\n tf.summary.scalar(\"CrossEntropyLoss\", loss,\n family=\"Losses\"),\n tf.summary.scalar(\"TotalCost\", cost,\n family=\"Losses\"),\n tf.summary.scalar(\"LearningRate\", learning_rate,\n family=\"Losses\")\n ], name=\"IterationSummaries\"\n )\n\n with tf.control_dependencies([metric_update_op]):\n train_summary_epoch = tf.summary.merge(\n [\n # Summaries run at epoch boundaries.\n metric_summaries[\"Metrics\"],\n metric_summaries[\"ConfusionMat\"]\n ], name=\"EpochSummaries\"\n )\n\n train_image_summary = tf.summary.merge(\n [\n tf.summary.image(\n \"PseudoLabel/input\",\n train_image_raw,\n family=\"PseudoLabel\"\n ),\n tf.summary.image(\n \"PseudoLabel/confidence\",\n tf.expand_dims(pseudo_confidence, axis=-1),\n family=\"PseudoLabel\"\n ),\n tf.summary.image(\n \"PseudoLabel\", \n tf.gather(dataset.colormap,\n tf.cast(pseudo_label*pseudo_mask \\\n + (1 - pseudo_mask)*255,\n tf.int32)),\n family=\"PseudoLabel\"\n )\n ]\n )\n # Create metric evaluation and summaries\n with tf.device(\"/device:GPU:1\"):\n with tf.name_scope(\"ValidationTestMetrics\"):\n # Create metrics object\n val_metrics = tt.metrics.Metrics(val_pred, val_label,\n dataset.num_classes, val_mask)\n # Get update tensorflow ops\n val_metric_update_op = val_metrics.get_update_op()\n # Get metric sumaries\n val_metric_summaries = val_metrics.get_summaries()\n\n with tf.control_dependencies([val_metric_update_op]):\n val_metric_summary = tf.summary.merge(\n [\n # \"Expensive\" summaries run at epoch boundaries.\n val_metric_summaries[\"Metrics\"],\n val_metric_summaries[\"ClassMetrics\"],\n val_metric_summaries[\"ConfusionMat\"]\n ], name=\"EpochSummaries\"\n )\n val_image_summary = tf.summary.merge(\n [\n tf.summary.image(\"Input\", val_image),\n tf.summary.image(\"Label\", 
tf.gather(\n colormap, tf.cast(val_label + 255*(1-val_mask),\n tf.int32))),\n tf.summary.image(\"Predictions\", tf.gather(\n colormap, tf.cast(val_pred, tf.int32)))\n ]\n )\n val_summary_epoch = val_metric_summary\n test_summary_epoch = tf.summary.merge([\n val_metric_summary,\n val_image_summary\n ]\n )\n conf_summary_ph = tf.placeholder(tf.float64, shape=[None])\n conf_summary = tf.summary.histogram(\"ConfidenceDistribution\",\n conf_summary_ph)\n # END name_scope(\"Summary\")\n\n # Create session with soft device placement\n # - some ops neet to run on the CPU\n sess_config = tf.ConfigProto(allow_soft_placement=True)\n sess_config.gpu_options.allow_growth = True\n with tf.Session(config=sess_config) as sess:\n logger.debug(\"Initializing variables...\")\n sess.run(tf.global_variables_initializer())\n\n\n # Create checkpoint object\n with tf.name_scope(\"Checkpoint\"):\n checkpoint = tf.train.Checkpoint(model=train_net,\n epoch=epoch_step,\n step=global_step,\n optimizer=optimizer)\n checkpoint_name = os.path.join(args.log_dir, \"model\")\n if args.checkpoint is not None:\n # CMDline checkpoint given\n ckpt = args.checkpoint\n if os.path.isdir(ckpt):\n ckpt = tf.train.latest_checkpoint(ckpt)\n if ckpt is None:\n logger.error(\"Checkpoint path \\\"%s\\\" is invalid.\")\n return 1\n logger.info(\"Resuming from checkpoint \\\"%s\\\"\" % ckpt)\n status = checkpoint.restore(ckpt)\n if tf.__version__ < \"1.14.0\":\n status.assert_existing_objects_matched()\n else:\n status.expect_partial()\n status.initialize_or_restore(sess)\n if args.reinitialize_output:\n sess.run(train_net.Final.kernel.initializer)\n\n elif state[\"checkpoint\"] != None:\n # Try to restore from checkpoint in logdir\n ckpt = state[\"checkpoint\"]\n logger.info(\"Resuming from checkpoint \\\"%s\\\"\" % ckpt)\n status = checkpoint.restore(ckpt)\n if tf.__version__ < \"1.14.0\":\n status.assert_existing_objects_matched()\n else:\n status.expect_partial()\n status.initialize_or_restore(sess)\n\n with tf.name_scope(\"UpdateValidationWeights\"):\n update_val_op = []\n for i in range(len(val_net.layers)):\n for j in range(len(val_net.layers[i].variables)):\n update_val_op.append(\n tf.assign(val_net.layers[i].variables[j],\n train_net.layers[i].variables[j]))\n update_val_op = tf.group(update_val_op)\n\n ckpt_manager = tt.checkpoint_manager.CheckpointManager(checkpoint,\n args.log_dir)\n # END scope Checkpoint\n # Prepare global fetches dict\n fetches = {\n \"train\" : {\n \"iteration\" : {\n \"step\" : global_step_op,\n \"summary\" : train_summary_iter,\n \"train_op\" : train_op,\n \"update\" : metric_update_op,\n \"updates\" : train_net.updates\n },\n \"epoch\" : {\n \"step\" : epoch_step,\n \"summary\" : train_summary_epoch,\n \"summary/image\" : train_image_summary\n }\n },\n \"val\" : { # Validation and test fetches\n \"iteration\" : {\n \"update\" : val_metric_update_op\n },\n \"epoch\" : {\n \"step\" : epoch_step,\n \"MeanIoU\" : val_metrics.metrics[\"MeanIoU\"],\n \"summary\" : val_summary_epoch,\n # Also add image summary, however only added to\n # writer every N epochs.\n \"summary/image\" : val_image_summary\n }\n },\n \"test\" : {\n \"iteration\" : {\"update\" : val_metric_update_op},\n \"epoch\" : {\"summary\" : test_summary_epoch}\n }\n }\n\n # Train loop (until convergence) -> Pick unlabeled examples -> test_loop\n def train_loop(summary_writer):\n \"\"\"\n Train loop closure.\n Runs training loop untill no improvement is seen in\n @params[\"epochs\"] epochs before returning.\n \"\"\"\n # How many epoch until 
counting @no_improvement\n _initial_grace_period = alparams[\"epochs/warm_up\"]\n best_ckpt = state[\"checkpoint\"]\n best_mean_iou = 0.0\n log_subdir = summary_writer.get_logdir()\n run_name = os.path.basename(log_subdir)\n checkpoint_prefix = os.path.join(log_subdir, \"model\")\n num_iter_per_epoch = np.maximum(train_input.size,\n val_input.size)\n no_improvement_count = 0\n while no_improvement_count < params[\"epochs\"] \\\n or _initial_grace_period >= 0:\n _initial_grace_period -= 1\n # Increment in-graph epoch counter.\n epoch = sess.run(epoch_step_inc)\n\n # Prepare inner loop iterator\n _iter = range(0, num_iter_per_epoch, params[\"batch_size\"])\n if show_progress:\n _iter = tqdm.tqdm(_iter, desc=\"%s[%d]\" % (run_name, epoch),\n dynamic_ncols=True,\n ascii=True,\n postfix={\"NIC\": no_improvement_count})\n\n # Initialize iterators\n train_input_stage.init_iterator(\n \"train\", sess, train_input.feed_dict)\n val_input_stage.init_iterator(\n \"val\", sess, val_input.feed_dict)\n\n # Reset confusion matrices\n train_metrics.reset_metrics(sess)\n val_metrics.reset_metrics(sess)\n\n # Prepare iteration fetches\n _fetches = {\n \"train\" : {\"iteration\" : fetches[\"train\"][\"iteration\"]},\n \"val\" : {\"iteration\" : fetches[\"val\"][\"iteration\"]}\n }\n # Update validation network weights\n sess.run(update_val_op)\n\n try:\n for i in _iter:\n if train_input.size-params[\"batch_size\"] <= i < train_input.size:\n # Fetches for last training iteration.\n _fetches[\"train\"][\"epoch\"] = fetches[\"train\"][\"epoch\"]\n if val_input.size-params[\"batch_size\"] <= i < val_input.size:\n _fetches[\"val\"][\"epoch\"] = fetches[\"val\"][\"epoch\"]\n\n # Run fetches\n results = sess.run(_fetches)\n\n if \"train\" in results.keys():\n # Add iteration summary\n summary_writer.add_summary(\n results[\"train\"][\"iteration\"][\"summary\"],\n results[\"train\"][\"iteration\"][\"step\"])\n\n # Maybe add epoch summary\n if \"epoch\" in results[\"train\"].keys():\n summary_writer.add_summary(\n results[\"train\"][\"epoch\"][\"summary\"],\n results[\"train\"][\"epoch\"][\"step\"]\n )\n # Pop fetches to prohibit OutOfRangeError due to\n # asymmetric train-/val- input size.\n if results[\"train\"][\"epoch\"][\"step\"] % 100 == 0:\n summary_writer.add_summary(\n results[\"train\"][\"epoch\"][\"summary/image\"],\n results[\"train\"][\"epoch\"][\"step\"]\n )\n _fetches.pop(\"train\")\n\n if \"val\" in results.keys() and \\\n \"epoch\" in results[\"val\"].keys():\n # Add summaries to event log.\n summary_writer.add_summary(\n results[\"val\"][\"epoch\"][\"summary\"],\n results[\"val\"][\"epoch\"][\"step\"]\n )\n if results[\"val\"][\"epoch\"][\"step\"] % 100 == 0:\n # Only report image summary every 100th epoch.\n summary_writer.add_summary(\n results[\"val\"][\"epoch\"][\"summary/image\"],\n results[\"val\"][\"epoch\"][\"step\"]\n )\n # Check if MeanIoU improved and\n # update counter and best\n if results[\"val\"][\"epoch\"][\"MeanIoU\"] > best_mean_iou:\n best_mean_iou = results[\"val\"][\"epoch\"][\"MeanIoU\"]\n # Update checkpoint file used for\n # @tf.train.latest_checkpoint to point at\n # current best.\n _ckpt_name = ckpt_manager.commit(\n checkpoint_prefix, sess)\n if _ckpt_name != \"\":\n best_ckpt = _ckpt_name\n # Reset counter\n no_improvement_count = 0\n else:\n # Result has not improved, increment counter.\n no_improvement_count += 1\n if no_improvement_count >= params[\"epochs\"] and \\\n _initial_grace_period < 0:\n _iter.close()\n break\n if show_progress:\n 
_iter.set_postfix(NIC=no_improvement_count)\n # Pop fetches to prohibit OutOfRangeError due to\n # asymmetric train-/val- input size.\n _fetches.pop(\"val\")\n # END \"maybe add epoch summary\"\n except tf.errors.OutOfRangeError:\n logger.error(\"Out of range error. Attempting to continue.\")\n pass\n\n summary_writer.flush()\n ckpt_manager.cache(sess)\n # END while no_improvement_count < params[\"epochs\"]\n return best_ckpt\n\n def test_loop(summary_writer):\n \"\"\"\n Test loop closure.\n \"\"\"\n _step = len(labelled)\n # Initialize validation input stage with test set\n val_input_stage.init_iterator(\"test\", sess, test_input.feed_dict)\n _iter = range(0, test_input.size, params[\"batch_size\"])\n if show_progress:\n _iter = tqdm.tqdm(_iter, desc=\"test[%d]\" % (_step),\n ascii=True,\n dynamic_ncols=True)\n summary_proto = None\n val_metrics.reset_metrics(sess)\n try:\n for i in _iter:\n # Accumulate confusion matrix\n if i < test_input.size - params[\"batch_size\"]:\n sess.run(fetches[\"test\"][\"iteration\"][\"update\"])\n else:\n # Run summary operation last iteration\n _, summary_proto = sess.run([fetches[\"test\"][\"iteration\"][\"update\"],\n fetches[\"test\"][\"epoch\"][\"summary\"]])\n except tf.errors.OutOfRangeError:\n pass\n # Add summary with number of labelled examples as step.\n # NOTE this only runs on each major iteration.\n summary_writer.add_summary(\n summary_proto, _step\n )\n\n def rank_confidence():\n # Allocate array to store all confidence scores\n num_examples = len(state[\"dataset\"][\"train\"][\"filenames\"])\n confidence = np.zeros(num_examples, dtype=np.float32)\n # Initialize input stage\n train_input_stage.init_iterator(\"train\", sess,\n train_input.feed_dict)\n _iter = range(0, train_input.size, params[\"batch_size\"])\n if show_progress:\n _iter = tqdm.tqdm(_iter, desc=\"ranking[%d]\" % len(labelled),\n ascii=True,\n dynamic_ncols=True)\n try:\n for i in _iter:\n # Loop over all examples and compute confidence\n batch_confidence, batch_indices = sess.run(\n [pseudo_mean_confidence, train_index])\n # Add to list of confidence\n confidence[batch_indices] = batch_confidence\n except tf.errors.OutOfRangeError:\n pass\n\n # Filter out labelled examples\n unlabelled_confidence = confidence[unlabelled]\n\n selection_size = np.minimum(len(unlabelled),\n alparams[\"selection_size\"])\n # Get the lowest confidence indices of unlabelled subset\n example_indices = np.argpartition(unlabelled_confidence,\n selection_size)\n example_indices = example_indices[:selection_size]\n # Convert to indices into all filenames list\n low_conf_examples = unlabelled[example_indices]\n return low_conf_examples, unlabelled_confidence\n\n checkpoint_path = state[\"checkpoint\"]\n # Only add graph to first event file\n _graph = sess.graph if checkpoint_path == None else None\n with tf.summary.FileWriter(args.log_dir, graph=_graph) as test_writer:\n iterations = alparams[\"iterations\"]\n if iterations < 0:\n # Iterate untill all data is consumed\n iterations = np.ceil(len(unlabelled)\n / float(alparams[\"selection_size\"]))\n logger.info(\"Iteration count: %d\" % iterations)\n\n while state[\"iteration\"] < iterations:\n # Step 1: train_loop\n train_input.set_indices(labelled)\n\n if state[\"iteration\"] == 0:\n # Pretrain\n log_subdir = os.path.join(args.log_dir, \"pretrain\")\n # Only use labelled subset\n else:\n # Any other iteration\n log_subdir = os.path.join(args.log_dir, \"iter-%d\" %\n state[\"iteration\"])\n # Sample from the unlabelled set\n p = 
alparams[\"pseudo_labelling_proportion\"]\n sample_size = int(len(labelled)*p/(1-p))\n sample_size = np.minimum(sample_size, len(unlabelled))\n train_input.set_sample_size(sample_size)\n\n # Create subdir if it doesn't exist\n if not os.path.exists(log_subdir):\n os.mkdir(log_subdir)\n\n # Change checkpoint manager directory\n ckpt_manager.chdir(log_subdir)\n with tf.summary.FileWriter(log_subdir) as train_val_writer:\n # Enter train loop\n try:\n checkpoint_path = train_loop(train_val_writer)\n except KeyboardInterrupt as exception:\n # Quickly store state\n if ckpt_manager.latest_checkpoint != \"\":\n state[\"checkpoint\"] = ckpt_manager.latest_checkpoint\n with open(state_filename, \"w\") as f:\n json.dump(state, f, indent=2)\n f.truncate()\n raise exception\n\n\n # Reload best checkpoint\n status = checkpoint.restore(checkpoint_path)\n status.run_restore_ops(sess)\n sess.run(update_val_op)\n\n # Step 2: test_loop\n if test_input.size > 0:\n # This step may be omitted on deployment\n test_loop(test_writer)\n\n # Step 3: Find low confidence examples\n # Reset train_input to use all examples for ranking\n train_input.set_indices()\n if alparams[\"selection_size\"] > 0:\n low_conf_examples, unlabelled_conf = rank_confidence()\n _hist_summary = sess.run(conf_summary,\n {conf_summary_ph: \n unlabelled_conf})\n test_writer.add_summary(_hist_summary, state[\"iteration\"])\n else:\n # Draw examples randomly\n selection_size = np.minimum(alparams[\"selection_size\"],\n len(unlabelled.tolist()))\n if selection_size != 0:\n low_conf_examples = np.random.choice(\n unlabelled, np.abs(alparams[\"selection_size\"]))\n else:\n low_conf_examples = []\n\n # (maybe) Pause for user to annotate\n to_annotate_indices = no_label_indices[np.isin(\n no_label_indices, low_conf_examples)]\n\n while len(to_annotate_indices) > 0:\n to_annotate = train_examples[to_annotate_indices]\n # Poll user for filenames of annotated examples\n logger.info(\"Please annotate the following examples:\\n%s\" %\n \"\\n\".join(to_annotate_basename.tolist()))\n filenames = tkinter.filedialog.askopenfilename(\n multiple=1,\n filetypes=((\"TFRecord\", \"*.tfrecord\"),))\n\n hit = [] # List of matching filename indices\n for filename in filenames:\n basename = os.path.basename(filename)\n idx = -1\n for i in range(len(to_annotate)):\n if to_annotate[i].endswith(basename):\n idx = i\n break\n if idx != -1:\n # Update state filenames\n train_examples[to_annotate_indices[idx]] = filename\n hit.append(idx)\n else:\n logger.info(\"Unrecognized filepath: %s\" % filename)\n # Remove matched paths\n to_annotate_indices = np.delete(to_annotate_indices, hit)\n\n\n # Remove annotated examples from unlabelled set\n no_label_indices = no_label_indices[np.isin(no_label_indices,\n low_conf_examples,\n invert=True)]\n\n\n logger.info(\n \"Moving following examples to labelled set:\\n%s\" %\n \"\\n\".join(train_examples[low_conf_examples].tolist())\n )\n # First make the update to input stage before\n # commiting state change\n train_input_labelled[low_conf_examples] = True\n train_input.labelled = train_input_labelled\n\n\n # Step 4: Update state information\n labelled = np.append(labelled, low_conf_examples)\n unlabelled = unlabelled[np.isin(unlabelled, low_conf_examples,\n assume_unique=True, invert=True)]\n state[\"dataset\"][\"train\"][\"filenames\"] = train_examples.tolist()\n state[\"dataset\"][\"train\"][\"labelled\"] = labelled.tolist()\n state[\"dataset\"][\"train\"][\"unlabelled\"] = unlabelled.tolist()\n state[\"iteration\"] += 1\n 
state[\"checkpoint\"] = checkpoint_path\n # Dump updated state\n with open(state_filename, \"w\") as f:\n json.dump(state, f, indent=2)\n f.truncate()\n return 0\n\nclass HelpfullParser(argparse.ArgumentParser):\n # Prints help instead of usage string on error\n def error(self, message):\n self.print_help()\n self.exit(2, \"error: %s\\n\" % message)\n\ndef parse_arguments():\n \"\"\"\n Handles parseing of commandline arguments\n\n :returns: The parsed commandline options\n :rtype: argparse.Namespace\n \"\"\"\n # Required arguments\n req_parser = argparse.ArgumentParser(add_help=False)\n req_group = req_parser.add_argument_group(title=\"Required arguments\")\n req_group.add_argument(\n \"-d\", \"--data-dir\",\n required=True,\n type=str,\n dest=\"data_dir\",\n help=\"Path to dataset root directory\"\n )\n req_group.add_argument(\n \"-l\", \"--log-dir\",\n required=True,\n type=str,\n dest=\"log_dir\",\n metavar=\"LOG_DIR\",\n help=\"Logdirectory for the session.\"\n )\n req_group.add_argument(\n \"-p\", \"--parameters\",\n required=True,\n type=str,\n dest=\"params\",\n metavar=\"PARAM_FILE\",\n help=\"Path to parameter configuration file, see conf subdirectory.\"\n )\n #Optional arguments\n opt_parser = argparse.ArgumentParser(add_help=False)\n opt_parser.add_argument(\n \"-c\", \"--checkpoint\",\n type=str,\n dest=\"checkpoint\", required=False,\n metavar=\"CHECKPOINT\",\n help=\"Path to pretrained checkpoint directory or model.\"\n )\n opt_parser.add_argument(\n \"-r\", \"--reinitialize-output-layer\",\n action=\"store_true\",\n dest=\"reinitialize_output\", required=False,\n help=\"Reinitialize last layer of model (if checkpoint specified).\"\n )\n opt_parser.add_argument(\n \"-u\", \"--unlabelled-dir\",\n type=str,\n default=None,\n dest=\"unlabelled\",\n metavar=\"UNLABELLED_GLOB\",\n help=\"Path to directory containing only feature data.\"\n )\n\n # Create parser hierarchy\n # Top parser\n top_parser = argparse.ArgumentParser(\n usage=\"%s {cityscapes,freiburg,vistas} [-h/--help]\"\n % sys.argv[0])\n\n # Dataset specific parsers inherits required arguments.\n data_parsers = top_parser.add_subparsers(parser_class=HelpfullParser)\n # Cityscapes dataset\n cityscapes = data_parsers.add_parser(\n \"cityscapes\",\n usage=\"%s {cityscapes,freiburg} -d DATA_DIR -l LOG_DIR [options]\"\n % sys.argv[0],\n parents=[req_parser,opt_parser],\n conflict_handler=\"resolve\",\n help=\"The Cityscapes dataset.\")\n cityscapes.set_defaults(dataset=\"cityscapes\")\n cityscapes.add_argument(\"--use-coarse\",\n action=\"store_true\",\n required=False,\n dest=\"coarse\")\n # Mapillary Vistas dataset\n vistas = data_parsers.add_parser(\n \"vistas\",\n usage=\"%s {cityscapes,freiburg,vistas} -d DATA_DIR -l LOG_DIR [options]\"\n % sys.argv[0],\n parents=[req_parser,opt_parser],\n conflict_handler=\"resolve\",\n help=\"The Mapillary Vistas dataset.\")\n vistas.set_defaults(dataset=\"vistas\")\n\n # Freiburg forrest dataset\n freiburg = data_parsers.add_parser(\n \"freiburg\",\n usage=\"%s {cityscapes,freiburg} -d DATA_DIR -l LOG_DIR [options]\"\n % sys.argv[0],\n parents=[req_parser,opt_parser],\n conflict_handler=\"resolve\",\n help=\"The Freiburg Forest dataset.\")\n freiburg.set_defaults(dataset=\"freiburg\")\n freiburg.add_argument(\"-m\", \"--modalities\",\n type=str,\n nargs=\"+\",\n required=False,\n default=[],\n help=\"Path to Freiburg Forest root directory.\")\n if not \"freiburg\" in sys.argv and \\\n not \"cityscapes\" in sys.argv and \\\n not \"vistas\" in sys.argv:\n top_parser.print_help()\n 
sys.exit(0)\n args = top_parser.parse_args()\n\n return args\n\nif __name__ == \"__main__\":\n # Get and configure logger\n logger = logging.getLogger(__name__)\n with open(\"util/logging.json\") as conf:\n conf_dict = json.load(conf)\n logging.config.dictConfig(conf_dict)\n del conf_dict\n args = parse_arguments()\n # Load parameters\n parameters = None\n with open(args.params, \"r\") as f:\n parameters = json.load(f)\n # Overwrite with parameter dict\n args.params = parameters\n sys.exit(main(args, logger))\n"
] | [
[
"tensorflow.summary.scalar",
"tensorflow.assign_add",
"tensorflow.summary.image",
"tensorflow.keras.regularizers.l1_l2",
"tensorflow.variable_scope",
"tensorflow.name_scope",
"tensorflow.Variable",
"tensorflow.summary.merge",
"tensorflow.summary.FileWriter",
"tensorflow.nn.softmax",
"numpy.full_like",
"tensorflow.summary.histogram",
"tensorflow.math.less",
"tensorflow.math.log",
"numpy.append",
"tensorflow.global_variables_initializer",
"tensorflow.device",
"numpy.argpartition",
"numpy.abs",
"numpy.isin",
"numpy.delete",
"tensorflow.constant",
"numpy.zeros",
"tensorflow.ones_like",
"tensorflow.math.reduce_max",
"numpy.float32",
"tensorflow.zeros_like",
"tensorflow.math.argmax",
"tensorflow.expand_dims",
"tensorflow.cast",
"tensorflow.assign",
"tensorflow.Session",
"numpy.maximum",
"numpy.finfo",
"tensorflow.math.add_n",
"tensorflow.group",
"tensorflow.ConfigProto",
"tensorflow.control_dependencies",
"tensorflow.placeholder",
"numpy.random.shuffle",
"tensorflow.math.reduce_sum",
"numpy.empty",
"tensorflow.train.inverse_time_decay",
"tensorflow.train.AdamOptimizer",
"tensorflow.stop_gradient",
"tensorflow.train.latest_checkpoint",
"tensorflow.where",
"numpy.array",
"tensorflow.train.Checkpoint",
"numpy.concatenate",
"tensorflow.math.top_k"
]
] |
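The row above archives an active-learning segmentation script whose pixel-confidence measures ("entropy", "margin", "confidence") are built from TensorFlow ops on the softmax output. The sketch below restates those same three measures in plain NumPy so they can be checked outside a graph; the array shapes, the `EPSILON` value, and the toy probability map are assumptions made for illustration, not part of the archived file.

```python
# Plain-NumPy restatement of the three confidence measures used by the
# script in the row above. Shapes, EPSILON and the toy input are assumed.
import numpy as np

EPSILON = 1e-8  # assumed small constant, analogous to the script's EPSILON

def pixel_confidence(prob, measure="entropy"):
    """prob: softmax output of shape (H, W, num_classes); returns (H, W)."""
    if measure == "entropy":
        entropy = -np.sum(prob * np.log(prob + EPSILON), axis=-1)
        entropy /= np.log(prob.shape[-1])        # rescale so entropy lies in [0, 1]
        return 1.0 - entropy                     # high value = confident pixel
    if measure == "margin":
        top2 = np.sort(prob, axis=-1)[..., -2:]  # two largest class probabilities
        return top2[..., 1] - top2[..., 0]       # best minus runner-up
    if measure == "confidence":
        return np.max(prob, axis=-1)             # probability of the argmax class
    raise NotImplementedError("Uncertainty function not implemented.")

# Toy 8x8 probability map over 4 classes; the per-image mean of this score is
# what the archived script computes as pseudo_mean_confidence.
prob_map = np.random.dirichlet(np.ones(4), size=(8, 8))
print(pixel_confidence(prob_map, "margin").mean())
```

In the archived script the per-image mean of this score drives both the high-confidence pseudo-labelling mask and the ranking of unlabelled examples in `rank_confidence()`.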
naveenarun/MolGAN | [
"c304707144ec9a4870390011aa73cdc7078a0e9d"
] | [
"utils/sparse_molecular_dataset.py"
] | [
"import pickle\nimport numpy as np\n\nfrom rdkit import Chem\n\nif __name__ == '__main__':\n from progress_bar import ProgressBar\nelse:\n from utils.progress_bar import ProgressBar\n\nfrom datetime import datetime\n\n\nclass SparseMolecularDataset():\n\n def load(self, filename, subset=1):\n\n with open(filename, 'rb') as f:\n self.__dict__.update(pickle.load(f))\n\n self.train_idx = np.random.choice(self.train_idx, int(len(self.train_idx) * subset), replace=False)\n self.validation_idx = np.random.choice(self.validation_idx, int(len(self.validation_idx) * subset),\n replace=False)\n self.test_idx = np.random.choice(self.test_idx, int(len(self.test_idx) * subset), replace=False)\n\n self.train_count = len(self.train_idx)\n self.validation_count = len(self.validation_idx)\n self.test_count = len(self.test_idx)\n\n self.__len = self.train_count + self.validation_count + self.test_count\n\n def save(self, filename):\n with open(filename, 'wb') as f:\n pickle.dump(self.__dict__, f)\n\n def generate(self, filename, add_h=False, filters=lambda x: True, size=None, validation=0.1, test=0.1):\n self.log('Extracting {}..'.format(filename))\n\n if filename.endswith('.sdf'):\n self.data = list(filter(lambda x: x is not None, Chem.SDMolSupplier(filename)))\n elif filename.endswith('.smi'):\n self.data = [Chem.MolFromSmiles(line) for line in open(filename, 'r').readlines()]\n\n self.data = list(map(Chem.AddHs, self.data)) if add_h else self.data\n self.data = list(filter(filters, self.data))\n self.data = self.data[:size]\n\n self.log('Extracted {} out of {} molecules {}adding Hydrogen!'.format(len(self.data),\n len(Chem.SDMolSupplier(filename)),\n '' if add_h else 'not '))\n\n self._generate_encoders_decoders()\n self._generate_AX()\n\n # it contains the all the molecules stored as rdkit.Chem objects\n self.data = np.array(self.data)\n\n # it contains the all the molecules stored as SMILES strings\n self.smiles = np.array(self.smiles)\n\n # a (N, L) matrix where N is the length of the dataset and each L-dim vector contains the \n # indices corresponding to a SMILE sequences with padding wrt the max length of the longest \n # SMILES sequence in the dataset (see self._genS)\n self.data_S = np.stack(self.data_S)\n\n # a (N, 9, 9) tensor where N is the length of the dataset and each 9x9 matrix contains the \n # indices of the positions of the ones in the one-hot representation of the adjacency tensor\n # (see self._genA)\n self.data_A = np.stack(self.data_A)\n\n # a (N, 9) matrix where N is the length of the dataset and each 9-dim vector contains the \n # indices of the positions of the ones in the one-hot representation of the annotation matrix\n # (see self._genX)\n self.data_X = np.stack(self.data_X)\n\n # a (N, 9) matrix where N is the length of the dataset and each 9-dim vector contains the \n # diagonal of the correspondent adjacency matrix\n self.data_D = np.stack(self.data_D)\n\n # a (N, F) matrix where N is the length of the dataset and each F vector contains features \n # of the correspondent molecule (see self._genF)\n self.data_F = np.stack(self.data_F)\n\n # a (N, 9) matrix where N is the length of the dataset and each 9-dim vector contains the\n # eigenvalues of the correspondent Laplacian matrix\n self.data_Le = np.stack(self.data_Le)\n\n # a (N, 9, 9) matrix where N is the length of the dataset and each 9x9 matrix contains the \n # eigenvectors of the correspondent Laplacian matrix\n self.data_Lv = np.stack(self.data_Lv) \n\n self.vertexes = self.data_F.shape[-2]\n self.features = 
self.data_F.shape[-1]\n\n self._generate_train_validation_test(validation, test)\n\n def _generate_encoders_decoders(self):\n self.log('Creating atoms encoder and decoder..')\n atom_labels = sorted(set([atom.GetAtomicNum() for mol in self.data for atom in mol.GetAtoms()] + [0]))\n self.atom_encoder_m = {l: i for i, l in enumerate(atom_labels)}\n self.atom_decoder_m = {i: l for i, l in enumerate(atom_labels)}\n self.atom_num_types = len(atom_labels)\n self.log('Created atoms encoder and decoder with {} atom types and 1 PAD symbol!'.format(\n self.atom_num_types - 1))\n\n self.log('Creating bonds encoder and decoder..')\n bond_labels = [Chem.rdchem.BondType.ZERO] + list(sorted(set(bond.GetBondType()\n for mol in self.data\n for bond in mol.GetBonds())))\n\n self.bond_encoder_m = {l: i for i, l in enumerate(bond_labels)}\n self.bond_decoder_m = {i: l for i, l in enumerate(bond_labels)}\n self.bond_num_types = len(bond_labels)\n self.log('Created bonds encoder and decoder with {} bond types and 1 PAD symbol!'.format(\n self.bond_num_types - 1))\n\n self.log('Creating SMILES encoder and decoder..')\n smiles_labels = ['E'] + list(set(c for mol in self.data for c in Chem.MolToSmiles(mol)))\n self.smiles_encoder_m = {l: i for i, l in enumerate(smiles_labels)}\n self.smiles_decoder_m = {i: l for i, l in enumerate(smiles_labels)}\n self.smiles_num_types = len(smiles_labels)\n self.log('Created SMILES encoder and decoder with {} types and 1 PAD symbol!'.format(\n self.smiles_num_types - 1))\n\n def _generate_AX(self):\n self.log('Creating features and adjacency matrices..')\n pr = ProgressBar(60, len(self.data))\n\n data = []\n smiles = []\n data_S = []\n data_A = []\n data_X = []\n data_D = []\n data_F = []\n data_Le = []\n data_Lv = []\n\n max_length = max(mol.GetNumAtoms() for mol in self.data)\n max_length_s = max(len(Chem.MolToSmiles(mol)) for mol in self.data)\n\n for i, mol in enumerate(self.data):\n A = self._genA(mol, connected=True, max_length=max_length)\n D = np.count_nonzero(A, -1)\n if A is not None:\n data.append(mol)\n smiles.append(Chem.MolToSmiles(mol))\n data_S.append(self._genS(mol, max_length=max_length_s))\n data_A.append(A)\n data_X.append(self._genX(mol, max_length=max_length))\n data_D.append(D)\n data_F.append(self._genF(mol, max_length=max_length))\n\n L = D - A\n Le, Lv = np.linalg.eigh(L)\n\n data_Le.append(Le)\n data_Lv.append(Lv)\n\n pr.update(i + 1)\n\n self.log(date=False)\n self.log('Created {} features and adjacency matrices out of {} molecules!'.format(len(data),\n len(self.data)))\n\n self.data = data\n self.smiles = smiles\n self.data_S = data_S\n self.data_A = data_A\n self.data_X = data_X\n self.data_D = data_D\n self.data_F = data_F\n self.data_Le = data_Le\n self.data_Lv = data_Lv\n self.__len = len(self.data)\n\n def _genA(self, mol, connected=True, max_length=None):\n\n max_length = max_length if max_length is not None else mol.GetNumAtoms()\n\n A = np.zeros(shape=(max_length, max_length), dtype=np.int32)\n\n begin, end = [b.GetBeginAtomIdx() for b in mol.GetBonds()], [b.GetEndAtomIdx() for b in mol.GetBonds()]\n bond_type = [self.bond_encoder_m[b.GetBondType()] for b in mol.GetBonds()]\n\n A[begin, end] = bond_type\n A[end, begin] = bond_type\n\n degree = np.sum(A[:mol.GetNumAtoms(), :mol.GetNumAtoms()], axis=-1)\n\n return A if connected and (degree > 0).all() else None\n\n def _genX(self, mol, max_length=None):\n\n max_length = max_length if max_length is not None else mol.GetNumAtoms()\n\n return np.array([self.atom_encoder_m[atom.GetAtomicNum()] for 
atom in mol.GetAtoms()] + [0] * (\n max_length - mol.GetNumAtoms()), dtype=np.int32)\n\n def _genS(self, mol, max_length=None):\n\n max_length = max_length if max_length is not None else len(Chem.MolToSmiles(mol))\n\n return np.array([self.smiles_encoder_m[c] for c in Chem.MolToSmiles(mol)] + [self.smiles_encoder_m['E']] * (\n max_length - len(Chem.MolToSmiles(mol))), dtype=np.int32)\n\n def _genF(self, mol, max_length=None):\n\n max_length = max_length if max_length is not None else mol.GetNumAtoms()\n\n features = np.array([[*[a.GetDegree() == i for i in range(5)],\n *[a.GetExplicitValence() == i for i in range(9)],\n *[int(a.GetHybridization()) == i for i in range(1, 7)],\n *[a.GetImplicitValence() == i for i in range(9)],\n a.GetIsAromatic(),\n a.GetNoImplicit(),\n *[a.GetNumExplicitHs() == i for i in range(5)],\n *[a.GetNumImplicitHs() == i for i in range(5)],\n *[a.GetNumRadicalElectrons() == i for i in range(5)],\n a.IsInRing(),\n *[a.IsInRingSize(i) for i in range(2, 9)]] for a in mol.GetAtoms()], dtype=np.int32)\n\n return np.vstack((features, np.zeros((max_length - features.shape[0], features.shape[1]))))\n\n def matrices2mol(self, node_labels, edge_labels, strict=False):\n mol = Chem.RWMol()\n\n for node_label in node_labels:\n mol.AddAtom(Chem.Atom(self.atom_decoder_m[node_label]))\n\n for start, end in zip(*np.nonzero(edge_labels)):\n if start > end:\n mol.AddBond(int(start), int(end), self.bond_decoder_m[edge_labels[start, end]])\n\n if strict:\n try:\n Chem.SanitizeMol(mol)\n except:\n mol = None\n\n return mol\n\n def seq2mol(self, seq, strict=False):\n mol = Chem.MolFromSmiles(''.join([self.smiles_decoder_m[e] for e in seq if e != 0]))\n\n if strict:\n try:\n Chem.SanitizeMol(mol)\n except:\n mol = None\n\n return mol\n\n def _generate_train_validation_test(self, validation, test):\n\n self.log('Creating train, validation and test sets..')\n\n validation = int(validation * len(self))\n test = int(test * len(self))\n train = len(self) - validation - test\n\n self.all_idx = np.random.permutation(len(self))\n self.train_idx = self.all_idx[0:train]\n self.validation_idx = self.all_idx[train:train + validation]\n self.test_idx = self.all_idx[train + validation:]\n\n self.train_counter = 0\n self.validation_counter = 0\n self.test_counter = 0\n\n self.train_count = train\n self.validation_count = validation\n self.test_count = test\n\n self.log('Created train ({} items), validation ({} items) and test ({} items) sets!'.format(\n train, validation, test))\n\n def _next_batch(self, counter, count, idx, batch_size):\n if batch_size is not None:\n if counter + batch_size >= count:\n counter = 0\n np.random.shuffle(idx)\n\n output = [obj[idx[counter:counter + batch_size]]\n for obj in (self.data, self.smiles, self.data_S, self.data_A, self.data_X,\n self.data_D, self.data_F, self.data_Le, self.data_Lv)]\n\n counter += batch_size\n else:\n output = [obj[idx] for obj in (self.data, self.smiles, self.data_S, self.data_A, self.data_X,\n self.data_D, self.data_F, self.data_Le, self.data_Lv)]\n\n return [counter] + output\n\n def next_train_batch(self, batch_size=None):\n out = self._next_batch(counter=self.train_counter, count=self.train_count,\n idx=self.train_idx, batch_size=batch_size)\n self.train_counter = out[0]\n\n return out[1:]\n\n def next_validation_batch(self, batch_size=None):\n out = self._next_batch(counter=self.validation_counter, count=self.validation_count,\n idx=self.validation_idx, batch_size=batch_size)\n self.validation_counter = out[0]\n\n return out[1:]\n\n def 
next_test_batch(self, batch_size=None):\n out = self._next_batch(counter=self.test_counter, count=self.test_count,\n idx=self.test_idx, batch_size=batch_size)\n self.test_counter = out[0]\n\n return out[1:]\n\n @staticmethod\n def log(msg='', date=True):\n print(str(datetime.now().strftime('%Y-%m-%d %H:%M:%S')) + ' ' + str(msg) if date else str(msg))\n\n def __len__(self):\n return self.__len\n\n\nif __name__ == '__main__':\n data = SparseMolecularDataset()\n data.generate('data/gdb9.sdf', filters=lambda x: x.GetNumAtoms() <= 9)\n data.save('data/gdb9_9nodes.sparsedataset')\n\n # data = SparseMolecularDataset()\n # data.generate('data/qm9_5k.smi', validation=0.00021, test=0.00021) # , filters=lambda x: x.GetNumAtoms() <= 9)\n # data.save('data/qm9_5k.sparsedataset')\n"
] | [
[
"numpy.random.shuffle",
"numpy.zeros",
"numpy.stack",
"numpy.linalg.eigh",
"numpy.count_nonzero",
"numpy.array",
"numpy.nonzero"
]
] |
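`_generate_AX` in the MolGAN row above derives per-atom degrees with `np.count_nonzero` and eigen-decomposes a Laplacian with `np.linalg.eigh`, storing the results as `data_Le` / `data_Lv`. The toy adjacency matrix below is invented for illustration; note also that the archived code subtracts the 1-D degree vector from `A` directly (NumPy broadcasting), whereas this sketch forms the conventional combinatorial Laplacian `diag(D) - A`.

```python
# Toy version of the degree / Laplacian / eigen-decomposition step from the
# MolGAN dataset row above. The 3-atom adjacency matrix (bond types stored as
# integers, as in _genA) is made up for the example.
import numpy as np

A = np.array([[0, 1, 0],
              [1, 0, 2],
              [0, 2, 0]], dtype=np.int32)

D = np.count_nonzero(A, axis=-1)   # number of bonds per atom
L = np.diag(D) - A                 # conventional graph Laplacian
Le, Lv = np.linalg.eigh(L)         # eigenvalues and eigenvectors
print(Le)
```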
YufengChenK/scVI-1 | [
"c05237c384c59f1fd783ee1f45e75d108bcabf4e"
] | [
"scvi/dataset/csv.py"
] | [
"from .dataset import GeneExpressionDataset\nimport pandas as pd\nimport numpy as np\nimport os\n\n\nclass CsvDataset(GeneExpressionDataset):\n r\"\"\" Loads a `.csv` file.\n\n Args:\n :filename: Name of the `.csv` file.\n :save_path: Save path of the dataset. Default: ``'data/'``.\n :url: Url of the remote dataset. Default: ``None``.\n :new_n_genes: Number of subsampled genes. Default: ``600``.\n :subset_genes: List of genes for subsampling. Default: ``None``.\n :compression: For on-the-fly decompression of on-disk data. If ‘infer’ and filepath_or_buffer\n is path-like, then detect compression from the following extensions: ‘.gz’, ‘.bz2’, ‘.zip’, or ‘.xz’\n (otherwise no decompression). If using ‘zip’, the ZIP file must contain only one data file to be read in.\n Default: ``None``.\n :batch_ids_file: Name of the `.csv` file with batch indices.\n File contains two columns. The first holds gene names and second\n holds batch indices - type int. The first row of the file is header.\n\n Examples:\n >>> # Loading a remote dataset\n >>> remote_url = \"https://www.ncbi.nlm.nih.gov/geo/download/?acc=GSE100866&format=file&file=\" \\\n ... \"GSE100866%5FCBMC%5F8K%5F13AB%5F10X%2DRNA%5Fumi%2Ecsv%2Egz\")\n >>> remote_csv_dataset = CsvDataset(\"GSE100866_CBMC_8K_13AB_10X-RNA_umi.csv.gz\", save_path='data/',\n ... compression='gzip', url=remote_url)\n >>> # Loading a local dataset\n >>> local_csv_dataset = CsvDataset(\"GSE100866_CBMC_8K_13AB_10X-RNA_umi.csv.gz\",\n ... save_path='data/', compression='gzip')\n\n \"\"\"\n\n def __init__(self, filename, save_path='data/', url=None, new_n_genes=600, subset_genes=None,\n compression=None, sep=',', gene_by_cell=True, labels_file=None,\n batch_ids_file=None):\n self.download_name = filename # The given csv file is\n self.save_path = save_path\n self.url = url\n self.compression = compression\n self.sep = sep\n self.gene_by_cell = gene_by_cell # Whether the original dataset is genes by cells\n self.labels_file = labels_file\n self.batch_ids_file = batch_ids_file\n\n data, gene_names, labels, cell_types, batch_ids = self.download_and_preprocess()\n\n super().__init__(\n *GeneExpressionDataset.get_attributes_from_matrix(\n data, labels=labels,\n batch_indices=batch_ids if batch_ids is not None else 0),\n gene_names=gene_names, cell_types=cell_types)\n\n self.subsample_genes(new_n_genes, subset_genes)\n\n def preprocess(self):\n print(\"Preprocessing dataset\")\n\n if self.gene_by_cell:\n data = pd.read_csv(os.path.join(self.save_path, self.download_name),\n sep=self.sep, index_col=0, compression=self.compression).T\n else:\n data = pd.read_csv(os.path.join(self.save_path, self.download_name),\n sep=self.sep, index_col=0, compression=self.compression)\n\n gene_names = np.array(data.columns, dtype=str)\n labels, cell_types, batch_ids = None, None, None\n if self.labels_file is not None:\n labels = pd.read_csv(os.path.join(self.save_path, self.labels_file), header=0, index_col=0)\n labels = labels.values\n cell_types = np.unique(labels)\n\n if self.batch_ids_file is not None:\n batch_ids = pd.read_csv(\n os.path.join(\n self.save_path, self.batch_ids_file), header=0, index_col=0)\n batch_ids = batch_ids.values\n\n data = data.values\n print(\"Finished preprocessing dataset\")\n return data, gene_names, labels, cell_types, batch_ids\n\n\nclass BreastCancerDataset(CsvDataset):\n def __init__(self, save_path='data/'):\n super().__init__(\"Layer2_BC_count_matrix-1.tsv\", save_path=save_path,\n url=\"http://www.spatialtranscriptomicsresearch.org/wp-content/\"\n 
\"uploads/2016/07/Layer2_BC_count_matrix-1.tsv\",\n sep='\\t', gene_by_cell=False)\n\n\nclass MouseOBDataset(CsvDataset):\n def __init__(self, save_path='data/'):\n super().__init__(\"Rep11_MOB_count_matrix-1.tsv\", save_path=save_path,\n url=\"http://www.spatialtranscriptomicsresearch.org/wp-content/uploads/\"\n \"2016/07/Rep11_MOB_count_matrix-1.tsv\",\n sep='\\t', gene_by_cell=False)\n"
] | [
[
"numpy.array",
"numpy.unique"
]
] |
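`CsvDataset.preprocess` in the scVI row above reads a gene-by-cell matrix with pandas, transposes it to cells-by-genes, and extracts the gene names with `np.array(data.columns, dtype=str)`. The sketch below runs just that slice of the logic on an in-memory CSV; the file contents and gene names are hypothetical.

```python
# Minimal stand-alone run of the read -> transpose -> gene_names steps from
# CsvDataset.preprocess above, using a hypothetical in-memory CSV.
import io
import numpy as np
import pandas as pd

csv_text = "gene,cell1,cell2\nGATA3,3,0\nFOXP3,1,5\n"       # toy gene-by-cell counts
data = pd.read_csv(io.StringIO(csv_text), index_col=0).T    # transpose to cells x genes
gene_names = np.array(data.columns, dtype=str)

print(gene_names)         # ['GATA3' 'FOXP3']
print(data.values.shape)  # (2, 2): 2 cells x 2 genes
```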
Holmeswww/Text_Infilling | [
"f63cd24bee5c62d7dedd8fb35c4e52aee20c39f3",
"f63cd24bee5c62d7dedd8fb35c4e52aee20c39f3"
] | [
"texar/modules/policies/policy_nets_test.py",
"texar/data/data_decoders.py"
] | [
"#\n\"\"\"\nTests policy nets.\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nimport tensorflow as tf\n\nfrom texar.modules.policies.policy_nets import CategoricalPolicyNet\n\nclass CategoricalPolicyNetTest(tf.test.TestCase):\n \"\"\"Tests :class:`texar.modules.CategoricalPolicyNet`.\n \"\"\"\n\n def test_categorical_policy(self):\n \"\"\"Tests logics.\n \"\"\"\n policy = CategoricalPolicyNet()\n\n inputs = tf.random_uniform(shape=[64, 4])\n outputs = policy(inputs=inputs)\n self.assertEqual(outputs['action'].shape, outputs['log_prob'].shape)\n self.assertIsInstance(\n outputs['distribution'], tf.distributions.Categorical)\n\nif __name__ == \"__main__\":\n tf.test.main()\n",
"# -*- coding: utf-8 -*-\n#\n\"\"\"\nHelper functions and classes for decoding text data which are used after\nreading raw text data.\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nimport numpy as np\n\nimport tensorflow as tf\nfrom tensorflow.contrib.slim.python.slim.data import data_decoder\n\nfrom texar.data.vocabulary import SpecialTokens\n\n# pylint: disable=too-many-instance-attributes, too-many-arguments,\n# pylint: disable=no-member, invalid-name\n\n__all__ = [\n \"ScalarDataDecoder\",\n \"TextDataDecoder\",\n \"VarUttTextDataDecoder\"\n]\n\ndef _append_token(token):\n return token is not None and token != \"\"\n\nclass ScalarDataDecoder(data_decoder.DataDecoder):\n \"\"\"A data decoder that decodes a scalar, e.g., int label or float number.\n\n The only operation is to cast the data into a specified data type.\n\n Args:\n dtype: A :tf_main:`tf DType <DType>` that data is cast into. Can be\n `tf.int32` or `tf.float32`.\n data_name (str): Name of the decoded data.\n \"\"\"\n\n def __init__(self, dtype=tf.int32, data_name=\"data\"):\n self._dtype = dtype\n self._data_name = data_name\n if self._data_name is None:\n self._data_name = \"data\"\n\n def __call__(self, data):\n outputs = self.decode(data, self.list_items())\n return dict(zip(self.list_items(), outputs))\n\n def decode(self, data, items):\n \"\"\"Decodes the data to return the tensors specified by the list of\n items.\n\n Args:\n data: The scalar data to decode.\n items: A list of strings, each of which is the name of the resulting\n tensors to retrieve.\n\n Returns:\n A list of tensors, each of which corresponds to each item.\n \"\"\"\n data = tf.reshape(data, shape=[])\n if data.dtype is tf.string:\n decoded_data = tf.string_to_number(data, out_type=self._dtype)\n else:\n decoded_data = tf.cast(data, self._dtype),\n outputs = {\n self._data_name: decoded_data\n }\n return [outputs[item] for item in items]\n\n def list_items(self):\n \"\"\"Returns the list of item names that the decoder can produce.\n\n Returns:\n A list of strings can be passed to :meth:`decode()`.\n \"\"\"\n return [self._data_name]\n\n @property\n def data_tensor_name(self):\n \"\"\"The name of the data tensor.\n \"\"\"\n return self._data_name\n\nclass TextDataDecoder(data_decoder.DataDecoder):\n \"\"\"A text data decoder that decodes raw text data.\n\n Operations include splitting on word or character level, truncation,\n inserting special tokens, mapping text units to indexes, etc.\n\n Args:\n split_level (str): The name of split level on which text sequence is\n split. Either \"word\" or \"char\".\n delimiter (str): The delimiter character used when splitting on word\n level.\n bos_token (str, optional): Special token added to the beginning of\n sequences. If it is `None` (default) or an empty string, no\n BOS token is added.\n eos_token (str, optional): Special tokan added to the end of\n sequences. If it is `None` (default) or an empty string, no EOS\n token is added.\n max_seq_length (int, optional): Maximum length of output sequences.\n Tokens exceeding the maximum length will be truncated. The length\n does not include any added bos_token and eos_token. If not\n given, no truncation is performed.\n token_to_id_map (optional): A\n :class:`~tensorflow.contrib.lookup.HashTable` instance that maps\n token strings to integer indexes. If not given, the decoder will\n not decode text into indexes. 
:attr:`bos_token` and\n :attr:`eos_token` (if given) should have entries in the\n :attr:`token_to_id_map` (if given).\n text_tensor_name (str): Name of the text tensor results. Used as a\n key to retrieve the text tensor.\n length_tensor_name (str): Name of the text length tensor results.\n text_id_tensor_name (str): Name of the text index tensor results.\n \"\"\"\n\n def __init__(self,\n split_level=\"word\",\n delimiter=\" \",\n bos_token=None,\n eos_token=None,\n max_seq_length=None,\n token_to_id_map=None,\n text_tensor_name=\"text\",\n length_tensor_name=\"length\",\n text_id_tensor_name=\"text_ids\"):\n self._split_level = split_level\n self._delimiter = delimiter\n self._bos_token = bos_token\n self._eos_token = eos_token\n self._max_seq_length = max_seq_length\n self._token_to_id_map = token_to_id_map\n self._text_tensor_name = text_tensor_name\n self._text_id_tensor_name = text_id_tensor_name\n self._length_tensor_name = length_tensor_name\n self._added_length = 0\n\n def __call__(self, data):\n outputs = self.decode(data, self.list_items())\n return dict(zip(self.list_items(), outputs))\n\n def decode(self, data, items):\n \"\"\"Decodes the data to return the tensors specified by the list of\n items.\n\n Args:\n data: The text data to decode.\n items: A list of strings, each of which is the name of the resulting\n tensors to retrieve.\n\n Returns:\n A list of tensors, each of which corresponds to each item. If\n `token_to_id_map` is not given when constructing the decoder,\n returns `None` for the token index item.\n \"\"\"\n # Split\n if self._split_level == \"word\":\n tokens = tf.string_split([data], delimiter=self._delimiter).values\n elif self._split_level == \"char\":\n raise NotImplementedError\n else:\n raise ValueError(\"Unknown split level: %s\" % self._split_level)\n\n # Truncate\n if self._max_seq_length is not None:\n tokens = tokens[:self._max_seq_length]\n\n # Add BOS/EOS tokens\n if _append_token(self._bos_token):\n tokens = tf.concat([[self._bos_token], tokens], axis=0)\n self._added_length += 1\n if _append_token(self._eos_token):\n tokens = tf.concat([tokens, [self._eos_token]], axis=0)\n self._added_length += 1\n\n # Map to index\n token_ids = None\n if self._token_to_id_map is not None:\n token_ids = self._token_to_id_map.lookup(tokens)\n\n outputs = {\n self._text_tensor_name: tokens,\n self._length_tensor_name: tf.size(tokens),\n self._text_id_tensor_name: token_ids\n }\n return [outputs[item] for item in items]\n\n def list_items(self):\n \"\"\"Returns the list of item names that the decoder can produce.\n\n Returns:\n A list of strings can be passed to :meth:`decode()`.\n \"\"\"\n return [self._text_tensor_name,\n self._length_tensor_name,\n self._text_id_tensor_name]\n\n @property\n def text_tensor_name(self):\n \"\"\"The name of text tensor.\n \"\"\"\n return self._text_tensor_name\n\n @text_tensor_name.setter\n def text_tensor_name(self, name):\n self._text_tensor_name = name\n\n @property\n def length_tensor_name(self):\n \"\"\"The name of length tensor.\n \"\"\"\n return self._length_tensor_name\n\n @length_tensor_name.setter\n def length_tensor_name(self, name):\n self._length_tensor_name = name\n\n @property\n def text_id_tensor_name(self):\n \"\"\"The name of text index tensor.\n \"\"\"\n return self._text_id_tensor_name\n\n @text_id_tensor_name.setter\n def text_id_tensor_name(self, name):\n self._text_id_tensor_name = name\n\n @property\n def added_length(self):\n \"\"\"The added text length due to appended bos and eos tokens.\n \"\"\"\n return 
self._added_length\n\nclass VarUttTextDataDecoder(data_decoder.DataDecoder):\n \"\"\"A text data decoder that decodes raw text data. Each data is considered\n to be multiple sentences concatenated by a delimiter.\n\n Operations include splitting on word or character level, truncation,\n inserting special tokens, mapping text units to indexes, etc.\n\n Args:\n split_level (str): The name of split level on which text sequence is\n split. Either \"word\" or \"char\".\n delimiter (str): The delimiter character used when splitting on word\n level.\n bos_token (str, optional): Special token added to the beginning of\n sequences. If it is `None` (default) or an empty string, no\n BOS token is added.\n eos_token (str, optional): Special tokan added to the end of\n sequences. If it is `None` (default) or an empty string, no EOS\n token is added.\n max_seq_length (int): Maximum length of each sequence.\n Tokens exceed the maximum length will be truncated. Additional\n padding will be done to ensure output sequence all reach this\n number. The length does not include any added bos_token and eos_\n token.\n max_utterance_cnt (int): Maximum number of sequences.\n Additional empty sentences will be added to\n ensure the respective dimension of the output tensor has size\n :attr:`max_utterance_cnt`. The output item named by\n :meth:`utterance_cnt_tensor_name` contains the actual number of\n utterance in the data.\n token_to_id_map (optional): A\n :class:`~tensorflow.contrib.lookup.HashTable` instance that maps\n token strings to integer indexes. If not given, the decoder will\n not decode text into indexes. :attr:`bos_token` and\n :attr:`eos_token` (if given) should have entries in the\n :attr:`token_to_id_map` (if given).\n text_tensor_name (str): Name of the text tensor results. Used as a\n key to retrieve the text tensor.\n length_tensor_name (str): Name of the text length tensor results.\n text_id_tensor_name (str): Name of the text index tensor results.\n \"\"\"\n\n def __init__(self,\n split_level=\"word\",\n delimiter=\" \",\n sentence_delimiter=\"|||\",\n bos_token=None,\n eos_token=None,\n max_seq_length=None,\n max_utterance_cnt=None,\n token_to_id_map=None,\n text_tensor_name=\"text\",\n length_tensor_name=\"length\",\n text_id_tensor_name=\"text_ids\",\n utterance_cnt_tensor_name=\"utterance_cnt\"):\n self._split_level = split_level\n self._delimiter = delimiter\n self._bos_token = bos_token\n self._eos_token = eos_token\n self._max_seq_length = max_seq_length\n self._token_to_id_map = token_to_id_map\n self._text_tensor_name = text_tensor_name\n self._text_id_tensor_name = text_id_tensor_name\n self._length_tensor_name = length_tensor_name\n self._utterance_cnt_tensor_name = utterance_cnt_tensor_name\n self._sentence_delimiter = sentence_delimiter\n self._max_utterance_cnt = max_utterance_cnt\n self._added_length = 0\n\n def __call__(self, data):\n outputs = self.decode(data, self.list_items())\n return dict(zip(self.list_items(), outputs))\n\n def decode(self, data, items): # pylint: disable=too-many-locals\n \"\"\"Decodes the data to return the tensors specified by the list of\n items.\n\n Args:\n data: The text data to decode.\n items: A list of strings, each of which is the name of the resulting\n tensors to retrieve.\n\n Returns:\n A list of tensors, each of which corresponds to each item. 
If\n `token_to_id_map` is not given when constructing the decoder,\n returns `None` for the token index item.\n \"\"\"\n\n sentences = tf.string_split([data],\n delimiter=self._sentence_delimiter).values\n\n # Truncate utterances\n if self._max_utterance_cnt:\n sentences = sentences[:self._max_utterance_cnt]\n utterance_cnt = tf.shape(sentences)[0]\n\n # Get (max) sentence length\n def _get_sent_length(s):\n raw_length = tf.size(\n tf.string_split([s], delimiter=self._delimiter).values)\n if self._max_seq_length:\n return tf.minimum(raw_length, self._max_seq_length)\n else:\n return raw_length\n\n raw_sent_length = tf.map_fn(\n _get_sent_length, sentences, dtype=tf.int32)\n sent_length = self._max_seq_length\n if not sent_length:\n sent_length = tf.reduce_max(raw_sent_length)\n if _append_token(self._eos_token):\n raw_sent_length += 1\n sent_length += 1\n self._added_length += 1\n if _append_token(self._bos_token):\n raw_sent_length += 1\n sent_length += 1\n self._added_length += 1\n\n def _trunc_and_pad(s, pad_token, max_length):\n if self._max_seq_length:\n s = s[:self._max_seq_length]\n if _append_token(self._bos_token):\n s = np.append([self._bos_token], s)\n if _append_token(self._eos_token):\n s = np.append(s, [self._eos_token])\n s = np.append(s, [pad_token]*(max_length-s.size))\n return s\n\n # Split each sentence to tokens, and pad them to a same length.\n # This is necessary to treat all sentences as a single tensor.\n split_sentences = tf.map_fn(\n lambda s: tf.py_func(\n _trunc_and_pad,\n [\n tf.string_split([s], delimiter=self._delimiter).values,\n SpecialTokens.PAD,\n sent_length\n ],\n tf.string),\n sentences, dtype=tf.string\n )\n\n split_sentences = tf.reshape(split_sentences,\n [utterance_cnt, sent_length])\n\n # Map to index\n token_ids = None\n if self._token_to_id_map is not None:\n token_ids = self._token_to_id_map.lookup(split_sentences)\n\n outputs = {\n self._text_tensor_name: split_sentences,\n self._length_tensor_name: raw_sent_length,\n self._utterance_cnt_tensor_name: tf.shape(sentences)[0],\n self._text_id_tensor_name: token_ids\n }\n return [outputs[item] for item in items]\n\n def list_items(self):\n \"\"\"Returns the list of item names that the decoder can produce.\n\n Returns:\n A list of strings can be passed to :meth:`decode()`.\n \"\"\"\n return [\n self._text_tensor_name,\n self._length_tensor_name,\n self._text_id_tensor_name,\n self._utterance_cnt_tensor_name\n ]\n\n @property\n def text_tensor_name(self):\n \"\"\"The name of text tensor.\n \"\"\"\n return self._text_tensor_name\n\n @text_tensor_name.setter\n def text_tensor_name(self, name):\n self._text_tensor_name = name\n\n @property\n def utterance_cnt_tensor_name(self):\n \"\"\"The name of the utterance count tensor.\n \"\"\"\n return self._utterance_cnt_tensor_name\n\n @property\n def length_tensor_name(self):\n \"\"\"The name of length tensor.\n \"\"\"\n return self._length_tensor_name\n\n @length_tensor_name.setter\n def length_tensor_name(self, name):\n self._length_tensor_name = name\n\n @property\n def text_id_tensor_name(self):\n \"\"\"The name of text index tensor.\n \"\"\"\n return self._text_id_tensor_name\n\n @text_id_tensor_name.setter\n def text_id_tensor_name(self, name):\n self._text_id_tensor_name = name\n\n @property\n def added_length(self):\n \"\"\"The added text length due to appended bos and eos tokens.\n \"\"\"\n return self._added_length\n"
] | [
[
"tensorflow.random_uniform",
"tensorflow.test.main"
],
[
"tensorflow.size",
"tensorflow.minimum",
"tensorflow.shape",
"tensorflow.reshape",
"tensorflow.reduce_max",
"tensorflow.map_fn",
"tensorflow.string_to_number",
"numpy.append",
"tensorflow.cast",
"tensorflow.concat",
"tensorflow.string_split"
]
] |
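`VarUttTextDataDecoder` in the Text_Infilling row above pads each utterance inside `tf.py_func` using a small NumPy helper (`_trunc_and_pad`). Below is a stand-alone sketch of that helper with the truncate / BOS / EOS / PAD steps spelled out; unlike the original it always adds the BOS and EOS tokens, and the token strings and length limits are made up for the example.

```python
# Stand-alone sketch of the _trunc_and_pad helper used by VarUttTextDataDecoder
# above: truncate, add BOS/EOS, then pad with a PAD token to a fixed length.
import numpy as np

def trunc_and_pad(tokens, max_seq_length, max_length,
                  bos="<BOS>", eos="<EOS>", pad="<PAD>"):
    s = np.asarray(tokens, dtype=object)[:max_seq_length]  # truncate
    s = np.append([bos], s)                                # prepend BOS token
    s = np.append(s, [eos])                                # append EOS token
    s = np.append(s, [pad] * (max_length - s.size))        # pad up to max_length
    return s

print(trunc_and_pad(["the", "quick", "fox"], max_seq_length=2, max_length=6))
# ['<BOS>' 'the' 'quick' '<EOS>' '<PAD>' '<PAD>']
```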
tramtran2/prlab_image_colorization | [
"3ec7f3ad60d6235c5bb232713f1b3ec5f06f4d67"
] | [
"sources/image_colorization/datasets/quantized_colors/utils.py"
] | [
"\"\"\"\nFunctions:\n def read_image(img_path, is_resize = True, width = 224, height = 224, interpolation = cv2.INTER_AREA)\n \n def cielab_color_space()\n def view_db_info(db_root, db_files, db_name)\n \n def compute_prior_prob(image_files, width, height, do_plot, pts_in_hull_path, prior_prob_path)\n def compute_prior_prob_v1(image_files, is_resize, width, height, do_plot, pts_in_hull_path, prior_prob_path, ab_hist_path):\n \n def compute_prior_prob_smoothed(prior_prob_path, prior_prob_smoothed_path, sigma, do_plot = True, verbose = 1)\n def compute_prior_factor(prior_prob_path, prior_prob_smoothed_path, prior_prob_factor_path, gamma = 0.5, alpha = 1, do_plot = True, verbose = 1)\n \nMain:\n def compute_prior_prob_export(db_root, db_file, db_name, column_image = \"image\", column_type = \"type\", process_types = [\"train\"], \n pts_in_hull_path = os.path.join(module_dir, \"data\", \"prior_prob_train_div2k.npy\").replace(\"\\\\\", \"/\"),\n export_prior_prob_path = None, \n export_ab_hist_path = None, \n is_resize = False, width = 112, height = 112, \n do_plot = True, verbose = 1, )\n \n def main()\n def main_index_data(**input_params)\n def main_compute_prior_prob(**input_params)\n def main_compute_prior_prob_smoothed(**input_params)\n def main_cielab_color_space()\n\"\"\"\nfrom __future__ import absolute_import, division, print_function\nimport click, os, pandas as pd, glob, tqdm, cv2, numpy as np, sys\n\nimport matplotlib.gridspec as gridspec\nimport matplotlib.pylab as plt\nfrom matplotlib.colors import LogNorm\n\nimport time\nfrom skimage import color\nfrom console_progressbar import ProgressBar\n\nimport sklearn.neighbors as nn\n\nfrom scipy.interpolate import interp1d\nfrom scipy.signal import gaussian, convolve\n\ndef read_image(img_path, is_resize = True, width = 224, height = 224, interpolation = cv2.INTER_AREA):\n \"\"\"\n Load img with opencv and reshape\n \"\"\"\n result = {}\n \n org_img_color = cv2.imread(img_path)\n if len(org_img_color.shape)==2: # grayscale\n org_img_color = np.dstack([org_img_color, org_img_color, org_img_color])\n else:\n org_img_color = org_img_color[:, :, ::-1] # RGB convert\n # if\n org_img_gray = cv2.imread(img_path, cv2.IMREAD_GRAYSCALE)\n org_img_Lab = cv2.cvtColor(org_img_color, cv2.COLOR_RGB2Lab)\n \n result.update(dict(org_img_color=org_img_color, org_img_gray=org_img_gray, org_img_Lab=org_img_Lab))\n\n if is_resize == True:\n res_img_color = cv2.resize(org_img_color, (width, height), interpolation=interpolation)\n res_img_gray = cv2.resize(org_img_gray, (width, height), interpolation=interpolation)\n res_img_Lab = cv2.cvtColor(res_img_color, cv2.COLOR_RGB2Lab)\n result.update(dict(res_img_color=res_img_color, res_img_gray=res_img_gray, res_img_Lab=res_img_Lab))\n # if\n\n return result\n# read_image\n\ndef compute_prior_prob(image_files, width, height, do_plot, pts_in_hull_path, prior_prob_path):\n \"\"\"\n Compute color prior probabilities for pts in hull\n Reference: https://github.com/foamliu/Colorful-Image-Colorization/blob/master/class_rebal.py\n Usage:\n df_data = pd.read_hdf(os.path.join(data_dir, \"preprocessing\", \"DIV2K\", \"div2k.hdf5\"), \"data\")\n list_types = [\"'train'\"]\n df_select_data = df_data.query(\"type in [\" + \",\".join(list_types) + \"]\")\n image_dir = os.path.join(dataset_dir, \"DIV2K\").replace(\"\\\\\", \"/\")\n\n image_files = image_dir + \"/\" + df_select_data[\"path\"].values\n image_files[0:3], len(image_files)\n\n info = dict(\n image_files = image_files,\n pts_in_hull_path = os.path.join(data_dir, 
\"colorization_richard_zhang\", \"pts_in_hull.npy\"),\n prior_prob_path = os.path.join(data_dir, \"preprocessing\", \"DIV2K\", \"prior_prob_train_div2k.npy\"),\n width = 112,\n height = 112,\n do_plot = True\n )\n locals().update(**info)\n prior_prob = compute_prior_prob(**info)\n \"\"\"\n # Load ab image\n X_ab = []\n for image_path in tqdm.tqdm(image_files):\n result = read_image(image_path, is_resize = True, width = width, height = height)\n X_ab.append(result[\"res_img_Lab\"][:, :, 1:])\n # for\n X_ab = np.array(X_ab)\n X_ab = X_ab - 128.0\n \n # Load the gamut points location\n q_ab = np.load(pts_in_hull_path)\n\n if do_plot:\n plt.figure(figsize=(8, 8))\n plt.title(\"ab quantize\")\n gs = gridspec.GridSpec(1, 1)\n ax = plt.subplot(gs[0])\n for i in range(q_ab.shape[0]):\n ax.scatter(q_ab[:, 0], q_ab[:, 1])\n ax.annotate(str(i), (q_ab[i, 0], q_ab[i, 1]), fontsize=6)\n ax.set_xlim([-110, 110])\n ax.set_ylim([-110, 110])\n # for\n # if\n \n npts, c, h, w = X_ab.shape\n X_a_ravel = np.ravel(X_ab[:, :, :, 0])\n X_b_ravel = np.ravel(X_ab[:, :, :, 1])\n X_ab_ravel = np.vstack((X_a_ravel, X_b_ravel)).T\n \n if do_plot:\n plt.title(\"Prior Distribution in ab space\\n\", fontsize=16)\n plt.hist2d(X_ab_ravel[:, 0], X_ab_ravel[:, 1], bins=100, density=True, norm=LogNorm(), cmap=plt.cm.jet)\n plt.xlim([-120, 120])\n plt.ylim([-120, 120])\n plt.xticks(fontsize=12)\n plt.yticks(fontsize=12)\n plt.xlabel(\"b channel\", fontsize = 14)\n plt.ylabel(\"a channel\", fontsize = 14)\n plt.colorbar()\n plt.show()\n plt.clf()\n plt.close()\n # if\n \n # Create nearest neighbord instance with index = q_ab\n NN = 1\n nearest = nn.NearestNeighbors(n_neighbors=NN, algorithm='ball_tree').fit(q_ab)\n # Find index of nearest neighbor for X_ab\n dists, ind = nearest.kneighbors(X_ab_ravel)\n\n # We now count the number of occurrences of each color\n ind = np.ravel(ind)\n counts = np.bincount(ind)\n idxs = np.nonzero(counts)[0]\n prior_prob = np.zeros((q_ab.shape[0]))\n \n prior_prob[idxs] = counts[idxs] \n \n # We turn this into a color probability\n prior_prob = prior_prob / (1.0 * np.sum(prior_prob))\n\n # Save\n if prior_prob_path is not None:\n save_dir = os.path.dirname(prior_prob_path)\n if save_dir != \"\" and os.path.exists(save_dir) == False: os.makedirs(save_dir)\n pts_in_hull_name = os.path.basename(pts_in_hull_path)\n safe_copy(pts_in_hull_path, os.path.join(save_dir, pts_in_hull_name))\n np.save(prior_prob_path, prior_prob)\n # if\n\n if do_plot:\n plt.hist(prior_prob, bins=100)\n plt.xlabel(\"Prior probability\")\n plt.ylabel(\"Frequency\")\n plt.yscale(\"log\")\n plt.show()\n # if\n \n return prior_prob\n pass\n# compute_prior_prob\n\ndef compute_prior_prob_v1(image_files, is_resize, width, height, do_plot, pts_in_hull_path, prior_prob_path, ab_hist_path):\n \"\"\"\n Compute color prior probabilities for pts in hull\n Reference: https://github.com/foamliu/Colorful-Image-Colorization/blob/master/class_rebal.py\n Usage:\n df_data = pd.read_hdf(os.path.join(dataset_dir, \"DIV2K\", \"div2k.hdf5\"), \"data\")\n list_types = [\"'train'\"]\n df_select_data = df_data.query(\"type in [\" + \",\".join(list_types) + \"]\")\n image_dir = os.path.join(dataset_dir, \"DIV2K\").replace(\"\\\\\", \"/\")\n\n image_files = image_dir + \"/\" + df_select_data[\"path\"].values\n image_files[0:3], len(image_files)\n\n info = dict(\n image_files = image_files,\n pts_in_hull_path = os.path.join(module_dir, \"data\", \"pts_in_hull.npy\"),\n prior_prob_path = os.path.join(module_dir, \"data\", \"prior_prob_train_div2k.npy\"),\n 
ab_hist_path = os.path.join(data_dir, \"preprocessing\", \"DIV2K\", \"ab_hist_train_div2k.npy\"),\n \n is_resize = False,\n width = 112,\n height = 112,\n \n do_plot = True\n )\n locals().update(**info)\n prior_prob = compute_prior_prob(**info)\n \"\"\"\n # Load ab image\n ab_hist = np.zeros((256, 256), dtype = np.uint64)\n for image_path in tqdm.tqdm(image_files):\n result = read_image(image_path, is_resize = is_resize, \n width = width, height = height)\n I_ab = result[\"res_img_Lab\"][:, :, 1:] if is_resize==True else result[\"org_img_Lab\"][:, :, 1:] \n I_ab = I_ab.reshape(-1, 2).astype(np.uint)\n\n (ab_vals, ab_cnts) = np.unique(I_ab, return_counts = True, axis=0)\n ab_hist[ab_vals[:, 0], ab_vals[:, 1]] += ab_cnts.astype(np.uint64)\n # for\n \n # Load the gamut points location\n q_ab = np.load(pts_in_hull_path)\n \n if do_plot:\n plt.figure(figsize=(8, 8))\n gs = gridspec.GridSpec(1, 1)\n ax = plt.subplot(gs[0])\n for i in range(q_ab.shape[0]):\n ax.scatter(q_ab[:, 0], q_ab[:, 1])\n ax.annotate(str(i), (q_ab[i, 0], q_ab[i, 1]), fontsize=6)\n ax.set_xlim([-110, 110])\n ax.set_ylim([-110, 110])\n # for\n \n plt.title(\"Prior Distribution in ab space\\n\", fontsize=16)\n plt.imshow(ab_hist.transpose(), norm=LogNorm(), cmap=plt.cm.jet, extent = (-128, 127, -128, 127), origin = \"uper\")\n plt.xlim([-120, 120])\n plt.ylim([-120, 120])\n plt.xticks(fontsize=12)\n plt.yticks(fontsize=12)\n plt.xlabel(\"b channel\", fontsize = 14)\n plt.ylabel(\"a channel\", fontsize = 14)\n plt.colorbar()\n plt.show()\n plt.clf()\n plt.close()\n # if\n \n X_ab_ravel_h = np.vstack(np.nonzero(ab_hist)).T\n X_ab_ravel_h = X_ab_ravel_h - 128\n \n # Create nearest neighbord instance with index = q_ab\n NN = 1\n nearest = nn.NearestNeighbors(n_neighbors=NN, algorithm='ball_tree').fit(q_ab)\n # Find index of nearest neighbor for X_ab\n dists, ind = nearest.kneighbors(X_ab_ravel_h)\n\n # We now count the number of occurrences of each color\n ind = np.ravel(ind)\n \n counts = np.zeros(np.max(ind) + 1, np.uint64)\n for idx, (a,b) in enumerate(X_ab_ravel_h):\n counts[ind[idx]] = counts[ind[idx]] + ab_hist[(a + 128,b + 128)]\n pass\n # for\n \n idxs = np.nonzero(counts)[0]\n prior_prob = np.zeros((q_ab.shape[0]))\n prior_prob[idxs] = counts[idxs]\n \n # We turn this into a color probability\n prior_prob = prior_prob / (1.0 * np.sum(prior_prob))\n\n # Save\n if prior_prob_path is not None:\n save_dir = os.path.dirname(prior_prob_path)\n if save_dir != \"\" and os.path.exists(save_dir) == False: os.makedirs(save_dir)\n np.save(prior_prob_path, prior_prob)\n # if\n \n # Save\n if ab_hist_path is not None:\n save_dir = os.path.dirname(ab_hist_path)\n if save_dir != \"\" and os.path.exists(save_dir) == False: os.makedirs(save_dir)\n np.save(ab_hist_path, ab_hist)\n # if\n\n if do_plot:\n plt.hist(prior_prob, bins=100)\n plt.xlabel(\"Prior probability\")\n plt.ylabel(\"Frequency\")\n plt.yscale(\"log\")\n plt.show()\n # if\n \n return prior_prob, ab_hist\n pass\n# compute_prior_prob_v1\n\ndef compute_prior_prob_smoothed(prior_prob_path, prior_prob_smoothed_path, sigma = 5, do_plot = True, verbose = 1):\n \"\"\"\n Interpolation on prior prob, next using interpolation to smoothness path, and normalize again\n Reference: https://github.com/foamliu/Colorful-Image-Colorization/blob/master/class_rebal.py\n Usage:\n info = dict(\n prior_prob_path = os.path.join(module_dir, \"data\", \"prior_prob_train_div2k.npy\"),\n prior_prob_smoothed_path = os.path.join(module_dir, \"data\", \"prior_prob_smoothed_train_div2k.npy\"),\n sigma = 
5,\n do_plot = True,\n verbose = True,\n )\n locals().update(**info)\n prior_prob_smoothed = compute_prior_prob_smoothed(**info)\n \"\"\"\n # load prior probability\n \n if verbose==1: print(\"\\n=== Compute Prior Probability Smoothed === \")\n prior_prob = np.load(prior_prob_path)\n \n # add an epsilon to prior prob to avoid 0 vakues and possible NaN\n prior_prob += 1E-3 * np.min(prior_prob)\n # renormalize\n prior_prob = prior_prob / (1.0 * np.sum(prior_prob))\n\n # Smooth with gaussian\n f = interp1d(np.arange(prior_prob.shape[0]), prior_prob)\n xx = np.linspace(0, prior_prob.shape[0] - 1, 1000)\n yy = f(xx)\n window = gaussian(2000, sigma) # 2000 pts in the window, sigma=5\n smoothed = convolve(yy, window / window.sum(), mode='same')\n fout = interp1d(xx, smoothed)\n prior_prob_smoothed = np.array([fout(i) for i in range(prior_prob.shape[0])])\n prior_prob_smoothed = prior_prob_smoothed / np.sum(prior_prob_smoothed)\n\n # Save\n if prior_prob_smoothed_path is not None:\n save_dir = os.path.dirname(prior_prob_smoothed_path)\n if save_dir != \"\" and os.path.exists(save_dir) == False: os.makedirs(save_dir)\n np.save(prior_prob_smoothed_path, prior_prob_smoothed)\n # if\n\n if do_plot:\n plt.figure(figsize=(20, 10))\n plt.subplot(2, 2, 1)\n plt.plot(prior_prob, label=\"prior_prob\")\n plt.plot(prior_prob_smoothed, \"g--\", label=\"prior_prob_smoothed\")\n plt.yscale(\"log\")\n plt.legend()\n \n plt.subplot(2, 2, 2)\n plt.plot(prior_prob, label=\"prior_prob\")\n plt.plot(xx, smoothed, \"r-\", label=\"smoothed\")\n plt.yscale(\"log\")\n plt.legend()\n \n plt.subplot(2, 2, 3)\n plt.hist(prior_prob, bins=100)\n plt.xlabel(\"Prior probability\")\n plt.ylabel(\"Frequency\")\n plt.yscale(\"log\")\n \n plt.subplot(2, 2, 4)\n plt.hist(prior_prob_smoothed, bins=100)\n plt.xlabel(\"Prior probability smoothed\")\n plt.ylabel(\"Frequency\")\n plt.yscale(\"log\")\n \n plt.show()\n # if\n \n return prior_prob_smoothed\n# compute_prior_prob_smoothed\n\ndef compute_prior_factor(prior_prob_path, prior_prob_smoothed_path, prior_prob_factor_path, gamma = 0.5, alpha = 1, do_plot = True, verbose = 1):\n \"\"\"\n Calculate prior probability factorization\n Reference: https://github.com/foamliu/Colorful-Image-Colorization/blob/master/class_rebal.py\n Usage:\n info = dict(\n prior_prob_path = os.path.join(data_dir, \"preprocessing\", \"DIV2K\", \"prior_prob_train_div2k.npy\"),\n prior_prob_smoothed_path = os.path.join(data_dir, \"preprocessing\", \"DIV2K\", \"prior_prob_smoothed_train_div2k.npy\"),\n prior_prob_factor_path = os.path.join(data_dir, \"preprocessing\", \"DIV2K\", \"prior_prob_factor_train_div2k.npy\"),\n gamma = 0.5,\n alpha = 1,\n do_plot = True,\n verbose = 1,\n )\n locals().update(**info)\n prior_factor = compute_prior_factor(**info)\n \"\"\"\n if verbose==1: print(\"\\n=== Compute Prior Factor === \")\n prior_prob = np.load(prior_prob_path)\n prior_prob_smoothed = np.load(prior_prob_smoothed_path)\n\n u = np.ones_like(prior_prob_smoothed)\n u = u / np.sum(1.0 * u)\n\n prior_factor = (1 - gamma) * prior_prob_smoothed + gamma * u\n prior_factor = np.power(prior_factor, -alpha)\n\n # renormalize\n prior_factor = prior_factor / (np.sum(prior_factor * prior_prob_smoothed))\n\n # Save\n if prior_prob_factor_path is not None:\n save_dir = os.path.dirname(prior_prob_factor_path)\n if save_dir != \"\" and os.path.exists(save_dir) == False: os.makedirs(save_dir)\n np.save(prior_prob_factor_path, prior_factor)\n # if\n \n if do_plot:\n plt.figure(figsize=(20, 10))\n plt.subplot(1, 3, 1)\n 
plt.hist(prior_prob)\n plt.xlabel(\"Prior probability\")\n plt.ylabel(\"Frequency\")\n plt.yscale(\"log\")\n \n plt.subplot(1, 3, 2)\n plt.hist(prior_prob_smoothed)\n plt.xlabel(\"Prior probability smoothed\")\n plt.ylabel(\"Frequency\")\n plt.yscale(\"log\")\n \n plt.subplot(1, 3, 3)\n plt.hist(prior_factor)\n plt.xlabel(\"Prior probability smoothed factor\")\n plt.ylabel(\"Frequency\")\n plt.yscale(\"log\")\n \n plt.show()\n # if\n\n return prior_factor\n# def\n\ndef cielab_color_space():\n print('SkImage:')\n start = time.time()\n L = [0] * 256 ** 3\n a = [0] * 256 ** 3\n b = [0] * 256 ** 3\n i = 0\n pb = ProgressBar(total=256, prefix='SkImage converting images', suffix='', decimals=3, length=50, fill='=')\n for r in range(256):\n for g in range(256):\n for bb in range(256):\n im = np.array((bb, g, r), np.uint8).reshape(1, 1, 3)\n color.rgb2lab(im) # transform it to LAB\n L[i] = im[0, 0, 0]\n a[i] = im[0, 0, 1]\n b[i] = im[0, 0, 2]\n i += 1\n # for\n # for\n pb.print_progress_bar(r)\n # for\n \n print(\"\")\n print(min(L), '<=L<=', max(L))\n print(min(a), '<=a<=', max(a))\n print(min(b), '<=b<=', max(b))\n end = time.time()\n elapsed = end - start\n print('elapsed: {} seconds.'.format(elapsed))\n ##############################################\n \n print('OpenCV:')\n start = time.time()\n L = [0] * 256 ** 3\n a = [0] * 256 ** 3\n b = [0] * 256 ** 3\n i = 0\n pb = ProgressBar(total=256, prefix='OpenCV converting images', suffix='', decimals=3, length=50, fill='=')\n for r in range(256):\n for g in range(256):\n for bb in range(256):\n im = np.array((bb, g, r), np.uint8).reshape(1, 1, 3)\n cv2.cvtColor(im, cv2.COLOR_BGR2LAB, im) # transform it to LAB\n L[i] = im[0, 0, 0]\n a[i] = im[0, 0, 1]\n b[i] = im[0, 0, 2]\n i += 1\n # for\n # for\n pb.print_progress_bar(r)\n # for\n\n print(\"\")\n print(min(L), '<=L<=', max(L))\n print(min(a), '<=a<=', max(a))\n print(min(b), '<=b<=', max(b))\n end = time.time()\n elapsed = end - start\n print('elapsed: {} seconds.'.format(elapsed))\n# cielab_color_space\n\ndef view_db_info(db_root, db_files, db_name):\n df_data = pd.read_hdf(db_files, key = db_name)\n\n print(\"Dataset info: \")\n print(\"+ Image Path: \", db_root)\n print(\"+ Index Path: \", db_files)\n print(\"+ Columns: \", df_data.keys())\n print(\"+ Rows: \", len(df_data))\n info_types = df_data.groupby(\"type\").count().reset_index()[[\"type\", \"image\"]].values\n print(\"+ Types: \\n\", info_types)\n print()\n# view_db_info\n\ndef compute_prior_prob_export(db_root, db_file, db_name, column_image = \"image\", column_type = \"type\", process_types = [\"train\"], \n pts_in_hull_path = \"prior_prob_train_div2k.npy\",\n export_prior_prob_path = None, \n export_ab_hist_path = None, \n is_resize = False, width = 112, height = 112, \n do_plot = True, verbose = 1, ): \n print(\"\\n=== Compute Prior Probability === \")\n df_data = pd.read_hdf(db_file, key = db_name)\n select_expr = f'{column_type} in [\"%s\"]'%('\", \"'.join(list(process_types)))\n df_select_data = df_data.query(select_expr)\n image_files = db_root + \"/\" + df_select_data[column_image].values\n \n if verbose==1: \n view_db_info(db_root, db_file, db_name)\n print(f'Select_expr: {select_expr}')\n print(f'Rows after select: {len(df_select_data)}')\n print()\n print(\"Images: \", image_files[0:5], \" ... 
\")\n print()\n print(\"Caluculate prior probability\")\n # if\n \n prior_prob, ab_hist = compute_prior_prob_v1(image_files = image_files, \n pts_in_hull_path = pts_in_hull_path, \n prior_prob_path = export_prior_prob_path,\n ab_hist_path = export_ab_hist_path,\n is_resize = is_resize, \n width = width, height = height, \n do_plot = do_plot)\n \n if verbose==1:\n print()\n print(f'prior_prob: shape={prior_prob.shape}')\n print(prior_prob)\n print()\n print(f'ab_hist: shape={ab_hist.shape}')\n print(ab_hist)\n # if\n \n return prior_prob, ab_hist\n# compute_prior_prob_export"
] | [
[
"matplotlib.pylab.xlim",
"numpy.save",
"numpy.sum",
"scipy.interpolate.interp1d",
"matplotlib.pylab.close",
"numpy.ones_like",
"matplotlib.pylab.hist",
"numpy.dstack",
"numpy.vstack",
"matplotlib.pylab.subplot",
"sklearn.neighbors.NearestNeighbors",
"matplotlib.pylab.title",
"scipy.signal.gaussian",
"matplotlib.pylab.figure",
"matplotlib.pylab.show",
"matplotlib.pylab.xlabel",
"matplotlib.pylab.yticks",
"pandas.read_hdf",
"matplotlib.pylab.ylim",
"matplotlib.colors.LogNorm",
"matplotlib.gridspec.GridSpec",
"numpy.linspace",
"numpy.nonzero",
"numpy.unique",
"numpy.load",
"numpy.bincount",
"numpy.zeros",
"matplotlib.pylab.ylabel",
"matplotlib.pylab.clf",
"numpy.arange",
"numpy.power",
"numpy.max",
"numpy.min",
"matplotlib.pylab.colorbar",
"matplotlib.pylab.legend",
"matplotlib.pylab.xticks",
"numpy.ravel",
"numpy.array",
"matplotlib.pylab.plot",
"matplotlib.pylab.yscale"
]
] |
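The utils.py record above builds an empirical prior over quantized ab chroma bins (compute_prior_prob / compute_prior_prob_v1), smooths it (compute_prior_prob_smoothed) and turns it into class-rebalancing weights (compute_prior_factor). A minimal self-contained sketch of that pipeline — assuming a synthetic ab sample, a plain 10-step grid in place of the repository's pts_in_hull.npy centers, and scipy.ndimage.gaussian_filter1d as a stand-in for the interp1d + Gaussian-window convolution — might look like:

# Sketch only: synthetic data and a toy bin grid, not the repository's values.
import numpy as np
import sklearn.neighbors as nn
from scipy.ndimage import gaussian_filter1d

grid = np.arange(-110, 120, 10)                          # illustrative 23-point grid per axis
q_ab = np.array([(a, b) for a in grid for b in grid])    # (529, 2) stand-in bin centers

rng = np.random.default_rng(0)
X_ab = rng.normal(loc=0.0, scale=25.0, size=(10000, 2)).clip(-110, 110)  # fake ab pixels

# Assign every ab pixel to its nearest bin center and count occurrences.
nearest = nn.NearestNeighbors(n_neighbors=1, algorithm="ball_tree").fit(q_ab)
_, ind = nearest.kneighbors(X_ab)
counts = np.bincount(ind.ravel(), minlength=q_ab.shape[0]).astype(float)
prior_prob = counts / counts.sum()

# Smooth over the 1-D bin index and renormalize, then build rebalancing weights.
gamma, alpha = 0.5, 1.0
prior_smoothed = gaussian_filter1d(prior_prob + 1e-6, sigma=5)
prior_smoothed /= prior_smoothed.sum()
uniform = np.full_like(prior_smoothed, 1.0 / prior_smoothed.size)
prior_factor = ((1 - gamma) * prior_smoothed + gamma * uniform) ** (-alpha)
prior_factor /= (prior_factor * prior_smoothed).sum()
print(prior_prob.shape, prior_smoothed.sum(), prior_factor.min())

The last normalization makes the expected weight under the smoothed prior equal to one, which is why the record divides by sum(prior_factor * prior_prob_smoothed) before saving.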
HarshCasper/mars | [
"4c12c968414d666c7a10f497bc22de90376b1932"
] | [
"mars/tensor/linalg/cholesky.py"
] | [
"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# Copyright 1999-2020 Alibaba Group Holding Ltd.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport numpy as np\nfrom numpy.linalg import LinAlgError\n\nfrom ...serialize import KeyField, BoolField\nfrom ... import opcodes as OperandDef\nfrom ...utils import check_chunks_unknown_shape\nfrom ...tiles import TilesError\nfrom ..operands import TensorHasInput, TensorOperand, TensorOperandMixin\nfrom ..datasource import tensor as astensor\nfrom ..core import TensorOrder\nfrom ..array_utils import as_same_device, device\n\n\nclass TensorCholesky(TensorHasInput, TensorOperandMixin):\n _op_type_ = OperandDef.CHOLESKY\n\n _input = KeyField('input')\n _lower = BoolField('lower')\n\n def __init__(self, lower=None, dtype=None, **kw):\n super().__init__(_lower=lower, _dtype=dtype, **kw)\n\n @property\n def lower(self):\n return self._lower\n\n def _set_inputs(self, inputs):\n super()._set_inputs(inputs)\n self._input = self._inputs[0]\n\n def __call__(self, a):\n return self.new_tensor([a], a.shape, order=TensorOrder.C_ORDER)\n\n @classmethod\n def tile(cls, op):\n from ..datasource.zeros import TensorZeros\n from ..base import TensorTranspose\n from ..utils import reverse_order\n from .dot import TensorDot\n from .solve_triangular import TensorSolveTriangular\n\n tensor = op.outputs[0]\n in_tensor = op.input\n check_chunks_unknown_shape([in_tensor], TilesError)\n if in_tensor.nsplits[0] != in_tensor.nsplits[1]:\n # all chunks on diagonal should be square\n nsplits = in_tensor.nsplits[0]\n in_tensor = in_tensor.rechunk([nsplits, nsplits])._inplace_tile()\n\n lower_chunks, upper_chunks = {}, {}\n for i in range(in_tensor.chunk_shape[0]):\n for j in range(in_tensor.chunk_shape[1]):\n if i < j:\n lower_chunk = TensorZeros(dtype=tensor.dtype).new_chunk(\n None, shape=(in_tensor.nsplits[0][i], in_tensor.nsplits[1][j]),\n index=(i, j), order=tensor.order)\n upper_chunk = TensorZeros(dtype=tensor.dtype).new_chunk(\n None, shape=(in_tensor.nsplits[1][j], in_tensor.nsplits[0][i]),\n index=(j, i), order=tensor.order)\n lower_chunks[lower_chunk.index] = lower_chunk\n upper_chunks[upper_chunk.index] = upper_chunk\n elif i == j:\n target = in_tensor.cix[i, j]\n if i > 0:\n prev_chunks = []\n for p in range(i):\n a, b = lower_chunks[i, p], upper_chunks[p, j]\n prev_chunk = TensorDot(dtype=tensor.dtype).new_chunk(\n [a, b], shape=(a.shape[0], b.shape[1]), order=tensor.order)\n prev_chunks.append(prev_chunk)\n\n cholesky_fuse_op = TensorCholeskyFuse()\n lower_chunk = cholesky_fuse_op.new_chunk([target] + prev_chunks,\n shape=target.shape, index=(i, j),\n order=tensor.order)\n else:\n lower_chunk = TensorCholesky(lower=True, dtype=tensor.dtype).new_chunk(\n [target], shape=target.shape, index=(i, j), order=tensor.order)\n\n upper_chunk = TensorTranspose(dtype=lower_chunk.dtype).new_chunk(\n [lower_chunk], shape=lower_chunk.shape[::-1],\n index=lower_chunk.index[::-1], order=reverse_order(lower_chunk.order))\n lower_chunks[lower_chunk.index] = lower_chunk\n 
upper_chunks[upper_chunk.index] = upper_chunk\n else:\n target = in_tensor.cix[j, i]\n if j > 0:\n prev_chunks = []\n for p in range(j):\n a, b = lower_chunks[j, p], upper_chunks[p, i]\n prev_chunk = TensorDot(dtype=tensor.dtype).new_chunk(\n [a, b], shape=(a.shape[0], b.shape[1]), order=tensor.order)\n prev_chunks.append(prev_chunk)\n cholesky_fuse_op = TensorCholeskyFuse(by_solve_triangular=True)\n upper_chunk = cholesky_fuse_op.new_chunk([target] + [lower_chunks[j, j]] + prev_chunks,\n shape=target.shape, index=(j, i),\n order=tensor.order)\n else:\n upper_chunk = TensorSolveTriangular(lower=True, dtype=tensor.dtype).new_chunk(\n [lower_chunks[j, j], target], shape=target.shape,\n index=(j, i), order=tensor.order)\n lower_chunk = TensorTranspose(dtype=upper_chunk.dtype).new_chunk(\n [upper_chunk], shape=upper_chunk.shape[::-1],\n index=upper_chunk.index[::-1], order=reverse_order(upper_chunk.order))\n lower_chunks[lower_chunk.index] = lower_chunk\n upper_chunks[upper_chunk.index] = upper_chunk\n\n new_op = op.copy()\n if op.lower:\n return new_op.new_tensors(op.inputs, tensor.shape, order=tensor.order,\n chunks=list(lower_chunks.values()), nsplits=in_tensor.nsplits)\n else:\n return new_op.new_tensors(op.inputs, tensor.shape, order=tensor.order,\n chunks=list(upper_chunks.values()), nsplits=in_tensor.nsplits)\n\n @classmethod\n def execute(cls, ctx, op):\n chunk = op.outputs[0]\n (a,), device_id, xp = as_same_device(\n [ctx[c.key] for c in op.inputs], device=op.device, ret_extra=True)\n\n with device(device_id):\n if xp is np:\n try:\n import scipy.linalg\n\n ctx[chunk.key] = scipy.linalg.cholesky(a, lower=op.lower)\n return\n except ImportError: # pragma: no cover\n pass\n\n r = xp.linalg.cholesky(a)\n if not chunk.op.lower:\n r = r.T.conj()\n\n ctx[chunk.key] = r\n\n\nclass TensorCholeskyFuse(TensorOperand, TensorOperandMixin):\n _op_type_ = OperandDef.CHOLESKY_FUSE\n\n _by_solve_triangular = BoolField('by_solve_triangular')\n\n def __init__(self, by_solve_triangular=None, **kw):\n super().__init__(_by_solve_triangular=by_solve_triangular, **kw)\n\n @property\n def by_solve_triangular(self):\n return self._by_solve_triangular\n\n @classmethod\n def _execute_by_cholesky(cls, inputs):\n import scipy.linalg\n\n target = inputs[0]\n return scipy.linalg.cholesky((target - sum(inputs[1:])), lower=True)\n\n @classmethod\n def _execute_by_solve_striangular(cls, inputs):\n import scipy.linalg\n\n target = inputs[0]\n lower = inputs[1]\n return scipy.linalg.solve_triangular(lower, (target - sum(inputs[2:])), lower=True)\n\n @classmethod\n def execute(cls, ctx, op):\n inputs = [ctx[c.key] for c in op.inputs]\n if op.by_solve_triangular:\n ret = cls._execute_by_solve_striangular(inputs)\n else:\n ret = cls._execute_by_cholesky(inputs)\n ctx[op.outputs[0].key] = ret\n\n\ndef cholesky(a, lower=False):\n \"\"\"\n Cholesky decomposition.\n\n Return the Cholesky decomposition, `L * L.H`, of the square matrix `a`,\n where `L` is lower-triangular and .H is the conjugate transpose operator\n (which is the ordinary transpose if `a` is real-valued). `a` must be\n Hermitian (symmetric if real-valued) and positive-definite. Only `L` is\n actually returned.\n\n Parameters\n ----------\n a : (..., M, M) array_like\n Hermitian (symmetric if all elements are real), positive-definite\n input matrix.\n lower : bool\n Whether to compute the upper or lower triangular Cholesky\n factorization. 
Default is upper-triangular.\n\n Returns\n -------\n L : (..., M, M) array_like\n Upper or lower-triangular Cholesky factor of `a`.\n\n Raises\n ------\n LinAlgError\n If the decomposition fails, for example, if `a` is not\n positive-definite.\n\n Notes\n -----\n\n Broadcasting rules apply, see the `mt.linalg` documentation for\n details.\n\n The Cholesky decomposition is often used as a fast way of solving\n\n .. math:: A \\\\mathbf{x} = \\\\mathbf{b}\n\n (when `A` is both Hermitian/symmetric and positive-definite).\n\n First, we solve for :math:`\\\\mathbf{y}` in\n\n .. math:: L \\\\mathbf{y} = \\\\mathbf{b},\n\n and then for :math:`\\\\mathbf{x}` in\n\n .. math:: L.H \\\\mathbf{x} = \\\\mathbf{y}.\n\n Examples\n --------\n >>> import mars.tensor as mt\n\n >>> A = mt.array([[1,-2j],[2j,5]])\n >>> A.execute()\n array([[ 1.+0.j, 0.-2.j],\n [ 0.+2.j, 5.+0.j]])\n >>> L = mt.linalg.cholesky(A, lower=True)\n >>> L.execute()\n array([[ 1.+0.j, 0.+0.j],\n [ 0.+2.j, 1.+0.j]])\n >>> mt.dot(L, L.T.conj()).execute() # verify that L * L.H = A\n array([[ 1.+0.j, 0.-2.j],\n [ 0.+2.j, 5.+0.j]])\n >>> A = [[1,-2j],[2j,5]] # what happens if A is only array_like?\n >>> mt.linalg.cholesky(A, lower=True).execute()\n array([[ 1.+0.j, 0.+0.j],\n [ 0.+2.j, 1.+0.j]])\n\n \"\"\"\n a = astensor(a)\n\n if a.ndim != 2:\n raise LinAlgError(f'{a.ndim}-dimensional array given. '\n 'Tensor must be two-dimensional')\n if a.shape[0] != a.shape[1]:\n raise LinAlgError('Input must be square')\n\n cho = np.linalg.cholesky(np.array([[1, 2], [2, 5]], dtype=a.dtype))\n\n op = TensorCholesky(lower=lower, dtype=cho.dtype)\n return op(a)\n"
] | [
[
"numpy.array",
"numpy.linalg.LinAlgError"
]
] |
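The mars record above never runs the factorization eagerly; TensorCholesky.tile wires chunks into a blocked Cholesky graph in which each diagonal block is a local Cholesky of A[i,i] minus the already-finished L[i,:i] contributions, and each off-diagonal block comes from a triangular solve against the diagonal factor. A dense NumPy/SciPy sketch of the same recurrence — no chunk graph, no lazy execution; the block size and test matrix are illustrative assumptions — could be:

# Sketch only: in-memory blocked Cholesky mirroring the update rules in tile().
import numpy as np
import scipy.linalg

def blocked_cholesky(a, block=2):
    n = a.shape[0]
    assert n % block == 0, "sketch assumes evenly divisible blocks"
    nb = n // block
    L = np.zeros_like(a, dtype=float)
    for j in range(nb):
        js = slice(j * block, (j + 1) * block)
        # Diagonal block: Cholesky of A[j,j] minus the finished L[j,:j] contributions.
        diag = a[js, js] - L[js, : j * block] @ L[js, : j * block].T
        L[js, js] = scipy.linalg.cholesky(diag, lower=True)
        for i in range(j + 1, nb):
            is_ = slice(i * block, (i + 1) * block)
            rhs = a[is_, js] - L[is_, : j * block] @ L[js, : j * block].T
            # Off-diagonal block via a triangular solve: L[j,j] @ L[i,j].T = rhs.T
            L[is_, js] = scipy.linalg.solve_triangular(L[js, js], rhs.T, lower=True).T
    return L

rng = np.random.default_rng(0)
m = rng.normal(size=(6, 6))
a = m @ m.T + 6 * np.eye(6)        # symmetric positive definite test matrix
L = blocked_cholesky(a, block=2)
print(np.allclose(L @ L.T, a))     # True

The transpose at the end of the solve mirrors the record's approach: it solves for the upper block first and obtains the lower block as its transpose (TensorSolveTriangular followed by TensorTranspose).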
hatrungduc/spark-nlp | [
"b38260543524507e34cbcb7fa2006923091634ad"
] | [
"python/tensorflow/lib/ner/embeddings_resolver.py"
] | [
"import shutil\nimport numpy as np\nimport plyvel\nimport os.path\nimport sys\nsys.path.append('../')\nfrom bert.modeling import *\nfrom bert.tokenization import *\nimport json\nimport os.path\nimport numpy as np\n\n\nclass TokenEmbeddings:\n def __init__(self, piece, is_word_start, vector):\n self.piece = piece\n self.is_word_start = is_word_start\n self.vector = vector\n \n @staticmethod\n def create_sentence(pieces, is_word_starts, embeddings):\n # Array of TokenEmbeddings\n return [TokenEmbeddings(piece, is_start, vector)\n for (piece, is_start, vector) in zip(pieces, is_word_starts, embeddings)]\n \n def __str__(self):\n return 'TokenEmbeddings({}, {}, [{}])'.format(self.piece, self.is_word_start, np.shape(self.vector))\n\n def __repr__(self):\n return self.__str__()\n\n\nclass EmbeddingsDbResolver:\n \n @staticmethod\n def get_index_name(prefix, dim):\n return prefix + '-' + str(dim)\n \n @staticmethod\n def read_from_file(glove_file, dim, index_file = 'embeddings_index', \n lowercase=False, clear_if_exists = False):\n \n full_index_file = EmbeddingsDbResolver.get_index_name(index_file, dim)\n try:\n resolver = None\n\n index_existed = os.path.exists(full_index_file) and not clear_if_exists\n resolver = EmbeddingsDbResolver(dim, index_file, lowercase, clear_if_exists)\n\n if not index_existed:\n resolver.read_glove(glove_file)\n\n return resolver\n except:\n if resolver and resolver.db:\n resolver.close()\n \n raise()\n \n def read_glove(self, glove_file):\n portion = 500000\n print('reading file: ', glove_file)\n wb = None\n with open(glove_file, encoding='utf-8') as f:\n for num, line in enumerate(f):\n items = line.split(' ')\n word = items[0]\n vector = [float(x) for x in items[1:]]\n if num % portion == portion - 1:\n print('read lines: {}'.format(num))\n wb.write()\n wb = None\n \n if not wb:\n wb = self.db.write_batch()\n\n self.add_vector(word, vector, wb)\n if wb:\n wb.write()\n \n def __init__(self, dim, index_file = 'embeddings_index', lowercase = False, clear_if_exists=False): \n full_index_file = EmbeddingsDbResolver.get_index_name(index_file, dim)\n \n if clear_if_exists and os.path.exists(full_index_file):\n shutil.rmtree(db_index)\n \n dummy_added = False\n self.db = plyvel.DB(full_index_file, create_if_missing=True)\n self.add_vector(\"__oov__\", [0.] * dim)\n self.lowercase = lowercase\n \n def get_embeddings(self, word):\n word = word.strip()\n if self.lowercase:\n word = word.lower()\n \n result = self.db.get(word.encode()) or self.db.get('__oov__'.encode())\n return np.frombuffer(result)\n \n def resolve_sentence(self, sentence):\n \"\"\"\n sentence - array of words\n \"\"\"\n embeddings = list([self.get_embeddings(word) for word in sentence])\n is_word_start = [True] * len(sentence)\n \n return TokenEmbeddings.create_sentence(sentence, is_word_start, embeddings)\n\n \n def add_vector(self, word, vector, wb = None):\n array = np.array(vector)\n if wb:\n wb.put(word.encode(), array.tobytes())\n else:\n self.db.put(word.encode(), array.tobytes())\n \n def close(self):\n self.db.close()\n \n\nclass BertEmbeddingsResolver:\n \n def __init__(self, model_folder, max_length = 256, lowercase = True):\n \n # 1. Create tokenizer\n self.max_length = max_length\n vocab_file = os.path.join(model_folder, 'vocab.txt')\n self.tokenizer = FullTokenizer(vocab_file, do_lower_case = lowercase)\n \n # 2. Read Config\n config_file = os.path.join(model_folder, 'bert_config.json') \n self.config = BertConfig.from_json_file(config_file)\n \n # 3. 
Create Model\n self.session = tf.Session()\n self.token_ids_op = tf.placeholder(tf.int32, shape=(None, max_length), name='token_ids')\n self.model = BertModel(config = self.config, \n is_training = False, \n input_ids = self.token_ids_op, \n use_one_hot_embeddings = False)\n \n # 4. Restore Trained Model\n self.saver = tf.train.Saver()\n ckpt_file = os.path.join(model_folder, 'bert_model.ckpt')\n self.saver.restore(self.session, ckpt_file)\n \n hidden_layers = self.config.num_hidden_layers\n self.embeddings_op = tf.get_default_graph().get_tensor_by_name(\n \"bert/encoder/Reshape_{}:0\".format(hidden_layers + 1))\n \n def tokenize_sentence(self, tokens, add_service_tokens = True):\n result = []\n is_word_start = []\n for token in tokens:\n pieces = self.tokenizer.tokenize(token)\n result.extend(pieces)\n starts = [False] * len(pieces)\n starts[0] = True\n is_word_start.extend(starts)\n\n if add_service_tokens:\n if len(result) > self.max_length - 2:\n result = result[:self.max_length -2]\n is_word_start = is_word_start[:self.max_length -2]\n \n result = ['[CLS]'] + result + ['[SEP]']\n is_word_start = [False] + is_word_start + [False]\n else:\n if len(result) > self.max_length:\n result = result[:self.max_length]\n is_word_start = is_word_start[:self.max_length]\n \n return (result, is_word_start)\n\n def resolve_sentences(self, sentences):\n batch_is_word_start = []\n batch_token_ids = []\n batch_tokens = []\n \n for sentence in sentences:\n tokens, is_word_start = self.tokenize_sentence(sentence)\n token_ids = self.tokenizer.convert_tokens_to_ids(tokens)\n to_input = np.pad(token_ids, [(0, self.max_length - len(token_ids))], mode='constant')\n batch_token_ids.append(to_input)\n batch_tokens.append(tokens)\n batch_is_word_start.append(is_word_start)\n\n embeddings = self.session.run(self.embeddings_op, feed_dict = {self.token_ids_op: batch_token_ids})\n \n result = []\n for i in range(len(sentences)):\n tokens = batch_tokens[i]\n is_word_start = batch_is_word_start[i]\n item_embeddings = embeddings[i, :len(tokens), :]\n\n resolved = TokenEmbeddings.create_sentence(tokens, is_word_start, item_embeddings)\n result.append(resolved)\n \n return result\n\n \n def resolve_sentence(self, sentence):\n tokens, is_word_start = self.tokenize_sentence(sentence)\n \n token_ids = self.tokenizer.convert_tokens_to_ids(tokens)\n to_input = np.pad(token_ids, [(0, self.max_length - len(token_ids))], mode='constant')\n to_input = to_input.reshape((1, self.max_length))\n\n embeddings = self.session.run(self.embeddings_op, feed_dict = {self.token_ids_op: to_input})\n embeddings = np.squeeze(embeddings)\n embeddings = embeddings[:len(token_ids), :]\n\n return TokenEmbeddings.create_sentence(tokens, is_word_start, embeddings)\n "
] | [
[
"numpy.array",
"numpy.squeeze",
"numpy.shape",
"numpy.frombuffer"
]
] |
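The spark-nlp record above keeps a per-piece is_word_start flag so that sub-word vectors produced by BERT's WordPiece tokenization can later be re-aligned with the original tokens. A toy sketch of that bookkeeping — using a hypothetical 3-character splitter and random vectors instead of FullTokenizer and a BERT session, and with mean pooling shown only as one possible way a caller might use the flags (the record itself just stores them) — might be:

# Sketch only: toy tokenizer and random vectors, not the record's BERT pipeline.
import numpy as np

def toy_piece_tokenizer(word):
    # Stand-in for FullTokenizer.tokenize: split words into 3-character pieces.
    return [word[i:i + 3] for i in range(0, len(word), 3)] or [word]

def tokenize_sentence(words):
    pieces, is_word_start = [], []
    for w in words:
        ps = toy_piece_tokenizer(w)
        pieces.extend(ps)
        is_word_start.extend([True] + [False] * (len(ps) - 1))
    return pieces, is_word_start

def pool_to_words(piece_vectors, is_word_start):
    # Average each word's piece vectors back into one vector per original word.
    word_vectors, current = [], []
    for vec, start in zip(piece_vectors, is_word_start):
        if start and current:
            word_vectors.append(np.mean(current, axis=0))
            current = []
        current.append(vec)
    if current:
        word_vectors.append(np.mean(current, axis=0))
    return np.stack(word_vectors)

words = ["colorless", "green", "ideas"]
pieces, starts = tokenize_sentence(words)
vectors = np.random.default_rng(0).normal(size=(len(pieces), 4))  # fake embeddings
print(pieces, starts)
print(pool_to_words(vectors, starts).shape)   # (3, 4): one vector per word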
colibri-coruscans/pyGSTi | [
"da54f4abf668a28476030528f81afa46a1fbba33"
] | [
"pygsti/algorithms/germselection.py"
] | [
"\"\"\"\nFunctions for selecting a complete set of germs for a GST analysis.\n\"\"\"\n#***************************************************************************************************\n# Copyright 2015, 2019 National Technology & Engineering Solutions of Sandia, LLC (NTESS).\n# Under the terms of Contract DE-NA0003525 with NTESS, the U.S. Government retains certain rights\n# in this software.\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except\n# in compliance with the License. You may obtain a copy of the License at\n# http://www.apache.org/licenses/LICENSE-2.0 or in the LICENSE file in the root pyGSTi directory.\n#***************************************************************************************************\n\nimport warnings as _warnings\n\nimport numpy as _np\nimport numpy.linalg as _nla\n\nfrom pygsti.algorithms import grasp as _grasp\nfrom pygsti.algorithms import scoring as _scoring\nfrom pygsti import circuits as _circuits\nfrom pygsti import baseobjs as _baseobjs\nfrom pygsti.tools import mpitools as _mpit\n\nFLOATSIZE = 8 # in bytes: TODO: a better way\n\n\ndef find_germs(target_model, randomize=True, randomization_strength=1e-2,\n num_gs_copies=5, seed=None, candidate_germ_counts=None,\n candidate_seed=None, force=\"singletons\", algorithm='greedy',\n algorithm_kwargs=None, mem_limit=None, comm=None,\n profiler=None, verbosity=1):\n \"\"\"\n Generate a germ set for doing GST with a given target model.\n\n This function provides a streamlined interface to a variety of germ\n selection algorithms. It's goal is to provide a method that typical users\n can run by simply providing a target model and leaving all other settings\n at their default values, while providing flexibility for users desiring\n more control to fine tune some of the general and algorithm-specific\n details.\n\n Currently, to break troublesome degeneracies and provide some confidence\n that the chosen germ set is amplificationally complete (AC) for all\n models in a neighborhood of the target model (rather than only the\n target model), an ensemble of models with random unitary perturbations\n to their gates must be provided or generated.\n\n Parameters\n ----------\n target_model : Model or list of Model\n The model you are aiming to implement, or a list of models that are\n copies of the model you are trying to implement (either with or\n without random unitary perturbations applied to the models).\n\n randomize : bool, optional\n Whether or not to add random unitary perturbations to the model(s)\n provided.\n\n randomization_strength : float, optional\n The size of the random unitary perturbations applied to gates in the\n model. See :meth:`~pygsti.objects.Model.randomize_with_unitary`\n for more details.\n\n num_gs_copies : int, optional\n The number of copies of the original model that should be used.\n\n seed : int, optional\n Seed for generating random unitary perturbations to models. Also\n passed along to stochastic germ-selection algorithms.\n\n candidate_germ_counts : dict, optional\n A dictionary of *germ_length* : *count* key-value pairs, specifying\n the germ \"candidate list\" - a list of potential germs to draw from.\n *count* is either an integer specifying the number of random germs\n considered at the given *germ_length* or the special values `\"all upto\"`\n that considers all of the of all non-equivalent germs of length up to\n the corresponding *germ_length*. 
If None, all germs of up to length\n 6 are used, the equivalent of `{6: 'all upto'}`.\n\n candidate_seed : int, optional\n A seed value used when randomly selecting candidate germs. For each\n germ length being randomly selected, the germ length is added to\n the value of `candidate_seed` to get the actual seed used.\n\n force : str or list, optional\n A list of Circuits which *must* be included in the final germ set.\n If set to the special string \"singletons\" then all length-1 strings will\n be included. Seting to None is the same as an empty list.\n\n algorithm : {'greedy', 'grasp', 'slack'}, optional\n Specifies the algorithm to use to generate the germ set. Current\n options are:\n 'greedy'\n Add germs one-at-a-time until the set is AC, picking the germ that\n improves the germ-set score by the largest amount at each step. See\n :func:`find_germs_breadthfirst` for more details.\n 'grasp'\n Use GRASP to generate random greedy germ sets and then locally\n optimize them. See :func:`find_germs_grasp` for more\n details.\n 'slack'\n From a initial set of germs, add or remove a germ at each step in\n an attempt to improve the germ-set score. Will allow moves that\n degrade the score in an attempt to escape local optima as long as\n the degredation is within some specified amount of \"slack\". See\n :func:`find_germs_integer_slack` for more details.\n\n algorithm_kwargs : dict\n Dictionary of ``{'keyword': keyword_arg}`` pairs providing keyword\n arguments for the specified `algorithm` function. See the documentation\n for functions referred to in the `algorithm` keyword documentation for\n what options are available for each algorithm.\n\n mem_limit : int, optional\n A rough memory limit in bytes which restricts the amount of intermediate\n values that are computed and stored.\n\n comm : mpi4py.MPI.Comm, optional\n When not None, an MPI communicator for distributing the computation\n across multiple processors.\n\n profiler : Profiler, optional\n A profiler object used for to track timing and memory usage.\n\n verbosity : int, optional\n The verbosity level of the :class:`~pygsti.objects.VerbosityPrinter`\n used to print log messages.\n\n Returns\n -------\n list of Circuit\n A list containing the germs making up the germ set.\n \"\"\"\n printer = _baseobjs.VerbosityPrinter.create_printer(verbosity, comm)\n modelList = _setup_model_list(target_model, randomize,\n randomization_strength, num_gs_copies, seed)\n gates = list(target_model.operations.keys())\n availableGermsList = []\n if candidate_germ_counts is None: candidate_germ_counts = {6: 'all upto'}\n for germLength, count in candidate_germ_counts.items():\n if count == \"all upto\":\n availableGermsList.extend(_circuits.list_all_circuits_without_powers_and_cycles(\n gates, max_length=germLength))\n else:\n seed = None if candidate_seed is None else candidate_seed + germLength\n availableGermsList.extend(_circuits.list_random_circuits_onelen(\n gates, germLength, count, seed=seed))\n\n if algorithm_kwargs is None:\n # Avoid danger of using empty dict for default value.\n algorithm_kwargs = {}\n\n if algorithm == 'greedy':\n printer.log('Using greedy algorithm.', 1)\n # Define defaults for parameters that currently have no default or\n # whose default we want to change.\n default_kwargs = {\n 'germs_list': availableGermsList,\n 'randomize': False,\n 'seed': seed,\n 'verbosity': max(0, verbosity - 1),\n 'force': force,\n 'score_func': 'all',\n 'comm': comm,\n 'mem_limit': mem_limit,\n 'profiler': profiler\n }\n for key in 
default_kwargs:\n if key not in algorithm_kwargs:\n algorithm_kwargs[key] = default_kwargs[key]\n germList = find_germs_breadthfirst(model_list=modelList,\n **algorithm_kwargs)\n if germList is not None:\n germsetScore = compute_germ_set_score(\n germList, neighborhood=modelList,\n score_func=algorithm_kwargs['score_func'])\n printer.log('Constructed germ set:', 1)\n printer.log(str([germ.str for germ in germList]), 1)\n printer.log('Score: {}'.format(germsetScore), 1)\n elif algorithm == 'grasp':\n printer.log('Using GRASP algorithm.', 1)\n # Define defaults for parameters that currently have no default or\n # whose default we want to change.\n default_kwargs = {\n 'alpha': 0.1, # No real reason for setting this value of alpha.\n 'germs_list': availableGermsList,\n 'randomize': False,\n 'seed': seed,\n 'verbosity': max(0, verbosity - 1),\n 'force': force,\n 'return_all': False,\n 'score_func': 'all',\n }\n for key in default_kwargs:\n if key not in algorithm_kwargs:\n algorithm_kwargs[key] = default_kwargs[key]\n germList = find_germs_grasp(model_list=modelList,\n **algorithm_kwargs)\n printer.log('Constructed germ set:', 1)\n\n if algorithm_kwargs['return_all'] and germList[0] is not None:\n germsetScore = compute_germ_set_score(\n germList[0], neighborhood=modelList,\n score_func=algorithm_kwargs['score_func'])\n printer.log(str([germ.str for germ in germList[0]]), 1)\n printer.log('Score: {}'.format(germsetScore))\n elif not algorithm_kwargs['return_all'] and germList is not None:\n germsetScore = compute_germ_set_score(germList,\n neighborhood=modelList)\n printer.log(str([germ.str for germ in germList]), 1)\n printer.log('Score: {}'.format(germsetScore), 1)\n elif algorithm == 'slack':\n printer.log('Using slack algorithm.', 1)\n # Define defaults for parameters that currently have no default or\n # whose default we want to change.\n default_kwargs = {\n 'germs_list': availableGermsList,\n 'randomize': False,\n 'seed': seed,\n 'verbosity': max(0, verbosity - 1),\n 'force': force,\n 'score_func': 'all',\n }\n if ('slack_frac' not in algorithm_kwargs\n and 'fixed_slack' not in algorithm_kwargs):\n algorithm_kwargs['slack_frac'] = 0.1\n for key in default_kwargs:\n if key not in algorithm_kwargs:\n algorithm_kwargs[key] = default_kwargs[key]\n germList = find_germs_integer_slack(modelList,\n **algorithm_kwargs)\n if germList is not None:\n germsetScore = compute_germ_set_score(\n germList, neighborhood=modelList,\n score_func=algorithm_kwargs['score_func'])\n printer.log('Constructed germ set:', 1)\n printer.log(str([germ.str for germ in germList]), 1)\n printer.log('Score: {}'.format(germsetScore), 1)\n else:\n raise ValueError(\"'{}' is not a valid algorithm \"\n \"identifier.\".format(algorithm))\n\n return germList\n\n\ndef compute_germ_set_score(germs, target_model=None, neighborhood=None,\n neighborhood_size=5,\n randomization_strength=1e-2, score_func='all',\n op_penalty=0.0, l1_penalty=0.0):\n \"\"\"\n Calculate the score of a germ set with respect to a model.\n\n More precisely, this function computes the maximum score (roughly equal\n to the number of amplified parameters) for a cloud of models.\n If `target_model` is given, it serves as the center of the cloud,\n otherwise the cloud must be supplied directly via `neighborhood`.\n\n\n Parameters\n ----------\n germs : list\n The germ set\n\n target_model : Model, optional\n The target model, used to generate a neighborhood of randomized models.\n\n neighborhood : list of Models, optional\n The \"cloud\" of models for which 
scores are computed. If not None, this\n overrides `target_model`, `neighborhood_size`, and `randomization_strength`.\n\n neighborhood_size : int, optional\n Number of randomized models to construct around `target_model`.\n\n randomization_strength : float, optional\n Strength of unitary randomizations, as passed to :method:`target_model.randomize_with_unitary`.\n\n score_func : {'all', 'worst'}\n Sets the objective function for scoring the eigenvalues. If 'all',\n score is ``sum(1/input_array)``. If 'worst', score is ``1/min(input_array)``.\n\n op_penalty : float, optional\n Coefficient for a penalty linear in the sum of the germ lengths.\n\n l1_penalty : float, optional\n Coefficient for a penalty linear in the number of germs.\n\n Returns\n -------\n CompositeScore\n The maximum score for `germs`, indicating how many parameters it amplifies.\n \"\"\"\n def score_fn(x): return _scoring.list_score(x, score_func=score_func)\n if neighborhood is None:\n neighborhood = [target_model.randomize_with_unitary(randomization_strength)\n for n in range(neighborhood_size)]\n scores = [compute_composite_germ_set_score(score_fn, model=model,\n partial_germs_list=germs,\n op_penalty=op_penalty,\n l1_penalty=l1_penalty)\n for model in neighborhood]\n\n return max(scores)\n\n\ndef _get_model_params(model_list):\n \"\"\"\n Get the number of gates and gauge parameters of the models in a list.\n\n Also verifies all models have the same number of gates and gauge parameters.\n\n Parameters\n ----------\n model_list : list of Model\n A list of models for which you want an AC germ set.\n\n Returns\n -------\n reducedModelList : list of Model\n The original list of models with SPAM removed\n numGaugeParams : int\n The number of non-SPAM gauge parameters for all models.\n numNonGaugeParams : int\n The number of non-SPAM non-gauge parameters for all models.\n numOps : int\n The number of gates for all models.\n\n Raises\n ------\n ValueError\n If the number of gauge parameters or gates varies among the models.\n \"\"\"\n # We don't care about SPAM, since it can't be amplified.\n reducedModelList = [_remove_spam_vectors(model)\n for model in model_list]\n\n # All the models should have the same number of parameters and gates, but\n # let's be paranoid here for the time being and make sure.\n numGaugeParamsList = [reducedModel.num_gauge_params\n for reducedModel in reducedModelList]\n numGaugeParams = numGaugeParamsList[0]\n if not all([numGaugeParams == otherNumGaugeParams\n for otherNumGaugeParams in numGaugeParamsList[1:]]):\n raise ValueError(\"All models must have the same number of gauge \"\n \"parameters!\")\n\n numNonGaugeParamsList = [reducedModel.num_nongauge_params\n for reducedModel in reducedModelList]\n numNonGaugeParams = numNonGaugeParamsList[0]\n if not all([numNonGaugeParams == otherNumNonGaugeParams\n for otherNumNonGaugeParams in numNonGaugeParamsList[1:]]):\n raise ValueError(\"All models must have the same number of non-gauge \"\n \"parameters!\")\n\n numOpsList = [len(reducedModel.operations)\n for reducedModel in reducedModelList]\n numOps = numOpsList[0]\n if not all([numOps == otherNumOps\n for otherNumOps in numOpsList[1:]]):\n raise ValueError(\"All models must have the same number of gates!\")\n\n return reducedModelList, numGaugeParams, numNonGaugeParams, numOps\n\n\ndef _setup_model_list(model_list, randomize, randomization_strength,\n num_copies, seed):\n \"\"\"\n Sets up a list of randomize models (helper function).\n \"\"\"\n if not isinstance(model_list, (list, tuple)):\n 
model_list = [model_list]\n if len(model_list) > 1 and num_copies is not None:\n _warnings.warn(\"Ignoring num_copies={} since multiple models were \"\n \"supplied.\".format(num_copies))\n\n if randomize:\n model_list = randomize_model_list(model_list, randomization_strength,\n num_copies, seed)\n\n return model_list\n\n\ndef compute_composite_germ_set_score(score_fn, threshold_ac=1e6, init_n=1,\n partial_deriv_dagger_deriv=None, model=None,\n partial_germs_list=None, eps=None, num_gauge_params=None,\n op_penalty=0.0, germ_lengths=None, l1_penalty=0.0):\n \"\"\"\n Compute the score for a germ set when it is not AC against a model.\n\n Normally scores computed for germ sets against models for which they are\n not AC will simply be astronomically large. This is fine if AC is all you\n care about, but not so useful if you want to compare partial germ sets\n against one another to see which is closer to being AC. This function\n will see if the germ set is AC for the parameters corresponding to the\n largest `N` eigenvalues for increasing `N` until it finds a value of `N`\n for which the germ set is not AC or all the non gauge parameters are\n accounted for and report the value of `N` as well as the score.\n This allows partial germ set scores to be compared against one-another\n sensibly, where a larger value of `N` always beats a smaller value of `N`,\n and ties in the value of `N` are broken by the score for that value of `N`.\n\n Parameters\n ----------\n score_fn : callable\n A function that takes as input a list of sorted eigenvalues and returns\n a score for the partial germ set based on those eigenvalues, with lower\n scores indicating better germ sets. Usually some flavor of\n :func:`~pygsti.algorithms.scoring.list_score`.\n\n threshold_ac : float, optional\n Value which the score (before penalties are applied) must be lower than\n for the germ set to be considered AC.\n\n init_n : int\n The number of largest eigenvalues to begin with checking.\n\n partial_deriv_dagger_deriv : numpy.array, optional\n Array with three axes, where the first axis indexes individual germs\n within the partial germ set and the remaining axes index entries in the\n positive square of the Jacobian of each individual germ's parameters\n with respect to the model parameters.\n If this array is not supplied it will need to be computed from\n `germs_list` and `model`, which will take longer, so it is recommended\n to precompute this array if this routine will be called multiple times.\n\n model : Model, optional\n The model against which the germ set is to be scored. Not needed if\n `partial_deriv_dagger_deriv` is provided.\n\n partial_germs_list : list of Circuit, optional\n The list of germs in the partial germ set to be evaluated. Not needed\n if `partial_deriv_dagger_deriv` (and `germ_lengths` when\n ``op_penalty > 0``) are provided.\n\n eps : float, optional\n Used when calculating `partial_deriv_dagger_deriv` to determine if two\n eigenvalues are equal (see :func:`_bulk_twirled_deriv` for details). Not\n used if `partial_deriv_dagger_deriv` is provided.\n\n num_gauge_params : int\n The number of gauge parameters of the model. Not needed if `model`\n is provided.\n\n op_penalty : float, optional\n Coefficient for a penalty linear in the sum of the germ lengths.\n\n germ_lengths : numpy.array, optional\n The length of each germ. 
Not needed if `op_penalty` is ``0.0`` or\n `partial_germs_list` is provided.\n\n l1_penalty : float, optional\n Coefficient for a penalty linear in the number of germs.\n\n Returns\n -------\n CompositeScore\n The score for the germ set indicating how many parameters it amplifies\n and its numerical score restricted to those parameters.\n \"\"\"\n if partial_deriv_dagger_deriv is None:\n if model is None or partial_germs_list is None:\n raise ValueError(\"Must provide either partial_deriv_dagger_deriv or \"\n \"(model, partial_germs_list)!\")\n else:\n pDDD_kwargs = {'model': model, 'germs_list': partial_germs_list}\n if eps is not None:\n pDDD_kwargs['eps'] = eps\n if germ_lengths is not None:\n pDDD_kwargs['germ_lengths'] = germ_lengths\n partial_deriv_dagger_deriv = _compute_bulk_twirled_ddd(**pDDD_kwargs)\n\n if num_gauge_params is None:\n if model is None:\n raise ValueError(\"Must provide either num_gauge_params or model!\")\n else:\n num_gauge_params = _remove_spam_vectors(model).num_gauge_params\n\n # Calculate penalty scores\n numGerms = partial_deriv_dagger_deriv.shape[0]\n l1Score = l1_penalty * numGerms\n opScore = 0.0\n if op_penalty != 0.0:\n if germ_lengths is None:\n if partial_germs_list is None:\n raise ValueError(\"Must provide either germ_lengths or \"\n \"partial_germs_list when op_penalty != 0.0!\")\n else:\n germ_lengths = _np.array([len(germ)\n for germ in partial_germs_list])\n opScore = op_penalty * _np.sum(germ_lengths)\n\n combinedDDD = _np.sum(partial_deriv_dagger_deriv, axis=0)\n sortedEigenvals = _np.sort(_np.real(_nla.eigvalsh(combinedDDD)))\n observableEigenvals = sortedEigenvals[num_gauge_params:]\n N_AC = 0\n AC_score = _np.inf\n for N in range(init_n, len(observableEigenvals) + 1):\n scoredEigenvals = observableEigenvals[-N:]\n candidate_AC_score = score_fn(scoredEigenvals)\n if candidate_AC_score > threshold_ac:\n break # We've found a set of parameters for which the germ set\n # is not AC.\n else:\n AC_score = candidate_AC_score\n N_AC = N\n\n # OLD Apply penalties to the minor score; major part is just #amplified\n #major_score = N_AC\n #minor_score = AC_score + l1Score + opScore\n\n # Apply penalties to the major score\n major_score = -N_AC + opScore + l1Score\n minor_score = AC_score\n ret = _scoring.CompositeScore(major_score, minor_score, N_AC)\n #DEBUG: ret.extra = {'opScore': opScore,\n # 'sum(germ_lengths)': _np.sum(germ_lengths), 'l1': l1Score}\n return ret\n\n\ndef _compute_bulk_twirled_ddd(model, germs_list, eps=1e-6, check=False,\n germ_lengths=None, comm=None):\n \"\"\"\n Calculate the positive squares of the germ Jacobians.\n\n twirledDerivDaggerDeriv == array J.H*J contributions from each germ\n (J=Jacobian) indexed by (iGerm, iModelParam1, iModelParam2)\n size (nGerms, vec_model_dim, vec_model_dim)\n\n Parameters\n ----------\n model : Model\n The model defining the parameters to differentiate with respect to.\n\n germs_list : list\n The germ set\n\n eps : float, optional\n Tolerance used for testing whether two eigenvectors are degenerate\n (i.e. abs(eval1 - eval2) < eps ? 
)\n\n check : bool, optional\n Whether to perform internal consistency checks, at the expense of\n making the function slower.\n\n germ_lengths : numpy.ndarray, optional\n A pre-computed array of the length (depth) of each germ.\n\n comm : mpi4py.MPI.Comm, optional\n When not ``None``, an MPI communicator for distributing the computation\n across multiple processors.\n\n Returns\n -------\n twirledDerivDaggerDeriv : numpy.ndarray\n A complex array of shape `(len(germs), model.num_params, model.num_params)`.\n \"\"\"\n if germ_lengths is None:\n germ_lengths = _np.array([len(germ) for germ in germs_list])\n\n twirledDeriv = _bulk_twirled_deriv(model, germs_list, eps, check, comm) / germ_lengths[:, None, None]\n\n #OLD: slow, I think because conjugate *copies* a large tensor, causing a memory bottleneck\n #twirledDerivDaggerDeriv = _np.einsum('ijk,ijl->ikl',\n # _np.conjugate(twirledDeriv),\n # twirledDeriv)\n\n #NEW: faster, one-germ-at-a-time computation requires less memory.\n nGerms, _, vec_model_dim = twirledDeriv.shape\n twirledDerivDaggerDeriv = _np.empty((nGerms, vec_model_dim, vec_model_dim),\n dtype=_np.complex)\n for i in range(nGerms):\n twirledDerivDaggerDeriv[i, :, :] = _np.dot(\n twirledDeriv[i, :, :].conjugate().T, twirledDeriv[i, :, :])\n\n return twirledDerivDaggerDeriv\n\n\ndef _compute_twirled_ddd(model, germ, eps=1e-6):\n \"\"\"\n Calculate the positive squares of the germ Jacobian.\n\n twirledDerivDaggerDeriv == array J.H*J contributions from `germ`\n (J=Jacobian) indexed by (iModelParam1, iModelParam2)\n size (vec_model_dim, vec_model_dim)\n\n Parameters\n ----------\n model : Model\n The model defining the parameters to differentiate with respect to.\n\n germ : Circuit\n The (single) germ circuit to consider. `J` above is the twirled\n derivative of this circuit's action (process matrix).\n\n eps : float, optional\n Tolerance used for testing whether two eigenvectors are degenerate\n (i.e. abs(eval1 - eval2) < eps ? )\n\n Returns\n -------\n numpy.ndarray\n \"\"\"\n twirledDeriv = _twirled_deriv(model, germ, eps) / len(germ)\n #twirledDerivDaggerDeriv = _np.einsum('jk,jl->kl',\n # _np.conjugate(twirledDeriv),\n # twirledDeriv)\n twirledDerivDaggerDeriv = _np.tensordot(_np.conjugate(twirledDeriv),\n twirledDeriv, (0, 0))\n\n return twirledDerivDaggerDeriv\n\n\ndef _germ_set_score_slack(weights, model_num, score_func, deriv_dagger_deriv_list,\n force_indices, force_score,\n n_gauge_params, op_penalty, germ_lengths, l1_penalty=1e-2,\n score_dict=None):\n \"\"\"\n Returns a germ set \"score\" in which smaller is better.\n\n Also returns intentionally bad score (`force_score`) if `weights` is zero on any of\n the \"forced\" germs (i.e. at any index in `forcedIndices`).\n This function is included for use by :func:`find_germs_integer_slack`,\n but is not convenient for just computing the score of a germ set. For that,\n use :func:`compute_germ_set_score`.\n\n Parameters\n ----------\n weights : list\n The per-germ \"selection weight\", indicating whether the germ\n is present in the selected germ set or not.\n\n model_num : int\n index into `deriv_dagger_deriv_list` indicating which model (typically in\n a neighborhood) we're computing scores for.\n\n score_func : {'all', 'worst'}\n Sets the objective function for scoring the eigenvalues. If 'all',\n score is ``sum(1/input_array)``. 
If 'worst', score is ``1/min(input_array)``.\n\n deriv_dagger_deriv_list : numpy.ndarray\n Array of J.T * J contributions for each model.\n\n force_indices : list of ints\n Indices marking the germs that *must* be in the final set (or else `force_score`\n will be returned).\n\n force_score : float\n The score that is returned when any of the germs indexed by `force_indices` are\n not present (i.e. their weights are <= 0).\n\n n_gauge_params : int\n The number of gauge (not amplifiable) parameters in the model.\n\n op_penalty : float\n Coefficient for a penalty linear in the sum of the germ lengths.\n\n germ_lengths : numpy.ndarray\n A pre-computed array of the length (depth) of each germ.\n\n l1_penalty : float\n Coefficient for a penalty linear in the number of germs.\n\n score_dict : dict, optional\n A dictionary to cache the score valies for the given `model_num` and\n `weights`, i.e. `score_dict[model_num, tuple(weights)]` is set to the\n returned value.\n\n Returns\n -------\n float\n \"\"\"\n if force_indices is not None and _np.any(weights[force_indices] <= 0):\n score = force_score\n else:\n #combinedDDD = _np.einsum('i,ijk', weights,\n # deriv_dagger_deriv_list[model_num])\n combinedDDD = _np.squeeze(\n _np.tensordot(_np.expand_dims(weights, 1),\n deriv_dagger_deriv_list[model_num], (0, 0)))\n assert len(combinedDDD.shape) == 2\n\n sortedEigenvals = _np.sort(_np.real(_nla.eigvalsh(combinedDDD)))\n observableEigenvals = sortedEigenvals[n_gauge_params:]\n score = (_scoring.list_score(observableEigenvals, score_func)\n + l1_penalty * _np.sum(weights)\n + op_penalty * _np.dot(germ_lengths, weights))\n if score_dict is not None:\n # Side effect: calling _germ_set_score_slack caches result in score_dict\n score_dict[model_num, tuple(weights)] = score\n return score\n\n\ndef randomize_model_list(model_list, randomization_strength, num_copies,\n seed=None):\n \"\"\"\n Applies random unitary perturbations to a model or list of models.\n\n If `model_list` is a length-1 list, then `num_copies` determines how\n many randomizations to create. If `model_list` containes multiple\n models, then `num_copies` must be `None` and each model is\n randomized once to create the corresponding returned model.\n\n Parameters\n ----------\n model_list : Model or list\n A list of Model objects.\n\n randomization_strength : float, optional\n Strength of unitary randomizations, as passed to :method:`Model.randomize_with_unitary`.\n\n num_copies : int\n The number of random perturbations of `model_list[0]` to generate when\n `len(model_list) == 1`. A value of `None` will result in 1 copy. If\n `len(model_list) > 1` then `num_copies` must be set to None.\n\n seed : int, optional\n Starting seed for randomization. Successive randomizations receive\n successive seeds. 
`None` results in random seeds.\n\n Returns\n -------\n list\n A list of the randomized Models.\n \"\"\"\n if len(model_list) > 1 and num_copies is not None:\n raise ValueError(\"Input multiple models XOR request multiple \"\n \"copies only!\")\n\n newmodelList = []\n if len(model_list) > 1:\n for modelnum, model in enumerate(model_list):\n newmodelList.append(model.randomize_with_unitary(\n randomization_strength,\n seed=None if seed is None else seed + modelnum))\n else:\n for modelnum in range(num_copies if num_copies is not None else 1):\n newmodelList.append(model_list[0].randomize_with_unitary(\n randomization_strength,\n seed=None if seed is None else seed + modelnum))\n return newmodelList\n\n\ndef test_germs_list_completeness(model_list, germs_list, score_func, threshold):\n \"\"\"\n Check to see if the germs_list is amplificationally complete (AC).\n\n Checks for AC with respect to all the Models in `model_list`, returning\n the index of the first Model for which it is not AC or `-1` if it is AC\n for all Models.\n\n Parameters\n ----------\n model_list : list\n A list of models to test. Often this list is a neighborhood (\"cloud\") of\n models around a model of interest.\n\n germs_list : list\n A list of the germ :class:`Circuit`s (the \"germ set\") to test for completeness.\n\n score_func : {'all', 'worst'}\n Sets the objective function for scoring the eigenvalues. If 'all',\n score is ``sum(1/eigval_array)``. If 'worst', score is ``1/min(eigval_array)``.\n\n threshold : float, optional\n An eigenvalue of jacobian^T*jacobian is considered zero and thus a\n parameter un-amplified when its reciprocal is greater than threshold.\n Also used for eigenvector degeneracy testing in twirling operation.\n\n Returns\n -------\n int\n The index of the first model in `model_list` to fail the amplficational\n completeness test.\n \"\"\"\n for modelNum, model in enumerate(model_list):\n initial_test = test_germ_set_infl(model, germs_list,\n score_func=score_func,\n threshold=threshold)\n if not initial_test:\n return modelNum\n\n # If the germs_list is complete for all models, return -1\n return -1\n\n\ndef _remove_spam_vectors(model):\n \"\"\"\n Returns a copy of `model` with state preparations and effects removed.\n\n Parameters\n ----------\n model : Model\n The model to act on.\n\n Returns\n -------\n Model\n \"\"\"\n reducedModel = model.copy()\n for prepLabel in list(reducedModel.preps.keys()):\n del reducedModel.preps[prepLabel]\n for povmLabel in list(reducedModel.povms.keys()):\n del reducedModel.povms[povmLabel]\n return reducedModel\n\n\ndef _num_non_spam_gauge_params(model):\n \"\"\"\n Return the number of non-gauge, non-SPAM parameters in `model`.\n\n Equivalent to `_remove_spam_vectors(model).num_gauge_params`.\n\n Parameters\n ---------\n model : Model\n\n Parameters\n ----------\n model : Model\n The model to act on.\n\n Returns\n -------\n int\n \"\"\"\n return _remove_spam_vectors(model).num_gauge_params\n\n\n# wrt is op_dim x op_dim, so is M, Minv, Proj\n# so SOP is op_dim^2 x op_dim^2 and acts on vectorized *gates*\n# Recall vectorizing identity (when vec(.) 
concats rows as flatten does):\n# vec( A * X * B ) = A tensor B^T * vec( X )\ndef _super_op_for_perfect_twirl(wrt, eps):\n \"\"\"Return super operator for doing a perfect twirl with respect to wrt.\n \"\"\"\n assert wrt.shape[0] == wrt.shape[1] # only square matrices allowed\n dim = wrt.shape[0]\n SuperOp = _np.zeros((dim**2, dim**2), 'complex')\n\n # Get spectrum and eigenvectors of wrt\n wrtEvals, wrtEvecs = _np.linalg.eig(wrt)\n wrtEvecsInv = _np.linalg.inv(wrtEvecs)\n\n # We want to project X -> M * (Proj_i * (Minv * X * M) * Proj_i) * Minv,\n # where M = wrtEvecs. So A = B = M * Proj_i * Minv and so\n # superop = A tensor B^T == A tensor A^T\n # NOTE: this == (A^T tensor A)^T while *Maple* germ functions seem to just\n # use A^T tensor A -> ^T difference\n for i in range(dim):\n # Create projector onto i-th eigenspace (spanned by i-th eigenvector\n # and other degenerate eigenvectors)\n Proj_i = _np.diag([(1 if (abs(wrtEvals[i] - wrtEvals[j]) <= eps)\n else 0) for j in range(dim)])\n A = _np.dot(wrtEvecs, _np.dot(Proj_i, wrtEvecsInv))\n #if _np.linalg.norm(A.imag) > 1e-6:\n # print(\"DB: imag = \",_np.linalg.norm(A.imag))\n #assert(_np.linalg.norm(A.imag) < 1e-6)\n #A = _np.real(A)\n # Need to normalize, because we are overcounting projectors onto\n # subspaces of dimension d > 1, giving us d * Proj_i tensor Proj_i^T.\n # We can fix this with a division by tr(Proj_i) = d.\n SuperOp += _np.kron(A, A.T) / _np.trace(Proj_i)\n # SuperOp += _np.kron(A.T,A) # Mimic Maple version (but I think this is\n # wrong... or it doesn't matter?)\n return SuperOp # a op_dim^2 x op_dim^2 matrix\n\n\ndef _sq_sing_vals_from_deriv(deriv, weights=None):\n \"\"\"\n Calculate the squared singular values of the Jacobian of the germ set.\n\n Parameters\n ----------\n deriv : numpy.array\n Array of shape ``(nGerms, flattened_op_dim, vec_model_dim)``. Each\n sub-array corresponding to an individual germ is the Jacobian of the\n vectorized gate representation of that germ raised to some power with\n respect to the model parameters, normalized by dividing by the length\n of each germ after repetition.\n\n weights : numpy.array\n Array of length ``nGerms``, giving the relative contributions of each\n individual germ's Jacobian to the combined Jacobian (which is calculated\n as a convex combination of the individual Jacobians).\n\n Returns\n -------\n numpy.array\n The sorted squared singular values of the combined Jacobian of the germ\n set.\n \"\"\"\n # shape (nGerms, vec_model_dim, vec_model_dim)\n derivDaggerDeriv = _np.einsum('ijk,ijl->ikl', _np.conjugate(deriv), deriv)\n # awkward to convert to tensordot, so leave as einsum\n\n # Take the average of the D^dagger*D/L^2 matrices associated with each germ\n # with optional weights.\n combinedDDD = _np.average(derivDaggerDeriv, weights=weights, axis=0)\n sortedEigenvals = _np.sort(_np.real(_nla.eigvalsh(combinedDDD)))\n\n return sortedEigenvals\n\n\ndef _twirled_deriv(model, circuit, eps=1e-6):\n \"\"\"\n Compute the \"Twirled Derivative\" of a circuit.\n\n The twirled derivative is obtained by acting on the standard derivative of\n a circuit with the twirling superoperator.\n\n Parameters\n ----------\n model : Model object\n The Model which associates operation labels with operators.\n\n circuit : Circuit object\n A twirled derivative of this circuit's action (process matrix) is taken.\n\n eps : float, optional\n Tolerance used for testing whether two eigenvectors are degenerate\n (i.e. abs(eval1 - eval2) < eps ? 
)\n\n Returns\n -------\n numpy array\n An array of shape (op_dim^2, num_model_params)\n \"\"\"\n prod = model.sim.product(circuit)\n\n # flattened_op_dim x vec_model_dim\n dProd = model.sim.dproduct(circuit, flat=True)\n\n # flattened_op_dim x flattened_op_dim\n twirler = _super_op_for_perfect_twirl(prod, eps)\n\n # flattened_op_dim x vec_model_dim\n return _np.dot(twirler, dProd)\n\n\ndef _bulk_twirled_deriv(model, circuits, eps=1e-6, check=False, comm=None):\n \"\"\"\n Compute the \"Twirled Derivative\" of a set of circuits.\n\n The twirled derivative is obtained by acting on the standard derivative of\n a circuit with the twirling superoperator.\n\n Parameters\n ----------\n model : Model object\n The Model which associates operation labels with operators.\n\n circuits : list of Circuit objects\n A twirled derivative of this circuit's action (process matrix) is taken.\n\n eps : float, optional\n Tolerance used for testing whether two eigenvectors are degenerate\n (i.e. abs(eval1 - eval2) < eps ? )\n\n check : bool, optional\n Whether to perform internal consistency checks, at the expense of\n making the function slower.\n\n comm : mpi4py.MPI.Comm, optional\n When not None, an MPI communicator for distributing the computation\n across multiple processors.\n\n Returns\n -------\n numpy array\n An array of shape (num_simplified_circuits, op_dim^2, num_model_params)\n \"\"\"\n if len(model.preps) > 0 or len(model.povms) > 0:\n model = _remove_spam_vectors(model)\n # This function assumes model has no spam elements so `lookup` below\n # gives indexes into products computed by evalTree.\n\n resource_alloc = _baseobjs.ResourceAllocation(comm=comm)\n dProds, prods = model.sim.bulk_dproduct(circuits, flat=True, return_prods=True, resource_alloc=resource_alloc)\n op_dim = model.dim\n fd = op_dim**2 # flattened gate dimension\n nCircuits = len(circuits)\n\n ret = _np.empty((nCircuits, fd, dProds.shape[1]), 'complex')\n for i in range(nCircuits):\n # flattened_op_dim x flattened_op_dim\n twirler = _super_op_for_perfect_twirl(prods[i], eps)\n\n # flattened_op_dim x vec_model_dim\n ret[i] = _np.dot(twirler, dProds[i * fd:(i + 1) * fd])\n\n if check:\n for i, circuit in enumerate(circuits):\n chk_ret = _twirled_deriv(model, circuit, eps)\n if _nla.norm(ret[i] - chk_ret) > 1e-6:\n _warnings.warn(\"bulk twirled derivative norm mismatch = \"\n \"%g - %g = %g\"\n % (_nla.norm(ret[i]), _nla.norm(chk_ret),\n _nla.norm(ret[i] - chk_ret))) # pragma: no cover\n\n return ret # nSimplifiedCircuits x flattened_op_dim x vec_model_dim\n\n\ndef test_germ_set_finitel(model, germs_to_test, length, weights=None,\n return_spectrum=False, tol=1e-6):\n \"\"\"\n Test whether a set of germs is able to amplify all non-gauge parameters.\n\n Parameters\n ----------\n model : Model\n The Model (associates operation matrices with operation labels).\n\n germs_to_test : list of Circuits\n List of germ circuits to test for completeness.\n\n length : int\n The finite length to use in amplification testing. Larger\n values take longer to compute but give more robust results.\n\n weights : numpy array, optional\n A 1-D array of weights with length equal len(germs_to_test),\n which multiply the contribution of each germ to the total\n jacobian matrix determining parameter amplification. 
If\n None, a uniform weighting of 1.0/len(germs_to_test) is applied.\n\n return_spectrum : bool, optional\n If True, return the jacobian^T*jacobian spectrum in addition\n to the success flag.\n\n tol : float, optional\n Tolerance: an eigenvalue of jacobian^T*jacobian is considered\n zero and thus a parameter un-amplified when it is less than tol.\n\n Returns\n -------\n success : bool\n Whether all non-gauge parameters were amplified.\n spectrum : numpy array\n Only returned when `return_spectrum` is ``True``. Sorted array of\n eigenvalues (from small to large) of the jacobian^T * jacobian\n matrix used to determine parameter amplification.\n \"\"\"\n # Remove any SPAM vectors from model since we only want\n # to consider the set of *gate* parameters for amplification\n # and this makes sure our parameter counting is correct\n model = _remove_spam_vectors(model)\n\n nGerms = len(germs_to_test)\n germToPowL = [germ * length for germ in germs_to_test]\n\n op_dim = model.dim\n dprods = model.sim.bulk_dproduct(germToPowL, flat=True) # shape (nGerms*flattened_op_dim, vec_model_dim)\n dprods.shape = (nGerms, op_dim**2, dprods.shape[1])\n\n germLengths = _np.array([len(germ) for germ in germs_to_test], 'd')\n\n normalizedDeriv = dprods / (length * germLengths[:, None, None])\n\n sortedEigenvals = _sq_sing_vals_from_deriv(normalizedDeriv, weights)\n\n nGaugeParams = model.num_gauge_params\n\n observableEigenvals = sortedEigenvals[nGaugeParams:]\n\n bSuccess = bool(_scoring.list_score(observableEigenvals, 'worst') < 1 / tol)\n\n return (bSuccess, sortedEigenvals) if return_spectrum else bSuccess\n\n\ndef test_germ_set_infl(model, germs_to_test, score_func='all', weights=None,\n return_spectrum=False, threshold=1e6, check=False):\n \"\"\"\n Test whether a set of germs is able to amplify all non-gauge parameters.\n\n Parameters\n ----------\n model : Model\n The Model (associates operation matrices with operation labels).\n\n germs_to_test : list of Circuit\n List of germ circuits to test for completeness.\n\n score_func : string\n Label to indicate how a germ set is scored. See\n :func:`~pygsti.algorithms.scoring.list_score` for details.\n\n weights : numpy array, optional\n A 1-D array of weights with length equal len(germs_to_test),\n which multiply the contribution of each germ to the total\n jacobian matrix determining parameter amplification. If\n None, a uniform weighting of 1.0/len(germs_to_test) is applied.\n\n return_spectrum : bool, optional\n If ``True``, return the jacobian^T*jacobian spectrum in addition\n to the success flag.\n\n threshold : float, optional\n An eigenvalue of jacobian^T*jacobian is considered zero and thus a\n parameter un-amplified when its reciprocal is greater than threshold.\n Also used for eigenvector degeneracy testing in twirling operation.\n\n check : bool, optional\n Whether to perform internal consistency checks, at the\n expense of making the function slower.\n\n Returns\n -------\n success : bool\n Whether all non-gauge parameters were amplified.\n spectrum : numpy array\n Only returned when `return_spectrum` is ``True``. 
Sorted array of\n eigenvalues (from small to large) of the jacobian^T * jacobian\n matrix used to determine parameter amplification.\n \"\"\"\n # Remove any SPAM vectors from model since we only want\n # to consider the set of *gate* parameters for amplification\n # and this makes sure our parameter counting is correct\n model = _remove_spam_vectors(model)\n\n germLengths = _np.array([len(germ) for germ in germs_to_test], _np.int64)\n twirledDerivDaggerDeriv = _compute_bulk_twirled_ddd(model, germs_to_test,\n 1. / threshold, check,\n germLengths)\n # result[i] = _np.dot( twirledDeriv[i].H, twirledDeriv[i] ) i.e. matrix\n # product\n # result[i,k,l] = sum_j twirledDerivH[i,k,j] * twirledDeriv(i,j,l)\n # result[i,k,l] = sum_j twirledDeriv_conj[i,j,k] * twirledDeriv(i,j,l)\n\n if weights is None:\n nGerms = len(germs_to_test)\n # weights = _np.array( [1.0/nGerms]*nGerms, 'd')\n weights = _np.array([1.0] * nGerms, 'd')\n\n #combinedTDDD = _np.einsum('i,ijk->jk', weights, twirledDerivDaggerDeriv)\n combinedTDDD = _np.tensordot(weights, twirledDerivDaggerDeriv, (0, 0))\n sortedEigenvals = _np.sort(_np.real(_np.linalg.eigvalsh(combinedTDDD)))\n\n nGaugeParams = model.num_gauge_params\n observableEigenvals = sortedEigenvals[nGaugeParams:]\n\n bSuccess = bool(_scoring.list_score(observableEigenvals, score_func)\n < threshold)\n\n return (bSuccess, sortedEigenvals) if return_spectrum else bSuccess\n\n\ndef find_germs_depthfirst(model_list, germs_list, randomize=True,\n randomization_strength=1e-3, num_copies=None, seed=0, op_penalty=0,\n score_func='all', tol=1e-6, threshold=1e6, check=False,\n force=\"singletons\", verbosity=0):\n \"\"\"\n Greedy germ selection algorithm starting with 0 germs.\n\n Tries to minimize the number of germs needed to achieve amplificational\n completeness (AC). Begins with 0 germs and adds the germ that increases the\n score used to check for AC by the largest amount at each step, stopping when\n the threshold for AC is achieved.\n\n Parameters\n ----------\n model_list : Model or list\n The model or list of `Model`s to select germs for.\n\n germs_list : list of Circuit\n The list of germs to contruct a germ set from.\n\n randomize : bool, optional\n Whether or not to randomize `model_list` (usually just a single\n `Model`) with small (see `randomizationStrengh`) unitary maps\n in order to avoid \"accidental\" symmetries which could allow for\n fewer germs but *only* for that particular model. Setting this\n to `True` will increase the run time by a factor equal to the\n numer of randomized copies (`num_copies`).\n\n randomization_strength : float, optional\n The strength of the unitary noise used to randomize input Model(s);\n is passed to :func:`~pygsti.objects.Model.randomize_with_unitary`.\n\n num_copies : int, optional\n The number of randomized models to create when only a *single* gate\n set is passed via `model_list`. Otherwise, `num_copies` must be set\n to `None`.\n\n seed : int, optional\n Seed for generating random unitary perturbations to models.\n\n op_penalty : float, optional\n Coefficient for a penalty linear in the sum of the germ lengths.\n\n score_func : {'all', 'worst'}, optional\n Sets the objective function for scoring the eigenvalues. If 'all',\n score is ``sum(1/eigenvalues)``. 
If 'worst', score is\n ``1/min(eiganvalues)``.\n\n tol : float, optional\n Tolerance (`eps` arg) for :func:`_compute_bulk_twirled_ddd`, which sets\n the differece between eigenvalues below which they're treated as\n degenerate.\n\n threshold : float, optional\n Value which the score (before penalties are applied) must be lower than\n for a germ set to be considered AC.\n\n check : bool, optional\n Whether to perform internal checks (will slow down run time\n substantially).\n\n force : list of Circuits\n A list of `Circuit` objects which *must* be included in the final\n germ set. If the special string \"singletons\" is given, then all of\n the single gates (length-1 sequences) must be included.\n\n verbosity : int, optional\n Level of detail printed to stdout.\n\n Returns\n -------\n list\n A list of the built-up germ set (a list of :class:`Circuit` objects).\n \"\"\"\n printer = _baseobjs.VerbosityPrinter.create_printer(verbosity)\n\n model_list = _setup_model_list(model_list, randomize,\n randomization_strength, num_copies, seed)\n\n (reducedModelList,\n numGaugeParams, _, _) = _get_model_params(model_list)\n\n germLengths = _np.array([len(germ) for germ in germs_list], _np.int64)\n numGerms = len(germs_list)\n\n weights = _np.zeros(numGerms, _np.int64)\n goodGerms = []\n if force:\n if force == \"singletons\":\n weights[_np.where(germLengths == 1)] = 1\n goodGerms = [germ for germ\n in _np.array(germs_list)[_np.where(germLengths == 1)]]\n else: # force should be a list of Circuits\n for opstr in force:\n weights[germs_list.index(opstr)] = 1\n goodGerms = force[:]\n\n undercompleteModelNum = test_germs_list_completeness(model_list,\n germs_list,\n score_func,\n threshold)\n if undercompleteModelNum > -1:\n printer.warning(\"Complete initial germ set FAILS on model \"\n + str(undercompleteModelNum) + \". Aborting search.\")\n return None\n\n printer.log(\"Complete initial germ set succeeds on all input models.\", 1)\n printer.log(\"Now searching for best germ set.\", 1)\n printer.log(\"Starting germ set optimization. 
Lower score is better.\", 1)\n\n twirledDerivDaggerDerivList = [_compute_bulk_twirled_ddd(model, germs_list, tol,\n check, germLengths)\n for model in model_list]\n\n # Dict of keyword arguments passed to compute_score_non_AC that don't\n # change from call to call\n nonAC_kwargs = {\n 'score_fn': lambda x: _scoring.list_score(x, score_func=score_func),\n 'threshold_ac': threshold,\n 'num_gauge_params': numGaugeParams,\n 'op_penalty': op_penalty,\n 'germ_lengths': germLengths,\n }\n\n for modelNum, reducedModel in enumerate(reducedModelList):\n derivDaggerDeriv = twirledDerivDaggerDerivList[modelNum]\n # Make sure the set of germs you come up with is AC for all\n # models.\n # Remove any SPAM vectors from model since we only want\n # to consider the set of *gate* parameters for amplification\n # and this makes sure our parameter counting is correct\n while _np.any(weights == 0):\n\n # As long as there are some unused germs, see if you need to add\n # another one.\n if test_germ_set_infl(reducedModel, goodGerms,\n score_func=score_func, threshold=threshold):\n # The germs are sufficient for the current model\n break\n candidateGerms = _np.where(weights == 0)[0]\n candidateGermScores = []\n for candidateGermIdx in _np.where(weights == 0)[0]:\n # If the germs aren't sufficient, try adding a single germ\n candidateWeights = weights.copy()\n candidateWeights[candidateGermIdx] = 1\n partialDDD = derivDaggerDeriv[\n _np.where(candidateWeights == 1)[0], :, :]\n candidateGermScore = compute_composite_germ_set_score(\n partial_deriv_dagger_deriv=partialDDD, **nonAC_kwargs)\n candidateGermScores.append(candidateGermScore)\n # Add the germ that give the best score\n bestCandidateGerm = candidateGerms[_np.array(\n candidateGermScores).argmin()]\n weights[bestCandidateGerm] = 1\n goodGerms.append(germs_list[bestCandidateGerm])\n\n return goodGerms\n\n\ndef find_germs_breadthfirst(model_list, germs_list, randomize=True,\n randomization_strength=1e-3, num_copies=None, seed=0,\n op_penalty=0, score_func='all', tol=1e-6, threshold=1e6,\n check=False, force=\"singletons\", pretest=True, mem_limit=None,\n comm=None, profiler=None, verbosity=0):\n \"\"\"\n Greedy algorithm starting with 0 germs.\n\n Tries to minimize the number of germs needed to achieve amplificational\n completeness (AC). Begins with 0 germs and adds the germ that increases the\n score used to check for AC by the largest amount (for the model that\n currently has the lowest score) at each step, stopping when the threshold\n for AC is achieved. This strategy is something of a \"breadth-first\"\n approach, in contrast to :func:`find_germs_depthfirst`, which only looks at the\n scores for one model at a time until that model achieves AC, then\n turning it's attention to the remaining models.\n\n Parameters\n ----------\n model_list : Model or list\n The model or list of `Model`s to select germs for.\n\n germs_list : list of Circuit\n The list of germs to contruct a germ set from.\n\n randomize : bool, optional\n Whether or not to randomize `model_list` (usually just a single\n `Model`) with small (see `randomizationStrengh`) unitary maps\n in order to avoid \"accidental\" symmetries which could allow for\n fewer germs but *only* for that particular model. 
Setting this\n to `True` will increase the run time by a factor equal to the\n numer of randomized copies (`num_copies`).\n\n randomization_strength : float, optional\n The strength of the unitary noise used to randomize input Model(s);\n is passed to :func:`~pygsti.objects.Model.randomize_with_unitary`.\n\n num_copies : int, optional\n The number of randomized models to create when only a *single* gate\n set is passed via `model_list`. Otherwise, `num_copies` must be set\n to `None`.\n\n seed : int, optional\n Seed for generating random unitary perturbations to models.\n\n op_penalty : float, optional\n Coefficient for a penalty linear in the sum of the germ lengths.\n\n score_func : {'all', 'worst'}, optional\n Sets the objective function for scoring the eigenvalues. If 'all',\n score is ``sum(1/eigenvalues)``. If 'worst', score is\n ``1/min(eiganvalues)``.\n\n tol : float, optional\n Tolerance (`eps` arg) for :func:`_compute_bulk_twirled_ddd`, which sets\n the differece between eigenvalues below which they're treated as\n degenerate.\n\n threshold : float, optional\n Value which the score (before penalties are applied) must be lower than\n for a germ set to be considered AC.\n\n check : bool, optional\n Whether to perform internal checks (will slow down run time\n substantially).\n\n force : list of Circuits\n A list of `Circuit` objects which *must* be included in the final\n germ set. If the special string \"singletons\" is given, then all of\n the single gates (length-1 sequences) must be included.\n\n pretest : boolean, optional\n Whether germ list should be initially checked for completeness.\n\n mem_limit : int, optional\n A rough memory limit in bytes which restricts the amount of intermediate\n values that are computed and stored.\n\n comm : mpi4py.MPI.Comm, optional\n When not None, an MPI communicator for distributing the computation\n across multiple processors.\n\n profiler : Profiler, optional\n A profiler object used for to track timing and memory usage.\n\n verbosity : int, optional\n Level of detail printed to stdout.\n\n Returns\n -------\n list\n A list of the built-up germ set (a list of :class:`Circuit` objects).\n \"\"\"\n if comm is not None and comm.Get_size() > 1:\n from mpi4py import MPI # not at top so pygsti doesn't require mpi4py\n\n printer = _baseobjs.VerbosityPrinter.create_printer(verbosity, comm)\n\n model_list = _setup_model_list(model_list, randomize,\n randomization_strength, num_copies, seed)\n\n dim = model_list[0].dim\n #Np = model_list[0].num_params #wrong:? 
includes spam...\n Np = model_list[0].num_params\n #print(\"DB Np = %d, Ng = %d\" % (Np,Ng))\n assert(all([(mdl.dim == dim) for mdl in model_list])), \\\n \"All models must have the same dimension!\"\n #assert(all([(mdl.num_params == Np) for mdl in model_list])), \\\n # \"All models must have the same number of parameters!\"\n\n (_, numGaugeParams,\n numNonGaugeParams, _) = _get_model_params(model_list)\n germLengths = _np.array([len(germ) for germ in germs_list], _np.int64)\n\n numGerms = len(germs_list)\n\n goodGerms = []\n weights = _np.zeros(numGerms, _np.int64)\n if force:\n if force == \"singletons\":\n weights[_np.where(germLengths == 1)] = 1\n goodGerms = [germ for i, germ in enumerate(germs_list) if germLengths[i] == 1]\n else: # force should be a list of Circuits\n for opstr in force:\n weights[germs_list.index(opstr)] = 1\n goodGerms = force[:]\n\n if pretest:\n undercompleteModelNum = test_germs_list_completeness(model_list,\n germs_list,\n score_func,\n threshold)\n if undercompleteModelNum > -1:\n printer.warning(\"Complete initial germ set FAILS on model \"\n + str(undercompleteModelNum) + \".\")\n printer.warning(\"Aborting search.\")\n return None\n\n printer.log(\"Complete initial germ set succeeds on all input models.\", 1)\n printer.log(\"Now searching for best germ set.\", 1)\n\n printer.log(\"Starting germ set optimization. Lower score is better.\", 1)\n\n mode = \"all-Jac\" # compute a all the possible germ's jacobians at once up\n # front and store them separately (requires lots of mem)\n\n if mem_limit is not None:\n memEstimate = FLOATSIZE * len(model_list) * len(germs_list) * Np**2\n # for _compute_bulk_twirled_ddd\n memEstimate += FLOATSIZE * len(model_list) * len(germs_list) * dim**2 * Np\n # for _bulk_twirled_deriv sub-call\n printer.log(\"Memory estimate of %.1f GB (%.1f GB limit) for all-Jac mode.\" %\n (memEstimate / 1024.0**3, mem_limit / 1024.0**3), 1)\n\n if memEstimate > mem_limit:\n mode = \"single-Jac\" # compute a single germ's jacobian at a time\n # and store the needed J-sum over chosen germs.\n memEstimate = FLOATSIZE * 3 * len(model_list) * Np**2 + \\\n FLOATSIZE * 3 * len(model_list) * dim**2 * Np\n #Factor of 3 accounts for currentDDDs, testDDDs, and bestDDDs\n printer.log(\"Memory estimate of %.1f GB (%.1f GB limit) for single-Jac mode.\" %\n (memEstimate / 1024.0**3, mem_limit / 1024.0**3), 1)\n\n if memEstimate > mem_limit:\n raise MemoryError(\"Too little memory, even for single-Jac mode!\")\n\n twirledDerivDaggerDerivList = None\n\n if mode == \"all-Jac\":\n twirledDerivDaggerDerivList = \\\n [_compute_bulk_twirled_ddd(model, germs_list, tol,\n check, germLengths, comm)\n for model in model_list]\n\n currentDDDList = []\n for i, derivDaggerDeriv in enumerate(twirledDerivDaggerDerivList):\n currentDDDList.append(_np.sum(derivDaggerDeriv[_np.where(weights == 1)[0], :, :], axis=0))\n\n elif mode == \"single-Jac\":\n currentDDDList = [_np.zeros((Np, Np), 'complex') for mdl in model_list]\n\n loc_Indices, _, _ = _mpit.distribute_indices(\n list(range(len(goodGerms))), comm, False)\n\n with printer.progress_logging(3):\n for i, goodGermIdx in enumerate(loc_Indices):\n printer.show_progress(i, len(loc_Indices),\n prefix=\"Initial germ set computation\",\n suffix=germs_list[goodGermIdx].str)\n #print(\"DB: Rank%d computing initial index %d\" % (comm.Get_rank(),goodGermIdx))\n\n for k, model in enumerate(model_list):\n currentDDDList[k] += _compute_twirled_ddd(\n model, germs_list[goodGermIdx], tol)\n\n #aggregate each currendDDDList across all 
procs\n if comm is not None and comm.Get_size() > 1:\n for k, model in enumerate(model_list):\n result = _np.empty((Np, Np), 'complex')\n comm.Allreduce(currentDDDList[k], result, op=MPI.SUM)\n currentDDDList[k][:, :] = result[:, :]\n result = None # free mem\n\n else: # should be unreachable since we set 'mode' internally above\n raise ValueError(\"Invalid mode: %s\" % mode) # pragma: no cover\n\n # Dict of keyword arguments passed to compute_score_non_AC that don't\n # change from call to call\n nonAC_kwargs = {\n 'score_fn': lambda x: _scoring.list_score(x, score_func=score_func),\n 'threshold_ac': threshold,\n 'num_gauge_params': numGaugeParams,\n 'op_penalty': op_penalty,\n 'germ_lengths': germLengths,\n }\n\n initN = 1\n while _np.any(weights == 0):\n printer.log(\"Outer iteration: %d of %d amplified, %d germs\" %\n (initN, numNonGaugeParams, len(goodGerms)), 2)\n # As long as there are some unused germs, see if you need to add\n # another one.\n if initN == numNonGaugeParams:\n break # We are AC for all models, so we can stop adding germs.\n\n candidateGermIndices = _np.where(weights == 0)[0]\n loc_candidateIndices, owners, _ = _mpit.distribute_indices(\n candidateGermIndices, comm, False)\n\n # Since the germs aren't sufficient, add the best single candidate germ\n bestDDDs = None\n bestGermScore = _scoring.CompositeScore(1.0e100, 0, None) # lower is better\n iBestCandidateGerm = None\n with printer.progress_logging(3):\n for i, candidateGermIdx in enumerate(loc_candidateIndices):\n printer.show_progress(i, len(loc_candidateIndices),\n prefix=\"Inner iter over candidate germs\",\n suffix=germs_list[candidateGermIdx].str)\n\n #print(\"DB: Rank%d computing index %d\" % (comm.Get_rank(),candidateGermIdx))\n worstScore = _scoring.CompositeScore(-1.0e100, 0, None) # worst of all models\n\n # Loop over all models\n testDDDs = []\n for k, currentDDD in enumerate(currentDDDList):\n testDDD = currentDDD.copy()\n\n if mode == \"all-Jac\":\n #just get cached value of deriv-dagger-deriv\n derivDaggerDeriv = twirledDerivDaggerDerivList[k][candidateGermIdx]\n testDDD += derivDaggerDeriv\n\n elif mode == \"single-Jac\":\n #compute value of deriv-dagger-deriv\n model = model_list[k]\n testDDD += _compute_twirled_ddd(\n model, germs_list[candidateGermIdx], tol)\n # (else already checked above)\n\n nonAC_kwargs['germ_lengths'] = \\\n _np.array([len(germ) for germ in\n (goodGerms + [germs_list[candidateGermIdx]])])\n worstScore = max(worstScore, compute_composite_germ_set_score(\n partial_deriv_dagger_deriv=testDDD[None, :, :], init_n=initN,\n **nonAC_kwargs))\n testDDDs.append(testDDD) # save in case this is a keeper\n\n # Take the score for the current germ to be its worst score\n # over all the models.\n germScore = worstScore\n printer.log(str(germScore), 4)\n if germScore < bestGermScore:\n bestGermScore = germScore\n iBestCandidateGerm = candidateGermIdx\n bestDDDs = testDDDs\n testDDDs = None\n\n # Add the germ that gives the best germ score\n if comm is not None and comm.Get_size() > 1:\n #figure out which processor has best germ score and distribute\n # its information to the rest of the procs\n globalMinScore = comm.allreduce(bestGermScore, op=MPI.MIN)\n toSend = comm.Get_rank() if (globalMinScore == bestGermScore) \\\n else comm.Get_size() + 1\n winningRank = comm.allreduce(toSend, op=MPI.MIN)\n bestGermScore = globalMinScore\n toCast = iBestCandidateGerm if (comm.Get_rank() == winningRank) else None\n iBestCandidateGerm = comm.bcast(toCast, root=winningRank)\n for k in 
range(len(model_list)):\n comm.Bcast(bestDDDs[k], root=winningRank)\n\n #Update variables for next outer iteration\n weights[iBestCandidateGerm] = 1\n initN = bestGermScore.N\n goodGerms.append(germs_list[iBestCandidateGerm])\n\n for k in range(len(model_list)):\n currentDDDList[k][:, :] = bestDDDs[k][:, :]\n bestDDDs[k] = None\n\n printer.log(\"Added %s to final germs (%s)\" %\n (germs_list[iBestCandidateGerm].str, str(bestGermScore)), 3)\n\n return goodGerms\n\n\n#@profile\ndef find_germs_integer_slack(model_list, germs_list, randomize=True,\n randomization_strength=1e-3, num_copies=None,\n seed=0, l1_penalty=1e-2, op_penalty=0,\n initial_weights=None, score_func='all',\n max_iter=100, fixed_slack=False,\n slack_frac=False, return_all=False, tol=1e-6,\n check=False, force=\"singletons\",\n force_score=1e100, threshold=1e6,\n verbosity=1):\n \"\"\"\n Find a locally optimal subset of the germs in germs_list.\n\n Locally optimal here means that no single germ can be excluded\n without making the smallest non-gauge eigenvalue of the\n Jacobian.H*Jacobian matrix smaller, i.e. less amplified,\n by more than a fixed or variable amount of \"slack\", as\n specified by `fixed_slack` or `slack_frac`.\n\n Parameters\n ----------\n model_list : Model or list of Model\n The list of Models to be tested. To ensure that the returned germ\n set is amplficationally complete, it is a good idea to score potential\n germ sets against a collection (~5-10) of similar models. The user\n may specify a single Model and a number of unitarily close copies to\n be made (set by the kwarg `num_copies`), or the user may specify their\n own list of Models, each of which in turn may or may not be\n randomized (set by the kwarg `randomize`).\n\n germs_list : list of Circuit\n List of all germ circuits to consider.\n\n randomize : Bool, optional\n Whether or not the input Model(s) are first subject to unitary\n randomization. If ``False``, the user should perform the unitary\n randomization themselves. Note: If the Model(s) are perfect (e.g.\n ``std1Q_XYI.target_model()``), then the germ selection output should not be\n trusted, due to accidental degeneracies in the Model. If the\n Model(s) include stochastic (non-unitary) error, then germ selection\n will fail, as we score amplificational completeness in the limit of\n infinite sequence length (so any stochastic noise will completely\n depolarize any sequence in that limit). Default is ``True``.\n\n randomization_strength : float, optional\n The strength of the unitary noise used to randomize input Model(s);\n is passed to :func:`~pygsti.objects.Model.randomize_with_unitary`.\n Default is ``1e-3``.\n\n num_copies : int, optional\n The number of Model copies to be made of the input Model (prior to\n unitary randomization). If more than one Model is passed in,\n `num_copies` should be ``None``. If only one Model is passed in and\n `num_copies` is ``None``, no extra copies are made.\n\n seed : float, optional\n The starting seed used for unitary randomization. If multiple Models\n are to be randomized, ``model_list[i]`` is randomized with ``seed +\n i``. Default is 0.\n\n l1_penalty : float, optional\n How strong the penalty should be for increasing the germ set list by a\n single germ. Default is 1e-2.\n\n op_penalty : float, optional\n How strong the penalty should be for increasing a germ in the germ set\n list by a single gate. 
Default is 0.\n\n initial_weights : list-like\n List or array of either booleans or (0 or 1) integers\n specifying which germs in `germ_list` comprise the initial\n germ set. If ``None``, then starting point includes all\n germs.\n\n score_func : string\n Label to indicate how a germ set is scored. See\n :func:`~pygsti.algorithms.scoring.list_score` for details.\n\n max_iter : int, optional\n The maximum number of iterations before giving up.\n\n fixed_slack : float, optional\n If not ``None``, a floating point number which specifies that excluding\n a germ is allowed to increase 1.0/smallest-non-gauge-eigenvalue by\n `fixed_slack`. You must specify *either* `fixed_slack` or `slack_frac`.\n\n slack_frac : float, optional\n If not ``None``, a floating point number which specifies that excluding\n a germ is allowed to increase 1.0/smallest-non-gauge-eigenvalue by\n `fixedFrac`*100 percent. You must specify *either* `fixed_slack` or\n `slack_frac`.\n\n return_all : bool, optional\n If ``True``, return the final ``weights`` vector and score dictionary\n in addition to the optimal germ list (see below).\n\n tol : float, optional\n Tolerance used for eigenvector degeneracy testing in twirling\n operation.\n\n check : bool, optional\n Whether to perform internal consistency checks, at the\n expense of making the function slower.\n\n force : str or list, optional\n A list of Circuits which *must* be included in the final germ set.\n If set to the special string \"singletons\" then all length-1 strings will\n be included. Seting to None is the same as an empty list.\n\n force_score : float, optional (default is 1e100)\n When `force` designates a non-empty set of circuits, the score to\n assign any germ set that does not contain each and every required germ.\n\n threshold : float, optional (default is 1e6)\n Specifies a maximum score for the score matrix, above which the germ\n set is rejected as amplificationally incomplete.\n\n verbosity : int, optional\n Integer >= 0 indicating the amount of detail to print.\n\n See Also\n --------\n :class:`~pygsti.objects.Model`\n :class:`~pygsti.objects.Circuit`\n \"\"\"\n printer = _baseobjs.VerbosityPrinter.create_printer(verbosity)\n\n model_list = _setup_model_list(model_list, randomize,\n randomization_strength, num_copies, seed)\n\n if (fixed_slack and slack_frac) or (not fixed_slack and not slack_frac):\n raise ValueError(\"Either fixed_slack *or* slack_frac should be specified\")\n\n if initial_weights is not None:\n if len(germs_list) != len(initial_weights):\n raise ValueError(\"The lengths of germs_list (%d) and \"\n \"initial_weights (%d) must match.\"\n % (len(germs_list), len(initial_weights)))\n # Normalize the weights array to be 0s and 1s even if it is provided as\n # bools\n weights = _np.array([1 if x else 0 for x in initial_weights])\n else:\n weights = _np.ones(len(germs_list), _np.int64) # default: start with all germs\n# lessWeightOnly = True # we're starting at the max-weight vector\n\n undercompleteModelNum = test_germs_list_completeness(model_list,\n germs_list, score_func,\n threshold)\n if undercompleteModelNum > -1:\n printer.log(\"Complete initial germ set FAILS on model \"\n + str(undercompleteModelNum) + \".\", 1)\n printer.log(\"Aborting search.\", 1)\n return (None, None, None) if return_all else None\n\n printer.log(\"Complete initial germ set succeeds on all input models.\", 1)\n printer.log(\"Now searching for best germ set.\", 1)\n\n num_models = len(model_list)\n\n # Remove any SPAM vectors from model since we only want\n 
# to consider the set of *gate* parameters for amplification\n # and this makes sure our parameter counting is correct\n model0 = _remove_spam_vectors(model_list[0])\n\n # Initially allow adding to weight. -- maybe make this an argument??\n lessWeightOnly = False\n\n nGaugeParams = model0.num_gauge_params\n\n # score dictionary:\n # keys = (modelNum, tuple-ized weight vector of 1's and 0's only)\n # values = list_score\n scoreD = {}\n germLengths = _np.array([len(germ) for germ in germs_list], _np.int64)\n\n if force:\n if force == \"singletons\":\n forceIndices = _np.where(germLengths == 1)\n else: # force should be a list of Circuits\n forceIndices = _np.array([germs_list.index(opstr) for opstr in force])\n else:\n forceIndices = None\n\n twirledDerivDaggerDerivList = [_compute_bulk_twirled_ddd(model, germs_list, tol)\n for model in model_list]\n\n # Dict of keyword arguments passed to _germ_set_score_slack that don't change from\n # call to call\n cs_kwargs = {\n 'score_func': score_func,\n 'deriv_dagger_deriv_list': twirledDerivDaggerDerivList,\n 'force_indices': forceIndices,\n 'force_score': force_score,\n 'n_gauge_params': nGaugeParams,\n 'op_penalty': op_penalty,\n 'germ_lengths': germLengths,\n 'l1_penalty': l1_penalty,\n 'score_dict': scoreD,\n }\n\n scoreList = [_germ_set_score_slack(weights, model_num, **cs_kwargs)\n for model_num in range(num_models)]\n score = _np.max(scoreList)\n L1 = sum(weights) # ~ L1 norm of weights\n\n printer.log(\"Starting germ set optimization. Lower score is better.\", 1)\n printer.log(\"Model has %d gauge params.\" % nGaugeParams, 1)\n\n def _get_neighbors(bool_vec):\n for i in range(len(bool_vec)):\n v = bool_vec.copy()\n v[i] = (v[i] + 1) % 2 # Toggle v[i] btwn 0 and 1\n yield v\n\n with printer.progress_logging(1):\n for iIter in range(max_iter):\n printer.show_progress(iIter, max_iter,\n suffix=\"score=%g, nGerms=%d\" % (score, L1))\n\n bFoundBetterNeighbor = False\n for neighbor in _get_neighbors(weights):\n neighborScoreList = []\n for model_num in range(len(model_list)):\n if (model_num, tuple(neighbor)) not in scoreD:\n neighborL1 = sum(neighbor)\n neighborScoreList.append(_germ_set_score_slack(neighbor,\n model_num,\n **cs_kwargs))\n else:\n neighborL1 = sum(neighbor)\n neighborScoreList.append(scoreD[model_num,\n tuple(neighbor)])\n\n neighborScore = _np.max(neighborScoreList) # Take worst case.\n # Move if we've found better position; if we've relaxed, we\n # only move when L1 is improved.\n if neighborScore <= score and (neighborL1 < L1 or not lessWeightOnly):\n weights, score, L1 = neighbor, neighborScore, neighborL1\n bFoundBetterNeighbor = True\n\n printer.log(\"Found better neighbor: \"\n \"nGerms = %d score = %g\" % (L1, score), 2)\n\n if not bFoundBetterNeighbor: # Time to relax our search.\n # From now on, don't allow increasing weight L1\n lessWeightOnly = True\n\n if fixed_slack is False:\n # Note score is positive (for sum of 1/lambda)\n slack = score * slack_frac\n # print \"slack =\", slack\n else:\n slack = fixed_slack\n assert slack > 0\n\n printer.log(\"No better neighbor. 
Relaxing score w/slack: \"\n + \"%g => %g\" % (score, score + slack), 2)\n # Artificially increase score and see if any neighbor is better\n # now...\n score += slack\n\n for neighbor in _get_neighbors(weights):\n scoreList = [scoreD[model_num, tuple(neighbor)]\n for model_num in range(len(model_list))]\n maxScore = _np.max(scoreList)\n if sum(neighbor) < L1 and maxScore < score:\n weights, score, L1 = neighbor, maxScore, sum(neighbor)\n bFoundBetterNeighbor = True\n printer.log(\"Found better neighbor: \"\n \"nGerms = %d score = %g\" % (L1, score), 2)\n\n if not bFoundBetterNeighbor: # Relaxing didn't help!\n printer.log(\"Stationary point found!\", 1)\n break # end main for loop\n\n printer.log(\"Moving to better neighbor\", 1)\n # print score\n else:\n printer.log(\"Hit max. iterations\", 1)\n\n printer.log(\"score = %s\" % score, 1)\n printer.log(\"weights = %s\" % weights, 1)\n printer.log(\"L1(weights) = %s\" % sum(weights), 1)\n\n goodGerms = []\n for index, val in enumerate(weights):\n if val == 1:\n goodGerms.append(germs_list[index])\n\n if return_all:\n return goodGerms, weights, scoreD\n else:\n return goodGerms\n\n\ndef _germ_set_score_grasp(germ_set, germs_list, twirled_deriv_dagger_deriv_list,\n non_ac_kwargs, init_n=1):\n \"\"\"\n Score a germ set against a collection of models.\n\n Calculate the score of the germ set with respect to each member of a\n collection of models and return the worst score among that collection.\n\n Parameters\n ----------\n germ_set : list of Circuit\n The set of germs to score.\n\n germs_list : list of Circuit\n The list of all germs whose Jacobians are provided in\n `twirled_deriv_dagger_deriv_list`.\n\n twirled_deriv_dagger_deriv_list : numpy.array\n Jacobians for all the germs in `germs_list` stored as a 3-dimensional\n array, where the first index indexes the particular germ.\n\n non_ac_kwargs : dict\n Dictionary containing further arguments to pass to\n :func:`compute_composite_germ_set_score` for the scoring of the germ set against\n individual models.\n\n init_n : int\n The number of eigenvalues to begin checking for amplificational\n completeness with respect to. Passed as an argument to\n :func:`compute_composite_germ_set_score`.\n\n Returns\n -------\n CompositeScore\n The worst score over all models of the germ set.\n \"\"\"\n weights = _np.zeros(len(germs_list))\n for germ in germ_set:\n weights[germs_list.index(germ)] = 1\n germsVsModelScores = []\n for derivDaggerDeriv in twirled_deriv_dagger_deriv_list:\n # Loop over all models\n partialDDD = derivDaggerDeriv[_np.where(weights == 1)[0], :, :]\n germsVsModelScores.append(compute_composite_germ_set_score(\n partial_deriv_dagger_deriv=partialDDD, init_n=init_n, **non_ac_kwargs))\n # Take the score for the current germ set to be its worst score over all\n # models.\n return max(germsVsModelScores)\n\n\ndef find_germs_grasp(model_list, germs_list, alpha, randomize=True,\n randomization_strength=1e-3, num_copies=None,\n seed=None, l1_penalty=1e-2, op_penalty=0.0,\n score_func='all', tol=1e-6, threshold=1e6,\n check=False, force=\"singletons\",\n iterations=5, return_all=False, shuffle=False,\n verbosity=0):\n \"\"\"\n Use GRASP to find a high-performing germ set.\n\n Parameters\n ----------\n model_list : Model or list of Model\n The list of Models to be tested. To ensure that the returned germ\n set is amplficationally complete, it is a good idea to score potential\n germ sets against a collection (~5-10) of similar models. 
The user\n may specify a single Model and a number of unitarily close copies to\n be made (set by the kwarg `num_copies`, or the user may specify their\n own list of Models, each of which in turn may or may not be\n randomized (set by the kwarg `randomize`).\n\n germs_list : list of Circuit\n List of all germ circuits to consider.\n\n alpha : float\n A number between 0 and 1 that roughly specifies a score theshold\n relative to the spread of scores that a germ must score better than in\n order to be included in the RCL. A value of 0 for `alpha` corresponds\n to a purely greedy algorithm (only the best-scoring germ set is\n included in the RCL), while a value of 1 for `alpha` will include all\n germs in the RCL.\n See :func:`pygsti.algorithms.scoring.filter_composite_rcl` for more details.\n\n randomize : Bool, optional\n Whether or not the input Model(s) are first subject to unitary\n randomization. If ``False``, the user should perform the unitary\n randomization themselves. Note: If the Model(s) are perfect (e.g.\n ``std1Q_XYI.target_model()``), then the germ selection output should not be\n trusted, due to accidental degeneracies in the Model. If the\n Model(s) include stochastic (non-unitary) error, then germ selection\n will fail, as we score amplificational completeness in the limit of\n infinite sequence length (so any stochastic noise will completely\n depolarize any sequence in that limit).\n\n randomization_strength : float, optional\n The strength of the unitary noise used to randomize input Model(s);\n is passed to :func:`~pygsti.objects.Model.randomize_with_unitary`.\n Default is ``1e-3``.\n\n num_copies : int, optional\n The number of Model copies to be made of the input Model (prior to\n unitary randomization). If more than one Model is passed in,\n `num_copies` should be ``None``. If only one Model is passed in and\n `num_copies` is ``None``, no extra copies are made.\n\n seed : float, optional\n The starting seed used for unitary randomization. If multiple Models\n are to be randomized, ``model_list[i]`` is randomized with ``seed +\n i``.\n\n l1_penalty : float, optional\n How strong the penalty should be for increasing the germ set list by a\n single germ. Used for choosing between outputs of various GRASP\n iterations.\n\n op_penalty : float, optional\n How strong the penalty should be for increasing a germ in the germ set\n list by a single gate.\n\n score_func : string\n Label to indicate how a germ set is scored. See\n :func:`~pygsti.algorithms.scoring.list_score` for details.\n\n tol : float, optional\n Tolerance used for eigenvector degeneracy testing in twirling\n operation.\n\n threshold : float, optional (default is 1e6)\n Specifies a maximum score for the score matrix, above which the germ\n set is rejected as amplificationally incomplete.\n\n check : bool, optional\n Whether to perform internal consistency checks, at the\n expense of making the function slower.\n\n force : str or list, optional\n A list of Circuits which *must* be included in the final germ set.\n If set to the special string \"singletons\" then all length-1 strings will\n be included. 
Seting to None is the same as an empty list.\n\n iterations : int, optional\n The number of GRASP iterations to perform.\n\n return_all : bool, optional\n Flag set to tell the routine if it should return lists of all\n initial constructions and local optimizations in addition to the\n optimal solution (useful for diagnostic purposes or if you're not sure\n what your `finalScoreFn` should really be).\n\n shuffle : bool, optional\n Whether the neighborhood should be presented to the optimizer in a\n random order (important since currently the local optimizer updates the\n solution to the first better solution it finds in the neighborhood).\n\n verbosity : int, optional\n Integer >= 0 indicating the amount of detail to print.\n\n Returns\n -------\n finalGermList : list of Circuit\n Sublist of `germs_list` specifying the final, optimal set of germs.\n \"\"\"\n printer = _baseobjs.VerbosityPrinter.create_printer(verbosity)\n\n model_list = _setup_model_list(model_list, randomize,\n randomization_strength, num_copies, seed)\n\n (_, numGaugeParams,\n numNonGaugeParams, _) = _get_model_params(model_list)\n\n germLengths = _np.array([len(germ) for germ in germs_list], _np.int64)\n\n numGerms = len(germs_list)\n\n initialWeights = _np.zeros(numGerms, dtype=_np.int64)\n if force:\n if force == \"singletons\":\n initialWeights[_np.where(germLengths == 1)] = 1\n else: # force should be a list of Circuits\n for opstr in force:\n initialWeights[germs_list.index(opstr)] = 1\n\n def get_neighbors_fn(weights): return _grasp.get_swap_neighbors(\n weights, forced_weights=initialWeights, shuffle=shuffle)\n\n undercompleteModelNum = test_germs_list_completeness(model_list,\n germs_list,\n score_func,\n threshold)\n if undercompleteModelNum > -1:\n printer.warning(\"Complete initial germ set FAILS on model \"\n + str(undercompleteModelNum) + \".\")\n printer.warning(\"Aborting search.\")\n return (None, None, None) if return_all else None\n\n printer.log(\"Complete initial germ set succeeds on all input models.\", 1)\n printer.log(\"Now searching for best germ set.\", 1)\n\n printer.log(\"Starting germ set optimization. 
Lower score is better.\", 1)\n\n twirledDerivDaggerDerivList = [_compute_bulk_twirled_ddd(model, germs_list, tol,\n check, germLengths)\n for model in model_list]\n\n # Dict of keyword arguments passed to compute_score_non_AC that don't\n # change from call to call\n nonAC_kwargs = {\n 'score_fn': lambda x: _scoring.list_score(x, score_func=score_func),\n 'threshold_ac': threshold,\n 'num_gauge_params': numGaugeParams,\n 'op_penalty': op_penalty,\n 'germ_lengths': germLengths,\n }\n\n final_nonAC_kwargs = nonAC_kwargs.copy()\n final_nonAC_kwargs['l1_penalty'] = l1_penalty\n\n scoreFn = (lambda germSet:\n _germ_set_score_grasp(germSet, germs_list,\n twirledDerivDaggerDerivList, nonAC_kwargs,\n init_n=1))\n finalScoreFn = (lambda germSet:\n _germ_set_score_grasp(germSet, germs_list,\n twirledDerivDaggerDerivList,\n final_nonAC_kwargs, init_n=1))\n\n #OLD: feasibleThreshold = _scoring.CompositeScore(-numNonGaugeParams,threshold,numNonGaugeParams))\n def _feasible_fn(germ_set): # now that scoring is not ordered entirely by N\n s = _germ_set_score_grasp(germ_set, germs_list,\n twirledDerivDaggerDerivList, nonAC_kwargs,\n init_n=1)\n return (s.N >= numNonGaugeParams and s.minor < threshold)\n\n def rcl_fn(x): return _scoring.filter_composite_rcl(x, alpha)\n\n initialSolns = []\n localSolns = []\n\n for iteration in range(iterations):\n # This loop is parallelizable (each iteration is independent of all\n # other iterations).\n printer.log('Starting iteration {} of {}.'.format(iteration + 1,\n iterations), 1)\n success = False\n failCount = 0\n while not success and failCount < 10:\n try:\n iterSolns = _grasp.run_grasp_iteration(\n elements=germs_list, greedy_score_fn=scoreFn, rcl_fn=rcl_fn,\n local_score_fn=scoreFn,\n get_neighbors_fn=get_neighbors_fn,\n feasible_fn=_feasible_fn,\n initial_elements=initialWeights, seed=seed,\n verbosity=verbosity)\n\n initialSolns.append(iterSolns[0])\n localSolns.append(iterSolns[1])\n\n success = True\n printer.log('Finished iteration {} of {}.'.format(\n iteration + 1, iterations), 1)\n except Exception as e:\n failCount += 1\n raise e if (failCount == 10) else printer.warning(e)\n\n finalScores = _np.array([finalScoreFn(localSoln)\n for localSoln in localSolns])\n bestSoln = localSolns[_np.argmin(finalScores)]\n\n return (bestSoln, initialSolns, localSolns) if return_all else bestSoln\n"
] | [
[
"numpy.sum",
"numpy.any",
"numpy.trace",
"numpy.kron",
"numpy.argmin",
"numpy.tensordot",
"numpy.expand_dims",
"numpy.where",
"numpy.average",
"numpy.linalg.eig",
"numpy.zeros",
"numpy.linalg.eigvalsh",
"numpy.max",
"numpy.linalg.norm",
"numpy.empty",
"numpy.linalg.inv",
"numpy.conjugate",
"numpy.array",
"numpy.dot"
]
] |
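
The pyGSTi germ-selection source in the row above repeatedly replaces `numpy.einsum` contractions with a per-germ `numpy.dot` loop or a `numpy.tensordot` call, which its own comments attribute to a memory bottleneck from conjugating a large tensor. A minimal numpy sketch (shapes and random data are illustrative assumptions, not values from the source) confirming that the two forms agree:

```python
import numpy as np

rng = np.random.default_rng(0)
nGerms, flat_dim, n_params = 3, 4, 5  # illustrative sizes only
deriv = (rng.standard_normal((nGerms, flat_dim, n_params))
         + 1j * rng.standard_normal((nGerms, flat_dim, n_params)))

# einsum form used in the commented-out "OLD" code path
ddd_einsum = np.einsum('ijk,ijl->ikl', np.conjugate(deriv), deriv)

# per-germ dot-product form used in the "NEW" lower-memory path
ddd_dot = np.empty((nGerms, n_params, n_params), dtype=complex)
for i in range(nGerms):
    ddd_dot[i] = deriv[i].conjugate().T @ deriv[i]

assert np.allclose(ddd_einsum, ddd_dot)

# weighted sum over germs: einsum('i,ijk->jk', w, ddd) == tensordot(w, ddd, (0, 0))
w = rng.random(nGerms)
assert np.allclose(np.einsum('i,ijk->jk', w, ddd_einsum),
                   np.tensordot(w, ddd_einsum, (0, 0)))
```

The per-germ loop only conjugates one germ's slice at a time, trading a single large temporary for `nGerms` small matrix products, which matches the memory rationale given in the source comments.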
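The same file's `_super_op_for_perfect_twirl` builds its superoperator from the vectorization identity stated in its comments, vec(A * X * B) = (A tensor B^T) * vec(X) when vec(.) flattens row-wise. A short sketch (random placeholder matrices, dimension chosen arbitrarily) checking that convention with `numpy.kron`:

```python
import numpy as np

rng = np.random.default_rng(1)
d = 4  # illustrative operator dimension
A, B, X = (rng.standard_normal((d, d)) for _ in range(3))

# row-major vec(.), which is what numpy's flatten() produces by default
lhs = (A @ X @ B).flatten()
rhs = np.kron(A, B.T) @ X.flatten()
assert np.allclose(lhs, rhs)
```

With A = B = M * Proj_i * M^-1 this identity gives the `kron(A, A.T)` term in the source, and the division by `trace(Proj_i)` there corrects the overcounting on degenerate eigenspaces, as the source's comment notes.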
ClarePan/Tax-Calculator | [
"d2d6cb4b551f34017db7166d91d982b5c4670816"
] | [
"taxcalc/tests/test_calculate.py"
] | [
"# CODING-STYLE CHECKS:\n# pycodestyle test_calculate.py\n\nimport os\nimport json\nfrom io import StringIO\nimport tempfile\nimport copy\nimport six\nimport pytest\nimport numpy as np\nimport pandas as pd\nfrom taxcalc import Policy, Records, Calculator, Behavior, Consumption\n\n\nRAWINPUTFILE_FUNITS = 4\nRAWINPUTFILE_YEAR = 2015\nRAWINPUTFILE_CONTENTS = (\n 'RECID,MARS\\n'\n '1,2\\n'\n '2,1\\n'\n '3,4\\n'\n '4,3\\n'\n)\n\n\[email protected](scope='module', name='rawinputfile')\ndef fixture_rawinputfile():\n \"\"\"\n Temporary input file that contains the minimum required input varaibles.\n \"\"\"\n ifile = tempfile.NamedTemporaryFile(mode='a', delete=False)\n ifile.write(RAWINPUTFILE_CONTENTS)\n ifile.close()\n # must close and then yield for Windows platform\n yield ifile\n if os.path.isfile(ifile.name):\n try:\n os.remove(ifile.name)\n except OSError:\n pass # sometimes we can't remove a generated temporary file\n\n\[email protected](scope='module', name='policyfile')\ndef fixture_policyfile():\n txt = \"\"\"{\"_almdep\": {\"value\": [7150, 7250, 7400]},\n \"_almsep\": {\"value\": [40400, 41050]},\n \"_rt5\": {\"value\": [0.33 ]},\n \"_rt7\": {\"value\": [0.396]}}\"\"\"\n f = tempfile.NamedTemporaryFile(mode=\"a\", delete=False)\n f.write(txt + \"\\n\")\n f.close()\n # Must close and then yield for Windows platform\n yield f\n os.remove(f.name)\n\n\ndef test_make_calculator(cps_subsample):\n syr = 2014\n pol = Policy(start_year=syr, num_years=9)\n assert pol.current_year == syr\n rec = Records.cps_constructor(data=cps_subsample, no_benefits=True)\n consump = Consumption()\n consump.update_consumption({syr: {'_MPC_e20400': [0.05]}})\n assert consump.current_year == Consumption.JSON_START_YEAR\n calc = Calculator(policy=pol, records=rec,\n consumption=consump, behavior=Behavior())\n assert calc.current_year == syr\n assert calc.records_current_year() == syr\n # test incorrect Calculator instantiation:\n with pytest.raises(ValueError):\n Calculator(policy=None, records=rec)\n with pytest.raises(ValueError):\n Calculator(policy=pol, records=None)\n with pytest.raises(ValueError):\n Calculator(policy=pol, records=rec, behavior=list())\n with pytest.raises(ValueError):\n Calculator(policy=pol, records=rec, consumption=list())\n\n\ndef test_make_calculator_deepcopy(cps_subsample):\n pol = Policy()\n rec = Records.cps_constructor(data=cps_subsample, no_benefits=True)\n calc1 = Calculator(policy=pol, records=rec)\n calc2 = copy.deepcopy(calc1)\n assert isinstance(calc2, Calculator)\n\n\ndef test_make_calculator_with_policy_reform(cps_subsample):\n rec = Records.cps_constructor(data=cps_subsample, no_benefits=True)\n year = rec.current_year\n # create a Policy object and apply a policy reform\n pol = Policy()\n reform = {2013: {'_II_em': [4000], '_II_em_cpi': False,\n '_STD_Aged': [[1600, 1300, 1300, 1600, 1600]],\n '_STD_Aged_cpi': False}}\n pol.implement_reform(reform)\n # create a Calculator object using this policy reform\n calc = Calculator(policy=pol, records=rec)\n # check that Policy object embedded in Calculator object is correct\n assert calc.current_year == year\n assert calc.policy_param('II_em') == 4000\n assert np.allclose(calc.policy_param('_II_em'),\n np.array([4000] * Policy.DEFAULT_NUM_YEARS))\n exp_STD_Aged = [[1600, 1300, 1300,\n 1600, 1600]] * Policy.DEFAULT_NUM_YEARS\n assert np.allclose(calc.policy_param('_STD_Aged'),\n np.array(exp_STD_Aged))\n assert np.allclose(calc.policy_param('STD_Aged'),\n np.array([1600, 1300, 1300, 1600, 1600]))\n\n\ndef 
test_make_calculator_with_multiyear_reform(cps_subsample):\n rec = Records.cps_constructor(data=cps_subsample, no_benefits=True)\n year = rec.current_year\n # create a Policy object and apply a policy reform\n pol = Policy()\n reform = {2015: {}, 2016: {}}\n reform[2015]['_II_em'] = [5000, 6000] # reform values for 2015 and 2016\n reform[2015]['_II_em_cpi'] = False\n reform[2016]['_STD_Aged'] = [[1600, 1300, 1600, 1300, 1600]]\n pol.implement_reform(reform)\n # create a Calculator object using this policy-reform\n calc = Calculator(policy=pol, records=rec)\n # check that Policy object embedded in Calculator object is correct\n assert pol.num_years == Policy.DEFAULT_NUM_YEARS\n assert calc.current_year == year\n assert calc.policy_param('II_em') == 3950\n exp_II_em = [3900, 3950, 5000] + [6000] * (Policy.DEFAULT_NUM_YEARS - 3)\n assert np.allclose(calc.policy_param('_II_em'),\n np.array(exp_II_em))\n calc.increment_year()\n calc.increment_year()\n assert calc.current_year == 2016\n assert np.allclose(calc.policy_param('STD_Aged'),\n np.array([1600, 1300, 1600, 1300, 1600]))\n\n\ndef test_calculator_advance_to_year(cps_subsample):\n rec = Records.cps_constructor(data=cps_subsample, no_benefits=True)\n pol = Policy()\n calc = Calculator(policy=pol, records=rec)\n calc.advance_to_year(2016)\n assert calc.current_year == 2016\n with pytest.raises(ValueError):\n calc.advance_to_year(2015)\n\n\ndef test_make_calculator_raises_on_no_policy(cps_subsample):\n rec = Records.cps_constructor(data=cps_subsample, no_benefits=True)\n with pytest.raises(ValueError):\n Calculator(records=rec)\n\n\ndef test_calculator_mtr(cps_subsample):\n rec = Records.cps_constructor(data=cps_subsample, no_benefits=True)\n calcx = Calculator(policy=Policy(), records=rec)\n calcx.calc_all()\n combinedx = calcx.array('combined')\n c00100x = calcx.array('c00100')\n calc = Calculator(policy=Policy(), records=rec)\n recs_pre_e00200p = copy.deepcopy(calc.array('e00200p'))\n (mtr_ptx, mtr_itx, mtr_cmb) = calc.mtr(variable_str='e00200p',\n zero_out_calculated_vars=True)\n recs_post_e00200p = calc.array('e00200p')\n assert np.allclose(recs_post_e00200p, recs_pre_e00200p)\n assert np.allclose(calc.array('combined'), combinedx)\n assert np.allclose(calc.array('c00100'), c00100x)\n assert np.array_equal(mtr_cmb, mtr_ptx) is False\n assert np.array_equal(mtr_ptx, mtr_itx) is False\n with pytest.raises(ValueError):\n calc.mtr(variable_str='bad_income_type')\n (_, _, mtr_combined) = calc.mtr(variable_str='e00200s',\n calc_all_already_called=True)\n assert isinstance(mtr_combined, np.ndarray)\n (_, _, mtr_combined) = calc.mtr(variable_str='e00650',\n negative_finite_diff=True,\n calc_all_already_called=True)\n assert isinstance(mtr_combined, np.ndarray)\n (_, _, mtr_combined) = calc.mtr(variable_str='e00900p',\n calc_all_already_called=True)\n assert isinstance(mtr_combined, np.ndarray)\n (_, _, mtr_combined) = calc.mtr(variable_str='e01700',\n calc_all_already_called=True)\n assert isinstance(mtr_combined, np.ndarray)\n (_, _, mtr_combined) = calc.mtr(variable_str='e26270',\n calc_all_already_called=True)\n assert isinstance(mtr_combined, np.ndarray)\n (_, _, mtr_combined) = calc.mtr(variable_str='e00200p',\n calc_all_already_called=True)\n assert np.allclose(mtr_combined, mtr_cmb)\n assert np.allclose(calc.array('combined'), combinedx)\n assert np.allclose(calc.array('c00100'), c00100x)\n\n\ndef test_calculator_mtr_when_PT_rates_differ():\n reform = {2013: {'_II_rt1': [0.40],\n '_II_rt2': [0.40],\n '_II_rt3': [0.40],\n '_II_rt4': 
[0.40],\n '_II_rt5': [0.40],\n '_II_rt6': [0.40],\n '_II_rt7': [0.40],\n '_PT_rt1': [0.30],\n '_PT_rt2': [0.30],\n '_PT_rt3': [0.30],\n '_PT_rt4': [0.30],\n '_PT_rt5': [0.30],\n '_PT_rt6': [0.30],\n '_PT_rt7': [0.30]}}\n funit = (\n u'RECID,MARS,FLPDYR,e00200,e00200p,e00900,e00900p,extraneous\\n'\n u'1, 1, 2009, 200000,200000, 100000,100000, 9999999999\\n'\n )\n rec = Records(pd.read_csv(StringIO(funit)))\n pol = Policy()\n calc1 = Calculator(policy=pol, records=rec)\n (_, mtr1, _) = calc1.mtr(variable_str='p23250')\n pol.implement_reform(reform)\n calc2 = Calculator(policy=pol, records=rec)\n (_, mtr2, _) = calc2.mtr(variable_str='p23250')\n assert np.allclose(mtr1, mtr2, rtol=0.0, atol=1e-06)\n\n\ndef test_make_calculator_increment_years_first(cps_subsample):\n # create Policy object with policy reform\n syr = 2013\n pol = Policy(start_year=syr)\n reform = {2015: {}, 2016: {}}\n std5 = 2000\n reform[2015]['_STD_Aged'] = [[std5, std5, std5, std5, std5]]\n reform[2015]['_II_em'] = [5000]\n reform[2016]['_II_em'] = [6000]\n reform[2016]['_II_em_cpi'] = False\n pol.implement_reform(reform)\n # create Calculator object with Policy object as modified by reform\n rec = Records.cps_constructor(data=cps_subsample, no_benefits=True)\n calc = Calculator(policy=pol, records=rec)\n # compare expected policy parameter values with those embedded in calc\n irates = pol.inflation_rates()\n irate2015 = irates[2015 - syr]\n irate2016 = irates[2016 - syr]\n std6 = std5 * (1.0 + irate2015)\n std7 = std6 * (1.0 + irate2016)\n exp_STD_Aged = np.array([[1500, 1200, 1200, 1500, 1500],\n [1550, 1200, 1200, 1550, 1550],\n [std5, std5, std5, std5, std5],\n [std6, std6, std6, std6, std6],\n [std7, std7, std7, std7, std7]])\n act_STD_Aged = calc.policy_param('_STD_Aged')\n assert np.allclose(act_STD_Aged[:5], exp_STD_Aged)\n exp_II_em = np.array([3900, 3950, 5000, 6000, 6000])\n act_II_em = calc.policy_param('_II_em')\n assert np.allclose(act_II_em[:5], exp_II_em)\n\n\ndef test_ID_HC_vs_BS(cps_subsample):\n \"\"\"\n Test that complete haircut of itemized deductions produces same\n results as a 100% benefit surtax with no benefit deduction.\n \"\"\"\n recs = Records.cps_constructor(data=cps_subsample, no_benefits=True)\n # specify complete-haircut reform policy and Calculator object\n hc_reform = {2013: {'_ID_Medical_hc': [1.0],\n '_ID_StateLocalTax_hc': [1.0],\n '_ID_RealEstate_hc': [1.0],\n '_ID_Casualty_hc': [1.0],\n '_ID_Miscellaneous_hc': [1.0],\n '_ID_InterestPaid_hc': [1.0],\n '_ID_Charity_hc': [1.0]}}\n hc_policy = Policy()\n hc_policy.implement_reform(hc_reform)\n hc_calc = Calculator(policy=hc_policy, records=recs)\n hc_calc.calc_all()\n hc_taxes = hc_calc.dataframe(['iitax', 'payrolltax'])\n del hc_calc\n # specify benefit-surtax reform policy and Calculator object\n bs_reform = {2013: {'_ID_BenefitSurtax_crt': [0.0],\n '_ID_BenefitSurtax_trt': [1.0]}}\n bs_policy = Policy()\n bs_policy.implement_reform(bs_reform)\n bs_calc = Calculator(policy=bs_policy, records=recs)\n bs_calc.calc_all()\n bs_taxes = bs_calc.dataframe(['iitax', 'payrolltax'])\n del bs_calc\n # compare calculated taxes generated by the two reforms\n assert np.allclose(hc_taxes['payrolltax'], bs_taxes['payrolltax'])\n assert np.allclose(hc_taxes['iitax'], bs_taxes['iitax'])\n\n\ndef test_ID_StateLocal_HC_vs_CRT(cps_subsample):\n \"\"\"\n Test that a cap on state/local income and sales tax deductions at 0 percent\n of AGI is equivalent to a complete haircut on the same state/local tax\n deductions.\n \"\"\"\n rec = 
Records.cps_constructor(data=cps_subsample, no_benefits=True)\n # specify state/local complete haircut reform policy and Calculator object\n hc_reform = {2013: {'_ID_StateLocalTax_hc': [1.0]}}\n hc_policy = Policy()\n hc_policy.implement_reform(hc_reform)\n hc_calc = Calculator(policy=hc_policy, records=rec)\n hc_calc.calc_all()\n # specify AGI cap reform policy and Calculator object\n crt_reform = {2013: {'_ID_StateLocalTax_crt': [0.0]}}\n crt_policy = Policy()\n crt_policy.implement_reform(crt_reform)\n crt_calc = Calculator(policy=crt_policy, records=rec)\n crt_calc.calc_all()\n # compare calculated tax results generated by the two reforms\n assert np.allclose(hc_calc.array('payrolltax'),\n crt_calc.array('payrolltax'))\n assert np.allclose(hc_calc.array('iitax'),\n crt_calc.array('iitax'))\n\n\ndef test_ID_RealEstate_HC_vs_CRT(cps_subsample):\n \"\"\"\n Test that a cap on all state, local, and foreign real estate tax deductions\n at 0 percent of AGI is equivalent to a complete haircut on the same real\n estate tax deductions.\n \"\"\"\n rec = Records.cps_constructor(data=cps_subsample, no_benefits=True)\n # specify real estate complete haircut reform policy and Calculator object\n hc_reform = {2013: {'_ID_RealEstate_hc': [1.0]}}\n hc_policy = Policy()\n hc_policy.implement_reform(hc_reform)\n hc_calc = Calculator(policy=hc_policy, records=rec)\n hc_calc.calc_all()\n # specify AGI cap reform policy and Calculator object\n crt_reform = {2013: {'_ID_RealEstate_crt': [0.0]}}\n crt_policy = Policy()\n crt_policy.implement_reform(crt_reform)\n crt_calc = Calculator(policy=crt_policy, records=rec)\n crt_calc.calc_all()\n # compare calculated tax results generated by the two reforms\n assert np.allclose(hc_calc.array('payrolltax'),\n crt_calc.array('payrolltax'))\n assert np.allclose(hc_calc.array('iitax'),\n crt_calc.array('iitax'))\n\n\ndef test_calculator_using_nonstd_input(rawinputfile):\n # check Calculator handling of raw, non-standard input data with no aging\n pol = Policy()\n pol.set_year(RAWINPUTFILE_YEAR) # set policy params to input data year\n nonstd = Records(data=rawinputfile.name,\n gfactors=None, # keeps raw data unchanged\n weights=None,\n start_year=RAWINPUTFILE_YEAR) # set raw input data year\n assert nonstd.array_length == RAWINPUTFILE_FUNITS\n calc = Calculator(policy=pol, records=nonstd,\n sync_years=False) # keeps raw data unchanged\n assert calc.current_year == RAWINPUTFILE_YEAR\n calc.calc_all()\n assert calc.weighted_total('e00200') == 0\n assert calc.total_weight() == 0\n varlist = ['RECID', 'MARS']\n pdf = calc.dataframe(varlist)\n assert isinstance(pdf, pd.DataFrame)\n assert pdf.shape == (RAWINPUTFILE_FUNITS, len(varlist))\n mars = calc.array('MARS')\n assert isinstance(mars, np.ndarray)\n assert mars.shape == (RAWINPUTFILE_FUNITS,)\n exp_iitax = np.zeros((nonstd.array_length,))\n assert np.allclose(calc.array('iitax'), exp_iitax)\n mtr_ptax, _, _ = calc.mtr(wrt_full_compensation=False)\n exp_mtr_ptax = np.zeros((nonstd.array_length,))\n exp_mtr_ptax.fill(0.153)\n assert np.allclose(mtr_ptax, exp_mtr_ptax)\n\n\nREFORM_CONTENTS = \"\"\"\n// Example of a reform file suitable for read_json_param_objects().\n// This JSON file can contain any number of trailing //-style comments, which\n// will be removed before the contents are converted from JSON to a dictionary.\n// Within each \"policy\" object, the primary keys are parameters and\n// the secondary keys are years.\n// Both the primary and secondary key values must be enclosed in quotes (\").\n// Boolean variables 
are specified as true or false (no quotes; all lowercase).\n// Parameter code in the policy object is enclosed inside a pair of double\n// pipe characters (||).\n{\n \"policy\": {\n \"_AMT_brk1\": // top of first AMT tax bracket\n {\"2015\": [200000],\n \"2017\": [300000]\n },\n \"_EITC_c\": // maximum EITC amount by number of qualifying kids (0,1,2,3+)\n {\"2016\": [[ 900, 5000, 8000, 9000]],\n \"2019\": [[1200, 7000, 10000, 12000]]\n },\n \"_II_em\": // personal exemption amount (see indexing changes below)\n {\"2016\": [6000],\n \"2018\": [7500],\n \"2020\": [9000]\n },\n \"_II_em_cpi\": // personal exemption amount indexing status\n {\"2016\": false, // values in future years are same as this year value\n \"2018\": true // values in future years indexed with this year as base\n },\n \"_SS_Earnings_c\": // social security (OASDI) maximum taxable earnings\n {\"2016\": [300000],\n \"2018\": [500000],\n \"2020\": [700000]\n },\n \"_AMT_em_cpi\": // AMT exemption amount indexing status\n {\"2017\": false, // values in future years are same as this year value\n \"2020\": true // values in future years indexed with this year as base\n }\n }\n}\n\"\"\"\n\n\[email protected](scope='module', name='reform_file')\ndef fixture_reform_file():\n \"\"\"\n Temporary reform file for read_json_param_objects() function.\n \"\"\"\n rfile = tempfile.NamedTemporaryFile(mode='a', delete=False)\n rfile.write(REFORM_CONTENTS)\n rfile.close()\n # must close and then yield for Windows platform\n yield rfile\n if os.path.isfile(rfile.name):\n try:\n os.remove(rfile.name)\n except OSError:\n pass # sometimes we can't remove a generated temporary file\n\n\nASSUMP_CONTENTS = \"\"\"\n// Example of assump file suitable for the read_json_param_objects().\n// This JSON file can contain any number of trailing //-style comments, which\n// will be removed before the contents are converted from JSON to a dictionary.\n// Within each \"behavior\", \"consumption\" and \"growth\" object, the\n// primary keys are parameters and the secondary keys are years.\n// Both the primary and secondary key values must be enclosed in quotes (\").\n// Boolean variables are specified as true or false (no quotes; all lowercase).\n{\n \"consumption\": { \"_MPC_e18400\": {\"2018\": [0.05]} },\n \"behavior\": {},\n \"growdiff_baseline\": {},\n \"growdiff_response\": {},\n \"growmodel\": {}\n}\n\"\"\"\n\n\[email protected](scope='module', name='assump_file')\ndef fixture_assump_file():\n \"\"\"\n Temporary assumption file for read_json_params_files() function.\n \"\"\"\n afile = tempfile.NamedTemporaryFile(mode='a', delete=False)\n afile.write(ASSUMP_CONTENTS)\n afile.close()\n # must close and then yield for Windows platform\n yield afile\n if os.path.isfile(afile.name):\n try:\n os.remove(afile.name)\n except OSError:\n pass # sometimes we can't remove a generated temporary file\n\n\[email protected](\"set_year\", [False, True])\ndef test_read_json_reform_file_and_implement_reform(reform_file,\n assump_file,\n set_year):\n \"\"\"\n Test reading and translation of reform file into a reform dictionary\n that is then used to call implement_reform method and Calculate.calc_all()\n NOTE: implement_reform called when policy.current_year == policy.start_year\n \"\"\"\n pol = Policy()\n if set_year:\n pol.set_year(2015)\n param_dict = Calculator.read_json_param_objects(reform_file.name,\n assump_file.name)\n pol.implement_reform(param_dict['policy'])\n syr = pol.start_year\n amt_brk1 = pol._AMT_brk1\n assert amt_brk1[2015 - syr] == 200000\n assert 
amt_brk1[2016 - syr] > 200000\n assert amt_brk1[2017 - syr] == 300000\n assert amt_brk1[2018 - syr] > 300000\n ii_em = pol._II_em\n assert ii_em[2016 - syr] == 6000\n assert ii_em[2017 - syr] == 6000\n assert ii_em[2018 - syr] == 7500\n assert ii_em[2019 - syr] > 7500\n assert ii_em[2020 - syr] == 9000\n assert ii_em[2021 - syr] > 9000\n amt_em = pol._AMT_em\n assert amt_em[2016 - syr, 0] > amt_em[2015 - syr, 0]\n assert amt_em[2017 - syr, 0] > amt_em[2016 - syr, 0]\n assert amt_em[2018 - syr, 0] == amt_em[2017 - syr, 0]\n assert amt_em[2019 - syr, 0] == amt_em[2017 - syr, 0]\n assert amt_em[2020 - syr, 0] == amt_em[2017 - syr, 0]\n assert amt_em[2021 - syr, 0] > amt_em[2020 - syr, 0]\n assert amt_em[2022 - syr, 0] > amt_em[2021 - syr, 0]\n add4aged = pol._ID_Medical_frt_add4aged\n assert add4aged[2015 - syr] == -0.025\n assert add4aged[2016 - syr] == -0.025\n assert add4aged[2017 - syr] == 0.0\n assert add4aged[2022 - syr] == 0.0\n\n\[email protected](scope='module', name='bad1reformfile')\ndef fixture_bad1reformfile():\n # specify JSON text for reform\n txt = \"\"\"\n {\n \"policy\": { // example of incorrect JSON because 'x' must be \"x\"\n 'x': {\"2014\": [4000]}\n }\n }\n \"\"\"\n f = tempfile.NamedTemporaryFile(mode='a', delete=False)\n f.write(txt + '\\n')\n f.close()\n # Must close and then yield for Windows platform\n yield f\n os.remove(f.name)\n\n\[email protected](scope='module', name='bad2reformfile')\ndef fixture_bad2reformfile():\n # specify JSON text for reform\n txt = \"\"\"\n {\n \"title\": \"\",\n \"policyx\": { // example of reform file not containing \"policy\" key\n \"_SS_Earnings_c\": {\"2018\": [9e99]}\n }\n }\n \"\"\"\n f = tempfile.NamedTemporaryFile(mode='a', delete=False)\n f.write(txt + '\\n')\n f.close()\n # Must close and then yield for Windows platform\n yield f\n os.remove(f.name)\n\n\[email protected](scope='module', name='bad3reformfile')\ndef fixture_bad3reformfile():\n # specify JSON text for reform\n txt = \"\"\"\n {\n \"title\": \"\",\n \"policy\": {\n \"_SS_Earnings_c\": {\"2018\": [9e99]}\n },\n \"behavior\": { // example of misplaced \"behavior\" key\n }\n }\n \"\"\"\n f = tempfile.NamedTemporaryFile(mode='a', delete=False)\n f.write(txt + '\\n')\n f.close()\n # Must close and then yield for Windows platform\n yield f\n os.remove(f.name)\n\n\ndef test_read_bad_json_reform_file(bad1reformfile, bad2reformfile,\n bad3reformfile):\n with pytest.raises(ValueError):\n Calculator.read_json_param_objects(bad1reformfile.name, None)\n with pytest.raises(ValueError):\n Calculator.read_json_param_objects(bad2reformfile.name, None)\n with pytest.raises(ValueError):\n Calculator.read_json_param_objects(bad3reformfile.name, None)\n with pytest.raises(ValueError):\n Calculator.read_json_param_objects(list(), None)\n with pytest.raises(ValueError):\n Calculator.read_json_param_objects(None, 'unknown_file_name')\n with pytest.raises(ValueError):\n Calculator.read_json_param_objects(None, list())\n\n\[email protected](scope='module', name='bad1assumpfile')\ndef fixture_bad1assumpfile():\n # specify JSON text for assumptions\n txt = \"\"\"\n {\n \"consumption\": {},\n \"behavior\": { // example of incorrect JSON because 'x' must be \"x\"\n 'x': {\"2014\": [0.25]}\n },\n \"growdiff_baseline\": {},\n \"growdiff_response\": {},\n \"growmodel\": {}\n }\n \"\"\"\n f = tempfile.NamedTemporaryFile(mode='a', delete=False)\n f.write(txt + '\\n')\n f.close()\n # Must close and then yield for Windows platform\n yield f\n os.remove(f.name)\n\n\[email protected](scope='module', 
name='bad2assumpfile')\ndef fixture_bad2assumpfile():\n # specify JSON text for assumptions\n txt = \"\"\"\n {\n \"consumption\": {},\n \"behaviorx\": {}, // example of assump file not containing \"behavior\" key\n \"growdiff_baseline\": {},\n \"growdiff_response\": {},\n \"growmodel\": {}\n }\n \"\"\"\n f = tempfile.NamedTemporaryFile(mode='a', delete=False)\n f.write(txt + '\\n')\n f.close()\n # Must close and then yield for Windows platform\n yield f\n os.remove(f.name)\n\n\[email protected](scope='module', name='bad3assumpfile')\ndef fixture_bad3assumpfile():\n # specify JSON text for assump\n txt = \"\"\"\n {\n \"consumption\": {},\n \"behavior\": {},\n \"growdiff_baseline\": {},\n \"growdiff_response\": {},\n \"policy\": { // example of misplaced policy key\n \"_SS_Earnings_c\": {\"2018\": [9e99]}\n },\n \"growmodel\": {}\n }\n \"\"\"\n f = tempfile.NamedTemporaryFile(mode='a', delete=False)\n f.write(txt + '\\n')\n f.close()\n # Must close and then yield for Windows platform\n yield f\n os.remove(f.name)\n\n\ndef test_read_bad_json_assump_file(bad1assumpfile, bad2assumpfile,\n bad3assumpfile):\n with pytest.raises(ValueError):\n Calculator.read_json_param_objects(None, bad1assumpfile.name)\n with pytest.raises(ValueError):\n Calculator.read_json_param_objects(None, bad2assumpfile.name)\n with pytest.raises(ValueError):\n Calculator.read_json_param_objects(None, bad3assumpfile.name)\n with pytest.raises(ValueError):\n Calculator.read_json_param_objects(None, 'unknown_file_name')\n with pytest.raises(ValueError):\n Calculator.read_json_param_objects(None, list())\n\n\ndef test_convert_parameter_dict():\n with pytest.raises(ValueError):\n Calculator._convert_parameter_dict({2013: {'2013': [40000]}})\n with pytest.raises(ValueError):\n Calculator._convert_parameter_dict({'_II_em': {2013: [40000]}})\n with pytest.raises(ValueError):\n Calculator._convert_parameter_dict({4567: {2013: [40000]}})\n with pytest.raises(ValueError):\n Calculator._convert_parameter_dict({'_II_em': 40000})\n rdict = Calculator._convert_parameter_dict({'_II_em': {'2013': [40000]}})\n assert isinstance(rdict, dict)\n\n\ndef test_calc_all(reform_file, rawinputfile):\n cyr = 2016\n pol = Policy()\n param_dict = Calculator.read_json_param_objects(reform_file.name, None)\n pol.implement_reform(param_dict['policy'])\n pol.set_year(cyr)\n nonstd = Records(data=rawinputfile.name, gfactors=None,\n weights=None, start_year=cyr)\n assert nonstd.array_length == RAWINPUTFILE_FUNITS\n calc = Calculator(policy=pol, records=nonstd,\n sync_years=False) # keeps raw data unchanged\n assert calc.current_year == cyr\n assert calc.reform_warnings == ''\n\n\ndef test_translate_json_reform_suffixes_mars_non_indexed():\n # test read_json_param_objects()\n # using MARS-indexed parameter suffixes\n json1 = \"\"\"{\"policy\": {\n \"_II_em\": {\"2020\": [20000], \"2015\": [15000]},\n \"_AMEDT_ec_joint\": {\"2018\": [400000], \"2016\": [300000]},\n \"_AMEDT_ec_separate\": {\"2017\": [150000], \"2019\": [200000]}\n }}\"\"\"\n pdict1 = Calculator.read_json_param_objects(reform=json1, assump=None)\n rdict1 = pdict1['policy']\n json2 = \"\"\"{\"policy\": {\n \"_AMEDT_ec\": {\"2016\": [[200000, 300000, 125000, 200000, 200000]],\n \"2017\": [[200000, 300000, 150000, 200000, 200000]],\n \"2018\": [[200000, 400000, 150000, 200000, 200000]],\n \"2019\": [[200000, 400000, 200000, 200000, 200000]]},\n \"_II_em\": {\"2015\": [15000], \"2020\": [20000]}\n }}\"\"\"\n pdict2 = Calculator.read_json_param_objects(reform=json2, assump=None)\n rdict2 = 
pdict2['policy']\n assert len(rdict2) == len(rdict1)\n for year in rdict2.keys():\n if '_II_em' in rdict2[year].keys():\n assert np.allclose(rdict1[year]['_II_em'],\n rdict2[year]['_II_em'],\n atol=0.01, rtol=0.0)\n if '_AMEDT_ec' in rdict2[year].keys():\n assert np.allclose(rdict1[year]['_AMEDT_ec'],\n rdict2[year]['_AMEDT_ec'],\n atol=0.01, rtol=0.0)\n\n\ndef test_translate_json_reform_suffixes_eic():\n # test read_json_param_objects(...)\n # using EIC-indexed parameter suffixes\n json1 = \"\"\"{\"policy\": {\n \"_II_em\": {\"2020\": [20000], \"2015\": [15000]},\n \"_EITC_c_0kids\": {\"2018\": [510], \"2019\": [510]},\n \"_EITC_c_1kid\": {\"2019\": [3400], \"2018\": [3400]},\n \"_EITC_c_2kids\": {\"2018\": [5616], \"2019\": [5616]},\n \"_EITC_c_3+kids\": {\"2019\": [6318], \"2018\": [6318]}\n }}\"\"\"\n pdict1 = Calculator.read_json_param_objects(reform=json1, assump=None)\n rdict1 = pdict1['policy']\n json2 = \"\"\"{\"policy\": {\n \"_EITC_c\": {\"2019\": [[510, 3400, 5616, 6318]],\n \"2018\": [[510, 3400, 5616, 6318]]},\n \"_II_em\": {\"2020\": [20000], \"2015\": [15000]}\n }}\"\"\"\n pdict2 = Calculator.read_json_param_objects(reform=json2, assump=None)\n rdict2 = pdict2['policy']\n assert len(rdict2) == len(rdict1)\n for year in rdict2.keys():\n if '_II_em' in rdict2[year].keys():\n assert np.allclose(rdict1[year]['_II_em'],\n rdict2[year]['_II_em'],\n atol=0.01, rtol=0.0)\n if '_EITC_c' in rdict2[year].keys():\n assert np.allclose(rdict1[year]['_EITC_c'],\n rdict2[year]['_EITC_c'],\n atol=0.01, rtol=0.0)\n\n\ndef test_translate_json_reform_suffixes_idedtype():\n # test read_json_param_objects(...)\n # using idedtype-indexed parameter suffixes\n json1 = \"\"\"{\"policy\": {\n \"_ID_BenefitCap_rt\": {\"2019\": [0.2]},\n \"_ID_BenefitCap_Switch_medical\": {\"2019\": [false]},\n \"_ID_BenefitCap_Switch_casualty\": {\"2019\": [false]},\n \"_ID_BenefitCap_Switch_misc\": {\"2019\": [false]},\n \"_ID_BenefitCap_Switch_interest\": {\"2019\": [false]},\n \"_ID_BenefitCap_Switch_charity\": {\"2019\": [false]},\n \"_II_em\": {\"2020\": [20000], \"2015\": [15000]}\n }}\"\"\"\n pdict1 = Calculator.read_json_param_objects(reform=json1, assump=None)\n rdict1 = pdict1['policy']\n json2 = \"\"\"{\"policy\": {\n \"_II_em\": {\"2020\": [20000], \"2015\": [15000]},\n \"_ID_BenefitCap_Switch\": {\n \"2019\": [[false, true, true, false, false, false, false]]\n },\n \"_ID_BenefitCap_rt\": {\"2019\": [0.2]}\n }}\"\"\"\n pdict2 = Calculator.read_json_param_objects(reform=json2, assump=None)\n rdict2 = pdict2['policy']\n assert len(rdict2) == len(rdict1)\n for year in rdict2.keys():\n if '_II_em' in rdict2[year].keys():\n assert np.allclose(rdict1[year]['_II_em'],\n rdict2[year]['_II_em'],\n atol=0.01, rtol=0.0)\n if '_ID_BenefitCap_rt' in rdict2[year].keys():\n assert np.allclose(rdict1[year]['_ID_BenefitCap_rt'],\n rdict2[year]['_ID_BenefitCap_rt'],\n atol=0.01, rtol=0.0)\n if '_ID_BenefitCap_Switch' in rdict2[year].keys():\n assert np.allclose(rdict1[year]['_ID_BenefitCap_Switch'],\n rdict2[year]['_ID_BenefitCap_Switch'],\n atol=0.01, rtol=0.0)\n\n\ndef test_read_json_param_with_suffixes_and_errors():\n # test interaction of policy parameter suffixes and reform errors\n # (fails without 0.10.2 bug fix as reported by Hank Doupe in PB PR#641)\n reform = {\n u'policy': {\n u'_II_brk4_separate': {u'2017': [5000.0]},\n u'_STD_separate': {u'2017': [8000.0]},\n u'_STD_single': {u'2018': [1000.0]},\n u'_II_brk2_headhousehold': {u'2017': [1000.0]},\n u'_II_brk4_single': {u'2017': [500.0]},\n u'_STD_joint': 
{u'2017': [10000.0], u'2020': [150.0]},\n u'_II_brk2_separate': {u'2017': [1000.0]},\n u'_II_brk2_single': {u'2017': [1000.0]},\n u'_II_brk2_joint': {u'2017': [1000.0]},\n u'_FICA_ss_trt': {u'2017': [-1.0], u'2019': [0.1]},\n u'_II_brk4_headhousehold': {u'2017': [500.0]},\n u'_STD_headhousehold': {u'2017': [10000.0], u'2020': [150.0]},\n u'_II_brk4_joint': {u'2017': [500.0]},\n u'_ID_BenefitSurtax_Switch_medical': {u'2017': [True]}\n }\n }\n json_reform = json.dumps(reform)\n params = Calculator.read_json_param_objects(json_reform, None)\n assert isinstance(params, dict)\n pol = Policy()\n pol.ignore_reform_errors()\n pol.implement_reform(params['policy'],\n print_warnings=False, raise_errors=False)\n assert len(pol.parameter_errors) > 0\n assert len(pol.parameter_warnings) > 0\n\n\ndef test_noreform_documentation():\n reform_json = \"\"\"\n {\n \"policy\": {}\n }\n \"\"\"\n assump_json = \"\"\"\n {\n \"consumption\": {},\n \"behavior\": {},\n \"growdiff_baseline\": {},\n \"growdiff_response\": {},\n \"growmodel\": {}\n }\n \"\"\"\n params = Calculator.read_json_param_objects(reform_json, assump_json)\n assert isinstance(params, dict)\n actual_doc = Calculator.reform_documentation(params)\n expected_doc = (\n 'REFORM DOCUMENTATION\\n'\n 'Baseline Growth-Difference Assumption Values by Year:\\n'\n 'none: using default baseline growth assumptions\\n'\n 'Policy Reform Parameter Values by Year:\\n'\n 'none: using current-law policy parameters\\n'\n )\n assert actual_doc == expected_doc\n\n\ndef test_reform_documentation():\n reform_json = \"\"\"\n {\n \"policy\": {\n \"_II_em_cpi\": {\"2016\": false,\n \"2018\": true},\n \"_II_em\": {\"2016\": [5000],\n \"2018\": [6000],\n \"2020\": [7000]},\n \"_EITC_indiv\": {\"2017\": [true]},\n \"_STD_Aged_cpi\": {\"2016\": false},\n \"_STD_Aged\": {\"2016\": [[1600, 1300, 1300, 1600, 1600]],\n \"2020\": [[2000, 2000, 2000, 2000, 2000]]},\n \"_ID_BenefitCap_Switch_medical\": {\"2020\": [false]},\n \"_ID_BenefitCap_Switch_casualty\": {\"2020\": [false]},\n \"_ID_BenefitCap_Switch_misc\": {\"2020\": [false]},\n \"_ID_BenefitCap_Switch_interest\": {\"2020\": [false]},\n \"_ID_BenefitCap_Switch_charity\": {\"2020\": [false]}\n }\n }\n \"\"\"\n assump_json = \"\"\"\n {\n \"consumption\": {},\n \"behavior\": {},\n // increase baseline inflation rate by one percentage point in 2014+\n // (has no effect on known policy parameter values)\n \"growdiff_baseline\": {\"_ACPIU\": {\"2014\": [0.01]}},\n \"growdiff_response\": {},\n \"growmodel\": {}\n }\n \"\"\"\n params = Calculator.read_json_param_objects(reform_json, assump_json)\n assert isinstance(params, dict)\n doc = Calculator.reform_documentation(params)\n assert isinstance(doc, six.string_types)\n dump = False # set to True to print documentation and force test failure\n if dump:\n print(doc)\n assert 1 == 2\n\n\ndef test_distribution_tables(cps_subsample):\n pol = Policy()\n recs = Records.cps_constructor(data=cps_subsample)\n calc1 = Calculator(policy=pol, records=recs)\n assert calc1.current_year == 2014\n calc1.calc_all()\n dt1, dt2 = calc1.distribution_tables(None, 'weighted_deciles')\n assert isinstance(dt1, pd.DataFrame)\n assert dt2 is None\n dt1, dt2 = calc1.distribution_tables(calc1, 'weighted_deciles')\n assert isinstance(dt1, pd.DataFrame)\n assert isinstance(dt2, pd.DataFrame)\n reform = {2014: {'_UBI_u18': [1000],\n '_UBI_1820': [1000],\n '_UBI_21': [1000]}}\n pol.implement_reform(reform)\n assert not pol.parameter_errors\n calc2 = Calculator(policy=pol, records=recs)\n calc2.calc_all()\n dt1, dt2 
= calc1.distribution_tables(calc2, 'weighted_deciles')\n assert isinstance(dt1, pd.DataFrame)\n assert isinstance(dt2, pd.DataFrame)\n\n\ndef test_difference_table(cps_subsample):\n cyr = 2014\n pol = Policy()\n recs = Records.cps_constructor(data=cps_subsample)\n calc1 = Calculator(policy=pol, records=recs)\n assert calc1.current_year == cyr\n reform = {cyr: {'_SS_Earnings_c': [9e99]}}\n pol.implement_reform(reform)\n calc2 = Calculator(policy=pol, records=recs)\n assert calc2.current_year == cyr\n calc1.calc_all()\n calc2.calc_all()\n diff = calc1.difference_table(calc2, 'weighted_deciles', 'iitax')\n assert isinstance(diff, pd.DataFrame)\n\n\ndef test_diagnostic_table(cps_subsample):\n recs = Records.cps_constructor(data=cps_subsample, no_benefits=True)\n calc = Calculator(policy=Policy(), records=recs)\n adt = calc.diagnostic_table(3)\n assert isinstance(adt, pd.DataFrame)\n\n\ndef test_mtr_graph(cps_subsample):\n recs = Records.cps_constructor(data=cps_subsample, no_benefits=True)\n calc = Calculator(policy=Policy(), records=recs)\n fig = calc.mtr_graph(calc,\n mars=2,\n income_measure='wages',\n mtr_measure='ptax')\n assert fig\n fig = calc.mtr_graph(calc,\n income_measure='agi',\n mtr_measure='itax')\n assert fig\n\n\ndef test_atr_graph(cps_subsample):\n recs = Records.cps_constructor(data=cps_subsample, no_benefits=True)\n calc = Calculator(policy=Policy(), records=recs)\n fig = calc.atr_graph(calc, mars=2, atr_measure='itax')\n assert fig\n fig = calc.atr_graph(calc, atr_measure='ptax')\n assert fig\n\n\ndef test_privacy_of_embedded_objects(cps_subsample):\n recs = Records.cps_constructor(data=cps_subsample, no_benefits=True)\n calc = Calculator(policy=Policy(), records=recs)\n with pytest.raises(AttributeError):\n cyr = calc.__policy.current_year\n with pytest.raises(AttributeError):\n wgh = calc.__records.s006\n with pytest.raises(AttributeError):\n cyr = calc.__consumption.current_year\n with pytest.raises(AttributeError):\n cyr = calc.__behavior.current_year\n\n\ndef test_n65(cps_subsample):\n recs = Records.cps_constructor(data=cps_subsample, no_benefits=True)\n calc = Calculator(policy=Policy(), records=recs)\n assert calc.n65().sum() > 1500\n"
] | [
[
"numpy.array",
"numpy.allclose",
"numpy.zeros",
"numpy.array_equal"
]
] |
shivgarg/alfred_transformers | [
"3eab07d3a218eb9b809dec8b7120b92ebd00c890"
] | [
"models/model/seq2seq_im_mask_cnn_finetune.py"
] | [
"import os\nimport torch\nimport numpy as np\nimport nn.vnn as vnn\nimport collections\nfrom torch import nn\nfrom torch.nn import functional as F\nfrom torch.nn.utils.rnn import pad_sequence, pack_padded_sequence, pad_packed_sequence\nfrom model.seq2seq import Module as Base\nfrom models.utils.metric import compute_f1, compute_exact\nfrom gen.utils.image_util import decompress_mask\nfrom torchvision import transforms\nfrom PIL import Image\n\nclass Module(Base):\n\n def __init__(self, args, vocab):\n '''\n Seq2Seq agent\n '''\n super().__init__(args, vocab)\n\n # encoder and self-attention\n self.enc = nn.LSTM(args.demb, args.dhid, bidirectional=True, batch_first=True)\n self.enc_att = vnn.SelfAttn(args.dhid*2)\n\n # subgoal monitoring\n self.subgoal_monitoring = (self.args.pm_aux_loss_wt > 0 or self.args.subgoal_aux_loss_wt > 0)\n\n # frame mask decoder\n decoder = vnn.ConvFrameMaskDecoderProgressMonitorFinetune if self.subgoal_monitoring else vnn.ConvFrameMaskDecoder\n self.dec = decoder(self.emb_action_low, args.dframe, 2*args.dhid,\n pframe=args.pframe,\n attn_dropout=args.attn_dropout,\n hstate_dropout=args.hstate_dropout,\n actor_dropout=args.actor_dropout,\n input_dropout=args.input_dropout,\n teacher_forcing=args.dec_teacher_forcing)\n\n # dropouts\n self.vis_dropout = nn.Dropout(args.vis_dropout)\n self.lang_dropout = nn.Dropout(args.lang_dropout, inplace=True)\n self.input_dropout = nn.Dropout(args.input_dropout)\n\n # internal states\n self.state_t = None\n self.e_t = None\n self.test_mode = False\n\n # bce reconstruction loss\n self.bce_with_logits = torch.nn.BCEWithLogitsLoss(reduction='none')\n self.mse_loss = torch.nn.MSELoss(reduction='none')\n\n # paths\n self.root_path = os.getcwd()\n self.feat_pt = 'feat_conv.pt'\n\n # params\n self.max_subgoals = 25\n self.max_episode_len = args.max_episode_len\n # reset model\n self.reset()\n\n def featurize(self, batch, load_mask=True, load_frames=True):\n '''\n tensorize and pad batch input\n '''\n device = torch.device('cuda') if self.args.gpu else torch.device('cpu')\n feat = collections.defaultdict(list)\n\n for ex in batch:\n ###########\n # auxillary\n ###########\n\n if not self.test_mode:\n # subgoal completion supervision\n if self.args.subgoal_aux_loss_wt > 0:\n feat['subgoals_completed'].append(np.array(ex['num']['low_to_high_idx']) / self.max_subgoals)\n\n # progress monitor supervision\n if self.args.pm_aux_loss_wt > 0:\n num_actions = len([a for sg in ex['num']['action_low'] for a in sg])\n subgoal_progress = [(i+1)/float(num_actions) for i in range(num_actions)]\n feat['subgoal_progress'].append(subgoal_progress)\n\n #########\n # inputs\n #########\n\n # serialize segments\n self.serialize_lang_action(ex)\n\n # goal and instr language\n lang_goal, lang_instr = ex['num']['lang_goal'], ex['num']['lang_instr']\n\n # zero inputs if specified\n lang_goal = self.zero_input(lang_goal) if self.args.zero_goal else lang_goal\n lang_instr = self.zero_input(lang_instr) if self.args.zero_instr else lang_instr\n\n # append goal + instr\n lang_goal_instr = lang_goal + lang_instr\n feat['lang_goal_instr'].append(lang_goal_instr)\n episode_len = 0\n # load Resnet features from disk\n if load_frames and not self.test_mode:\n root = self.get_task_root(ex)\n #im = torch.load(os.path.join(root, self.feat_pt))\n im = []\n path = \"{}/{}\".format(root,'raw_images')\n imgs = sorted(os.listdir(path))\n tfms = transforms.Compose([transforms.Resize(224), transforms.ToTensor(),\n transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 
0.225]),])\n for img in imgs:\n im.append(tfms(Image.open(\"{}/{}\".format(path,img))))\n im = torch.stack(im)\n num_low_actions = len(ex['plan']['low_actions'])\n num_feat_frames = im.shape[0]\n\n if num_low_actions != num_feat_frames:\n keep = [None] * len(ex['plan']['low_actions'])\n for i, d in enumerate(ex['images']):\n # only add frames linked with low-level actions (i.e. skip filler frames like smooth rotations and dish washing)\n if keep[d['low_idx']] is None:\n keep[d['low_idx']] = im[i]\n keep.append(keep[-1]) # stop frame\n episode_len = min(self.max_episode_len, len(keep))\n keep = keep[:episode_len]\n feat['frames'].append(torch.stack(keep, dim=0))\n else:\n episode_len = min(self.max_episode_len, len(im))\n im = im[:episode_len]\n feat['frames'].append(torch.cat([im, im[-1].unsqueeze(0)], dim=0)) # add stop frame\n\n #########\n # outputs\n #########\n if self.args.subgoal_aux_loss_wt > 0:\n feat['subgoals_completed'][-1] = feat['subgoals_completed'][-1][:episode_len]\n \n if self.args.pm_aux_loss_wt > 0:\n feat['subgoal_progress'][-1] = feat['subgoal_progress'][-1][:episode_len]\n \n if not self.test_mode:\n # low-level action\n feat['action_low'].append([a['action'] for a in ex['num']['action_low']][:episode_len])\n\n # low-level action mask\n if load_mask:\n feat['action_low_mask'].append([self.decompress_mask(a['mask']) for i,a in enumerate(ex['num']['action_low']) if a['mask'] is not None and i<episode_len])\n\n # low-level valid interact\n feat['action_low_valid_interact'].append([a['valid_interact'] for a in ex['num']['action_low']][:episode_len])\n\n\n # tensorization and padding\n for k, v in feat.items():\n if k in {'lang_goal_instr'}:\n # language embedding and padding\n seqs = [torch.tensor(vv, device=device) for vv in v]\n pad_seq = pad_sequence(seqs, batch_first=True, padding_value=self.pad)\n seq_lengths = np.array(list(map(len, v)))\n embed_seq = self.emb_word(pad_seq)\n packed_input = pack_padded_sequence(embed_seq, seq_lengths, batch_first=True, enforce_sorted=False)\n feat[k] = packed_input\n elif k in {'action_low_mask'}:\n # mask padding\n seqs = [torch.tensor(vv, device=device, dtype=torch.float) for vv in v]\n feat[k] = seqs\n elif k in {'subgoal_progress', 'subgoals_completed'}:\n # auxillary padding\n seqs = [torch.tensor(vv, device=device, dtype=torch.float) for vv in v]\n pad_seq = pad_sequence(seqs, batch_first=True, padding_value=self.pad)\n feat[k] = pad_seq\n else:\n # default: tensorize and pad sequence\n seqs = [torch.tensor(vv, device=device, dtype=torch.float if ('frames' in k) else torch.long) for vv in v]\n pad_seq = pad_sequence(seqs, batch_first=True, padding_value=self.pad)\n feat[k] = pad_seq\n\n return feat\n\n\n def serialize_lang_action(self, feat):\n '''\n append segmented instr language and low-level actions into single sequences\n '''\n is_serialized = not isinstance(feat['num']['lang_instr'][0], list)\n if not is_serialized:\n feat['num']['lang_instr'] = [word for desc in feat['num']['lang_instr'] for word in desc]\n if not self.test_mode:\n feat['num']['action_low'] = [a for a_group in feat['num']['action_low'] for a in a_group]\n\n\n def decompress_mask(self, compressed_mask):\n '''\n decompress mask from json files\n '''\n mask = np.array(decompress_mask(compressed_mask))\n mask = np.expand_dims(mask, axis=0)\n return mask\n\n\n def forward(self, feat, max_decode=300):\n cont_lang, enc_lang = self.encode_lang(feat)\n state_0 = cont_lang, torch.zeros_like(cont_lang)\n frames = self.vis_dropout(feat['frames'])\n res = 
self.dec(enc_lang, frames, max_decode=self.max_episode_len, gold=feat['action_low'], state_0=state_0)\n feat.update(res)\n return feat\n\n\n def encode_lang(self, feat):\n '''\n encode goal+instr language\n '''\n emb_lang_goal_instr = feat['lang_goal_instr']\n self.lang_dropout(emb_lang_goal_instr.data)\n enc_lang_goal_instr, _ = self.enc(emb_lang_goal_instr)\n enc_lang_goal_instr, _ = pad_packed_sequence(enc_lang_goal_instr, batch_first=True)\n self.lang_dropout(enc_lang_goal_instr)\n cont_lang_goal_instr = self.enc_att(enc_lang_goal_instr)\n\n return cont_lang_goal_instr, enc_lang_goal_instr\n\n\n def reset(self):\n '''\n reset internal states (used for real-time execution during eval)\n '''\n self.r_state = {\n 'state_t': None,\n 'e_t': None,\n 'cont_lang': None,\n 'enc_lang': None\n }\n\n def step(self, feat, prev_action=None):\n '''\n forward the model for a single time-step (used for real-time execution during eval)\n '''\n\n # encode language features\n if self.r_state['cont_lang'] is None and self.r_state['enc_lang'] is None:\n self.r_state['cont_lang'], self.r_state['enc_lang'] = self.encode_lang(feat)\n\n # initialize embedding and hidden states\n if self.r_state['e_t'] is None and self.r_state['state_t'] is None:\n self.r_state['e_t'] = self.dec.go.repeat(self.r_state['enc_lang'].size(0), 1)\n self.r_state['state_t'] = self.r_state['cont_lang'], torch.zeros_like(self.r_state['cont_lang'])\n\n # previous action embedding\n e_t = self.embed_action(prev_action) if prev_action is not None else self.r_state['e_t']\n\n # decode and save embedding and hidden states\n out_action_low, out_action_low_mask, state_t, *_ = self.dec.step(self.r_state['enc_lang'], feat['frames'][:, 0], e_t=e_t, state_tm1=self.r_state['state_t'])\n\n # save states\n self.r_state['state_t'] = state_t\n self.r_state['e_t'] = self.dec.emb(out_action_low.max(1)[1])\n\n # output formatting\n feat['out_action_low'] = out_action_low.unsqueeze(0)\n feat['out_action_low_mask'] = out_action_low_mask.unsqueeze(0)\n return feat\n\n\n def extract_preds(self, out, batch, feat, clean_special_tokens=True):\n '''\n output processing\n '''\n pred = {}\n for ex, alow, alow_mask in zip(batch, feat['out_action_low'].max(2)[1].tolist(), feat['out_action_low_mask']):\n # remove padding tokens\n if self.pad in alow:\n pad_start_idx = alow.index(self.pad)\n alow = alow[:pad_start_idx]\n alow_mask = alow_mask[:pad_start_idx]\n\n if clean_special_tokens:\n # remove <<stop>> tokens\n if self.stop_token in alow:\n stop_start_idx = alow.index(self.stop_token)\n alow = alow[:stop_start_idx]\n alow_mask = alow_mask[:stop_start_idx]\n\n # index to API actions\n words = self.vocab['action_low'].index2word(alow)\n\n # sigmoid preds to binary mask\n alow_mask = F.sigmoid(alow_mask)\n p_mask = [(alow_mask[t] > 0.5).cpu().numpy() for t in range(alow_mask.shape[0])]\n\n task_id_ann = self.get_task_and_ann_id(ex)\n pred[task_id_ann] = {\n 'action_low': ' '.join(words),\n 'action_low_mask': p_mask,\n }\n\n return pred\n\n\n def embed_action(self, action):\n '''\n embed low-level action\n '''\n device = torch.device('cuda') if self.args.gpu else torch.device('cpu')\n action_num = torch.tensor(self.vocab['action_low'].word2index(action), device=device)\n action_emb = self.dec.emb(action_num).unsqueeze(0)\n return action_emb\n\n\n def compute_loss(self, out, batch, feat):\n '''\n loss function for Seq2Seq agent\n '''\n losses = dict()\n\n # GT and predictions\n p_alow = out['out_action_low'].view(-1, len(self.vocab['action_low']))\n l_alow = 
feat['action_low'].view(-1)\n p_alow_mask = out['out_action_low_mask']\n valid = feat['action_low_valid_interact']\n\n # action loss\n pad_valid = (l_alow != self.pad)\n alow_loss = F.cross_entropy(p_alow, l_alow, reduction='none')\n alow_loss *= pad_valid.float()\n alow_loss = alow_loss.mean()\n losses['action_low'] = alow_loss * self.args.action_loss_wt\n\n # mask loss\n \n valid_idxs = valid.view(-1).nonzero().view(-1)\n flat_p_alow_mask = p_alow_mask.view(p_alow_mask.shape[0]*p_alow_mask.shape[1], *p_alow_mask.shape[2:])[valid_idxs]\n if flat_p_alow_mask.shape[0]!=0:\n flat_alow_mask = torch.cat(feat['action_low_mask'], dim=0)\n alow_mask_loss = self.weighted_mask_loss(flat_p_alow_mask, flat_alow_mask)\n losses['action_low_mask'] = alow_mask_loss * self.args.mask_loss_wt\n \n # subgoal completion loss\n if self.args.subgoal_aux_loss_wt > 0:\n p_subgoal = feat['out_subgoal'].squeeze(2)\n l_subgoal = feat['subgoals_completed']\n sg_loss = self.mse_loss(p_subgoal, l_subgoal)\n sg_loss = sg_loss.view(-1) * pad_valid.float()\n subgoal_loss = sg_loss.mean()\n losses['subgoal_aux'] = self.args.subgoal_aux_loss_wt * subgoal_loss\n\n # progress monitoring loss\n if self.args.pm_aux_loss_wt > 0:\n p_progress = feat['out_progress'].squeeze(2)\n l_progress = feat['subgoal_progress']\n pg_loss = self.mse_loss(p_progress, l_progress)\n pg_loss = pg_loss.view(-1) * pad_valid.float()\n progress_loss = pg_loss.mean()\n losses['progress_aux'] = self.args.pm_aux_loss_wt * progress_loss\n\n return losses\n\n\n def weighted_mask_loss(self, pred_masks, gt_masks):\n '''\n mask loss that accounts for weight-imbalance between 0 and 1 pixels\n '''\n bce = self.bce_with_logits(pred_masks, gt_masks)\n flipped_mask = self.flip_tensor(gt_masks)\n inside = (bce * gt_masks).sum() / (gt_masks).sum()\n outside = (bce * flipped_mask).sum() / (flipped_mask).sum()\n return inside + outside\n\n\n def flip_tensor(self, tensor, on_zero=1, on_non_zero=0):\n '''\n flip 0 and 1 values in tensor\n '''\n res = tensor.clone()\n res[tensor == 0] = on_zero\n res[tensor != 0] = on_non_zero\n return res\n\n\n def compute_metric(self, preds, data):\n '''\n compute f1 and extract match scores for output\n '''\n m = collections.defaultdict(list)\n for task in data:\n ex = self.load_task_json(task)\n i = self.get_task_and_ann_id(ex)\n label = ' '.join([a['discrete_action']['action'] for a in ex['plan']['low_actions']])\n m['action_low_f1'].append(compute_f1(label.lower(), preds[i]['action_low'].lower()))\n m['action_low_em'].append(compute_exact(label.lower(), preds[i]['action_low'].lower()))\n return {k: sum(v)/len(v) for k, v in m.items()}\n"
] | [
[
"torch.nn.utils.rnn.pad_sequence",
"torch.nn.LSTM",
"torch.nn.functional.sigmoid",
"torch.nn.MSELoss",
"torch.nn.utils.rnn.pad_packed_sequence",
"torch.stack",
"torch.nn.utils.rnn.pack_padded_sequence",
"numpy.array",
"torch.zeros_like",
"torch.tensor",
"numpy.expand_dims",
"torch.nn.BCEWithLogitsLoss",
"torch.nn.functional.cross_entropy",
"torch.cat",
"torch.device",
"torch.nn.Dropout"
]
] |
GarnixJu2015/GamestonkTerminal | [
"ec400e46ddce4ac934af836b863528f14a13d865"
] | [
"tests/gamestonk_terminal/etf/discovery/test_disc_controller.py"
] | [
"# IMPORTATION STANDARD\nimport os\n\n# IMPORTATION THIRDPARTY\nimport pandas as pd\nimport pytest\n\n# IMPORTATION INTERNAL\nfrom gamestonk_terminal.etf.discovery import disc_controller\n\n# pylint: disable=E1101\n# pylint: disable=W0603\n# pylint: disable=E1111\n\nEMPTY_DF = pd.DataFrame()\n\n\[email protected](record_mode=\"none\")\[email protected](\n \"queue, expected\",\n [\n ([\"load\", \"help\"], []),\n ([\"quit\", \"help\"], [\"help\"]),\n ],\n)\ndef test_menu_with_queue(expected, mocker, queue):\n path_controller = \"gamestonk_terminal.etf.discovery.disc_controller\"\n\n # MOCK SWITCH\n mocker.patch(\n target=f\"{path_controller}.DiscoveryController.switch\",\n return_value=[\"quit\"],\n )\n result_menu = disc_controller.DiscoveryController(queue=queue).menu()\n\n assert result_menu == expected\n\n\[email protected](record_mode=\"none\")\ndef test_menu_without_queue_completion(mocker):\n path_controller = \"gamestonk_terminal.etf.discovery.disc_controller\"\n\n # ENABLE AUTO-COMPLETION : HELPER_FUNCS.MENU\n mocker.patch(\n target=\"gamestonk_terminal.feature_flags.USE_PROMPT_TOOLKIT\",\n new=True,\n )\n mocker.patch(\n target=\"gamestonk_terminal.parent_classes.session\",\n )\n mocker.patch(\n target=\"gamestonk_terminal.parent_classes.session.prompt\",\n return_value=\"quit\",\n )\n mocker.patch(\n target=\"gamestonk_terminal.etf.financedatabase_model.get_etfs_categories\",\n return_value=[\"Bank Loan\"],\n )\n\n # DISABLE AUTO-COMPLETION : CONTROLLER.COMPLETER\n mocker.patch.object(\n target=disc_controller.gtff,\n attribute=\"USE_PROMPT_TOOLKIT\",\n new=True,\n )\n mocker.patch(\n target=f\"{path_controller}.session\",\n )\n mocker.patch(\n target=f\"{path_controller}.session.prompt\",\n return_value=\"quit\",\n )\n\n result_menu = disc_controller.DiscoveryController(queue=None).menu()\n\n assert result_menu == []\n\n\[email protected](record_mode=\"none\")\[email protected](\n \"mock_input\",\n [\"help\", \"homee help\", \"home help\", \"mock\"],\n)\ndef test_menu_without_queue_sys_exit(mock_input, mocker):\n path_controller = \"gamestonk_terminal.etf.discovery.disc_controller\"\n\n # DISABLE AUTO-COMPLETION\n mocker.patch.object(\n target=disc_controller.gtff,\n attribute=\"USE_PROMPT_TOOLKIT\",\n new=False,\n )\n mocker.patch(\n target=f\"{path_controller}.session\",\n return_value=None,\n )\n\n # MOCK USER INPUT\n mocker.patch(\"builtins.input\", return_value=mock_input)\n\n # MOCK SWITCH\n class SystemExitSideEffect:\n def __init__(self):\n self.first_call = True\n\n def __call__(self, *args, **kwargs):\n if self.first_call:\n self.first_call = False\n raise SystemExit()\n return [\"quit\"]\n\n mock_switch = mocker.Mock(side_effect=SystemExitSideEffect())\n mocker.patch(\n target=f\"{path_controller}.DiscoveryController.switch\",\n new=mock_switch,\n )\n\n result_menu = disc_controller.DiscoveryController(queue=None).menu()\n\n assert result_menu == []\n\n\[email protected](record_mode=\"none\")\[email protected]_stdout\ndef test_print_help():\n\n controller = disc_controller.DiscoveryController(queue=None)\n controller.print_help()\n\n\[email protected](record_mode=\"none\")\[email protected](\n \"an_input, expected_queue\",\n [\n (\"\", []),\n (\"/help\", [\"quit\", \"quit\", \"help\"]),\n (\"help/help\", [\"help\"]),\n (\"q\", [\"quit\"]),\n (\"h\", []),\n (\n \"r\",\n [\n \"quit\",\n \"quit\",\n \"reset\",\n \"etf\",\n \"disc\",\n ],\n ),\n ],\n)\ndef test_switch(an_input, expected_queue):\n\n controller = disc_controller.DiscoveryController(queue=None)\n queue = 
controller.switch(an_input=an_input)\n\n assert queue == expected_queue\n\n\[email protected](record_mode=\"none\")\ndef test_call_cls(mocker):\n mocker.patch(\"os.system\")\n\n controller = disc_controller.DiscoveryController(queue=None)\n controller.call_cls([])\n\n assert controller.queue == []\n os.system.assert_called_once_with(\"cls||clear\")\n\n\[email protected](record_mode=\"none\")\[email protected](\n \"func, queue, expected_queue\",\n [\n (\n \"call_exit\",\n [],\n [\"quit\", \"quit\", \"quit\"],\n ),\n (\"call_exit\", [\"help\"], [\"quit\", \"quit\", \"quit\", \"help\"]),\n (\"call_home\", [], [\"quit\", \"quit\"]),\n (\"call_help\", [], []),\n (\"call_quit\", [], [\"quit\"]),\n (\"call_quit\", [\"help\"], [\"quit\", \"help\"]),\n (\n \"call_reset\",\n [],\n [\n \"quit\",\n \"quit\",\n \"reset\",\n \"etf\",\n \"disc\",\n ],\n ),\n (\n \"call_reset\",\n [\"help\"],\n [\n \"quit\",\n \"quit\",\n \"reset\",\n \"etf\",\n \"disc\",\n \"help\",\n ],\n ),\n ],\n)\ndef test_call_func_expect_queue(expected_queue, func, queue):\n controller = disc_controller.DiscoveryController(queue=queue)\n result = getattr(controller, func)([])\n\n assert result is None\n assert controller.queue == expected_queue\n\n\[email protected](record_mode=\"none\")\[email protected](\n \"tested_func, other_args, mocked_func, called_args, called_kwargs\",\n [\n (\n \"call_gainers\",\n [\"-l=10\"],\n \"wsj_view.show_top_mover\",\n [\"gainers\", 10, \"\"],\n dict(),\n ),\n (\n \"call_decliners\",\n [\"-l=10\"],\n \"wsj_view.show_top_mover\",\n [\"decliners\", 10, \"\"],\n dict(),\n ),\n (\n \"call_active\",\n [\"-l=10\"],\n \"wsj_view.show_top_mover\",\n [\"active\", 10, \"\"],\n dict(),\n ),\n ],\n)\ndef test_call_func_test(\n tested_func, mocked_func, other_args, called_args, called_kwargs, mocker\n):\n path_controller = \"gamestonk_terminal.etf.discovery.disc_controller\"\n\n if mocked_func:\n mock = mocker.Mock()\n mocker.patch(\n target=f\"{path_controller}.{mocked_func}\",\n new=mock,\n )\n\n controller = disc_controller.DiscoveryController(queue=None)\n\n getattr(controller, tested_func)(other_args)\n\n if called_args or called_kwargs:\n mock.assert_called_once_with(*called_args, **called_kwargs)\n else:\n mock.assert_called_once()\n else:\n controller = disc_controller.DiscoveryController(queue=None)\n getattr(controller, tested_func)(other_args)\n"
] | [
[
"pandas.DataFrame"
]
] |
maij/pyGSTi | [
"4f8bf5337b01b7afcb7b0580b717b5d1fe281be4"
] | [
"pygsti/models/modelconstruction.py"
] | [
"\"\"\"\nFunctions for the construction of new models.\n\"\"\"\n#***************************************************************************************************\n# Copyright 2015, 2019 National Technology & Engineering Solutions of Sandia, LLC (NTESS).\n# Under the terms of Contract DE-NA0003525 with NTESS, the U.S. Government retains certain rights\n# in this software.\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except\n# in compliance with the License. You may obtain a copy of the License at\n# http://www.apache.org/licenses/LICENSE-2.0 or in the LICENSE file in the root pyGSTi directory.\n#***************************************************************************************************\n\nimport collections as _collections\nimport itertools as _itertools\nfrom os import stat\nfrom pygsti.modelmembers.instruments.instrument import Instrument\n\nimport numpy as _np\nimport scipy as _scipy\nimport scipy.linalg as _spl\n\nfrom pygsti.evotypes import Evotype as _Evotype\nfrom pygsti.modelmembers import operations as _op\nfrom pygsti.modelmembers import povms as _povm\nfrom pygsti.modelmembers import states as _state\nfrom pygsti.modelmembers import instruments as _instrument\nfrom pygsti.modelmembers.operations import opfactory as _opfactory\nfrom pygsti.models import stencillabel as _stencil\nfrom pygsti.models.modelnoise import OpModelNoise as _OpModelNoise\nfrom pygsti.models.modelnoise import OpModelPerOpNoise as _OpModelPerOpNoise\nfrom pygsti.models.modelnoise import ComposedOpModelNoise as _ComposedOpModelNoise\nfrom pygsti.models.modelnoise import LindbladNoise as _LindbladNoise\nfrom pygsti.models.modelnoise import StochasticNoise as _StochasticNoise\nfrom pygsti.models.modelnoise import DepolarizationNoise as _DepolarizationNoise\nfrom pygsti.models import explicitmodel as _emdl\nfrom pygsti.models import gaugegroup as _gg\nfrom pygsti.models.localnoisemodel import LocalNoiseModel as _LocalNoiseModel\nfrom pygsti.models.cloudnoisemodel import CloudNoiseModel as _CloudNoiseModel\nfrom pygsti.baseobjs import label as _label\nfrom pygsti.baseobjs import statespace as _statespace\nfrom pygsti.baseobjs.basis import Basis as _Basis\nfrom pygsti.baseobjs.basis import ExplicitBasis as _ExplicitBasis\nfrom pygsti.baseobjs.basis import DirectSumBasis as _DirectSumBasis\nfrom pygsti.baseobjs.qubitgraph import QubitGraph as _QubitGraph\nfrom pygsti.tools import basistools as _bt\nfrom pygsti.tools import internalgates as _itgs\nfrom pygsti.tools import optools as _ot\nfrom pygsti.tools import listtools as _lt\nfrom pygsti.baseobjs.basisconstructors import sqrt2, id2x2, sigmax, sigmay, sigmaz\nfrom pygsti.baseobjs.verbosityprinter import VerbosityPrinter as _VerbosityPrinter\nfrom pygsti.tools.legacytools import deprecate as _deprecated_fn\n\n\n#############################################\n# Build gates based on \"standard\" gate names\n############################################\ndef create_spam_vector(vec_expr, state_space, basis):\n \"\"\"\n Build a rho or E vector from an expression.\n\n Parameters\n ----------\n vec_expr : string\n the expression which determines which vector to build. Currenlty, only\n integers are allowed, which specify a the vector for the pure state of\n that index. For example, \"1\" means return vectorize(``|1><1|``). 
The\n index labels the absolute index of the state within the entire state\n space, and is independent of the direct-sum decomposition of density\n matrix space.\n\n state_space : StateSpace\n The state space that the created operation should act upon.\n\n basis : str or Basis\n The basis of the returned vector. Allowed\n values are Matrix-unit (std), Gell-Mann (gm), Pauli-product (pp),\n and Qutrit (qt) (or a custom basis object).\n\n Returns\n -------\n numpy array\n The vector specified by vec_expr in the desired basis.\n \"\"\"\n #So far just allow integer prep_expressions that give the index of state (within the state space) that we\n #prep/measure\n try:\n index = int(vec_expr)\n except:\n raise ValueError(\"Expression must be the index of a state (as a string)\")\n\n state_space = _statespace.StateSpace.cast(state_space)\n if isinstance(basis, str):\n basis = _Basis.cast(basis, state_space)\n assert (state_space.dim == basis.dim), \\\n \"State space labels dim (%s) != basis dim (%s)\" % (state_space.dim, basis.dim)\n\n #standard basis that has the same direct-sum structure as `basis`:\n std_basis = basis.create_equivalent('std')\n vecInSimpleStdBasis = _np.zeros(std_basis.elshape, 'd') # a matrix, but flattened it is our spamvec\n vecInSimpleStdBasis[index, index] = 1.0 # now a matrix with just a single 1 on the diag\n vecInReducedStdBasis = _np.dot(std_basis.from_elementstd_transform_matrix, vecInSimpleStdBasis.flatten())\n # translates the density matrix / state vector to the std basis with our desired block structure\n\n vec = _bt.change_basis(vecInReducedStdBasis, std_basis, basis)\n return vec.reshape(-1, 1)\n\n\ndef create_identity_vec(basis):\n \"\"\"\n Build a the identity vector for a given space and basis.\n\n Parameters\n ----------\n basis : Basis object\n The basis of the returned vector. Allowed\n values are Matrix-unit (std), Gell-Mann (gm), Pauli-product (pp),\n and Qutrit (qt) (or a custom basis object).\n\n Returns\n -------\n numpy array\n The identity vector in the desired basis.\n \"\"\"\n opDim = basis.dim\n if isinstance(basis, _DirectSumBasis):\n blockDims = [c.dim for c in basis.component_bases]\n else: blockDims = [opDim]\n\n # assume index given as vec_expr refers to a Hilbert-space state index, so \"reduced-std\" basis\n vecInReducedStdBasis = _np.zeros((opDim, 1), 'd')\n\n #set all diagonal elements of density matrix to 1.0 (end result = identity density mx)\n start = 0; vecIndex = 0\n for blockVecDim in blockDims:\n blockDim = int(_np.sqrt(blockVecDim)) # vec -> matrix dim\n for i in range(start, start + blockDim):\n for j in range(start, start + blockDim):\n if i == j: vecInReducedStdBasis[vecIndex, 0] = 1.0 # set diagonal element of density matrix\n vecIndex += 1\n start += blockDim\n return _bt.change_basis(vecInReducedStdBasis, \"std\", basis)\n\n\ndef create_operation(op_expr, state_space, basis=\"pp\", parameterization=\"full\", evotype='default'):\n \"\"\"\n Build an operation object from an expression.\n\n Parameters\n ----------\n op_expr : string\n expression for the gate to build. String is first split into parts\n delimited by the colon (:) character, which are composed together to\n create the final gate. Each part takes on of the allowed forms:\n\n - I(ssl_0, ...) 
= identity operation on one or more state space labels\n (ssl_i)\n - X(theta, ssl) = x-rotation by theta radians of qubit labeled by ssl\n - Y(theta, ssl) = y-rotation by theta radians of qubit labeled by ssl\n - Z(theta, ssl) = z-rotation by theta radians of qubit labeled by ssl\n - CX(theta, ssl0, ssl1) = controlled x-rotation by theta radians. Acts\n on qubit labeled by ssl1 with ssl0 being the control.\n - CY(theta, ssl0, ssl1) = controlled y-rotation by theta radians. Acts\n on qubit labeled by ssl1 with ssl0 being the control.\n - CZ(theta, ssl0, ssl1) = controlled z-rotation by theta radians. Acts\n on qubit labeled by ssl1 with ssl0 being the control.\n - CNOT(ssl0, ssl1) = standard controlled-not gate. Acts on qubit\n labeled by ssl1 with ssl0 being the control.\n - CPHASE(ssl0, ssl1) = standard controlled-phase gate. Acts on qubit\n labeled by ssl1 with ssl0 being the control.\n - LX(theta, i0, i1) = leakage between states i0 and i1. Implemented as\n an x-rotation between states with integer indices i0 and i1 followed\n by complete decoherence between the states.\n\n state_space : StateSpace\n The state space that the created operation should act upon.\n\n basis : str or Basis\n The basis the returned operation should be represented in.\n\n parameterization : {\"full\",\"TP\",\"static\"}, optional\n How to parameterize the resulting gate.\n\n - \"full\" = return a FullArbitraryOp.\n - \"TP\" = return a FullTPOp.\n - \"static\" = return a StaticArbitraryOp.\n\n evotype : Evotype or str, optional\n The evolution type of this operation, describing how states are\n represented. The special value `\"default\"` is equivalent\n to specifying the value of `pygsti.evotypes.Evotype.default_evotype`.\n\n Returns\n -------\n LinearOperator\n A gate object representing the gate given by op_expr in the desired\n basis.\n \"\"\"\n # op_expr can contain single qubit ops: X(theta) ,Y(theta) ,Z(theta)\n # two qubit ops: CNOT\n # clevel qubit ops: Leak\n # two clevel opts: Flip\n # each of which is given additional parameters specifying which indices it acts upon\n\n #Working with a StateSpaceLabels object gives us access to all the info we'll need later\n state_space = _statespace.StateSpace.cast(state_space)\n if isinstance(basis, str):\n basis = _Basis.cast(basis, state_space)\n assert(state_space.dim == basis.dim), \\\n \"State space labels dim (%s) != basis dim (%s)\" % (state_space.dim, basis.dim)\n\n # ------------------------------------------------------------------------------------------------------------------\n # -- Helper Functions ----------------------------------------------------------------------------------------------\n # ------------------------------------------------------------------------------------------------------------------\n\n def to_label(lbl):\n \"\"\" Convert integer-strings to integers in state space label \"\"\"\n try: return int(lbl)\n except: return lbl.strip()\n\n def to_labels(lbls):\n \"\"\" Convert integer-strings to integers in state space labels \"\"\"\n return [to_label(lbl) for lbl in lbls]\n\n # ------------------------------------------------------------------------------------------------------------------\n # -- End Helper Functions ------------------------------------------------------------------------------------------\n # ------------------------------------------------------------------------------------------------------------------\n\n #FUTURE?: type_preferences = ('static standard', 'static clifford', 'static unitary')\n build_evotype = 
'default'\n superop_mxs_in_basis = []\n exprTerms = op_expr.split(':')\n for exprTerm in exprTerms:\n\n l = exprTerm.index('('); r = exprTerm.rindex(')')\n opName = exprTerm[0:l]\n argsStr = exprTerm[l + 1:r]\n args = argsStr.split(',')\n\n if opName == \"I\":\n # qubit labels (TODO: what about 'L' labels? -- not sure if they work with this...)\n labels = to_labels(args)\n stateSpaceUDim = int(_np.product([state_space.label_udimension(l) for l in labels]))\n # a complex 2x2 mx unitary for the identity in Pauli-product basis\n Uop = _op.StaticUnitaryOp(_np.identity(stateSpaceUDim, 'complex'), 'pp', build_evotype)\n\n #FUTURE?:\n # stdname = 'Gi' if (stateSpaceUDim == 2) else None\n # Uop = _op.create_from_unitary_mx(_np.identity(stateSpaceUDim, complex), type_preferences, 'pp',\n # stdname=stdname, evotype=evotype)\n\n # a complex 2*num_qubits x 2*num_qubits mx unitary on full space in Pauli-product basis\n Uop_embed = _op.EmbeddedOp(state_space, labels, Uop)\n # a real 4*num_qubits x 4*num_qubits mx superoperator in final basis\n superop_mx_pp = Uop_embed.to_dense(on_space='HilbertSchmidt')\n # a real 4*num_qubits x 4*num_qubits mx superoperator in final basis\n superop_mx_in_basis = _bt.change_basis(superop_mx_pp, 'pp', basis)\n\n elif opName == \"D\":\n # like 'I', but only parameterize the diagonal elements - so can be a depolarization-type map\n raise NotImplementedError(\"Removed temporarily - need to update using embedded gates\")\n # # qubit labels (TODO: what about 'L' labels? -- not sure if they work with this...)\n # labels = to_labels(args)\n # stateSpaceDim = sslbls.product_dim(labels)\n\n # if parameterization not in (\"linear\",\"linearTP\"):\n # raise ValueError(\"'D' gate only makes sense to use when and parameterization == 'linear'\")\n\n # if defaultI2P == \"TP\":\n # # parameterize only the diagonals els after the first\n # indicesToParameterize = [ (i,i) for i in range(1,stateSpaceDim**2) ]\n # else:\n # # parameterize only the diagonals els\n # indicesToParameterize = [ (i,i) for i in range(0,stateSpaceDim**2) ]\n # # *real* 4x4 mx in Pauli-product basis -- still just the identity!\n # pp_opMx = _np.identity(stateSpaceDim**2, 'd')\n # # pp_opMx assumed to be in the Pauli-product basis\n # opTermInFinalBasis = embed_operation(pp_opMx, tuple(labels), indicesToParameterize)\n\n elif opName in ('X', 'Y', 'Z'): # single-qubit gate names\n assert(len(args) == 2) # theta, qubit-index\n theta = eval(args[0], {\"__builtins__\": None}, {'pi': _np.pi})\n label = to_label(args[1])\n assert(state_space.label_dimension(label) == 4), \"%s gate must act on qubits!\" % opName\n\n if opName == 'X': ex = -1j * theta * sigmax / 2\n elif opName == 'Y': ex = -1j * theta * sigmay / 2\n elif opName == 'Z': ex = -1j * theta * sigmaz / 2\n\n # complex 2x2 unitary matrix operating on single qubit in Pauli-product basis\n Uop = _op.StaticUnitaryOp(_spl.expm(ex), 'pp', build_evotype)\n\n #FUTURE?:\n #stdname = None\n #if _np.isclose(theta, _np.pi): stdname = 'G%spi' % opName.lower()\n #elif _np.isclose(theta, _np.pi/2): stdname = 'G%spi2' % opName.lower()\n # Uop = _op.create_from_unitary_mx(_spl.expm(ex), type_preferences, 'pp', stdname=stdname, evotype=evotype)\n\n # a complex 2*num_qubits x 2*num_qubits mx unitary on full space in Pauli-product basis\n Uop_embed = _op.EmbeddedOp(state_space, (label,), Uop)\n # a real 4*num_qubits x 4*num_qubits mx superoperator in Pauli-product basis\n superop_mx_pp = Uop_embed.to_dense(on_space='HilbertSchmidt')\n # a real 4*num_qubits x 4*num_qubits mx 
superoperator in final basis\n superop_mx_in_basis = _bt.change_basis(superop_mx_pp, 'pp', basis)\n\n elif opName == 'N': # more general single-qubit gate\n assert(len(args) == 5) # theta, sigmaX-coeff, sigmaY-coeff, sigmaZ-coeff, qubit-index\n theta = eval(args[0], {\"__builtins__\": None}, {'pi': _np.pi, 'sqrt': _np.sqrt})\n sxCoeff = eval(args[1], {\"__builtins__\": None}, {'pi': _np.pi, 'sqrt': _np.sqrt})\n syCoeff = eval(args[2], {\"__builtins__\": None}, {'pi': _np.pi, 'sqrt': _np.sqrt})\n szCoeff = eval(args[3], {\"__builtins__\": None}, {'pi': _np.pi, 'sqrt': _np.sqrt})\n label = to_label(args[4])\n assert(state_space.label_dimension(label) == 4), \"%s gate must act on qubits!\" % opName\n\n ex = -1j * theta * (sxCoeff * sigmax / 2. + syCoeff * sigmay / 2. + szCoeff * sigmaz / 2.)\n # complex 2x2 unitary matrix operating on single qubit in Pauli-product basis\n Uop = _op.StaticUnitaryOp(_spl.expm(ex), 'pp', evotype=build_evotype)\n #FUTURE?: Uop = _op.create_from_unitary_mx(_spl.expm(ex), type_preferences, 'pp', evotype=evotype)\n # a complex 2*num_qubits x 2*num_qubits mx unitary on full space in Pauli-product basis\n Uop_embed = _op.EmbeddedOp(state_space, (label,), Uop)\n # a real 4*num_qubits x 4*num_qubits mx superoperator in Pauli-product basis\n superop_mx_pp = Uop_embed.to_dense(on_space='HilbertSchmidt')\n # a real 4*num_qubits x 4*num_qubits mx superoperator in final basis\n superop_mx_in_basis = _bt.change_basis(superop_mx_pp, 'pp', basis)\n\n elif opName in ('CX', 'CY', 'CZ', 'CNOT', 'CPHASE'): # two-qubit gate names\n\n if opName in ('CX', 'CY', 'CZ'):\n assert(len(args) == 3) # theta, qubit-label1, qubit-label2\n theta = eval(args[0], {\"__builtins__\": None}, {'pi': _np.pi})\n label1 = to_label(args[1]); label2 = to_label(args[2])\n\n if opName == 'CX': ex = -1j * theta * sigmax / 2\n elif opName == 'CY': ex = -1j * theta * sigmay / 2\n elif opName == 'CZ': ex = -1j * theta * sigmaz / 2\n Utarget = _spl.expm(ex) # 2x2 unitary matrix operating on target qubit\n\n else: # opName in ('CNOT','CPHASE')\n assert(len(args) == 2) # qubit-label1, qubit-label2\n label1 = to_label(args[0]); label2 = to_label(args[1])\n\n if opName == 'CNOT':\n Utarget = _np.array([[0, 1],\n [1, 0]], 'd')\n elif opName == 'CPHASE':\n Utarget = _np.array([[1, 0],\n [0, -1]], 'd')\n\n # 4x4 unitary matrix operating on isolated two-qubit space\n U = _np.identity(4, 'complex'); U[2:, 2:] = Utarget\n assert(state_space.label_dimension(label1) == 4 and state_space.label_dimension(label2) == 4), \\\n \"%s gate must act on qubits!\" % opName\n # complex 4x4 unitary matrix operating on two-qubit in Pauli-product basis\n Uop = _op.StaticUnitaryOp(U, 'pp', build_evotype)\n\n #FUTURE?:\n # if opName == \"CNOT\": stdname = \"Gcnot\"\n # elif opName == \"CPHASE\": stdname = \"Gcphase\"\n # else: stdname = None\n # Uop = _op.create_from_unitary_mx(U, type_preferences, 'pp', stdname=stdname, evotype=evotype)\n\n # a complex 2*num_qubits x 2*num_qubits mx unitary on full space\n Uop_embed = _op.EmbeddedOp(state_space, [label1, label2], Uop)\n # a real 4*num_qubits x 4*num_qubits mx superoperator in Pauli-product basis\n superop_mx_pp = Uop_embed.to_dense(on_space='HilbertSchmidt')\n # a real 4*num_qubits x 4*num_qubits mx superoperator in final basis\n superop_mx_in_basis = _bt.change_basis(superop_mx_pp, 'pp', basis)\n\n elif opName == \"LX\": # TODO - better way to describe leakage?\n assert(len(args) == 3) # theta, dmIndex1, dmIndex2 - X rotation between any two density matrix basis states\n theta = 
eval(args[0], {\"__builtins__\": None}, {'pi': _np.pi})\n i1 = int(args[1]) # row/column index of a single *state* within the density matrix\n i2 = int(args[2]) # row/column index of a single *state* within the density matrix\n ex = -1j * theta * sigmax / 2\n Uop = _spl.expm(ex) # 2x2 unitary matrix operating on the i1-th and i2-th states of the state space basis\n\n opDim = basis.dim\n dmDim = int(_np.sqrt(basis.elsize)) # matrix dim of the \"embedding space\"\n if isinstance(basis, _DirectSumBasis):\n blockDims = [c.dim for c in basis.component_bases]\n else: blockDims = [opDim]\n\n Utot = _np.identity(dmDim, 'complex')\n Utot[i1, i1] = Uop[0, 0]\n Utot[i1, i2] = Uop[0, 1]\n Utot[i2, i1] = Uop[1, 0]\n Utot[i2, i2] = Uop[1, 1]\n\n # dmDim^2 x dmDim^2 mx operating on vectorized total densty matrix\n opTermInStdBasis = _ot.unitary_to_process_mx(Utot)\n\n # contract [3] to [2, 1]\n embedded_std_basis = _Basis.cast('std', 9) # [2]\n std_basis = _Basis.cast('std', blockDims) # std basis w/blockdim structure, i.e. [4,1]\n opTermInReducedStdBasis = _bt.resize_std_mx(opTermInStdBasis, 'contract',\n embedded_std_basis, std_basis)\n\n superop_mx_in_basis = _bt.change_basis(opTermInReducedStdBasis, std_basis, basis)\n\n else: raise ValueError(\"Invalid gate name: %s\" % opName)\n\n superop_mxs_in_basis.append(superop_mx_in_basis)\n\n #Note: expressions are listed in \"matrix composition order\"\n final_superop_mx = superop_mxs_in_basis[0]\n for mx in superop_mxs_in_basis[1:]:\n final_superop_mx = _np.dot(final_superop_mx, mx)\n\n if basis.real:\n assert(_np.linalg.norm(final_superop_mx.imag) < 1e-6), \"Operation matrix should be real but isn't!\"\n final_superop_mx = _np.real(final_superop_mx)\n\n return _op.create_from_superop_mx(final_superop_mx, parameterization, basis,\n evotype=evotype, state_space=state_space)\n\n\ndef _create_explicit_model_from_expressions(state_space, basis,\n op_labels, op_expressions,\n prep_labels=('rho0',), prep_expressions=('0',),\n effect_labels='standard', effect_expressions='standard',\n povm_labels='Mdefault', gate_type=\"full\", prep_type=\"auto\",\n povm_type=\"auto\", instrument_type=\"auto\", evotype='default'):\n \"\"\"\n Build a new Model given lists of operation labels and expressions.\n\n Parameters\n ----------\n state_space : StateSpace\n The state space for this model.\n\n basis : Basis object\n The source and destination basis, respectively. Allowed\n values are Matrix-unit (std), Gell-Mann (gm), Pauli-product (pp),\n and Qutrit (qt) (or a custom basis object).\n\n op_labels : list of strings\n A list of labels for each created gate in the final model. To\n conform with text file parsing conventions these names should begin\n with a capital G and can be followed by any number of lowercase\n characters, numbers, or the underscore character.\n\n op_expressions : list of strings\n A list of gate expressions, each corresponding to a operation label in\n op_labels, which determine what operation each gate performs (see\n documentation for :meth:`create_operation`).\n\n prep_labels : list of string, optional\n A list of labels for each created state preparation in the final\n model. 
To conform with conventions these labels should begin with\n \"rho\".\n\n prep_expressions : list of strings, optional\n A list of vector expressions for each state preparation vector (see\n documentation for :meth:`_create_spam_vector`).\n\n effect_labels : list, optional\n If `povm_labels` is a string, then this is just a list of the effect\n (outcome) labels for the single POVM. If `povm_labels` is a tuple,\n then `effect_labels` must be a list of lists of effect labels, each\n list corresponding to a POVM. If set to the special string `\"standard\"`\n then the length-n binary strings are used when the state space consists\n of n qubits (e.g. `\"000\"`, `\"001\"`, ... `\"111\"` for 3 qubits) and\n the labels `\"0\"`, `\"1\"`, ... `\"<dim>\"` are used, where `<dim>`\n is the dimension of the state space, in all non-qubit cases.\n\n effect_expressions : list, optional\n A list or list-of-lists of (string) vector expressions for each POVM\n effect vector (see documentation for :meth:`_create_spam_vector`). Expressions\n correspond to labels in `effect_labels`. If set to the special string\n `\"standard\"`, then the expressions `\"0\"`, `\"1\"`, ... `\"<dim>\"` are used,\n where `<dim>` is the dimension of the state space.\n\n povm_labels : list or string, optional\n A list of POVM labels, or a single (string) label. In the latter case,\n only a single POVM is created and the format of `effect_labels` and\n `effect_expressions` is simplified (see above).\n\n parameterization : {\"full\",\"TP\",\"static\"}, optional\n How to parameterize the gates of the resulting Model (see\n documentation for :meth:`create_operation`).\n\n evotype : Evotype or str, optional\n The evolution type of this model, describing how states are\n represented. The special value `\"default\"` is equivalent\n to specifying the value of `pygsti.evotypes.Evotype.default_evotype`.\n\n Returns\n -------\n Model\n The created model.\n \"\"\"\n #defP = \"TP\" if (parameterization in (\"TP\",\"linearTP\")) else \"full\"\n state_space = _statespace.StateSpace.cast(state_space)\n\n ret = _emdl.ExplicitOpModel(state_space, basis.copy(), default_gate_type=gate_type,\n default_prep_type=prep_type, default_povm_type=povm_type,\n default_instrument_type=instrument_type, evotype=evotype)\n #prep_prefix=\"rho\", effect_prefix=\"E\", gate_prefix=\"G\")\n\n if prep_type == \"auto\":\n prep_type = _state.state_type_from_op_type(gate_type)\n if povm_type == \"auto\":\n povm_type = _povm.povm_type_from_op_type(gate_type)\n if instrument_type == \"auto\":\n instrument_type = _instrument.instrument_type_from_op_type(gate_type)\n\n for label, rhoExpr in zip(prep_labels, prep_expressions):\n vec = create_spam_vector(rhoExpr, state_space, basis)\n ret.preps[label] = _state.create_from_dmvec(vec, prep_type, basis, evotype, state_space)\n\n if isinstance(povm_labels, str):\n povm_labels = [povm_labels]\n effect_labels = [effect_labels]\n effect_expressions = [effect_expressions]\n\n dmDim = int(_np.sqrt(basis.dim)) # \"densitymx\" evotype assumed... 
FIX?\n for povmLbl, ELbls, EExprs in zip(povm_labels,\n effect_labels, effect_expressions):\n effect_vecs = {}\n\n if ELbls == \"standard\":\n qubit_dim = 4\n if state_space.num_tensor_product_blocks == 1 and \\\n all([ldim == qubit_dim for ldim in state_space.tensor_product_block_dimensions(0)]):\n # a single tensor product block comprised of qubits: '000', '001', etc.\n nQubits = len(state_space.tensor_product_block_dimensions(0))\n ELbls = [''.join(t) for t in _itertools.product(('0', '1'), repeat=nQubits)]\n else:\n ELbls = list(map(str, range(dmDim))) # standard = 0,1,...,dmDim\n if EExprs == \"standard\":\n EExprs = list(map(str, range(dmDim))) # standard = 0,1,...,dmDim\n\n effect_vecs = {label: create_spam_vector(expr, state_space, basis)\n for label, expr in zip(ELbls, EExprs)}\n\n if len(effect_vecs) > 0: # don't add POVMs with 0 effects\n ret.povms[povmLbl] = _povm.create_from_dmvecs(effect_vecs, povm_type, basis, evotype, state_space)\n\n for (opLabel, opExpr) in zip(op_labels, op_expressions):\n ret.operations[opLabel] = create_operation(opExpr, state_space, basis, gate_type, evotype)\n\n if gate_type == \"full\":\n ret.default_gauge_group = _gg.FullGaugeGroup(ret.state_space, evotype)\n elif gate_type == \"full TP\":\n ret.default_gauge_group = _gg.TPGaugeGroup(ret.state_space, evotype)\n elif gate_type == 'CPTP':\n ret.default_gauge_group = _gg.UnitaryGaugeGroup(ret.state_space, basis, evotype)\n else:\n ret.default_gauge_group = _gg.TrivialGaugeGroup(ret.state_space)\n\n ret._clean_paramvec()\n return ret\n\n\ndef create_explicit_model_from_expressions(state_space,\n op_labels, op_expressions,\n prep_labels=('rho0',), prep_expressions=('0',),\n effect_labels='standard', effect_expressions='standard',\n povm_labels='Mdefault', basis=\"auto\", gate_type=\"full\",\n prep_type=\"auto\", povm_type=\"auto\", instrument_type=\"auto\",\n evotype='default'):\n \"\"\"\n Build a new :class:`ExplicitOpModel` given lists of labels and expressions.\n\n Parameters\n ----------\n state_space : StateSpace\n the state space for the model.\n\n op_labels : list of strings\n A list of labels for each created gate in the final model. To\n conform with text file parsing conventions these names should begin\n with a capital G and can be followed by any number of lowercase\n characters, numbers, or the underscore character.\n\n op_expressions : list of strings\n A list of gate expressions, each corresponding to a operation label in\n op_labels, which determine what operation each gate performs (see\n documentation for :meth:`create_operation`).\n\n prep_labels : list of string\n A list of labels for each created state preparation in the final\n model. To conform with conventions these labels should begin with\n \"rho\".\n\n prep_expressions : list of strings\n A list of vector expressions for each state preparation vector (see\n documentation for :meth:`_create_spam_vector`).\n\n effect_labels : list, optional\n If `povm_labels` is a string, then this is just a list of the effect\n (outcome) labels for the single POVM. If `povm_labels` is a tuple,\n then `effect_labels` must be a list of lists of effect labels, each\n list corresponding to a POVM. If set to the special string `\"standard\"`\n then the length-n binary strings are used when the state space consists\n of n qubits (e.g. `\"000\"`, `\"001\"`, ... `\"111\"` for 3 qubits) and\n the labels `\"0\"`, `\"1\"`, ... 
`\"<dim>\"` are used, where `<dim>`\n is the dimension of the state space, in all non-qubit cases.\n\n effect_expressions : list, optional\n A list or list-of-lists of (string) vector expressions for each POVM\n effect vector (see documentation for :meth:`_create_spam_vector`). Expressions\n correspond to labels in `effect_labels`. If set to the special string\n `\"standard\"`, then the expressions `\"0\"`, `\"1\"`, ... `\"<dim>\"` are used,\n where `<dim>` is the dimension of the state space.\n\n povm_labels : list or string, optional\n A list of POVM labels, or a single (string) label. In the latter case,\n only a single POVM is created and the format of `effect_labels` and\n `effect_expressions` is simplified (see above).\n\n basis : {'gm','pp','std','qt','auto'}, optional\n the basis of the matrices in the returned Model\n\n - \"std\" = operation matrix operates on density mx expressed as sum of matrix\n units\n - \"gm\" = operation matrix operates on dentity mx expressed as sum of\n normalized Gell-Mann matrices\n - \"pp\" = operation matrix operates on density mx expresses as sum of\n tensor-product of Pauli matrices\n - \"qt\" = operation matrix operates on density mx expressed as sum of\n Qutrit basis matrices\n - \"auto\" = \"pp\" if possible (integer num of qubits), \"qt\" if density\n matrix dim == 3, and \"gm\" otherwise.\n\n parameterization : {\"full\",\"TP\"}, optional\n How to parameterize the gates of the resulting Model (see\n documentation for :meth:`create_operation`).\n\n evotype : Evotype or str, optional\n The evolution type of this model, describing how states are\n represented. The special value `\"default\"` is equivalent\n to specifying the value of `pygsti.evotypes.Evotype.default_evotype`.\n\n Returns\n -------\n ExplicitOpModel\n The created model.\n \"\"\"\n\n #Note: so far, all allowed `parameterization` values => densitymx evotype\n state_space = _statespace.StateSpace.cast(state_space)\n stateSpaceDim = state_space.dim\n # Note: what about state_space_labels.tpb_dims?\n\n if basis == \"auto\":\n if _np.isclose(_np.log2(stateSpaceDim) / 2,\n round(_np.log2(stateSpaceDim) / 2)):\n basis = \"pp\"\n elif stateSpaceDim == 9:\n basis = \"qt\"\n else: basis = \"gm\"\n\n return _create_explicit_model_from_expressions(state_space,\n _Basis.cast(basis, state_space),\n op_labels, op_expressions,\n prep_labels, prep_expressions,\n effect_labels, effect_expressions,\n povm_labels, gate_type=gate_type,\n prep_type=prep_type, povm_type=povm_type,\n instrument_type=instrument_type, evotype=evotype)\n\n\ndef create_explicit_alias_model(mdl_primitives, alias_dict):\n \"\"\"\n Creates a model by applying aliases to an existing model.\n\n The new model is created by composing the gates of an existing `Model`,\n `mdl_primitives`, according to a dictionary of `Circuit`s, `alias_dict`.\n The keys of `alias_dict` are the operation labels of the returned `Model`.\n state preparations and POVMs are unaltered, and simply copied from `mdl_primitives`.\n\n Parameters\n ----------\n mdl_primitives : Model\n A Model containing the \"primitive\" gates (those used to compose\n the gates of the returned model).\n\n alias_dict : dictionary\n A dictionary whose keys are strings and values are Circuit objects\n specifying sequences of primitive gates. 
Each key,value pair specifies\n the composition rule for a creating a gate in the returned model.\n\n Returns\n -------\n Model\n A model whose gates are compositions of primitive gates and whose\n spam operations are the same as those of `mdl_primitives`.\n \"\"\"\n mdl_new = mdl_primitives.copy()\n for gl in mdl_primitives.operations.keys():\n del mdl_new.operations[gl] # remove all gates from mdl_new\n\n for gl, opstr in alias_dict.items():\n mdl_new.operations[gl] = mdl_primitives.sim.product(opstr)\n #Creates fully parameterized gates by default...\n\n mdl_new._clean_paramvec()\n return mdl_new\n\n\ndef create_explicit_model(processor_spec, custom_gates=None,\n depolarization_strengths=None, stochastic_error_probs=None, lindblad_error_coeffs=None,\n depolarization_parameterization='depolarize', stochastic_parameterization='stochastic',\n lindblad_parameterization='auto',\n evotype=\"default\", simulator=\"auto\",\n ideal_gate_type='auto', ideal_spam_type='computational',\n embed_gates=False, basis='pp'):\n\n modelnoise = _build_modelnoise_from_args(depolarization_strengths, stochastic_error_probs, lindblad_error_coeffs,\n depolarization_parameterization, stochastic_parameterization,\n lindblad_parameterization, allow_nonlocal=True)\n\n return _create_explicit_model(processor_spec, modelnoise, custom_gates, evotype,\n simulator, ideal_gate_type, ideal_spam_type, ideal_spam_type, embed_gates, basis)\n\n\ndef _create_explicit_model(processor_spec, modelnoise, custom_gates=None, evotype=\"default\", simulator=\"auto\",\n ideal_gate_type='auto', ideal_prep_type='auto', ideal_povm_type='auto',\n embed_gates=False, basis='pp'):\n qubit_labels = processor_spec.qubit_labels\n state_space = _statespace.QubitSpace(qubit_labels)\n evotype = _Evotype.cast(evotype)\n modelnoise = _OpModelNoise.cast(modelnoise)\n modelnoise.reset_access_counters()\n\n if custom_gates is None:\n custom_gates = {}\n\n if ideal_gate_type == \"auto\":\n ideal_gate_type = ('static standard', 'static clifford', 'static unitary')\n if ideal_prep_type == \"auto\":\n ideal_prep_type = _state.state_type_from_op_type(ideal_gate_type)\n if ideal_povm_type == \"auto\":\n ideal_povm_type = _povm.povm_type_from_op_type(ideal_gate_type)\n\n def _embed_unitary(statespace, target_labels, unitary):\n dummyop = _op.EmbeddedOp(statespace, target_labels,\n _op.StaticUnitaryOp(unitary, basis='pp', evotype=\"statevec_slow\")) # basis hardcode?\n return dummyop.to_dense(\"Hilbert\")\n\n local_gates = _setup_local_gates(processor_spec, evotype, None, {}, ideal_gate_type) # no custom *local* gates\n ret = _emdl.ExplicitOpModel(state_space, basis, default_gate_type=ideal_gate_type, evotype=evotype,\n simulator=simulator)\n\n # Special rule: when initializng an explicit model, if the processor spec has an implied global idle\n # gate (e.g. 
\"(idle)\", then the created model instead has a empty-tuple Label as the key for this op.\n global_idle_name = processor_spec.global_idle_gate_name\n if (global_idle_name is not None) and global_idle_name.startswith('(') and global_idle_name.endswith(')'):\n gn_to_make_emptytup = global_idle_name\n else:\n gn_to_make_emptytup = None\n\n for gn, gate_unitary in processor_spec.gate_unitaries.items():\n\n gate_is_factory = callable(gate_unitary)\n resolved_avail = processor_spec.resolved_availability(gn)\n\n if callable(resolved_avail) or resolved_avail == '*':\n assert (embed_gates), \"Cannot create factories with `embed_gates=False` yet!\"\n key = _label.Label(gn) if (gn != gn_to_make_emptytup) else _label.Label(())\n allowed_sslbls_fn = resolved_avail if callable(resolved_avail) else None\n gate_nQubits = processor_spec.gate_num_qubits(gn)\n ideal_factory = _opfactory.EmbeddingOpFactory(\n state_space, local_gates[gn], num_target_labels=gate_nQubits, allowed_sslbls_fn=allowed_sslbls_fn)\n noiseop = modelnoise.create_errormap(key, evotype, state_space) # No target indices... just local errs?\n factory = ideal_factory if (noiseop is None) else _op.ComposedOpFactory([ideal_factory, noiseop])\n ret.factories[key] = factory\n\n else: # resolved_avail is a list/tuple of available sslbls for the current gate/factory\n for inds in resolved_avail: # inds are target qubit labels\n key = _label.Label(()) if (inds is None and gn == gn_to_make_emptytup) else _label.Label(gn, inds)\n\n if key in custom_gates: # allow custom_gates to specify gate elements directly\n if isinstance(custom_gates[key], _opfactory.OpFactory):\n ret.factories[key] = custom_gates[key]\n elif isinstance(custom_gates[key], _op.LinearOperator):\n ret.operations[key] = custom_gates[key]\n else: # presumably a numpy array or something like it.\n ret.operations[key] = _op.StaticArbitraryOp(custom_gates[key], evotype,\n state_space) # static gates by default\n continue\n\n if gate_is_factory:\n assert(embed_gates), \"Cannot create factories with `embed_gates=False` yet!\"\n # TODO: check for modelnoise on *local* factory, i.e. 
create_errormap(gn, ...)??\n if inds is None or inds == tuple(qubit_labels): # then no need to embed\n ideal_factory = local_gates[gn]\n else:\n ideal_factory = _opfactory.EmbeddedOpFactory(state_space, inds, local_gates[gn])\n noiseop = modelnoise.create_errormap(key, evotype, state_space, target_labels=inds)\n factory = ideal_factory if (noiseop is None) else _op.ComposedOpFactory([ideal_factory, noiseop])\n ret.factories[key] = factory\n else:\n if inds is None or inds == tuple(qubit_labels): # then no need to embed\n if isinstance(gate_unitary, (int, _np.int64)): # interpret gate_unitary as identity\n assert(gate_unitary == len(qubit_labels)), \\\n \"Idle unitary as int should be on all qubits for %s\" % (str(gn))\n ideal_gate = _op.ComposedOp([], evotype, state_space) # (identity gate on *all* qubits)\n else:\n ideal_gate = _op.create_from_unitary_mx(gate_unitary, ideal_gate_type, 'pp',\n None, evotype, state_space)\n else:\n if embed_gates:\n ideal_gate = local_gates[gn]\n ideal_gate = _op.EmbeddedOp(state_space, inds, ideal_gate)\n else:\n if isinstance(gate_unitary, (int, _np.int64)): # interpret gate_unitary as identity\n gate_unitary = _np.identity(2**gate_unitary, 'd') # turn into explicit identity op\n if gate_unitary.shape[0] == state_space.udim: # no need to embed!\n embedded_unitary = gate_unitary\n else:\n embedded_unitary = _embed_unitary(state_space, inds, gate_unitary)\n ideal_gate = _op.create_from_unitary_mx(embedded_unitary, ideal_gate_type, 'pp',\n None, evotype, state_space)\n\n #TODO: check for modelnoise on *local* gate, i.e. create_errormap(gn, ...)??\n noiseop = modelnoise.create_errormap(key, evotype, state_space, target_labels=inds)\n layer = _op.ComposedOp([ideal_gate, noiseop]) if (noiseop is not None) else ideal_gate\n ret.operations[key] = layer\n\n # SPAM:\n local_noise = False; independent_gates = True; independent_spam = True\n prep_layers, povm_layers = _create_spam_layers(processor_spec, modelnoise, local_noise,\n ideal_prep_type, ideal_povm_type, evotype,\n state_space, independent_gates, independent_spam)\n for k, v in prep_layers.items():\n ret.preps[k] = v\n for k, v in povm_layers.items():\n ret.povms[k] = v\n\n modelnoise.warn_about_zero_counters()\n ret._clean_paramvec()\n return ret\n\n\ndef _create_spam_layers(processor_spec, modelnoise, local_noise,\n ideal_prep_type, ideal_povm_type, evotype, state_space, independent_gates, independent_spam):\n \"\"\" local_noise=True creates lindblad ops that are embedded & composed 1Q ops, and assumes\n that modelnoise specifies 1Q noise. 
local_noise=False assumes modelnoise specifies n-qubit noise\"\"\"\n qubit_labels = processor_spec.qubit_labels\n num_qubits = processor_spec.num_qubits\n singleQ_state_space = _statespace.default_space_for_udim(2) # single qubit state space\n\n # Step 1 -- get the ideal prep and POVM, created as the types we want\n # Step 2 -- add noise, by composing ideal with a noise operation (if desired)\n prep_layers = {}\n povm_layers = {}\n\n def _add_prep_noise(prep_ops):\n \"\"\" Adds one or more noise ops to prep_ops lists (to compose later) \"\"\"\n if local_noise: # then assume modelnoise specifies 1Q errors\n prep_noiseop1Q = modelnoise.create_errormap('prep', evotype, singleQ_state_space, target_labels=None)\n if prep_noiseop1Q is not None:\n err_gates = [prep_noiseop1Q.copy() for i in range(num_qubits)] \\\n if independent_gates else [prep_noiseop1Q] * num_qubits\n prep_ops.extend([_op.EmbeddedOp(state_space, [qubit_labels[i]], err_gates[i])\n for i in range(num_qubits)])\n else: # use modelnoise to construct n-qubit noise\n prepNoiseMap = modelnoise.create_errormap('prep', evotype, state_space, target_labels=None,\n qubit_graph=processor_spec.qubit_graph)\n if prepNoiseMap is not None: prep_ops.append(prepNoiseMap)\n\n def _add_povm_noise(povm_ops):\n \"\"\" Adds one or more noise ops to prep_ops lists (to compose later) \"\"\"\n if local_noise: # then assume modelnoise specifies 1Q errors\n povm_noiseop1Q = modelnoise.create_errormap('povm', evotype, singleQ_state_space, target_labels=None)\n if povm_noiseop1Q is not None:\n err_gates = [povm_noiseop1Q.copy() for i in range(num_qubits)] \\\n if independent_gates else [povm_noiseop1Q] * num_qubits\n povm_ops.extend([_op.EmbeddedOp(state_space, [qubit_labels[i]], err_gates[i])\n for i in range(num_qubits)])\n else: # use modelnoise to construct n-qubit noise\n povmNoiseMap = modelnoise.create_errormap('povm', evotype, state_space, target_labels=None,\n qubit_graph=processor_spec.qubit_graph)\n if povmNoiseMap is not None: povm_ops.append(povmNoiseMap)\n\n def _add_to_prep_layers(ideal_prep, prep_ops):\n \"\"\" Adds noise elements to prep_layers \"\"\"\n if len(prep_ops_to_compose) == 0:\n prep_layers['rho0'] = ideal_prep\n elif len(prep_ops_to_compose) == 1:\n prep_layers['rho0'] = _state.ComposedState(ideal_prep, prep_ops[0])\n else:\n prep_layers['rho0'] = _state.ComposedState(ideal_prep, _op.ComposedOp(prep_ops))\n\n def _add_to_povm_layers(ideal_povm, povm_ops):\n \"\"\" Adds noise elements to povm_layers \"\"\"\n if len(povm_ops_to_compose) == 0:\n povm_layers['Mdefault'] = ideal_povm\n elif len(povm_ops_to_compose) == 1:\n povm_layers['Mdefault'] = _povm.ComposedPOVM(povm_ops[0], ideal_povm, 'pp')\n else:\n povm_layers['Mdefault'] = _povm.ComposedPOVM(_op.ComposedOp(povm_ops), ideal_povm, 'pp')\n\n def _create_nq_noise(lndtype):\n if local_noise:\n # create a 1-qubit exp(errorgen) that is applied to each qubit independently\n errgen_1Q = _op.LindbladErrorgen.from_error_generator(singleQ_state_space.dim, lndtype, 'pp', 'pp',\n truncate=True, evotype=evotype, state_space=None)\n err_gateNQ = _op.ComposedOp([_op.EmbeddedOp(state_space, [qubit_labels[i]],\n _op.ExpErrorgenOp(errgen_1Q.copy()))\n for i in range(num_qubits)], evotype, state_space)\n else:\n # create an n-qubit exp(errorgen)\n errgen_NQ = _op.LindbladErrorgen.from_error_generator(state_space.dim, lndtype, 'pp', 'pp',\n truncate=True, evotype=evotype,\n state_space=state_space)\n err_gateNQ = _op.ExpErrorgenOp(errgen_NQ)\n return err_gateNQ\n\n # Here's where the actual logic 
starts. The above functions avoid repeated blocks within the different\n # cases below.\n\n # Prep logic\n if isinstance(ideal_prep_type, (tuple, list)): ideal_prep_type = ideal_prep_type[0] # HACK to support multiple vals\n if ideal_prep_type == 'computational' or ideal_prep_type.startswith('lindblad '):\n ideal_prep = _state.ComputationalBasisState([0] * num_qubits, 'pp', evotype, state_space)\n\n prep_ops_to_compose = []\n if ideal_prep_type.startswith('lindblad '): # then add a composed exp(errorgen) to computational SPAM\n lndtype = ideal_prep_type[len('lindblad '):]\n\n err_gateNQ = _create_nq_noise(lndtype)\n\n prep_ops_to_compose.append(err_gateNQ)\n\n # Add noise\n _add_prep_noise(prep_ops_to_compose)\n\n #Add final ops to returned dictionaries (Note: None -> ComputationPOVM within ComposedPOVM)\n _add_to_prep_layers(ideal_prep, prep_ops_to_compose)\n\n elif ideal_prep_type.startswith('tensor product '):\n #Note: with \"tensor product <X>\" types, e.g. \"tensor product static\", we assume modelnoise specifies just\n # a 1Q noise operation, even when `local_noise=False`\n vectype = ideal_prep_type[len('tensor product '):]\n\n v0, v1 = _np.array([1, 0], 'd'), _np.array([0, 1], 'd')\n ideal_prep1Q = _state.create_from_pure_vector(v0, vectype, 'pp', evotype, state_space=None)\n prep_factors = [ideal_prep1Q.copy() for i in range(num_qubits)]\n\n # Add noise\n prep_noiseop1Q = modelnoise.create_errormap('prep', evotype, singleQ_state_space, target_labels=None)\n if prep_noiseop1Q is not None:\n prep_factors = [_state.ComposedState(\n factor, (prep_noiseop1Q.copy() if independent_spam else prep_noiseop1Q)) for factor in prep_factors]\n\n prep_layers['rho0'] = _state.TensorProductState(prep_factors, state_space)\n\n else: # assume ideal_spam_type is a valid 'vectype' for creating n-qubit state vectors & POVMs\n\n vectype = ideal_prep_type\n vecs = [] # all the basis vectors for num_qubits\n for i in range(2**num_qubits):\n v = _np.zeros(2**num_qubits, 'd'); v[i] = 1.0\n vecs.append(v)\n\n ideal_prep = _state.create_from_pure_vector(vecs[0], vectype, 'pp', evotype, state_space=state_space)\n\n # Add noise\n prep_ops_to_compose = []\n _add_prep_noise(prep_ops_to_compose)\n\n # Add final ops to returned dictionaries\n _add_to_prep_layers(ideal_prep, prep_ops_to_compose)\n\n # Povm logic\n if isinstance(ideal_povm_type, (tuple, list)): ideal_povm_type = ideal_povm_type[0] # HACK to support multiple vals\n if ideal_povm_type == 'computational' or ideal_povm_type.startswith('lindblad '):\n ideal_povm = _povm.ComputationalBasisPOVM(num_qubits, evotype, state_space=state_space)\n\n povm_ops_to_compose = []\n if ideal_povm_type.startswith('lindblad '): # then add a composed exp(errorgen) to computational SPAM\n lndtype = ideal_povm_type[len('lindblad '):]\n\n err_gateNQ = _create_nq_noise(lndtype)\n\n povm_ops_to_compose.append(err_gateNQ.copy()) # .copy() => POVM errors independent\n\n # Add noise\n _add_povm_noise(povm_ops_to_compose)\n\n #Add final ops to returned dictionaries (Note: None -> ComputationPOVM within ComposedPOVM)\n effective_ideal_povm = None if len(povm_ops_to_compose) > 0 else ideal_povm\n _add_to_povm_layers(effective_ideal_povm, povm_ops_to_compose)\n\n elif ideal_povm_type.startswith('tensor product '):\n #Note: with \"tensor product <X>\" types, e.g. 
\"tensor product static\", we assume modelnoise specifies just\n # a 1Q noise operation, even when `local_noise=False`\n vectype = ideal_povm_type[len('tensor product '):]\n\n v0, v1 = _np.array([1, 0], 'd'), _np.array([0, 1], 'd')\n ideal_povm1Q = _povm.create_from_pure_vectors([('0', v0), ('1', v1)], vectype, 'pp',\n evotype, state_space=None)\n povm_factors = [ideal_povm1Q.copy() for i in range(num_qubits)]\n\n # Add noise\n povm_noiseop1Q = modelnoise.create_errormap('povm', evotype, singleQ_state_space, target_labels=None)\n if povm_noiseop1Q is not None:\n povm_factors = [_povm.ComposedPOVM(\n (povm_noiseop1Q.copy() if independent_spam else povm_noiseop1Q), factor, 'pp')\n for factor in povm_factors]\n\n povm_layers['Mdefault'] = _povm.TensorProductPOVM(povm_factors, evotype, state_space)\n\n else: # assume ideal_spam_type is a valid 'vectype' for creating n-qubit state vectors & POVMs\n\n vectype = ideal_povm_type\n vecs = [] # all the basis vectors for num_qubits\n for i in range(2**num_qubits):\n v = _np.zeros(2**num_qubits, 'd'); v[i] = 1.0\n vecs.append(v)\n\n ideal_povm = _povm.create_from_pure_vectors(\n [(format(i, 'b').zfill(num_qubits), v) for i, v in enumerate(vecs)],\n vectype, 'pp', evotype, state_space=state_space)\n\n # Add noise\n povm_ops_to_compose = []\n _add_povm_noise(povm_ops_to_compose)\n\n # Add final ops to returned dictionaries\n _add_to_povm_layers(ideal_povm, povm_ops_to_compose)\n\n return prep_layers, povm_layers\n\n\ndef _setup_local_gates(processor_spec, evotype, modelnoise=None, custom_gates=None,\n ideal_gate_type=('static standard', 'static clifford', 'static unitary')):\n \"\"\"\n Construct a dictionary of potentially noisy gates that act only on their target qubits.\n\n These gates are \"local\" because they act only on their intended target qubits. The gates\n consist of an ideal gate (obviously local, and crosstalk free) of the type given by\n `ideal_gate_type` composed with a noise operation given by `modelnoise`, if one exists.\n The returned dictionary contains keys for all the gate names in `processor_spec`. Custom\n gate objects can be given by `custom_gates`, which override the normal gate construction.\n\n Parameters\n ----------\n processor_spec : ProcessorSpec\n The processor to create gate operations for. This object specifies the\n gate names and unitaries for the processor, among other things.\n\n evotype : Evotype\n Create gate objects with this evolution type.\n\n modelnoise : ModelNoise, optional\n Noise that should be applied after the ideal gates. This noise must\n be *local* to each gate (i.e. acting on its target qubits). See the\n :class:`ModelNoise` object documentation for details regarding how\n to specify different types of noise. If `None`, then no noise is added .\n\n custom_gates : dict, optional\n A dictionary of gate objects that should be placed in the returned\n dictionary in lieu of objects that would normally be constructed.\n Keys are gate names and values are gates.\n\n ideal_gate_type : str or tuple, optional\n A gate type or tuple of gate types (listed in order of priority) which\n is used to construct the ideal gates. A gate type usually specifies the\n Python class that will be created, which determines 1) the parameterization\n of the gate and 2) the class/category of the gate (e.g. 
a :class:`StaticClifford`\n operation has no parameters and is a Clifford operation).\n\n Returns\n -------\n gatedict : dict\n A dictionary mapping gate names to local gate operations.\n \"\"\"\n std_gate_unitaries = _itgs.standard_gatename_unitaries()\n if custom_gates is None: custom_gates = {}\n if modelnoise is None: modelnoise = _OpModelPerOpNoise({})\n\n # All possible entries into the upcoming gate dictionary\n # Not just gatenames as it is possible to override in qubit-specific operations\n all_keys = _lt.remove_duplicates(list(processor_spec.gate_names)\n + list(custom_gates.keys())\n + list(modelnoise.keys()))\n\n # Cache ideal ops to ensure only one copy for each name\n ideal_gates = {}\n ideal_factories = {}\n\n gatedict = _collections.OrderedDict()\n for key in all_keys:\n # Use custom gate directly as error gate\n if key in custom_gates:\n gatedict[key] = custom_gates[key]\n continue\n\n # Skip prep, and povm here, just do gates\n if key in ['prep', 'povm']:\n continue\n\n # If key has qubits, get base name for lookup\n label = _label.Label(key)\n name = label.name\n\n U = processor_spec.gate_unitaries[name] # all gate names must be in the processorspec\n if ((name not in processor_spec.nonstd_gate_unitaries)\n or (not callable(processor_spec.nonstd_gate_unitaries[name]) and (name in std_gate_unitaries)\n and processor_spec.nonstd_gate_unitaries[name].shape == std_gate_unitaries[name].shape\n and _np.allclose(processor_spec.nonstd_gate_unitaries[name], std_gate_unitaries[name]))):\n stdname = name # setting `stdname` != None means we can try to create a StaticStandardOp below\n else:\n stdname = None\n\n if isinstance(U, (int, _np.int64)): # signals that the gate is an identity on `U` qubits\n ideal_gate_state_space = _statespace.default_space_for_num_qubits(U)\n noiseop = modelnoise.create_errormap(key, evotype, ideal_gate_state_space, target_labels=None)\n if noiseop is not None:\n gatedict[key] = noiseop\n else:\n gatedict[key] = _op.ComposedOp([], evotype, ideal_gate_state_space) # (identity gate on N qubits)\n\n elif not callable(U): # normal operation (not a factory)\n ideal_gate = ideal_gates.get(name, None)\n if ideal_gate is None:\n ideal_gate = _op.create_from_unitary_mx(U, ideal_gate_type, 'pp', stdname, evotype, state_space=None)\n ideal_gates[name] = ideal_gate\n noiseop = modelnoise.create_errormap(key, evotype, ideal_gate.state_space, target_labels=None)\n # Note: above line creates a *local* noise op, working entirely in the ideal gate's target space.\n # This means it will fail to create error maps with a given (non-local/stencil) set of sslbls, as desired\n\n if noiseop is None:\n gatedict[key] = ideal_gate\n else:\n if isinstance(noiseop, _op.ComposedOp): # avoid additional nested ComposedOp if we already have one\n noiseop.insert(0, ideal_gate)\n gatedict[key] = noiseop\n else:\n gatedict[key] = _op.ComposedOp([ideal_gate, noiseop])\n\n else: # a factory, given by the unitary-valued function U: args -> unitary\n ideal_factory = ideal_factories.get(name, None)\n if ideal_factory is None:\n local_state_space = _statespace.default_space_for_udim(U.shape[0]) # factory *function* SHAPE\n ideal_factory = _opfactory.UnitaryOpFactory(U, local_state_space, 'pp', evotype)\n ideal_factories[name] = ideal_factory\n noiseop = modelnoise.create_errormap(key, evotype, ideal_factory.state_space, target_labels=None)\n gatedict[key] = _opfactory.ComposedOpFactory([ideal_factory, noiseop]) \\\n if (noiseop is not None) else ideal_factory\n return gatedict\n\n\ndef 
create_crosstalk_free_model(processor_spec, custom_gates=None,\n depolarization_strengths=None, stochastic_error_probs=None, lindblad_error_coeffs=None,\n depolarization_parameterization='depolarize', stochastic_parameterization='stochastic',\n lindblad_parameterization='auto',\n evotype=\"default\", simulator=\"auto\", on_construction_error='raise',\n independent_gates=False, independent_spam=True, ensure_composed_gates=False,\n ideal_gate_type='auto', ideal_spam_type='computational', implicit_idle_mode='none'):\n \"\"\"\n Create a n-qubit \"crosstalk-free\" model.\n\n By virtue of being crosstalk-free, this model's operations only\n act nontrivially on their target qubits. Gates consist of an ideal gate\n operation possibly followed by an error operation.\n\n Errors can be specified using any combination of the 4 error rate/coeff arguments,\n but each gate name must be provided exclusively to one type of specification.\n Each specification results in a different type of operation, depending on the parameterization:\n - `depolarization_strengths` -> DepolarizeOp, StochasticNoiseOp, or exp(LindbladErrorgen)\n - `stochastic_error_probs` -> StochasticNoiseOp or exp(LindbladErrorgen)\n - `lindblad_error_coeffs` -> exp(LindbladErrorgen)\n\n In addition to the gate names, the special values `\"prep\"` and `\"povm\"` may be\n used as keys to specify the error on the state preparation, measurement, respectively.\n\n Parameters\n ----------\n processor_spec : ProcessorSpec\n The processor specification to create a model for. This object specifies the\n gate names and unitaries for the processor, and their availability on the\n processor.\n\n custom_gates : dict, optional\n A dictionary that associates with gate labels\n :class:`LinearOperator`, :class:`OpFactory`, or `numpy.ndarray`\n objects. These objects override any other behavior for constructing\n their designated operations. Keys of this dictionary may\n be string-type gate *names* or labels that include target qubits.\n\n depolarization_strengths : dict, optional\n A dictionary whose keys are gate names (e.g. `\"Gx\"`) and whose values\n are floats that specify the strength of uniform depolarization.\n\n stochastic_error_probs : dict, optional\n A dictionary whose keys are gate names (e.g. `\"Gx\"`) and whose values\n are tuples that specify Pauli-stochastic rates for each of the non-trivial\n Paulis (so a 3-tuple would be expected for a 1Q gate and a 15-tuple for a 2Q gate).\n\n lindblad_error_coeffs : dict, optional\n A dictionary whose keys are gate names (e.g. `\"Gx\"`) and whose values\n are dictionaries corresponding to the `lindblad_term_dict` kwarg taken\n by `LindbladErrorgen`. Keys are `(termType, basisLabel1, <basisLabel2>)`\n tuples, where `termType` can be `\"H\"` (Hamiltonian), `\"S\"`\n (Stochastic), or `\"A\"` (Affine). Hamiltonian and Affine terms always\n have a single basis label (so key is a 2-tuple) whereas Stochastic\n tuples with 1 basis label indicate a *diagonal* term, and are the\n only types of terms allowed when `nonham_mode != \"all\"`. Otherwise,\n Stochastic term tuples can include 2 basis labels to specify\n \"off-diagonal\" non-Hamiltonian Lindblad terms. Basis labels can be\n strings or integers. 
Values are complex coefficients.\n\n depolarization_parameterization : str of {\"depolarize\", \"stochastic\", or \"lindblad\"}\n Determines whether a DepolarizeOp, StochasticNoiseOp, or LindbladErrorgen\n is used to parameterize the depolarization noise, respectively.\n When \"depolarize\" (the default), a DepolarizeOp is created with the strength given\n in `depolarization_strengths`. When \"stochastic\", the depolarization strength is split\n evenly among the stochastic channels of a StochasticOp. When \"lindblad\", the depolarization\n strength is split evenly among the coefficients of the stochastic error generators\n (which are exponentiated to form a LindbladErrorgen with the \"depol\" parameterization).\n\n stochastic_parameterization : str of {\"stochastic\", or \"lindblad\"}\n Determines whether a StochasticNoiseOp or LindbladErrorgen is used to parameterize the\n stochastic noise, respectively. When \"stochastic\", elements of `stochastic_error_probs`\n are used as coefficients in a linear combination of stochastic channels (the default).\n When \"lindblad\", the elements of `stochastic_error_probs` are coefficients of\n stochastic error generators (which are exponentiated to form a LindbladErrorgen with the\n \"cptp\" parameterization).\n\n lindblad_parameterization : \"auto\" or a LindbladErrorgen paramtype\n Determines the parameterization of the LindbladErrorgen. When \"auto\" (the default), the parameterization\n is inferred from the types of error generators specified in the `lindblad_error_coeffs` dictionaries.\n When not \"auto\", the parameterization type is passed through to the LindbladErrorgen.\n\n evotype : Evotype or str, optional\n The evolution type. The special value `\"default\"` is equivalent\n to specifying the value of `pygsti.evotypes.Evotype.default_evotype`.\n\n simulator : ForwardSimulator or {\"auto\", \"matrix\", \"map\"}\n The simulator used to compute predicted probabilities for the\n resulting :class:`Model`. Using `\"auto\"` selects `\"matrix\"` when there\n are 2 qubits or less, and otherwise selects `\"map\"`.\n\n on_construction_error : {'raise','warn',ignore'}\n What to do when the creation of a gate with the given\n `parameterization` fails. Usually you'll want to `\"raise\"` the error.\n In some cases, for example when converting as many gates as you can\n into `parameterization=\"clifford\"` gates, `\"warn\"` or even `\"ignore\"`\n may be useful.\n\n independent_gates : bool, optional\n Whether gates are allowed independent local noise or not. If False,\n then all gates with the same name (e.g. \"Gx\") will have the *same*\n (local) noise (e.g. an overrotation by 1 degree), and the\n `operation_bks['gates']` dictionary contains a single key per gate\n name. If True, then gates with the same name acting on different\n qubits may have different local noise, and so the\n `operation_bks['gates']` dictionary contains a key for each gate\n available gate placement.\n\n ensure_composed_gates : bool, optional\n If True then the elements of the `operation_bks['gates']` will always\n be :class:`ComposedOp` objects. The purpose of this is to\n facilitate modifying the gate operations after the model is created.\n If False, then the appropriately parameterized gate objects (often\n dense gates) are used directly.\n\n ideal_gate_type : str or tuple, optional\n A gate type or tuple of gate types (listed in order of priority) which\n is used to construct the ideal gates. 
A gate type usually specifies the\n Python class that will be created, which determines 1) the parameterization\n of the gate and 2) the class/category of the gate (e.g. a :class:`StaticClifford`\n operation has no parameters and is a Clifford operation).\n\n ideal_spam_type : str or tuple, optional\n Similar to `ideal_gate_type` but for SPAM elements (state preparations\n and POVMs).\n\n implicit_idle_mode : {'none', 'add_global'}\n The way idle operations are added implicitly within the created model. `\"none\"`\n doesn't add any \"extra\" idle operations when there is a layer that contains some\n gates but not gates on all the qubits. `\"add_global\"` adds the global idle operation,\n i.e., the operation for a global idle layer (zero gates - a completely empty layer),\n to every layer that is simulated, using the global idle as a background idle that always\n occurs regardless of the operation.\n\n Returns\n -------\n LocalNoiseModel\n A model with `\"rho0\"` prep, `\"Mdefault\"` POVM, and gates labeled by\n the gate names and qubit labels (as specified by `processor_spec`).\n For instance, the operation label for the `\"Gx\"` gate on the second\n qubit might be `Label(\"Gx\",1)`.\n \"\"\"\n modelnoise = _build_modelnoise_from_args(depolarization_strengths, stochastic_error_probs, lindblad_error_coeffs,\n depolarization_parameterization, stochastic_parameterization,\n lindblad_parameterization, allow_nonlocal=False)\n\n return _create_crosstalk_free_model(processor_spec, modelnoise, custom_gates, evotype,\n simulator, on_construction_error, independent_gates, independent_spam,\n ensure_composed_gates, ideal_gate_type, ideal_spam_type, ideal_spam_type,\n implicit_idle_mode)\n\n\ndef _create_crosstalk_free_model(processor_spec, modelnoise, custom_gates=None, evotype=\"default\", simulator=\"auto\",\n on_construction_error='raise', independent_gates=False, independent_spam=True,\n ensure_composed_gates=False, ideal_gate_type='auto', ideal_prep_type='auto',\n ideal_povm_type='auto', implicit_idle_mode='none'):\n \"\"\"\n Create an n-qubit \"crosstalk-free\" model.\n\n Similar to :meth:`create_crosstalk_free_model` but the noise is input more generally,\n as a :class:`ModelNoise` object. 
Arguments are the same as this function except that\n `modelnoise` is given instead of several more specific noise-describing arguments.\n\n Returns\n -------\n LocalNoiseModel\n \"\"\"\n qubit_labels = processor_spec.qubit_labels\n state_space = _statespace.QubitSpace(qubit_labels)\n evotype = _Evotype.cast(evotype)\n modelnoise = _OpModelNoise.cast(modelnoise)\n modelnoise.reset_access_counters()\n\n if ideal_gate_type == \"auto\":\n ideal_gate_type = ('static standard', 'static clifford', 'static unitary')\n if ideal_prep_type == \"auto\":\n ideal_prep_type = _state.state_type_from_op_type(ideal_gate_type)\n if ideal_povm_type == \"auto\":\n ideal_povm_type = _povm.povm_type_from_op_type(ideal_gate_type)\n\n gatedict = _setup_local_gates(processor_spec, evotype, modelnoise, custom_gates, ideal_gate_type)\n\n # (Note: global idle is now handled through processor-spec processing)\n\n # SPAM:\n local_noise = True\n prep_layers, povm_layers = _create_spam_layers(processor_spec, modelnoise, local_noise,\n ideal_prep_type, ideal_povm_type, evotype,\n state_space, independent_gates, independent_spam)\n\n modelnoise.warn_about_zero_counters()\n return _LocalNoiseModel(processor_spec, gatedict, prep_layers, povm_layers,\n evotype, simulator, on_construction_error,\n independent_gates, ensure_composed_gates,\n implicit_idle_mode)\n\n\ndef create_cloud_crosstalk_model(processor_spec, custom_gates=None,\n depolarization_strengths=None, stochastic_error_probs=None, lindblad_error_coeffs=None,\n depolarization_parameterization='depolarize', stochastic_parameterization='stochastic',\n lindblad_parameterization='auto', evotype=\"default\", simulator=\"auto\",\n independent_gates=False, independent_spam=True, errcomp_type=\"gates\",\n implicit_idle_mode=\"none\", verbosity=0):\n \"\"\"\n Create a n-qubit \"cloud-crosstalk\" model.\n\n In a cloud crosstalk model, gates consist of a (local) ideal gates followed\n by an error operation that can act nontrivially on *any* of the processor's qubits\n (not just a gate's target qubits). Typically a gate's errors are specified\n relative to the gate's target qubits, forming a \"cloud\" of errors around the\n target qubits using some notion of locality (that may not be spatial, e.g.\n local in frequency). Currently, the \"ideal\" portion of each gate can only be\n created as a *static* (parameterless) object -- all gate parameters come from\n the error operation.\n\n Errors can be specified using any combination of the 4 error rate/coeff arguments,\n but each gate name must be provided exclusively to one type of specification.\n Each specification results in a different type of operation, depending on the parameterization:\n - `depolarization_strengths` -> DepolarizeOp, StochasticNoiseOp, or exp(LindbladErrorgen)\n - `stochastic_error_probs` -> StochasticNoiseOp or exp(LindbladErrorgen)\n - `lindblad_error_coeffs` -> exp(LindbladErrorgen)\n\n In addition to the gate names, the special values `\"prep\"` and `\"povm\"` may be\n used as keys to specify the error on the state preparation, measurement, respectively.\n\n Parameters\n ----------\n processor_spec : ProcessorSpec\n The processor specification to create a model for. This object specifies the\n gate names and unitaries for the processor, and their availability on the\n processor.\n\n custom_gates : dict, optional\n A dictionary that associates with gate labels\n :class:`LinearOperator`, :class:`OpFactory`, or `numpy.ndarray`\n objects. 
These objects override any other behavior for constructing\n their designated operations. Keys of this dictionary may\n be string-type gate *names* or labels that include target qubits.\n\n depolarization_strengths : dict, optional\n A dictionary whose keys are gate names (e.g. `\"Gx\"`) and whose values\n are floats that specify the strength of uniform depolarization.\n\n stochastic_error_probs : dict, optional\n A dictionary whose keys are gate names (e.g. `\"Gx\"`) and whose values\n are tuples that specify Pauli-stochastic rates for each of the non-trivial\n Paulis (so a 3-tuple would be expected for a 1Q gate and a 15-tuple for a 2Q gate).\n\n lindblad_error_coeffs : dict, optional\n A dictionary whose keys are gate names (e.g. `\"Gx\"`) and whose values\n are dictionaries corresponding to the `lindblad_term_dict` kwarg taken\n by `LindbladErrorgen`. Keys are `(termType, basisLabel1, <basisLabel2>)`\n tuples, where `termType` can be `\"H\"` (Hamiltonian), `\"S\"`\n (Stochastic), or `\"A\"` (Affine). Hamiltonian and Affine terms always\n have a single basis label (so key is a 2-tuple) whereas Stochastic\n tuples with 1 basis label indicate a *diagonal* term, and are the\n only types of terms allowed when `nonham_mode != \"all\"`. Otherwise,\n Stochastic term tuples can include 2 basis labels to specify\n \"off-diagonal\" non-Hamiltonian Lindblad terms. Basis labels can be\n strings or integers. Values are complex coefficients.\n\n depolarization_parameterization : str of {\"depolarize\", \"stochastic\", or \"lindblad\"}\n Determines whether a DepolarizeOp, StochasticNoiseOp, or LindbladErrorgen\n is used to parameterize the depolarization noise, respectively.\n When \"depolarize\" (the default), a DepolarizeOp is created with the strength given\n in `depolarization_strengths`. When \"stochastic\", the depolarization strength is split\n evenly among the stochastic channels of a StochasticOp. When \"lindblad\", the depolarization\n strength is split evenly among the coefficients of the stochastic error generators\n (which are exponentiated to form a LindbladErrorgen with the \"depol\" parameterization).\n\n stochastic_parameterization : str of {\"stochastic\", or \"lindblad\"}\n Determines whether a StochasticNoiseOp or LindbladErrorgen is used to parameterize the\n stochastic noise, respectively. When \"stochastic\", elements of `stochastic_error_probs`\n are used as coefficients in a linear combination of stochastic channels (the default).\n When \"lindblad\", the elements of `stochastic_error_probs` are coefficients of\n stochastic error generators (which are exponentiated to form a LindbladErrorgen with the\n \"cptp\" parameterization).\n\n lindblad_parameterization : \"auto\" or a LindbladErrorgen paramtype\n Determines the parameterization of the LindbladErrorgen. When \"auto\" (the default), the parameterization\n is inferred from the types of error generators specified in the `lindblad_error_coeffs` dictionaries.\n When not \"auto\", the parameterization type is passed through to the LindbladErrorgen.\n\n evotype : Evotype or str, optional\n The evolution type. The special value `\"default\"` is equivalent\n to specifying the value of `pygsti.evotypes.Evotype.default_evotype`.\n\n simulator : ForwardSimulator or {\"auto\", \"matrix\", \"map\"}\n The simulator used to compute predicted probabilities for the\n resulting :class:`Model`. 
Using `\"auto\"` selects `\"matrix\"` when there\n are 2 qubits or less, and otherwise selects `\"map\"`.\n\n independent_gates : bool, optional\n Whether gates are allowed independent noise or not. If False,\n then all gates with the same name (e.g. \"Gx\") will have the *same*\n noise (e.g. an overrotation by 1 degree), and the\n `operation_bks['cloudnoise']` dictionary will contains a single key per gate\n name. If True, then gates with the same name acting on different\n qubits may have different local noise, and so the\n `operation_bks['cloudnoise']` dictionary contains a key for each gate\n available gate placement.\n\n independent_spam : bool, optional\n Similar to `indepenent_gates` but for SPAM operations.\n\n errcomp_type : {'gates', 'errorgens'}\n Whether errors should be combined by composing error maps (`gates`) or by\n exponentiating the sum of error generators (composing the error generators,\n `errorgens`). The latter is only an option when the noise is given solely\n in terms of Lindblad error coefficients.\n\n implicit_idle_mode : {'none', 'add_global'}\n The way idel operations are added implicitly within the created model. `\"none\"`\n doesn't add any \"extra\" idle operations when there is a layer that contains some\n gates but not gates on all the qubits. `\"add_global\"` adds the global idle operation,\n i.e., the operation for a global idle layer (zero gates - a completely empty layer),\n to every layer that is simulated, using the global idle as a background idle that always\n occurs regardless of the operation.\n\n verbosity : int or VerbosityPrinter, optional\n Amount of detail to print to stdout.\n\n Returns\n -------\n CloudNoiseModel\n \"\"\"\n\n modelnoise = _build_modelnoise_from_args(depolarization_strengths, stochastic_error_probs, lindblad_error_coeffs,\n depolarization_parameterization, stochastic_parameterization,\n lindblad_parameterization, allow_nonlocal=True)\n\n return _create_cloud_crosstalk_model(processor_spec, modelnoise, custom_gates, evotype,\n simulator, independent_gates, independent_spam, errcomp_type,\n implicit_idle_mode, verbosity)\n\n\ndef _create_cloud_crosstalk_model(processor_spec, modelnoise, custom_gates=None,\n evotype=\"default\", simulator=\"auto\", independent_gates=False,\n independent_spam=True, errcomp_type=\"errorgens\",\n implicit_idle_mode=\"none\", verbosity=0):\n \"\"\"\n Create a n-qubit \"cloud-crosstalk\" model.\n\n Similar to :method:`create_cloud_crosstalk_model` but the noise is input more generally,\n as a :class:`ModelNoise` object. 
Arguments are the same as this function except that\n `modelnoise` is given instead of several more specific noise-describing arguments.\n\n Returns\n -------\n CloudNoiseModel\n \"\"\"\n qubit_labels = processor_spec.qubit_labels\n state_space = _statespace.QubitSpace(qubit_labels) # FUTURE: allow other types of state spaces somehow?\n evotype = _Evotype.cast(evotype)\n modelnoise = _OpModelNoise.cast(modelnoise)\n modelnoise.reset_access_counters()\n printer = _VerbosityPrinter.create_printer(verbosity)\n\n #Create static ideal gates without any noise (we use `modelnoise` further down)\n gatedict = _setup_local_gates(processor_spec, evotype, None, custom_gates,\n ideal_gate_type=('static standard', 'static clifford', 'static unitary'))\n stencils = _collections.OrderedDict()\n\n # (Note: global idle is now processed with other processorspec gates)\n\n # SPAM\n local_noise = False\n prep_layers, povm_layers = _create_spam_layers(processor_spec, modelnoise, local_noise,\n 'computational', 'computational', evotype, state_space,\n independent_gates, independent_spam)\n\n if errcomp_type == 'gates':\n create_stencil_fn = modelnoise.create_errormap_stencil\n apply_stencil_fn = modelnoise.apply_errormap_stencil\n elif errcomp_type == 'errorgens':\n create_stencil_fn = modelnoise.create_errorgen_stencil\n apply_stencil_fn = modelnoise.apply_errorgen_stencil\n else:\n raise ValueError(\"Invalid `errcomp_type` value: %s\" % str(errcomp_type))\n\n def build_cloudnoise_fn(lbl):\n # lbl will be for a particular gate and target qubits. If we have error rates for this specific gate\n # and target qubits (i.e this primitive layer op) then we should build it directly (and independently,\n # regardless of the value of `independent_gates`) using these rates. Otherwise, if we have a stencil\n # for this gate, then we should use it to construct the output, using a copy when gates are independent\n # and a reference to the *same* stencil operations when `independent_gates==False`.\n\n num_sslbls = len(lbl.sslbls) if (lbl.sslbls is not None) else None\n if lbl in modelnoise:\n stencil = create_stencil_fn(lbl, evotype, state_space, num_target_labels=num_sslbls)\n elif lbl.name in stencils:\n stencil = stencils[lbl.name]\n elif lbl.name in modelnoise:\n stencils[lbl.name] = create_stencil_fn(lbl.name, evotype, state_space, num_target_labels=num_sslbls)\n stencil = stencils[lbl.name]\n else:\n return None # no cloudnoise error for this label\n\n return apply_stencil_fn(stencil, evotype, state_space, target_labels=lbl.sslbls,\n qubit_graph=processor_spec.qubit_graph,\n copy=independent_gates and (lbl not in modelnoise)) # no need to copy if first case\n\n def build_cloudkey_fn(lbl):\n num_sslbls = len(lbl.sslbls) if (lbl.sslbls is not None) else None\n if lbl in modelnoise:\n stencil = create_stencil_fn(lbl, evotype, state_space, num_target_labels=num_sslbls)\n elif lbl.name in stencils:\n stencil = stencils[lbl.name]\n elif lbl.name in modelnoise:\n stencils[lbl.name] = create_stencil_fn(lbl.name, evotype, state_space, num_target_labels=num_sslbls)\n stencil = stencils[lbl.name]\n else:\n # simple cloud-key when there is no cloud noise\n return tuple(lbl.sslbls) if (lbl.sslbls is not None) else qubit_labels\n\n #Otherwise, process stencil to get a list of all the qubit labels `lbl`'s cloudnoise error\n # touches and form this into a key\n cloud_sslbls = modelnoise.compute_stencil_absolute_sslbls(stencil, state_space, lbl.sslbls,\n processor_spec.qubit_graph)\n hashable_sslbls = tuple(lbl.sslbls) if (lbl.sslbls is 
not None) else qubit_labels\n cloud_key = (hashable_sslbls, tuple(sorted(cloud_sslbls))) # (sets are unhashable)\n return cloud_key\n\n ret = _CloudNoiseModel(processor_spec, gatedict, prep_layers, povm_layers,\n build_cloudnoise_fn, build_cloudkey_fn,\n simulator, evotype, errcomp_type,\n implicit_idle_mode, printer)\n modelnoise.warn_about_zero_counters() # must do this after model creation so build_ fns have been run\n return ret\n\n\ndef create_cloud_crosstalk_model_from_hops_and_weights(\n processor_spec, custom_gates=None,\n max_idle_weight=1, max_spam_weight=1,\n maxhops=0, extra_weight_1_hops=0, extra_gate_weight=0,\n simulator=\"auto\", evotype='default',\n gate_type=\"H+S\", spam_type=\"H+S\",\n implicit_idle_mode=\"none\", errcomp_type=\"gates\",\n independent_gates=True, independent_spam=True,\n connected_highweight_errors=True,\n verbosity=0):\n \"\"\"\n Create a \"cloud crosstalk\" model based on maximum error weights and hops along the processor's qubit graph.\n\n This function provides a convenient way to construct cloud crosstalk models whose gate errors\n consist of Pauli elementary error generators (i.e. that correspond to Lindblad error coefficients)\n that are limited in weight (number of non-identity Paulis) and support (which qubits have non-trivial\n Paulis on them). Errors are taken to be approximately local, meaning they are concentrated near the\n target qubits of a gate, with the notion of locality taken from the processor specification's qubit graph.\n The caller provides maximum-weight, maximum-hop (a \"hop\" is the movement along a single graph edge), and\n gate type arguments to specify the set of possible errors on a gate.\n\n - The global idle gate (corresponding to an empty circuit layer) has errors that are limited only by\n a maximum weight, `max_idle_weight`.\n - State preparation and POVM errors are constructed similarly, with a global-idle-like error following\n or preceding the preparation or measurement, respectively.\n - Gate errors are placed on all the qubits that can be reached with at most `maxhops` hops from (any of)\n the gate's target qubits. Elementary error generators up to weight `W`, where `W` equals the number\n of target qubits (e.g., 2 for a CNOT gate) plus `extra_gate_weight` are allowed. Weight-1 terms\n are a special case, and the `extra_weight_1_hops` argument adds to the usual `maxhops` in this case\n to allow weight-1 errors on a possibly larger region of qubits around the target qubits.\n\n Parameters\n ----------\n processor_spec : ProcessorSpec\n The processor specification to create a model for. This object specifies the\n gate names and unitaries for the processor, and their availability on the\n processor.\n\n custom_gates : dict\n A dictionary that associates with gate labels\n :class:`LinearOperator`, :class:`OpFactory`, or `numpy.ndarray`\n objects. These objects describe the full action of the gate or\n primitive-layer they're labeled by (so if the model represents\n states by density matrices these objects are superoperators, not\n unitaries), and override any standard construction based on builtin\n gate names or `nonstd_gate_unitaries`. Keys of this dictionary must\n be string-type gate *names* -- they cannot include state space labels\n -- and they must be *static* (have zero parameters) because they\n represent only the ideal behavior of each gate -- the cloudnoise\n operations represent the parameterized noise. 
To fine-tune how this\n noise is parameterized, call the :class:`CloudNoiseModel` constructor\n directly.\n\n max_idle_weight : int, optional\n The maximum-weight for errors on the global idle gate.\n\n max_spam_weight : int, optional\n The maximum-weight for state preparation and measurement (SPAM) errors.\n\n maxhops : int\n The locality constraint: for a gate, errors (of weight up to the\n maximum weight for the gate) are allowed to occur on the gate's\n target qubits and those reachable by hopping at most `maxhops` times\n from a target qubit along nearest-neighbor links (defined by the\n `geometry`).\n\n extra_weight_1_hops : int, optional\n Additional hops (adds to `maxhops`) for weight-1 errors. A value > 0\n can be useful for allowing just weight-1 errors (of which there are\n relatively few) to be dispersed farther from a gate's target qubits.\n For example, a crosstalk-detecting model might use this.\n\n extra_gate_weight : int, optional\n Addtional weight, beyond the number of target qubits (taken as a \"base\n weight\" - i.e. weight 2 for a 2Q gate), allowed for gate errors. If\n this equals 1, for instance, then 1-qubit gates can have up to weight-2\n errors and 2-qubit gates can have up to weight-3 errors.\n\n simulator : ForwardSimulator or {\"auto\", \"matrix\", \"map\"}\n The circuit simulator used to compute any\n requested probabilities, e.g. from :method:`probs` or\n :method:`bulk_probs`. Using `\"auto\"` selects `\"matrix\"` when there\n are 2 qubits or less, and otherwise selects `\"map\"`.\n\n evotype : Evotype or str, optional\n The evolution type of this model, describing how states are\n represented. The special value `\"default\"` is equivalent\n to specifying the value of `pygsti.evotypes.Evotype.default_evotype`.\n\n gate_type : str, optional\n The Lindblad-error parameterization type used for gate operations. This\n may be expanded in the future, but currently the gate errors *must* be of\n the Lindblad error-generator coefficients type, and this argument specifies\n what elementary error-generator coefficients are initially allowed (and linked to\n model parameters), before maximum-weight and locality constraints are imposed.\n In addition to the usual Lindblad error types, (e.g. `\"H\"`, `\"H+S\"`) the special\n values `\"none\"` is allowed to indicate that there should be no errors on the gates\n (useful if you only want errors on the SPAM, for instance).\n\n spam_type : str, optional\n Similar to `gate_type` but for SPAM elements (state preparations\n and POVMs). This specifies the Lindblad-error parameterization for the\n state prepearation and POVM.\n\n implicit_idle_mode : {'none', 'add_global'}\n The way idel operations are added implicitly within the created model. `\"nonw\"`\n doesn't add any \"extra\" idle operations when there is a layer that contains some\n gates but not gates on all the qubits. `\"add_global\"` adds the global idle operation,\n i.e., the operation for a global idle layer (zero gates - a completely empty layer),\n to every layer that is simulated, using the global idle as a background idle that always\n occurs regardless of the operation.\n\n errcomp_type : {\"gates\",\"errorgens\"}\n How errors are composed when creating layer operations in the created\n model. `\"gates\"` means that the errors on multiple gates in a single\n layer are composed as separate and subsequent processes. 
Specifically,\n the layer operation has the form `Composed(target,idleErr,cloudErr)`\n where `target` is a composition of all the ideal gate operations in the\n layer, `idleErr` is the global idle error if `implicit_idle_mode == 'add_global'`,\n and `cloudErr` is the composition (ordered as layer-label) of cloud-\n noise contributions, i.e. a map that acts as the product of exponentiated\n error-generator matrices. `\"errorgens\"` means that layer operations\n have the form `Composed(target, error)` where `target` is as above and\n `error` results from composing (summing) the idle and cloud-noise error\n *generators*, i.e. a map that acts as the exponentiated sum of error\n generators (ordering is irrelevant in this case).\n\n independent_gates : bool, optional\n Whether the noise added to a gate when it acts on one set of target\n qubits is independent of its noise on a different set of target qubits.\n If False, then all gates with the same name (e.g. \"Gx\") will be constrained\n to having the *same* noise on the cloud around the target qubits (even though\n the target qubits and cloud are different). If True, then gate noise operations\n for different sets of target qubits are independent.\n\n independent_spam : bool, optional\n Similar to `independent_gates` but for state preparation and measurement operations.\n When `False`, the noise applied to each set (individual or pair or triple etc.) of\n qubits must be the same, e.g., if the state preparation is a perfect preparation followed\n by a single-qubit rotation then this rotation must be by the *same* angle on all of\n the qubits.\n\n connected_highweight_errors : bool, optional\n An additional constraint regarding high-weight errors. When `True`, only high weight\n (weight 2+) elementary error generators whose non-trivial Paulis occupy a *connected*\n portion of the qubit graph are allowed. For example, if the qubit graph is a 1D chain\n of 4 qubits, 1-2-3-4, and weight-2 errors are allowed on a single-qubit gate with\n target = qubit-2, then weight-2 errors on 1-2 and 2-3 would be allowed, but errors on\n 1-3 would be forbidden. 
When `False`, no constraint is imposed.\n\n verbosity : int or VerbosityPrinter, optional\n An integer >= 0 dictating how must output to send to stdout.\n\n Returns\n -------\n CloudNoiseModel\n \"\"\"\n\n # construct noise specifications for the cloudnoise model\n modelnoise = {}\n all_qubit_labels = processor_spec.qubit_labels\n conn = connected_highweight_errors # shorthand: whether high-weight errors must be connected on the graph\n global_idle_name = processor_spec.global_idle_gate_name\n\n # Global Idle\n if max_idle_weight > 0:\n assert(global_idle_name is not None), \\\n \"`max_idle_weight` must equal 0 for processor specs without a global idle gate!\"\n #printer.log(\"Creating Idle:\")\n wt_maxhop_tuples = [(i, None) for i in range(1, max_idle_weight + 1)]\n modelnoise[global_idle_name] = _build_weight_maxhops_modelnoise(all_qubit_labels, wt_maxhop_tuples,\n gate_type, conn)\n\n # SPAM\n if max_spam_weight > 0:\n wt_maxhop_tuples = [(i, None) for i in range(1, max_spam_weight + 1)]\n modelnoise['prep'] = _build_weight_maxhops_modelnoise(all_qubit_labels, wt_maxhop_tuples, spam_type, conn)\n modelnoise['povm'] = _build_weight_maxhops_modelnoise(all_qubit_labels, wt_maxhop_tuples, spam_type, conn)\n\n # Gates\n weight_maxhops_tuples_1Q = [(1, maxhops + extra_weight_1_hops)] + \\\n [(1 + x, maxhops) for x in range(1, extra_gate_weight + 1)]\n\n weight_maxhops_tuples_2Q = [(1, maxhops + extra_weight_1_hops), (2, maxhops)] + \\\n [(2 + x, maxhops) for x in range(1, extra_gate_weight + 1)]\n\n for gatenm, gate_unitary in processor_spec.gate_unitaries.items():\n if gatenm == global_idle_name: continue # processed above\n gate_nQubits = int(gate_unitary) if isinstance(gate_unitary, (int, _np.int64)) \\\n else int(round(_np.log2(gate_unitary.shape[0]))) # NOTE: integer gate_unitary => idle on n qubits\n if gate_nQubits not in (1, 2):\n raise ValueError(\"Only 1- and 2-qubit gates are supported. 
%s acts on %d qubits!\"\n % (str(gatenm), gate_nQubits))\n weight_maxhops_tuples = weight_maxhops_tuples_1Q if gate_nQubits == 1 else weight_maxhops_tuples_2Q\n target_sslbls = ('@0',) if gate_nQubits == 1 else ('@0', '@1')\n modelnoise[gatenm] = _build_weight_maxhops_modelnoise(target_sslbls, weight_maxhops_tuples,\n gate_type, conn)\n\n return _create_cloud_crosstalk_model(processor_spec, modelnoise, custom_gates,\n evotype, simulator, independent_gates, independent_spam,\n errcomp_type, implicit_idle_mode, verbosity)\n\n\ndef _iter_basis_inds(weight):\n \"\"\" Iterate over product of `weight` non-identity Pauli 1Q basis indices \"\"\"\n basisIndList = [[1, 2, 3]] * weight # assume pauli 1Q basis, and only iterate over non-identity els\n for basisInds in _itertools.product(*basisIndList):\n yield basisInds\n\n\ndef _pauli_product_matrix(sigma_inds):\n \"\"\"\n Construct the Pauli product matrix from the given `sigma_inds`\n\n Parameters\n ----------\n sigma_inds : iterable\n A sequence of integers in the range [0,3] corresponding to the\n I, X, Y, Z Pauli basis matrices.\n\n Returns\n -------\n numpy.ndarray or scipy.sparse.csr_matrix\n \"\"\"\n sigmaVec = (id2x2 / sqrt2, sigmax / sqrt2, sigmay / sqrt2, sigmaz / sqrt2)\n M = _np.identity(1, 'complex')\n for i in sigma_inds:\n M = _np.kron(M, sigmaVec[i])\n return M\n\n\ndef _construct_restricted_weight_pauli_basis(wt, sparse=False):\n basisEl_Id = _pauli_product_matrix(_np.zeros(wt, _np.int64))\n errbasis = [basisEl_Id]\n errbasis_lbls = ['I']\n for err_basis_inds in _iter_basis_inds(wt):\n error = _np.array(err_basis_inds, _np.int64) # length == wt\n basisEl = _pauli_product_matrix(error)\n errbasis.append(basisEl)\n errbasis_lbls.append(''.join([\"IXYZ\"[i] for i in err_basis_inds]))\n\n #printer.log(\"Error on qubits %s -> error basis of length %d\" % (err_qubit_inds, len(errbasis)), 3)\n return _ExplicitBasis(errbasis, errbasis_lbls, real=True, sparse=sparse)\n\n\ndef _build_weight_maxhops_modelnoise(target_sslbls, weight_maxhops_tuples, lnd_parameterization, connected=True):\n\n # This function:\n # loop over all size-`wt` *connected* combinations, `err_qubit_inds`, of the qubit indices in\n # `possible_err_qubit_inds`\n # - construct a local weight-`wt` Pauli basis & corresponding LindbladErrorgen on `wt` qubits\n # => replace with: opnoise.create_errorgen(evotype, state_space=None) where opnoise is for a wt-qubit op\n # - embed this constructed local error onto `err_qubit_inds`\n # - append embedded error onto running list\n #\n # Noise object structure:\n # OpModelPerOpNoise( { op_key/'idle': { sslbls : opnoise } } )\n # where sslbls can be absolute labels or stencil labels\n # -- could have a fn that spreads a single opnoise onto all the sslbls\n # given by size-`wt` connected combos of `possible_err_qubit_inds` - this would work for independent clouds\n # -- have LindbladNoiseDict and another LindbladPauliAtWeight (?) 
noise objects,\n # since we want to specify a lindblad noise by giving a weight and an initial basis (Pauli here)\n\n # To build a cloudnoise model from hops & weights:\n modelnoise_dict = {}\n if lnd_parameterization == 'none' or lnd_parameterization is None:\n return {} # special case when we don't want any error parameterization\n\n for wt, max_hops in weight_maxhops_tuples:\n if max_hops is None or max_hops == 0: # Note: maxHops not used in this case\n stencil_lbl = _stencil.StencilLabelAllCombos(target_sslbls, wt, connected)\n else:\n stencil_lbl = _stencil.StencilLabelRadiusCombos(target_sslbls, max_hops, wt, connected)\n\n local_state_space = _statespace.default_space_for_num_qubits(wt)\n modelnoise_dict[stencil_lbl] = _LindbladNoise.from_basis_coefficients(\n lnd_parameterization, _construct_restricted_weight_pauli_basis(wt),\n local_state_space)\n return modelnoise_dict\n\n\ndef _build_modelnoise_from_args(depolarization_strengths, stochastic_error_probs, lindblad_error_coeffs,\n depolarization_parameterization, stochastic_parameterization, lindblad_parameterization,\n allow_nonlocal):\n\n modelnoises = []\n if depolarization_strengths is not None:\n noise_dict = {}\n for lbl, val in depolarization_strengths.items():\n if isinstance(val, dict): # then value is actually a dictionary of sslbls -> noise specifications\n if not allow_nonlocal: raise ValueError(\"Nonlocal depolarization strengths not allowed!\")\n noise_dict[lbl] = {k: _DepolarizationNoise(v, depolarization_parameterization) for k, v in val.items()}\n else:\n noise_dict[lbl] = _DepolarizationNoise(val, depolarization_parameterization)\n modelnoises.append(_OpModelPerOpNoise(noise_dict))\n\n if stochastic_error_probs is not None:\n noise_dict = {}\n for lbl, val in stochastic_error_probs.items():\n if isinstance(val, dict): # then value is actually a dictionary of sslbls -> noise specifications\n if not allow_nonlocal: raise ValueError(\"Nonlocal stochastic error probs not allowed!\")\n noise_dict[lbl] = {k: _StochasticNoise(v, stochastic_parameterization) for k, v in val.items()}\n else:\n noise_dict[lbl] = _StochasticNoise(val, stochastic_parameterization)\n modelnoises.append(_OpModelPerOpNoise(noise_dict))\n\n if lindblad_error_coeffs is not None:\n\n if not allow_nonlocal: # the easy case\n modelnoises.append(_OpModelPerOpNoise({lbl: _LindbladNoise(val, lindblad_parameterization)\n for lbl, val in lindblad_error_coeffs.items()}))\n else: # then need to process labels like ('H', 'XX:0,1') or 'HXX:0,1'\n def process_stencil_labels(flat_lindblad_errs):\n nonlocal_errors = _collections.OrderedDict()\n local_errors = _collections.OrderedDict()\n\n for nm, val in flat_lindblad_errs.items():\n if isinstance(nm, str): nm = (nm[0], nm[1:]) # e.g. \"HXX\" => ('H','XX')\n err_typ, basisEls = nm[0], nm[1:]\n sslbls = None\n local_nm = [err_typ]\n for bel in basisEls: # e.g. bel could be \"X:Q0\" or \"XX:Q0,Q1\"\n # OR \"X:<n>\" where n indexes a target qubit or \"X:<dir>\" where dir indicates\n # a graph *direction*, e.g. \"up\"\n if ':' in bel:\n bel_name, bel_sslbls = bel.split(':') # should have form <name>:<comma-separated-sslbls>\n bel_sslbls = bel_sslbls.split(',') # e.g. 
('Q0','Q1')\n integerized_sslbls = []\n for ssl in bel_sslbls:\n try: integerized_sslbls.append(int(ssl))\n except: integerized_sslbls.append(ssl)\n bel_sslbls = tuple(integerized_sslbls)\n else:\n bel_name = bel\n bel_sslbls = None\n\n if sslbls is None:\n sslbls = bel_sslbls\n else:\n #Note: sslbls should always be the same if there are multiple basisEls,\n # i.e for nm == ('S',bel1,bel2)\n assert(sslbls is bel_sslbls or sslbls == bel_sslbls), \\\n \"All basis elements of the same error term must operate on the *same* state!\"\n local_nm.append(bel_name) # drop the state space labels, e.g. \"XY:Q0,Q1\" => \"XY\"\n\n # keep track of errors by the qubits they act on, as only each such\n # set will have it's own LindbladErrorgen\n local_nm = tuple(local_nm) # so it's hashable\n if sslbls is not None:\n sslbls = tuple(sorted(sslbls))\n if sslbls not in nonlocal_errors:\n nonlocal_errors[sslbls] = _collections.OrderedDict()\n if local_nm in nonlocal_errors[sslbls]:\n nonlocal_errors[sslbls][local_nm] += val\n else:\n nonlocal_errors[sslbls][local_nm] = val\n else:\n if local_nm in local_errors:\n local_errors[local_nm] += val\n else:\n local_errors[local_nm] = val\n\n if len(nonlocal_errors) == 0:\n return _LindbladNoise(local_errors, lindblad_parameterization)\n else:\n all_errors = []\n if len(local_errors) > 0:\n all_errors.append((None, _LindbladNoise(local_errors, lindblad_parameterization)))\n for sslbls, errdict in nonlocal_errors.items():\n all_errors.append((sslbls, _LindbladNoise(errdict, lindblad_parameterization)))\n return _collections.OrderedDict(all_errors)\n\n modelnoises.append(_OpModelPerOpNoise({lbl: process_stencil_labels(val)\n for lbl, val in lindblad_error_coeffs.items()}))\n\n return _ComposedOpModelNoise(modelnoises)\n\n\n@_deprecated_fn(\"This function is overly specific and will be removed soon.\")\ndef _nparams_xycnot_cloudnoise_model(num_qubits, geometry=\"line\", max_idle_weight=1, maxhops=0,\n extra_weight_1_hops=0, extra_gate_weight=0, require_connected=False,\n independent_1q_gates=True, zz_only=False, bidirectional_cnots=True, verbosity=0):\n \"\"\"\n Compute the number of parameters in a particular :class:`CloudNoiseModel`.\n\n Returns the number of parameters in the :class:`CloudNoiseModel` containing\n X(pi/2), Y(pi/2) and CNOT gates using the specified arguments without\n actually constructing the model (useful for considering parameter-count\n scaling).\n\n Parameters\n ----------\n num_qubits : int\n The total number of qubits.\n\n geometry : {\"line\",\"ring\",\"grid\",\"torus\"} or QubitGraph\n The type of connectivity among the qubits, specifying a\n graph used to define neighbor relationships. Alternatively,\n a :class:`QubitGraph` object may be passed directly.\n\n max_idle_weight : int, optional\n The maximum-weight for errors on the global idle gate.\n\n maxhops : int\n The locality constraint: for a gate, errors (of weight up to the\n maximum weight for the gate) are allowed to occur on the gate's\n target qubits and those reachable by hopping at most `maxhops` times\n from a target qubit along nearest-neighbor links (defined by the\n `geometry`).\n\n extra_weight_1_hops : int, optional\n Additional hops (adds to `maxhops`) for weight-1 errors. 
A value > 0\n can be useful for allowing just weight-1 errors (of which there are\n relatively few) to be dispersed farther from a gate's target qubits.\n For example, a crosstalk-detecting model might use this.\n\n extra_gate_weight : int, optional\n Addtional weight, beyond the number of target qubits (taken as a \"base\n weight\" - i.e. weight 2 for a 2Q gate), allowed for gate errors. If\n this equals 1, for instance, then 1-qubit gates can have up to weight-2\n errors and 2-qubit gates can have up to weight-3 errors.\n\n require_connected : bool, optional\n If True, then high-weight errors only occur on connected (via `geometry`) qubits.\n For example in a line of qubits there would not be weight-2 errors on qubits 1 and 3.\n\n independent_1q_gates : bool, optional\n If True, 1Q gates on different qubits have separate (distinct) parameters. If\n False, the 1Q gates of each type (e.g. an pi/2 X gate) for different qubits share\n the same set of parameters.\n\n zz_only : bool, optional\n If True, the only high-weight errors allowed are of \"Z^n\" type.\n\n bidirectional_cnots : bool\n Whether CNOT gates can be performed in either direction (and each direction should\n be treated as an indepedent gate)\n\n verbosity : int, optional\n An integer >= 0 dictating how much output to send to stdout.\n\n Returns\n -------\n int\n \"\"\"\n # noise can be either a seed or a random array that is long enough to use\n\n printer = _VerbosityPrinter.create_printer(verbosity)\n printer.log(\"Computing parameters for a %d-qubit %s model\" % (num_qubits, geometry))\n\n qubitGraph = _QubitGraph.common_graph(num_qubits, geometry, directed=True, all_directions=True)\n #printer.log(\"Created qubit graph:\\n\"+str(qubitGraph))\n\n def idle_count_nparams(max_weight):\n \"\"\"Parameter count of a `build_nqn_global_idle`-constructed gate\"\"\"\n ret = 0\n possible_err_qubit_inds = _np.arange(num_qubits)\n for wt in range(1, max_weight + 1):\n nErrTargetLocations = qubitGraph.connected_combos(possible_err_qubit_inds, wt)\n if zz_only and wt > 1: basisSizeWoutId = 1**wt # ( == 1)\n else: basisSizeWoutId = 3**wt # (X,Y,Z)^wt\n nErrParams = 2 * basisSizeWoutId # H+S terms\n ret += nErrTargetLocations * nErrParams\n return ret\n\n def op_count_nparams(target_qubit_inds, weight_maxhops_tuples, debug=False):\n \"\"\"Parameter count of a `build_nqn_composed_gate`-constructed gate\"\"\"\n ret = 0\n #Note: no contrib from idle noise (already parameterized)\n for wt, maxHops in weight_maxhops_tuples:\n possible_err_qubit_inds = _np.array(qubitGraph.radius(target_qubit_inds, maxHops), _np.int64)\n if require_connected:\n nErrTargetLocations = qubitGraph.connected_combos(possible_err_qubit_inds, wt)\n else:\n nErrTargetLocations = _scipy.special.comb(len(possible_err_qubit_inds), wt)\n if zz_only and wt > 1: basisSizeWoutId = 1**wt # ( == 1)\n else: basisSizeWoutId = 3**wt # (X,Y,Z)^wt\n nErrParams = 2 * basisSizeWoutId # H+S terms\n if debug:\n print(\" -- wt%d, hops%d: inds=%s locs = %d, eparams=%d, total contrib = %d\" %\n (wt, maxHops, str(possible_err_qubit_inds), nErrTargetLocations,\n nErrParams, nErrTargetLocations * nErrParams))\n ret += nErrTargetLocations * nErrParams\n return ret\n\n nParams = _collections.OrderedDict()\n\n printer.log(\"Creating Idle:\")\n nParams[_label.Label('Gi')] = idle_count_nparams(max_idle_weight)\n\n #1Q gates: X(pi/2) & Y(pi/2) on each qubit\n weight_maxhops_tuples_1Q = [(1, maxhops + extra_weight_1_hops)] + \\\n [(1 + x, maxhops) for x in range(1, extra_gate_weight + 1)]\n\n if 
independent_1q_gates:\n for i in range(num_qubits):\n printer.log(\"Creating 1Q X(pi/2) and Y(pi/2) gates on qubit %d!!\" % i)\n nParams[_label.Label(\"Gx\", i)] = op_count_nparams((i,), weight_maxhops_tuples_1Q)\n nParams[_label.Label(\"Gy\", i)] = op_count_nparams((i,), weight_maxhops_tuples_1Q)\n else:\n printer.log(\"Creating common 1Q X(pi/2) and Y(pi/2) gates\")\n rep = int(num_qubits / 2)\n nParams[_label.Label(\"Gxrep\")] = op_count_nparams((rep,), weight_maxhops_tuples_1Q)\n nParams[_label.Label(\"Gyrep\")] = op_count_nparams((rep,), weight_maxhops_tuples_1Q)\n\n #2Q gates: CNOT gates along each graph edge\n weight_maxhops_tuples_2Q = [(1, maxhops + extra_weight_1_hops), (2, maxhops)] + \\\n [(2 + x, maxhops) for x in range(1, extra_gate_weight + 1)]\n seen_pairs = set()\n for i, j in qubitGraph.edges(): # note: all edges have i<j so \"control\" of CNOT is always lower index (arbitrary)\n if bidirectional_cnots is False:\n ordered_tup = (i, j) if i <= j else (j, i)\n if ordered_tup in seen_pairs: continue\n else: seen_pairs.add(ordered_tup)\n\n printer.log(\"Creating CNOT gate between qubits %d and %d!!\" % (i, j))\n nParams[_label.Label(\"Gcnot\", (i, j))] = op_count_nparams((i, j), weight_maxhops_tuples_2Q)\n\n #SPAM\n nPOVM_1Q = 4 # params for a single 1Q POVM\n nParams[_label.Label('rho0')] = 3 * num_qubits # 3 b/c each component is TP\n nParams[_label.Label('Mdefault')] = nPOVM_1Q * num_qubits # num_qubits 1Q-POVMs\n\n return nParams, sum(nParams.values())\n"
] | [
[
"numpy.log2",
"numpy.allclose",
"numpy.linalg.norm",
"numpy.array",
"numpy.zeros",
"numpy.kron",
"scipy.linalg.expm",
"numpy.arange",
"numpy.sqrt",
"numpy.dot",
"numpy.identity",
"numpy.real"
]
] |
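Illustrative note on the pyGSTi snippet above: the `_pauli_product_matrix` and `_construct_restricted_weight_pauli_basis` helpers build weight-`wt` Pauli products by chaining Kronecker products of sqrt(2)-normalized 1-qubit Paulis and then labelling each product with an "IXYZ" string. A minimal standalone sketch of that pattern, assuming the standard Pauli matrices for the `sigmax`/`sigmay`/`sigmaz` globals that the snippet references from elsewhere in its module:

import itertools
import numpy as np

# Normalized 1-qubit Pauli matrices (divided by sqrt(2)), as in the snippet.
sqrt2 = np.sqrt(2)
id2x2 = np.identity(2, 'complex')
sigmax = np.array([[0, 1], [1, 0]], 'complex')
sigmay = np.array([[0, -1j], [1j, 0]], 'complex')
sigmaz = np.array([[1, 0], [0, -1]], 'complex')
sigma_vec = (id2x2 / sqrt2, sigmax / sqrt2, sigmay / sqrt2, sigmaz / sqrt2)

def pauli_product(sigma_inds):
    # Kronecker product of the selected 1-qubit Paulis, e.g. (1, 3) -> X (x) Z.
    M = np.identity(1, 'complex')
    for i in sigma_inds:
        M = np.kron(M, sigma_vec[i])
    return M

def weight_w_labels_and_matrices(weight):
    # All non-identity Pauli strings of the given weight, labelled per factor.
    for inds in itertools.product([1, 2, 3], repeat=weight):
        yield ''.join("IXYZ"[i] for i in inds), pauli_product(inds)

# Example: the 9 non-identity weight-2 Pauli products are 4x4 matrices.
labels = [lbl for lbl, _ in weight_w_labels_and_matrices(2)]
assert len(labels) == 3 ** 2 and labels[0] == 'XX'

For weight `w` this enumerates the 3**w non-identity elements that, together with the identity, the snippet collects into an `_ExplicitBasis` for the restricted-weight Lindblad error generators.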
OmarJabri7/SAIA | [
"54dfee4684dbfd5bf6cb58cc3974abc051022022"
] | [
"data/get_data.py"
] | [
"import pandas as pd\nimport numpy as np\nfrom nltk.corpus import words\nimport nltk\nimport re\nimport string\nfrom data_processing import DisasterProcessor\n\nX = pd.read_csv(\"emotion_data/tweet_emotions.csv\")\n\nstop_wrds = nltk.corpus.stopwords.words(\"english\")\ncolumns = X.columns\ncolumns = [\"content\"]\npreprocessor = DisasterProcessor()\neng_words = set(words.words())\nfor column in columns:\n X[column] = X[column].apply(\n lambda x: ' '.join([re.sub(\"[$@&#]\",\"\",w) for w in x.lower().split(\" \") if w]))\n table = str.maketrans('', '', string.punctuation)\n X[column] = X[column].apply(\n lambda x: ' '.join([w.translate(table) for w in x.split(\" \") if w.isalpha()]))\n X[column] = X[column].apply(\n lambda x: preprocessor.utils_preprocess_text(x, flg_stemm=False, flg_lemm=True, lst_stopwords=stop_wrds))\n X[column] = X[column].apply(\n lambda x: ' '.join([w for w in x.split(\" \") if len(w) >= 2]))\n\nX[\"content\"] = X[\"content\"].apply(\n lambda x: ' '.join(([w for w in x.split(\" \") if w in eng_words]))\n)\nunique_words = list(X['content'].str.split(' ', expand=True).stack().unique())\n# X.Sentence = X.Sentence.apply(lambda x: x if len(x) > 2 else np.nan)\n\n# X[\"clean_content\"] = X[\"content\"].str.replace('[#,@,&,=,[,http://]', '')\n\nprint(np.unique(X[\"sentiment\"]))\n\nX = X.loc[X['sentiment'].isin(['sadness','happiness','love','hate','fun','enthusiasm','relief','fear','anger',\n 'surprise', 'worry'])]\n\n# X = X[\"sentiment\" in ['sadness','happiness','love','hate','fun','enthusiasm','relief','fear','anger']]\n\nX = X[['sentiment','content']]\n\n# happy = X.loc[X['sentiment'].isin(['happiness','fun','enthusiasm','relief']), 'content'].values\n\nhappy = X.loc[X['sentiment'].isin(['happiness']), 'content'].values\n\nlove = X.loc[X['sentiment'].isin(['love']),'content'].values\n\n# sadness = X.loc[X['sentiment'].isin(['sadness','worry']), 'content'].values\n\nsadness = X.loc[X['sentiment'].isin(['sadness']), 'content'].values\n\n# angry = X.loc[X['sentiment'].isin(['hate','anger']), 'content'].values\n\nangry = X.loc[X['sentiment'].isin(['anger']), 'content'].values\n\nsurprise = X.loc[X['sentiment'].isin(['surprise']), 'content'].values\n\nfear = X.loc[X['sentiment'].isin(['fear']),'content'].values\n\n# emotions = dict(Emotion = ['happy','love','sadness','angry','surprise','fear'])\n# data = {\"Sentence\" : [happy, love, sadness, angry, surprise, fear],\n# \"Emotion\" : ['joy','love','sadness','anger','surprise','fear'],}\n#\ndata = {\"Sentence\" : [sadness, angry, fear],\n \"Emotion\" : ['sadness','anger','fear'],}\n\nnew_df = pd.DataFrame(data)\n\nnew_df = new_df.explode('Sentence', ignore_index=True)\n\nnew_df.to_csv('emotion_data/add_data.txt', header=None, index=None, sep=';')\n"
] | [
[
"pandas.read_csv",
"pandas.DataFrame",
"numpy.unique"
]
] |
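Illustrative note on `data/get_data.py` above: after cleaning, the script groups tweet texts by a reduced set of emotions and flattens them with `DataFrame.explode` into one `Sentence`/`Emotion` row per tweet before writing a semicolon-separated file. A small self-contained sketch of that reshaping step, using an inline stand-in frame and a hypothetical output name instead of the repo's `emotion_data/` files:

import numpy as np
import pandas as pd

# Tiny stand-in for the tweet data; the real script reads
# "emotion_data/tweet_emotions.csv" instead.
X = pd.DataFrame({
    "sentiment": ["sadness", "anger", "fear", "love", "sadness"],
    "content": ["so tired today", "this is infuriating", "really scared now",
                "adore this song", "feeling down"],
})

print(np.unique(X["sentiment"]))  # distinct labels present in the data

# Keep only the emotions of interest, then gather the texts per emotion ...
keep = ["sadness", "anger", "fear"]
X = X.loc[X["sentiment"].isin(keep)]
data = {
    "Sentence": [X.loc[X["sentiment"] == emo, "content"].values for emo in keep],
    "Emotion": keep,
}

# ... and flatten back to one row per sentence, as the script does.
new_df = pd.DataFrame(data).explode("Sentence", ignore_index=True)
new_df.to_csv("add_data_example.txt", header=None, index=None, sep=";")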
AustinHellerRepo/GameManager | [
"2eee8e821f551b4683e59ea8cde7e61c26cf8878"
] | [
"test/latency_position_test.py"
] | [
"from __future__ import annotations\nimport unittest\nimport time\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom typing import List, Tuple, Dict, Set, Callable, Type\n\n\nclass Dot():\n\n\tdef __init__(self, position: Tuple[float, float], velocity: Tuple[float, float], acceleration: Tuple[float, float]):\n\n\t\tself.__position = position\n\t\tself.__velocity = velocity\n\t\tself.__acceleration = acceleration\n\t\tself.__time_index_offset = 0\n\n\t\tself.__acceleration_delta = None # type: Tuple[float, float]\n\t\tself.__acceleration_delta_end_time_index = None # type: float\n\t\tself.__acceleration_delta_end_time_index_acceleration = None # type: Tuple[float, float]\n\n\tdef set_positiion(self, *, position: Tuple[float, float]):\n\t\tself.__position = position\n\n\tdef set_velocity(self, *, velocity: Tuple[float, float]):\n\t\tself.__velocity = velocity\n\n\tdef set_acceleration(self, *, acceleration: Tuple[float, float]):\n\t\tself.__acceleration = acceleration\n\n\tdef get_position(self, *, time_index: float) -> Tuple[float, float]:\n\t\tcalculated_time_index = time_index + self.__time_index_offset\n\t\tposition = list(self.__position)\n\t\tfor dimension_index in range(len(position)):\n\t\t\tposition[dimension_index] += self.__velocity[dimension_index] * calculated_time_index\n\t\t\tif self.__acceleration_delta_end_time_index is None:\n\t\t\t\tposition[dimension_index] += (self.__acceleration[dimension_index] * calculated_time_index ** 2) / 2.0\n\t\t\telse:\n\t\t\t\tif calculated_time_index < self.__acceleration_delta_end_time_index:\n\t\t\t\t\tposition[dimension_index] += (self.__acceleration[dimension_index] * calculated_time_index ** 2) / 2.0\n\t\t\t\t\tposition[dimension_index] += (self.__acceleration_delta[dimension_index] * calculated_time_index ** 3) / 6.0\n\t\t\t\telse:\n\t\t\t\t\tposition[dimension_index] += (self.__acceleration[dimension_index] * self.__acceleration_delta_end_time_index ** 2) / 2.0\n\t\t\t\t\tposition[dimension_index] += (self.__acceleration_delta_end_time_index_acceleration[dimension_index] * (calculated_time_index - self.__acceleration_delta_end_time_index) ** 2) / 2.0\n\t\t\t\t\tposition[dimension_index] += (self.__acceleration_delta[dimension_index] * self.__acceleration_delta_end_time_index ** 3) / 6.0\n\t\treturn tuple(position)\n\n\tdef get_velocity(self, *, time_index: float) -> Tuple[float, float]:\n\t\tcalculated_time_index = time_index + self.__time_index_offset\n\t\tvelocity = list(self.__velocity)\n\t\tfor dimension_index in range(len(velocity)):\n\t\t\tif self.__acceleration_delta_end_time_index is None:\n\t\t\t\tvelocity[dimension_index] += self.__acceleration[dimension_index] * calculated_time_index\n\t\t\telse:\n\t\t\t\tif calculated_time_index < self.__acceleration_delta_end_time_index:\n\t\t\t\t\tvelocity[dimension_index] += self.__acceleration[dimension_index] * calculated_time_index\n\t\t\t\t\tvelocity[dimension_index] += (self.__acceleration_delta[dimension_index] * calculated_time_index**2) / 2.0\n\t\t\t\telse:\n\t\t\t\t\tvelocity[dimension_index] += self.__acceleration[dimension_index] * self.__acceleration_delta_end_time_index\n\t\t\t\t\tvelocity[dimension_index] += self.__acceleration_delta_end_time_index_acceleration[dimension_index] * (calculated_time_index - self.__acceleration_delta_end_time_index)\n\t\t\t\t\tvelocity[dimension_index] += (self.__acceleration_delta[dimension_index] * self.__acceleration_delta_end_time_index**2) / 2.0\n\t\treturn tuple(velocity)\n\n\tdef get_acceleration(self, *, time_index: float) -> 
Tuple[float, float]:\n\t\tcalculated_time_index = time_index + self.__time_index_offset\n\t\tacceleration = [0] * len(self.__position)\n\t\tfor dimension_index in range(len(acceleration)):\n\t\t\tif self.__acceleration_delta_end_time_index is None:\n\t\t\t\tacceleration[dimension_index] += self.__acceleration[dimension_index]\n\t\t\telse:\n\t\t\t\tif calculated_time_index < self.__acceleration_delta_end_time_index:\n\t\t\t\t\tacceleration[dimension_index] += self.__acceleration[dimension_index]\n\t\t\t\t\tacceleration[dimension_index] += (self.__acceleration_delta[dimension_index] * calculated_time_index)\n\t\t\t\telse:\n\t\t\t\t\tacceleration[dimension_index] += self.__acceleration_delta_end_time_index_acceleration[dimension_index]\n\t\t\t\t\tacceleration[dimension_index] += (self.__acceleration_delta[dimension_index] * self.__acceleration_delta_end_time_index)\n\t\treturn tuple(self.__acceleration)\n\n\tdef bounce(self, *, time_index: float):\n\t\tbounce_position = self.get_position(\n\t\t\ttime_index=time_index\n\t\t)\n\t\tbounce_velocity = self.get_velocity(\n\t\t\ttime_index=time_index\n\t\t)\n\t\tbounce_acceleration = self.get_acceleration(\n\t\t\ttime_index=time_index\n\t\t)\n\t\tself.__position = bounce_position\n\t\tself.__velocity = (bounce_velocity[0], -bounce_velocity[1])\n\t\tself.__acceleration = bounce_acceleration\n\t\tcalculated_time_index = time_index + self.__time_index_offset\n\t\tif self.__acceleration_delta_end_time_index is not None:\n\t\t\tself.__acceleration_delta_end_time_index -= calculated_time_index\n\t\t\tif self.__acceleration_delta_end_time_index <= 0:\n\t\t\t\tself.__acceleration_delta = None\n\t\t\t\tself.__acceleration_delta_end_time_index = None\n\t\t\t\tself.__acceleration_delta_end_time_index_acceleration = None\n\t\tself.__time_index_offset = -time_index\n\n\tdef reflect(self, *, time_index: float):\n\t\treflect_position = self.get_position(\n\t\t\ttime_index=time_index\n\t\t)\n\t\treflect_velocity = self.get_velocity(\n\t\t\ttime_index=time_index\n\t\t)\n\t\treflect_acceleration = self.get_acceleration(\n\t\t\ttime_index=time_index\n\t\t)\n\t\tself.__position = reflect_position\n\t\tself.__velocity = (-reflect_velocity[0], reflect_velocity[1])\n\t\tself.__acceleration = reflect_acceleration\n\t\tcalculated_time_index = time_index + self.__time_index_offset\n\t\tif self.__acceleration_delta_end_time_index is not None:\n\t\t\tself.__acceleration_delta_end_time_index -= calculated_time_index\n\t\t\tif self.__acceleration_delta_end_time_index <= 0:\n\t\t\t\tself.__acceleration_delta = None\n\t\t\t\tself.__acceleration_delta_end_time_index = None\n\t\t\t\tself.__acceleration_delta_end_time_index_acceleration = None\n\t\tself.__time_index_offset = -time_index\n\n\tdef set_state(self, *, position: Tuple[float, float], velocity: Tuple[float, float], acceleration: Tuple[float, float], time_index: float):\n\t\tself.__position = position\n\t\tself.__velocity = velocity\n\t\tself.__acceleration = acceleration\n\t\tcalculated_time_index = time_index + self.__time_index_offset\n\t\tif self.__acceleration_delta_end_time_index is not None:\n\t\t\tself.__acceleration_delta_end_time_index -= calculated_time_index\n\t\t\tif self.__acceleration_delta_end_time_index <= 0:\n\t\t\t\tself.__acceleration_delta = None\n\t\t\t\tself.__acceleration_delta_end_time_index = None\n\t\t\t\tself.__acceleration_delta_end_time_index_acceleration = None\n\t\tself.__time_index_offset = -time_index\n\n\tdef set_acceleration_delta(self, *, time_index: float, acceleration_delta: 
Tuple[float, float], end_time_index: float):\n\t\ttime_index_position = self.get_position(\n\t\t\ttime_index=time_index\n\t\t)\n\t\ttime_index_velocity = self.get_velocity(\n\t\t\ttime_index=time_index\n\t\t)\n\t\ttime_index_acceleration = self.get_acceleration(\n\t\t\ttime_index=time_index\n\t\t)\n\t\tself.__position = time_index_position\n\t\tself.__velocity = time_index_velocity\n\t\tself.__acceleration = time_index_acceleration\n\t\tself.__time_index_offset = -time_index\n\t\tself.__acceleration_delta = acceleration_delta\n\t\tself.__acceleration_delta_end_time_index = end_time_index\n\t\tself.__acceleration_delta_end_time_index_acceleration = time_index_acceleration\n\n\tdef merge(self, *, dot: Dot, current_time_index: float, merge_time_index_offset: float):\n\t\tself_position = self.get_position(\n\t\t\ttime_index=current_time_index\n\t\t)\n\t\tself_velocity = self.get_velocity(\n\t\t\ttime_index=current_time_index\n\t\t)\n\t\tdestination_position = dot.get_position(\n\t\t\ttime_index=current_time_index + merge_time_index_offset\n\t\t)\n\t\tdestination_velocity = dot.get_velocity(\n\t\t\ttime_index=current_time_index + merge_time_index_offset\n\t\t)\n\t\tdestination_acceleration = dot.get_acceleration(\n\t\t\ttime_index=current_time_index + merge_time_index_offset\n\t\t)\n\n\t\tacceleration_delta = []\n\t\tacceleration = []\n\t\tfor dimension_index in range(len(self.__position)):\n\t\t\ttemp_acceleration_delta = (-12 * destination_position[dimension_index] + 6 * destination_velocity[dimension_index] * merge_time_index_offset + 12 * self_position[dimension_index] + 6 * self_velocity[dimension_index] * merge_time_index_offset) / (merge_time_index_offset**3)\n\t\t\ttemp_acceleration = (destination_velocity[dimension_index] - self_velocity[dimension_index]) / merge_time_index_offset - 0.5 * temp_acceleration_delta * merge_time_index_offset\n\t\t\tacceleration_delta.append(temp_acceleration_delta)\n\t\t\tacceleration.append(temp_acceleration)\n\n\t\tself.__position = self_position\n\t\tself.__velocity = self_velocity\n\t\tself.__acceleration = tuple(acceleration)\n\t\tself.__acceleration_delta = tuple(acceleration_delta)\n\t\tself.__acceleration_delta_end_time_index = merge_time_index_offset\n\t\tself.__acceleration_delta_end_time_index_acceleration = destination_acceleration\n\t\tself.__time_index_offset = -current_time_index\n\n\nclass DotPlotter():\n\n\tdef __init__(self, minimum_position: Tuple[float, float], maximum_position: Tuple[float, float]):\n\n\t\tself.__minimum_position = minimum_position\n\t\tself.__maximum_position = maximum_position\n\n\t\tself.__dots = [] # type: List[Dot]\n\n\t\tself.__x = []\n\t\tself.__y = []\n\t\tself.__figure = None\n\t\tself.__scatter = None\n\n\tdef add_dot(self, *, dot: Dot):\n\t\tself.__dots.append(dot)\n\n\tdef __get_scatter(self, *, time_index: float) -> Tuple[List[float], List[float]]:\n\t\tscatter = ([], [])\n\t\tfor dot in self.__dots:\n\t\t\tposition = dot.get_position(\n\t\t\t\ttime_index=time_index\n\t\t\t)\n\n\t\t\tif position[1] < self.__minimum_position[1]:\n\t\t\t\tdot.bounce(\n\t\t\t\t\ttime_index=time_index\n\t\t\t\t)\n\t\t\tif position[0] < self.__minimum_position[0] or position[0] > self.__maximum_position[0]:\n\t\t\t\tdot.reflect(\n\t\t\t\t\ttime_index=time_index\n\t\t\t\t)\n\n\t\t\tscatter[0].append(position[0])\n\t\t\tscatter[1].append(position[1])\n\n\t\t\tprint(f\"position: {position}\")\n\n\t\treturn scatter\n\n\tdef show(self):\n\t\tplt.ion()\n\t\tself.__figure, ax = plt.subplots()\n\t\tself.__scatter = ax.scatter(self.__x, 
self.__y, facecolors=\"none\", edgecolors=[\"black\", \"red\"], s=10)\n\t\tplt.xlim(self.__minimum_position[0], self.__maximum_position[0])\n\t\tplt.ylim(self.__minimum_position[1], self.__maximum_position[1])\n\t\tplt.draw()\n\n\tdef refresh(self, *, time_index: float):\n\t\tx, y = self.__get_scatter(\n\t\t\ttime_index=time_index\n\t\t)\n\t\tself.__x.clear()\n\t\tself.__x.extend(x)\n\t\tself.__y.clear()\n\t\tself.__y.extend(y)\n\t\tself.__scatter.set_offsets(np.c_[self.__x, self.__y])\n\t\tself.__figure.canvas.draw_idle()\n\t\tplt.pause(0.01)\n\n\nclass LatencyPositionTest(unittest.TestCase):\n\n\tdef test_initialize(self):\n\n\t\tdot_plotter = DotPlotter(\n\t\t\tminimum_position=(0, 0),\n\t\t\tmaximum_position=(10, 10)\n\t\t)\n\n\t\tself.assertIsNotNone(dot_plotter)\n\n\tdef test_move_dot_along_path(self):\n\n\t\tdot_plotter = DotPlotter(\n\t\t\tminimum_position=(0, 0),\n\t\t\tmaximum_position=(10, 10)\n\t\t)\n\n\t\tdot = Dot(\n\t\t\tposition=(1, 9),\n\t\t\tvelocity=(1, 0),\n\t\t\tacceleration=(0, -1)\n\t\t)\n\n\t\tdot_plotter.add_dot(\n\t\t\tdot=dot\n\t\t)\n\n\t\tdot_plotter.show()\n\n\t\tprint(f\"refreshing\")\n\n\t\ttime_index = 0.0\n\t\ttime_index_delta = 0.05\n\t\twhile time_index < 20.0:\n\t\t\tdot_plotter.refresh(\n\t\t\t\ttime_index=time_index\n\t\t\t)\n\t\t\ttime_index += time_index_delta\n\n\t\tplt.waitforbuttonpress()\n\n\tdef test_move_dot_along_path_in_separate_windows(self):\n\n\t\tdot_plotters_total = 2\n\t\tdot_plotters = []\n\n\t\tfor dot_plotter_index in range(dot_plotters_total):\n\t\t\tdot_plotter = DotPlotter(\n\t\t\t\tminimum_position=(0, 0),\n\t\t\t\tmaximum_position=(10, 10)\n\t\t\t)\n\n\t\t\tdot = Dot(\n\t\t\t\tposition=(1, 9),\n\t\t\t\tvelocity=(1, 0),\n\t\t\t\tacceleration=(0, -1)\n\t\t\t)\n\n\t\t\tdot_plotter.add_dot(\n\t\t\t\tdot=dot\n\t\t\t)\n\n\t\t\tdot_plotter.show()\n\n\t\t\tdot_plotters.append(dot_plotter)\n\n\t\tprint(f\"refreshing\")\n\n\t\ttime_index = 0.0\n\t\ttime_index_delta = 0.05\n\t\twhile time_index < 10.0:\n\t\t\tfor dot_plotter in dot_plotters:\n\t\t\t\tdot_plotter.refresh(\n\t\t\t\t\ttime_index=time_index\n\t\t\t\t)\n\t\t\ttime_index += time_index_delta\n\n\t\tplt.waitforbuttonpress()\n\n\tdef test_move_dot_along_path_then_alter_state(self):\n\n\t\tdot_plotter = DotPlotter(\n\t\t\tminimum_position=(0, 0),\n\t\t\tmaximum_position=(10, 10)\n\t\t)\n\n\t\tdot = Dot(\n\t\t\tposition=(1, 9),\n\t\t\tvelocity=(1, 0),\n\t\t\tacceleration=(0, -1)\n\t\t)\n\n\t\tdef alter_dot(*, time_index: float):\n\t\t\tnonlocal dot\n\t\t\tdot.set_state(\n\t\t\t\tposition=dot.get_position(\n\t\t\t\t\ttime_index=time_index\n\t\t\t\t),\n\t\t\t\tvelocity=(-1, 1),\n\t\t\t\tacceleration=(0, -1),\n\t\t\t\ttime_index=time_index\n\t\t\t)\n\n\t\tdot_plotter.add_dot(\n\t\t\tdot=dot\n\t\t)\n\n\t\tdot_plotter.show()\n\n\t\tprint(f\"refreshing\")\n\n\t\ttime_index = 0.0\n\t\ttime_index_delta = 0.05\n\t\tmaximum_time_index = 20.0\n\t\tis_altered = False\n\t\twhile time_index < maximum_time_index:\n\t\t\tdot_plotter.refresh(\n\t\t\t\ttime_index=time_index\n\t\t\t)\n\t\t\ttime_index += time_index_delta\n\n\t\t\tif not is_altered and time_index > maximum_time_index / 2.0:\n\t\t\t\talter_dot(\n\t\t\t\t\ttime_index=time_index\n\t\t\t\t)\n\t\t\t\tis_altered = True\n\n\t\tplt.waitforbuttonpress()\n\n\tdef test_move_dot_along_path_then_set_acceleration_delta(self):\n\n\t\tdot_plotter = DotPlotter(\n\t\t\tminimum_position=(0, 0),\n\t\t\tmaximum_position=(10, 10)\n\t\t)\n\n\t\tdot = Dot(\n\t\t\tposition=(1, 9),\n\t\t\tvelocity=(1, 0),\n\t\t\tacceleration=(0, -1)\n\t\t)\n\n\t\tdef 
alter_dot(*, time_index: float):\n\t\t\tnonlocal dot\n\t\t\tdot.set_acceleration_delta(\n\t\t\t\ttime_index=time_index,\n\t\t\t\tacceleration_delta=(0, 0.5),\n\t\t\t\tend_time_index=5.0\n\t\t\t)\n\n\t\tdot_plotter.add_dot(\n\t\t\tdot=dot\n\t\t)\n\n\t\tdot_plotter.show()\n\n\t\tprint(f\"refreshing\")\n\n\t\ttime_index = 0.0\n\t\ttime_index_delta = 0.05\n\t\tmaximum_time_index = 30.0\n\t\talter_time_index = 10.0\n\t\tis_altered = False\n\t\twhile time_index < maximum_time_index:\n\t\t\tdot_plotter.refresh(\n\t\t\t\ttime_index=time_index\n\t\t\t)\n\t\t\ttime_index += time_index_delta\n\n\t\t\tif not is_altered and time_index > alter_time_index:\n\t\t\t\talter_dot(\n\t\t\t\t\ttime_index=time_index\n\t\t\t\t)\n\t\t\t\tis_altered = True\n\n\t\tplt.waitforbuttonpress()\n\n\tdef test_move_two_dots_along_path_in_same_windows(self):\n\n\t\tdots_total = 2\n\t\tdots = []\n\n\t\tdot_plotter = DotPlotter(\n\t\t\tminimum_position=(0, 0),\n\t\t\tmaximum_position=(10, 10)\n\t\t)\n\n\t\tfor dot_index in range(dots_total):\n\n\t\t\tdot = Dot(\n\t\t\t\tposition=(1, 9),\n\t\t\t\tvelocity=(dot_index + 1, 0),\n\t\t\t\tacceleration=(0, -1)\n\t\t\t)\n\n\t\t\tdot_plotter.add_dot(\n\t\t\t\tdot=dot\n\t\t\t)\n\n\t\t\tdots.append(dot)\n\n\t\tdot_plotter.show()\n\n\t\tprint(f\"refreshing\")\n\n\t\ttime_index = 0.0\n\t\ttime_index_delta = 0.05\n\t\tmaximum_time_index = 20.0\n\t\twhile time_index < maximum_time_index:\n\t\t\tdot_plotter.refresh(\n\t\t\t\ttime_index=time_index\n\t\t\t)\n\t\t\ttime_index += time_index_delta\n\n\t\tplt.waitforbuttonpress()\n\n\tdef test_move_two_dots_along_path_in_same_windows_but_first_gets_acceleration_delta(self):\n\n\t\tdots_total = 2\n\t\tdots = []\n\n\t\tdot_plotter = DotPlotter(\n\t\t\tminimum_position=(0, 0),\n\t\t\tmaximum_position=(10, 10)\n\t\t)\n\n\t\tfor dot_index in range(dots_total):\n\n\t\t\tdot = Dot(\n\t\t\t\tposition=(1, 9),\n\t\t\t\tvelocity=(1, 0),\n\t\t\t\tacceleration=(0, -1)\n\t\t\t)\n\n\t\t\tdot_plotter.add_dot(\n\t\t\t\tdot=dot\n\t\t\t)\n\n\t\t\tdots.append(dot)\n\n\t\tdot_plotter.show()\n\n\t\tdef alter_dot(*, time_index: float):\n\t\t\tnonlocal dots\n\t\t\tdots[0].set_acceleration_delta(\n\t\t\t\ttime_index=time_index,\n\t\t\t\tacceleration_delta=(0, 0.5),\n\t\t\t\tend_time_index=5.0\n\t\t\t)\n\n\t\tprint(f\"refreshing\")\n\n\t\ttime_index = 0.0\n\t\ttime_index_delta = 0.05\n\t\tmaximum_time_index = 30.0\n\t\talter_time_index = 10.0\n\t\tis_altered = False\n\t\twhile time_index < maximum_time_index:\n\t\t\tdot_plotter.refresh(\n\t\t\t\ttime_index=time_index\n\t\t\t)\n\t\t\ttime_index += time_index_delta\n\n\t\t\tif not is_altered and time_index > alter_time_index:\n\t\t\t\talter_dot(\n\t\t\t\t\ttime_index=time_index\n\t\t\t\t)\n\t\t\t\tis_altered = True\n\n\t\tplt.waitforbuttonpress()\n\n\tdef test_move_two_dots_along_path_in_same_windows_second_merges_specific_time_index_after_first_altered(self):\n\n\t\tdots_total = 2\n\t\tdots = []\n\n\t\tdot_plotter = DotPlotter(\n\t\t\tminimum_position=(0, 0),\n\t\t\tmaximum_position=(10, 10)\n\t\t)\n\n\t\tfor dot_index in range(dots_total):\n\t\t\tdot = Dot(\n\t\t\t\tposition=(1, 9),\n\t\t\t\tvelocity=(1, 0),\n\t\t\t\tacceleration=(0, -1)\n\t\t\t)\n\n\t\t\tdot_plotter.add_dot(\n\t\t\t\tdot=dot\n\t\t\t)\n\n\t\t\tdots.append(dot)\n\n\t\tdot_plotter.show()\n\t\tdef alter_dot(*, time_index: float):\n\t\t\tnonlocal dots\n\t\t\tif False:\n\t\t\t\tdots[0].set_acceleration_delta(\n\t\t\t\t\ttime_index=time_index,\n\t\t\t\t\tacceleration_delta=(0, 
0.5),\n\t\t\t\t\tend_time_index=1.0\n\t\t\t\t)\n\t\t\telse:\n\t\t\t\tdots[0].set_velocity(\n\t\t\t\t\tvelocity=(-1, 1)\n\t\t\t\t)\n\n\t\tdef merge_dot(*, time_index: float):\n\t\t\tnonlocal dots\n\t\t\tdots[1].merge(\n\t\t\t\tdot=dots[0],\n\t\t\t\tcurrent_time_index=time_index,\n\t\t\t\tmerge_time_index_offset=1.0\n\t\t\t)\n\n\t\tprint(f\"refreshing\")\n\n\t\ttime_index = 0.0\n\t\ttime_index_delta = 0.01\n\t\tmaximum_time_index = 30.0\n\t\talter_time_index = 10.0\n\t\tmerge_time_index = 11.0\n\t\tis_altered = False\n\t\tis_merged = False\n\t\twhile time_index < maximum_time_index:\n\t\t\tdot_plotter.refresh(\n\t\t\t\ttime_index=time_index\n\t\t\t)\n\t\t\ttime_index += time_index_delta\n\n\t\t\tif not is_altered and time_index > alter_time_index:\n\t\t\t\talter_dot(\n\t\t\t\t\ttime_index=time_index\n\t\t\t\t)\n\t\t\t\tis_altered = True\n\t\t\tif not is_merged and time_index > merge_time_index:\n\t\t\t\tmerge_dot(\n\t\t\t\t\ttime_index=time_index\n\t\t\t\t)\n\t\t\t\tis_merged = True\n\n\t\tplt.waitforbuttonpress()\n"
] | [
[
"matplotlib.pyplot.pause",
"matplotlib.pyplot.draw",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.xlim",
"matplotlib.pyplot.ylim",
"matplotlib.pyplot.waitforbuttonpress",
"matplotlib.pyplot.ion"
]
] |
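Illustrative note on the `Dot` class above: when no `acceleration_delta` is active, `get_position` and `get_velocity` reduce to constant-acceleration kinematics applied per dimension. A short worked sketch of that branch, using the same initial state as the tests (position (1, 9), velocity (1, 0), acceleration (0, -1)):

from typing import Tuple

def position_at(p0: Tuple[float, float], v0: Tuple[float, float],
                a: Tuple[float, float], t: float) -> Tuple[float, float]:
    # Constant-acceleration kinematics, the no-jerk branch of Dot.get_position:
    # p(t) = p0 + v0*t + a*t^2 / 2, evaluated independently in each dimension.
    return tuple(p0[i] + v0[i] * t + a[i] * t * t / 2.0 for i in range(2))

def velocity_at(v0: Tuple[float, float], a: Tuple[float, float],
                t: float) -> Tuple[float, float]:
    # v(t) = v0 + a*t, matching the no-jerk branch of Dot.get_velocity.
    return tuple(v0[i] + a[i] * t for i in range(2))

# After 2 time units the test's dot has drifted right by 2 and fallen by 2.
assert position_at((1, 9), (1, 0), (0, -1), 2.0) == (3.0, 7.0)
assert velocity_at((1, 0), (0, -1), 2.0) == (1.0, -2.0)

The `__time_index_offset` bookkeeping in `bounce`, `reflect`, and `set_state` simply restarts this closed-form evaluation from the current state, so the class never integrates numerically.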
luisangel86a/tensorflow | [
"77ee5e02721ba797fe01d47019e6017d2bb09ab7"
] | [
"tensorflow/python/keras/optimizer_v2/utils.py"
] | [
"# Copyright 2020 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Optimizer utilities.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom tensorflow.python.distribute import central_storage_strategy\nfrom tensorflow.python.distribute import distribution_strategy_context as distribute_ctx\nfrom tensorflow.python.distribute import reduce_util as ds_reduce_util\nfrom tensorflow.python.ops import clip_ops\nfrom tensorflow.python.platform import tf_logging as logging\n\n\ndef all_reduce_sum_gradients(grads_and_vars):\n \"\"\"Returns all-reduced gradients aggregated via summation.\n\n Args:\n grads_and_vars: List of (gradient, variable) pairs.\n\n Returns:\n A list of all-reduced gradients.\n \"\"\"\n grads_and_vars = list(grads_and_vars)\n filtered_grads_and_vars = filter_empty_gradients(grads_and_vars)\n # We switch to a cross-replica context since there is a bug which causes\n # IndexedSlices to be converted to dense tensors when all-reduced in a\n # replica context.\n # TODO(b/150507409): Do not switch to a cross-replica context once the bug\n # is fixed.\n if filtered_grads_and_vars:\n reduced = distribute_ctx.get_replica_context().merge_call(\n _all_reduce_sum_fn, args=(filtered_grads_and_vars,))\n else:\n reduced = []\n # Copy 'reduced' but add None gradients back in\n reduced_with_nones = []\n reduced_pos = 0\n for g, _ in grads_and_vars:\n if g is None:\n reduced_with_nones.append(None)\n else:\n reduced_with_nones.append(reduced[reduced_pos])\n reduced_pos += 1\n assert reduced_pos == len(reduced), \"Failed to add all gradients\"\n return reduced_with_nones\n\n\ndef make_gradient_clipnorm_fn(clipnorm):\n \"\"\"Creates a gradient transformation function for clipping by norm.\"\"\"\n\n def gradient_clipnorm_fn(grads_and_vars):\n\n if isinstance(distribute_ctx.get_strategy(),\n central_storage_strategy.CentralStorageStrategy):\n raise ValueError(\n \"`clipnorm` is not supported with `CenteralStorageStrategy`\")\n\n clipped_grads_and_vars = [\n (clip_ops.clip_by_norm(g, clipnorm), v) for g, v in grads_and_vars\n ]\n return clipped_grads_and_vars\n\n return gradient_clipnorm_fn\n\n\ndef make_gradient_clipvalue_fn(clipvalue):\n \"\"\"Creates a gradient transformation function for clipping by value.\"\"\"\n\n def gradient_clipvalue_fn(grads_and_vars):\n\n if isinstance(distribute_ctx.get_strategy(),\n central_storage_strategy.CentralStorageStrategy):\n raise ValueError(\n \"`clipvalue` is not supported with `CenteralStorageStrategy`\")\n\n clipped_grads_and_vars = [(clip_ops.clip_by_value(g, -clipvalue,\n clipvalue), v)\n for g, v in grads_and_vars]\n return clipped_grads_and_vars\n\n return gradient_clipvalue_fn\n\n\ndef filter_empty_gradients(grads_and_vars):\n \"\"\"Filter out `(grad, var)` pairs that have a gradient equal to `None`.\"\"\"\n grads_and_vars = tuple(grads_and_vars)\n 
if not grads_and_vars:\n return grads_and_vars\n\n filtered = []\n vars_with_empty_grads = []\n for grad, var in grads_and_vars:\n if grad is None:\n vars_with_empty_grads.append(var)\n else:\n filtered.append((grad, var))\n filtered = tuple(filtered)\n\n if not filtered:\n raise ValueError(\"No gradients provided for any variable: %s.\" %\n ([v.name for _, v in grads_and_vars],))\n if vars_with_empty_grads:\n logging.warning(\n (\"Gradients do not exist for variables %s when minimizing the loss.\"),\n ([v.name for v in vars_with_empty_grads]))\n return filtered\n\n\ndef _all_reduce_sum_fn(distribution, grads_and_vars):\n return distribution.extended.batch_reduce_to(ds_reduce_util.ReduceOp.SUM,\n grads_and_vars)\n"
] | [
[
"tensorflow.python.distribute.distribution_strategy_context.get_strategy",
"tensorflow.python.ops.clip_ops.clip_by_value",
"tensorflow.python.distribute.distribution_strategy_context.get_replica_context",
"tensorflow.python.ops.clip_ops.clip_by_norm",
"tensorflow.python.platform.tf_logging.warning"
]
] |
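The record above lists TensorFlow's internal optimizer gradient-transformation helpers (all-reduce, None filtering, clip-by-norm/clip-by-value factories). A minimal sketch of the same pattern using only the public TensorFlow 2.x API, not the private tensorflow.python modules; the helper name clipnorm_transform and the toy variable are illustrative assumptions, not part of the source file.

# Sketch, assuming TensorFlow 2.x public API: filter out None gradients,
# clip each remaining gradient by norm, then apply the (grad, var) pairs.
import tensorflow as tf

def clipnorm_transform(grads_and_vars, clipnorm=1.0):
    # Mirrors filter_empty_gradients + make_gradient_clipnorm_fn above
    filtered = [(g, v) for g, v in grads_and_vars if g is not None]
    return [(tf.clip_by_norm(g, clipnorm), v) for g, v in filtered]

w = tf.Variable([3.0, 4.0])
opt = tf.keras.optimizers.SGD(learning_rate=0.1)
with tf.GradientTape() as tape:
    loss = tf.reduce_sum(w * w)
grads_and_vars = list(zip(tape.gradient(loss, [w]), [w]))
opt.apply_gradients(clipnorm_transform(grads_and_vars, clipnorm=1.0))

In the source file the same transformation is built as a closure (gradient_clipnorm_fn) so the optimizer can apply it uniformly to whatever gradients it receives; the sketch inlines that step for brevity.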
qgallouedec/stable-baselines3 | [
"a6f5049a99a4c21a6f0bcce458ca3306cef310e0"
] | [
"stable_baselines3/common/vec_env/subproc_vec_env.py"
] | [
"import multiprocessing as mp\nfrom collections import OrderedDict\nfrom typing import Any, Callable, List, Optional, Sequence, Tuple, Type, Union\n\nimport gym\nimport numpy as np\n\nfrom stable_baselines3.common.vec_env.base_vec_env import (\n CloudpickleWrapper,\n VecEnv,\n VecEnvIndices,\n VecEnvObs,\n VecEnvStepReturn,\n)\n\n\ndef _worker(\n remote: mp.connection.Connection, parent_remote: mp.connection.Connection, env_fn_wrapper: CloudpickleWrapper\n) -> None:\n # Import here to avoid a circular import\n from stable_baselines3.common.env_util import is_wrapped\n\n parent_remote.close()\n env = env_fn_wrapper.var()\n while True:\n try:\n cmd, data = remote.recv()\n if cmd == \"step\":\n observation, reward, done, info = env.step(data)\n if done:\n # save final observation where user can get it, then reset\n info[\"terminal_observation\"] = observation\n observation = env.reset()\n remote.send((observation, reward, done, info))\n elif cmd == \"seed\":\n remote.send(env.seed(data))\n elif cmd == \"reset\":\n observation = env.reset()\n remote.send(observation)\n elif cmd == \"render\":\n remote.send(env.render(data))\n elif cmd == \"close\":\n env.close()\n remote.close()\n break\n elif cmd == \"get_spaces\":\n remote.send((env.observation_space, env.action_space))\n elif cmd == \"env_method\":\n method = getattr(env, data[0])\n remote.send(method(*data[1], **data[2]))\n elif cmd == \"get_attr\":\n remote.send(getattr(env, data))\n elif cmd == \"set_attr\":\n remote.send(setattr(env, data[0], data[1]))\n elif cmd == \"is_wrapped\":\n remote.send(is_wrapped(env, data))\n else:\n raise NotImplementedError(f\"`{cmd}` is not implemented in the worker\")\n except EOFError:\n break\n\n\nclass SubprocVecEnv(VecEnv):\n \"\"\"\n Creates a multiprocess vectorized wrapper for multiple environments, distributing each environment to its own\n process, allowing significant speed up when the environment is computationally complex.\n\n For performance reasons, if your environment is not IO bound, the number of environments should not exceed the\n number of logical cores on your CPU.\n\n .. warning::\n\n Only 'forkserver' and 'spawn' start methods are thread-safe,\n which is important when TensorFlow sessions or other non thread-safe\n libraries are used in the parent (see issue #217). However, compared to\n 'fork' they incur a small start-up cost and have restrictions on\n global variables. 
With those methods, users must wrap the code in an\n ``if __name__ == \"__main__\":`` block.\n For more information, see the multiprocessing documentation.\n\n :param env_fns: Environments to run in subprocesses\n :param start_method: method used to start the subprocesses.\n Must be one of the methods returned by multiprocessing.get_all_start_methods().\n Defaults to 'forkserver' on available platforms, and 'spawn' otherwise.\n \"\"\"\n\n def __init__(self, env_fns: List[Callable[[], gym.Env]], start_method: Optional[str] = None):\n self.waiting = False\n self.closed = False\n n_envs = len(env_fns)\n\n if start_method is None:\n # Fork is not a thread safe method (see issue #217)\n # but is more user friendly (does not require to wrap the code in\n # a `if __name__ == \"__main__\":`)\n forkserver_available = \"forkserver\" in mp.get_all_start_methods()\n start_method = \"forkserver\" if forkserver_available else \"spawn\"\n ctx = mp.get_context(start_method)\n\n self.remotes, self.work_remotes = zip(*[ctx.Pipe() for _ in range(n_envs)])\n self.processes = []\n for work_remote, remote, env_fn in zip(self.work_remotes, self.remotes, env_fns):\n args = (work_remote, remote, CloudpickleWrapper(env_fn))\n # daemon=True: if the main process crashes, we should not cause things to hang\n process = ctx.Process(target=_worker, args=args, daemon=True) # pytype:disable=attribute-error\n process.start()\n self.processes.append(process)\n work_remote.close()\n\n self.remotes[0].send((\"get_spaces\", None))\n observation_space, action_space = self.remotes[0].recv()\n VecEnv.__init__(self, len(env_fns), observation_space, action_space)\n\n def step_async(self, actions: np.ndarray) -> None:\n for remote, action in zip(self.remotes, actions):\n remote.send((\"step\", action))\n self.waiting = True\n\n def step_wait(self) -> VecEnvStepReturn:\n results = [remote.recv() for remote in self.remotes]\n self.waiting = False\n obs, rews, dones, infos = zip(*results)\n return _flatten_obs(obs, self.observation_space), np.stack(rews), np.stack(dones), infos\n\n def seed(self, seed: Optional[int] = None) -> List[Union[None, int]]:\n if seed is None:\n seed = np.random.randint(0, 2**32 - 1)\n for idx, remote in enumerate(self.remotes):\n remote.send((\"seed\", seed + idx))\n return [remote.recv() for remote in self.remotes]\n\n def reset(self) -> VecEnvObs:\n for remote in self.remotes:\n remote.send((\"reset\", None))\n obs = [remote.recv() for remote in self.remotes]\n return _flatten_obs(obs, self.observation_space)\n\n def close(self) -> None:\n if self.closed:\n return\n if self.waiting:\n for remote in self.remotes:\n remote.recv()\n for remote in self.remotes:\n remote.send((\"close\", None))\n for process in self.processes:\n process.join()\n self.closed = True\n\n def get_images(self) -> Sequence[np.ndarray]:\n for pipe in self.remotes:\n # gather images from subprocesses\n # `mode` will be taken into account later\n pipe.send((\"render\", \"rgb_array\"))\n imgs = [pipe.recv() for pipe in self.remotes]\n return imgs\n\n def get_attr(self, attr_name: str, indices: VecEnvIndices = None) -> List[Any]:\n \"\"\"Return attribute from vectorized environment (see base class).\"\"\"\n target_remotes = self._get_target_remotes(indices)\n for remote in target_remotes:\n remote.send((\"get_attr\", attr_name))\n return [remote.recv() for remote in target_remotes]\n\n def set_attr(self, attr_name: str, value: Any, indices: VecEnvIndices = None) -> None:\n \"\"\"Set attribute inside vectorized environments (see base 
class).\"\"\"\n target_remotes = self._get_target_remotes(indices)\n for remote in target_remotes:\n remote.send((\"set_attr\", (attr_name, value)))\n for remote in target_remotes:\n remote.recv()\n\n def env_method(self, method_name: str, *method_args, indices: VecEnvIndices = None, **method_kwargs) -> List[Any]:\n \"\"\"Call instance methods of vectorized environments.\"\"\"\n target_remotes = self._get_target_remotes(indices)\n for remote in target_remotes:\n remote.send((\"env_method\", (method_name, method_args, method_kwargs)))\n return [remote.recv() for remote in target_remotes]\n\n def env_is_wrapped(self, wrapper_class: Type[gym.Wrapper], indices: VecEnvIndices = None) -> List[bool]:\n \"\"\"Check if worker environments are wrapped with a given wrapper\"\"\"\n target_remotes = self._get_target_remotes(indices)\n for remote in target_remotes:\n remote.send((\"is_wrapped\", wrapper_class))\n return [remote.recv() for remote in target_remotes]\n\n def _get_target_remotes(self, indices: VecEnvIndices) -> List[Any]:\n \"\"\"\n Get the connection object needed to communicate with the wanted\n envs that are in subprocesses.\n\n :param indices: refers to indices of envs.\n :return: Connection object to communicate between processes.\n \"\"\"\n indices = self._get_indices(indices)\n return [self.remotes[i] for i in indices]\n\n\ndef _flatten_obs(obs: Union[List[VecEnvObs], Tuple[VecEnvObs]], space: gym.spaces.Space) -> VecEnvObs:\n \"\"\"\n Flatten observations, depending on the observation space.\n\n :param obs: observations.\n A list or tuple of observations, one per environment.\n Each environment observation may be a NumPy array, or a dict or tuple of NumPy arrays.\n :return: flattened observations.\n A flattened NumPy array or an OrderedDict or tuple of flattened numpy arrays.\n Each NumPy array has the environment index as its first axis.\n \"\"\"\n assert isinstance(obs, (list, tuple)), \"expected list or tuple of observations per environment\"\n assert len(obs) > 0, \"need observations from at least one environment\"\n\n if isinstance(space, gym.spaces.Dict):\n assert isinstance(space.spaces, OrderedDict), \"Dict space must have ordered subspaces\"\n assert isinstance(obs[0], dict), \"non-dict observation for environment with Dict observation space\"\n return OrderedDict([(k, np.stack([o[k] for o in obs])) for k in space.spaces.keys()])\n elif isinstance(space, gym.spaces.Tuple):\n assert isinstance(obs[0], tuple), \"non-tuple observation for environment with Tuple observation space\"\n obs_len = len(space.spaces)\n return tuple(np.stack([o[i] for o in obs]) for i in range(obs_len))\n else:\n return np.stack(obs)\n"
] | [
[
"numpy.stack",
"numpy.random.randint"
]
] |
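The record above is stable-baselines3's SubprocVecEnv, which runs each environment in its own process and multiplexes commands over pipes. A usage sketch under stated assumptions: gym and stable-baselines3 are installed, "CartPole-v1" exists, and the gym version matches the old 4-tuple step API this worker expects; the __main__ guard is required for the spawn/forkserver start methods the class defaults to.

# Usage sketch (assumptions noted above); not part of the source file.
import gym
import numpy as np
from stable_baselines3.common.vec_env import SubprocVecEnv

def make_env():
    def _init():
        return gym.make("CartPole-v1")
    return _init

if __name__ == "__main__":
    vec_env = SubprocVecEnv([make_env() for _ in range(4)])
    vec_env.seed(0)                               # sends "seed" to each worker
    obs = vec_env.reset()                         # stacked obs, shape (4, obs_dim)
    actions = np.array([vec_env.action_space.sample() for _ in range(4)])
    obs, rewards, dones, infos = vec_env.step(actions)  # all 4 envs step in parallel
    vec_env.close()

Each remote call (reset, step, seed, close) maps onto one of the command strings handled in _worker above; get_attr/set_attr/env_method follow the same request/response pattern.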
rartino/httk-rsttools | [
"57c46362899105a72b3b6efc45b50bcda8e574a7"
] | [
"rstslide/plugins/Matplotlib/XKCDify.py"
] | [
"\"\"\"\nXKCD plot generator\n-------------------\nAuthor: Jake Vanderplas\n\nThis is a script that will take any matplotlib line diagram, and convert it\nto an XKCD-style plot. It will work for plots with line & text elements,\nincluding axes labels and titles (but not axes tick labels).\n\nThe idea for this comes from work by Damon McDougall\n http://www.mail-archive.com/[email protected]/msg25499.html\n\"\"\"\nimport os\n\nimport numpy as np\nimport pylab as pl\nfrom scipy import interpolate, signal\nimport matplotlib.font_manager as fm\n\nscript_path = os.path.dirname(os.path.abspath(__file__))\n\n# We need a special font for the code below. It can be downloaded this way:\n#import os\n#import urllib2\n#if not os.path.exists('Humor-Sans.ttf'):\n# print 'Downloading the font Humor-sans.'\n# fhandle = urllib2.urlopen('http://antiyawn.com/uploads/Humor-Sans.ttf')\n# open('Humor-Sans.ttf', 'wb').write(fhandle.read())\n\n\ndef xkcd_line(x, y, xlim=None, ylim=None,\n mag=1.0, f1=30, f2=0.05, f3=15):\n \"\"\"\n Mimic a hand-drawn line from (x, y) data\n\n Parameters\n ----------\n x, y : array_like\n arrays to be modified\n xlim, ylim : data range\n the assumed plot range for the modification. If not specified,\n they will be guessed from the data\n mag : float\n magnitude of distortions\n f1, f2, f3 : int, float, int\n filtering parameters. f1 gives the size of the window, f2 gives\n the high-frequency cutoff, f3 gives the size of the filter\n\n Returns\n -------\n x, y : ndarrays\n The modified lines\n \"\"\"\n x = np.asarray(x)\n y = np.asarray(y)\n\n # get limits for rescaling\n if xlim is None:\n xlim = (x.min(), x.max())\n if ylim is None:\n ylim = (y.min(), y.max())\n\n if xlim[1] == xlim[0]:\n xlim = ylim\n\n if ylim[1] == ylim[0]:\n ylim = xlim\n\n # scale the data\n x_scaled = (x - xlim[0]) * 1. / (xlim[1] - xlim[0])\n y_scaled = (y - ylim[0]) * 1. / (ylim[1] - ylim[0])\n\n # compute the total distance along the path\n dx = x_scaled[1:] - x_scaled[:-1]\n dy = y_scaled[1:] - y_scaled[:-1]\n dist_tot = np.sum(np.sqrt(dx * dx + dy * dy))\n\n # number of interpolated points is proportional to the distance\n Nu = int(200 * dist_tot)\n u = np.arange(-1, Nu + 1) * 1. / (Nu - 1)\n\n # interpolate curve at sampled points\n k = min(3, len(x) - 1)\n res = interpolate.splprep([x_scaled, y_scaled], s=0, k=k)\n x_int, y_int = interpolate.splev(u, res[0])\n\n # we'll perturb perpendicular to the drawn line\n dx = x_int[2:] - x_int[:-2]\n dy = y_int[2:] - y_int[:-2]\n dist = np.sqrt(dx * dx + dy * dy)\n\n # create a filtered perturbation\n coeffs = mag * np.random.normal(0, 0.01, len(x_int) - 2)\n b = signal.firwin(f1, f2 * dist_tot, window=('kaiser', f3))\n response = signal.lfilter(b, 1, coeffs)\n\n x_int[1:-1] += response * dy / dist\n y_int[1:-1] += response * dx / dist\n\n # un-scale data\n x_int = x_int[1:-1] * (xlim[1] - xlim[0]) + xlim[0]\n y_int = y_int[1:-1] * (ylim[1] - ylim[0]) + ylim[0]\n\n return x_int, y_int\n\n\ndef XKCDify(ax, mag=1.0,\n f1=50, f2=0.01, f3=15,\n forecolor='k',\n bgcolor='w',\n xaxis_loc=None,\n yaxis_loc=None,\n xaxis_arrow='+',\n yaxis_arrow='+',\n ax_extend=0.1,\n expand_axes=False):\n \"\"\"Make axis look hand-drawn\n\n This adjusts all lines, text, legends, and axes in the figure to look\n like xkcd plots. Other plot elements are not modified.\n\n Parameters\n ----------\n ax : Axes instance\n the axes to be modified.\n mag : float\n the magnitude of the distortion\n f1, f2, f3 : int, float, int\n filtering parameters. 
f1 gives the size of the window, f2 gives\n the high-frequency cutoff, f3 gives the size of the filter\n xaxis_loc, yaxis_log : float\n The locations to draw the x and y axes. If not specified, they\n will be drawn from the bottom left of the plot\n xaxis_arrow, yaxis_arrow : str\n where to draw arrows on the x/y axes. Options are '+', '-', '+-', or ''\n ax_extend : float\n How far (fractionally) to extend the drawn axes beyond the original\n axes limits\n expand_axes : bool\n if True, then expand axes to fill the figure (useful if there is only\n a single axes in the figure)\n \"\"\"\n # Get axes aspect\n ext = ax.get_window_extent().extents\n aspect = (ext[3] - ext[1]) / (ext[2] - ext[0])\n\n xlim = ax.get_xlim()\n ylim = ax.get_ylim()\n\n xspan = xlim[1] - xlim[0]\n yspan = ylim[1] - xlim[0]\n\n xax_lim = (xlim[0] - ax_extend * xspan,\n xlim[1] + ax_extend * xspan)\n yax_lim = (ylim[0] - ax_extend * yspan,\n ylim[1] + ax_extend * yspan)\n\n if xaxis_loc is None:\n xaxis_loc = ylim[0]\n\n if yaxis_loc is None:\n yaxis_loc = xlim[0]\n\n # Draw axes\n xaxis = pl.Line2D([xax_lim[0], xax_lim[1]], [xaxis_loc, xaxis_loc],\n linestyle='-', color=forecolor)\n yaxis = pl.Line2D([yaxis_loc, yaxis_loc], [yax_lim[0], yax_lim[1]],\n linestyle='-', color=forecolor)\n\n # Label axes3, 0.5, 'hello', fontsize=14)\n ax.text(xax_lim[1], xaxis_loc - 0.05 * yspan, ax.get_xlabel(),\n fontsize=14, ha='right', va='top', rotation=5)\n ax.text(yaxis_loc - 0.05 * xspan, yax_lim[1], ax.get_ylabel(),\n fontsize=14, ha='right', va='top', rotation=85)\n ax.set_xlabel('')\n ax.set_ylabel('')\n\n # Add title\n ax.text(0.5 * (xax_lim[1] + xax_lim[0]), yax_lim[1],\n ax.get_title(),\n ha='center', va='bottom', fontsize=16)\n ax.set_title('')\n\n Nlines = len(ax.lines)\n lines = [xaxis, yaxis] + [ax.lines.pop(0) for i in range(Nlines)]\n\n for line in lines:\n x, y = line.get_data()\n\n x_int, y_int = xkcd_line(x, y, xlim, ylim,\n mag, f1, f2, f3)\n\n # create foreground and background line\n lw = line.get_linewidth()\n line.set_linewidth(2 * lw)\n line.set_data(x_int, y_int)\n\n# # don't add background line for axes\n# if (line is not xaxis) and (line is not yaxis):\n# line_bg = pl.Line2D(x_int, y_int, color=bgcolor,\n# linewidth=8 * lw)\n# ax.add_line(line_bg)\n\n ax.add_line(line)\n\n # Draw arrow-heads at the end of axes lines\n arr1 = 0.03 * np.array([-1, 0, -1])\n arr2 = 0.02 * np.array([-1, 0, 1])\n\n arr1[::2] += np.random.normal(0, 0.005, 2)\n arr2[::2] += np.random.normal(0, 0.005, 2)\n\n x, y = xaxis.get_data()\n if '+' in str(xaxis_arrow):\n ax.plot(x[-1] + arr1 * xspan * aspect,\n y[-1] + arr2 * yspan,\n color=forecolor, lw=2)\n if '-' in str(xaxis_arrow):\n ax.plot(x[0] - arr1 * xspan * aspect,\n y[0] - arr2 * yspan,\n color=forecolor, lw=2)\n\n x, y = yaxis.get_data()\n if '+' in str(yaxis_arrow):\n ax.plot(x[-1] + arr2 * xspan * aspect,\n y[-1] + arr1 * yspan,\n color=forecolor, lw=2)\n if '-' in str(yaxis_arrow):\n ax.plot(x[0] - arr2 * xspan * aspect,\n y[0] - arr1 * yspan,\n color=forecolor, lw=2)\n\n # Change all the fonts to humor-sans.\n prop = fm.FontProperties(fname=os.path.join(script_path, 'fonts', 'Humor-Sans.ttf'), size=16)\n for text in ax.texts:\n text.set_fontproperties(prop)\n\n # modify legend\n leg = ax.get_legend()\n if leg is not None:\n leg.set_frame_on(False)\n\n for child in leg.get_children():\n if isinstance(child, pl.Line2D):\n x, y = child.get_data()\n child.set_data(xkcd_line(x, y, mag=1., f1=100, f2=0.001))\n child.set_linewidth(2 * child.get_linewidth())\n if isinstance(child, 
pl.Text):\n child.set_fontproperties(prop)\n\n # Set the axis limits\n ax.set_xlim(xax_lim[0] - 0.1 * xspan,\n xax_lim[1] + 0.1 * xspan)\n ax.set_ylim(yax_lim[0] - 0.1 * yspan,\n yax_lim[1] + 0.1 * yspan)\n\n # adjust the axes\n ax.set_xticks([])\n ax.set_yticks([])\n\n if expand_axes:\n ax.figure.set_facecolor(bgcolor)\n ax.set_axis_off()\n ax.set_position([0, 0, 1, 1])\n\n return ax\n"
] | [
[
"scipy.signal.firwin",
"numpy.random.normal",
"numpy.asarray",
"scipy.interpolate.splev",
"scipy.signal.lfilter",
"numpy.arange",
"scipy.interpolate.splprep",
"numpy.sqrt",
"numpy.array"
]
] |
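The record above is Jake Vanderplas's XKCDify script, which perturbs matplotlib line data with filtered noise and redraws axes, labels, and legends in a hand-drawn style. A usage sketch with assumptions: the module is importable under the hypothetical name XKCDify (adjust to your actual path inside rstslide/plugins/Matplotlib), and the Humor-Sans font is present in the fonts/ directory the module expects.

# Usage sketch; import path and font location are assumptions, not from the source.
import numpy as np
import matplotlib.pyplot as plt
from XKCDify import XKCDify   # hypothetical import path

x = np.linspace(0, 10, 100)
fig, ax = plt.subplots()
ax.plot(x, np.sin(x), label="sin(x)")
ax.set_xlabel("x")
ax.set_ylabel("y")
ax.set_title("hand-drawn look")
ax.legend()
XKCDify(ax, mag=2.0, expand_axes=True)  # distorts lines and redraws axes in place
plt.show()

As a design note, modern matplotlib ships a built-in plt.xkcd() context that achieves a similar effect without this script; the plugin predates it and works by post-processing an existing Axes instead.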
bubbliiiing/yolox-tf2 | [
"0407c77858d436a6b370e591eea7963cc807f3b4"
] | [
"utils/utils_fit.py"
] | [
"import os\r\n\r\nimport tensorflow as tf\r\nfrom nets.yolo import get_yolo_loss\r\nfrom tqdm import tqdm\r\n\r\n\r\n#------------------------------#\r\n# 防止bug\r\n#------------------------------#\r\ndef get_train_step_fn(strategy):\r\n @tf.function\r\n def train_step(imgs, targets, net, yolo_loss, optimizer):\r\n with tf.GradientTape() as tape:\r\n #------------------------------#\r\n # 计算loss\r\n #------------------------------#\r\n P5_output, P4_output, P3_output = net(imgs, training=True)\r\n args = [P5_output, P4_output, P3_output] + [targets]\r\n \r\n loss_value = yolo_loss(args)\r\n #------------------------------#\r\n # 添加上l2正则化参数\r\n #------------------------------#\r\n loss_value = tf.reduce_sum(net.losses) + loss_value\r\n grads = tape.gradient(loss_value, net.trainable_variables)\r\n optimizer.apply_gradients(zip(grads, net.trainable_variables))\r\n return loss_value\r\n\r\n if strategy == None:\r\n return train_step\r\n else:\r\n #----------------------#\r\n # 多gpu训练\r\n #----------------------#\r\n @tf.function\r\n def distributed_train_step(imgs, targets, net, yolo_loss, optimizer):\r\n per_replica_losses = strategy.run(train_step, args=(imgs, targets, net, yolo_loss, optimizer,))\r\n return strategy.reduce(tf.distribute.ReduceOp.MEAN, per_replica_losses,\r\n axis=None)\r\n return distributed_train_step\r\n\r\n#----------------------#\r\n# 防止bug\r\n#----------------------#\r\ndef get_val_step_fn(strategy):\r\n @tf.function\r\n def val_step(imgs, targets, net, yolo_loss, optimizer):\r\n #------------------------------#\r\n # 计算loss\r\n #------------------------------#\r\n P5_output, P4_output, P3_output = net(imgs, training=False)\r\n args = [P5_output, P4_output, P3_output] + [targets]\r\n loss_value = yolo_loss(args)\r\n #------------------------------#\r\n # 添加上l2正则化参数\r\n #------------------------------#\r\n loss_value = tf.reduce_sum(net.losses) + loss_value\r\n return loss_value\r\n if strategy == None:\r\n return val_step\r\n else:\r\n #----------------------#\r\n # 多gpu验证\r\n #----------------------#\r\n @tf.function\r\n def distributed_val_step(imgs, targets, net, yolo_loss, optimizer):\r\n per_replica_losses = strategy.run(val_step, args=(imgs, targets, net, yolo_loss, optimizer,))\r\n return strategy.reduce(tf.distribute.ReduceOp.MEAN, per_replica_losses,\r\n axis=None)\r\n return distributed_val_step\r\n \r\ndef fit_one_epoch(net, yolo_loss, loss_history, eval_callback, optimizer, epoch, epoch_step, epoch_step_val, gen, gen_val, Epoch, \r\n input_shape, num_classes, save_period, save_dir, strategy):\r\n train_step = get_train_step_fn(strategy)\r\n val_step = get_val_step_fn(strategy)\r\n \r\n loss = 0\r\n val_loss = 0\r\n print('Start Train')\r\n with tqdm(total=epoch_step,desc=f'Epoch {epoch + 1}/{Epoch}',postfix=dict,mininterval=0.3) as pbar:\r\n for iteration, batch in enumerate(gen):\r\n if iteration >= epoch_step:\r\n break\r\n images, targets = batch[0], batch[1]\r\n loss_value = train_step(images, targets, net, yolo_loss, optimizer)\r\n loss = loss + loss_value\r\n\r\n pbar.set_postfix(**{'total_loss': float(loss) / (iteration + 1), \r\n 'lr' : optimizer.lr.numpy()})\r\n pbar.update(1)\r\n print('Finish Train')\r\n \r\n print('Start Validation')\r\n with tqdm(total=epoch_step_val, desc=f'Epoch {epoch + 1}/{Epoch}',postfix=dict,mininterval=0.3) as pbar:\r\n for iteration, batch in enumerate(gen_val):\r\n if iteration >= epoch_step_val:\r\n break\r\n images, targets = batch[0], batch[1]\r\n loss_value = val_step(images, targets, net, yolo_loss, optimizer)\r\n 
val_loss = val_loss + loss_value\r\n\r\n pbar.set_postfix(**{'total_loss': float(val_loss) / (iteration + 1)})\r\n pbar.update(1)\r\n print('Finish Validation')\r\n\r\n logs = {'loss': loss.numpy() / epoch_step, 'val_loss': val_loss.numpy() / epoch_step_val}\r\n loss_history.on_epoch_end([], logs)\r\n eval_callback.on_epoch_end(epoch, logs)\r\n print('Epoch:'+ str(epoch + 1) + '/' + str(Epoch))\r\n print('Total Loss: %.3f || Val Loss: %.3f ' % (loss / epoch_step, val_loss / epoch_step_val))\r\n \r\n #-----------------------------------------------#\r\n # 保存权值\r\n #-----------------------------------------------#\r\n if (epoch + 1) % save_period == 0 or epoch + 1 == Epoch:\r\n net.save_weights(os.path.join(save_dir, \"ep%03d-loss%.3f-val_loss%.3f.h5\" % (epoch + 1, loss / epoch_step, val_loss / epoch_step_val)))\r\n \r\n if len(loss_history.val_loss) <= 1 or (val_loss / epoch_step_val) <= min(loss_history.val_loss):\r\n print('Save best model to best_epoch_weights.pth')\r\n net.save_weights(os.path.join(save_dir, \"best_epoch_weights.h5\"))\r\n \r\n net.save_weights(os.path.join(save_dir, \"last_epoch_weights.h5\"))"
] | [
[
"tensorflow.GradientTape",
"tensorflow.reduce_sum"
]
] |
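The record above implements a custom YOLOX training loop: a tf.function train step that runs the forward pass inside tf.GradientTape, adds the model's L2 regularization losses to the task loss, and applies gradients, with an optional tf.distribute strategy wrapper. A minimal sketch of that single-device pattern, assuming plain TensorFlow 2.x and a toy model/data in place of the YOLO network and loss.

# Sketch of the train-step pattern above (toy model and data are assumptions).
import tensorflow as tf

model = tf.keras.Sequential([
    tf.keras.layers.Dense(8, activation="relu",
                          kernel_regularizer=tf.keras.regularizers.l2(5e-4)),
    tf.keras.layers.Dense(1),
])
optimizer = tf.keras.optimizers.Adam(1e-3)
loss_fn = tf.keras.losses.MeanSquaredError()

@tf.function
def train_step(x, y):
    with tf.GradientTape() as tape:
        pred = model(x, training=True)
        # task loss plus the model's collected regularization losses,
        # matching loss_value = tf.reduce_sum(net.losses) + loss_value above
        loss = loss_fn(y, pred) + tf.reduce_sum(model.losses)
    grads = tape.gradient(loss, model.trainable_variables)
    optimizer.apply_gradients(zip(grads, model.trainable_variables))
    return loss

x = tf.random.normal((32, 4))
y = tf.random.normal((32, 1))
print(float(train_step(x, y)))

For multi-GPU training the source wraps this step in strategy.run and reduces the per-replica losses with strategy.reduce(tf.distribute.ReduceOp.MEAN, ...), which is the only structural difference from the single-device version.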
aaronspring/doppyo | [
"e29e21fbb997f024f39d2e5e67decfc235b0dcca"
] | [
"doppyo/sugar.py"
] | [
"\"\"\"\n Collection of old doppyo functions and useful tidbits for internal dcfp use\n Authors: Dougie Squire and Thomas Moore\n Date created: 01/10/2018\n Python Version: 3.6\n\"\"\"\n\n# ===================================================================================================\n# Packages\n# ===================================================================================================\nimport numpy as np\nimport pandas as pd\nimport xarray as xr\n\nimport cartopy\nfrom collections import Sequence\nfrom itertools import chain, count\nimport matplotlib\nimport matplotlib.pyplot as plt\nimport matplotlib.ticker as mticker\nfrom cartopy.util import add_cyclic_point\nfrom cartopy.mpl.gridliner import LONGITUDE_FORMATTER, LATITUDE_FORMATTER\n\n# Load doppyo packages -----\nfrom doppyo import utils\n\n# ===================================================================================================\ndef rank_gufunc(x):\n ''' Returns ranked data along specified dimension '''\n \n import bottleneck\n ranks = bottleneck.nanrankdata(x,axis=-1)\n ranks = ranks[...,0]\n \n return ranks\n\n\ndef compute_rank(da_1, da_2, over_dim): \n ''' Feeds forecast and observation data to ufunc that ranks data along specified dimension'''\n \n # Add 'ensemble' coord to obs if one does not exist -----\n if over_dim not in da_2.coords:\n da_2_pass = da_2.copy()\n da_2_pass.coords[over_dim] = -1\n da_2_pass = da_2_pass.expand_dims(over_dim)\n else:\n da_2_pass = da_2.copy()\n\n # Only keep and combine instances that appear in both dataarrays (excluding the ensemble dim) -----\n aligned = xr.align(da_2_pass, da_1, join='inner', exclude=over_dim)\n combined = xr.concat(aligned, dim=over_dim)\n \n return xr.apply_ufunc(rank_gufunc, combined,\n input_core_dims=[[over_dim]],\n dask='allowed',\n output_dtypes=[int]).rename('rank')\n\n\n# ===================================================================================================\ndef categorize(da, bin_edges):\n \"\"\" \n Returns the indices of the bins to which each value in input array belongs \n Output indices are such that bin_edges[i-1] <= x < bin_edges[i]\n \"\"\"\n\n return xr.apply_ufunc(np.digitize, da, bin_edges,\n input_core_dims=[[],[]],\n dask='allowed',\n output_dtypes=[int]).rename('categorized')\n\n\n# ===================================================================================================\ndef unstack_and_count(da, dims):\n \"\"\" Unstacks provided xarray object and returns the total number of elements along dims \"\"\"\n \n try:\n unstacked = da.unstack(da.dims[0])\n except ValueError:\n unstacked = da\n\n if dims is None:\n return ((0 * unstacked) + 1)\n else:\n return ((0 * unstacked) + 1).sum(dim=dims, skipna=True)\n\n\ndef compute_histogram(da, bin_edges, over_dims):\n \"\"\" Returns the histogram of data over the specified dimensions \"\"\"\n\n # To use groupby_bins, da must have a name -----\n da = da.rename('data') \n \n hist = da.groupby_bins(da, bins=bin_edges, squeeze=False) \\\n .apply(unstack_and_count, dims=over_dims) \\\n .fillna(0) \\\n .rename({'data_bins' : 'bins'})\n hist['bins'] = (bin_edges[0:-1]+bin_edges[1:])/2\n \n # Add nans where data did not fall in any bin -----\n return hist.astype(int).where(hist.sum('bins') != 0)\n\n\n# ===================================================================================================\ndef calc_gradient(da, dim, x=None):\n \"\"\"\n Returns the gradient computed using second order accurate central differences in the \n interior points and either first order 
accurate one-sided (forward or backwards) \n differences at the boundaries\n\n See https://docs.scipy.org/doc/numpy-1.14.0/reference/generated/numpy.gradient.html\n \"\"\" \n \n # Replace dimension values if specified -----\n da_n = da.copy()\n if x is None:\n x = da_n[dim]\n \n centre_chunk = range(len(x[dim])-2)\n \n f_hd = da_n.shift(**{dim:-2})\n f = da_n.shift(**{dim:-1})\n f_hs = da_n\n hs = x.shift(**{dim:-1}) - x\n hd = x.shift(**{dim:-2}) - x.shift(**{dim:-1})\n c = (hs ** 2 * f_hd + (hd ** 2 - hs ** 2) * f - hd ** 2 * f_hs) / \\\n (hs * hd * (hd + hs)).isel(**{dim : centre_chunk})\n c[dim] = x[dim][1:-1]\n\n l = (da_n.shift(**{dim:-1}) - da_n).isel(**{dim : 0}) / \\\n (x.shift(**{dim:-1}) - x).isel(**{dim : 0})\n\n r = (-da_n.shift(**{dim:1}) + da_n).isel(**{dim : -1}) / \\\n (-x.shift(**{dim:1}) + x).isel(**{dim : -1})\n \n grad = xr.concat([l, c, r], dim=dim)\n grad[dim] = da[dim]\n \n return grad\n\n\n# ===================================================================================================\ndef bias_correct_ms(da_biased, da_target, da_target_clim=None, init_date_name='init_date', \n lead_time_name='lead_time'):\n \"\"\"\n Adjusts, per month and lead time, the mean and standard deviation of da_biased to match that \n of da_target.\n Author: Dougie Squire\n Date: 01/09/2018\n \n Parameters\n ----------\n da_biased : xarray DataArray\n Array containing values to be corrected. The time information of this array is anticipated \n in a lead_time/inital_date format\n da_target : xarray DataArray\n Array containing values to use for the correction.\n da_target_clim : xarray DataArray, optional\n Array containing a climatology of da_target. If da_target_clim is provided, this function \n returns both the corrected full field and the anomalies. Otherwise, returns only the \n anomalies\n init_date_name : str, optional\n Name of initial date dimension\n lead_time_name : str, optional\n Name of lead time dimension\n \n Returns\n -------\n corrected : xarray DataArray\n Bias corrected array\n \n Examples\n --------\n >>> biased = xr.DataArray(np.random.normal(size=(48,6)), \n ... coords=[('init_date', pd.date_range(start='1/1/2018', periods=48, freq='M')), \n ... ('lead_time', np.arange(6))])\n >>> biased['lead_time'].attrs['units'] = 'M'\n >>> target = xr.DataArray(np.random.normal(size=(48)), \n ... coords=[('time', pd.date_range(start='1/1/2000', periods=48, freq='M'))])\n >>> doppyo.utils.bias_correct_ms(biased, target)\n <xarray.DataArray (init_date: 48, lead_time: 6)>\n array([[ 9.336394e-02, 1.133997e-01, -5.851293e-01, -4.908594e-02,\n 7.952765e-01, 5.325052e-01],\n [-1.131123e+00, 1.603380e-01, -1.626906e+00, -1.811439e+00,\n -1.653359e-01, -1.871170e-01],\n [ 6.515435e-01, -1.064662e+00, 2.249610e+00, 6.881682e-01,\n -1.831233e-01, -1.159470e+00],\n ...,\n [-2.096226e+00, 3.143062e-04, 3.603787e-01, -1.515535e+00,\n 5.421578e-02, -6.446119e-01],\n [-8.186274e-01, -9.858171e-01, 1.933307e+00, 5.227265e-02,\n 5.443201e-01, -7.059492e-01],\n [ 2.253396e-02, 2.238470e+00, 1.138728e-01, -3.617103e-01,\n 1.678223e+00, -2.413158e+00]])\n Coordinates:\n * lead_time (lead_time) int64 0 1 2 3 4 5\n * init_date (init_date) datetime64[ns] 2018-01-31 2018-02-28 ... 
2021-12-31\n \n Notes\n -----------\n Many years of initial dates (in da_biased) and times (in da_target) must exist for the mean and standard \n deviation to be computed reliably\n \"\"\"\n \n def _groupby_lead_and_mean(da, over_dims, init_date_name, lead_time_name):\n \"\"\" Groups provided array by lead time and computes mean \"\"\"\n \n return da.unstack('stacked_' + init_date_name + '_' + lead_time_name).groupby(lead_time_name).mean(over_dims, skipna=True)\n\n def _groupby_lead_and_std(da, over_dims, init_date_name, lead_time_name):\n \"\"\" Groups provided array by lead time and computes standard deviation \"\"\"\n \n return da.unstack('stacked_' + init_date_name + '_' + lead_time_name).groupby(lead_time_name).std(over_dims, skipna=True)\n\n def _unstack_and_shift_per_month(da, shift, init_date_name, lead_time_name):\n \"\"\" Unstacks and adjusts input array by a constant shift as a function of month \"\"\"\n \n da_us = da.unstack('stacked_' + init_date_name + '_' + lead_time_name)\n the_month = np.ndarray.flatten(da_us.month.values)\n the_month = int(np.unique(the_month[~np.isnan(the_month)]))\n \n return da_us - shift.sel(month=the_month)\n\n def _unstack_and_scale_per_month(da, scale, init_date_name, lead_time_name):\n \"\"\" Unstacks and scales input array by a constant value as a function of month \"\"\"\n \n da_us = da.unstack('stacked_' + init_date_name + '_' + lead_time_name)\n the_month = np.ndarray.flatten(da_us.month.values)\n the_month = int(np.unique(the_month[~np.isnan(the_month)]))\n \n return da_us * scale.sel(month=the_month)\n\n def _scale_per_month(da, scale):\n \"\"\" Scales input array by a constant value as a function of month \"\"\"\n \n return da.groupby('time.month') * scale\n \n _anomalize = lambda data, clim: datetime_to_leadtime(\n anomalize(\n leadtime_to_datetime(data),clim))\n\n _rescale = lambda da, scale : datetime_to_leadtime(\n _scale_per_month(\n leadtime_to_datetime(da), scale))\n\n da_biased = da_biased.copy()\n da_target = da_target.copy()\n month = (da_biased[init_date_name].dt.month + da_biased[lead_time_name]) % 12\n month = month.where(month != 0, 12)\n\n # Correct the mean -----\n da_biased.coords['month'] = month\n try:\n da_biased_mean = da_biased.groupby('month').apply(_groupby_lead_and_mean, over_dims=[init_date_name,'ensemble'],\n init_date_name=init_date_name, lead_time_name=lead_time_name)\n except ValueError:\n da_biased_mean = da_biased.groupby('month').apply(_groupby_lead_and_mean, over_dims=init_date_name,\n init_date_name=init_date_name, lead_time_name=lead_time_name)\n \n if da_target_clim is not None:\n da_target_mean = da_target.groupby('time.month').mean('time')\n \n da_meancorr = da_biased.groupby('month').apply(_unstack_and_shift_per_month, shift=(da_biased_mean - da_target_mean),\n init_date_name=init_date_name, lead_time_name=lead_time_name) \\\n .mean('month', skipna=True)\n da_meancorr[lead_time_name] = da_biased[lead_time_name]\n da_meancorr.coords['month'] = month\n\n # Compute the corrected anomalies -----\n da_anom_meancorr = da_meancorr.groupby(init_date_name).apply(_anomalize, clim=da_target_clim)\n da_anom_meancorr.coords['month'] = month\n else:\n da_anom_meancorr = da_biased.groupby('month').apply(_unstack_and_shift_per_month, shift=(da_biased_mean),\n init_date_name=init_date_name, lead_time_name=lead_time_name) \\\n .mean('month', skipna=True)\n da_anom_meancorr[lead_time_name] = da_anom_meancorr[lead_time_name]\n da_anom_meancorr.coords['month'] = month\n \n # Correct the standard deviation -----\n try:\n 
da_biased_std_tmp = da_anom_meancorr.groupby('month').apply(_groupby_lead_and_std, over_dims=[init_date_name,'ensemble'],\n init_date_name=init_date_name, lead_time_name=lead_time_name)\n except ValueError:\n da_biased_std_tmp = da_anom_meancorr.groupby('month').apply(_groupby_lead_and_std, over_dims=init_date_name,\n init_date_name=init_date_name, lead_time_name=lead_time_name)\n try:\n da_target_std = da_target.sel(lat=da_biased.lat, lon=da_biased.lon).groupby('time.month').std('time')\n except:\n da_target_std = da_target.groupby('time.month').std('time')\n \n da_anom_stdcorr_tmp = da_anom_meancorr.groupby('month').apply(_unstack_and_scale_per_month, \n scale=(da_target_std / da_biased_std_tmp),\n init_date_name=init_date_name, \n lead_time_name=lead_time_name) \\\n .mean('month', skipna=True)\n da_anom_stdcorr_tmp[lead_time_name] = da_biased[lead_time_name]\n da_anom_stdcorr_tmp.coords['month'] = month\n \n # This will \"squeeze\" each pdf at each lead time appropriately. However, the total variance across all leads for \n # a given month will now be incorrect. Thus, we now rescale as a function of month only\n try:\n da_biased_std = concat_times(da_anom_stdcorr_tmp).groupby('time.month').std(['time','ensemble'])\n except ValueError:\n da_biased_std = concat_times(da_anom_stdcorr_tmp).groupby('time.month').std('time')\n da_anom_stdcorr = da_anom_stdcorr_tmp.groupby(init_date_name).apply(_rescale, scale=(da_target_std / da_biased_std))\n \n if da_target_clim is not None:\n da_stdcorr = da_anom_stdcorr.groupby(init_date_name).apply(_anomalize, clim=-da_target_clim)\n return da_stdcorr.drop('month'), da_anom_stdcorr.drop('month')\n else:\n return da_anom_stdcorr.drop('month')\n\n\n# ===================================================================================================\ndef bias_correct_m(da_biased, da_target, da_target_clim=None, init_date_name='init_date', \n lead_time_name='lead_time'):\n \"\"\"\n Adjusts, per month and lead time, the mean of da_biased to match that of da_target\n Author: Dougie Squire\n Date: 01/09/2018\n \n Parameters\n ----------\n da_biased : xarray DataArray\n Array containing values to be corrected. The time information of this array is anticipated \n in a lead_time/inital_date format\n da_target : xarray DataArray\n Array containing values to use for the correction.\n da_target_clim : xarray DataArray, optional\n Array containing a climatology of da_target. If da_target_clim is provided, this function \n returns both the corrected full field and the anomalies. Otherwise, returns only the \n anomalies\n init_date_name : str, optional\n Name of initial date dimension\n lead_time_name : str, optional\n Name of lead time dimension\n \n Returns\n -------\n corrected : xarray DataArray\n Bias corrected array\n \n Examples\n --------\n >>> biased = xr.DataArray(np.random.normal(size=(48,6)), \n ... coords=[('init_date', pd.date_range(start='1/1/2018', periods=48, freq='M')), \n ... ('lead_time', np.arange(6))])\n >>> biased['lead_time'].attrs['units'] = 'M'\n >>> target = xr.DataArray(np.random.normal(size=(48)), \n ... 
coords=[('time', pd.date_range(start='1/1/2000', periods=48, freq='M'))])\n >>> doppyo.utils.bias_correct_m(biased, target)\n <xarray.DataArray (init_date: 48, lead_time: 6)>\n array([[ 0.541226, 0.693622, -0.367322, 0.820282, 0.111487, 0.078355],\n [-0.299829, 0.164297, -0.976883, 0.463365, -0.26428 , -0.536119],\n [ 0.078832, -0.260615, -0.235059, -0.349185, 0.567183, -1.543395],\n ...,\n [ 0.335494, -1.121158, 1.313004, 0.604279, 0.135053, 0.031851],\n [ 0.33103 , 0.876521, -0.980873, 0.640328, 1.053691, 0.166768],\n [ 1.207329, 0.021916, 0.210883, -0.189922, 0.075786, 0.047616]])\n Coordinates:\n * init_date (init_date) datetime64[ns] 2018-01-31 2018-02-28 ... 2021-12-31\n * lead_time (lead_time) int64 0 1 2 3 4 5\n \n Notes\n -----------\n Many years of initial dates (in da_biased) and times (in da_target) must exist for the mean to be \n computed reliably\n \"\"\"\n\n def _groupby_lead_and_mean(da, over_dims, init_date_name, lead_time_name):\n \"\"\" Groups provided array by lead time and computes mean \"\"\"\n \n return da.unstack('stacked_' + init_date_name + '_' + lead_time_name).groupby(lead_time_name).mean(over_dims, skipna=True)\n \n def _unstack_and_shift_per_month(da, shift, init_date_name, lead_time_name):\n \"\"\" Unstacks and adjusts input array by a constant shift as a function of month \"\"\"\n \n da_us = da.unstack('stacked_' + init_date_name + '_' + lead_time_name)\n the_month = np.ndarray.flatten(da_us.month.values)\n the_month = int(np.unique(the_month[~np.isnan(the_month)]))\n \n return da_us - shift.sel(month=the_month)\n \n _anomalize = lambda data, clim: datetime_to_leadtime(\n anomalize(\n leadtime_to_datetime(data),clim))\n \n da_biased = da_biased.copy()\n da_target = da_target.copy()\n \n month = (da_biased[init_date_name].dt.month + da_biased[lead_time_name]) % 12\n month = month.where(month != 0, 12)\n\n # Correct the mean -----\n da_biased.coords['month'] = month\n try:\n da_biased_mean = da_biased.groupby('month').apply(_groupby_lead_and_mean, over_dims=[init_date_name,'ensemble'],\n init_date_name=init_date_name, lead_time_name=lead_time_name)\n except ValueError:\n da_biased_mean = da_biased.groupby('month').apply(_groupby_lead_and_mean, over_dims=init_date_name,\n init_date_name=init_date_name, lead_time_name=lead_time_name)\n \n if da_target_clim is not None:\n da_target_mean = da_target.groupby('time.month').mean('time')\n \n da_meancorr = da_biased.groupby('month').apply(_unstack_and_shift_per_month, shift=(da_biased_mean - da_target_mean),\n init_date_name=init_date_name, lead_time_name=lead_time_name) \\\n .mean('month', skipna=True)\n da_meancorr[lead_time_name] = da_biased[lead_time_name]\n da_meancorr.coords['month'] = month\n\n # Compute the corrected anomalies -----\n da_anom_meancorr = da_meancorr.groupby(init_date_name).apply(_anomalize, clim=da_target_clim)\n da_anom_meancorr.coords['month'] = month\n else:\n da_anom_meancorr = da_biased.groupby('month').apply(_unstack_and_shift_per_month, shift=(da_biased_mean),\n init_date_name=init_date_name, lead_time_name=lead_time_name) \\\n .mean('month', skipna=True)\n da_anom_meancorr[lead_time_name] = da_anom_meancorr[lead_time_name]\n da_anom_meancorr.coords['month'] = month\n \n if da_target_clim is not None:\n da_meancorrr = da_anom_meancorr.groupby(init_date_name).apply(_anomalize, clim=-da_target_clim)\n return da_meancorr.drop('month'), da_anom_meancorr.drop('month')\n else:\n return da_anom_meancorr.drop('month')\n\n \n# 
===================================================================================================\ndef conditional_bias_correct(da_cmp, da_ref, over_dims):\n \"\"\"\n Return conditional bias corrected data using the approach of Goddard et al. 2013\n \n \n \"\"\"\n\n cc = skill.compute_Pearson_corrcoef(da_cmp.mean('ensemble'), da_ref, over_dims=over_dims, subtract_local_mean=False)\n correct_cond_bias = (da_ref.std(over_dims) / da_cmp.mean('ensemble').std(over_dims)) * cc\n \n return da_cmp * correct_cond_bias\n\n\n# ===================================================================================================\ndef trunc_time(time, freq):\n \"\"\" \n Truncates values in provided time array to provided frequency. E.g. 2018-01-15T12:00 with \n freq = 'M' becomes 2018-01-01. \n \"\"\"\n \n return time.astype('<M8[' + freq + ']')\n\n\n# ===================================================================================================\ndef month_delta(date_in, delta, trunc_to_start=False):\n \"\"\" Increments provided datetime64 array by delta months \"\"\"\n \n date_mod = pd.Timestamp(date_in)\n \n m, y = (date_mod.month + delta) % 12, date_mod.year + ((date_mod.month) + delta - 1) // 12\n \n if not m: m = 12\n \n d = min(date_mod.day, [31,\n 29 if y % 4 == 0 and not y % 400 == 0 else 28,31,30,31,30,31,31,30,31,30,31][m - 1])\n \n if trunc_to_start:\n date_out = trunc_time(np.datetime64(date_mod.replace(day=d,month=m, year=y)),'M')\n else:\n date_out = np.datetime64(date_mod.replace(day=d,month=m, year=y))\n \n return np.datetime64(date_out,'ns')\n\n\n# ===================================================================================================\ndef year_delta(date_in, delta, trunc_to_start=False):\n \"\"\" Increments provided datetime64 array by delta years \"\"\"\n \n date_mod = month_delta(date_in, 12 * delta)\n \n if trunc_to_start:\n date_out = trunc_time(date_mod,'Y')\n else: date_out = date_mod\n \n return date_out\n\n\n# ===================================================================================================\ndef datetime_to_leadtime(data_in):\n \"\"\" Converts time information from single datetime dimension to init_date/lead_time dimension pair \"\"\"\n \n init_date = data_in.time.values[0]\n lead_times = range(len(data_in.time))\n\n try:\n freq = pd.infer_freq(data_in.time.values)\n \n # If pandas tries to assign start time to frequency (e.g. 
QS-OCT), remove this -----\n if '-' in freq:\n freq = freq[:freq.find('-')]\n \n # Split frequency into numbers and strings -----\n incr_string = ''.join([i for i in freq if i.isdigit()])\n freq_incr = [int(incr_string) if incr_string else 1][0]\n freq_type = ''.join([i for i in freq if not i.isdigit()])\n \n # Specify all lengths great than 1 month in months -----\n if 'QS' in freq_type:\n freq = str(3*freq_incr) + 'MS'\n elif 'Q' in freq_type:\n freq = str(3*freq_incr) + 'M'\n elif ('YS' in freq_type) | ('AS' in freq_type):\n freq = str(12*freq_incr) + 'MS'\n elif ('Y' in freq_type) | ('A' in freq_type):\n freq = str(12*freq_incr) + 'M'\n \n except ValueError:\n dt = (data_in.time.values[1] - data_in.time.values[0]) / np.timedelta64(1, 's')\n month = data_in.time.dt.month[0]\n if dt == 60*60*24:\n freq = 'D'\n elif ((month == 1) | (month == 3) | (month == 5) | (month == 7) | (month == 8) | (month == 10) | \n (month == 12)) & (dt == 31*60*60*24):\n freq = 'MS'\n elif ((month == 4) | (month == 6) | (month == 9) | (month == 11)) & (dt == 30*60*60*24):\n freq = 'MS'\n elif (month == 2) & ((dt == 28*60*60*24) | (dt == 29*60*60*24)): \n freq = 'MS'\n elif (dt == 365*60*60*24) | (dt == 366*60*60*24):\n freq = 'A'\n else:\n freq = 'NA'\n\n data_out = data_in.rename({'time' : 'lead_time'})\n data_out['lead_time'] = lead_times\n data_out['lead_time'].attrs['units'] = freq\n\n data_out.coords['init_date'] = init_date\n \n return data_out\n\n\n# ===================================================================================================\ndef leadtime_to_datetime(data_in, init_date_name='init_date', lead_time_name='lead_time'):\n \"\"\" Converts time information from lead time/initial date dimension pair to single datetime dimension \"\"\"\n \n try:\n init_date = data_in[init_date_name].values[0]\n except IndexError:\n init_date = data_in[init_date_name].values\n \n lead_times = list(map(int, data_in[lead_time_name].values))\n freq = data_in[lead_time_name].attrs['units']\n \n # # Split frequency into numbers and strings -----\n # incr_string = ''.join([i for i in freq if i.isdigit()])\n # freq_incr = [int(incr_string) if incr_string else 1][0]\n # freq_type = ''.join([i for i in freq if not i.isdigit()])\n\n # Deal with special cases of monthly and yearly frequencies -----\n # if 'M' in freq_type:\n # datetimes = np.array([month_delta(init_date, freq_incr * ix) for ix in lead_times])\n # elif ('A' in freq_type) | ('Y' in freq_type):\n # datetimes = np.array([year_delta(init_date, freq_incr * ix) for ix in lead_times])\n # else:\n # datetimes = (pd.date_range(init_date, periods=len(lead_times), freq=freq)).values \n datetimes = (pd.date_range(init_date, periods=len(lead_times), freq=freq)).values\n \n data_out = data_in.drop(init_date_name)\n data_out = data_out.rename({lead_time_name : 'time'})\n data_out['time'] = datetimes\n \n return prune(data_out)\n\n\n# ===================================================================================================\ndef get_nearest_point(da, lat, lon):\n \"\"\" Returns the nearest grid point to the specified lat/lon location \"\"\"\n\n return da.sel(lat=lat,lon=lon,method='nearest')\n\n\n# ===================================================================================================\n# visualization tools\n# ===================================================================================================\ndef plot_fields(data, title=None, headings=None, ncol=2, contour=False, vlims=None, clims=None, squeeze_row=1, \n squeeze_col=1, 
squeeze_cbar=1, shift_cbar=1, cmap='viridis', fontsize=12, invert=False):\n \"\"\" Plots tiles of figures \"\"\"\n \n def _depth(seq):\n for level in count():\n if not seq:\n return level\n seq = list(chain.from_iterable(s for s in seq if isinstance(s, Sequence)))\n\n matplotlib.rc('font', family='sans-serif')\n matplotlib.rc('font', serif='Helvetica') \n matplotlib.rc('text', usetex='false') \n matplotlib.rcParams.update({'font.size': fontsize})\n\n nrow = int(np.ceil(len(data)/ncol));\n\n fig = plt.figure(figsize=(11*squeeze_col, nrow*4*squeeze_row))\n \n if (clims is not None) & (np.shape(vlims) != np.shape(clims)):\n raise ValueError('The input clims must be equal in size to vlims')\n \n # Check if vlims are given per figure or for all figures -----\n one_cbar = False\n if vlims is None:\n vlims = [[None, None]] * len(data)\n if _depth(vlims) == 1:\n one_cbar = True\n \n over_count = 1\n for idx,dat in enumerate(data):\n if one_cbar:\n vmin, vmax = vlims\n if clims is not None:\n cmin, cmax = clims\n else:\n vmin, vmax = vlims[idx]\n if clims is not None:\n cmin, cmax = clims[idx]\n \n if ('lat' in dat.dims) and ('lon' in dat.dims):\n trans = cartopy.crs.PlateCarree()\n ax = plt.subplot(nrow, ncol, over_count, projection=cartopy.crs.PlateCarree(central_longitude=180))\n extent = [dat.lon.min(), dat.lon.max(), \n dat.lat.min(), dat.lat.max()]\n\n if contour is True:\n if clims is not None:\n ax.coastlines(color='gray')\n im = ax.contourf(dat.lon, dat.lat, dat, levels=np.linspace(vmin,vmax,12), origin='lower', transform=trans, \n vmin=vmin, vmax=vmax, cmap=cmap)\n ax.contour(dat.lon, dat.lat, dat, levels=np.linspace(cmin,cmax,12), origin='lower', transform=trans,\n vmin=vmin, vmax=vmax, colors='w', linewidths=2)\n ax.contour(dat.lon, dat.lat, dat, levels=np.linspace(cmin,cmax,12), origin='lower', transform=trans,\n vmin=vmin, vmax=vmax, colors='k', linewidths=1)\n else:\n ax.coastlines(color='black')\n im = ax.contourf(dat.lon, dat.lat, dat, origin='lower', transform=trans, vmin=vmin, vmax=vmax, \n cmap=cmap)\n else:\n ax.coastlines(color='black')\n im = ax.imshow(dat, origin='lower', extent=extent, transform=trans, vmin=vmin, vmax=vmax, cmap=cmap)\n\n gl = ax.gridlines(crs=cartopy.crs.PlateCarree(), draw_labels=True)\n gl.xlines = False\n gl.ylines = False\n gl.xlabels_top = False\n if over_count % ncol == 0:\n gl.ylabels_left = False\n elif (over_count+ncol-1) % ncol == 0: \n gl.ylabels_right = False\n else:\n gl.ylabels_left = False\n gl.ylabels_right = False\n gl.xlocator = mticker.FixedLocator([-90, 0, 90, 180])\n gl.ylocator = mticker.FixedLocator([-90, -60, 0, 60, 90])\n gl.xformatter = LONGITUDE_FORMATTER\n gl.yformatter = LATITUDE_FORMATTER\n \n if not one_cbar:\n cbar = plt.colorbar(im, ax=ax, orientation=\"horizontal\", aspect=30/squeeze_cbar, pad=shift_cbar*0.1)\n tick_locator = mticker.MaxNLocator(nbins=6)\n cbar.locator = tick_locator\n cbar.update_ticks()\n if headings is not None:\n cbar.set_label(headings[idx], labelpad=5, fontsize=fontsize);\n elif headings is not None:\n ax.set_title(headings[idx], fontsize=fontsize)\n else:\n ax = plt.subplot(nrow, ncol, over_count)\n if 'lat' in dat.dims:\n x_plt = dat['lat']\n y_plt = dat[utils.get_other_dims(dat,'lat')[0]]\n # if dat.get_axis_num('lat') > 0:\n # dat = dat.transpose()\n elif 'lon' in dat.dims:\n x_plt = dat['lon']\n y_plt = dat[utils.get_other_dims(dat,'lon')[0]]\n # if dat.get_axis_num('lon') > 0:\n # dat = dat.transpose()\n else: \n x_plt = dat[dat.dims[1]]\n y_plt = dat[dat.dims[0]]\n \n extent = [x_plt.min(), 
x_plt.max(), \n y_plt.min(), y_plt.max()]\n \n if contour is True:\n if clims is not None:\n im = ax.contourf(x_plt, y_plt, dat, levels=np.linspace(vmin,vmax,12), vmin=vmin, vmax=vmax, \n cmap=cmap)\n ax.contour(x_plt, y_plt, dat, levels=np.linspace(cmin,cmax,12), colors='w', linewidths=2)\n ax.contour(x_plt, y_plt, dat, levels=np.linspace(cmin,cmax,12), colors='k', linewidths=1)\n else:\n im = ax.contourf(x_plt, y_plt, dat, vmin=vmin, vmax=vmax, cmap=cmap)\n else:\n im = ax.imshow(dat, origin='lower', extent=extent, vmin=vmin, vmax=vmax, cmap=cmap)\n \n if over_count % ncol == 0:\n ax.yaxis.tick_right()\n elif (over_count+ncol-1) % ncol == 0: \n ax.set_ylabel(y_plt.dims[0], fontsize=fontsize)\n else:\n ax.set_yticks([])\n if idx / ncol >= nrow - 1:\n ax.set_xlabel(x_plt.dims[0], fontsize=fontsize)\n \n if not one_cbar:\n cbar = plt.colorbar(im, ax=ax, orientation=\"horizontal\", aspect=30/squeeze_cbar, pad=shift_cbar*0.1)\n tick_locator = mticker.MaxNLocator(nbins=6)\n cbar.locator = tick_locator\n cbar.update_ticks()\n if headings is not None:\n cbar.set_label(headings[idx], labelpad=5, fontsize=fontsize);\n elif headings is not None:\n ax.set_title(headings[idx], fontsize=fontsize)\n \n if invert:\n ax.invert_yaxis()\n\n over_count += 1\n\n plt.tight_layout()\n \n if one_cbar:\n vmin, vmax = vlims\n fig.subplots_adjust(bottom=shift_cbar*0.16)\n cbar_ax = fig.add_axes([0.15, 0.13, 0.7, squeeze_cbar*0.020])\n cbar = fig.colorbar(im, cax=cbar_ax, orientation='horizontal');\n cbar_ax.set_xlabel(title, rotation=0, labelpad=15, fontsize=fontsize);\n cbar.set_ticks(np.linspace(vmin,vmax,5))\n elif title is not None:\n fig.suptitle(title, y=1)\n \n \n# ===================================================================================================\ndef size_GB(xr_object):\n \"\"\"\n How many GB (or GiB) is your xarray object?\n \n // Requires an xarray object\n \n // Returns:\n * equivalent GB (GBytes) - 10^9 conversion\n * equivalent GiB (GiBytes) - 2^ 30 conversion\n \n < Thomas Moore - [email protected] - 10102018 >\n \"\"\" \n bytes = xr_object.nbytes\n Ten2the9 = 10**9\n Two2the30 = 2**30\n GBytes = bytes / Ten2the9\n GiBytes = bytes / Two2the30\n \n #print out results\n print(xr_object.name, \"is\", GBytes, \"GB\", 'which is', GiBytes,\"GiB\")\n \n \n return GBytes,GiBytes\n\n\n# ===================================================================================================\ndef get_pres_name(da):\n \"\"\" \n Returns name of pressure dimension in input array\n Author: Dougie Squire\n Date: 03/03/2018\n \n Parameters\n ----------\n da : xarray DataArray\n Array with coordinate corresponding to pressure\n \n Returns\n -------\n name : str\n Name of dimension corresponding to pressure\n \n Examples\n --------\n >>> A = xr.DataArray(np.random.normal(size=(2,2,2,2,2)), \n ... coords=[('lat', np.arange(2)), ('lon', np.arange(2)), \n ... ('depth', np.arange(2)), ('level', np.arange(2)), \n ... 
('pfull', np.arange(2))])\n >>> doppyo.utils.get_pres_name(A)\n 'pfull'\n \"\"\"\n \n if 'pfull' in da.dims:\n return 'pfull'\n elif 'phalf' in da.dims:\n return 'phalf'\n else:\n raise KeyError('Unable to determine pressure dimension')\n pass\n \n \n# =================================================================================================== \ndef did_event(da, event):\n \"\"\" \n Returns array containing True/False where event occurs/does not occur \n \n Notes\n -----\n See http://www.cawcr.gov.au/projects/verification/\n \"\"\"\n \n eval_expr = event.replace(\">\", \"da >\").replace(\"<\", \"da <\").replace(\"==\", \"da ==\") \\\n .replace(\"=\", \"da ==\").replace('&&', '&').replace('||', '|') \\\n .replace(\"and\", \"&\").replace(\"or\", \"|\")\n eval_expr = '(' + eval_expr + ').rename(\"event_logical\")'\n \n return eval(eval_expr)\n\n\n# ===================================================================================================\ndef compute_likelihood(da_logical, dim='ensemble'):\n \"\"\" \n Returns array of likelihoods computed along dim from logical event data \n \n Notes\n -----\n See http://www.cawcr.gov.au/projects/verification/\n \"\"\"\n \n if dim == None:\n likelihood = da_logical\n else:\n likelihood = da_logical.mean(dim=dim).rename('likelihood')\n return likelihood\n\n\n# ===================================================================================================\ndef atmos_energy_cycle(temp, u, v, omega, gh, terms=None, vgradz=False, spectral=False, n_wavenumbers=20,\n integrate=True, loop_triple_terms=False, lat_name=None, lon_name=None, \n plevel_name=None):\n \"\"\"\n Returns all terms in the Lorenz energy cycle. Follows formulae and notation used in `Marques \n et al. 2011 Global diagnostic energetics of five state-of-the-art climate models. Climate \n Dynamics`. Note that this decomposition is in the space domain. A space-time decomposition \n can also be carried out (though not in Fourier space, but this is not implemented here (see \n `Oort. 1964 On Estimates of the atmospheric energy cycle. Monthly Weather Review`).\n\n Parameters\n ----------\n temp : xarray DataArray\n Array containing fields of temperature with at least coordinates latitude, longitude \n and level (following standard naming - see Limitations)\n u : xarray DataArray\n Array containing fields of zonal velocity with at least coordinates latitude, longitude \n and level (following standard naming - see Limitations)\n v : xarray DataArray\n Array containing fields of meridional velocity with at least coordinates latitude, longitude \n and level (following standard naming - see Limitations)\n omega : xarray DataArray\n Array containing fields of vertical velocity (pressure coordinates) with at least coordinates \n latitude, longitude and level (following standard naming - see Limitations)\n gh : xarray DataArray\n Array containing fields of geopotential height with at least coordinates latitude, longitude \n and level (following standard naming - see Limitations)\n terms : str or sequence of str\n List of terms to compute. If None, returns all terms. 
Available options are:\n Pz; total available potential energy in the zonally averaged temperature distribution\n Kz; total kinetic energy in zonally averaged motion\n Pe; total eddy available potential energy [= sum_n Pn (n > 0 only) for spectral=True] (Note that \n for spectral=True, an additional term, Sn, quantifying the rate of transfer of available potential \n energy to eddies of wavenumber n from eddies of all other wavenumbers is also returned)\n Ke; total eddy kinetic energy [= sum_n Kn (n > 0 only) for spectral=True] (Note that for \n spectral=True, an additional term, Ln, quantifying the rate of transfer of kinetic energy to eddies \n of wavenumber n from eddies of all other wavenumbers is also returned)\n Cz; rate of conversion of zonal available potential energy to zonal kinetic energy\n Ca; rate of transfer of total available potential energy in the zonally averaged temperature \n distribution (Pz) to total eddy available potential energy (Pe) [= sum_n Rn (n > 0 only) for \n spectral=True]\n Ce; rate of transfer of total eddy available potential energy (Pe) to total eddy kinetic energy \n (Ke) [= sum_n Cn (n > 0 only) for spectral=True]\n Ck; rate of transfer of total eddy kinetic energy (Ke) to total kinetic energy in zonally \n averaged motion (Kz) [= sum_n Mn (n > 0 only) for spectral=True]\n Gz; rate of generation of zonal available potential energy due to the zonally averaged heating (Pz).\n Note that this term is computed as a residual (Cz + Ca) and cannot be returned in spectral space. \n If Gz is requested with spectral=True, Gz is returned in real-space only\n Ge; rate of generation of eddy available potential energy (Pe). Note that this term is computed as \n a residual (Ce - Ca) and cannot be returned in spectral space. If Ge is requested with spectral=True, \n Ge is returned in real-space only\n Dz; rate of viscous dissipation of zonal kinetic energy (Kz). Note that this term is computed as a \n residual (Cz - Ck) and cannot be returned in spectral space. If Dz is requested with spectral=True, Dz \n is returned in real-space only\n De; rate of dissipation of eddy kinetic energy (Ke). Note that this term is computed as a residual \n (Ce - Ck) and cannot be returned in spectral space. If De is requested with spectral=True, De is \n returned in real-space only\n vgradz : bool, optional\n If True, uses `v-grad-z` approach for computing terms relating to conversion\n of potential energy to kinetic energy. Otherwise, defaults to using the \n `omaga-alpha` approach (see reference above for details)\n spectral : bool, optional\n If True, computes all terms as a function of wavenumber on longitudinal bands. To use this \n option, longitudes must be regularly spaced. Note that Ge and De are computed as residuals and\n cannot be computed in spectral space\n n_wavenumbers : int, optional\n Number of wavenumbers to retain either side of wavenumber=0. Obviously only does anything if \n spectral=True\n integrate : bool, optional\n If True, computes and returns the integral of each term over the mass of the \n atmosphere. Otherwise, only the integrands are returned.\n\n Returns\n -------\n atmos_energy_cycle : xarray Dataset\n \n \n Limitations\n -----------\n All input array coordinates must follow standard naming (see doppyo.utils.get_lat_name(), \n doppyo.utils.get_lon_name(), etc)\n Pressure levels must be provided in units of hPa\n \n Notes\n -----\n The following notation is used below (stackable, e.g. 
*_ZT indicates the time average of the zonal \n average):\n *_A -> area average over an isobaric surface\n *_a -> departure from area average\n *_Z -> zonal average\n *_z -> departure from zonal average\n *_T -> time average\n *_t -> departure from time average\n Additionally, capital variables indicate Fourier transforms:\n F(u) = U\n F(v) = V\n F(omega) = O\n F(gh) = A\n F(temp) = B\n \"\"\"\n \n def _flip_n(da):\n \"\"\" Flips data along wavenumber coordinate \"\"\"\n\n daf = da.copy()\n daf['n'] = -daf['n']\n\n return daf.sortby(daf['n'])\n\n\n def _truncate(F, n_truncate, dim):\n \"\"\" \n Converts spatial frequency dim to wavenumber, n, and truncates all wavenumbers greater than \n n_truncate \n \"\"\"\n F[dim] = 360 * F[dim]\n F = F.rename({dim : 'n'})\n F = F.where(abs(F.n) <= n_truncate, drop=True)\n return F, _flip_n(F)\n\n\n def _triple_terms(A, B, C):\n \"\"\" \n Calculate triple term summation of the form \\int_{m=-inf}^{inf} A(m) * B(n) * C(n - m)\n \"\"\"\n\n # Use rolling operator to build shifted terms -----\n Am = A.rename({'n' : 'm'})\n Cnm = C.rolling(n=len(C.n), center=True).construct('m', fill_value=0)\n Cnm['m'] = -C['n'].values\n\n # Drop m = 0 and n < 0 -----\n Am = Am.where(Am['m'] != 0, drop=True) \n Cnm = Cnm.where(Cnm['m'] != 0, drop=True)\n\n return (B * (Am * Cnm)).sum(dim='m', skipna=False)\n\n\n def _triple_terms_loop(A, B, C):\n \"\"\" \n Calculate triple term summation of the form \\int_{m=-inf}^{inf} A(m) * B(n) * C(n - m)\n \"\"\"\n\n # Loop over all m's and perform rolling sum -----\n ms = A['n'].where(A['n'] != 0, drop=True).values\n ABC = A.copy() * 0\n for m in ms:\n Am = A.sel(n=m)\n Cnm = C.shift(n=int(m)).fillna(0)\n ABC = ABC + (Am * B * Cnm)\n\n return ABC\n \n if terms is None:\n terms = ['Pz', 'Kz', 'Pe', 'Ke', 'Cz', 'Ca', 'Ce', 'Ck', 'Gz', 'Ge', 'Dz', 'De']\n if isinstance(terms, str):\n terms = [terms]\n \n # Initialize some things -----\n if lat_name is None:\n lat_name = utils.get_lat_name(temp)\n if lon_name is None:\n lon_name = utils.get_lon_name(temp)\n if plevel_name is None:\n plevel_name = utils.get_plevel_name(temp)\n \n degtorad = utils.constants().pi / 180\n tan_lat = xr.ufuncs.tan(temp[lat_name] * degtorad)\n cos_lat = xr.ufuncs.cos(temp[lat_name] * degtorad) \n \n # Determine the stability parameter using Saltzman's approach -----\n kappa = utils.constants().R_d / utils.constants().C_pd\n p_kap = (1000 / temp[plevel_name]) ** kappa\n theta_A = utils.average(temp * p_kap, [lat_name, lon_name], weights=cos_lat)\n dtheta_Adp = utils.differentiate_wrt(theta_A, dim=plevel_name, x=(theta_A[plevel_name] * 100))\n gamma = - p_kap * (utils.constants().R_d) / ((temp[plevel_name] * 100) * utils.constants().C_pd) / dtheta_Adp # [1/K]\n energies = gamma.rename('gamma').to_dataset()\n \n # Compute zonal terms\n # ========================\n \n if ('Pz' in terms):\n # Compute the total available potential energy in the zonally averaged temperature\n # distribution, Pz [also commonly called Az] -----\n temp_A = utils.average(temp, [lat_name, lon_name], weights=cos_lat)\n temp_Z = temp.mean(dim=lon_name)\n temp_Za = temp_Z - temp_A\n Pz_int = gamma * utils.constants().C_pd / 2 * temp_Za ** 2 # [J/kg]\n energies['Pz_int'] = Pz_int\n if integrate:\n Pz = _int_over_atmos(Pz_int, lat_name, lon_name, plevel_name, lon_dim=temp[lon_name]) # [J/m^2]\n energies['Pz'] = Pz\n \n if ('Kz' in terms):\n # Compute the total kinetic energy in zonally averaged motion, Kz [also commonly \n # called Kz] -----\n u_Z = u.mean(dim=lon_name)\n v_Z = 
v.mean(dim=lon_name)\n Kz_int = 0.5 * (u_Z ** 2 + v_Z ** 2) # [J/kg]\n energies['Kz_int'] = Kz_int\n if integrate:\n Kz = _int_over_atmos(Kz_int, lat_name, lon_name, plevel_name, lon_dim=u[lon_name]) # [J/m^2]\n energies['Kz'] = Kz\n \n if ('Cz' in terms):\n # Compute the rate of conversion of zonal available potential energy (Pz) to zonal kinetic\n # energy (Kz), Cz [also commonly called Cz] -----\n if vgradz:\n if 'v_Z' not in locals():\n v_Z = v.mean(dim=lon_name)\n gh_Z = gh.mean(dim=lon_name)\n dghdlat = utils.differentiate_wrt(gh_Z, dim=lat_name, x=(gh_Z[lat_name] * degtorad))\n Cz_int = - (utils.constants().g / utils.constants().R_earth) * v_Z * dghdlat # [W/kg]\n energies['Cz_int'] = Cz_int\n if integrate:\n Cz = _int_over_atmos(Cz_int, lat_name, lon_name, plevel_name, lon_dim=gh[lon_name]) # [W/m^2]\n energies['Cz'] = Cz\n else:\n if 'temp_Za' not in locals():\n temp_A = utils.average(temp, [lat_name, lon_name], weights=cos_lat)\n temp_Z = temp.mean(dim=lon_name)\n temp_Za = temp_Z - temp_A\n omega_A = utils.average(omega, [lat_name, lon_name], weights=cos_lat)\n omega_Z = omega.mean(dim=lon_name)\n omega_Za = omega_Z - omega_A\n Cz_int = - (utils.constants().R_d / (temp[plevel_name] * 100)) * omega_Za * temp_Za # [W/kg]\n energies['Cz_int'] = Cz_int\n if integrate:\n Cz = _int_over_atmos(Cz_int, lat_name, lon_name, plevel_name, lon_dim=omega[lon_name]) # [W/m^2]\n energies['Cz'] = Cz\n \n # Compute eddy terms in Fourier space if spectral=True\n # ==========================================================\n if spectral:\n \n if ('Pe' in terms):\n # Compute the total available potential energy eddies of wavenumber n, Pn -----\n Bp, Bn = _truncate(utils.fft(temp, dim=lon_name, nfft=len(temp[lon_name]), twosided=True, shift=True) / \n len(temp[lon_name]), n_truncate=n_wavenumbers, dim='f_'+lon_name)\n\n Pn_int = (gamma * utils.constants().C_pd * abs(Bp) ** 2)\n energies['Pn_int'] = Pn_int\n if integrate:\n Pn = _int_over_atmos(Pn_int, lat_name, lon_name, plevel_name, lon_dim=temp[lon_name]) # [J/m^2]\n energies['Pn'] = Pn\n\n # Compute the rate of transfer of available potential energy to eddies of \n # wavenumber n from eddies of all other wavenumbers, Sn -----\n Up, Un = _truncate(utils.fft(u, dim=lon_name, nfft=len(u[lon_name]), twosided=True, shift=True) /\n len(u[lon_name]), n_truncate=n_wavenumbers, dim='f_'+lon_name)\n Vp, Vn = _truncate(utils.fft(v, dim=lon_name, nfft=len(v[lon_name]), twosided=True, shift=True) /\n len(v[lon_name]), n_truncate=n_wavenumbers, dim='f_'+lon_name)\n Op, On = _truncate(utils.fft(omega, dim=lon_name, nfft=len(omega[lon_name]), twosided=True, shift=True) /\n len(omega[lon_name]), n_truncate=n_wavenumbers, dim='f_'+lon_name)\n \n dBpdlat = utils.differentiate_wrt(Bp, dim=lat_name, x=(Bp[lat_name] * degtorad))\n dBndlat = utils.differentiate_wrt(Bn, dim=lat_name, x=(Bn[lat_name] * degtorad))\n dBpdp = utils.differentiate_wrt(Bp, dim=plevel_name, x=(Bp[plevel_name] * 100))\n dBndp = utils.differentiate_wrt(Bn, dim=plevel_name, x=(Bn[plevel_name] * 100))\n\n if loop_triple_terms:\n BpBnUp = _triple_terms_loop(Bp, Bn, Up)\n BpBpUn = _triple_terms_loop(Bp, Bp, Un)\n BpglBnVp = _triple_terms_loop(Bp, dBndlat, Vp)\n BpglBpVn = _triple_terms_loop(Bp, dBpdlat, Vn)\n BpgpBnOp = _triple_terms_loop(Bp, dBndp, Op)\n BpgpBpOn = _triple_terms_loop(Bp, dBpdp, On)\n BpBnOp = _triple_terms_loop(Bp, Bn, Op)\n BpBpOn = _triple_terms_loop(Bp, Bp, On)\n else:\n BpBnUp = _triple_terms(Bp, Bn, Up)\n BpBpUn = _triple_terms(Bp, Bp, Un)\n BpglBnVp = _triple_terms(Bp, dBndlat, 
Vp)\n BpglBpVn = _triple_terms(Bp, dBpdlat, Vn)\n BpgpBnOp = _triple_terms(Bp, dBndp, Op)\n BpgpBpOn = _triple_terms(Bp, dBpdp, On)\n BpBnOp = _triple_terms(Bp, Bn, Op)\n BpBpOn = _triple_terms(Bp, Bp, On)\n\n Sn_int = -gamma * utils.constants().C_pd * (1j * Bp['n']) / \\\n (utils.constants().R_earth * xr.ufuncs.cos(Bp[lat_name] * degtorad)) * \\\n (BpBnUp + BpBpUn) + \\\n gamma * utils.constants().C_pd / utils.constants().R_earth * \\\n (BpglBnVp + BpglBpVn) + \\\n gamma * utils.constants().C_pd * (BpgpBnOp + BpgpBpOn) + \\\n gamma * utils.constants().R_d / Bp[plevel_name] * \\\n (BpBnOp + BpBpOn)\n energies['Sn_int'] = Sn_int\n if integrate:\n Sn = abs(_int_over_atmos(Sn_int, lat_name, lon_name, plevel_name, lon_dim=temp[lon_name])) # [W/m^2]\n energies['Sn'] = Sn\n \n if ('Ke' in terms):\n # Compute the total kinetic energy in eddies of wavenumber n, Kn -----\n if 'U' not in locals():\n Up, Un = _truncate(utils.fft(u, dim=lon_name, nfft=len(u[lon_name]), twosided=True, shift=True) /\n len(u[lon_name]), n_truncate=n_wavenumbers, dim='f_'+lon_name)\n if 'V' not in locals():\n Vp, Vn = _truncate(utils.fft(v, dim=lon_name, nfft=len(v[lon_name]), twosided=True, shift=True) / \n len(v[lon_name]), n_truncate=n_wavenumbers, dim='f_'+lon_name)\n\n Kn_int = abs(Up) ** 2 + abs(Vp) ** 2\n energies['Kn_int'] = Kn_int\n if integrate:\n Kn = _int_over_atmos(Kn_int, lat_name, lon_name, plevel_name, lon_dim=u[lon_name]) # [J/m^2]\n energies['Kn'] = Kn\n\n # Compute the rate of transfer of kinetic energy to eddies of wavenumber n from \n # eddies of all other wavenumbers, Ln -----\n if 'O' not in locals():\n Op, On = _truncate(utils.fft(omega, dim=lon_name, nfft=len(omega[lon_name]), twosided=True, shift=True) / \n len(omega[lon_name]), n_truncate=n_wavenumbers, dim='f_'+lon_name)\n \n dUpdp = utils.differentiate_wrt(Up, dim=plevel_name, x=(Up[plevel_name] * 100))\n dVpdp = utils.differentiate_wrt(Vp, dim=plevel_name, x=(Vp[plevel_name] * 100))\n dOpdp = utils.differentiate_wrt(Op, dim=plevel_name, x=(Op[plevel_name] * 100))\n dOndp = utils.differentiate_wrt(On, dim=plevel_name, x=(On[plevel_name] * 100))\n dVpcdl = utils.differentiate_wrt(Vp * cos_lat, dim=lat_name, x=(Vp[lat_name] * degtorad))\n dVncdl = utils.differentiate_wrt(Vn * cos_lat, dim=lat_name, x=(Vn[lat_name] * degtorad))\n dUpdl = utils.differentiate_wrt(Up, dim=lat_name, x=(Up[lat_name] * degtorad))\n dVpdl = utils.differentiate_wrt(Vp, dim=lat_name, x=(Vp[lat_name] * degtorad))\n\n if loop_triple_terms:\n UpUnUp = _triple_terms_loop(Up, Un, Up)\n UpUpUn = _triple_terms_loop(Up, Up, Un)\n VpVnUp = _triple_terms_loop(Vp, Vn, Up)\n VpVpUn = _triple_terms_loop(Vp, Vp, Un)\n VpUnUp = _triple_terms_loop(Vp, Un, Up)\n VpUpUn = _triple_terms_loop(Vp, Up, Un)\n UpVnUp = _triple_terms_loop(Up, Vn, Up)\n UpVpUn = _triple_terms_loop(Up, Vp, Un)\n gpUpUngpOp = _triple_terms_loop(dUpdp, Un, dOpdp)\n gpUpUpgpOn = _triple_terms_loop(dUpdp, Up, dOndp)\n gpVpVngpOp = _triple_terms_loop(dVpdp, Vn, dOpdp)\n gpVpVpgpOn = _triple_terms_loop(dVpdp, Vp, dOndp)\n glUpUnglVpc = _triple_terms_loop(dUpdl, Un, dVpcdl)\n glUpUpglVnc = _triple_terms_loop(dUpdl, Up, dVncdl)\n glVpVnglVpc = _triple_terms_loop(dVpdl, Vn, dVpcdl)\n glVpVpglVnc = _triple_terms_loop(dVpdl, Vp, dVncdl)\n else:\n UpUnUp = _triple_terms(Up, Un, Up)\n UpUpUn = _triple_terms(Up, Up, Un)\n VpVnUp = _triple_terms(Vp, Vn, Up)\n VpVpUn = _triple_terms(Vp, Vp, Un)\n VpUnUp = _triple_terms(Vp, Un, Up)\n VpUpUn = _triple_terms(Vp, Up, Un)\n UpVnUp = _triple_terms(Up, Vn, Up)\n UpVpUn = 
_triple_terms(Up, Vp, Un)\n gpUpUngpOp = _triple_terms(dUpdp, Un, dOpdp)\n gpUpUpgpOn = _triple_terms(dUpdp, Up, dOndp)\n gpVpVngpOp = _triple_terms(dVpdp, Vn, dOpdp)\n gpVpVpgpOn = _triple_terms(dVpdp, Vp, dOndp)\n glUpUnglVpc = _triple_terms(dUpdl, Un, dVpcdl)\n glUpUpglVnc = _triple_terms(dUpdl, Up, dVncdl)\n glVpVnglVpc = _triple_terms(dVpdl, Vn, dVpcdl)\n glVpVpglVnc = _triple_terms(dVpdl, Vp, dVncdl)\n\n Ln_int = -(1j * Up['n']) / (utils.constants().R_earth * cos_lat) * \\\n (UpUnUp - UpUpUn) + \\\n (1j * Vp['n']) / (utils.constants().R_earth * cos_lat) * \\\n (VpVnUp - VpVpUn) - \\\n tan_lat / utils.constants().R_earth * \\\n (VpUnUp + VpUpUn) + \\\n tan_lat / utils.constants().R_earth * \\\n (UpVnUp + UpVpUn) + \\\n (gpUpUngpOp + gpUpUpgpOn) + \\\n (gpVpVngpOp + gpVpVpgpOn) + \\\n 1 / (utils.constants().R_earth * cos_lat) * \\\n (glUpUnglVpc + glUpUpglVnc + glVpVnglVpc + glVpVpglVnc)\n energies['Ln_int'] = Ln_int\n if integrate:\n Ln = abs(_int_over_atmos(Ln_int, lat_name, lon_name, plevel_name, lon_dim=u[lon_name])) # [W/m^2]\n energies['Ln'] = Ln\n \n if ('Ca' in terms):\n # Compute the rate of transfer of zonal available potential energy to eddy \n # available potential energy in wavenumber n, Rn -----\n if 'temp_Z' not in locals():\n temp_Z = temp.mean(dim=lon_name)\n if 'V' not in locals():\n Vp, Vn = _truncate(utils.fft(v, dim=lon_name, nfft=len(v[lon_name]), twosided=True, shift=True) / \n len(v[lon_name]), n_truncate=n_wavenumbers, dim='f_'+lon_name)\n if 'B' not in locals():\n Bp, Bn = _truncate(utils.fft(temp, dim=lon_name, nfft=len(temp[lon_name]), twosided=True, shift=True) / \n len(temp[lon_name]), n_truncate=n_wavenumbers, dim='f_'+lon_name)\n if 'O' not in locals():\n Op, On = _truncate(utils.fft(omega, dim=lon_name, nfft=len(omega[lon_name]), twosided=True, shift=True) / \n len(omega[lon_name]), n_truncate=n_wavenumbers, dim='f_'+lon_name)\n\n dtemp_Zdlat = utils.differentiate_wrt(temp_Z, dim=lat_name, x=(temp_Z[lat_name] * degtorad))\n theta = temp * p_kap\n theta_Z = theta.mean(dim=lon_name)\n theta_Za = theta_Z - theta_A\n dtheta_Zadp = utils.differentiate_wrt(theta_Za, dim=plevel_name, x=(theta_Za[plevel_name] * 100))\n Rn_int = gamma * utils.constants().C_pd * ((dtemp_Zdlat / utils.constants().R_earth) * (Vp * Bn + Vn * Bp) + \n (p_kap * dtheta_Zadp) * (Op * Bn + On * Bp)) # [W/kg]\n energies['Rn_int'] = Rn_int\n if integrate:\n Rn = abs(_int_over_atmos(Rn_int, lat_name, lon_name, plevel_name, lon_dim=temp[lon_name])) # [W/m^2]\n energies['Rn'] = Rn\n\n if ('Ce' in terms):\n # Compute the rate of conversion of available potential energy of wavenumber n \n # to eddy kinetic energy of wavenumber n, Cn -----\n if vgradz:\n if 'U' not in locals():\n Up, Un = _truncate(utils.fft(u, dim=lon_name, nfft=len(u[lon_name]), twosided=True, shift=True) / \n len(u[lon_name]), n_truncate=n_wavenumbers, dim='f_'+lon_name)\n if 'V' not in locals():\n Vp, Vn = _truncate(utils.fft(v, dim=lon_name, nfft=len(v[lon_name]), twosided=True, shift=True) / \n len(v[lon_name]), n_truncate=n_wavenumbers, dim='f_'+lon_name)\n Ap, An = _truncate(utils.fft(gh, dim=lon_name, nfft=len(gh[lon_name]), twosided=True, shift=True) / \n len(gh[lon_name]), n_truncate=n_wavenumbers, dim='f_'+lon_name)\n\n dApdlat = utils.differentiate_wrt(Ap, dim=lat_name, x=(Ap[lat_name] * degtorad))\n dAndlat = utils.differentiate_wrt(An, dim=lat_name, x=(An[lat_name] * degtorad))\n\n Cn_int = (((-1j * utils.constants().g * Up['n']) / \\\n (utils.constants().R_earth * xr.ufuncs.cos(Up[lat_name] * degtorad))) * \\\n 
(Ap * Un - An * Up)) - \\\n ((utils.constants().g / utils.constants().R_earth) * \\\n (dApdlat * Vn + dAndlat * Vp)) # [W/kg]\n energies['Cn_int'] = Cn_int\n if integrate:\n Cn = abs(_int_over_atmos(Cn_int, lat_name, lon_name, plevel_name, lon_dim=u[lon_name])) # [W/m^2]\n energies['Cn'] = Cn\n else:\n if 'O' not in locals():\n Op, On = _truncate(utils.fft(omega, dim=lon_name, nfft=len(omega[lon_name]), twosided=True, shift=True) / \n len(omega[lon_name]), n_truncate=n_wavenumbers, dim='f_'+lon_name)\n if 'B' not in locals():\n Bp, Bn = _truncate(utils.fft(temp, dim=lon_name, nfft=len(temp[lon_name]), twosided=True, shift=True) / \n len(temp[lon_name]), n_truncate=n_wavenumbers, dim='f_'+lon_name)\n Cn_int = - (utils.constants().R_d / (omega[plevel_name] * 100)) * (Op * Bn + On * Bp) # [W/kg]\n energies['Cn_int'] = Cn_int\n if integrate:\n Cn = abs(_int_over_atmos(Cn_int, lat_name, lon_name, plevel_name, lon_dim=temp[lon_name])) # [W/m^2]\n energies['Cn'] = Cn\n \n if ('Ck' in terms):\n # Compute the rate of transfer of kinetic energy to the zonally averaged flow \n # from eddies of wavenumber n, Mn -----\n if 'v_Z' not in locals():\n v_Z = v.mean(dim=lon_name)\n if 'u_Z' not in locals():\n u_Z = u.mean(dim=lon_name)\n if 'U' not in locals():\n Up, Un = _truncate(utils.fft(u, dim=lon_name, nfft=len(u[lon_name]), twosided=True, shift=True) / \n len(u[lon_name]), n_truncate=n_wavenumbers, dim='f_'+lon_name)\n if 'V' not in locals():\n Vp, Vn = _truncate(utils.fft(v, dim=lon_name, nfft=len(v[lon_name]), twosided=True, shift=True) / \n len(v[lon_name]), n_truncate=n_wavenumbers, dim='f_'+lon_name)\n if 'O' not in locals():\n Op, On = _truncate(utils.fft(omega, dim=lon_name, nfft=len(omega[lon_name]), twosided=True, shift=True) / \n len(omega[lon_name]), n_truncate=n_wavenumbers, dim='f_'+lon_name)\n dv_Zdlat = utils.differentiate_wrt(v_Z, dim=lat_name, x=(v[lat_name] * degtorad))\n du_Zndlat = utils.differentiate_wrt(u_Z / xr.ufuncs.cos(u[lat_name] * degtorad), \n dim=lat_name, x=(u[lat_name] * degtorad))\n dv_Zdp = utils.differentiate_wrt(v_Z, dim=plevel_name, x=(v[plevel_name] * 100))\n du_Zdp = utils.differentiate_wrt(u_Z, dim=plevel_name, x=(u[plevel_name] * 100))\n\n Mn_int = (-2 * Up * Un * v_Z * tan_lat / utils.constants().R_earth) + \\\n (2 * Vp * Vn * dv_Zdlat / utils.constants().R_earth + (Vp * On + Vn * Op) * dv_Zdp) + \\\n ((Up * On + Un * Op) * du_Zdp) + \\\n ((Up * Vn + Un * Vp) * xr.ufuncs.cos(u[lat_name] * degtorad) / \\\n utils.constants().R_earth * du_Zndlat) # [W/kg]\n energies['Mn_int'] = Mn_int\n if integrate:\n Mn = abs(_int_over_atmos(Mn_int, lat_name, lon_name, plevel_name, lon_dim=u[lon_name])) # [W/m^2]\n energies['Mn'] = Mn\n \n else:\n \n if ('Pe' in terms):\n # Compute the total eddy available potential energy, Pe [also commonly called \n # Ae] -----\n if 'temp_Z' not in locals():\n temp_Z = temp.mean(dim=lon_name)\n temp_z = temp - temp_Z\n Pe_int = gamma * utils.constants().C_pd / 2 * (temp_z ** 2).mean(dim=lon_name) # [J/kg]\n energies['Pe_int'] = Pe_int\n if integrate:\n Pe = _int_over_atmos(Pe_int, lat_name, lon_name, plevel_name, lon_dim=temp[lon_name]) # [J/m^2]\n energies['Pe'] = Pe\n \n if ('Ke' in terms):\n # Compute the total eddy kinetic energy, Ke -----\n if 'u_Z' not in locals():\n u_Z = u.mean(dim=lon_name)\n if 'v_Z' not in locals():\n v_Z = v.mean(dim=lon_name)\n u_z = u - u_Z\n v_z = v - v_Z\n Ke_int = 0.5 * (u_z ** 2 + v_z ** 2).mean(dim=lon_name) # [J/kg]\n energies['Ke_int'] = Ke_int\n if integrate:\n Ke = _int_over_atmos(Ke_int, lat_name, 
lon_name, plevel_name, lon_dim=u[lon_name]) # [J/m^2]\n energies['Ke'] = Ke\n \n if ('Ca' in terms):\n # Compute the rate of transfer of total available potential energy in the zonally \n # averaged temperature distribution (Pz) to total eddy available potential energy \n # (Pe), Ca -----\n if 'v_Z' not in locals():\n v_Z = v.mean(dim=lon_name)\n if 'temp_Z' not in locals():\n temp_Z = temp.mean(dim=lon_name)\n if 'omega_Z' not in locals():\n omega_Z = omega.mean(dim=lon_name)\n if 'theta_Z' not in locals():\n theta = temp * p_kap\n theta_Z = theta.mean(dim=lon_name)\n if 'dtemp_Zdlat' not in locals():\n dtemp_Zdlat = utils.differentiate_wrt(temp_Z, dim=lat_name, x=(temp_Z[lat_name] * degtorad))\n v_z = v - v_Z\n temp_z = temp - temp_Z\n omega_z = omega - omega_Z\n oT_Z = (omega_z * temp_z).mean(dim=lon_name)\n oT_A = utils.average(omega_z * temp_z, [lat_name, lon_name], weights=cos_lat)\n oT_Za = oT_Z - oT_A\n theta_Za = theta_Z - theta_A\n dtheta_Zadp = utils.differentiate_wrt(theta_Za, dim=plevel_name, x=(theta_Za[plevel_name] * 100))\n Ca_int = - gamma * utils.constants().C_pd * \\\n (((v_z * temp_z).mean(dim=lon_name) * dtemp_Zdlat / utils.constants().R_earth) + \\\n (p_kap * oT_Za * dtheta_Zadp)) # [W/kg]\n energies['Ca_int'] = Ca_int\n if integrate:\n Ca = _int_over_atmos(Ca_int, lat_name, lon_name, plevel_name, lon_dim=v[lon_name]) # [W/m^2]\n energies['Ca'] = Ca\n \n if ('Ce' in terms):\n # Compute the rate of transfer of total eddy available potential energy (Pe) to \n # total eddy kinetic energy (Ke), Ce -----\n if 'temp_Z' not in locals():\n temp_Z = temp.mean(dim=lon_name)\n if 'omega_Z' not in locals():\n omega_Z = omega.mean(dim=lon_name)\n temp_z = temp - temp_Z\n omega_z = omega - omega_Z\n Ce_int = - (utils.constants().R_d / (temp[plevel_name] * 100)) * \\\n (omega_z * temp_z).mean(dim=lon_name) # [W/kg] \n energies['Ce_int'] = Ce_int\n if integrate:\n Ce = _int_over_atmos(Ce_int, lat_name, lon_name, plevel_name, lon_dim=temp[lon_name]) # [W/m^2]\n energies['Ce'] = Ce\n \n if ('Ck' in terms):\n # Compute the rate of transfer of total eddy kinetic energy (Ke) to total kinetic \n # energy in zonally averaged motion (Kz), Ck -----\n if 'u_Z' not in locals():\n u_Z = u.mean(dim=lon_name)\n if 'v_Z' not in locals():\n v_Z = v.mean(dim=lon_name)\n if 'omega_Z' not in locals():\n omega_Z = omega.mean(dim=lon_name)\n u_z = u - u_Z\n v_z = v - v_Z\n omega_z = omega - omega_Z\n du_Zndlat = utils.differentiate_wrt(u_Z / cos_lat, dim=lat_name, x=(u_Z[lat_name] * degtorad))\n dv_Zdlat = utils.differentiate_wrt(v_Z, dim=lat_name, x=(v_Z[lat_name] * degtorad))\n du_Zdp = utils.differentiate_wrt(u_Z, dim=plevel_name, x=(u_Z[plevel_name] * 100))\n dv_Zdp = utils.differentiate_wrt(v_Z, dim=plevel_name, x=(v_Z[plevel_name] * 100))\n Ck_int = (u_z * v_z).mean(dim=lon_name) * cos_lat * du_Zndlat / utils.constants().R_earth + \\\n (u_z * omega_z).mean(dim=lon_name) * du_Zdp + \\\n (v_z ** 2).mean(dim=lon_name) * dv_Zdlat / utils.constants().R_earth + \\\n (v_z * omega_z).mean(dim=lon_name) * dv_Zdp - \\\n (u_z ** 2).mean(dim=lon_name) * v_Z * tan_lat / utils.constants().R_earth\n energies['Ck_int'] = Ck_int\n if integrate:\n Ck = _int_over_atmos(Ck_int, lat_name, lon_name, plevel_name, lon_dim=temp[lon_name]) # [W/m^2]\n energies['Ck'] = Ck\n \n if ('Gz' in terms):\n # Compute the rate of generation of zonal available potential energy due to the zonally\n # averaged heating, Gz -----\n if ('Cz' not in terms) | ('Ca' not in terms):\n raise ValueError('The rate of generation of zonal available 
potential energy, Gz, is computed from the sum of Cz and Ca. Please add these to the list, terms=[<terms>].')\n if spectral:\n warnings.warn('Rate of generation of zonal available potential energy is computed from the sum of Cz and Ca and cannot be computed in Fourier space. Returning Gz in real-space.')\n Ca_int = Rn_int.where(Rn_int.n > 0, drop=True).sum(dim='n').real # sum Rn to get Ca\n Gz_int = Cz_int + Ca_int\n energies['Gz_int'] = Gz_int\n if integrate:\n Gz = _int_over_atmos(Gz_int, lat_name, lon_name, plevel_name, lon_dim=temp[lon_name]) # [W/m^2]\n energies['Gz'] = Gz\n\n if ('Ge' in terms):\n # Compute the rate of generation of eddy available potential energy (Ae), Ge -----\n if ('Ce' not in terms) | ('Ca' not in terms):\n raise ValueError('The rate of generation of eddy available potential energy, Ge, is computed from the residual of Ce and Ca. Please add these to the list, terms=[<terms>].')\n if spectral:\n warnings.warn('The rate of generation of eddy available potential energy is computed from the residual of Ce and Ca and cannot be computed in Fourier space. Returning Ge in real-space.')\n Ce_int = Cn_int.where(Cn_int.n > 0, drop=True).sum(dim='n').real # sum Cn to get Ce\n if 'Ca_int' not in locals():\n Ca_int = Rn_int.where(Rn_int.n > 0, drop=True).sum(dim='n').real # sum Rn to get Ca\n Ge_int = Ce_int - Ca_int\n energies['Ge_int'] = Ge_int\n if integrate:\n Ge = _int_over_atmos(Ge_int, lat_name, lon_name, plevel_name, lon_dim=temp[lon_name]) # [W/m^2]\n energies['Ge'] = Ge\n \n if ('Dz' in terms):\n # Compute the rate of viscous dissipation of zonal kinetic energy, Dz -----\n if ('Cz' not in terms) | ('Ck' not in terms):\n raise ValueError('The rate of viscous dissipation of zonal kinetic energy, Dz, is computed from the residual of Cz and Ck. Please add these to the list, terms=[<terms>].')\n if spectral: \n warnings.warn('The rate of viscous dissipation of zonal kinetic energy, Dz, is computed from the residual of Cz and Ck and cannot be computed in Fourier space. Returning De in real-space.')\n Ck_int = Mn_int.where(Mn_int.n > 0, drop=True).sum(dim='n').real # sum Mn to get Ck\n Dz_int = Cz_int - Ck_int\n energies['Dz_int'] = Dz_int\n if integrate:\n Dz = _int_over_atmos(Dz_int, lat_name, lon_name, plevel_name, lon_dim=temp[lon_name]) # [W/m^2]\n energies['Dz'] = Dz\n\n if ('De' in terms):\n # Compute the rate of dissipation of eddy kinetic energy (Ke), De -----\n if ('Ce' not in terms) | ('Ck' not in terms):\n raise ValueError('The rate of viscous dissipation of eddy kinetic energy, De, is computed from the residual of Ce and Ck. Please add these to the list, terms=[<terms>].')\n if spectral:\n warnings.warn('The rate of viscous dissipation of eddy kinetic energy, De, is computed from the residual of Ce and Ck and cannot be computed in Fourier space. Returning De in real-space.')\n if 'Ce_int' not in locals():\n Ce_int = Cn_int.where(Cn_int.n > 0, drop=True).sum(dim='n').real # sum Cn to get Ce\n if 'Ck_int' not in locals():\n Ck_int = Mn_int.where(Mn_int.n > 0, drop=True).sum(dim='n').real # sum Mn to get Ck\n De_int = Ce_int - Ck_int\n energies['De_int'] = De_int\n if integrate:\n De = _int_over_atmos(De_int, lat_name, lon_name, plevel_name, lon_dim=temp[lon_name]) # [W/m^2]\n energies['De'] = De\n \n return energies\n\n\n# ===================================================================================================\ndef auto_merge(paths, preprocess=None, parallel=True, **kwargs):\n \"\"\"\n Automatically merge a split xarray Dataset. 
This is designed to behave like\n `xarray.open_mfdataset`, except it supports concatenation along multiple\n dimensions.\n Parameters\n ----------\n datasets : str or list of str or list of xarray.Dataset\n Either a glob expression or list of paths as you would pass to\n xarray.open_mfdataset, or a list of xarray datasets. If a list of\n datasets is passed, you should make sure that they are represented\n as dask arrays to avoid reading the whole dataset into memory.\n Returns\n -------\n xarray.Dataset\n The merged dataset.\n \"\"\"\n \n if parallel:\n # wrap the open_dataset, getattr, and preprocess with delayed\n open_ = dask.delayed(xr.open_dataset)\n getattr_ = dask.delayed(getattr)\n if preprocess is not None:\n preprocess = dask.delayed(preprocess)\n else:\n open_ = open_dataset\n getattr_ = getattr\n\n datasets = [open_(p, **kwargs) for p in paths]\n file_objs = [getattr_(ds, '_file_obj') for ds in datasets]\n if preprocess is not None:\n datasets = [preprocess(ds) for ds in datasets]\n\n if parallel:\n # calling compute here will return the datasets/file_objs lists,\n # the underlying datasets will still be stored as dask arrays\n datasets, file_objs = dask.compute(datasets, file_objs)\n\n def _combine_along_last_dim(datasets):\n merged = []\n\n # Determine the dimension along which the dataset is split\n split_dims = [d for d in datasets[0].dims if\n len(np.unique([ds[d].values[0] for ds in datasets])) > 1]\n\n # Concatenate along one of the split dimensions\n concat_dim = split_dims[-1]\n\n # Group along the remaining dimensions and concatenate within each\n # group.\n sorted_ds = sorted(datasets, key=lambda ds: tuple(ds[d].values[0]\n for d in split_dims))\n for _, group in itertools.groupby(\n sorted_ds,\n key=lambda ds: tuple(ds[d].values[0] for d in split_dims[:-1])\n ):\n merged.append(xr.auto_combine(group, concat_dim=concat_dim))\n\n return merged\n\n merged = datasets\n while len(merged) > 1:\n merged = _combine_along_last_dim(merged)\n\n return merged[0]"
] | [
[
"matplotlib.ticker.MaxNLocator",
"pandas.infer_freq",
"numpy.ndarray.flatten",
"numpy.timedelta64",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.tight_layout",
"matplotlib.rc",
"matplotlib.ticker.FixedLocator",
"matplotlib.pyplot.subplot",
"matplotlib.rcParams.update",
"numpy.shape",
"numpy.datetime64",
"matplotlib.pyplot.colorbar",
"numpy.linspace",
"pandas.Timestamp",
"numpy.unique",
"numpy.isnan"
]
] |
gonzalo-munillag/Private_AI_OpenMined | [
"c23da9cc1c914d10646a0c0bc1a2497fe2cbaaca",
"c23da9cc1c914d10646a0c0bc1a2497fe2cbaaca"
] | [
"Foundations_of_Private_Computation/Split_Learning/concepts-definitions-code/ite-repo/demos/analytical_values/demo_k_ejt1.py",
"Foundations_of_Private_Computation/Split_Learning/concepts-definitions-code/ite-repo/demos/analytical_values/demo_i_renyi.py"
] | [
"#!/usr/bin/env python3\n\n\"\"\" Demo for exponentiated Jensen-Tsallis kernel-1 estimators.\n\nAnalytical vs estimated value is illustrated for spherical normal random\nvariables.\n\n\"\"\"\n\nfrom numpy import eye\nfrom numpy.random import rand, multivariate_normal, randn\nfrom scipy import arange, zeros, ones\nimport matplotlib.pyplot as plt\n\nfrom ite.cost.x_factory import co_factory\nfrom ite.cost.x_analytical_values import analytical_value_k_ejt1\n\n\ndef main():\n # parameters:\n dim = 1 # dimension of the distribution\n num_of_samples_v = arange(1000, 50*1000+1, 2000)\n u = 0.8 # >0, parameter of the Jensen-Tsallis kernel\n cost_name = 'MKExpJT1_HT' # dim >= 1\n\n # initialization:\n alpha = 2\n # fixed; parameter of the Jensen-Tsallis kernel; for alpha = 2 we have\n # explicit formula for the Tsallis entropy, and hence for the\n # Jensen-Tsallis kernel(-1).\n\n distr = 'normal' # fixed\n num_of_samples_max = num_of_samples_v[-1]\n length = len(num_of_samples_v)\n co = co_factory(cost_name, mult=True, alpha=alpha, u=u) # cost object\n k_hat_v = zeros(length) # vector of estimated kernel values\n\n # distr, dim -> samples (y1,y2), distribution parameters (par1,par2), \n # analytical value (k):\n if distr == 'normal':\n # generate samples (y1,y2); y1~N(m1,s1^2xI), y2~N(m2,s2^2xI):\n m1, s1 = randn(dim), rand(1)\n m2, s2 = randn(dim), rand(1)\n y1 = multivariate_normal(m1, s1**2 * eye(dim), num_of_samples_max)\n y2 = multivariate_normal(m2, s2**2 * eye(dim), num_of_samples_max)\n \n par1 = {\"mean\": m1, \"std\": s1}\n par2 = {\"mean\": m2, \"std\": s2}\n else:\n raise Exception('Distribution=?') \n \n k = analytical_value_k_ejt1(distr, distr, u, par1, par2)\n \n # estimation:\n for (tk, num_of_samples) in enumerate(num_of_samples_v):\n k_hat_v[tk] = co.estimation(y1[0:num_of_samples],\n y2[0:num_of_samples]) # broadcast\n print(\"tk={0}/{1}\".format(tk+1, length))\n \n # plot: \n plt.plot(num_of_samples_v, k_hat_v, num_of_samples_v, ones(length)*k)\n plt.xlabel('Number of samples')\n plt.ylabel('Exponentiated Jensen-Tsallis kernel-1')\n plt.legend(('estimation', 'analytical value'), loc='best')\n plt.title(\"Estimator: \" + cost_name)\n plt.show()\n\n\nif __name__ == \"__main__\":\n main()\n",
"#!/usr/bin/env python3\n\n\"\"\" Demo for Renyi mutual information estimators.\n\nAnalytical vs estimated value is illustrated for normal random variables.\n\n\"\"\"\n\nfrom numpy.random import rand, multivariate_normal\nfrom numpy import arange, zeros, dot, ones\nimport matplotlib.pyplot as plt\n\nfrom ite.cost.x_factory import co_factory\nfrom ite.cost.x_analytical_values import analytical_value_i_renyi\n\n\ndef main():\n # parameters:\n alpha = 0.7 # parameter of Renyi mutual information, \\ne 1\n dim = 2 # >=2; dimension of the distribution\n num_of_samples_v = arange(100, 10*1000+1, 500)\n\n cost_name = 'MIRenyi_DR'\n # cost_name = 'MIRenyi_HR'\n \n # initialization:\n distr = 'normal' # distribution; fixed \n ds = ones(dim, dtype='int') # dimensions of the 'subspaces'\n num_of_samples_max = num_of_samples_v[-1]\n length = len(num_of_samples_v)\n co = co_factory(cost_name, mult=True, alpha=alpha) # cost object\n # vector of estimated mutual information values:\n i_hat_v = zeros(length)\n\n # distr, dim -> samples (y), distribution parameters (par), analytical \n # value (i):\n if distr == 'normal':\n # mean (m), covariance matrix (c):\n m = rand(dim) \n l = rand(dim, dim)\n c = dot(l, l.T)\n \n # generate samples (y~N(m,c)): \n y = multivariate_normal(m, c, num_of_samples_max)\n \n par = {\"cov\": c} \n else:\n raise Exception('Distribution=?')\n \n i = analytical_value_i_renyi(distr, alpha, par)\n \n # estimation:\n for (tk, num_of_samples) in enumerate(num_of_samples_v):\n i_hat_v[tk] = co.estimation(y[0:num_of_samples], ds) # broadcast\n print(\"tk={0}/{1}\".format(tk+1, length))\n \n # plot: \n plt.plot(num_of_samples_v, i_hat_v, num_of_samples_v, ones(length)*i)\n plt.xlabel('Number of samples')\n plt.ylabel('Renyi mutual information')\n plt.legend(('estimation', 'analytical value'), loc='best')\n plt.title(\"Estimator: \" + cost_name)\n plt.show()\n\n\nif __name__ == \"__main__\":\n main()\n"
] | [
[
"numpy.eye",
"matplotlib.pyplot.legend",
"scipy.zeros",
"numpy.random.randn",
"scipy.ones",
"matplotlib.pyplot.title",
"matplotlib.pyplot.show",
"matplotlib.pyplot.ylabel",
"numpy.random.rand",
"scipy.arange",
"matplotlib.pyplot.xlabel"
],
[
"numpy.ones",
"matplotlib.pyplot.legend",
"numpy.zeros",
"numpy.arange",
"matplotlib.pyplot.title",
"numpy.random.multivariate_normal",
"matplotlib.pyplot.show",
"matplotlib.pyplot.ylabel",
"numpy.random.rand",
"numpy.dot",
"matplotlib.pyplot.xlabel"
]
] |
jiangzoi/incubator-tvm | [
"144c6f45f7217b9df2f5605e06f0903e470ac11c",
"144c6f45f7217b9df2f5605e06f0903e470ac11c",
"144c6f45f7217b9df2f5605e06f0903e470ac11c"
] | [
"tests/python/contrib/test_gemm_acc32_vnni.py",
"tests/python/relay/test_pass_auto_quantize.py",
"topi/tests/python/test_topi_dense_tensorcore.py"
] | [
"# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n# pylint: disable=import-self, invalid-name, unused-argument, too-many-lines, len-as-condition\n\nimport tvm\nfrom tvm import te\nimport numpy as np\nfrom topi.x86.tensor_intrin import dot_16x1x16_uint8_int8_int32_cascadelake\nfrom topi.x86.tensor_intrin import dot_16x1x16_uint8_int8_int32\nimport pytest\n\n\[email protected](\"skip because feature not enabled\")\ndef test_fc_int8_acc32():\n m = 1024\n n = 1024\n k = 1024\n\n X = te.placeholder((m, k), name='X', dtype=\"uint8\")\n W = te.placeholder((n, k), name='W', dtype=\"int8\")\n\n peak = 280\n print(\"Peak {} Gops/s\".format(peak))\n memory_ops = m * k + n * k + 2 * m * n\n gops_per_mm = 2 * m * n * k\n\n # For LLVM < 8.0, it shows \"'cascadelake' is not a recognized processor for this target\n # (ignoring processor)\" error with the following setting. After LLVM 8.0 is enabled in the\n # test, we should use cascadelake setting.\n def verify(target=\"llvm -mcpu=cascadelake\"):\n if not tvm.runtime.enabled(target):\n print(\"skip because %s is not enabled...\" % target)\n return\n\n ctx = tvm.context(target, 0)\n pc = dot_16x1x16_uint8_int8_int32_cascadelake()\n ak = te.reduce_axis((0, k), name='k')\n packedW = te.placeholder(\n (n // 16, 16 * (k // 4), 4), name='packedW', dtype=\"int8\")\n\n t_fc = te.compute((m, n), lambda i, j: te.sum(X[i, ak].astype(\n \"int32\") * packedW[j / 16, (ak / 4) * 16 + j % 16, ak % 4].astype(\"int32\"), axis=ak), name=\"F\")\n t_sch = te.create_schedule(t_fc.op)\n a_x, a_y = t_fc.op.axis\n a_k, = t_fc.op.reduce_axis\n\n a_yo, a_yi = t_sch[t_fc].split(a_y, factor=16)\n a_xo, a_xi = t_sch[t_fc].split(a_x, factor=32)\n a_ko, a_ki = t_sch[t_fc].split(a_k, factor=4)\n a_koo, a_koi = t_sch[t_fc].split(a_ko, factor=4)\n t_sch[t_fc].reorder(a_yo, a_xo, a_xi, a_koo, a_koi, a_yi, a_ki)\n\n t_sch[t_fc].unroll(a_koi)\n t_sch[t_fc].tensorize(a_yi, pc)\n\n t_func = tvm.build(t_sch, [X, packedW, t_fc], target, name=\"intrinsic\")\n t_evaluator = t_func.time_evaluator(t_func.entry_name, ctx, number=10)\n\n # generate the plain data\n a_ = np.random.uniform(1, 10, size=(m, k)).astype(\"uint8\")\n b_ = np.random.uniform(1, 10, size=(n, k)).astype(\"int8\")\n\n packW = np.random.uniform(1, 10, size=(\n n // 16, 16 * (k // 4), 4)).astype(\"int8\")\n # This occurs in pre_compute stage\n for r_idx in range(n // 16):\n for s_idx in range(16 * (k // 4)):\n for t_idx in range(4):\n packW[r_idx][s_idx][t_idx] = b_[r_idx * 16 + s_idx %\n 16][(s_idx // 16) * 4 + t_idx]\n\n x = tvm.nd.array(a_, ctx)\n w = tvm.nd.array(packW, ctx)\n y = tvm.nd.array(np.zeros((m, n), dtype=\"int32\"), ctx)\n result = t_evaluator(x, w, y)\n\n gops_per_sec = gops_per_mm / result.mean / 1e9\n # verify the correctness\n tvm.testing.assert_allclose(y.asnumpy(), 
np.dot(a_, b_.T), rtol=0)\n print('Tensorization: running time: {:.3f} ms, {:.2f} Gops/s, effiency: {:.2f}'.format(\n result.mean * 1000, gops_per_sec, gops_per_sec / peak))\n t_func.export_library(\"tensorize_acc32.o\")\n\n verify()\n\n\nif __name__ == \"__main__\":\n # The test requires Cascade Lake and newer Intel machines to generate the\n # correct AVX512 VNNI instruction. So, disabling the test.\n\n # test_fc_int8_acc32()\n pass\n",
"# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\nimport numpy as np\nimport pytest\n\nimport tvm\nfrom tvm import te\nfrom tvm import relay\nfrom tvm.relay import testing\nfrom tvm.relay.expr import Call\n\n\ndef quantize_and_build(out):\n f = relay.Function(relay.analysis.free_vars(out), out)\n mod, params = testing.create_workload(f)\n\n with relay.quantize.qconfig(skip_conv_layers=[]):\n qmod = relay.quantize.quantize(mod, params)\n\n relay.build(qmod, \"llvm\", params=params)\n\n return qmod\n\ndef test_mul_rewrite():\n \"\"\"a test case where rhs of mul is not constant\"\"\"\n data = relay.var(\"data\", shape=(1, 16, 64, 64))\n multiplier = relay.sigmoid(relay.var(\"data\", shape=(1, 16, 1, 1)))\n conv = relay.nn.conv2d(data, relay.var(\"weight\"),\n kernel_size=(3, 3),\n padding=(1, 1),\n channels=16)\n act = relay.nn.relu(data=conv)\n\n quantize_and_build(act * multiplier)\n\n pool = relay.nn.global_avg_pool2d(data=act)\n\n quantize_and_build(act * pool)\n\ndef test_batch_flatten_rewrite():\n\n data = relay.var(\"data\", shape=(1, 16, 64, 64), dtype=\"float32\")\n\n out = relay.nn.conv2d(data, relay.var(\"weight\"),\n kernel_size=(3, 3),\n padding=(1, 1),\n channels=16)\n\n out = relay.nn.batch_flatten(out)\n\n qmod = quantize_and_build(out)\n\n def _check_batch_flatten(node):\n if isinstance(node, Call):\n if(node.op.name == \"nn.batch_flatten\"):\n assert node.checked_type.dtype == \"int8\"\n\n # check if batch_flatten is quantized\n relay.analysis.post_order_visit(qmod[\"main\"], _check_batch_flatten)\n\ndef get_calibration_dataset(input_name):\n dataset = []\n for i in range(5):\n data = np.random.uniform(size=(1, 3, 224, 224))\n dataset.append({input_name: data})\n return dataset\n\n\[email protected](\"create_target\", [True, False])\ndef test_calibrate_target(create_target):\n mod, params = testing.resnet.get_workload(num_layers=18)\n dataset = get_calibration_dataset(\"data\")\n with relay.quantize.qconfig(calibrate_mode=\"kl_divergence\"):\n if create_target:\n with tvm.target.create(\"llvm\"):\n relay.quantize.quantize(mod, params, dataset)\n else:\n # current_target = None\n relay.quantize.quantize(mod, params, dataset)\n\n\ndef test_calibrate_memory_bound():\n mod, params = testing.resnet.get_workload(num_layers=18)\n dataset = get_calibration_dataset(\"data\")\n import multiprocessing\n num_cpu = multiprocessing.cpu_count()\n with relay.quantize.qconfig(calibrate_mode=\"kl_divergence\",\n calibrate_chunk_by=num_cpu):\n relay.quantize.quantize(mod, params, dataset)\n\n\nif __name__ == \"__main__\":\n test_mul_rewrite()\n test_batch_flatten_rewrite()\n test_calibrate_target(False)\n test_calibrate_target(True)\n test_calibrate_memory_bound()\n",
"# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n# pylint: disable=invalid-name, too-many-locals, too-many-statements, unused-argument\n\"\"\"Test code for dense tensorcore operator\"\"\"\nimport numpy as np\nimport tvm\nimport topi\nimport topi.testing\nfrom topi.util import get_const_tuple\nfrom tvm import te\nfrom tvm.contrib.pickle_memoize import memoize\nfrom tvm.contrib import nvcc\n\n\n_dense_implement = {\n \"gpu\": [(topi.cuda.dense_tensorcore, topi.cuda.schedule_dense_tensorcore)]\n}\n\ndef verify_dense(batch, in_dim, out_dim, use_bias=True):\n \"\"\"Dense tensorcore verify function\"\"\"\n A = te.placeholder((batch, in_dim), name='A')\n B = te.placeholder((out_dim, in_dim), name='B')\n C = te.placeholder((out_dim,), name='C')\n dtype = A.dtype\n\n # use memoize to pickle the test data for next time use\n @memoize(\"topi.tests.test_topi_dense_tensorcore\")\n def get_ref_data():\n a_np = np.random.uniform(size=(batch, in_dim)).astype(dtype)\n b_np = np.random.uniform(size=(out_dim, in_dim)).astype(dtype)\n c_np = np.random.uniform(size=(out_dim,)).astype(dtype)\n if use_bias:\n d_np = np.maximum(np.dot(a_np, b_np.T) + c_np, 0.0)\n else:\n d_np = np.maximum(np.dot(a_np, b_np.T), 0.0)\n return (a_np, b_np, c_np, d_np)\n # get the test data\n a_np, b_np, c_np, d_np = get_ref_data()\n\n def check_device(device):\n ctx = tvm.context(device, 0)\n if not ctx.exist:\n print(\"Skip because %s is not enabled\" % device)\n return\n if not nvcc.have_tensorcore(ctx.compute_version):\n print(\"skip because gpu does not support Tensor Cores\")\n return\n print(\"Running on target: %s\" % device)\n for fcompute, fschedule in topi.testing.dispatch(device, _dense_implement):\n with tvm.target.create(device):\n D = fcompute(A, B, C if use_bias else None)\n D = topi.nn.relu(D)\n s = fschedule([D])\n a = tvm.nd.array(a_np, ctx)\n b = tvm.nd.array(b_np, ctx)\n c = tvm.nd.array(c_np, ctx)\n d = tvm.nd.array(np.zeros(get_const_tuple(D.shape), dtype=dtype), ctx)\n f = tvm.build(s, [A, B, C, D], device, name=\"dense\")\n f(a, b, c, d)\n tvm.testing.assert_allclose(d.asnumpy(), d_np, rtol=1e-3)\n\n\n for device in ['cuda']:\n check_device(device)\n\n\ndef test_dense_tensorcore():\n \"\"\"Test cases\"\"\"\n verify_dense(8, 16, 32, use_bias=True)\n verify_dense(16, 32, 16, use_bias=True)\n verify_dense(256, 1024, 1024, use_bias=True)\n verify_dense(1000, 1024, 1024, use_bias=False)\n verify_dense(256, 2048, 1000, use_bias=False)\n\n\nif __name__ == \"__main__\":\n test_dense_tensorcore()\n"
] | [
[
"numpy.random.uniform",
"numpy.dot",
"numpy.zeros"
],
[
"numpy.random.uniform"
],
[
"numpy.random.uniform",
"numpy.dot"
]
] |
kifarid/ray | [
"43c97c2afb979987be82fa50048674e9b6776d5d"
] | [
"rllib/agents/marwil/tests/test_marwil.py"
] | [
"import numpy as np\nimport os\nfrom pathlib import Path\nimport unittest\n\nimport ray\nimport ray.rllib.agents.marwil as marwil\nfrom ray.rllib.evaluation.postprocessing import compute_advantages\nfrom ray.rllib.offline import JsonReader\nfrom ray.rllib.utils.framework import try_import_tf, try_import_torch\nfrom ray.rllib.utils.test_utils import check, check_compute_single_action, \\\n framework_iterator\n\ntf1, tf, tfv = try_import_tf()\ntorch, _ = try_import_torch()\n\n\nclass TestMARWIL(unittest.TestCase):\n @classmethod\n def setUpClass(cls):\n ray.init(num_cpus=4)\n\n @classmethod\n def tearDownClass(cls):\n ray.shutdown()\n\n def test_marwil_compilation_and_learning_from_offline_file(self):\n \"\"\"Test whether a MARWILTrainer can be built with all frameworks.\n\n Learns from a historic-data file.\n To generate this data, first run:\n $ ./train.py --run=PPO --env=CartPole-v0 \\\n --stop='{\"timesteps_total\": 50000}' \\\n --config='{\"output\": \"/tmp/out\", \"batch_mode\": \"complete_episodes\"}'\n \"\"\"\n rllib_dir = Path(__file__).parent.parent.parent.parent\n print(\"rllib dir={}\".format(rllib_dir))\n data_file = os.path.join(rllib_dir, \"tests/data/cartpole/large.json\")\n print(\"data_file={} exists={}\".format(data_file,\n os.path.isfile(data_file)))\n\n config = marwil.DEFAULT_CONFIG.copy()\n config[\"num_workers\"] = 2\n config[\"evaluation_num_workers\"] = 1\n config[\"evaluation_interval\"] = 2\n # Evaluate on actual environment.\n config[\"evaluation_config\"] = {\"input\": \"sampler\"}\n # Learn from offline data.\n config[\"input\"] = [data_file]\n num_iterations = 350\n min_reward = 70.0\n\n # Test for all frameworks.\n for _ in framework_iterator(config, frameworks=(\"tf\", \"torch\")):\n trainer = marwil.MARWILTrainer(config=config, env=\"CartPole-v0\")\n learnt = False\n for i in range(num_iterations):\n eval_results = trainer.train().get(\"evaluation\")\n if eval_results:\n print(\"iter={} R={} \".format(\n i, eval_results[\"episode_reward_mean\"]))\n # Learn until some reward is reached on an actual live env.\n if eval_results[\"episode_reward_mean\"] > min_reward:\n print(\"learnt!\")\n learnt = True\n break\n\n if not learnt:\n raise ValueError(\n \"MARWILTrainer did not reach {} reward from expert \"\n \"offline data!\".format(min_reward))\n\n check_compute_single_action(\n trainer, include_prev_action_reward=True)\n\n trainer.stop()\n\n def test_marwil_loss_function(self):\n \"\"\"\n To generate the historic data used in this test case, first run:\n $ ./train.py --run=PPO --env=CartPole-v0 \\\n --stop='{\"timesteps_total\": 50000}' \\\n --config='{\"output\": \"/tmp/out\", \"batch_mode\": \"complete_episodes\"}'\n \"\"\"\n rllib_dir = Path(__file__).parent.parent.parent.parent\n print(\"rllib dir={}\".format(rllib_dir))\n data_file = os.path.join(rllib_dir, \"tests/data/cartpole/small.json\")\n print(\"data_file={} exists={}\".format(data_file,\n os.path.isfile(data_file)))\n config = marwil.DEFAULT_CONFIG.copy()\n config[\"num_workers\"] = 0 # Run locally.\n # Learn from offline data.\n config[\"input\"] = [data_file]\n\n for fw, sess in framework_iterator(config, session=True):\n reader = JsonReader(inputs=[data_file])\n batch = reader.next()\n\n trainer = marwil.MARWILTrainer(config=config, env=\"CartPole-v0\")\n policy = trainer.get_policy()\n model = policy.model\n\n # Calculate our own expected values (to then compare against the\n # agent's loss output).\n cummulative_rewards = compute_advantages(\n batch, 0.0, config[\"gamma\"], 1.0, False, 
False)[\"advantages\"]\n if fw == \"torch\":\n cummulative_rewards = torch.tensor(cummulative_rewards)\n if fw != \"tf\":\n batch = policy._lazy_tensor_dict(batch)\n model_out, _ = model.from_batch(batch)\n vf_estimates = model.value_function()\n if fw == \"tf\":\n model_out, vf_estimates = \\\n policy.get_session().run([model_out, vf_estimates])\n adv = cummulative_rewards - vf_estimates\n if fw == \"torch\":\n adv = adv.detach().cpu().numpy()\n adv_squared = np.mean(np.square(adv))\n c_2 = 100.0 + 1e-8 * (adv_squared - 100.0)\n c = np.sqrt(c_2)\n exp_advs = np.exp(config[\"beta\"] * (adv / c))\n dist = policy.dist_class(model_out, model)\n logp = dist.logp(batch[\"actions\"])\n if fw == \"torch\":\n logp = logp.detach().cpu().numpy()\n elif fw == \"tf\":\n logp = sess.run(logp)\n # Calculate all expected loss components.\n expected_vf_loss = 0.5 * adv_squared\n expected_pol_loss = -1.0 * np.mean(exp_advs * logp)\n expected_loss = \\\n expected_pol_loss + config[\"vf_coeff\"] * expected_vf_loss\n\n # Calculate the algorithm's loss (to check against our own\n # calculation above).\n batch.set_get_interceptor(None)\n postprocessed_batch = policy.postprocess_trajectory(batch)\n loss_func = marwil.marwil_tf_policy.marwil_loss if fw != \"torch\" \\\n else marwil.marwil_torch_policy.marwil_loss\n if fw != \"tf\":\n policy._lazy_tensor_dict(postprocessed_batch)\n loss_out = loss_func(policy, model, policy.dist_class,\n postprocessed_batch)\n else:\n loss_out, v_loss, p_loss = policy.get_session().run(\n [policy._loss, policy.loss.v_loss, policy.loss.p_loss],\n feed_dict=policy._get_loss_inputs_dict(\n postprocessed_batch, shuffle=False))\n\n # Check all components.\n if fw == \"torch\":\n check(policy.v_loss, expected_vf_loss, decimals=4)\n check(policy.p_loss, expected_pol_loss, decimals=4)\n elif fw == \"tf\":\n check(v_loss, expected_vf_loss, decimals=4)\n check(p_loss, expected_pol_loss, decimals=4)\n else:\n check(policy.loss.v_loss, expected_vf_loss, decimals=4)\n check(policy.loss.p_loss, expected_pol_loss, decimals=4)\n check(loss_out, expected_loss, decimals=3)\n\n\nif __name__ == \"__main__\":\n import pytest\n import sys\n sys.exit(pytest.main([\"-v\", __file__]))\n"
] | [
[
"numpy.sqrt",
"numpy.mean",
"numpy.square",
"numpy.exp"
]
] |
LucaMalavolta/q2 | [
"d4cd62c3ea898c99334ea84e2b41ec75db9558f7"
] | [
"config.py"
] | [
"import os\r\nimport logging\r\nimport matplotlib.pyplot as plt\r\n\r\nlogger = logging.getLogger(__name__)\r\n\r\n\r\npath = os.path.dirname(os.path.realpath(__file__))\r\npath = os.path.join(path, 'Data')\r\n\r\nCOLORTEFF_PATH = os.path.join(path, 'ColorTeff')\r\nMODATM_PATH = os.path.join(path, 'ModelAtmospheres')\r\nISOCHRONES_PATH = os.path.join(path, 'Isochrones')\r\nOTHER_PATH = os.path.join(path, 'Other')\r\n\r\nplt.rc(\"font\", family='serif', serif='Ubuntu', monospace='Ubuntu Mono', \\\r\n size=14)\r\nplt.rc(\"axes\", labelsize=15, titlesize=12)\r\nplt.rc(\"xtick\", top=True, direction='in', labelsize=14)\r\nplt.rc(\"xtick.major\", size=8, width=1)\r\nplt.rc(\"ytick\", right=True, direction='in', labelsize=14)\r\nplt.rc(\"ytick.major\", size=8, width=1)\r\nplt.rc(\"lines\", markersize=10, markeredgewidth=2)\r\nplt.rc(\"lines\", linewidth=3)\r\n\r\ndef moog_is_available():\r\n \"\"\"You should be able to run MOOGSILENT from the command line in order\r\n to use the MOOG features included in q2. This function checks if\r\n MOOG is available on your system. If False, you wont be able to\r\n connect q2 to MOOG and many things will fail.\r\n \"\"\"\r\n if os.system('which MOOGSILENT >/dev/null'):\r\n logger.warning(\"MOOGSILENT is not available\")\r\n return False\r\n else:\r\n logger.info(\"MOOGSILENT is available\")\r\n return True\r\n\r\ndef data_are_available():\r\n \"\"\"q2 needs data files with model atmosphere and isochrone grids.\r\n These files can be downloaded from:\r\n http://www.astrochasqui.com/projects/astro/share/q2Data.tar.gz\r\n They need to be extracted inside the q2 directory.\r\n 'tar xvfz q2Data.tar.gz' will create the Data folder.\r\n \"\"\"\r\n if os.path.exists(path):\r\n logger.info(\"Data folder exists\")\r\n return True\r\n else:\r\n logger.warning(\"Data folder does not exist. See the 'Data' section \"\\\r\n \"at https://github.com/astroChasqui/q2\")\r\n return False\r\n"
] | [
[
"matplotlib.pyplot.rc"
]
] |
SeHwanJoo/mmdetection_body | [
"1e1cadc6df91926fc99c4afbae383df0ea9cfed3"
] | [
"mmdet/models/seg_heads/panoptic_fpn_head.py"
] | [
"import torch\nimport torch.nn as nn\nfrom mmcv.runner import ModuleList\n\nfrom ..builder import HEADS\nfrom ..utils import ConvUpsample\nfrom .base_semantic_head import BaseSemanticHead\n\n\[email protected]_module()\nclass PanopticFPNHead(BaseSemanticHead):\n \"\"\"PanopticFPNHead used in Panoptic FPN.\n\n Arg:\n num_classes (int): Number of classes, including all stuff\n classes and one thing class.\n in_channels (int): Number of channels in the input feature\n map.\n inner_channels (int): Number of channels in inner features.\n start_level (int): The start level of the input features\n used in PanopticFPN.\n end_level (int): The end level of the used features, the\n `end_level`-th layer will not be used.\n fg_range (tuple): Range of the foreground classes.\n bg_range (tuple): Range of the background classes.\n conv_cfg (dict): Dictionary to construct and config\n conv layer. Default: None.\n norm_cfg (dict): Dictionary to construct and config norm layer.\n Use ``GN`` by default.\n init_cfg (dict or list[dict], optional): Initialization config dict.\n loss_seg (dict): the loss of the semantic head.\n \"\"\"\n\n def __init__(self,\n num_classes,\n in_channels=256,\n inner_channels=128,\n start_level=0,\n end_level=4,\n fg_range=(1, 80),\n bg_range=(81, 133),\n conv_cfg=None,\n norm_cfg=dict(type='GN', num_groups=32, requires_grad=True),\n init_cfg=None,\n loss_seg=dict(\n type='CrossEntropyLoss', ignore_index=-1,\n loss_weight=1.0)):\n super(PanopticFPNHead, self).__init__(num_classes, init_cfg, loss_seg)\n self.fg_range = fg_range\n self.bg_range = bg_range\n self.fg_nums = self.fg_range[1] - self.fg_range[0] + 1\n self.bg_nums = self.bg_range[1] - self.bg_range[0] + 1\n # Used feature layers are [start_level, end_level)\n self.start_level = start_level\n self.end_level = end_level\n self.num_stages = end_level - start_level\n self.inner_channels = inner_channels\n\n self.conv_upsample_layers = ModuleList()\n for i in range(start_level, end_level):\n self.conv_upsample_layers.append(\n ConvUpsample(\n in_channels,\n inner_channels,\n num_layers=i if i > 0 else 1,\n num_upsample=i if i > 0 else 0,\n conv_cfg=conv_cfg,\n norm_cfg=norm_cfg,\n ))\n self.conv_logits = nn.Conv2d(inner_channels, num_classes, 1)\n\n def _set_things_to_void(self, gt_semantic_seg):\n \"\"\"Merge thing classes to one class.\"\"\"\n gt_semantic_seg = gt_semantic_seg.int()\n fg_mask = (gt_semantic_seg >= self.fg_range[0]) * (\n gt_semantic_seg <= self.fg_range[1])\n bg_mask = (gt_semantic_seg >= self.bg_range[0]) * (\n gt_semantic_seg <= self.bg_range[1])\n\n new_gt_seg = fg_mask.int() * (self.bg_nums + 1)\n new_gt_seg = torch.where(bg_mask, gt_semantic_seg - self.fg_nums,\n new_gt_seg)\n return new_gt_seg\n\n def loss(self, seg_preds, gt_semantic_seg, label_bias=-1):\n \"\"\"The loss of PanopticFPN head.\n\n Things classes will be merged to one class in PanopticFPN.\n \"\"\"\n gt_semantic_seg = self._set_things_to_void(gt_semantic_seg)\n return super().loss(seg_preds, gt_semantic_seg, label_bias)\n\n def init_weights(self):\n super().init_weights()\n nn.init.normal_(self.conv_logits.weight.data, 0, 0.01)\n self.conv_logits.bias.data.zero_()\n\n def forward(self, x):\n # the number of subnets must be not more than\n # the length of features.\n assert self.num_stages <= len(x)\n\n feats = []\n for i, layer in enumerate(self.conv_upsample_layers):\n f = layer(x[self.start_level + i])\n feats.append(f)\n\n feats = torch.sum(torch.stack(feats, dim=0), dim=0)\n seg_preds = self.conv_logits(feats)\n out = 
dict(seg_preds=seg_preds, feats=feats)\n return out\n"
] | [
[
"torch.where",
"torch.stack",
"torch.nn.Conv2d",
"torch.nn.init.normal_"
]
] |
AndresGarciaEscalante/bstld | [
"cc37fb3388b7731be9e76fd1c4e2be13b6716afe"
] | [
"tf_object_detection/to_tfrecords.py"
] | [
"#!/usr/bin/env python3\n\"\"\"\nCreates full-image tfrecords to use the Bosch Small Traffic Lights Dataset\nwith the Tensorflow Object Detection API.\n\nThe training set is split into training and validation. Tfrecords are created\nfor a training, validation, and test set. Labels are grouped by their respective\ncolors to simplify training and because the test-set does not contain any arrows.\n\nDepending on the training method, you may want to look into creating random crops\nfrom the images which can increase training performance due to translated inputs.\nThe tfrecords come without any image augmentation.\n\nThe created tfrecords will be about 18GB.\n\nUsage:\n In the folder with the extracted traffic lights dataset, run\n python /path/to/this/file/to_tfrecords.py\n and it will create the tfrecords there.\n\nThe path of the annotation files, tfrecords, and dataset folder can be specified.\nNote that this is a tutorial file. There are only few checks and no logging.\n\"\"\"\n\nimport argparse\nfrom collections import OrderedDict, defaultdict\nimport hashlib\nimport os\nfrom random import shuffle\n\nimport cv2\nimport tensorflow as tf\nimport tqdm\n\n# https://github.com/tensorflow/models/blob/master/research/object_detection/g3doc/installation.md\nfrom object_detection.utils import dataset_util\n\nimport sys\n \n# getting the name of the directory\n# where the this file is present.\ncurrent = os.path.dirname(os.path.realpath(__file__))\n \n# Getting the parent directory name\n# where the current directory is present.\nparent = os.path.dirname(current)\n \n# adding the parent directory to \n# the sys.path.\nsys.path.append(parent)\n\nfrom read_label_file import get_all_labels\nfrom tf_object_detection import constants\n\n\ndef label_id(label_string):\n \"\"\" For detections without classification \"\"\"\n # For object proposals only, you could return 1\n return constants.TF_ID_MAP[constants.SIMPLIFIED_CLASSES[label_string]]\n\n\ndef modified_label_string(label_string):\n \"\"\" To simplify the problem, training classes are grouped by color \"\"\"\n return constants.SIMPLIFIED_CLASSES[label_string].encode('utf8')\n\n\ndef list_of_dicts_to_dict_of_lists(list_of_dicts):\n \"\"\" [{'a': 0, 'b':3}, {'a': 3, 'b':5}] --> {'a': [0, 3], 'b': [3, 5]}\"\"\"\n assert isinstance(list_of_dicts, list)\n dict_lists = defaultdict(list)\n for some_dict in list_of_dicts:\n for key, value in some_dict.items():\n dict_lists[key].append(value)\n return dict_lists\n\n\ndef clip(some_value):\n \"\"\" Clip values outside [0, 1]. float -> float \"\"\"\n # Just in case some very eager annotators detected lights outside the image. 
It happens\n return max(0, min(some_value, 1))\n\n\ndef create_object_detection_tfrecords(labels, tfrecords_path, dataset_folder, set_name=''):\n \"\"\" Creates a tfrecord dataset specific to tensorflow/models/research/objection_detection\n params:\n labels: list of annotations as defined in annotation yamls\n tfrecords_path: output path to create tfrecords\n dataset_folder: path to bstld folder, must include rgb directory\n \"\"\"\n\n #shuffle(labels)\n writer = tf.io.TFRecordWriter(tfrecords_path)\n for label in tqdm.tqdm(labels, desc='Creating {}-set'.format(set_name)):\n image_path = os.path.join(dataset_folder, label['path'])\n image = cv2.imread(image_path)\n if image is None:\n print('Did you extract the training, validation, and additional images?')\n raise IOError('Missing: {}'.format(image_path))\n height, width, _ = image.shape\n\n boxes = list_of_dicts_to_dict_of_lists(label['boxes'])\n classes = boxes['label']\n xmin = list(map(lambda x: clip(x / float(width)), boxes['x_min']))\n ymin = list(map(lambda y: clip(y / float(height)), boxes['y_min']))\n xmax = list(map(lambda x: clip(x / float(width)), boxes['x_max']))\n ymax = list(map(lambda y: clip(y / float(height)), boxes['y_max']))\n\n assert len(xmin) == len(xmax) == len(ymin)\n assert len(ymax) == len(classes) == len(label['boxes'])\n\n if not classes:\n continue # We don't need empty images, there are enough negatives\n\n _, image = cv2.imencode('.png', image) # Assuming that works\n image = image.tostring()\n sha256 = hashlib.sha256(image).hexdigest()\n image_format = 'png'\n complete_example = tf.train.Example(features=tf.train.Features(feature={\n 'image/height': dataset_util.int64_feature(height),\n 'image/width': dataset_util.int64_feature(width),\n 'image/filename': dataset_util.bytes_feature(image_path.encode('utf8')),\n 'image/encoded': tf.train.Feature(bytes_list=tf.train.BytesList(value=[image])),\n 'image/format': dataset_util.bytes_feature(image_format.encode('utf8')),\n 'image/source_id': dataset_util.bytes_feature(image_path.encode('utf8')),\n 'image/key/sha256': dataset_util.bytes_feature(sha256.encode('utf8')),\n 'image/object/bbox/xmin': dataset_util.float_list_feature(xmin),\n 'image/object/bbox/xmax': dataset_util.float_list_feature(xmax),\n 'image/object/bbox/ymin': dataset_util.float_list_feature(ymin),\n 'image/object/bbox/ymax': dataset_util.float_list_feature(ymax),\n 'image/object/class/text': dataset_util.bytes_list_feature(\n list(map(modified_label_string, classes))),\n 'image/object/class/label': dataset_util.int64_list_feature(\n list(map(label_id, classes))),\n }))\n writer.write(complete_example.SerializeToString())\n\n writer.close()\n\n\ndef split_train_labels(train_labels):\n # one entry for each image in a folder/video to check their sizes later\n train_videos = [os.path.split(os.path.split(train_label['path'])[0])[1]\n for train_label in train_labels]\n # NOTE Because set order is not guaranteed (and we want to support different Python versions)\n video_dict = OrderedDict().fromkeys(train_videos)\n video_lengths = [train_videos.count(video) for video in video_dict.keys()]\n # The first three videos are used for the validation set.\n # Note that this may not be a completely clean validation set as the sequences\n # were captured independently but may be on the same day and are taken within\n # the same general area. This split is for object detection demonstation\n # purposes only. 
For clean dataset separation, the sequences would need to be\n # recorded on separate days and preferably in different areas.\n #\n # validation samples: 933, training samples: 4160 (+215 additional)\n num_valid_samples = sum(video_lengths[:3])\n return train_labels[num_valid_samples:], train_labels[:num_valid_samples]\n\n\ndef create_datasets(config):\n \"\"\" Splits labels and creates datasets \"\"\"\n train_labels = get_all_labels(config['train_yaml'])\n test_labels = get_all_labels(config['test_yaml'])\n\n if config['additional_yaml']:\n additional_labels = get_all_labels(config['additional_yaml'])\n\n # Split training labels into training and validation for \"more correct\" validation\n train_labels, valid_labels = split_train_labels(train_labels)\n train_labels.extend(additional_labels) # add unappealing images to training set\n\n if not os.path.isdir(config['dataset_folder']) or\\\n not os.path.isdir(os.path.join(config['dataset_folder'], 'rgb')):\n print('Dataset_folder needs to contain extracted dataset, including the rgb folder')\n print('{} does not fulfill those requirements'.format(config['dataset_folder']))\n\n create_object_detection_tfrecords(\n train_labels, config['train_tfrecord'], config['dataset_folder'], 'train')\n create_object_detection_tfrecords(\n valid_labels, config['valid_tfrecord'], config['dataset_folder'], 'valid')\n create_object_detection_tfrecords(\n test_labels, config['test_tfrecord'], config['dataset_folder'], 'test')\n\n print('Done creating tfrecords')\n\n\ndef parse_args():\n \"\"\" Command line args to tfrecords creation config \"\"\"\n parser = argparse.ArgumentParser(description=__doc__)\n parser.add_argument('--train_yaml', default='train.yaml',\n help='Path to train.yaml')\n parser.add_argument('--test_yaml', default='test.yaml',\n help='Path to test.yaml')\n parser.add_argument('--additional_yaml', default='additional_train.yaml',\n help='Path to train_additional.yaml')\n parser.add_argument('--dataset_folder', default='.',\n help='Path to dataset folder')\n parser.add_argument('--train_tfrecord', default='train.tfrecords',\n help='Path to train.tfrecord')\n parser.add_argument('--valid_tfrecord', default='valid.tfrecords',\n help='Path to valid.tfrecord')\n parser.add_argument('--test_tfrecord', default='test.tfrecords',\n help='Path to test.tfrecord')\n args = vars(parser.parse_args())\n return args\n\n\nif __name__ == '__main__':\n config = parse_args()\n create_datasets(config)\n"
] | [
[
"tensorflow.train.BytesList",
"tensorflow.io.TFRecordWriter"
]
] |
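The to_tfrecords.py entry above only writes records; reading them back is not shown there. As a sanity check, a file produced by that script can be parsed with the standard tf.data API. The sketch below assumes a 'valid.tfrecords' file from the script exists in the working directory and only decodes a subset of the feature keys it writes.

import tensorflow as tf

# minimal parser for a few of the keys written by create_object_detection_tfrecords
feature_spec = {
    'image/height': tf.io.FixedLenFeature([], tf.int64),
    'image/width': tf.io.FixedLenFeature([], tf.int64),
    'image/filename': tf.io.FixedLenFeature([], tf.string),
    'image/object/bbox/xmin': tf.io.VarLenFeature(tf.float32),
    'image/object/class/label': tf.io.VarLenFeature(tf.int64),
}

def parse_example(serialized):
    return tf.io.parse_single_example(serialized, feature_spec)

dataset = tf.data.TFRecordDataset('valid.tfrecords').map(parse_example)
for example in dataset.take(2):
    print(example['image/filename'].numpy(),
          example['image/height'].numpy(),
          tf.sparse.to_dense(example['image/object/bbox/xmin']).numpy())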
soyoung9306/-3-keras | [
"4090fcc86072cda816d1d6056b5113ace49534ae"
] | [
"ex9_1_applications_agumentation.py"
] | [
"\"\"\"\nCH 9.1 Applications/Image Augmentation\n\"\"\"\nfrom sklearn import model_selection\nfrom keras import datasets\nimport keras\nassert keras.backend.image_data_format() == 'channels_last'\n\nfrom keraspp import aigen\n\n\nclass Machine(aigen.Machine_Generator):\n def __init__(self):\n (x_train, y_train), (x_test, y_test) = datasets.cifar10.load_data()\n _, X, _, y = model_selection.train_test_split(x_train, y_train, test_size=0.02)\n X = X.astype(float)\n\n gen_param_dict = {'rotation_range': 10}\n\n super().__init__(X, y, nb_classes=10, gen_param_dict=gen_param_dict)\n\n\ndef main():\n m = Machine()\n m.run()\n\n\nif __name__ == '__main__':\n main()"
] | [
[
"sklearn.model_selection.train_test_split"
]
] |
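The augmentation entry above passes gen_param_dict = {'rotation_range': 10} into keraspp's Machine_Generator, which presumably forwards it to Keras' ImageDataGenerator; that forwarding is an assumption, since the wrapper's internals are not part of the entry. A standalone sketch of the underlying generator usage, independent of keraspp and with made-up data, looks like this:

import numpy as np
from tensorflow.keras.preprocessing.image import ImageDataGenerator

# stand-in batch of 8 RGB images, 32x32, values in [0, 1]
x = np.random.rand(8, 32, 32, 3).astype('float32')
y = np.arange(8)

# same parameter as the entry's gen_param_dict: small random rotations up to 10 degrees
datagen = ImageDataGenerator(rotation_range=10)

for batch_x, batch_y in datagen.flow(x, y, batch_size=4):
    print(batch_x.shape, batch_y)
    break  # one augmented batch is enough for the demo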
yangheng95/PyABSA | [
"f5b46047a58fa8054a0469486be3f1cada933814"
] | [
"pyabsa/core/tc/prediction/text_classifier.py"
] | [
"# -*- coding: utf-8 -*-\n# file: text_classifier.py\n# author: yangheng <[email protected]>\n# Copyright (C) 2020. All Rights Reserved.\nimport json\nimport os\nimport pickle\nimport random\n\nimport numpy\nimport torch\nfrom findfile import find_file\nfrom termcolor import colored\nfrom torch.utils.data import DataLoader\nfrom transformers import AutoTokenizer, AutoModel\n\nfrom pyabsa.functional.dataset import detect_infer_dataset\n\nfrom ..models import GloVeClassificationModelList, BERTClassificationModelList\nfrom ..classic.__glove__.dataset_utils.data_utils_for_inferring import GloVeClassificationDataset\nfrom ..classic.__bert__.dataset_utils.data_utils_for_inferring import BERTClassificationDataset\n\nfrom ..classic.__glove__.dataset_utils.data_utils_for_training import LABEL_PADDING, build_embedding_matrix, build_tokenizer\n\nfrom pyabsa.utils.pyabsa_utils import print_args, TransformerConnectionError\n\n\nclass TextClassifier:\n def __init__(self, model_arg=None, label_map=None, eval_batch_size=128):\n '''\n from_train_model: load inferring_tutorials model from trained model\n '''\n\n self.initializers = {\n 'xavier_uniform_': torch.nn.init.xavier_uniform_,\n 'xavier_normal_': torch.nn.init.xavier_normal,\n 'orthogonal_': torch.nn.init.orthogonal_\n }\n # load from a training\n if not isinstance(model_arg, str):\n print('Load text classifier from training')\n self.model = model_arg[0]\n self.opt = model_arg[1]\n self.tokenizer = model_arg[2]\n else:\n try:\n if 'fine-tuned' in model_arg:\n raise ValueError('Do not support to directly load a fine-tuned model, please load a .state_dict or .model instead!')\n print('Load text classifier from', model_arg)\n state_dict_path = find_file(model_arg, '.state_dict', exclude_key=['__MACOSX'])\n model_path = find_file(model_arg, '.model', exclude_key=['__MACOSX'])\n tokenizer_path = find_file(model_arg, '.tokenizer', exclude_key=['__MACOSX'])\n config_path = find_file(model_arg, '.config', exclude_key=['__MACOSX'])\n\n print('config: {}'.format(config_path))\n print('state_dict: {}'.format(state_dict_path))\n print('model: {}'.format(model_path))\n print('tokenizer: {}'.format(tokenizer_path))\n\n self.opt = pickle.load(open(config_path, mode='rb'))\n\n if state_dict_path or model_path:\n if not hasattr(GloVeClassificationModelList, self.opt.model.__name__.upper()):\n if 'pretrained_bert_name' in self.opt.args or 'pretrained_bert' in self.opt.args:\n if 'pretrained_bert_name' in self.opt.args:\n self.opt.pretrained_bert = self.opt.pretrained_bert_name\n if state_dict_path:\n try:\n self.bert = AutoModel.from_pretrained(self.opt.pretrained_bert)\n self.model = self.opt.model(self.bert, self.opt)\n except ValueError:\n raise TransformerConnectionError()\n elif model_path:\n if model_path:\n self.model = torch.load(model_path, map_location='cpu')\n if tokenizer_path:\n self.tokenizer = pickle.load(open(tokenizer_path, mode='rb'))\n else:\n raise ValueError('No .tokenizer found!')\n else:\n self.tokenizer = build_tokenizer(\n dataset_list=self.opt.dataset_file,\n max_seq_len=self.opt.max_seq_len,\n dat_fname='{0}_tokenizer.dat'.format(os.path.basename(self.opt.dataset_name)),\n opt=self.opt\n )\n if model_path:\n self.model = torch.load(model_path, map_location='cpu')\n else:\n self.embedding_matrix = build_embedding_matrix(\n word2idx=self.tokenizer.word2idx,\n embed_dim=self.opt.embed_dim,\n dat_fname='{0}_{1}_embedding_matrix.dat'.format(str(self.opt.embed_dim), os.path.basename(self.opt.dataset_name)),\n opt=self.opt\n )\n self.model = 
self.opt.model(self.embedding_matrix, self.opt).to(self.opt.device)\n self.model.load_state_dict(torch.load(state_dict_path, map_location='cpu'))\n\n print('Config used in Training:')\n print_args(self.opt, mode=1)\n\n except Exception as e:\n raise RuntimeError('Exception: {} Fail to load the model from {}! '.format(e, model_arg))\n\n if not hasattr(GloVeClassificationModelList, self.model.__class__.__name__) \\\n and not hasattr(BERTClassificationModelList, self.model.__class__.__name__):\n raise KeyError('The checkpoint you are loading is not from classifier model.')\n\n if hasattr(BERTClassificationModelList, self.opt.model.__name__):\n self.dataset = BERTClassificationDataset(tokenizer=self.tokenizer, opt=self.opt)\n\n elif hasattr(GloVeClassificationModelList, self.opt.model.__name__):\n self.dataset = GloVeClassificationDataset(tokenizer=self.tokenizer, opt=self.opt)\n\n self.opt.inputs_cols = self.model.inputs\n\n self.infer_dataloader = None\n self.opt.eval_batch_size = eval_batch_size\n\n if self.opt.seed is not None:\n random.seed(self.opt.seed)\n numpy.random.seed(self.opt.seed)\n torch.manual_seed(self.opt.seed)\n torch.cuda.manual_seed(self.opt.seed)\n torch.backends.cudnn.deterministic = True\n torch.backends.cudnn.benchmark = False\n\n self.opt.initializer = self.opt.initializer\n\n self.label_map = None\n self.set_label_map(label_map)\n\n def set_label_map(self, label_map):\n if label_map:\n print(colored('Warning: label map is deprecated, please directly set labels within dataset.', 'red'))\n label_map[LABEL_PADDING] = ''\n self.label_map = label_map\n\n def to(self, device=None):\n self.opt.device = device\n self.model.to(device)\n\n def cpu(self):\n self.opt.device = 'cpu'\n self.model.to('cpu')\n\n def cuda(self, device='cuda:0'):\n self.opt.device = device\n self.model.to(device)\n\n def _log_write_args(self):\n n_trainable_params, n_nontrainable_params = 0, 0\n for p in self.model.parameters():\n n_params = torch.prod(torch.tensor(p.shape))\n if p.requires_grad:\n n_trainable_params += n_params\n else:\n n_nontrainable_params += n_params\n print(\n 'n_trainable_params: {0}, n_nontrainable_params: {1}'.format(n_trainable_params, n_nontrainable_params))\n for arg in vars(self.opt):\n if getattr(self.opt, arg) is not None:\n print('>>> {0}: {1}'.format(arg, getattr(self.opt, arg)))\n\n def batch_infer(self,\n target_file=None,\n print_result=True,\n save_result=False,\n clear_input_samples=True,\n ignore_error=True):\n\n if clear_input_samples:\n self.clear_input_samples()\n\n save_path = os.path.join(os.getcwd(), 'text_classification.result.json')\n\n target_file = detect_infer_dataset(target_file, task='text_classification')\n if not target_file:\n raise FileNotFoundError('Can not find inference datasets!')\n\n self.dataset.prepare_infer_dataset(target_file, ignore_error=ignore_error)\n self.infer_dataloader = DataLoader(dataset=self.dataset, batch_size=self.opt.eval_batch_size, pin_memory=True, shuffle=False)\n return self._infer(save_path=save_path if save_result else None, print_result=print_result)\n\n def infer(self, text: str = None,\n print_result=True,\n clear_input_samples=True):\n\n if clear_input_samples:\n self.clear_input_samples()\n if text:\n self.dataset.prepare_infer_sample(text)\n else:\n raise RuntimeError('Please specify your datasets path!')\n self.infer_dataloader = DataLoader(dataset=self.dataset, batch_size=self.opt.eval_batch_size, shuffle=False)\n return self._infer(print_result=print_result)\n\n def merge_results(self, results):\n \"\"\" 
merge APC results have the same input text\n \"\"\"\n final_res = []\n for result in results:\n\n if final_res and \"\".join(final_res[-1]['text'].split()) == \"\".join(result['text'].split()):\n final_res[-1]['label'].append(result['label'])\n final_res[-1]['ref_label'].append(result['ref_label'])\n final_res[-1]['ref_check'].append(result['ref_check'])\n else:\n final_res.append(\n {\n 'text': result['text'].replace(' ', ' '),\n 'label': [result['label']],\n 'ref_label': [result['ref_label']],\n 'ref_check': [result['ref_check']]\n }\n )\n\n return final_res\n\n def _infer(self, save_path=None, print_result=True):\n\n _params = filter(lambda p: p.requires_grad, self.model.parameters())\n\n correct = {True: 'Correct', False: 'Wrong'}\n results = []\n\n with torch.no_grad():\n self.model.eval()\n n_correct = 0\n n_labeled = 0\n n_total = 0\n for _, sample in enumerate(self.infer_dataloader):\n inputs = [sample[col].to(self.opt.device) for col in self.opt.inputs_cols if col != 'label']\n self.model.eval()\n outputs = self.model(inputs)\n sen_logits = outputs\n t_probs = torch.softmax(sen_logits, dim=-1).cpu().numpy()\n for i, i_probs in enumerate(t_probs):\n if 'index_to_label' in self.opt.args and int(i_probs.argmax(axis=-1)):\n sent = self.opt.index_to_label[int(i_probs.argmax(axis=-1))]\n if sample['label'] != -999:\n real_sent = sample['label'][i] if isinstance(sample['label'][i], str) else self.opt.index_to_label.get(int(sample['label'][i]), 'N.A.')\n else:\n real_sent = 'N.A.'\n if real_sent != -999 and real_sent != '-999':\n n_labeled += 1\n if sent == real_sent:\n n_correct += 1\n else: # for the former versions until 1.2.0\n sent = int(i_probs.argmax(axis=-1))\n real_sent = int(sample['label'][i])\n\n text_raw = sample['text_raw'][i]\n\n results.append({\n 'text': text_raw,\n 'label': sent,\n 'ref_label': real_sent,\n 'ref_check': correct[sent == real_sent] if real_sent != '-999' else '',\n })\n n_total += 1\n if len(self.infer_dataloader) > 1:\n print('Total samples:{}'.format(n_total))\n print('Labeled samples:{}'.format(n_labeled))\n print('Prediction Accuracy:{}%'.format(100 * n_correct / n_labeled if n_labeled else 'N.A.'))\n\n try:\n if print_result:\n for result in results:\n text_printing = result['text']\n\n if result['ref_label'] != -999:\n if result['label'] == result['ref_label']:\n text_info = colored(' -> {}(ref:{})'.format(result['label'], result['ref_label']), 'green')\n else:\n text_info = colored(' -> {}(ref:{})'.format(result['label'], result['ref_label']), 'red')\n else:\n text_info = ' -> {}'.format(result['label'])\n\n text_printing += text_info\n print(text_printing)\n if save_path:\n fout = open(save_path, 'w', encoding='utf8')\n json.dump(json.JSONEncoder().encode({'results': results}), fout, ensure_ascii=False)\n # fout.write('Total samples:{}\\n'.format(n_total))\n # fout.write('Labeled samples:{}\\n'.format(n_labeled))\n # fout.write('Prediction Accuracy:{}%\\n'.format(100 * n_correct / n_labeled)) if n_labeled else 'N.A.'\n print('inference result saved in: {}'.format(save_path))\n except Exception as e:\n print('Can not save result: {}, Exception: {}'.format(text_raw, e))\n return results\n\n def clear_input_samples(self):\n self.dataset.all_data = []\n"
] | [
[
"torch.utils.data.DataLoader",
"torch.load",
"torch.cuda.manual_seed",
"torch.manual_seed",
"torch.no_grad",
"numpy.random.seed",
"torch.tensor",
"torch.softmax"
]
] |
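The TextClassifier above pins every random source whenever opt.seed is set (random, NumPy, torch, CUDA, cuDNN flags). That reproducibility block is a reusable pattern; the sketch below isolates it as a standalone helper usable outside pyabsa — the function name is made up, the individual calls are the standard PyTorch/NumPy ones.

import random
import numpy as np
import torch

def seed_everything(seed: int) -> None:
    """Pin Python, NumPy and PyTorch RNGs and make cuDNN deterministic."""
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)           # no-op on CPU-only machines
    torch.backends.cudnn.deterministic = True  # trade speed for reproducibility
    torch.backends.cudnn.benchmark = False

seed_everything(42)
print(torch.rand(2), np.random.rand(2))  # identical on every run with the same seed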
fqnchina/NeuralRouting | [
"333dc95cb2d9a779de88e2349883a0002111d1b3"
] | [
"dataset_loader.py"
] | [
"import numpy as np\r\nimport torch\r\nfrom torch.utils.data import Dataset, DataLoader\r\nfrom torchvision import transforms as tfs\r\nfrom PIL import Image\r\nimport os, cv2, copy, time\r\nfrom config import *\r\n\r\n\r\n# args.\r\nimage_height, image_width = opt.image_height, opt.image_width\r\nintrinsics = opt.intrinsics\r\nclose_radius, far_radiuses = 0, opt.far_radiuses\r\nn_neighb_pts = opt.n_neighb_pts\r\n\r\n\r\ndef isSon(son, fa):\r\n for i in range(len(fa)):\r\n if son[i] != fa[i]:\r\n return False\r\n return True\r\n\r\n\r\n# todo: to be migrated...\r\ndef depth2local(depth): # depth: float32, meter.\r\n cx, cy, fx, fy = intrinsics[0, 2], intrinsics[1, 2], intrinsics[0, 0], intrinsics[1, 1]\r\n u_base = np.tile(np.arange(image_width), (image_height, 1))\r\n v_base = np.tile(np.arange(image_height)[:, np.newaxis], (1, image_width))\r\n X = (u_base - cx) * depth / fx\r\n Y = (v_base - cy) * depth / fy\r\n coord_local = np.stack((X, Y, depth), axis=2)\r\n return coord_local\r\ndef partial_pts(pts_all_in, p, r_min, r_max): # pts_all_in.shape (#points, #channel)\r\n pts_all = copy.deepcopy(pts_all_in)\r\n p_mat = p[np.newaxis, 0:3].repeat(pts_all.shape[0], axis=0)\r\n norms = np.linalg.norm((p_mat - pts_all[:, 0:3]), axis=1)\r\n return pts_all[np.logical_and(norms >= r_min, norms <= r_max)]\r\ndef sample_pts(pts_in, num): # pts_in.shape (#points, #channel)\r\n pts = copy.deepcopy(pts_in)\r\n while pts.shape[0] < num:\r\n pts = np.concatenate((pts, pts), axis=0)\r\n rand_ids = np.arange(pts.shape[0])\r\n np.random.shuffle(rand_ids)\r\n return pts[rand_ids[0:num], :]\r\ndef sample_pts_rc(pts_in, rcs_in, num): # pts_in.shape (#points, #channel)\r\n pts = copy.deepcopy(pts_in)\r\n rcs = copy.deepcopy(rcs_in)\r\n while pts.shape[0] < num:\r\n pts = np.concatenate((pts, pts), axis=0)\r\n rand_ids = np.arange(pts.shape[0])\r\n np.random.shuffle(rand_ids)\r\n return pts[rand_ids[0:num], :], rcs_in[rand_ids[0:num], :]\r\ndef sample_pts9d_r3d(pts_in, num, radius): # pts_in.shape (#points, #channel)\r\n pts = copy.deepcopy(pts_in)\r\n thresh = 500\r\n # remove background by 3d radius\r\n xyz = pts[:, 0:3]\r\n pts = pts[np.linalg.norm(xyz, axis=1) <= radius]\r\n # print('pt num after r3d {}'.format(pts.shape[0]))\r\n if pts.shape[0] < thresh: # avoid infinite loop.\r\n return None\r\n while pts.shape[0] < num:\r\n pts = np.concatenate((pts, pts), axis=0)\r\n rand_ids = np.arange(pts.shape[0])\r\n np.random.shuffle(rand_ids)\r\n return pts[rand_ids[0:num], :]\r\ndef shift_pts(pts_in, cen): # pts_in.shape (#points, #channel)\r\n pts = copy.deepcopy(pts_in)\r\n cen_mat = cen[np.newaxis, :].repeat(pts.shape[0], axis=0)\r\n pts[:, 0:3] = pts[:, 0:3] - cen_mat\r\n return pts\r\ndef shift_pts6d(pts_in, cen): # pts_in.shape (#points, #channel)\r\n pts = copy.deepcopy(pts_in)\r\n cen_mat = cen[np.newaxis, :].repeat(pts.shape[0], axis=0)\r\n pts[:, :] = pts[:, :] - cen_mat\r\n return pts\r\ndef shift_pts9d(pts_in, cen): # pts_in.shape (#points, #channel)\r\n cpt = copy.deepcopy(cen)\r\n cpt[3:6] = np.zeros(3) # remove shift of normal\r\n pts = copy.deepcopy(pts_in)\r\n cpt_mat = cpt[np.newaxis, :].repeat(pts.shape[0], axis=0)\r\n pts[:, :] = pts[:, :] - cpt_mat\r\n return pts\r\n\r\n\r\ndef make_ppf(pts9d, cen9d): # (N,9), (9,)\r\n # prepare\r\n n_pts = pts9d.shape[0]\r\n d = pts9d[:, 0:3]\r\n n2 = pts9d[:, 3:6]\r\n n1 = np.repeat(cen9d[3:6].reshape(1, 3), n_pts, axis=0)\r\n # ppf\r\n dim1 = np.linalg.norm(d, axis=1).reshape(n_pts, 1)\r\n d = d / (dim1.reshape(n_pts, 1))\r\n dim2 = np.sum(n1 * d, 
axis=1).reshape(n_pts, 1)\r\n dim3 = np.sum(n2 * d, axis=1).reshape(n_pts, 1)\r\n dim4 = np.sum(n1 * n2, axis=1).reshape(n_pts, 1)\r\n ppf = np.concatenate((dim1, dim2, dim3, dim4), axis=1)\r\n ppf7d = np.concatenate((ppf, pts9d[:, 6:9]), axis=1)\r\n return ppf7d\r\n\r\ndef compute_points_normal(pts):\r\n raw_shape = pts.shape\r\n normal = np.zeros((raw_shape)) # (r,c,3)\r\n t0 = time.time()\r\n for r in range(2, raw_shape[0] - 2):\r\n for c in range(2, raw_shape[1] - 2):\r\n pts_local = pts[r - 2:r + 3, c - 2:c + 3, :] # (5,5,3)\r\n pts_local = pts_local.reshape(-1, 3) # (N,3)\r\n pts_local = pts_local[np.linalg.norm(pts_local - pts[r, c, :], axis=1) < 0.1] # remove outliers.\r\n if pts_local.shape[0] < 4:\r\n continue\r\n pts_local = pts_local - np.mean(pts_local, axis=0)\r\n C = pts_local.T @ pts_local / pts_local.shape[0]\r\n e, v = np.linalg.eig(C)\r\n d = v[:, np.where(e == np.min(e))[0][0]]\r\n n = d / np.linalg.norm(d)\r\n if np.dot(n, np.array([0, 0, 1])) > 0:\r\n n = -n\r\n normal[r, c, :] = n\r\n t1 = time.time()\r\n print('preprocess data: compute normal cost {:.2f}s'.format(t1 - t0))\r\n return normal\r\n\r\n\r\n# for depth adaptive 2d\r\ndef partial_pts_2d(pts_rc, cen_rc, list_drdc):\r\n result = None\r\n r_max, c_max = int(image_height / 4 - 1), int(image_width / 4 - 1)\r\n mat_drdc = (np.array(list_drdc) / 4).astype(int)\r\n mat_cen_rc = np.array(cen_rc)\r\n mat_targ_rc = cen_rc + mat_drdc\r\n mat_targ_rc[mat_targ_rc < 0] = 0\r\n targ_r = mat_targ_rc[:, 0]\r\n targ_r[targ_r > r_max] = r_max\r\n targ_c = mat_targ_rc[:, 1]\r\n targ_c[targ_c > c_max] = c_max\r\n result = pts_rc[targ_r, targ_c]\r\n return copy.deepcopy(result)\r\n\r\n\r\n# for depth adaptive 2d\r\ndef partial_pts_2d_rc(pts_rc, cen_rc, list_drdc):\r\n result = None\r\n r_max, c_max = int(image_height / 4 - 1), int(image_width / 4 - 1)\r\n mat_drdc = (np.array(list_drdc) / 4).astype(int)\r\n mat_cen_rc = np.array(cen_rc)\r\n mat_targ_rc = cen_rc + mat_drdc\r\n mat_targ_rc[mat_targ_rc < 0] = 0\r\n targ_r = mat_targ_rc[:, 0]\r\n targ_r[targ_r > r_max] = r_max\r\n targ_c = mat_targ_rc[:, 1]\r\n targ_c[targ_c > c_max] = c_max\r\n result = pts_rc[targ_r, targ_c]\r\n return copy.deepcopy(result), copy.deepcopy(\r\n np.concatenate((targ_r.reshape(targ_r.shape[0], 1), targ_c.reshape(targ_c.shape[0], 1)), axis=1))\r\n\r\n\r\n# for depth adaptive 2d with dynamics label\r\ndef partial_pts_2d_with_label(pts_rc, cen_rc, list_drdc, mask): # mask: 0 for static pixel, 255 for dynamic pixel.\r\n result = None\r\n r_max, c_max = int(image_height / 4 - 1), int(image_width / 4 - 1)\r\n mat_drdc = (np.array(list_drdc) / 4).astype(int)\r\n mat_cen_rc = np.array(cen_rc)\r\n mat_targ_rc = cen_rc + mat_drdc\r\n mat_targ_rc[mat_targ_rc < 0] = 0\r\n targ_r = mat_targ_rc[:, 0]\r\n targ_r[targ_r > r_max] = r_max\r\n targ_c = mat_targ_rc[:, 1]\r\n targ_c[targ_c > c_max] = c_max\r\n m1 = np.zeros((mask.shape[0], mask.shape[1]))\r\n m1[mask == 0] = 1\r\n m2 = np.zeros((mask.shape[0], mask.shape[1]))\r\n m2[targ_r, targ_c] = 1\r\n m3 = np.logical_and(m1, m2)\r\n result = pts_rc[m3]\r\n return copy.deepcopy(result)\r\n\r\n\r\nclass LevelDataset_PPF(Dataset):\r\n\r\n def __init__(self, data_dir, the_list, n_pts_per_frame=opt.n_pts_per_frame, neighbor_da2d=None, far_radius=None,\r\n enable_color_aug=True, specified_node=None):\r\n super().__init__()\r\n self.data_dir, self.the_list = data_dir, the_list\r\n self.n_pts_per_frame = n_pts_per_frame\r\n self.neighbor_da2d = neighbor_da2d # (n_pts, dim_pt).\r\n self.far_radius = far_radius # scalar.\r\n 
self.enable_color_aug = enable_color_aug\r\n self.specified_node = specified_node\r\n\r\n def __len__(self):\r\n return len(self.the_list)\r\n\r\n def __getitem__(self, idx):\r\n fid, rc_route = self.the_list[idx]\r\n # load \r\n depth = cv2.imread('{}/{}'.format(self.data_dir, opt.depth_format.format(fid)), cv2.IMREAD_UNCHANGED) / 1000.0\r\n color = cv2.imread('{}/{}'.format(self.data_dir, opt.color_format.format(fid)), cv2.IMREAD_UNCHANGED)[:, :, 0:3]\r\n # color jitter\r\n if self.enable_color_aug:\r\n img = Image.fromarray(color)\r\n if np.random.rand() < 0.5:\r\n img = tfs.ColorJitter(brightness=1.)(img)\r\n if np.random.rand() < 0.5:\r\n img = tfs.ColorJitter(contrast=1.)(img)\r\n if np.random.rand() < 0.5:\r\n img = tfs.ColorJitter(saturation=1.)(img)\r\n color = np.array(img)\r\n if np.max(color) > 1:\r\n color = color / 255. - 0.5\r\n local = depth2local(depth)\r\n r_ids, c_ids = list(range(0, image_height, 4)), list(range(0, image_width, 4))\r\n depth, color, local = depth[r_ids, :][:, c_ids], color[r_ids, :, :][:, c_ids, :], local[r_ids, :, :][:, c_ids, :]\r\n # normal by 3d neighbor plane fitting.\r\n normal_path = '{}/frame-{:06d}.scaled.normal.npy'.format(self.data_dir, fid)\r\n if os.path.exists(normal_path):\r\n # print('fid {}'.format(fid)) # to debug rio10 scene09 10\r\n # normal = np.load(normal_path)\r\n if os.path.getsize(normal_path) > 1:\r\n normal = np.load(normal_path, encoding='bytes', allow_pickle=True)\r\n else:\r\n normal = compute_points_normal(local)\r\n np.save(normal_path, normal)\r\n else:\r\n normal = compute_points_normal(local)\r\n np.save(normal_path, normal)\r\n lclnmlclr = np.concatenate((np.concatenate((local, normal), axis=2), color), axis=2)\r\n # build a patch\r\n rand_ids = np.arange(len(rc_route))\r\n np.random.shuffle(rand_ids)\r\n selected_ids = rand_ids[0:self.n_pts_per_frame * 2] # more candidates\r\n pt_in = torch.zeros((self.n_pts_per_frame, 7, 1))\r\n nb_in = torch.zeros((self.n_pts_per_frame, 7, opt.n_neighb_pts))\r\n route_labs = torch.zeros((self.n_pts_per_frame, opt.tree_height - 1)).fill_(ary)\r\n rc_list = []\r\n # da2d+3d neighbor\r\n if not self.neighbor_da2d is None:\r\n sid = 0\r\n for tmp_idx in range(len(selected_ids)):\r\n r, c = rc_route[selected_ids[tmp_idx]][0], rc_route[selected_ids[tmp_idx]][1]\r\n if np.isnan(lclnmlclr[r, c, 3]):\r\n continue\r\n if self.specified_node:\r\n if not isSon(rc_route[selected_ids[tmp_idx]][2], self.specified_node):\r\n continue\r\n route_labs[sid] = torch.Tensor(rc_route[selected_ids[tmp_idx]][2])\r\n rc_list.append([r, c])\r\n pt_in[sid] = torch.Tensor(\r\n np.concatenate((np.array([[0.], [0.], [0.], [0.]]), color[r, c, 0:3][:, np.newaxis]), axis=0))\r\n da2d_list = (np.array(self.neighbor_da2d) / depth[r, c]).astype(int)\r\n # ppf\r\n pts9d = shift_pts9d(sample_pts(partial_pts_2d(lclnmlclr, (r, c), da2d_list), opt.n_neighb_pts),\r\n lclnmlclr[r, c, :])\r\n cen9d = copy.deepcopy(lclnmlclr[r, c, :])\r\n cen9d[0:3] = np.zeros(3)\r\n ppf7d = make_ppf(pts9d, cen9d) # (N,9), (9,)\r\n ppf7d[np.isnan(ppf7d)] = 0.0\r\n nb_in[sid] = torch.Tensor(ppf7d).transpose(1, 0)\r\n # remove background by 3d radius\r\n xyz = pts9d[:, 0:3]\r\n ids_out_of_bound = np.linalg.norm(xyz, axis=1) > self.far_radius\r\n nb_in[sid, :, ids_out_of_bound] = 0.\r\n # count\r\n sid += 1\r\n if sid >= self.n_pts_per_frame:\r\n break\r\n pt_in = pt_in[:sid]\r\n nb_in = nb_in[:sid]\r\n route_labs = route_labs[:sid]\r\n return pt_in, nb_in, route_labs, fid, torch.Tensor(np.array(rc_list))\r\n\r\n\r\nclass 
TestDataset_PPF(Dataset):\r\n\r\n def __init__(self, data_dir, the_list, n_pts_per_frame=opt.n_pts_per_frame, neighbor_da2d=None):\r\n super().__init__()\r\n self.data_dir, self.the_list = data_dir, the_list\r\n self.n_pts_per_frame = n_pts_per_frame\r\n self.neighbor_da2d = neighbor_da2d # list of (n_pts, dim_pt)\r\n\r\n def __len__(self):\r\n return len(self.the_list)\r\n\r\n def __getitem__(self, idx):\r\n fid = self.the_list[idx]\r\n # load \r\n depth = cv2.imread('{}/{}'.format(self.data_dir, opt.depth_format.format(fid)), cv2.IMREAD_UNCHANGED) / 1000.0\r\n color = cv2.imread('{}/{}'.format(self.data_dir, opt.color_format.format(fid)), cv2.IMREAD_UNCHANGED)[:, :, 0:3]\r\n if np.max(color) > 1:\r\n color = color / 255. - 0.5\r\n local = depth2local(depth)\r\n r_ids, c_ids = list(range(0, image_height, 4)), list(range(0, image_width, 4))\r\n depth, color, local = depth[r_ids, :][:, c_ids], color[r_ids, :, :][:, c_ids, :], local[r_ids, :, :][:, c_ids,\r\n :]\r\n # normal by 3d neighbor plane fitting.\r\n normal_path = '{}/frame-{:06d}.scaled.normal.npy'.format(self.data_dir, fid)\r\n if os.path.exists(normal_path):\r\n # normal = np.load(normal_path)\r\n if os.path.getsize(normal_path) > 1:\r\n normal = np.load(normal_path, encoding='bytes', allow_pickle=True)\r\n else:\r\n normal = compute_points_normal(local)\r\n np.save(normal_path, normal)\r\n else:\r\n normal = compute_points_normal(local)\r\n np.save(normal_path, normal)\r\n lclnmlclr = np.concatenate((np.concatenate((local, normal), axis=2), color), axis=2)\r\n # build a patch\r\n pt_in = torch.zeros((self.n_pts_per_frame, 7, 1))\r\n nb_ms_in = torch.zeros((self.n_pts_per_frame, opt.tree_height - 1, 7, opt.n_neighb_pts))\r\n route_labs = torch.zeros((self.n_pts_per_frame, opt.tree_height - 1))\r\n r_max, c_max = int(image_height / 4 - 1), int(image_width / 4 - 1)\r\n rc_list = []\r\n # da2d+3d neighbor\r\n if not self.neighbor_da2d is None:\r\n sid, count_crt, count_max = 0, 0, 9999\r\n mask = np.zeros((r_max, c_max))\r\n while len(rc_list) < self.n_pts_per_frame:\r\n # avoid infinite loop\r\n count_crt += 1\r\n if count_crt > count_max:\r\n break\r\n r, c = np.random.randint(0, r_max), np.random.randint(0, c_max)\r\n if depth[r, c] == 0. or mask[r, c] == 1.:\r\n continue\r\n if np.isnan(lclnmlclr[r, c, 3]):\r\n continue\r\n mask[r, c] = 1.\r\n rc_list.append([r, c])\r\n pt_in[sid] = torch.Tensor(\r\n np.concatenate((np.array([[0.], [0.], [0.], [0.]]), color[r, c, 0:3][:, np.newaxis]), axis=0))\r\n for lid in range(opt.tree_height - 1):\r\n da2d_list = (np.array(self.neighbor_da2d[lid]) / depth[r, c]).astype(int)\r\n # ppf\r\n pts9d = shift_pts9d(\r\n sample_pts(partial_pts_2d(lclnmlclr, (r, c), da2d_list), opt.n_neighb_pts),\r\n lclnmlclr[r, c, :])\r\n cen9d = copy.deepcopy(lclnmlclr[r, c, :])\r\n cen9d[0:3] = np.zeros(3)\r\n ppf7d = make_ppf(pts9d, cen9d) # (N,9), (9,)\r\n ppf7d[np.isnan(ppf7d)] = 0.0\r\n nb_ms_in[sid, lid, :, :] = torch.Tensor(ppf7d).transpose(1, 0)\r\n # remove background by 3d radius\r\n xyz = pts9d[:, 0:3]\r\n ids_out_of_bound = np.linalg.norm(xyz, axis=1) > opt.far_radiuses[lid]\r\n nb_ms_in[sid, lid, :, ids_out_of_bound] = 0.\r\n # count\r\n sid += 1\r\n return pt_in, nb_ms_in, -1, fid, torch.Tensor(np.array(rc_list))\r\n\r\n\r\n# # debug\r\n# if __name__ == '__main__':\r\n# \tprint('done.')\r\n\r\n"
] | [
[
"numpy.sum",
"numpy.save",
"numpy.stack",
"numpy.logical_and",
"numpy.random.rand",
"numpy.isnan",
"numpy.linalg.eig",
"torch.Tensor",
"numpy.mean",
"numpy.load",
"numpy.zeros",
"torch.zeros",
"numpy.arange",
"numpy.max",
"numpy.min",
"numpy.linalg.norm",
"numpy.random.shuffle",
"numpy.array",
"numpy.concatenate",
"numpy.random.randint"
]
] |
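make_ppf in the dataset_loader.py entry above builds point pair features: one distance plus three angle-like dot products between a centre point's normal, each neighbour's normal, and the centre-to-neighbour direction. The sketch below recomputes those four geometric dimensions for a toy neighbourhood with plain NumPy; the (N, 9) point / (9,) centre layout follows the entry, but the data and function name are made up.

import numpy as np

def point_pair_features(pts9d, cen9d):
    """pts9d: (N, 9) = xyz offset, normal, rgb; cen9d: (9,) centre with xyz zeroed."""
    n_pts = pts9d.shape[0]
    d = pts9d[:, 0:3]                                         # offsets from the centre point
    n2 = pts9d[:, 3:6]                                        # neighbour normals
    n1 = np.repeat(cen9d[3:6].reshape(1, 3), n_pts, axis=0)   # centre normal, broadcast
    dist = np.linalg.norm(d, axis=1).reshape(n_pts, 1)
    d_unit = d / dist                                         # unit direction centre -> neighbour
    f2 = np.sum(n1 * d_unit, axis=1, keepdims=True)           # angle(centre normal, direction)
    f3 = np.sum(n2 * d_unit, axis=1, keepdims=True)           # angle(neighbour normal, direction)
    f4 = np.sum(n1 * n2, axis=1, keepdims=True)               # angle(normal, normal)
    return np.concatenate((dist, f2, f3, f4), axis=1)         # (N, 4)

rng = np.random.default_rng(0)
pts = rng.normal(size=(5, 9))
cen = np.concatenate((np.zeros(3), np.array([0., 0., 1.]), np.zeros(3)))
print(point_pair_features(pts, cen).shape)  # (5, 4)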
ischrot/scipy_rmt_bsc | [
"1dd8f7f0ee7ac1311ed1735ca6b6025150524418"
] | [
"scipy/optimize/tests/test_linesearch.py"
] | [
"\"\"\"\nTests for line search routines\n\"\"\"\nfrom numpy.testing import (assert_, assert_equal, assert_array_almost_equal,\n assert_array_almost_equal_nulp, assert_warns,\n suppress_warnings)\nimport scipy.optimize.linesearch as ls\nimport scipy.optimize.nonlin as nl #(LS)\nfrom scipy.linalg import norm\nfrom scipy.optimize.linesearch import LineSearchWarning\nimport numpy as np\nfrom copy import deepcopy # (IS)\n\n\ndef assert_wolfe(s, phi, derphi, c1=1e-4, c2=0.9, err_msg=\"\"):\n \"\"\"\n Check that strong Wolfe conditions apply\n \"\"\"\n phi1 = phi(s)\n phi0 = phi(0)\n derphi0 = derphi(0)\n derphi1 = derphi(s)\n msg = \"s = %s; phi(0) = %s; phi(s) = %s; phi'(0) = %s; phi'(s) = %s; %s\" % (\n s, phi0, phi1, derphi0, derphi1, err_msg)\n\n assert_(phi1 <= phi0 + c1*s*derphi0, \"Wolfe 1 failed: \" + msg)\n assert_(abs(derphi1) <= abs(c2*derphi0), \"Wolfe 2 failed: \" + msg)\n\n\ndef assert_armijo(s, phi, c1=1e-4, err_msg=\"\"):\n \"\"\"\n Check that Armijo condition applies\n \"\"\"\n phi1 = phi(s)\n phi0 = phi(0)\n msg = \"s = %s; phi(0) = %s; phi(s) = %s; %s\" % (s, phi0, phi1, err_msg)\n assert_(phi1 <= (1 - c1*s)*phi0, msg)\n\n###(LS)###\ndef assert_rmt(alpha, dx, F0, Fx_new, jacobian, param, c1=1e-4, err_msg=\"\"):\n \"\"\"\n Check that RMT condition applies\n \"\"\"\n\n\n parameters = ls.prepare_parameters('rmt',param,jacobian,dx)\n rmt_eta_upper = parameters['rmt_eta_upper']\n rmt_eta_lower = parameters['rmt_eta_lower']\n amin = parameters['amin']\n\n #Step 1: Eval t_dx_omega\n dxbar = jacobian.solve(\n Fx_new\n )\n\n dx_diff = dxbar + (1 - alpha) * dx # note that dx = - J(x_k)^(-1)F(x_k)\n\n nominator = 2 * norm(dx_diff)\n denominator = alpha * norm(dx)\n\n t_dx_omega = nominator / denominator\n\n tester = (rmt_eta_lower <= t_dx_omega and t_dx_omega <= rmt_eta_upper) or (rmt_eta_lower > t_dx_omega and alpha == 1.0)\n\n msg = \"s = %s; phi(0) = %s; phi(s) = %s; %s\" % (alpha, F0, Fx_new, err_msg)\n assert_(tester or (alpha<amin), msg)\n\n\ndef assert_bsc(alpha, x, dx, func, old_jacobian, param, err_msg):\n parameters = ls.prepare_parameters('bsc',param, old_jacobian, dx)\n H_lower = parameters['H_lower']\n H_upper = parameters['H_upper']\n amin = parameters['amin']\n\n x_new = x + alpha * dx\n Fx_new = func(x_new)\n jacobian = deepcopy(old_jacobian)\n jacobian.update(\n x_new.copy(),\n Fx_new\n )\n dx_next_it = -jacobian.solve(\n Fx_new\n )\n dx_diff = dx_next_it - dx\n H_prime = alpha * norm(dx_diff)\n\n tester = (H_lower <= H_prime and H_prime <= H_upper) or (H_lower > H_prime and alpha >= 1.0)\n\n msg = \"s = %s; phi(0) = %s; phi(s) = %s; %s\" % (alpha, func(x), Fx_new, err_msg)\n\n assert_(tester or (alpha<amin), msg)\n###(LS)###\n\ndef assert_line_wolfe(x, p, s, f, fprime, **kw):\n assert_wolfe(s, phi=lambda sp: f(x + p*sp),\n derphi=lambda sp: np.dot(fprime(x + p*sp), p), **kw)\n\n\ndef assert_line_armijo(x, p, s, f, **kw):\n assert_armijo(s, phi=lambda sp: f(x + p*sp), **kw)\n\n\ndef assert_fp_equal(x, y, err_msg=\"\", nulp=50):\n \"\"\"Assert two arrays are equal, up to some floating-point rounding error\"\"\"\n try:\n assert_array_almost_equal_nulp(x, y, nulp)\n except AssertionError as e:\n raise AssertionError(\"%s\\n%s\" % (e, err_msg)) from e\n\n\nclass TestLineSearch(object):\n # -- scalar functions; must have dphi(0.) 
< 0\n def _scalar_func_1(self, s):\n self.fcount += 1\n p = -s - s**3 + s**4\n dp = -1 - 3*s**2 + 4*s**3\n return p, dp\n\n def _scalar_func_2(self, s):\n self.fcount += 1\n p = np.exp(-4*s) + s**2\n dp = -4*np.exp(-4*s) + 2*s\n return p, dp\n\n def _scalar_func_3(self, s):\n self.fcount += 1\n p = -np.sin(10*s)\n dp = -10*np.cos(10*s)\n return p, dp\n\n # -- n-d functions\n\n def _line_func_1(self, x):\n self.fcount += 1\n f = np.dot(x, x)\n df = 2*x\n return f, df\n\n def _line_func_2(self, x):\n self.fcount += 1\n f = np.dot(x, np.dot(self.A, x)) + 1\n df = np.dot(self.A + self.A.T, x)\n return f, df\n\n # --\n\n def setup_method(self):\n self.scalar_funcs = []\n self.line_funcs = []\n self.N = 20\n self.fcount = 0\n\n def bind_index(func, idx):\n # Remember Python's closure semantics!\n return lambda *a, **kw: func(*a, **kw)[idx]\n\n for name in sorted(dir(self)):\n if name.startswith('_scalar_func_'):\n value = getattr(self, name)\n self.scalar_funcs.append(\n (name, bind_index(value, 0), bind_index(value, 1)))\n elif name.startswith('_line_func_'):\n value = getattr(self, name)\n self.line_funcs.append(\n (name, bind_index(value, 0), bind_index(value, 1)))\n\n np.random.seed(1234)\n self.A = np.random.randn(self.N, self.N)\n\n def scalar_iter(self):\n for name, phi, derphi in self.scalar_funcs:\n for old_phi0 in np.random.randn(3):\n yield name, phi, derphi, old_phi0\n\n def line_iter(self):\n for name, f, fprime in self.line_funcs:\n k = 0\n while k < 9:\n x = np.random.randn(self.N)\n p = np.random.randn(self.N)\n if np.dot(p, fprime(x)) >= 0:\n # always pick a descent direction\n continue\n k += 1\n old_fv = float(np.random.randn())\n yield name, f, fprime, x, p, old_fv\n\n # -- Generic scalar searches\n\n def test_scalar_search_wolfe1(self):\n c = 0\n for name, phi, derphi, old_phi0 in self.scalar_iter():\n c += 1\n s, phi1, phi0 = ls.scalar_search_wolfe1(phi, derphi, phi(0),\n old_phi0, derphi(0))\n assert_fp_equal(phi0, phi(0), name)\n assert_fp_equal(phi1, phi(s), name)\n assert_wolfe(s, phi, derphi, err_msg=name)\n\n assert_(c > 3) # check that the iterator really works...\n\n def test_scalar_search_wolfe2(self):\n for name, phi, derphi, old_phi0 in self.scalar_iter():\n s, phi1, phi0, derphi1 = ls.scalar_search_wolfe2(\n phi, derphi, phi(0), old_phi0, derphi(0))\n assert_fp_equal(phi0, phi(0), name)\n assert_fp_equal(phi1, phi(s), name)\n if derphi1 is not None:\n assert_fp_equal(derphi1, derphi(s), name)\n assert_wolfe(s, phi, derphi, err_msg=\"%s %g\" % (name, old_phi0))\n\n def test_scalar_search_wolfe2_with_low_amax(self):\n def phi(alpha):\n return (alpha - 5) ** 2\n\n def derphi(alpha):\n return 2 * (alpha - 5)\n\n s, _, _, _ = assert_warns(LineSearchWarning,\n ls.scalar_search_wolfe2, phi, derphi, amax=0.001)\n assert_(s is None)\n\n def test_scalar_search_wolfe2_regression(self):\n # Regression test for gh-12157\n # This phi has its minimum at alpha=4/3 ~ 1.333.\n def phi(alpha):\n if alpha < 1:\n return - 3*np.pi/2 * (alpha - 1)\n else:\n return np.cos(3*np.pi/2 * alpha - np.pi)\n\n def derphi(alpha):\n if alpha < 1:\n return - 3*np.pi/2\n else:\n return - 3*np.pi/2 * np.sin(3*np.pi/2 * alpha - np.pi)\n\n s, _, _, _ = ls.scalar_search_wolfe2(phi, derphi)\n # Without the fix in gh-13073, the scalar_search_wolfe2\n # returned s=2.0 instead.\n assert_(s < 1.5)\n\n def test_scalar_search_armijo(self):\n for name, phi, derphi, old_phi0 in self.scalar_iter():\n s, phi1 = ls.scalar_search_armijo(phi, phi(0), derphi(0))\n assert_fp_equal(phi1, phi(s), name)\n 
assert_armijo(s, phi, err_msg=\"%s %g\" % (name, old_phi0))\n\n ###(LS)###\n ##RMT not usefull for scalar functions, thus no need for test_scalar_search_rmt?\n\n def test_line_search_rmt(self):\n #There is at least 1 function R^20->R to be tested, but this leads to s=None\n for name, f, fprime, x, p, old_f in self.line_iter():\n jac = lambda x: fprime(x)\n x0 = nl._as_inexact(x)\n func = lambda z: nl._as_inexact(f(nl._array_like(z, x0))).flatten()\n x = x0.flatten()\n jacobian = nl.asjacobian(jac)\n jacobian.setup(x.copy(), f(x), func)\n options = {'jacobian': jacobian, 'jac_tol': min(1e-03,1e-03*norm(f(x))), 'amin':1e-8}\n #print(\"1: \",f(x),np.shape(fprime(x)))\n s, dxbar, f_new = ls.scalar_search_rmt(f, x, fprime(x), parameters=options)\n #print(\"2: \",p_new, s)\n assert_fp_equal(f_new, x+s*fprime(x), name)\n assert_rmt(s, fprime(x), f(x), f_new, jacobian, options, err_msg=\"%s %g\" % name)\n\n\n def test_line_search_bsc(self):\n #There is at least 1 function R^20->R to be tested, but this leads to s=None\n for name, f, fprime, x, p, old_f in self.line_iter():\n jac = lambda x: fprime(x)\n x0 = nl._as_inexact(x)\n func = lambda z: nl._as_inexact(f(nl._array_like(z, x0))).flatten()\n x = x0.flatten()\n jacobian = nl.asjacobian(jac)\n jacobian.setup(x.copy(), f(x), func)\n options = {'jacobian': jacobian, 'jac_tol': min(1e-03,1e-03*norm(f(x))), 'amin':1e-8}\n #print(\"1: \",f(x),np.shape(dp(x)))\n s, f_new= ls.scalar_search_bsc(func, x, fprime(x), f(x), parameters=options)\n #print(\"2: \",p_new, s)\n assert_fp_equal(f_new, x+s*fprime(x), name)\n assert_bsc(s, x, fprime(x), func, jacobian, options, err_msg=\"%s %g\" % name)\n ###(LS)###\n \n # -- Generic line searches\n\n def test_line_search_wolfe1(self):\n c = 0\n smax = 100\n for name, f, fprime, x, p, old_f in self.line_iter():\n f0 = f(x)\n g0 = fprime(x)\n self.fcount = 0\n s, fc, gc, fv, ofv, gv = ls.line_search_wolfe1(f, fprime, x, p,\n g0, f0, old_f,\n amax=smax)\n assert_equal(self.fcount, fc+gc)\n assert_fp_equal(ofv, f(x))\n if s is None:\n continue\n assert_fp_equal(fv, f(x + s*p))\n assert_array_almost_equal(gv, fprime(x + s*p), decimal=14)\n if s < smax:\n c += 1\n assert_line_wolfe(x, p, s, f, fprime, err_msg=name)\n\n assert_(c > 3) # check that the iterator really works...\n\n def test_line_search_wolfe2(self):\n c = 0\n smax = 512\n for name, f, fprime, x, p, old_f in self.line_iter():\n f0 = f(x)\n g0 = fprime(x)\n self.fcount = 0\n with suppress_warnings() as sup:\n sup.filter(LineSearchWarning,\n \"The line search algorithm could not find a solution\")\n sup.filter(LineSearchWarning,\n \"The line search algorithm did not converge\")\n s, fc, gc, fv, ofv, gv = ls.line_search_wolfe2(f, fprime, x, p,\n g0, f0, old_f,\n amax=smax)\n assert_equal(self.fcount, fc+gc)\n assert_fp_equal(ofv, f(x))\n assert_fp_equal(fv, f(x + s*p))\n if gv is not None:\n assert_array_almost_equal(gv, fprime(x + s*p), decimal=14)\n if s < smax:\n c += 1\n assert_line_wolfe(x, p, s, f, fprime, err_msg=name)\n assert_(c > 3) # check that the iterator really works...\n\n def test_line_search_wolfe2_bounds(self):\n # See gh-7475\n\n # For this f and p, starting at a point on axis 0, the strong Wolfe\n # condition 2 is met if and only if the step length s satisfies\n # |x + s| <= c2 * |x|\n f = lambda x: np.dot(x, x)\n fp = lambda x: 2 * x\n p = np.array([1, 0])\n\n # Smallest s satisfying strong Wolfe conditions for these arguments is 30\n x = -60 * p\n c2 = 0.5\n\n s, _, _, _, _, _ = ls.line_search_wolfe2(f, fp, x, p, amax=30, c2=c2)\n 
assert_line_wolfe(x, p, s, f, fp)\n\n s, _, _, _, _, _ = assert_warns(LineSearchWarning,\n ls.line_search_wolfe2, f, fp, x, p,\n amax=29, c2=c2)\n assert_(s is None)\n\n # s=30 will only be tried on the 6th iteration, so this won't converge\n assert_warns(LineSearchWarning, ls.line_search_wolfe2, f, fp, x, p,\n c2=c2, maxiter=5)\n\n def test_line_search_armijo(self):\n c = 0\n for name, f, fprime, x, p, old_f in self.line_iter():\n f0 = f(x)\n g0 = fprime(x)\n self.fcount = 0\n s, fc, fv = ls.line_search_armijo(f, x, p, g0, f0)\n c += 1\n assert_equal(self.fcount, fc)\n assert_fp_equal(fv, f(x + s*p))\n assert_line_armijo(x, p, s, f, err_msg=name)\n assert_(c >= 9)\n\n # -- More specific tests\n\n def test_armijo_terminate_1(self):\n # Armijo should evaluate the function only once if the trial step\n # is already suitable\n count = [0]\n\n def phi(s):\n count[0] += 1\n return -s + 0.01*s**2\n s, phi1 = ls.scalar_search_armijo(phi, phi(0), -1, alpha0=1)\n assert_equal(s, 1)\n assert_equal(count[0], 2)\n assert_armijo(s, phi)\n\n def test_wolfe_terminate(self):\n # wolfe1 and wolfe2 should also evaluate the function only a few\n # times if the trial step is already suitable\n\n def phi(s):\n count[0] += 1\n return -s + 0.05*s**2\n\n def derphi(s):\n count[0] += 1\n return -1 + 0.05*2*s\n\n for func in [ls.scalar_search_wolfe1, ls.scalar_search_wolfe2]:\n count = [0]\n r = func(phi, derphi, phi(0), None, derphi(0))\n assert_(r[0] is not None, (r, func))\n assert_(count[0] <= 2 + 2, (count, func))\n assert_wolfe(r[0], phi, derphi, err_msg=str(func))\n"
] | [
[
"scipy.optimize.nonlin._array_like",
"numpy.testing.assert_equal",
"numpy.random.seed",
"numpy.testing.assert_warns",
"scipy.optimize.linesearch.line_search_armijo",
"scipy.optimize.nonlin._as_inexact",
"scipy.optimize.linesearch.line_search_wolfe1",
"scipy.optimize.nonlin.asjacobian",
"scipy.linalg.norm",
"numpy.cos",
"scipy.optimize.linesearch.prepare_parameters",
"scipy.optimize.linesearch.line_search_wolfe2",
"scipy.optimize.linesearch.scalar_search_wolfe2",
"numpy.testing.assert_array_almost_equal_nulp",
"numpy.random.randn",
"numpy.exp",
"numpy.array",
"numpy.sin",
"numpy.dot",
"numpy.testing.suppress_warnings",
"numpy.testing.assert_"
]
] |
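The line-search test entry above repeatedly asserts step-length conditions. For readers skimming the assertions, here is a stand-alone numerical restatement of the classical sufficient-decrease (Armijo) check for a simple 1-D quadratic, in plain NumPy-style Python; note that the file's own assert_armijo uses a slightly different norm-based variant, so this is an illustration of the idea, not a copy of the test.

def satisfies_armijo(phi, dphi0, s, c1=1e-4):
    """Sufficient decrease: phi(s) <= phi(0) + c1 * s * phi'(0)."""
    return phi(s) <= phi(0) + c1 * s * dphi0

# phi(s) = (s - 5)**2 has phi'(0) = -10, so modest positive steps give sufficient decrease
phi = lambda s: (s - 5.0) ** 2
dphi0 = -10.0
for s in (0.5, 1.0, 5.0, 12.0):
    print(s, satisfies_armijo(phi, dphi0, s))  # True for 0.5, 1.0, 5.0; False for 12.0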
lxchen2019/Python-Baseball | [
"0498830e92c67de8221aac1777651ae141df0ec6"
] | [
"stats/attendance.py"
] | [
"import pandas as pd\nimport matplotlib.pyplot as plt\n\nfrom data import games\n\nattendance = games.loc[(games['type'] == 'info') & (games['multi2'] == 'attendance'), ['year', 'multi3']]\nattendance.columns = ['year', 'attendance']\n\nattendance.loc[:, 'attendance'] = pd.to_numeric(attendance.loc[:, 'attendance'])\nattendance.plot(x='year', y='attendance', figsize = (15, 7), kind = 'bar')\n\nplt.xlabel('Year')\nplt.ylabel('Attendance')\nplt.axhline(y=attendance['attendance'].mean(), label='Mean', linestyle='--', color='green')\nplt.show()\n"
] | [
[
"pandas.to_numeric",
"matplotlib.pyplot.show",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel"
]
] |
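One small caveat about the attendance script above: plt.axhline is given label='Mean', but Matplotlib only shows that label once a legend is drawn, and the entry never calls plt.legend(). A self-contained sketch of the same bar-plus-mean-line pattern with the legend included (stand-in numbers, not the games data):

import matplotlib.pyplot as plt

values = [81, 74, 93, 88]   # stand-in yearly attendance figures
plt.bar(range(len(values)), values)
plt.axhline(y=sum(values) / len(values), label='Mean', linestyle='--', color='green')
plt.legend()                # without this call the 'Mean' label never appears
plt.show()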
xinglu/Tensorflow-2.0-Computer-Vision-Cookbook | [
"92ea6713f664cff9eccaaccea8ac756f808e2066"
] | [
"ch1/recipe4/load_save_model.py"
] | [
"import numpy as np\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.preprocessing import LabelBinarizer\nfrom tensorflow.keras.datasets import mnist\nfrom tensorflow.keras.layers import *\nfrom tensorflow.keras.models import *\n\n\ndef load_data():\n (X_train, y_train), (X_test, y_test) = mnist.load_data()\n\n # Normalize data.\n X_train = X_train.astype('float32') / 255.0\n X_test = X_test.astype('float32') / 255.0\n\n # Reshape grayscale to include channel dimension.\n X_train = np.expand_dims(X_train, axis=3)\n X_test = np.expand_dims(X_test, axis=3)\n\n # Process labels.\n label_binarizer = LabelBinarizer()\n y_train = label_binarizer.fit_transform(y_train)\n y_test = label_binarizer.fit_transform(y_test)\n\n return X_train, y_train, X_test, y_test\n\n\ndef build_network():\n input_layer = Input(shape=(28, 28, 1), name='input_layer')\n convolution_1 = Conv2D(kernel_size=(2, 2),\n padding='same',\n strides=(2, 2),\n filters=32,\n name='convolution_1')(input_layer)\n activation_1 = ReLU(name='activation_1')(convolution_1)\n batch_normalization_1 = BatchNormalization(name='batch_normalization_1')(activation_1)\n pooling_1 = MaxPooling2D(pool_size=(2, 2),\n strides=(1, 1),\n padding='same',\n name='pooling_1')(batch_normalization_1)\n dropout = Dropout(rate=0.5, name='dropout')(pooling_1)\n\n flatten = Flatten(name='flatten')(dropout)\n dense_1 = Dense(units=128, name='dense_1')(flatten)\n activation_2 = ReLU(name='activation_2')(dense_1)\n dense_2 = Dense(units=10, name='dense_2')(activation_2)\n output = Softmax(name='output')(dense_2)\n\n network = Model(inputs=input_layer, outputs=output, name='my_model')\n\n return network\n\n\ndef evaluate(model, X_test, y_test):\n _, accuracy = model.evaluate(X_test, y_test, verbose=0)\n print(f'Accuracy: {accuracy}')\n\n\nprint('Loading and pre-processing data.')\nX_train, y_train, X_test, y_test = load_data()\n\n# Split dataset.\nX_train, X_valid, y_train, y_valid = train_test_split(X_train, y_train, train_size=0.8)\n\n# Build network.\nmodel = build_network()\n\n# Compile and train model.\nprint('Training network...')\nmodel.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])\nmodel.fit(X_train, y_train, validation_data=(X_valid, y_valid), epochs=40, batch_size=1024)\n\nprint('Saving model and weights as HDF5.')\nmodel.save('model_and_weights.hdf5')\n\nprint('Loading model and weights as HDF5.')\nloaded_model = load_model('model_and_weights.hdf5')\n\nprint('Evaluating using loaded model.')\nevaluate(loaded_model, X_test, y_test)\n"
] | [
[
"tensorflow.keras.datasets.mnist.load_data",
"numpy.expand_dims",
"sklearn.model_selection.train_test_split",
"sklearn.preprocessing.LabelBinarizer"
]
] |
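The load_save_model.py entry above saves and reloads the full network as a single HDF5 file. A natural follow-up is to verify that the restored model reproduces the original predictions; the sketch below does that round trip with a tiny stand-in network rather than the cookbook's my_model, so it runs on its own in a few seconds.

import numpy as np
import tensorflow as tf

# tiny stand-in network; the entry saves its functional 'my_model' the same way
model = tf.keras.Sequential([
    tf.keras.layers.Dense(3, activation='softmax', input_shape=(4,)),
])
model.compile(optimizer='adam', loss='categorical_crossentropy')

model.save('tiny_model.hdf5')   # architecture + weights in one HDF5 file
restored = tf.keras.models.load_model('tiny_model.hdf5')

x = np.random.rand(8, 4).astype('float32')
# the restored network should reproduce the original predictions up to float tolerance
assert np.allclose(model.predict(x), restored.predict(x), atol=1e-6)
print('HDF5 save/load round trip preserved the predictions')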
hadi-gharibi/pupil | [
"9d266572cc1ebf659e87206be6e5f1548959d510"
] | [
"pupil/models/clustering.py"
] | [
"from abc import ABC, abstractmethod\nfrom typing import Dict, Protocol, Tuple\n\nimport faiss\nimport numpy as np\nfrom pupil.types import NDArray2D\nfrom sklearn.cluster import AgglomerativeClustering\n\n\nclass Clustering(Protocol):\n n_clusters: int\n\n def fit(self, X: NDArray2D):\n ...\n\n def predict(self, X: NDArray2D) -> Tuple[NDArray2D, NDArray2D]:\n\n ...\n\n def distance_to_cluster_centers(self, X: NDArray2D) -> Tuple[NDArray2D, NDArray2D]:\n \"\"\"After having the center of your clusters, you can use this function to see the distance from X and center of all clusters\n\n Args:\n X (NDArray2D): The input to check.\n\n Returns:\n Tuple[NDArray2D, NDArray2D]: Return (Distances, cluster_ids). Shape of each: (#queries, #clusters)\n \"\"\"\n ...\n\n\nclass FaissKMeansClustering:\n def __init__(\n self,\n n_clusters: int,\n n_init: int = 10,\n max_iter: int = 100,\n ) -> None:\n self.n_clusters = n_clusters\n self.n_init = n_init\n self.max_iter = max_iter\n self.cluster_centers_ = None\n self.inertia_ = None\n\n def fit(self, X: NDArray2D) -> None:\n self.kmeans = faiss.Kmeans(\n d=X.shape[1],\n k=self.n_clusters,\n niter=self.max_iter,\n nredo=self.n_init,\n )\n X = X / np.linalg.norm(X)\n self.kmeans.train(X.astype(np.float32))\n self.cluster_centers_ = self.kmeans.centroids\n self.inertia_ = self.kmeans.obj[-1]\n\n def predict(self, X: NDArray2D) -> Tuple[NDArray2D, NDArray2D]:\n X = X / np.linalg.norm(X)\n return self.kmeans.index.search(X.astype(np.float32), 1) # type: ignore\n\n def distance_to_cluster_centers(self, X: NDArray2D) -> Tuple[NDArray2D, NDArray2D]:\n X = X / np.linalg.norm(X)\n D, I = self.kmeans.index.search(X.astype(np.float32), self.n_clusters) # type: ignore\n return D, I\n\n\nclass Splitter(Protocol):\n def fit(self, X: NDArray2D, clsuter_inds: NDArray2D):\n ...\n\n @property\n def splits(\n self,\n ):\n ...\n\n\nclass Distance1DSplitter:\n def __init__(self, nsplits=3):\n self.nsplits = nsplits\n\n def fit(self, X: NDArray2D, clsuter_inds: NDArray2D) -> None:\n self.clsuter_inds = clsuter_inds\n self.alg = AgglomerativeClustering(n_clusters=self.nsplits)\n self.alg.fit(X.reshape((-1, 1)))\n self._tag_to_index_dict = self._tag_to_index()\n\n def _tag_to_index(self) -> Dict[str, Tuple[int, int]]:\n tags = [\"priority_\" + str(i) for i in range(self.nsplits)]\n\n inds = np.argwhere(np.diff(self.alg.labels_) != 0).flatten().tolist()\n inds.insert(0, -1)\n inds.append(len(self.alg.labels_))\n\n tag_dict = {}\n for i, end in enumerate(inds[1:]):\n start = inds[i] + 1\n tag_dict[tags[i]] = (start, end + 1)\n return tag_dict\n\n @property\n def splits(self):\n res = {}\n for k, v in self._tag_to_index_dict.items():\n res[k] = self.clsuter_inds[0][v[0] : v[1]]\n return res\n"
] | [
[
"numpy.linalg.norm",
"sklearn.cluster.AgglomerativeClustering",
"numpy.diff"
]
] |
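FaissKMeansClustering in the entry above is a thin scikit-learn-style wrapper around faiss.Kmeans. The usage sketch below exercises the same faiss calls directly on random vectors (the global np.linalg.norm normalisation mirrors the wrapper's own quirk); it assumes faiss is installed, and the data and cluster count are arbitrary.

import faiss
import numpy as np

rng = np.random.default_rng(42)
X = rng.normal(size=(1000, 64)).astype(np.float32)
X /= np.linalg.norm(X)          # same whole-matrix normalisation the wrapper applies

kmeans = faiss.Kmeans(d=X.shape[1], k=8, niter=100, nredo=10)
kmeans.train(X)

# nearest centroid for each query, mirroring FaissKMeansClustering.predict
queries = rng.normal(size=(5, 64)).astype(np.float32)
queries /= np.linalg.norm(queries)
distances, cluster_ids = kmeans.index.search(queries, 1)
print(cluster_ids.ravel(), kmeans.obj[-1])   # assigned clusters and final k-means objective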
h1-the-swan/paper_collection | [
"f07ad5cd8c40ddd75df2031b15c49eee60f1d914"
] | [
"tests/test_paper_collection.py"
] | [
"#!/usr/bin/env python\n\n\"\"\"Tests for `paper_collection` package.\"\"\"\n\n\nimport unittest\n\nfrom paper_collection import paper_collection\n\nimport pandas as pd\nimport numpy as np\n\n\nclass TestPaper_collection(unittest.TestCase):\n \"\"\"Tests for `paper_collection` package.\"\"\"\n\n def setUp(self):\n \"\"\"Set up test fixtures, if any.\"\"\"\n self.df_papers = pd.read_csv('tests/jw_papers_mag2019.tsv', sep='\\t')\n self.df_papers.drop_duplicates(subset=['PaperId'], inplace=True)\n self.num_papers = len(self.df_papers)\n self.df_citations = pd.read_csv('tests/jw_citations_mag2019.tsv', sep='\\t')\n self.num_citations = len(self.df_citations)\n self.df_authors = pd.read_csv('tests/jw_PaperAuthorAffiliations_mag2019.tsv', sep='\\t')\n self.authors_by_paper = self.get_authors_by_paper(self.df_authors)\n\n def tearDown(self):\n \"\"\"Tear down test fixtures, if any.\"\"\"\n\n def get_authors_by_paper(self, df_authors):\n \"\"\"Get a dictionary mapping paper_id to author data\n\n \"\"\"\n author_data = {}\n for paper_id, group in df_authors.groupby('PaperId'):\n group = group.sort_values('AuthorSequenceNumber')\n this_authors = []\n for _, row in group.iterrows():\n this_authors.append({'name': row.OriginalAuthor, 'author_id': row.AuthorId})\n author_data[paper_id] = this_authors\n return author_data\n\n def load_paper(self, prow):\n paper_id = prow.PaperId\n authors = self.authors_by_paper[paper_id]\n return paper_collection.Paper(dataset='mag',\n dataset_version='mag-2019-11-22',\n paper_id=paper_id,\n title=prow.PaperTitle,\n display_title=prow.OriginalTitle,\n doi=prow.Doi,\n pub_date=prow.Date,\n year=prow.Year,\n venue=prow.OriginalVenue,\n authors=authors,\n node_rank=prow.flow)\n\n def test_000_single_paper(self):\n \"\"\"Load a single paper\"\"\"\n prow = self.df_papers.iloc[0]\n p = self.load_paper(prow)\n assert p.display_title is not None\n assert len(p.display_title)\n\n def test_001_collection(self):\n \"\"\"Load a collection\"\"\"\n coll = paper_collection.PaperCollection(description=\"Paper Collection\")\n for _, prow in self.df_papers.iterrows():\n p = self.load_paper(prow)\n coll.papers.append(p)\n assert len(coll) == self.num_papers\n\n def test_002_graph(self):\n \"\"\"Construct graph\"\"\"\n coll = paper_collection.PaperCollection(description=\"Paper Collection\")\n for _, prow in self.df_papers.iterrows():\n p = self.load_paper(prow)\n coll.papers.append(p)\n for _, row in self.df_citations.iterrows():\n coll.citations.append((row.PaperId, row.PaperReferenceId))\n G = coll.construct_graph()\n assert G.number_of_nodes() == self.num_papers\n assert G.number_of_edges() == self.num_citations\n\n"
] | [
[
"pandas.read_csv"
]
] |
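The tests above exercise PaperCollection.construct_graph, whose result exposes number_of_nodes/number_of_edges — which suggests (but the entry does not confirm) a NetworkX-style graph underneath. Purely as an illustration of the citation-graph idea being tested, and not of the paper_collection API itself, a directed graph can be built from (citing, cited) pairs like this:

import networkx as nx

papers = [101, 102, 103]                            # stand-in paper ids
citations = [(102, 101), (103, 101), (103, 102)]    # (citing, cited) pairs

G = nx.DiGraph()
G.add_nodes_from(papers)
G.add_edges_from(citations)

print(G.number_of_nodes(), G.number_of_edges())     # 3 3
print(list(G.predecessors(101)))                    # papers citing 101 -> [102, 103]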
mindspore-ai/mindspore | [
"a9fbb25530a2874166ff0045ddcdfc73207bf5eb"
] | [
"mindspore/nn/optim/thor.py"
] | [
"# Copyright 2021 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"thor\"\"\"\nimport numpy as np\nfrom mindspore.ops import functional as F, composite as C, operations as P\nfrom mindspore.common.initializer import initializer\nfrom mindspore.common.parameter import Parameter, ParameterTuple\nfrom mindspore.common.tensor import Tensor\nimport mindspore.nn as nn\nimport mindspore.common.dtype as mstype\nimport mindspore.log as logger\nfrom mindspore._checkparam import Validator\nfrom mindspore.nn.optim.optimizer import Optimizer\nfrom mindspore.parallel._utils import _get_device_num, _get_gradients_mean\nfrom mindspore import context\nfrom mindspore.context import ParallelMode\nfrom mindspore.nn.layer import DenseThor, Conv2dThor, EmbeddingThor, EmbeddingLookupThor\nfrom mindspore.nn.wrap import DistributedGradReducer\nfrom mindspore.train.train_thor.convert_utils import ConvertNetUtils\nfrom mindspore.parallel._auto_parallel_context import auto_parallel_context\n\n\n# Enumerates types of Layer\nOther = -1\nConv = 1\nFC = 2\nEmbedding = 3\nLayerNorm = 4\nBatchNorm = 5\n\nop_add = P.AddN()\napply_decay = C.MultitypeFuncGraph(\"apply_decay\")\n_momentum_opt = C.MultitypeFuncGraph(\"momentum_opt\")\n\n\n@apply_decay.register(\"Number\", \"Bool\", \"Tensor\", \"Tensor\")\ndef _tensor_apply_decay(weight_decay, if_apply, weight, gradient):\n \"\"\"Get grad with weight_decay.\"\"\"\n if if_apply:\n return op_add((weight * weight_decay, gradient))\n return gradient\n\n\n@_momentum_opt.register(\"Function\", \"Tensor\", \"Tensor\", \"Tensor\", \"Tensor\", \"Tensor\")\ndef _tensor_run_opt_ext(opt, momentum, learning_rate, gradient, weight, moment):\n \"\"\"Apply momentum optimizer to the weight parameter using Tensor.\"\"\"\n success = True\n success = F.depend(success, opt(weight, moment, learning_rate, gradient, momentum))\n return success\n\nIS_ENABLE_GLOBAL_NORM = False\nGRADIENT_CLIP_TYPE = 1\nGRADIENT_CLIP_VALUE = 1.0\nclip_grad = C.MultitypeFuncGraph(\"clip_grad\")\nhyper_map_op = C.HyperMap()\n\n\n@clip_grad.register(\"Number\", \"Number\", \"Tensor\")\ndef _clip_grad(clip_type, clip_value, grad):\n \"\"\"\n Clip gradients.\n\n Inputs:\n clip_type (int): The way to clip, 0 for 'value', 1 for 'norm'.\n clip_value (float): Specifies how much to clip.\n grad (tuple[Tensor]): Gradients.\n\n Outputs:\n tuple[Tensor], clipped gradients.\n \"\"\"\n if clip_type not in [0, 1]:\n return grad\n dt = F.dtype(grad)\n if clip_type == 0:\n new_grad = C.clip_by_value(grad, F.cast(F.tuple_to_array((-clip_value,)), dt),\n F.cast(F.tuple_to_array((clip_value,)), dt))\n else:\n new_grad = nn.ClipByNorm()(grad, F.cast(F.tuple_to_array((clip_value,)), dt))\n return new_grad\n\n\ndef clip_gradient(enable_clip_grad, gradients):\n \"\"\"clip gradients\"\"\"\n if enable_clip_grad:\n if IS_ENABLE_GLOBAL_NORM:\n gradients = C.clip_by_global_norm(gradients, GRADIENT_CLIP_VALUE, None)\n else:\n gradients = 
hyper_map_op(F.partial(clip_grad, GRADIENT_CLIP_TYPE, GRADIENT_CLIP_VALUE), gradients)\n return gradients\n\nC0 = 16\n\n\ndef _check_param(momentum, frequency, lr, cls_name):\n \"\"\"Check param.\"\"\"\n Validator.check_value_type(\"momentum\", momentum, [float], cls_name)\n if isinstance(momentum, float) and momentum < 0.0:\n raise ValueError(\"momentum should be at least 0.0, but got momentum {}\".format(momentum))\n Validator.check_value_type(\"frequency\", frequency, [int], cls_name)\n if isinstance(frequency, int) and frequency < 2:\n raise ValueError(\"frequency should be at least 2, but got frequency {}\".format(frequency))\n Validator.check_value_type(\"learning rate\", lr, [Tensor], cls_name)\n\n\ndef caculate_device_shape(matrix_dim, channel, is_a):\n if is_a:\n if channel // C0 == 0:\n matrix_dim = (matrix_dim / channel) * C0\n ll = (int(matrix_dim // C0), int(matrix_dim // C0), C0, C0), int(matrix_dim)\n return ll\n\n\ndef is_conv_matmul_support_shape(matrix_a_shape, matrix_g_shape):\n \"\"\"is conv layer matmul support shape\"\"\"\n temp = (matrix_g_shape, matrix_a_shape)\n support_shape = [((4, 4, 16, 16), (49, 49, 16, 16)),\n ((4, 4, 16, 16), (4, 4, 16, 16)),\n ((4, 4, 16, 16), (36, 36, 16, 16)),\n ((16, 16, 16, 16), (4, 4, 16, 16)),\n ((4, 4, 16, 16), (16, 16, 16, 16)),\n ((8, 8, 16, 16), (16, 16, 16, 16)),\n ((8, 8, 16, 16), (72, 72, 16, 16)),\n ((32, 32, 16, 16), (8, 8, 16, 16)),\n ((32, 32, 16, 16), (16, 16, 16, 16)),\n ((8, 8, 16, 16), (32, 32, 16, 16)),\n ((16, 16, 16, 16), (32, 32, 16, 16)),\n ((16, 16, 16, 16), (144, 144, 16, 16)),\n ((64, 64, 16, 16), (16, 16, 16, 16)),\n ((64, 64, 16, 16), (32, 32, 16, 16)),\n ((16, 16, 16, 16), (64, 64, 16, 16)),\n ((32, 32, 16, 16), (64, 64, 16, 16)),\n ((32, 32, 16, 16), (288, 288, 16, 16)),\n ((128, 128, 16, 16), (32, 32, 16, 16)),\n ((128, 128, 16, 16), (64, 64, 16, 16)),\n ((32, 32, 16, 16), (128, 128, 16, 16))]\n if temp in support_shape:\n return True\n return False\n\n\ndef caculate_matmul_shape(matrix_a_dim, matrix_g_dim, split_dim):\n \"\"\"get matmul shape\"\"\"\n split_dima = split_dim\n split_dimg = split_dim\n if matrix_a_dim % split_dim == 0:\n batch_w = matrix_a_dim // split_dim\n else:\n if matrix_a_dim < split_dim:\n batch_w = 1\n split_dima = matrix_a_dim\n else:\n batch_w = matrix_a_dim // split_dim + 1\n\n if matrix_g_dim % split_dim == 0:\n batch_h = matrix_g_dim // split_dim\n else:\n if matrix_g_dim < split_dim:\n batch_h = 1\n split_dimg = matrix_g_dim\n else:\n batch_h = matrix_g_dim // split_dim + 1\n matrix_a_shape = (batch_h, batch_w, split_dima, split_dima)\n matrix_g_shape = (batch_h, split_dimg, split_dimg)\n return matrix_a_shape, matrix_g_shape\n\n\ndef get_layer_type_for_dense_and_conv(subcell, prefix, layertype_map):\n \"\"\"get layer type for dense layer and conv layer\"\"\"\n if subcell.weight.requires_grad:\n if \"rpn_with_loss.rpn_convs_list.\" not in prefix.lower() \\\n or \"rpn_with_loss.rpn_convs_list.0.\" in prefix.lower():\n layertype_map.append(Other)\n\n\ndef find_net_layertype_recur(net, layertype_map):\n \"\"\"get net layer type recursively.\"\"\"\n cells = net.name_cells()\n for name in cells:\n subcell = cells[name]\n prefix = subcell.param_prefix\n if subcell == net:\n continue\n elif isinstance(subcell, Conv2dThor):\n layertype_map.append(Conv)\n elif isinstance(subcell, DenseThor):\n layertype_map.append(FC)\n elif isinstance(subcell, (EmbeddingThor, EmbeddingLookupThor)):\n layertype_map.append(Embedding)\n elif isinstance(subcell, nn.LayerNorm):\n 
layertype_map.append(LayerNorm)\n elif isinstance(subcell, nn.BatchNorm2d):\n if subcell.gamma.requires_grad:\n layertype_map.append(BatchNorm)\n elif isinstance(subcell, (nn.Conv2d, nn.Dense, nn.Embedding, nn.Conv2dTranspose, nn.Conv1d, nn.Conv1dTranspose,\n nn.BatchNorm1d, nn.GroupNorm, nn.GlobalBatchNorm)):\n if isinstance(subcell, (nn.Dense, nn.Conv2d)):\n get_layer_type_for_dense_and_conv(subcell, prefix, layertype_map)\n else:\n layertype_map.append(Other)\n else:\n find_net_layertype_recur(subcell, layertype_map)\n\n\ndef get_net_layertype_mask(net):\n layertype_map = []\n find_net_layertype_recur(net, layertype_map)\n return layertype_map\n\n\ndef get_layer_counter(layer_type, layer_counter, params, idx):\n \"\"\"get layer counter\"\"\"\n if layer_type in [Conv, FC]:\n if \"bias\" in params[idx].name.lower():\n layer_counter = layer_counter + 1\n else:\n if idx < len(params) - 1 and \"bias\" not in params[idx + 1].name.lower():\n layer_counter = layer_counter + 1\n elif layer_type in [LayerNorm, BatchNorm]:\n if \"beta\" in params[idx].name.lower():\n layer_counter = layer_counter + 1\n else:\n if \"bias\" in params[idx].name.lower():\n layer_counter = layer_counter + 1\n elif \"weight\" in params[idx].name.lower():\n if idx < len(params) - 1 and \"bias\" not in params[idx + 1].name.lower():\n layer_counter = layer_counter + 1\n else:\n layer_counter = layer_counter + 1\n return layer_counter\n\n\ndef thor(net, learning_rate, damping, momentum, weight_decay=0.0, loss_scale=1.0, batch_size=32,\n use_nesterov=False, decay_filter=lambda x: x.name not in [], split_indices=None, enable_clip_grad=False,\n frequency=100):\n r\"\"\"\n Updates gradients by second-order algorithm--THOR.\n\n Trace-based Hardware-driven layer-ORiented Natural Gradient Descent Computation (THOR) algorithm is proposed in:\n\n `THOR: Trace-based Hardware-driven layer-ORiented Natural Gradient Descent Computation\n <https://www.aaai.org/AAAI21Papers/AAAI-6611.ChenM.pdf>`_\n\n The updating formulas are as follows,\n\n .. math::\n \\begin{array}{ll} \\\\\n A_i = a_i{a_i}^T \\\\\n G_i = D_{s_i}{ D_{s_i}}^T \\\\\n m_i = \\beta * m_i + ({G_i^{(k)}}+\\lambda I)^{-1}) g_i ({\\overline A_{i-1}^{(k)}}+\\lambda I)^{-1} \\\\\n w_i = w_i - \\alpha * m_i \\\\\n \\end{array}\n\n :math:`D_{s_i}` represents the derivative of the loss function of the output of the i-th layer,\n :math:`a_{i-1}` represents the input of i-th layer,and which is the activations of previous layer,\n :math:`\\beta` represents momentum, :math:`I` represents the identity matrix,\n :math:`\\overline A` represents the transpose of matrix A,\n :math:`\\lambda` represents 'damping', :math:`g_i` represents gradients of the i-th layer,\n :math:`\\otimes` represents Kronecker product, :math:`\\alpha` represents 'learning rate'\n\n Note:\n When separating parameter groups, the weight decay in each group will be applied on the parameters if the\n weight decay is positive. 
When not separating parameter groups, the `weight_decay` in the API will be applied\n on the parameters without 'beta' or 'gamma' in their names if `weight_decay` is positive.\n\n When separating parameter groups, if you want to centralize the gradient, set grad_centralization to True,\n but the gradient centralization can only be applied to the parameters of the convolution layer.\n If the parameters of the non convolution layer are set to True, an error will be reported.\n\n To improve parameter groups performance, the customized order of parameters can be supported.\n\n Args:\n net (Cell): The training network.\n\n learning_rate (Tensor): A value for the learning rate.\n\n damping (Tensor): A value for the damping.\n\n momentum (float): Hyper-parameter of type float, means momentum for the moving average. It must be at least 0.0.\n\n weight_decay (int, float): Weight decay (L2 penalty). It must be equal to or greater than 0.0. Default: 0.0.\n\n loss_scale (float): A value for the loss scale. It must be greater than 0.0. In general, use the\n default value. Default: 1.0.\n\n batch_size (int): The size of a batch. Default: 32\n\n use_nesterov (bool): Enable Nesterov momentum. Default: False.\n\n decay_filter (function): A function to determine which layers the weight decay applied to. And it\n only works when the weight_decay > 0. Default: lambda x: x.name not in []\n\n split_indices (list): Set allreduce fusion strategy by A/G layer indices . Only works when distributed\n computing. ResNet50 as an example, there are 54 layers of A/G respectively, when split_indices is set\n to [26, 53], it means A/G is divided into two groups to allreduce, one is 0~26 layer, and the other\n is 27~53. Default: None\n\n enable_clip_grad (bool): Whether to clip the gradients. Default: False\n\n frequency(int): The update interval of A/G and $A^{-1}/G^{-1}$. When frequency equals N (N is greater than 1),\n A/G and $A^{-1}/G^{-1}$ will be updated every N steps, and other steps will use the stale A/G and\n $A^{-1}/G^{-1}$ to update weights. Default: 100.\n\n Inputs:\n - **gradients** (tuple[Tensor]) - The gradients of `params`, the shape is the same as `params`.\n\n Outputs:\n tuple[bool], all elements are True.\n\n Raises:\n TypeError: If `learning_rate` is not Tensor.\n TypeError: If `loss_scale`,`momentum` or `frequency` is not a float.\n TypeError: If `weight_decay` is neither float nor int.\n TypeError: If `use_nesterov` is not a bool.\n ValueError: If `loss_scale` is less than or equal to 0.\n ValueError: If `weight_decay` or `momentum` is less than 0.\n ValueError: If `frequency` is not int.\n ValueError: If `frequency` is less than 2.\n\n Supported Platforms:\n ``Ascend`` ``GPU``\n\n Examples:\n >>> from mindspore.nn import thor\n >>> from mindspore import Model\n >>> from mindspore import FixedLossScaleManager\n >>> from mindspore.train.callback import LossMonitor\n >>> from mindspore.train.train_thor import ConvertModelUtils\n >>> from mindspore import nn\n >>> from mindspore import Tensor\n >>>\n >>> net = Net()\n >>> dataset = create_dataset()\n >>> temp = Tensor([4e-4, 1e-4, 1e-5, 1e-5], mstype.float32)\n >>> optim = thor(net, learning_rate=temp, damping=temp, momentum=0.9, loss_scale=128, frequency=4)\n >>> loss = nn.SoftmaxCrossEntropyWithLogits(sparse=True, reduction='mean')\n >>> loss_scale = FixedLossScaleManager(128, drop_overflow_update=False)\n >>> model = Model(net, loss_fn=loss, optimizer=optim, loss_scale_manager=loss_scale, metrics={'acc'},\n ... 
amp_level=\"O2\", keep_batchnorm_fp32=False)\n >>> model = ConvertModelUtils.convert_to_thor_model(model=model, network=net, loss_fn=loss, optimizer=optim,\n ... loss_scale_manager=loss_scale, metrics={'acc'},\n ... amp_level=\"O2\", keep_batchnorm_fp32=False)\n >>> loss_cb = LossMonitor()\n >>> model.train(1, dataset, callbacks=loss_cb, sink_size=4, dataset_sink_mode=True)\n\n \"\"\"\n context.set_context(max_call_depth=10000)\n ConvertNetUtils().convert_to_thor_net(net)\n if context.get_context(\"device_target\") == \"Ascend\":\n return ThorAscend(net, learning_rate, damping, momentum, weight_decay, loss_scale, batch_size, decay_filter,\n split_indices=split_indices, enable_clip_grad=enable_clip_grad, frequency=frequency)\n return ThorGpu(net, learning_rate, damping, momentum, weight_decay, loss_scale, batch_size,\n use_nesterov, decay_filter, split_indices=split_indices, enable_clip_grad=enable_clip_grad,\n frequency=frequency)\n\n\nclass ThorGpu(Optimizer):\n \"\"\"\n ThorGpu\n \"\"\"\n def __init__(self, net, learning_rate, damping, momentum, weight_decay=0.0, loss_scale=1.0, batch_size=32,\n use_nesterov=False, decay_filter=lambda x: x.name not in [], split_indices=None,\n enable_clip_grad=False, frequency=100):\n params = filter(lambda x: x.requires_grad, net.get_parameters())\n super(ThorGpu, self).__init__(learning_rate, params, weight_decay, loss_scale)\n _check_param(momentum, frequency, learning_rate, self.__class__.__name__)\n self.momentum = Parameter(Tensor(momentum, mstype.float32), name=\"momentum\")\n self.params = self.parameters\n self.use_nesterov = Validator.check_bool(use_nesterov)\n self.moments = self.params.clone(prefix=\"moments\", init='zeros')\n self.hyper_map = C.HyperMap()\n self.opt = P.ApplyMomentum(use_nesterov=self.use_nesterov)\n self.net = net\n self.matrix_a_cov = ParameterTuple(filter(lambda x: 'matrix_a' in x.name, net.get_parameters()))\n self.matrix_g_cov = ParameterTuple(filter(lambda x: 'matrix_g' in x.name, net.get_parameters()))\n self.a_normalizer = ParameterTuple(filter(lambda x: 'a_normalizer' in x.name, net.get_parameters()))\n self.g_normalizer = ParameterTuple(filter(lambda x: 'g_normalizer' in x.name, net.get_parameters()))\n self.batch_size = Tensor(batch_size, mstype.float32)\n self.loss_scale = Tensor(1 / (loss_scale * loss_scale), mstype.float32)\n self.batch_size_scale = Tensor(batch_size * batch_size, mstype.float32)\n self.damping = damping\n self._define_gpu_operator()\n logger.info(\"matrix_a_cov len is {}\".format(len(self.matrix_a_cov)))\n self.thor = True\n self.matrix_a = ()\n self.matrix_g = ()\n self.matrix_a_shape = ()\n self.thor_layer_count = 0\n self.conv_layer_count = 0\n self.weight_fim_idx_map = ()\n self.weight_conv_idx_map = ()\n self.weight_layertype_idx_map = ()\n self._process_matrix_init_and_weight_idx_map(self.net)\n self.matrix_a = ParameterTuple(self.matrix_a)\n self.matrix_g = ParameterTuple(self.matrix_g)\n self.weight_decay = weight_decay\n self.decay_flags = tuple(decay_filter(x) for x in self.parameters)\n self.update_gradient = P.UpdateThorGradient(split_dim=self.split_dim)\n self.enable_clip_grad = enable_clip_grad\n self.frequency = frequency\n self._define_gpu_reducer(split_indices)\n\n def get_frequency(self):\n \"\"\"get thor frequency\"\"\"\n return self.frequency\n\n def _define_gpu_operator(self):\n \"\"\"define gpu operator\"\"\"\n self.transpose = P.Transpose()\n self.shape = P.Shape()\n self.reshape = P.Reshape()\n self.matmul = P.MatMul()\n self.assign = P.Assign()\n self.mul = P.Mul()\n 
self.gather = P.GatherV2()\n self.one = Tensor(1, mstype.int32)\n self.feature_map = Tensor(1.0, mstype.float32)\n self.axis = 0\n self.cov_step = Parameter(initializer(0, [1], mstype.int32), name=\"cov_step\", requires_grad=False)\n self.cast = P.Cast()\n self.sqrt = P.Sqrt()\n self.eye = P.Eye()\n self.split_dim = 128\n self.embedding_cholesky = P.CholeskyTrsm()\n self.cholesky = P.CholeskyTrsm(split_dim=self.split_dim)\n self.vector_matmul = P.BatchMatMul(transpose_a=True)\n self.reduce_sum = P.ReduceSum(keep_dims=False)\n self.inv = P.Reciprocal()\n self.square = P.Square()\n self.expand = P.ExpandDims()\n\n\n def _define_gpu_reducer(self, split_indices):\n \"\"\"define gpu reducer\"\"\"\n self.parallel_mode = context.get_auto_parallel_context(\"parallel_mode\")\n self.is_distributed = (self.parallel_mode != ParallelMode.STAND_ALONE)\n if self.is_distributed:\n mean = _get_gradients_mean()\n degree = _get_device_num()\n if not split_indices:\n self.split_indices = [len(self.matrix_a_cov) - 1]\n else:\n self.split_indices = split_indices\n auto_parallel_context().set_all_reduce_fusion_split_indices(self.split_indices, \"hccl_world_groupsum6\")\n auto_parallel_context().set_all_reduce_fusion_split_indices(self.split_indices, \"hccl_world_groupsum8\")\n self.grad_reducer_a = DistributedGradReducer(self.matrix_a_cov, mean, degree, fusion_type=6)\n self.grad_reducer_g = DistributedGradReducer(self.matrix_a_cov, mean, degree, fusion_type=8)\n\n\n def _process_matrix_init_and_weight_idx_map(self, net):\n \"\"\"for GPU, process matrix init shape, and get weight idx map\"\"\"\n layer_type_map = get_net_layertype_mask(net)\n layer_counter = 0\n for idx in range(len(self.params)):\n layer_type = layer_type_map[layer_counter]\n weight = self.params[idx]\n weight_shape = self.shape(weight)\n if layer_type in [Conv, FC] and \"bias\" not in self.params[idx].name.lower():\n in_channels = weight_shape[1]\n out_channels = weight_shape[0]\n matrix_a_dim = in_channels\n if layer_type == Conv:\n matrix_a_dim = in_channels * weight_shape[2] * weight_shape[3]\n matrix_g_dim = out_channels\n matrix_a_shape, matrix_g_shape = caculate_matmul_shape(matrix_a_dim, matrix_g_dim, self.split_dim)\n matrix_a_inv = Parameter(np.zeros(matrix_a_shape).astype(np.float32),\n name='matrix_a_inv_' + str(self.thor_layer_count), requires_grad=False)\n matrix_g_inv = Parameter(np.zeros(matrix_g_shape).astype(np.float32),\n name=\"matrix_g_inv_\" + str(self.thor_layer_count), requires_grad=False)\n self.matrix_a = self.matrix_a + (matrix_a_inv,)\n self.matrix_g = self.matrix_g + (matrix_g_inv,)\n self.matrix_a_shape = self.matrix_a_shape + (matrix_a_shape,)\n elif layer_type == Embedding:\n vocab_size = weight_shape[0]\n embedding_size = weight_shape[1]\n matrix_a_inv = Parameter(Tensor(np.zeros([vocab_size]).astype(np.float32)),\n name='matrix_a_inv_' + str(self.thor_layer_count), requires_grad=False)\n matrix_g_inv = Parameter(Tensor(np.zeros([embedding_size, embedding_size]).astype(np.float32)),\n name=\"matrix_g_inv_\" + str(self.thor_layer_count), requires_grad=False)\n self.matrix_a = self.matrix_a + (matrix_a_inv,)\n self.matrix_g = self.matrix_g + (matrix_g_inv,)\n self.matrix_a_shape = self.matrix_a_shape + ((vocab_size,),)\n\n if layer_type in [Conv, FC, Embedding] and \"bias\" not in self.params[idx].name.lower():\n self.weight_fim_idx_map = self.weight_fim_idx_map + (self.thor_layer_count,)\n self.thor_layer_count = self.thor_layer_count + 1\n self.weight_layertype_idx_map = self.weight_layertype_idx_map + 
(layer_type,)\n if layer_type == Conv:\n self.weight_conv_idx_map = self.weight_conv_idx_map + (self.conv_layer_count,)\n self.conv_layer_count = self.conv_layer_count + 1\n else:\n self.weight_conv_idx_map = self.weight_conv_idx_map + (-1,)\n else:\n self.weight_conv_idx_map = self.weight_conv_idx_map + (-1,)\n self.weight_fim_idx_map = self.weight_fim_idx_map + (-1,)\n if layer_type == LayerNorm:\n self.weight_layertype_idx_map = self.weight_layertype_idx_map + (LayerNorm,)\n else:\n self.weight_layertype_idx_map = self.weight_layertype_idx_map + (Other,)\n # bert.cls1.output_bias: not a network layer, only a trainable param\n if \"output_bias\" not in self.params[idx].name.lower():\n layer_counter = get_layer_counter(layer_type, layer_counter, self.params, idx)\n\n def _get_ainv_ginv_list(self, gradients, damping_step, matrix_a_allreduce, matrix_g_allreduce):\n \"\"\"get matrixA inverse list and matrix G inverse list\"\"\"\n for i in range(len(self.params)):\n thor_layer_count = self.weight_fim_idx_map[i]\n conv_layer_count = self.weight_conv_idx_map[i]\n layer_type = self.weight_layertype_idx_map[i]\n if layer_type in [Conv, FC, Embedding]:\n g = gradients[i]\n matrix_a = self.matrix_a_cov[thor_layer_count]\n matrix_g = self.matrix_g_cov[thor_layer_count]\n matrix_a = F.depend(matrix_a, g)\n matrix_g = F.depend(matrix_g, g)\n damping_a = damping_step\n damping_g = damping_step\n feature_map = self.feature_map\n if layer_type == Conv:\n a_normalizer = self.a_normalizer[conv_layer_count]\n g_normalizer = self.g_normalizer[conv_layer_count]\n a_normalizer = F.depend(a_normalizer, g)\n g_normalizer = F.depend(g_normalizer, g)\n damping_a = self.mul(damping_step, 1.0 / a_normalizer)\n damping_g = self.mul(damping_step, 1.0 / g_normalizer)\n feature_map = self.sqrt(1.0 / a_normalizer)\n a_shape = self.shape(matrix_a)\n a_eye = self.eye(a_shape[0], a_shape[0], mstype.float32)\n damping_a = self.sqrt(damping_a)\n damping_g = self.sqrt(damping_g)\n g_shape = self.shape(matrix_g)\n g_eye = self.eye(g_shape[0], g_shape[1], mstype.float32)\n matrix_g = self.mul(matrix_g, self.loss_scale)\n matrix_g = self.mul(matrix_g, self.batch_size_scale)\n matrix_g = matrix_g + damping_g * g_eye\n if layer_type == Embedding:\n a_eye = P.OnesLike()(matrix_a)\n matrix_a = self.mul(matrix_a, 1.0 / self.batch_size)\n matrix_a = matrix_a + damping_a * a_eye\n matrix_a = self.inv(matrix_a)\n matrix_g = self.embedding_cholesky(matrix_g)\n matrix_g = self.matmul(matrix_g, matrix_g)\n else:\n matrix_a = matrix_a + damping_a * a_eye\n matrix_a = self.cholesky(matrix_a)\n matrix_a = self.vector_matmul(matrix_a, matrix_a)\n matrix_a = P.BroadcastTo(self.matrix_a_shape[thor_layer_count])(matrix_a)\n matrix_g = self.cholesky(matrix_g)\n matrix_g = self.vector_matmul(matrix_g, matrix_g)\n matrix_a = self.mul(matrix_a, feature_map)\n matrix_g = self.mul(matrix_g, feature_map)\n matrix_a_allreduce = matrix_a_allreduce + (matrix_a,)\n matrix_g_allreduce = matrix_g_allreduce + (matrix_g,)\n return matrix_a_allreduce, matrix_g_allreduce\n\n def _process_layernorm(self, damping_step, gradient):\n \"\"\"process layernorm\"\"\"\n damping = self.sqrt(damping_step)\n normalizer = self.batch_size\n normalizer = self.cast(normalizer, mstype.float32)\n fim_cov = self.square(gradient)\n fim_cov = self.mul(fim_cov, 1.0 / normalizer)\n fim_cov = fim_cov + damping\n fim_inv = self.inv(fim_cov)\n gradient = self.mul(fim_inv, gradient)\n return gradient\n\n def _reshape_gradient(self, conv_layer_count, g, g_shape):\n \"\"\"reshape 
gradient\"\"\"\n if conv_layer_count != -1:\n g = self.reshape(g, g_shape)\n return g\n\n def construct(self, gradients):\n params = self.params\n moments = self.moments\n gradients = self.scale_grad(gradients)\n damping_step = self.gather(self.damping, self.cov_step, self.axis)\n damping_step = self.cast(damping_step, mstype.float32)\n new_grads = ()\n if self.thor:\n matrix_ainv_list = ()\n matrix_ginv_list = ()\n matrix_a_allreduce, matrix_g_allreduce = self._get_ainv_ginv_list(gradients, damping_step,\n matrix_ainv_list, matrix_ginv_list)\n if self.is_distributed:\n matrix_a_allreduce = self.grad_reducer_a(matrix_a_allreduce)\n matrix_g_allreduce = self.grad_reducer_g(matrix_g_allreduce)\n\n for i in range(len(self.params)):\n g = gradients[i]\n thor_layer_count = self.weight_fim_idx_map[i]\n conv_layer_count = self.weight_conv_idx_map[i]\n layer_type = self.weight_layertype_idx_map[i]\n if layer_type in [Conv, FC]:\n g_shape = self.shape(g)\n g = self.reshape(g, (g_shape[0], -1))\n matrix_a = matrix_a_allreduce[thor_layer_count]\n matrix_g = matrix_g_allreduce[thor_layer_count]\n g = self.update_gradient(matrix_g, g, matrix_a)\n self.assign(self.matrix_a[thor_layer_count], matrix_a)\n self.assign(self.matrix_g[thor_layer_count], matrix_g)\n g = self._reshape_gradient(conv_layer_count, g, g_shape)\n elif layer_type == Embedding:\n matrix_a = matrix_a_allreduce[thor_layer_count]\n matrix_g = matrix_g_allreduce[thor_layer_count]\n self.assign(self.matrix_a[thor_layer_count], matrix_a)\n self.assign(self.matrix_g[thor_layer_count], matrix_g)\n temp_a = self.expand(matrix_a, 1)\n g = self.mul(temp_a, g)\n g = self.matmul(g, matrix_g)\n elif layer_type == LayerNorm:\n g = self._process_layernorm(damping_step, g)\n new_grads = new_grads + (g,)\n else:\n for j in range(len(self.params)):\n g = gradients[j]\n thor_layer_count = self.weight_fim_idx_map[j]\n conv_layer_count = self.weight_conv_idx_map[j]\n layer_type = self.weight_layertype_idx_map[j]\n if layer_type in [Conv, FC]:\n g_shape = self.shape(g)\n g = self.reshape(g, (g_shape[0], -1))\n matrix_a = self.matrix_a[thor_layer_count]\n matrix_g = self.matrix_g[thor_layer_count]\n g = self.update_gradient(matrix_g, g, matrix_a)\n g = self._reshape_gradient(conv_layer_count, g, g_shape)\n elif layer_type == Embedding:\n matrix_a = self.matrix_a[thor_layer_count]\n matrix_g = self.matrix_g[thor_layer_count]\n g = gradients[j]\n temp_a = self.expand(matrix_a, 1)\n g = self.mul(temp_a, g)\n g = self.matmul(g, matrix_g)\n elif layer_type == LayerNorm:\n g = self._process_layernorm(damping_step, g)\n new_grads = new_grads + (g,)\n gradients = new_grads\n\n self.cov_step = self.cov_step + self.one\n if self.weight_decay > 0:\n gradients = self.hyper_map(F.partial(apply_decay, self.weight_decay), self.decay_flags, params, gradients)\n gradients = clip_gradient(self.enable_clip_grad, gradients)\n lr = self.get_lr()\n success = self.hyper_map(F.partial(_momentum_opt, self.opt, self.momentum, lr), gradients, params, moments)\n return success\n\n\nclass ThorAscend(Optimizer):\n \"\"\"ThorAscend\"\"\"\n\n def __init__(self, net, learning_rate, damping, momentum, weight_decay=0.0, loss_scale=1.0, batch_size=32,\n decay_filter=lambda x: x.name not in [], split_indices=None, enable_clip_grad=False, frequency=100):\n params = filter(lambda x: x.requires_grad, net.get_parameters())\n super(ThorAscend, self).__init__(learning_rate, params, weight_decay, loss_scale)\n _check_param(momentum, frequency, learning_rate, self.__class__.__name__)\n self.momentum = 
Parameter(Tensor(momentum, mstype.float32), name=\"momentum\")\n self.params = self.parameters\n self.moments = self.params.clone(prefix=\"moments\", init='zeros')\n self.hyper_map = C.HyperMap()\n self.opt = P.ApplyMomentum()\n self.net = net\n self.matrix_a_cov = ParameterTuple(filter(lambda x: 'matrix_a' in x.name, net.get_parameters()))\n self.matrix_g_cov = ParameterTuple(filter(lambda x: 'matrix_g' in x.name, net.get_parameters()))\n self.a_normalizer = ParameterTuple(filter(lambda x: 'a_normalizer' in x.name, net.get_parameters()))\n self.g_normalizer = ParameterTuple(filter(lambda x: 'g_normalizer' in x.name, net.get_parameters()))\n logger.info(\"matrix_a_cov len is {}\".format(len(self.matrix_a_cov)))\n self._define_ascend_operator()\n self.C0 = 16\n self.device_shape_pad_flag = ()\n self.diag_block_dim = 128\n self.matrix_a = ()\n self.matrix_g = ()\n self.thor_layer_count = 0\n self.conv_layer_count = 0\n self.weight_conv_idx_map = ()\n self.weight_fim_idx_map = ()\n self.weight_layertype_idx_map = ()\n self.a_split_pad_dim_map = ()\n self.g_split_pad_dim_map = ()\n self.conv_matmul_support_map = ()\n self.batch_matmul_support_list = [1, 2, 4, 5, 6, 8, 9, 16, 18, 24, 32, 36]\n self.abs_max_support_list = [1, 2, 4, 8, 16, 5, 9, 18, 36, 32]\n self._process_matrix_init_and_weight_idx_map(self.net)\n self.matrix_a = ParameterTuple(self.matrix_a)\n self.matrix_g = ParameterTuple(self.matrix_g)\n self.matrix_max_inv = ()\n for i in range(len(self.matrix_a)):\n self.matrix_max_inv = self.matrix_max_inv + (\n Parameter(initializer(1, [1], mstype.float32), name=\"matrix_max\" + str(i), requires_grad=False),)\n self.matrix_max_inv = ParameterTuple(self.matrix_max_inv)\n self.thor = True\n self.weight_decay = weight_decay\n self.decay_flags = tuple(decay_filter(x) for x in self.parameters)\n self.damping = damping\n self.batch_size = Tensor(batch_size, mstype.float32)\n self.loss_scale = Tensor(1 / (loss_scale * loss_scale), mstype.float32)\n self.batch_size_scale = Tensor(batch_size * batch_size, mstype.float32)\n self.enable_clip_grad = enable_clip_grad\n self.frequency = frequency\n self._define_ascend_reducer(split_indices)\n\n\n def get_frequency(self):\n \"\"\"get thor frequency\"\"\"\n return self.frequency\n\n def _get_pad_dim(self, matrix_dim):\n \"\"\"get diag split pad dim \"\"\"\n split_pad_dim = 0\n if matrix_dim == 64:\n return split_pad_dim\n res = matrix_dim % self.diag_block_dim\n if res != 0:\n split_pad_dim = self.diag_block_dim - res\n return split_pad_dim\n\n def _define_ascend_operator(self):\n \"\"\"define ascend operator\"\"\"\n self.cube_matmul_left = P.CusMatMulCubeFraczLeftCast()\n self.cube_matmul_left_fc = P.CusMatMulCubeDenseLeft()\n self.cube_matmul_right_fc = P.CusMatMulCubeDenseRight()\n self.cube_matmul_right_mul = P.CusMatMulCubeFraczRightMul()\n self.transpose = P.Transpose()\n self.shape = P.Shape()\n self.reshape = P.Reshape()\n self.mul = P.Mul()\n self.log = P.Log()\n self.exp = P.Exp()\n self.sqrt = P.Sqrt()\n self.gather = P.GatherV2()\n self.assign = P.Assign()\n self.cast = P.Cast()\n self.eye = P.Eye()\n self.concat = P.Concat(0)\n self.cholesky = P.CusCholeskyTrsm()\n self.vector_matmul = P.CusBatchMatMul()\n self.tbe_batch_matmul = P.BatchMatMul(transpose_a=True)\n self.fused_abs_max2 = P.CusFusedAbsMax1()\n self.matrix_combine = P.CusMatrixCombine()\n self.slice = P.Slice()\n self.expand = P.ExpandDims()\n self.reduce_sum = P.ReduceSum(keep_dims=False)\n self.square = P.Square()\n self.inv = P.Inv()\n self.matmul = P.MatMul()\n self.axis = 
0\n self.one = Tensor(1, mstype.int32)\n self.cov_step = Parameter(initializer(0, [1], mstype.int32), name=\"cov_step\", requires_grad=False)\n\n def _define_ascend_reducer(self, split_indices):\n \"\"\"define ascend reducer\"\"\"\n self.parallel_mode = context.get_auto_parallel_context(\"parallel_mode\")\n self.is_distributed = (self.parallel_mode != ParallelMode.STAND_ALONE)\n if self.is_distributed:\n mean = _get_gradients_mean()\n degree = _get_device_num()\n if not split_indices:\n self.split_indices = [len(self.matrix_a_cov) - 1]\n else:\n self.split_indices = split_indices\n if self.conv_layer_count > 0:\n auto_parallel_context().set_all_reduce_fusion_split_indices(self.split_indices, \"hccl_world_groupsum2\")\n auto_parallel_context().set_all_reduce_fusion_split_indices(self.split_indices, \"hccl_world_groupsum4\")\n self.grad_reducer_amax = DistributedGradReducer(self.matrix_a_cov, mean, degree, fusion_type=2)\n self.grad_reducer_gmax = DistributedGradReducer(self.matrix_a_cov, mean, degree, fusion_type=4)\n\n auto_parallel_context().set_all_reduce_fusion_split_indices(self.split_indices, \"hccl_world_groupsum6\")\n auto_parallel_context().set_all_reduce_fusion_split_indices(self.split_indices, \"hccl_world_groupsum8\")\n self.grad_reducer_a = DistributedGradReducer(self.matrix_a_cov, mean, degree, fusion_type=6)\n self.grad_reducer_g = DistributedGradReducer(self.matrix_a_cov, mean, degree, fusion_type=8)\n\n def _get_weight_idx_map(self, layer_type, idx, weight_shape):\n \"\"\"for Ascend, get weight idx map\"\"\"\n if layer_type in [Conv, FC, Embedding] and \"bias\" not in self.params[idx].name.lower():\n self.weight_fim_idx_map = self.weight_fim_idx_map + (self.thor_layer_count,)\n self.weight_layertype_idx_map = self.weight_layertype_idx_map + (layer_type,)\n if layer_type == Embedding:\n a_pad_dim = 0\n g_pad_dim = 0\n self.a_split_pad_dim_map = self.a_split_pad_dim_map + (a_pad_dim,)\n self.g_split_pad_dim_map = self.g_split_pad_dim_map + (g_pad_dim,)\n else:\n out_channels = weight_shape[0]\n g_pad_dim = self._get_pad_dim(out_channels)\n self.g_split_pad_dim_map = self.g_split_pad_dim_map + (g_pad_dim,)\n matrix_a_dim = weight_shape[1]\n if layer_type == Conv:\n matrix_a_dim = weight_shape[1] * weight_shape[2] * weight_shape[3]\n a_pad_dim = self._get_pad_dim(matrix_a_dim)\n self.a_split_pad_dim_map = self.a_split_pad_dim_map + (a_pad_dim,)\n\n self.thor_layer_count = self.thor_layer_count + 1\n if layer_type == Conv:\n self.weight_conv_idx_map = self.weight_conv_idx_map + (self.conv_layer_count,)\n self.conv_layer_count = self.conv_layer_count + 1\n else:\n self.weight_conv_idx_map = self.weight_conv_idx_map + (-1,)\n else:\n self.weight_fim_idx_map = self.weight_fim_idx_map + (-1,)\n self.weight_conv_idx_map = self.weight_conv_idx_map + (-1,)\n if layer_type == LayerNorm:\n self.weight_layertype_idx_map = self.weight_layertype_idx_map + (LayerNorm,)\n else:\n self.weight_layertype_idx_map = self.weight_layertype_idx_map + (Other,)\n\n def _get_fc_matrix(self, weight_shape):\n \"\"\"for Ascend, get fc matrix_a and matrix_g\"\"\"\n out_channels = weight_shape[0]\n in_channels = weight_shape[1]\n if self.conv_layer_count > 0:\n if out_channels == 1001:\n fc_matrix_a = Parameter(Tensor(np.zeros([128, 128, 16, 16]).astype(np.float16)),\n name='matrix_a_inv_' + str(self.thor_layer_count),\n requires_grad=False)\n fc_matrix_g = Parameter(Tensor(np.zeros([63, 63, 16, 16]).astype(np.float16)),\n name=\"matrix_g_inv_\" + str(self.thor_layer_count),\n requires_grad=False)\n else:\n 
fc_matrix_a = Parameter(Tensor(np.eye(in_channels).astype(np.float16)),\n name='matrix_a_inv_' + str(self.thor_layer_count),\n requires_grad=False)\n fc_matrix_g = Parameter(Tensor(np.eye(out_channels).astype(np.float16)),\n name=\"matrix_g_inv_\" + str(self.thor_layer_count),\n requires_grad=False)\n self.matrix_a = self.matrix_a + (fc_matrix_a,)\n self.matrix_g = self.matrix_g + (fc_matrix_g,)\n\n def _process_matrix_init_and_weight_idx_map(self, net):\n \"\"\"for Ascend, process matrix init shape, and get weight idx map\"\"\"\n layer_counter = 0\n layer_type_map = get_net_layertype_mask(net)\n for idx in range(len(self.params)):\n layer_type = layer_type_map[layer_counter]\n weight = self.params[idx]\n weight_shape = self.shape(weight)\n if layer_type == Conv and \"bias\" not in self.params[idx].name.lower():\n in_channels = weight_shape[1]\n out_channels = weight_shape[0]\n matrix_a_dim = in_channels * weight_shape[2] * weight_shape[3]\n matrix_g_dim = out_channels\n matrix_a_device_shape, matrix_a_device_dim = caculate_device_shape(matrix_a_dim, in_channels, True)\n matrix_g_device_shape, matrix_g_device_dim = caculate_device_shape(matrix_g_dim, in_channels, False)\n ret = is_conv_matmul_support_shape(matrix_a_device_shape, matrix_g_device_shape)\n if ret:\n matrix_a_inv = Parameter(\n Tensor(np.reshape(np.identity(matrix_a_device_dim).astype(np.float16), matrix_a_device_shape)),\n name='matrix_a_inv_' + str(self.thor_layer_count), requires_grad=False)\n matrix_g_inv = Parameter(\n Tensor(np.reshape(np.identity(matrix_g_device_dim).astype(np.float16), matrix_g_device_shape)),\n name=\"matrix_g_inv_\" + str(self.thor_layer_count), requires_grad=False)\n self.conv_matmul_support_map = self.conv_matmul_support_map + (1,)\n else:\n matrix_a_inv = Parameter(Tensor(np.eye(matrix_a_dim).astype(np.float16)),\n name='matrix_a_inv_' + str(self.thor_layer_count), requires_grad=False)\n matrix_g_inv = Parameter(Tensor(np.eye(matrix_g_dim).astype(np.float16)),\n name=\"matrix_g_inv_\" + str(self.thor_layer_count), requires_grad=False)\n self.conv_matmul_support_map = self.conv_matmul_support_map + (0,)\n self.matrix_a = self.matrix_a + (matrix_a_inv,)\n self.matrix_g = self.matrix_g + (matrix_g_inv,)\n device_shape_pad_flag = False\n if matrix_a_dim != matrix_a_device_dim:\n device_shape_pad_flag = True\n self.device_shape_pad_flag = self.device_shape_pad_flag + (device_shape_pad_flag,)\n elif layer_type == FC and \"bias\" not in self.params[idx].name.lower():\n self._get_fc_matrix(weight_shape)\n self._get_weight_idx_map(layer_type, idx, weight_shape)\n # bert.cls1.output_bias: not a network layer, only a trainable param\n if \"output_bias\" not in self.params[idx].name.lower():\n layer_counter = get_layer_counter(layer_type, layer_counter, self.params, idx)\n\n def _process_batch_matmul(self, input_matrix):\n \"\"\"process batch matmul\"\"\"\n input_matrix_shape = self.shape(input_matrix)\n if input_matrix_shape[0] in self.batch_matmul_support_list:\n input_matrix = self.vector_matmul(input_matrix, input_matrix)\n else:\n input_matrix = self.tbe_batch_matmul(input_matrix, input_matrix)\n return input_matrix\n\n def _process_cholesky_pad(self, pad_dim, input_matrix, matrix_shape0):\n \"\"\"process cholesky pad\"\"\"\n if pad_dim > 0:\n matrix_sup = self.eye(pad_dim, pad_dim, mstype.float32)\n matrix_sup = P.Pad(((0, 0), (matrix_shape0, 0)))(matrix_sup)\n input_matrix = P.Pad(((0, 0), (0, pad_dim)))(input_matrix)\n input_matrix = self.concat((input_matrix, matrix_sup))\n return input_matrix\n\n\n 
def _get_abs_max(self, matrix_inv, origin_dim):\n \"\"\"get matrix abs max\"\"\"\n cholesky_shape = self.shape(matrix_inv)\n if cholesky_shape[0] in self.abs_max_support_list:\n matrix_inv_max = P.CusFusedAbsMax1([origin_dim, origin_dim])(matrix_inv)\n matrix_max = self.fused_abs_max2(matrix_inv_max)\n matrix_inv = self.matrix_combine(matrix_inv)\n else:\n matrix_inv = self.matrix_combine(matrix_inv)\n matrix_abs = P.Abs()(matrix_inv)\n matrix_max = P.ReduceMax(keep_dims=False)(matrix_abs)\n return matrix_max, matrix_inv\n\n\n def _get_fc_ainv_ginv(self, index, damping_step, gradients, matrix_a_allreduce, matrix_g_allreduce,\n matrix_a_max_allreduce, matrix_g_max_allreduce):\n \"\"\"get fc layer ainv and ginv\"\"\"\n thor_layer_count = self.weight_fim_idx_map[index]\n g = gradients[index]\n matrix_a = self.matrix_a_cov[thor_layer_count]\n matrix_g = self.matrix_g_cov[thor_layer_count]\n matrix_a = F.depend(matrix_a, g)\n matrix_g = F.depend(matrix_g, g)\n a_shape = self.shape(matrix_a)\n a_eye = self.eye(a_shape[0], a_shape[0], mstype.float32)\n g_shape = self.shape(matrix_g)\n g_eye = self.eye(g_shape[0], g_shape[0], mstype.float32)\n damping = self.sqrt(damping_step)\n matrix_a = matrix_a + damping * a_eye\n a_pad_dim = self.a_split_pad_dim_map[thor_layer_count]\n matrix_a = self._process_cholesky_pad(a_pad_dim, matrix_a, a_shape[0])\n matrix_a_inv = self.cholesky(matrix_a)\n matrix_a_inv = self._process_batch_matmul(matrix_a_inv)\n\n weight_shape = self.shape(self.params[index])\n out_channels = weight_shape[0]\n in_channels = weight_shape[1]\n if out_channels == 2:\n matrix_a_inv = self.matrix_combine(matrix_a_inv)\n matrix_g_inv = g_eye\n else:\n matrix_g = self.mul(matrix_g, self.loss_scale)\n matrix_g = self.mul(matrix_g, self.batch_size_scale)\n matrix_g = matrix_g + damping * g_eye\n g_pad_dim = self.g_split_pad_dim_map[thor_layer_count]\n matrix_g = self._process_cholesky_pad(g_pad_dim, matrix_g, g_shape[0])\n matrix_g_inv = self.cholesky(matrix_g)\n matrix_g_inv = self._process_batch_matmul(matrix_g_inv)\n if self.conv_layer_count > 0:\n a_max, matrix_a_inv = self._get_abs_max(matrix_a_inv, in_channels)\n g_max, matrix_g_inv = self._get_abs_max(matrix_g_inv, out_channels)\n a_max = F.depend(a_max, g)\n g_max = F.depend(g_max, g)\n matrix_a_max_allreduce = matrix_a_max_allreduce + (a_max,)\n matrix_g_max_allreduce = matrix_g_max_allreduce + (g_max,)\n else:\n matrix_a_inv = self.matrix_combine(matrix_a_inv)\n matrix_g_inv = self.matrix_combine(matrix_g_inv)\n\n if a_pad_dim > 0:\n matrix_a_inv = self.slice(matrix_a_inv, (0, 0), (in_channels, in_channels))\n if g_pad_dim > 0:\n matrix_g_inv = self.slice(matrix_g_inv, (0, 0), (out_channels, out_channels))\n matrix_a_inv_shape = self.shape(matrix_a_inv)\n matrix_g_combine_shape = self.shape(matrix_g_inv)\n if matrix_a_inv_shape[0] == 2048 and matrix_g_combine_shape[0] == 1001:\n matrix_a_inv = self.reshape(matrix_a_inv,\n (matrix_a_inv_shape[0] / 16, 16,\n matrix_a_inv_shape[0] / 16, 16))\n matrix_a_inv = self.transpose(matrix_a_inv, (2, 0, 1, 3))\n matrix_g_inv = P.Pad(((0, 7), (0, 7)))(matrix_g_inv)\n\n matrix_g_inv_shape = self.shape(matrix_g_inv)\n matrix_g_inv = self.reshape(matrix_g_inv,\n (matrix_g_inv_shape[0] / 16, 16,\n matrix_g_inv_shape[0] / 16, 16))\n matrix_g_inv = self.transpose(matrix_g_inv, (2, 0, 1, 3))\n\n matrix_a_allreduce = matrix_a_allreduce + (matrix_a_inv,)\n matrix_g_allreduce = matrix_g_allreduce + (matrix_g_inv,)\n return matrix_a_allreduce, matrix_g_allreduce, matrix_a_max_allreduce, 
matrix_g_max_allreduce\n\n def _process_conv_matmul_device_pad(self, conv_layer_count, weight_shape, matrix_a_inv):\n \"\"\"process conv matmul device pad\"\"\"\n if self.device_shape_pad_flag[conv_layer_count]:\n kernel_hw = weight_shape[2] * weight_shape[3]\n in_channels = weight_shape[1]\n matrix_a_inv = self.reshape(matrix_a_inv, (kernel_hw, in_channels, kernel_hw, in_channels))\n matrix_a_inv = P.Pad(((0, 0), (0, self.C0 - in_channels), (0, 0),\n (0, self.C0 - in_channels)))(matrix_a_inv)\n return matrix_a_inv\n\n\n def _get_ainv_ginv_amax_gmax_list(self, gradients, damping_step, matrix_a_allreduce, matrix_g_allreduce,\n matrix_a_max_allreduce, matrix_g_max_allreduce):\n \"\"\"get matrixA inverse list, matrixG inverse list, matrixA_max list, matrixG_max list\"\"\"\n for i in range(len(self.params)):\n thor_layer_count = self.weight_fim_idx_map[i]\n conv_layer_count = self.weight_conv_idx_map[i]\n layer_type = self.weight_layertype_idx_map[i]\n weight_shape = self.shape(self.params[i])\n out_channels = weight_shape[0]\n if layer_type == Conv:\n g = gradients[i]\n matrix_a_dim = weight_shape[1] * weight_shape[2] * weight_shape[3]\n matmul_support_flag = self.conv_matmul_support_map[conv_layer_count]\n matrix_a = self.matrix_a_cov[thor_layer_count]\n matrix_g = self.matrix_g_cov[thor_layer_count]\n matrix_a = F.depend(matrix_a, g)\n matrix_g = F.depend(matrix_g, g)\n a_shape = self.shape(matrix_a)\n a_eye = self.eye(a_shape[0], a_shape[0], mstype.float32)\n g_shape = self.shape(matrix_g)\n g_eye = self.eye(g_shape[0], g_shape[0], mstype.float32)\n a_normalizer = self.a_normalizer[conv_layer_count]\n g_normalizer = self.g_normalizer[conv_layer_count]\n a_normalizer = F.depend(a_normalizer, g)\n g_normalizer = F.depend(g_normalizer, g)\n damping_a = self.mul(damping_step, self.batch_size / a_normalizer)\n damping_g = self.mul(damping_step, self.batch_size / g_normalizer)\n damping_a = self.sqrt(damping_a)\n matrix_a = matrix_a + damping_a * a_eye\n a_pad_dim = self.a_split_pad_dim_map[thor_layer_count]\n matrix_a = self._process_cholesky_pad(a_pad_dim, matrix_a, a_shape[0])\n matrix_a_inv = self.cholesky(matrix_a)\n matrix_a_inv = self._process_batch_matmul(matrix_a_inv)\n a_max, matrix_a_inv = self._get_abs_max(matrix_a_inv, matrix_a_dim)\n\n damping_g = self.sqrt(damping_g)\n matrix_g = self.mul(matrix_g, self.loss_scale)\n matrix_g = self.mul(matrix_g, self.batch_size_scale)\n matrix_g = matrix_g + damping_g * g_eye\n g_pad_dim = self.g_split_pad_dim_map[thor_layer_count]\n matrix_g = self._process_cholesky_pad(g_pad_dim, matrix_g, g_shape[0])\n matrix_g_inv = self.cholesky(matrix_g)\n matrix_g_inv = self._process_batch_matmul(matrix_g_inv)\n g_max, matrix_g_inv = self._get_abs_max(matrix_g_inv, out_channels)\n\n if a_pad_dim > 0:\n matrix_a_inv = self.slice(matrix_a_inv, (0, 0), (matrix_a_dim, matrix_a_dim))\n if g_pad_dim > 0:\n matrix_g_inv = self.slice(matrix_g_inv, (0, 0), (out_channels, out_channels))\n\n if matmul_support_flag == 1:\n matrix_a_inv = self._process_conv_matmul_device_pad(conv_layer_count, weight_shape, matrix_a_inv)\n matrix_a_inv_shape = self.shape(self.matrix_a[thor_layer_count])\n matrix_a_device_temp_shape = (matrix_a_inv_shape[0], matrix_a_inv_shape[2],\n matrix_a_inv_shape[1], matrix_a_inv_shape[3])\n matrix_a_inv = self.reshape(matrix_a_inv, matrix_a_device_temp_shape)\n matrix_a_inv = self.transpose(matrix_a_inv, (2, 0, 1, 3))\n matrix_g_inv_shape = self.shape(self.matrix_g[thor_layer_count])\n matrix_g_device_temp_shape = (matrix_g_inv_shape[0], 
matrix_g_inv_shape[2],\n matrix_g_inv_shape[1], matrix_g_inv_shape[3])\n matrix_g_inv = self.reshape(matrix_g_inv, matrix_g_device_temp_shape)\n matrix_g_inv = self.transpose(matrix_g_inv, (2, 0, 1, 3))\n\n a_max = F.depend(a_max, g)\n g_max = F.depend(g_max, g)\n matrix_a_allreduce = matrix_a_allreduce + (matrix_a_inv,)\n matrix_g_allreduce = matrix_g_allreduce + (matrix_g_inv,)\n matrix_a_max_allreduce = matrix_a_max_allreduce + (a_max,)\n matrix_g_max_allreduce = matrix_g_max_allreduce + (g_max,)\n elif layer_type == FC:\n matrix_a_allreduce, matrix_g_allreduce, matrix_a_max_allreduce, matrix_g_max_allreduce = \\\n self._get_fc_ainv_ginv(i, damping_step, gradients, matrix_a_allreduce, matrix_g_allreduce,\n matrix_a_max_allreduce, matrix_g_max_allreduce)\n elif layer_type == Embedding:\n g = gradients[i]\n matrix_a = self.matrix_a_cov[thor_layer_count]\n matrix_g = self.matrix_g_cov[thor_layer_count]\n matrix_a = F.depend(matrix_a, g)\n matrix_g = F.depend(matrix_g, g)\n g_shape = self.shape(matrix_g)\n g_eye = self.eye(g_shape[0], g_shape[0], mstype.float32)\n damping = self.sqrt(damping_step)\n a_eye = P.OnesLike()(matrix_a)\n matrix_a = self.mul(matrix_a, 1.0 / self.batch_size)\n matrix_a = matrix_a + damping * a_eye\n matrix_a_inv = self.inv(matrix_a)\n matrix_g = self.mul(matrix_g, self.loss_scale)\n matrix_g = self.mul(matrix_g, self.batch_size_scale)\n matrix_g = matrix_g + damping * g_eye\n matrix_g_inv = self.cholesky(matrix_g)\n matrix_g_inv = self._process_batch_matmul(matrix_g_inv)\n matrix_g_inv = self.matrix_combine(matrix_g_inv)\n matrix_a_allreduce = matrix_a_allreduce + (matrix_a_inv,)\n matrix_g_allreduce = matrix_g_allreduce + (matrix_g_inv,)\n return matrix_a_allreduce, matrix_g_allreduce, matrix_a_max_allreduce, matrix_g_max_allreduce\n\n def _process_layernorm(self, damping_step, gradient):\n \"\"\"process layernorm layer for thor\"\"\"\n damping = self.sqrt(damping_step)\n normalizer = self.cast(self.batch_size, mstype.float32)\n fim_cov = self.square(gradient)\n fim_cov = self.mul(fim_cov, 1.0 / normalizer)\n fim_cov = fim_cov + damping\n fim_inv = self.inv(fim_cov)\n gradient = self.mul(fim_inv, gradient)\n return gradient\n\n def _process_thor_fc(self, thor_layer_count, matrix_a_allreduce, matrix_g_allreduce, g):\n \"\"\"process thor graph fc layer\"\"\"\n temp_a = matrix_a_allreduce[thor_layer_count]\n temp_g = matrix_g_allreduce[thor_layer_count]\n self.assign(self.matrix_a_cov[thor_layer_count], temp_a)\n self.assign(self.matrix_g_cov[thor_layer_count], temp_g)\n temp_a = self.cast(temp_a, mstype.float16)\n temp_g = self.cast(temp_g, mstype.float16)\n g = self.cast(g, mstype.float16)\n g = self.matmul(temp_g, g)\n g = self.matmul(g, temp_a)\n g = self.cast(g, mstype.float32)\n return g\n\n def _get_second_gradients_one(self, params_len, gradients, new_grads):\n \"\"\"get second gradients one\"\"\"\n for i in range(params_len):\n g = gradients[i]\n thor_layer_count = self.weight_fim_idx_map[i]\n conv_layer_count = self.weight_conv_idx_map[i]\n layer_type = self.weight_layertype_idx_map[i]\n matrix_a = self.matrix_a[thor_layer_count]\n matrix_g = self.matrix_g[thor_layer_count]\n matrix_max = self.matrix_max_inv[thor_layer_count]\n grad_shape = self.shape(g)\n if layer_type == FC:\n if grad_shape[0] == 1001:\n g = self.cube_matmul_left_fc(matrix_g, g)\n g = self.cube_matmul_right_fc(g, matrix_a, matrix_max)\n else:\n temp_a = self.cast(matrix_a, mstype.float16)\n temp_g = self.cast(matrix_g, mstype.float16)\n g = self.cast(g, mstype.float16)\n g = 
self.matmul(temp_g, g)\n g = self.matmul(g, temp_a)\n g = self.cast(g, mstype.float32)\n g = self.mul(g, matrix_max)\n elif layer_type == Conv:\n matmul_support_flag = self.conv_matmul_support_map[conv_layer_count]\n if matmul_support_flag == 1:\n g = self.cube_matmul_left(matrix_g, g)\n g = self.cube_matmul_right_mul(g, matrix_a, matrix_max)\n else:\n g = self.reshape(g, (grad_shape[0], grad_shape[1] * grad_shape[2] * grad_shape[3]))\n temp_a = self.cast(matrix_a, mstype.float16)\n temp_g = self.cast(matrix_g, mstype.float16)\n g = self.cast(g, mstype.float16)\n g = self.matmul(temp_g, g)\n g = self.matmul(g, temp_a)\n g = self.cast(g, mstype.float32)\n g = self.mul(g, matrix_max)\n g = self.reshape(g, grad_shape)\n new_grads = new_grads + (g,)\n return new_grads\n\n def _get_second_gradients(self, new_grads, damping_step, gradients):\n \"\"\"get second gradients for thor\"\"\"\n params_len = len(self.params)\n if self.conv_layer_count > 0:\n new_grads = self._get_second_gradients_one(params_len, gradients, new_grads)\n else:\n for i in range(params_len):\n g = gradients[i]\n thor_layer_count = self.weight_fim_idx_map[i]\n layer_type = self.weight_layertype_idx_map[i]\n if layer_type == Embedding:\n temp_a_ori = self.matrix_a_cov[thor_layer_count]\n temp_g = self.matrix_g_cov[thor_layer_count]\n temp_a = self.expand(temp_a_ori, 1)\n g = self.mul(temp_a, g)\n temp_g = self.cast(temp_g, mstype.float16)\n g = self.cast(g, mstype.float16)\n g = self.matmul(g, temp_g)\n g = self.cast(g, mstype.float32)\n elif layer_type == FC:\n temp_a = self.matrix_a_cov[thor_layer_count]\n temp_g = self.matrix_g_cov[thor_layer_count]\n temp_a = self.cast(temp_a, mstype.float16)\n temp_g = self.cast(temp_g, mstype.float16)\n g = self.cast(g, mstype.float16)\n g = self.matmul(temp_g, g)\n g = self.matmul(g, temp_a)\n g = self.cast(g, mstype.float32)\n elif layer_type == LayerNorm:\n g = self._process_layernorm(damping_step, g)\n new_grads = new_grads + (g,)\n return new_grads\n\n def _get_second_grad_by_matmul(self, index, temp_a, temp_g, g, temp_max):\n \"\"\"get second gradient by matmul\"\"\"\n conv_layer_count = self.weight_conv_idx_map[index]\n layer_type = self.weight_layertype_idx_map[index]\n grad_shape = self.shape(g)\n if layer_type == FC:\n if grad_shape[0] == 1001:\n g = self.cube_matmul_left_fc(temp_g, g)\n g = self.cube_matmul_right_fc(g, temp_a, temp_max)\n else:\n temp_a = self.cast(temp_a, mstype.float16)\n temp_g = self.cast(temp_g, mstype.float16)\n g = self.cast(g, mstype.float16)\n g = self.matmul(temp_g, g)\n g = self.matmul(g, temp_a)\n g = self.cast(g, mstype.float32)\n g = self.mul(g, temp_max)\n elif layer_type == Conv:\n a_normalizer = self.a_normalizer[conv_layer_count]\n a_normalizer = F.depend(a_normalizer, g)\n temp_max = self.mul(temp_max, self.batch_size / a_normalizer)\n matmul_support_flag = self.conv_matmul_support_map[conv_layer_count]\n if matmul_support_flag == 1:\n g = self.cube_matmul_left(temp_g, g)\n g = self.cube_matmul_right_mul(g, temp_a, temp_max)\n else:\n g = self.reshape(g, (grad_shape[0], grad_shape[1] * grad_shape[2] * grad_shape[3]))\n temp_a = self.cast(temp_a, mstype.float16)\n temp_g = self.cast(temp_g, mstype.float16)\n g = self.cast(g, mstype.float16)\n g = self.matmul(temp_g, g)\n g = self.matmul(g, temp_a)\n g = self.cast(g, mstype.float32)\n g = self.mul(g, temp_max)\n g = self.reshape(g, grad_shape)\n return g, temp_max\n\n def _get_second_grad_by_layertype(self, index, matrix_a_allreduce, matrix_g_allreduce, g, damping_step):\n \"\"\"get second 
gradient by layertype\"\"\"\n thor_layer_count = self.weight_fim_idx_map[index]\n layer_type = self.weight_layertype_idx_map[index]\n if layer_type == Embedding:\n temp_a_ori = matrix_a_allreduce[thor_layer_count]\n temp_g = matrix_g_allreduce[thor_layer_count]\n self.assign(self.matrix_a_cov[thor_layer_count], temp_a_ori)\n self.assign(self.matrix_g_cov[thor_layer_count], temp_g)\n temp_a = self.expand(temp_a_ori, 1)\n g = self.mul(temp_a, g)\n temp_g = self.cast(temp_g, mstype.float16)\n g = self.cast(g, mstype.float16)\n g = self.matmul(g, temp_g)\n g = self.cast(g, mstype.float32)\n elif layer_type == FC:\n g = self._process_thor_fc(thor_layer_count, matrix_a_allreduce, matrix_g_allreduce, g)\n elif layer_type == LayerNorm:\n g = self._process_layernorm(damping_step, g)\n return g\n\n def construct(self, gradients):\n params = self.params\n moments = self.moments\n gradients = self.scale_grad(gradients)\n damping_step = self.gather(self.damping, self.cov_step, self.axis)\n damping_step = self.cast(damping_step, mstype.float32)\n if self.thor:\n matrix_a_allreduce = ()\n matrix_g_allreduce = ()\n matrix_a_max_allreduce = ()\n matrix_g_max_allreduce = ()\n matrix_a_allreduce, matrix_g_allreduce, matrix_a_max_allreduce, matrix_g_max_allreduce = \\\n self._get_ainv_ginv_amax_gmax_list(gradients, damping_step, matrix_a_allreduce, matrix_g_allreduce,\n matrix_a_max_allreduce, matrix_g_max_allreduce)\n if self.is_distributed:\n matrix_a_allreduce = self.grad_reducer_a(matrix_a_allreduce)\n matrix_g_allreduce = self.grad_reducer_g(matrix_g_allreduce)\n if self.conv_layer_count > 0:\n matrix_a_max_allreduce = self.grad_reducer_amax(matrix_a_max_allreduce)\n matrix_g_max_allreduce = self.grad_reducer_gmax(matrix_g_max_allreduce)\n\n new_grads = ()\n if self.conv_layer_count > 0:\n for i in range(len(self.params)):\n g = gradients[i]\n thor_layer_count = self.weight_fim_idx_map[i]\n temp_a = matrix_a_allreduce[thor_layer_count]\n temp_g = matrix_g_allreduce[thor_layer_count]\n matrix_a_inv_max = self.log(matrix_a_max_allreduce[thor_layer_count])\n matrix_a_inv_max = self.mul(matrix_a_inv_max, -1)\n matrix_a_inv_max = self.exp(matrix_a_inv_max)\n temp_a = self.mul(temp_a, matrix_a_inv_max)\n matrix_g_inv_max = self.log(matrix_g_max_allreduce[thor_layer_count])\n matrix_g_inv_max = self.mul(matrix_g_inv_max, -1)\n matrix_g_inv_max = self.exp(matrix_g_inv_max)\n temp_g = self.mul(temp_g, matrix_g_inv_max)\n temp_max = self.mul(matrix_g_max_allreduce[thor_layer_count],\n matrix_g_max_allreduce[thor_layer_count])\n temp_a = self.cast(temp_a, mstype.float16)\n temp_g = self.cast(temp_g, mstype.float16)\n g, temp_max = self._get_second_grad_by_matmul(i, temp_a, temp_g, g, temp_max)\n self.assign(self.matrix_a[thor_layer_count], temp_a)\n self.assign(self.matrix_g[thor_layer_count], temp_g)\n self.assign(self.matrix_max_inv[thor_layer_count], temp_max)\n new_grads = new_grads + (g,)\n gradients = new_grads\n else:\n for i in range(len(self.params)):\n g = gradients[i]\n g = self._get_second_grad_by_layertype(i, matrix_a_allreduce, matrix_g_allreduce, g, damping_step)\n new_grads = new_grads + (g,)\n gradients = new_grads\n else:\n new_grads = ()\n gradients = self._get_second_gradients(new_grads, damping_step, gradients)\n\n self.cov_step = self.cov_step + self.one\n if self.weight_decay > 0:\n gradients = self.hyper_map(F.partial(apply_decay, self.weight_decay), self.decay_flags, params, gradients)\n gradients = clip_gradient(self.enable_clip_grad, gradients)\n lr = self.get_lr()\n success = 
self.hyper_map(F.partial(_momentum_opt, self.opt, self.momentum, lr), gradients, params, moments)\n return success\n"
] | [
[
"numpy.eye",
"numpy.identity",
"numpy.zeros"
]
] |
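The record closed above pairs an escaped `code` field (the MindSpore THOR optimizer source) with an `apis` column listing "numpy.eye", "numpy.identity", "numpy.zeros". The dataset's actual extraction pipeline is not shown anywhere in this dump, so the snippet below is only a minimal sketch of how such an `apis` list could be recovered from a record's source text using Python's standard ast module. The function name extract_numpy_apis, the alias-resolution approach, and the restriction to single-level calls such as np.zeros(...) are illustrative assumptions, not the dataset's method.

import ast

def extract_numpy_apis(source: str):
    """Return the sorted set of numpy.<name> calls appearing in `source` (sketch only)."""
    tree = ast.parse(source)
    # Map local aliases to the modules they import, e.g. {"np": "numpy"}.
    aliases = {}
    for node in ast.walk(tree):
        if isinstance(node, ast.Import):
            for alias in node.names:
                aliases[alias.asname or alias.name] = alias.name
    apis = set()
    for node in ast.walk(tree):
        # Only direct calls like np.zeros(...) are resolved here; nested attributes
        # (e.g. np.linalg.inv) would need an extra step and are left out of this sketch.
        if isinstance(node, ast.Call) and isinstance(node.func, ast.Attribute):
            base = node.func.value
            if isinstance(base, ast.Name) and aliases.get(base.id) == "numpy":
                apis.add("numpy." + node.func.attr)
    return sorted(apis)

sample = "import numpy as np\na = np.zeros((2, 2))\nb = np.eye(3)\nc = np.identity(4)\n"
print(extract_numpy_apis(sample))  # -> ['numpy.eye', 'numpy.identity', 'numpy.zeros']

On the small sample string the sketch prints ['numpy.eye', 'numpy.identity', 'numpy.zeros'], which has the same shape as the `apis` entry of the record above; how the real corpus derives that column remains unspecified here.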
XiaoqiMa/shapSD | [
"545f61c9e8329c7271051f22f99ba32508ba74a1"
] | [
"shapSD/pysubgroup/nominal_target.py"
] | [
"'''\nCreated on 29.09.2017\n\n@author: lemmerfn\n'''\nimport numpy as np\nimport scipy.stats\nfrom functools import total_ordering\n\nfrom .measures import AbstractInterestingnessMeasure, BoundedInterestingnessMeasure\nfrom .utils import effective_sample_size, powerset\nfrom .subgroup import SubgroupDescription, Subgroup, NominalSelector\n\n\n@total_ordering\nclass NominalTarget(object):\n\n def __init__(self, target_attribute=None, target_value=None, target_selector=None):\n \"\"\"\n Creates a new target for the boolean model class (classic subgroup discovery). \n If target_attribute and target_value are given, the target_selector is computed using attribute and value\n \"\"\"\n if target_attribute is not None and target_value is not None:\n if target_selector is not None:\n raise BaseException(\n \"NominalTarget is to be constructed EITHER by a selector OR by attribute/value pair\")\n target_selector = NominalSelector(target_attribute, target_value)\n if target_selector is None:\n raise BaseException(\"No target selector given\")\n self.target_selector = target_selector\n\n def __repr__(self):\n return \"T: \" + str(self.target_selector)\n\n def __eq__(self, other):\n return self.__dict__ == other.__dict__\n\n def __lt__(self, other):\n return str(self) < str(other)\n\n def covers(self, instance):\n return self.target_selector.covers(instance)\n\n def get_attributes(self):\n return [self.target_selector.get_attribute_name()]\n\n @staticmethod\n def get_base_statistics(data, subgroup, weighting_attribute=None):\n\n if weighting_attribute is None:\n sg_instances = subgroup.subgroup_description.covers(data)\n positives = subgroup.target.covers(data)\n instances_subgroup = np.sum(sg_instances)\n positives_dataset = np.sum(positives)\n instances_dataset = len(data)\n positives_subgroup = np.sum(np.logical_and(sg_instances, positives))\n return instances_dataset, positives_dataset, instances_subgroup, positives_subgroup\n else:\n weights = data[weighting_attribute]\n sg_instances = subgroup.subgroup_description.covers(data)\n positives = subgroup.target.covers(data)\n\n instances_dataset = np.sum(weights)\n instances_subgroup = np.sum(np.dot(sg_instances, weights))\n positives_dataset = np.sum(np.dot(positives, weights))\n positives_subgroup = np.sum(np.dot(np.logical_and(sg_instances, positives), weights))\n return instances_dataset, positives_dataset, instances_subgroup, positives_subgroup\n\n @staticmethod\n def calculate_statistics(subgroup, data, weighting_attribute=None):\n (instances_dataset, positives_dataset, instances_subgroup, positives_subgroup) = \\\n NominalTarget.get_base_statistics(data, subgroup, weighting_attribute)\n subgroup.statistics['size_sg'] = instances_subgroup\n subgroup.statistics['size_dataset'] = instances_dataset\n subgroup.statistics['positives_sg'] = positives_subgroup\n subgroup.statistics['positives_dataset'] = positives_dataset\n\n subgroup.statistics['size_complement'] = instances_dataset - instances_subgroup\n subgroup.statistics['relative_size_sg'] = instances_subgroup / instances_dataset\n subgroup.statistics['relative_size_complement'] = (instances_dataset - instances_subgroup) / instances_dataset\n subgroup.statistics['coverage_sg'] = positives_subgroup / positives_dataset\n subgroup.statistics['coverage_complement'] = (positives_dataset - positives_subgroup) / positives_dataset\n subgroup.statistics['target_share_sg'] = positives_subgroup / instances_subgroup\n subgroup.statistics['target_share_complement'] = (positives_dataset - positives_subgroup) 
/ (\n instances_dataset - instances_subgroup)\n subgroup.statistics['target_share_dataset'] = positives_dataset / instances_dataset\n subgroup.statistics['lift'] = (positives_subgroup / instances_subgroup) / (\n positives_dataset / instances_dataset)\n\n if weighting_attribute is not None:\n (instances_dataset, positives_dataset, instances_subgroup, positives_subgroup) = \\\n NominalTarget.get_base_statistics(subgroup, data, weighting_attribute)\n subgroup.statistics['size_sg_weighted'] = instances_subgroup\n subgroup.statistics['size_dataset_weighted'] = instances_dataset\n subgroup.statistics['positives_sg_weighted'] = positives_subgroup\n subgroup.statistics['positives_dataset_weighted'] = positives_dataset\n\n subgroup.statistics['size_complement_weighted'] = instances_dataset - instances_subgroup\n subgroup.statistics['relative_size_sg_weighted'] = instances_subgroup / instances_dataset\n subgroup.statistics['relative_size_complement_weighted'] = \\\n (instances_dataset - instances_subgroup) / instances_dataset\n subgroup.statistics['coverage_sg_weighted'] = positives_subgroup / positives_dataset\n subgroup.statistics['coverage_complement_weighted'] = (\n positives_dataset - positives_subgroup) / positives_dataset\n subgroup.statistics['target_share_sg_weighted'] = positives_subgroup / instances_subgroup\n subgroup.statistics['target_share_complement_weighted'] = (positives_dataset - positives_subgroup) / (\n instances_dataset - instances_subgroup)\n subgroup.statistics['target_share_dataset_weighted'] = positives_dataset / instances_dataset\n subgroup.statistics['lift_weighted'] = (positives_subgroup / instances_subgroup) / (\n positives_dataset / instances_dataset)\n\n\nclass ChiSquaredQF(AbstractInterestingnessMeasure):\n @staticmethod\n def chi_squared_qf(instances_dataset, positives_dataset, instances_subgroup, positives_subgroup, min_instances=5,\n bidirect=True, direction_positive=True):\n if (instances_subgroup < min_instances) or ((instances_dataset - instances_subgroup) < min_instances):\n return float(\"-inf\")\n p_subgroup = positives_subgroup / instances_subgroup\n p_dataset = positives_dataset / instances_dataset\n positives_complement = positives_dataset - positives_subgroup\n\n # instancesComplement = instancesDataset - instancesSubgroup\n negatives_subgroup = instances_subgroup - positives_subgroup\n negatives_dataset = instances_dataset - positives_dataset\n negatives_complement = negatives_dataset - negatives_subgroup\n\n # observed = [positivesSubgroup, positives_complement,negatives_subgroup, negatives_complement]\n #\n # if round(positivesSubgroup) < 0 or\n # round(positives_complement) < 0 or\n # round(negatives_subgroup) <0 or\n # round (negatives_complement) < 0:\n # print (\"XXXXX\")\n val = scipy.stats.chi2_contingency([[round(positives_subgroup), round(positives_complement)],\n [round(negatives_subgroup), round(negatives_complement)]],\n correction=False)[0]\n if bidirect:\n return val\n elif direction_positive and p_subgroup > p_dataset:\n return val\n elif not direction_positive and p_subgroup < p_dataset:\n return val\n return -val\n\n @staticmethod\n def chi_squared_qf_weighted(subgroup, data, weighting_attribute, effective_sample_size=0, min_instances=5, ):\n (instancesDataset, positivesDataset, instancesSubgroup, positivesSubgroup) = subgroup.get_base_statistics(data,\n weighting_attribute)\n if (instancesSubgroup < min_instances) or ((instancesDataset - instancesSubgroup) < 5):\n return float(\"inf\")\n if effective_sample_size == 0:\n 
effective_sample_size = effective_sample_size(data[weighting_attribute])\n # p_subgroup = positivesSubgroup / instancesSubgroup\n # p_dataset = positivesDataset / instancesDataset\n\n negatives_subgroup = instancesSubgroup - positivesSubgroup\n negatives_dataset = instancesDataset - positivesDataset\n positives_complement = positivesDataset - positivesSubgroup\n negatives_complement = negatives_dataset - negatives_subgroup\n val = scipy.stats.chi2_contingency([[positivesSubgroup, positives_complement],\n [negatives_subgroup, negatives_complement]], correction=True)[0]\n return scipy.stats.chi2.sf(val * effective_sample_size / instancesDataset, 1)\n\n def __init__(self, direction='bidirect', min_instances=5):\n if direction == 'bidirect':\n self.bidirect = True\n self.direction_positive = True\n if direction == 'positive':\n self.bidirect = False\n self.direction_positive = True\n if direction == 'negative':\n self.bidirect = False\n self.direction_positive = False\n self.min_instances = min_instances\n\n def evaluate_from_dataset(self, data, subgroup, weighting_attribute=None):\n if not self.is_applicable(subgroup):\n raise BaseException(\"Quality measure cannot be used for this target class\")\n if weighting_attribute is None:\n result = self.evaluate_from_statistics(*subgroup.get_base_statistics(data))\n else:\n (instancesDataset, positivesDataset, instancesSubgroup, positivesSubgroup) = subgroup.get_base_statistics(\n data, weighting_attribute)\n weights = data[weighting_attribute]\n base = self.evaluate_from_statistics(instancesDataset, positivesDataset, instancesSubgroup,\n positivesSubgroup)\n result = base * effective_sample_size(weights) / instancesDataset\n return result\n\n def evaluate_from_statistics(self, instances_dataset, positives_dataset, instances_subgroup, positives_subgroup):\n return ChiSquaredQF.chi_squared_qf(instances_dataset, positives_dataset, instances_subgroup, positives_subgroup,\n self.min_instances, self.bidirect, self.direction_positive)\n\n def supports_weights(self):\n return True\n\n def is_applicable(self, subgroup):\n return isinstance(subgroup.target, NominalTarget)\n\n\nclass StandardQF(AbstractInterestingnessMeasure, BoundedInterestingnessMeasure):\n @staticmethod\n def standard_qf(a, instances_dataset, positives_dataset, instances_subgroup, positives_subgroup):\n if instances_subgroup == 0:\n return 0\n p_subgroup = positives_subgroup / instances_subgroup\n p_dataset = positives_dataset / instances_dataset\n return (instances_subgroup / instances_dataset) ** a * (p_subgroup - p_dataset)\n\n def __init__(self, a):\n self.a = a\n\n def evaluate_from_dataset(self, data, subgroup, weighting_attribute=None):\n if not self.is_applicable(subgroup):\n raise BaseException(\"Quality measure cannot be used for this target class\")\n return self.evaluate_from_statistics(*subgroup.get_base_statistics(data, weighting_attribute))\n\n def optimistic_estimate_from_dataset(self, data, subgroup, weighting_attribute=None):\n if not self.is_applicable(subgroup):\n raise BaseException(\"Quality measure cannot be used for this target class\")\n return self.optimistic_estimate_from_statistics(*subgroup.get_base_statistics(data, weighting_attribute))\n\n def evaluate_from_statistics(self, instances_dataset, positives_dataset, instances_subgroup, positives_subgroup):\n return StandardQF.standard_qf(self.a, instances_dataset, positives_dataset, instances_subgroup,\n positives_subgroup)\n\n def optimistic_estimate_from_statistics(self, instances_dataset, positives_dataset, 
instances_subgroup,\n positives_subgroup):\n return StandardQF.standard_qf(self.a, instances_dataset, positives_dataset, positives_subgroup,\n positives_subgroup)\n\n def supports_weights(self):\n return True\n\n def is_applicable(self, subgroup):\n return isinstance(subgroup.target, NominalTarget)\n\n\nclass WRAccQF(StandardQF):\n def __init__(self, a):\n super().__init__(a)\n self.a = 1.0\n\n\nclass LiftQF(StandardQF):\n def __init__(self, a):\n super().__init__(a)\n self.a = 0.0\n\n\nclass SimpleBinomial(StandardQF):\n def __init__(self, a):\n super().__init__(a)\n self.a = 0.5\n\n\n#####\n# GeneralizationAware Interestingness Measures\n#####\nclass GAStandardQF(AbstractInterestingnessMeasure):\n def __init__(self, a):\n self.a = a\n\n def evaluate_from_dataset(self, data, subgroup, weighting_attribute=None):\n (instances_dataset, _, instances_subgroup, positives_subgroup) = subgroup.get_base_statistics(data,\n weighting_attribute)\n if (instances_subgroup == 0) or (instances_dataset == instances_subgroup):\n return 0\n p_subgroup = positives_subgroup / instances_subgroup\n max_target_share = get_max_generalization_target_share(data, subgroup, weighting_attribute)\n relative_size = (instances_subgroup / instances_dataset)\n return relative_size ** self.a * (p_subgroup - max_target_share)\n\n def supports_weights(self):\n return True\n\n def is_applicable(self, subgroup):\n return isinstance(subgroup.target, NominalTarget)\n\n\ndef get_max_generalization_target_share(data, subgroup, weighting_attribute=None):\n selectors = subgroup.subgroup_description.selectors\n generalizations = powerset(selectors)\n max_target_share = 0\n for sels in generalizations:\n sgd = SubgroupDescription(list(sels))\n sg = Subgroup(subgroup.target, sgd)\n (_, _, instances_subgroup, positives_subgroup) = sg.get_base_statistics(data, weighting_attribute)\n target_share = positives_subgroup / instances_subgroup\n max_target_share = max(max_target_share, target_share)\n return max_target_share\n"
] | [
[
"numpy.sum",
"numpy.logical_and",
"numpy.dot"
]
] |
designer357/MSLSTM | [
"923f29f5a274ae41dbfe79d99e1ea28bb0cf5109"
] | [
"train.py"
] | [
"# -*- coding:utf-8 -*-\n\"\"\"\nmincheng:[email protected]\n\"\"\"\nfrom __future__ import division\nimport sys\nimport printlog\nimport datetime\nimport os\nimport time\nimport sklearn\nfrom sklearn.metrics import confusion_matrix\nfrom baselines import sclearn\nimport evaluation\nfrom collections import defaultdict\nimport tensorflow as tf\nimport mslstm\nimport config\nimport loaddata\nimport numpy as np\nimport visualize\nfrom sklearn.metrics import accuracy_score\nfrom baselines import nnkeras,sclearn\nimport matplotlib.pyplot as plt\nflags = tf.app.flags\n\nFLAGS = flags.FLAGS\n\n\ndef iterate_minibatches(inputs, targets, batchsize, shuffle=False):\n assert inputs.shape[0] == targets.shape[0]\n\n if shuffle:\n indices = np.arange(inputs.shape[0])\n np.random.shuffle(indices)\n\n for start_idx in range(0, inputs.shape[0] - batchsize + 1, batchsize):\n if shuffle:\n excerpt = indices[start_idx:start_idx + batchsize]\n else:\n excerpt = slice(start_idx, start_idx + batchsize)\n yield inputs[excerpt], targets[excerpt]\ndef pprint(msg,method=''):\n #if not 'Warning' in msg:\n if 1<0:\n sys.stdout = printlog.PyLogger('',method+'_'+str(FLAGS.num_neurons1))\n print(msg)\n try:\n sys.stderr.write(msg+'\\n')\n except:\n pass\n #sys.stdout.flush()\n else:\n print(msg)\n#def sess_run(commander,data,label):\n #global sess, data_x, data_y\n #return sess.run(commander, {data_x: data, data_y: label})\n\ndef train_lstm(method,filename_train_list,filename_test,trigger_flag,evalua_flag,is_binary_class,result_list_dict,evaluation_list):\n global tempstdout\n FLAGS.option = method\n dropout = 0.8\n x_train, y_train, x_val, y_val, x_test, y_test = loaddata.get_data(FLAGS.pooling_type, FLAGS.is_add_noise, FLAGS.noise_ratio, FLAGS.data_dir,\n filename_test, FLAGS.sequence_window, trigger_flag,is_binary_class,\n multiScale=FLAGS.is_multi_scale, waveScale=FLAGS.scale_levels,\n waveType=FLAGS.wave_type)\n \"\"\"\n if filename_test == 'HB_AS_Leak.txt':\n filename_train = 'HB_C_N_S.txt'\n elif filename_test == 'HB_Code_Red_I.txt':\n filename_train = 'HB_A_N_S.txt'\n elif filename_test == 'HB_Nimda.txt':\n filename_train = 'HB_A_C_S.txt'\n elif filename_test == 'HB_Slammer.txt':\n filename_train = 'HB_A_C_N.txt'\n print(filename_test)\n #x_train, y_train, x_val, y_val = loaddata.get_trainData(FLAGS.pooling_type, FLAGS.is_add_noise, FLAGS.noise_ratio, FLAGS.data_dir,\n # filename_train, FLAGS.sequence_window, trigger_flag,is_binary_class,\n # multiScale=FLAGS.is_multi_scale, waveScale=FLAGS.scale_levels,\n # waveType=FLAGS.wave_type)\n #x_test, y_test = loaddata.get_testData(FLAGS.pooling_type, FLAGS.is_add_noise, FLAGS.noise_ratio, FLAGS.data_dir,\n # filename_test, FLAGS.sequence_window, trigger_flag,is_binary_class,\n # multiScale=FLAGS.is_multi_scale, waveScale=FLAGS.scale_levels,\n # waveType=FLAGS.wave_type)\n\n \"\"\"\n #loaddata.Multi_Scale_Plotting_2(x_train)\n\n if FLAGS.is_multi_scale:\n FLAGS.scale_levels = x_train.shape[1]\n FLAGS.input_dim = x_train.shape[-1]\n FLAGS.number_class = y_train.shape[1]\n if \"Nimda\" in filename_test:\n FLAGS.batch_size = int(int(x_train.shape[0])/5)\n else:\n FLAGS.batch_size = int(x_train.shape[0])\n else:\n FLAGS.input_dim = x_train.shape[-1]\n FLAGS.number_class = y_train.shape[1]\n if \"Nimda\" in filename_test:\n FLAGS.batch_size = int(int(x_train.shape[0])/5)\n else:\n FLAGS.batch_size = int(x_train.shape[0])\n #g = tf.Graph()\n with tf.Graph().as_default():\n #config = tf.ConfigProto()\n config = tf.ConfigProto(device_count={'/gpu': 0}) #turn GPU on and off\n 
#config = tf.ConfigProto(log_device_placement=True)\n #config.gpu_options.per_process_gpu_memory_fraction = 0.2\n #with tf.variable_scope(\"middle\")as scope:\n tf.set_random_seed(1337)\n #global_step = tf.Variable(0,name=\"global_step\",trainable=False)\n data_x,data_y = mslstm.inputs(FLAGS.option)\n #output_u_w,prediction, label = mslstm.inference(data_x,data_y,FLAGS.option)\n\n is_training = tf.placeholder(tf.bool)\n prediction, label,output_last = mslstm.inference(data_x,data_y,FLAGS.option,is_training)\n loss = mslstm.loss_(prediction, label)\n tran_op,optimizer = mslstm.train(loss)\n minimize = optimizer.minimize(loss)\n correct_pred = tf.equal(tf.argmax(prediction, 1), tf.argmax(label, 1))\n accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))\n #summary_op = tf.merge_all_summaries()\n weights = tf.Variable(tf.constant(0.1, shape=[len(y_test)*FLAGS.sequence_window, 1, FLAGS.scale_levels]),\n name=\"weights123\")\n init_op = tf.global_variables_initializer()\n #init_op = tf.initialize_all_variables()\n sess = tf.Session(config=config)\n sess.run(init_op)\n\n #summary_writer = tf.train.SummaryWriter(FLAGS.log_dir, sess.graph)\n #saver = tf.train.Saver()\n saver = tf.train.Saver({\"my_weights\": weights})\n\n epoch_training_loss_list = []\n epoch_training_acc_list = []\n epoch_val_loss_list = []\n epoch_val_acc_list = []\n early_stopping = 10\n no_of_batches = int(len(x_train) / FLAGS.batch_size)\n #visualize.curve_plotting_withWindow(x_train, y_train, 0, \"Train_\"+'_'+FLAGS.option)\n #visualize.curve_plotting_withWindow(x_test, y_test, 2, \"Test_\"+'_'+FLAGS.option)\n total_iteration = 0\n for i in range(FLAGS.max_epochs):\n if early_stopping > 0:\n pass\n else:\n break\n j_iteration = 0\n for j_batch in iterate_minibatches(x_train,y_train,FLAGS.batch_size,shuffle=False):\n j_iteration += 1\n total_iteration += 1\n inp, out = j_batch\n sess.run(minimize, {data_x: inp, data_y: out, is_training:True})\n training_acc, training_loss = sess.run((accuracy, loss), {data_x: inp, data_y: out,is_training:True})\n #sys.stdout = tempstdout\n val_acc, val_loss = sess.run((accuracy, loss), {data_x:x_val, data_y:y_val,is_training:True})\n pprint(\n FLAGS.option + \"_Epoch%s\" % (str(i + 1)) + \">\" * 3 +'_Titer-'+str(total_iteration) +'_iter-'+str(j_iteration)+ str(FLAGS.wave_type) + '-' + str(FLAGS.scale_levels) + '-' + str(FLAGS.learning_rate)+'-'+str(FLAGS.num_neurons1)+'-'+str(FLAGS.num_neurons2)+ \">>>=\" + \"train_accuracy: %s, train_loss: %s\" % (\n str(training_acc), str(training_loss)) \\\n + \",\\tval_accuracy: %s, val_loss: %s\" % (str(val_acc), str(val_loss)), method)\n\n\n epoch_training_loss_list.append(training_loss)\n epoch_training_acc_list.append(training_acc)\n epoch_val_loss_list.append(val_loss)\n epoch_val_acc_list.append(val_acc)\n\n try:\n max_val_acc = epoch_val_acc_list[-2]\n except:\n max_val_acc = 0\n\n if epoch_val_acc_list[-1] < max_val_acc:\n early_stopping -= 1\n elif epoch_val_acc_list[-1] >= max_val_acc:\n early_stopping = 10\n if val_loss > 10 or val_loss == np.nan:\n break\n if 1<0:\n #pprint(\"PPP\")\n weights_results = sess.run(output_last, {data_x:x_test, data_y: y_test})\n #print(weights_results)\n #sys.stdout = tempstdout\n visualize.curve_plotting(weights_results,y_test,filename_test,FLAGS.option)\n #pprint(\"QQQ\")\n with open(filename_test+\"_EA.txt\",'w')as fout:\n fout.write(weights_results)\n #sess.run(weights.assign(weights_results))\n else:\n pass\n\n #weights = output_u_w.eval(session=sess)\n #weights = saver.restore(sess, 
\"./tf_tmp/model.ckpt\")\n #pprint(weights)\n #weight_list = return_max_index(weights)\n result = sess.run(prediction, {data_x:x_test, data_y: y_test})\n #print(result)\n #pprint(result)\n #print(\"LLL\")\n saver.save(sess, \"./tf_tmp/model.ckpt\")\n sess.close()\n #results = evaluation.evaluation(y_test, result)#Computing ACCURACY, F1-Score, .., etc\n if is_binary_class == True:\n #sys.stdout = tempstdout\n results = evaluation.evaluation(y_test, result, trigger_flag, evalua_flag) # Computing ACCURACY,F1-score,..,etc\n y_test = loaddata.reverse_one_hot(y_test)\n result = loaddata.reverse_one_hot(result)\n else:\n symbol_list = [0, 1, 2, 3, 4]\n sys.stdout = tempstdout\n print(y_test)\n print(result)\n y_test = loaddata.reverse_one_hot(y_test)\n result = loaddata.reverse_one_hot(result)\n\n confmat = confusion_matrix(y_test, result, labels=symbol_list)\n visualize.plotConfusionMatrix(confmat)\n #accuracy = sklearn.metrics.accuracy_score(y_test, result)\n symbol_list2 = [0]\n y_ = []\n for symbol in symbol_list2:\n for tab in range(len(y_test)):\n if y_test[tab] == symbol and y_test[tab] == result[tab]:\n y_.append(symbol)\n # print(y_test[0:10])\n # rint(result[0:10])\n # print(\"Accuracy is :\"+str(accuracy))\n accuracy = float(len(y_)) / (list(result).count(symbol))\n print(\"Accuracy of \" + str(symbol) + \" is :\" + str(accuracy))\n print(\"True is \")\n # print(y_test)\n print(\"The 0 of True is \" + str(list(y_test).count(0)))\n print(\"The 1 of True is \" + str(list(y_test).count(1)))\n print(\"The 2 of True is \" + str(list(y_test).count(2)))\n print(\"The 3 of True is \" + str(list(y_test).count(3)))\n print(\"The 4 of True is \" + str(list(y_test).count(4)))\n # print(len(y_test))\n print(\"Predict is \")\n # print(result)\n print(\"The 0 of Predict is \" + str(list(result).count(0)))\n print(\"The 1 of Predict is \" + str(list(result).count(1)))\n print(\"The 2 of Predict is \" + str(list(result).count(2)))\n print(\"The 3 of Predict is \" + str(list(result).count(3)))\n print(\"The 4 of Predict is \" + str(list(result).count(4)))\n print(\"Accuracy is :\" + str(accuracy))\n f1_score = sklearn.metrics.f1_score(y_test, result,average=\"macro\")\n print(\"F-score is :\" + str(f1_score))\n results = {'ACCURACY': accuracy, 'F1_SCORE': f1_score, 'AUC': 9999, 'G_MEAN': 9999}\n sys.stdout = tempstdout\n #print(weights_results.shape)\n #print(\"215\")\n y_test2 = np.array(y_test)\n result2 = np.array(result)\n #results = accuracy_score(y_test2, result2)\n #print(y_test2)\n #print(result2)\n #print(results)\n with open(os.path.join(os.path.join(os.getcwd(),'stat'),\"StatFalseAlarm_\" + filename_test + \"_True.txt\"), \"w\") as fout:\n for tab in range(len(y_test2)):\n fout.write(str(int(y_test2[tab])) + '\\n')\n with open(os.path.join(os.path.join(os.getcwd(),'stat'),\"StatFalseAlarm_\" + filename_test + \"_\" + method + \"_\" + \"_Predict.txt\"), \"w\") as fout:\n for tab in range(len(result2)):\n fout.write(str(int(result2[tab])) + '\\n')\n #eval_list = [\"AUC\", \"G_MEAN\",\"ACCURACY\",\"F1_SCORE\"]\n for each_eval in evaluation_list:\n result_list_dict[each_eval].append(results[each_eval])\n\n if evalua_flag:\n with open(os.path.join(FLAGS.output, \"TensorFlow_Log\" + filename_test + \".txt\"), \"a\")as fout:\n if not FLAGS.is_multi_scale:\n outfileline = FLAGS.option + \"_epoch:\" + str(FLAGS.max_epochs) + \",_lr:\" + str(FLAGS.learning_rate) + \",_multi_scale:\" + str(FLAGS.is_multi_scale) + \",hidden_nodes: \"+str(FLAGS.num_neurons1)+\"/\"+str(FLAGS.num_neurons2) + \"\\n\"\n 
else:\n outfileline = FLAGS.option + \"_epoch:\" + str(FLAGS.max_epochs) + \",_wavelet:\"+str(FLAGS.wave_type) + \",_lr:\" + str(FLAGS.learning_rate) + \",_multi_scale:\" + str(FLAGS.is_multi_scale) + \",_train_set_using_level:\" + str(FLAGS.scale_levels) + \",hidden_nodes: \"+str(FLAGS.num_neurons1)+\"/\"+str(FLAGS.num_neurons2) + \"\\n\"\n\n fout.write(outfileline)\n for each_eval in evaluation_list:\n #for eachk, eachv in result_list_dict.items():\n fout.write(each_eval + \": \" + str(round(np.mean(result_list_dict[each_eval]), 3)) + \",\\t\")\n fout.write('\\n')\n return epoch_training_acc_list,epoch_val_acc_list,epoch_training_loss_list,epoch_val_loss_list\n else:\n return results\n\n\n\n\ndef train_classic(method,filename_train,filename_test, trigger_flag,evalua_flag,is_binary_class,evaluation_list):\n return sclearn.Basemodel(method,filename_train,filename_test,trigger_flag,evalua_flag,evaluation_list)\n\ndef train(method,filename_train,filename_test,trigger_flag,evalua_flag,is_binary_class,evaluation_list,wave_type='db1'):\n global data_x, data_y\n result_list_dict = defaultdict(list)\n #evaluation_list = [\"ACCURACY\", \"F1_SCORE\", \"AUC\", \"G_MEAN\"]\n for each in evaluation_list:\n result_list_dict[each] = []\n if 'L' in method or 'RNN' in method:\n sys.stdout = tempstdout\n if method == '1L' or method == '2L' or method == '3L' \\\n or method == '4L' or method == '5L' or method == 'RNN':\n #FLAGS.learning_rate = 0.01\n FLAGS.is_multi_scale = False\n elif 'AL' == method:\n #FLAGS.learning_rate = 0.01\n FLAGS.is_multi_scale = False\n else:\n #FLAGS.learning_rate = 0.05\n FLAGS.is_multi_scale = True\n FLAGS.wave_type = wave_type\n return train_lstm(method,filename_train,filename_test,trigger_flag,evalua_flag,is_binary_class,result_list_dict,evaluation_list)\n else:\n sys.stdout = tempstdout\n return train_classic(method,filename_train,filename_test,trigger_flag,evalua_flag,is_binary_class,result_list_dict,evaluation_list)\n\ndef main(unused_argv):\n global tempstdout\n\n #main function\n\n\n #wave_type_list =['db1','db2','haar','coif1','db1','db2','haar','coif1','db1','db2']\n wave_type_list = ['haar']\n multi_scale_value_list = [2,3,4,5,6,10]\n case_label = {'SVM':'SVM','NB':'NB','DT':'DT','Ada.Boost':'Ada.Boost','RF':'RF','1NN':'1NN','1NN-DTW':'DTW',\n 'SVMF':'SVMF','SVMW':'SVMW','MLP':'MLP','RNN':'RNN','1L':'LSTM','2L':'2-LSTM','3L':'3-LSTM',\\\n 'AL':'ALSTM','HL':'MSLSTM','HAL':'MSLSTM'}\n\n trigger_flag = 1\n evalua_flag = True\n is_binary_class = True\n single_layer = True\n\n if is_binary_class:\n filename_list = [\"HB_AS_Leak.txt\",\"HB_Code_Red_I.txt\",\"HB_Nimda.txt\",\"HB_Slammer.txt\"]\n #filename_list = [\"HB_Slammer.txt\"] # HB_Code_Red_I.txt\n # HB_Nimda.txt\n # HB_Slammer.txt\n else:\n filename_list = [\"HB_ALL.txt\"]\n\n if trigger_flag == 1 :\n if single_layer:\n #case = ['AL']\n #case = ['1L','3L','AL']\n case = ['MLP','RNN','1L','2L','3L','AL']\n else:\n case = ['HL','HAL']\n #case = ['HL','HAL']\n\n else:\n case = [\"1NN\"]\n #case = [\"RF\",\"SVM\",\"SVMF\",\"SVMW\",\"NB\",\"DT\",\"Ada.Boost\",\"1NN\"]\n #case = [\"NB\",\"1NN\",\"Ada.Boost\",\"RF\"]\n\n if evalua_flag:\n evaluation_list = [\"AUC\", \"G_MEAN\", \"ACCURACY\", \"F1_SCORE\"]\n else:\n evaluation_list = [\"FPR\", \"TPR\",\"AUC\",\"G_MEAN\"]\n\n wave_type = wave_type_list[0]\n hidden_unit1_list = [8,16,32,64,128,256]\n #hidden_unit1_list = [16]\n\n hidden_unit2_list = [8,16,32,64,128]\n #hidden_unit2_list = [8]\n\n\n #combination_list = [(16,8),(16,32),(16,64),(32,64),(128,16)]\n #combination_list 
= [(8,8),(8,32),(16,8),(16,64),(128,16),(128,64)]\n #learning_rate_list = [0.001, 0.01, 0.05, 0.1]\n learning_rate_list = [0.1,0.05,0.01,0.001]\n\n for tab in range(len(filename_list)):\n case_list = []\n train_acc_list = []\n val_acc_list = []\n train_loss_list = []\n val_loss_list = []\n if single_layer:\n combination_list = hidden_unit1_list\n else:\n combination_list = []\n for each1 in hidden_unit1_list:\n for each2 in hidden_unit2_list:\n combination_list.append((each1, each2))\n \"\"\"\n if filename_list[tab] == \"HB_AS_Leak.txt\":\n combination_list = [(32, 64), (32, 128), (64, 64)]\n elif filename_list[tab] == \"HB_Code_Red_I.txt\":\n combination_list = [(32, 32), (16, 8), (16, 64), (32, 64)]\n elif filename_list[tab] == \"HB_Nimda.txt\":\n combination_list = [(8, 32), (32, 64)]\n elif filename_list[tab] == \"HB_Slammer.txt\":\n combination_list = [(16, 8), (16, 32), (16, 64)]\n \"\"\"\n\n results = {}\n for each_case in case:\n if 1>0:\n case_list.append(case_label[each_case])\n if trigger_flag: #\n sys.stdout = tempstdout\n if each_case == 'MLP':\n if evalua_flag:\n nnkeras.Basemodel(each_case, filename_list[tab],trigger_flag,evalua_flag,is_binary_class,evaluation_list)\n else:\n results[case_label[each_case]] = nnkeras.Basemodel(each_case, filename_list[tab],trigger_flag,evalua_flag,is_binary_class,evaluation_list)\n\n else:\n if evalua_flag:\n for learning_rate in learning_rate_list:\n FLAGS.learning_rate = learning_rate\n\n for each_comb in combination_list:\n if not 'H' in each_case:\n FLAGS.num_neurons1 = each_comb\n #FLAGS.num_neurons1 = 16\n #FLAGS.learning_rate = 0.001\n else:\n #if each_case == 'HAL':\n #FLAGS.num_neurons1, FLAGS.num_neurons2 = (100,64)\n #elif each_case == 'HL':\n #FLAGS.num_neurons1, FLAGS.num_neurons2 = (16,8)\n FLAGS.num_neurons1, FLAGS.num_neurons2 = each_comb\n\n train_acc,val_acc,train_loss,val_loss = train(each_case,filename_list, filename_list[tab],trigger_flag,evalua_flag,is_binary_class,evaluation_list,wave_type)\n train_acc_list.append(train_acc)\n val_acc_list.append(val_acc)\n train_loss_list.append(train_loss)\n val_loss_list.append(val_loss)\n #visualize.epoch_acc_plotting(filename_list[tab],case_list,FLAGS.sequence_window,FLAGS.learning_rate,train_acc_list,val_acc_list)\n #visualize.epoch_loss_plotting(filename_list[tab], case_list,FLAGS.sequence_window, FLAGS.learning_rate,train_loss_list, val_loss_list)\n else:\n results[case_label[each_case]] = train(each_case,filename_list, filename_list[tab],trigger_flag,evalua_flag,is_binary_class,evaluation_list,wave_type)\n\n else:\n sys.stdout = tempstdout\n if evalua_flag:\n sclearn.Basemodel(each_case, filename_list[tab], trigger_flag, evalua_flag,is_binary_class,evaluation_list)\n else:\n results[case_label[each_case]] = sclearn.Basemodel(each_case, filename_list[tab],trigger_flag,evalua_flag,is_binary_class,evaluation_list)\n else:\n pass\n if not evalua_flag:\n visualize.plotAUC(results,case_list,filename_list[tab])\n else:\n if trigger_flag:\n try:\n print()\n #visualize.epoch_acc_plotting(filename_list[tab], case_list, FLAGS.sequence_window,FLAGS.learning_rate, train_acc_list, val_acc_list)\n #visualize.epoch_loss_plotting(filename_list[tab], case_list, FLAGS.sequence_window,FLAGS.learning_rate, train_loss_list, val_loss_list)\n except:\n pass\n end = time.time()\n pprint(\"The time elapsed : \" + str(end - start) + ' seconds.\\n')\n\n\nif __name__ == \"__main__\":\n global tempstdout\n tempstdout = sys.stdout\n 
pprint(\"------------------------------------------------\"+str(datetime.datetime.now())+\"--------------------------------------------\")\n start = time.time()\n tf.app.run()\n"
] | [
[
"tensorflow.placeholder",
"numpy.random.shuffle",
"tensorflow.app.run",
"tensorflow.global_variables_initializer",
"numpy.arange",
"sklearn.metrics.confusion_matrix",
"sklearn.metrics.f1_score",
"tensorflow.cast",
"tensorflow.set_random_seed",
"tensorflow.Graph",
"tensorflow.Session",
"tensorflow.train.Saver",
"numpy.array",
"tensorflow.argmax",
"tensorflow.ConfigProto",
"numpy.mean"
]
] |
erinfolami/ZazuML | [
"c5247859353cacf0e4a58f9c530a07038d9e12cf"
] | [
"ObjectDetNet/retinanet/dataloaders/custom_transforms.py"
] | [
"import math\nimport torch\nimport random\nimport numpy as np\nimport torch.nn as nn\nfrom numpy import int64 as int64\nimport torchvision.transforms as transforms\n\nfrom PIL import Image, ImageOps, ImageFilter\n\n\nclass Normalize(object):\n \"\"\"Normalize a tensor image with mean and standard deviation.\n Args:\n mean (tuple): means for each channel.\n std (tuple): standard deviations for each channel.\n \"\"\"\n\n def __init__(self, mean=(0., 0., 0.), std=(1., 1., 1.)):\n self.mean = mean\n self.std = std\n\n def __call__(self, sample):\n img = sample['image']\n mask = sample['label']\n img = np.array(img).astype(np.float32)\n mask = np.array(mask).astype(np.float32)\n img /= 255.0\n img -= self.mean\n img /= self.std\n\n return {'image': img,\n 'label': mask}\n\n\nclass ToTensor(object):\n \"\"\"Convert ndarrays in sample to Tensors.\"\"\"\n\n def __call__(self, sample):\n # swap color axis because\n # numpy image: H x W x C\n # torch image: C X H X W\n img = sample['image']\n mask = sample['label']\n img = np.array(img).astype(np.float32).transpose((2, 0, 1))\n mask = np.array(mask).astype(np.float32)\n\n img = torch.from_numpy(img).float()\n mask = torch.from_numpy(mask).float()\n\n return {'image': img,\n 'label': mask}\n\n\nclass RandomHorizontalFlip(object):\n def __call__(self, sample):\n img = sample['image']\n mask = sample['label']\n if random.random() < 0.5:\n img = img.transpose(Image.FLIP_LEFT_RIGHT)\n mask = mask.transpose(Image.FLIP_LEFT_RIGHT)\n\n return {'image': img,\n 'label': mask}\n\n\nclass RandomRotate(object):\n def __init__(self, degree):\n self.degree = degree\n\n def __call__(self, sample):\n img = sample['image']\n mask = sample['label']\n rotate_degree = random.uniform(-1 * self.degree, self.degree)\n img = img.rotate(rotate_degree, Image.BILINEAR)\n mask = mask.rotate(rotate_degree, Image.NEAREST)\n\n return {'image': img,\n 'label': mask}\n\n\nclass RandomGaussianBlur(object):\n def __call__(self, sample):\n img = sample['image']\n mask = sample['label']\n if random.random() < 0.5:\n img = img.filter(ImageFilter.GaussianBlur(\n radius=random.random()))\n\n return {'image': img,\n 'label': mask}\n\n\nclass RandomScaleCrop(object):\n def __init__(self, base_size, crop_size, fill=0):\n self.base_size = base_size\n self.crop_size = crop_size\n self.fill = fill\n\n def __call__(self, sample):\n img = sample['image']\n mask = sample['label']\n # random scale (short edge)\n short_size = random.randint(int(self.base_size * 0.5), int(self.base_size * 2.0))\n w, h = img.size\n if h > w:\n ow = short_size\n oh = int(1.0 * h * ow / w)\n else:\n oh = short_size\n ow = int(1.0 * w * oh / h)\n img = img.resize((ow, oh), Image.BILINEAR)\n mask = mask.resize((ow, oh), Image.NEAREST)\n # pad crop\n if short_size < self.crop_size:\n padh = self.crop_size - oh if oh < self.crop_size else 0\n padw = self.crop_size - ow if ow < self.crop_size else 0\n img = ImageOps.expand(img, border=(0, 0, padw, padh), fill=0)\n mask = ImageOps.expand(mask, border=(0, 0, padw, padh), fill=self.fill)\n # random crop crop_size\n w, h = img.size\n x1 = random.randint(0, w - self.crop_size)\n y1 = random.randint(0, h - self.crop_size)\n img = img.crop((x1, y1, x1 + self.crop_size, y1 + self.crop_size))\n mask = mask.crop((x1, y1, x1 + self.crop_size, y1 + self.crop_size))\n\n return {'image': img,\n 'label': mask}\n\n\nclass FixScaleCrop(object):\n def __init__(self, crop_size):\n self.crop_size = crop_size\n\n def __call__(self, sample):\n img = sample['image']\n mask = sample['label']\n w, h 
= img.size\n if w > h:\n oh = self.crop_size\n ow = int(1.0 * w * oh / h)\n else:\n ow = self.crop_size\n oh = int(1.0 * h * ow / w)\n img = img.resize((ow, oh), Image.BILINEAR)\n mask = mask.resize((ow, oh), Image.NEAREST)\n # center crop\n w, h = img.size\n x1 = int(round((w - self.crop_size) / 2.))\n y1 = int(round((h - self.crop_size) / 2.))\n img = img.crop((x1, y1, x1 + self.crop_size, y1 + self.crop_size))\n mask = mask.crop((x1, y1, x1 + self.crop_size, y1 + self.crop_size))\n\n return {'image': img,\n 'label': mask}\n\n\n# resize to 512*1024\nclass FixedResize(object):\n \"\"\"change the short edge length to size\"\"\"\n\n def __init__(self, resize=512):\n self.size1 = resize # size= 512\n\n def __call__(self, sample):\n img = sample['image']\n mask = sample['label']\n assert img.size == mask.size\n\n w, h = img.size\n if w > h:\n oh = self.size1\n ow = int(1.0 * w * oh / h)\n else:\n ow = self.size1\n oh = int(1.0 * h * ow / w)\n img = img.resize((ow, oh), Image.BILINEAR)\n mask = mask.resize((ow, oh), Image.NEAREST)\n return {'image': img,\n 'label': mask}\n\n\n# random crop 321*321\nclass RandomCrop(object):\n def __init__(self, crop_size=320):\n self.crop_size = crop_size\n\n def __call__(self, sample):\n img = sample['image']\n mask = sample['label']\n w, h = img.size\n x1 = random.randint(0, w - self.crop_size)\n y1 = random.randint(0, h - self.crop_size)\n img = img.crop((x1, y1, x1 + self.crop_size, y1 + self.crop_size))\n mask = mask.crop((x1, y1, x1 + self.crop_size, y1 + self.crop_size))\n return {'image': img,\n 'label': mask}\n\n\nclass RandomScale(object):\n def __init__(self, scales=(1,)):\n self.scales = scales\n\n def __call__(self, sample):\n img = sample['image']\n mask = sample['label']\n w, h = img.size\n scale = random.choice(self.scales)\n w, h = int(w * scale), int(h * scale)\n return {'image': img,\n 'label': mask}\n\n\nclass TransformTr(object):\n def __init__(self, resize, multi_scale=None):\n if multi_scale is None:\n self.composed_transforms = transforms.Compose([\n FixedResize(resize=resize),\n # RandomCrop(crop_size=args.crop_size),\n # tr.RandomScaleCrop(base_size=self.args.base_size, crop_size=self.args.crop_size, fill=255),\n # tr.RandomGaussianBlur(),\n # Normalize(mean, std),\n # ToTensor()\n ])\n else:\n self.composed_transforms = transforms.Compose([\n FixedResize(resize=args.resize),\n RandomScale(scales=args.multi_scale),\n RandomCrop(crop_size=args.crop_size),\n # tr.RandomScaleCrop(base_size=self.args.base_size, crop_size=self.args.crop_size, fill=255),\n # tr.RandomGaussianBlur(),\n Normalize(mean, std),\n ToTensor()])\n\n def __call__(self, sample):\n return self.composed_transforms(sample)\n\n\nclass TransformVal(object):\n def __init__(self, args, mean, std):\n self.composed_transforms = transforms.Compose([\n FixedResize(resize=args.resize),\n FixScaleCrop(crop_size=args.crop_size), # TODO:CHECK THIS\n Normalize(mean, std),\n ToTensor()])\n\n def __call__(self, sample):\n return self.composed_transforms(sample)\n"
] | [
[
"numpy.array",
"torch.from_numpy"
]
] |
DwaraknathT/sparsify | [
"bbe3b6e492c2bc8fdd9dd37d87ffc5f51f520792"
] | [
"models/resnet.py"
] | [
"'''\nProperly implemented ResNet-s for CIFAR10 as described in paper [1].\n\nThe implementation and structure of this file is hugely influenced by [2]\nwhich is implemented for ImageNet and doesn't have option A for identity.\nMoreover, most of the implementations on the web is copy-paste from\ntorchvision's resnet and has wrong number of params.\n\nProper ResNet-s for CIFAR10 (for fair comparision and etc.) has following\nnumber of layers and parameters:\n\nname | layers | params\nResNet20 | 20 | 0.27M\nResNet32 | 32 | 0.46M\nResNet44 | 44 | 0.66M\nResNet56 | 56 | 0.85M\nResNet110 | 110 | 1.7M\nResNet1202| 1202 | 19.4m\n\nwhich this implementation indeed has.\n\nReference:\n[1] Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun\n Deep Residual Learning for Image Recognition. arXiv:1512.03385\n[2] https://github.com/pytorch/vision/blob/master/torchvision/models/resnet.py\n\nIf you use this implementation in you work, please don't forget to mention the\nauthor, Yerlan Idelbayev.\n'''\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.nn.init as init\n\nfrom layers.layers import MaskedConv\n\n__all__ = ['ResNet', 'resnet20', 'resnet32', 'resnet44', 'resnet56', 'resnet110', 'resnet1202']\n\n\ndef _weights_init(m):\n classname = m.__class__.__name__\n # print(classname)\n if isinstance(m, nn.Linear) or isinstance(m, MaskedConv):\n init.xavier_normal_(m.weight)\n\n\n_AFFINE = True\n\n\nclass LambdaLayer(nn.Module):\n def __init__(self, lambd):\n super(LambdaLayer, self).__init__()\n self.lambd = lambd\n\n def forward(self, x):\n return self.lambd(x)\n\n\nclass BasicBlock(nn.Module):\n expansion = 1\n\n def __init__(self, in_planes, planes, stride=1):\n super(BasicBlock, self).__init__()\n self.conv1 = MaskedConv(in_planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)\n self.bn1 = nn.BatchNorm2d(planes, affine=_AFFINE)\n self.conv2 = MaskedConv(planes, planes, kernel_size=3, stride=1, padding=1, bias=False)\n self.bn2 = nn.BatchNorm2d(planes, affine=_AFFINE)\n\n self.downsample = None\n self.bn3 = None\n if stride != 1 or in_planes != planes:\n self.downsample = nn.Sequential(\n MaskedConv(in_planes, self.expansion * planes, kernel_size=1, stride=stride, bias=False))\n self.bn3 = nn.BatchNorm2d(self.expansion * planes, affine=_AFFINE)\n\n def forward(self, x):\n # x: batch_size * in_c * h * w\n residual = x\n out = F.relu(self.bn1(self.conv1(x)))\n out = self.bn2(self.conv2(out))\n if self.downsample is not None:\n residual = self.bn3(self.downsample(x))\n out += residual\n out = F.relu(out)\n return out\n\n\nclass ResNet(nn.Module):\n def __init__(self, block, num_blocks, num_classes=10):\n super(ResNet, self).__init__()\n _outputs = [32, 64, 128]\n self.in_planes = _outputs[0]\n\n self.conv1 = MaskedConv(3, _outputs[0], kernel_size=3, stride=1, padding=1, bias=False)\n self.bn = nn.BatchNorm2d(_outputs[0], affine=_AFFINE)\n self.layer1 = self._make_layer(block, _outputs[0], num_blocks[0], stride=1)\n self.layer2 = self._make_layer(block, _outputs[1], num_blocks[1], stride=2)\n self.layer3 = self._make_layer(block, _outputs[2], num_blocks[2], stride=2)\n self.linear = nn.Linear(_outputs[2], num_classes)\n\n self.apply(_weights_init)\n\n def _make_layer(self, block, planes, num_blocks, stride):\n strides = [stride] + [1] * (num_blocks - 1)\n layers = []\n for stride in strides:\n layers.append(block(self.in_planes, planes, stride))\n self.in_planes = planes * block.expansion\n\n return nn.Sequential(*layers)\n\n def forward(self, x):\n out = 
F.relu(self.bn(self.conv1(x)))\n out = self.layer1(out)\n out = self.layer2(out)\n out = self.layer3(out)\n out = F.avg_pool2d(out, out.size()[3])\n out = out.view(out.size(0), -1)\n out = self.linear(out)\n return out\n\n\ndef resnet20(num_classes):\n return ResNet(BasicBlock, [3, 3, 3], num_classes=num_classes)\n\n\ndef resnet32(num_classes):\n return ResNet(BasicBlock, [5, 5, 5], num_classes=num_classes)\n\n\ndef resnet44(num_classes):\n return ResNet(BasicBlock, [7, 7, 7], num_classes=num_classes)\n\n\ndef resnet56(num_classes):\n return ResNet(BasicBlock, [9, 9, 9], num_classes=num_classes)\n\n\ndef resnet110(num_classes):\n return ResNet(BasicBlock, [18, 18, 18], num_classes=num_classes)\n\n\ndef resnet1202(num_classes):\n return ResNet(BasicBlock, [200, 200, 200], num_clases=num_classes)\n"
] | [
[
"torch.nn.BatchNorm2d",
"torch.nn.Linear",
"torch.nn.init.xavier_normal_",
"torch.nn.functional.relu",
"torch.nn.Sequential"
]
] |
sweetpand/tensorflow_mri | [
"7a483cbbbe515ad395928311759505707bd72503"
] | [
"recommendation_system_demos/Basic-CMN-Demo/util/gmf.py"
] | [
"import sonnet as snt\nimport tensorflow as tf\n\nfrom util.helper import GraphKeys, add_to_collection\nfrom util.layers import DenseLayer, LossLayer, OptimizerLayer, ModelBase\n\n\nclass PairwiseGMF(ModelBase):\n\n def __init__(self, config):\n \"\"\"\n :param config:\n \"\"\"\n # super(PairwiseGMF, self).__init__(config)\n self.config = config\n self._activation_fn = tf.nn.relu\n self._embedding_initializers = {\n 'embeddings': tf.truncated_normal_initializer(stddev=0.01),\n }\n\n self._embedding_regularizers = {}\n\n self._initializers = {\n \"w\": tf.contrib.layers.xavier_initializer(),\n }\n\n self._regularizers = {\n 'w': tf.contrib.layers.l2_regularizer(config.l2)\n }\n\n self._construct_placeholders()\n self._construct_weights()\n self._construct()\n tf.summary.scalar('Model/Loss', tf.get_collection(GraphKeys.LOSSES)[0])\n self.summary = tf.summary.merge_all()\n\n def _construct(self):\n \"\"\"\n Construct the model; main part of it goes here\n \"\"\"\n\n self.v = DenseLayer(1, False, tf.nn.relu, initializers=self._initializers,\n regularizers=self._regularizers, name='OutputVector')\n self.score = tf.squeeze(self.v(self._cur_user * self._cur_item))\n negative_output = tf.squeeze(self.v(self._cur_user * self._cur_item_negative))\n tf.add_to_collection(GraphKeys.PREDICTION, self.score)\n self.loss = LossLayer()(self.score, negative_output)\n self._optimizer = OptimizerLayer(self.config.optimizer, clip=5.0,\n params={})\n self.train = self._optimizer(self.loss)\n\n def _construct_weights(self):\n \"\"\"\n Constructs the user/item memories and user/item external memory/outputs\n Also add the embedding lookups\n \"\"\"\n self.user_memory = snt.Embed(self.config.user_count, self.config.embed_size,\n initializers=self._embedding_initializers,\n regularizers=self._embedding_regularizers,\n name='MemoryEmbed')\n\n self.item_memory = snt.Embed(self.config.item_count,\n self.config.embed_size,\n initializers=self._embedding_initializers,\n regularizers=self._embedding_regularizers,\n name=\"ItemMemory\")\n\n # [batch, embedding size]\n self._cur_user = self.user_memory(self.input_users)\n\n # Item memories a query\n self._cur_item = self.item_memory(self.input_items)\n self._cur_item_negative = self.item_memory(self.input_items_negative)\n\n def _construct_placeholders(self):\n self.input_users = tf.placeholder(tf.int32, [None], 'UserID')\n self.input_items = tf.placeholder(tf.int32, [None], 'ItemID')\n self.input_items_negative = tf.placeholder(tf.int32, [None], 'NegativeItemID')\n\n # Add our placeholders\n add_to_collection(GraphKeys.PLACEHOLDER, [self.input_users,\n self.input_items,\n self.input_items_negative])\n"
] | [
[
"tensorflow.placeholder",
"tensorflow.summary.merge_all",
"tensorflow.truncated_normal_initializer",
"tensorflow.get_collection",
"tensorflow.contrib.layers.l2_regularizer",
"tensorflow.contrib.layers.xavier_initializer",
"tensorflow.add_to_collection"
]
] |
chaoannricardo/NTU_CARDO_Database | [
"5fbfa1383f2e65a04fabd863c68373f45bbf05fd"
] | [
"DBUIScripts/db_update.py"
] | [
"# -*- coding: utf8 -*-\nimport pandas as pd\nimport pymysql\n\n# import configuration in parent dir\nimport os, sys, inspect\n\ncurrent_dir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))\nparent_dir = os.path.dirname(current_dir)\nsys.path.insert(0, parent_dir)\nimport configuration as conf\n# import packages in models\nfrom models import data_processing, database_management, file_management\n\n\nif __name__ == '__main__':\n\n # change working directory to project location\n abspath = os.path.abspath(__file__)\n dname = os.path.dirname(os.path.dirname(abspath))\n os.chdir(dname)\n\n # script parameters\n name_to_update = sys.argv[1]\n update_type = sys.argv[2]\n update_method = sys.argv[3]\n update_point = sys.argv[4]\n\n # start updating database content\n\n # log in database\n config = conf.auto_log_in(\"cardo_main\")\n conn = pymysql.connect(**config)\n conn_cursor = conn.cursor()\n\n # set up parameters for update\n column_to_update = \"\"\n if update_type == \"0\":\n column_to_update = \"是否計算黑名單\"\n elif update_type == \"1\":\n column_to_update = \"CARDO點數\"\n\n command_text = \"\"\n # update table with different method\n if update_method == \"0\":\n command_text = \"UPDATE cardo_main.點數記錄表_目前 SET \" + column_to_update + \" = 0 WHERE 姓名 = \" + name_to_update + \";\"\n elif update_method == \"1\":\n command_text = \"UPDATE cardo_main.點數記錄表_目前 SET \" + column_to_update + \" = \" + update_point + \\\n \" WHERE `姓名` = \\'\" + name_to_update + \"\\';\"\n # command_text = \"UPDATE cardo_main.點數記錄表_目前 SET \" + column_to_update + \" = 0 WHERE 姓名 = \" + name_to_update + \";\"\n elif update_method == \"2\":\n select_command = \"SELECT \" + column_to_update + \" FROM cardo_main.點數記錄表_目前 WHERE 姓名 = \\'\" + name_to_update + \"\\';\"\n data_return = pd.read_sql(select_command, conn)\n update_point = str(int(data_return.iloc[0, 0]) - int(update_point))\n command_text = \"UPDATE cardo_main.點數記錄表_目前 SET \" + column_to_update + \" = \" + update_point + \" WHERE 姓名 = \\'\" + name_to_update + \"\\';\"\n\n # execute command\n conn_cursor.execute(\"SET SQL_SAFE_UPDATES = 0;\")\n conn.commit()\n conn_cursor.execute(command_text)\n conn.commit()\n"
] | [
[
"pandas.read_sql"
]
] |
hellwue/TreeSpeciesClassification | [
"8fd8dc6496d8317923c6112d3da46844d419e49f"
] | [
"PointCNN/pointcnn_geom+i+ms.py"
] | [
"import pickle\nfrom myutils import load_dataset, call_home, CMDisplay\nfrom itertools import chain\n\nimport torch\nimport torch.nn.functional as F\nfrom torch.nn import Linear as Lin\nfrom torch.optim import Adam\n\nfrom torch_geometric.nn import XConv, fps, global_mean_pool\n\nimport pytorch_lightning as pl\nfrom pytorch_lightning.callbacks import ModelCheckpoint\nfrom pytorch_lightning.callbacks.early_stopping import EarlyStopping\n\nfrom sklearn.metrics import classification_report as ClRp\nimport sklearn.metrics as metrics\nimport matplotlib.pyplot as plt\n\n\nclass PointCNN(pl.LightningModule):\n def __init__(self, numfeatures=1):\n super().__init__()\n\n self.learning_rate = 1e-3\n self.train_acc = pl.metrics.Accuracy()\n self.val_acc = pl.metrics.Accuracy()\n self.test_acc = pl.metrics.Accuracy()\n self.numfeatures = numfeatures\n # First XConv layer.\n # Lifting the point coordinates with no features (0) into feature space\n self.conv1 = XConv(self.numfeatures, 48, dim=3,\n kernel_size=8, hidden_channels=32)\n # Further XConv layers to further enrich the features\n self.conv2 = XConv(48, 96, dim=3, kernel_size=12,\n hidden_channels=64, dilation=2)\n self.conv3 = XConv(96, 192, dim=3, kernel_size=16,\n hidden_channels=128, dilation=2)\n self.conv4 = XConv(192, 384, dim=3, kernel_size=16,\n hidden_channels=256, dilation=2)\n\n # MLPs at the end of the PointCNN\n self.lin1 = Lin(389, 256)\n self.lin2 = Lin(256, 128)\n self.lin3 = Lin(128, 4)\n\n def forward(self, data):\n pos, batch = data.pos, data.batch\n x = data.x if self.numfeatures else None\n ms_feat = data.feat\n # First XConv with no features\n x = F.relu(self.conv1(x, pos, batch))\n\n # Farthest point sampling, keeping only 37.5%\n idx = fps(pos, batch, ratio=0.375)\n x, pos, batch = x[idx], pos[idx], batch[idx]\n\n # Second XConv\n x = F.relu(self.conv2(x, pos, batch))\n\n # Farthest point sampling, keeping only 33.4%\n idx = fps(pos, batch, ratio=0.334)\n x, pos, batch = x[idx], pos[idx], batch[idx]\n\n # Two more XConvs\n x = F.relu(self.conv3(x, pos, batch))\n x = F.relu(self.conv4(x, pos, batch))\n\n # Pool the batch-elements together\n # Each tree is described in one single point with 384 features\n x = global_mean_pool(x, batch)\n\n x = torch.cat((x, ms_feat), dim=1)\n # MLPs at the end with ReLU\n x = F.relu(self.lin1(x))\n x = F.relu(self.lin2(x))\n\n # Dropout (?!): Set randomly values to zero\n x = F.dropout(x, p=0.5, training=self.training)\n # Last MLP predicting labels\n x = self.lin3(x)\n\n # log-SoftMax Activation function to then calculate NLL-Loss (Negative Log Likelihood)\n return F.log_softmax(x, dim=-1)\n\n def training_step(self, data, batch_idx):\n y = data.y\n out = self(data)\n loss = F.nll_loss(out, y)\n self.train_acc(out, y)\n self.log('train_acc', self.train_acc, on_step=True, on_epoch=True)\n self.log('train_loss', loss) # , on_step=True, on_epoch=True)\n return loss\n\n def validation_step(self, data, batch_idx):\n y = data.y\n out = self(data)\n val_loss = F.nll_loss(out, y)\n self.val_acc(out, y)\n self.log('val_acc', self.val_acc, on_step=True, on_epoch=True)\n self.log('val_loss', val_loss) # , on_step=True, on_epoch=True)\n return val_loss\n\n def test_step(self, data, batch_idx):\n y = data.y\n out = self(data)\n test_loss = F.nll_loss(out, y)\n self.test_acc(out, y)\n self.log('test_loss', test_loss)\n return out\n\n def test_step_end(self, outs):\n return outs\n\n def test_epoch_end(self, outs):\n global res\n res = outs\n return outs\n\n def configure_optimizers(self):\n optimizer = 
Adam(self.parameters(), lr=self.learning_rate)\n return optimizer\n\n\nMODEL_NAME = 'geom+i+ms'\n\n\ntrain_dataset = load_dataset(\n '../../0_data/hdf/train.h5', batch_size=16, shuffle=True, load_ms=True)\nval_dataset = load_dataset(\n '../../0_data/hdf/val.h5', batch_size=16, load_ms=True)\ntest_dataset = load_dataset(\n '../../0_data/hdf/test.h5', batch_size=16, load_ms=True)\n\n\ncheckpoint_callback = ModelCheckpoint(monitor='val_loss', save_top_k=1)\ntrainer = pl.Trainer(gpus=1,\n progress_bar_refresh_rate=1,\n callbacks=[EarlyStopping(\n monitor='val_loss', patience=20)],\n checkpoint_callback=checkpoint_callback)\n\n# pl.seed_everything(420)\nmodel = PointCNN()\ntrainer.fit(model, train_dataset, val_dataset)\nbest_model = checkpoint_callback.best_model_path\nprint(best_model)\ncall_home(f'Done learning {MODEL_NAME}: ' + best_model)\n\nres = []\n\nmodel = PointCNN.load_from_checkpoint(checkpoint_path=best_model)\n# pl.seed_everything(420)\ntrainer.test(model, test_dataloaders=test_dataset)\n\nwith open(f'./results/{MODEL_NAME}_results.pickle', 'wb') as file:\n pickle.dump(res, file)\n\nlogits = list(chain(*(r.exp().argmax(axis=1).tolist() for r in res)))\nground = list(chain(*(tmp.y.tolist() for tmp in test_dataset)))\n\nclassification_report = ClRp(ground,\n logits,\n target_names=['coniferous',\n 'decidious',\n 'snag',\n 'dead tree'],\n digits=3)\nprint(classification_report)\nwith open(f'./results/{MODEL_NAME}_results.txt', 'w') as file:\n file.writelines(classification_report)\n file.writelines(best_model)\n\nCMDisplay(metrics.confusion_matrix(ground, logits)).plot()\nplt.savefig(f'./results/{MODEL_NAME}_results.eps', bbox_inches='tight')\n"
] | [
[
"torch.nn.functional.log_softmax",
"torch.nn.Linear",
"torch.nn.functional.dropout",
"sklearn.metrics.classification_report",
"matplotlib.pyplot.savefig",
"torch.nn.functional.nll_loss",
"sklearn.metrics.confusion_matrix",
"torch.cat"
]
] |