repo_name (string · lengths 6–130) | hexsha (list) | file_path (list) | code (list) | apis (list) | possible_versions (list) |
---|---|---|---|---|---|
xyabc/laygo_public
|
[
"869e11682ed9c4c6eb147876088db0199ce09a24"
] |
[
"GridLayoutGeneratorHelper.py"
] |
[
"#!/usr/bin/python\n########################################################################################################################\n#\n# Copyright (c) 2014, Regents of the University of California\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without modification, are permitted provided that the\n# following conditions are met:\n#\n# 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following\n# disclaimer.\n# 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the\n# following disclaimer in the documentation and/or other materials provided with the distribution.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES,\n# INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\n# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,\n# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n#\n########################################################################################################################\n\n\"\"\"GridBasedLayoutGenerator utility functions for users\"\"\"\n__author__ = \"Jaeduk Han\"\n__maintainer__ = \"Jaeduk Han\"\n__email__ = \"[email protected]\"\n__status__ = \"Prototype\"\n\n#import struct\n#import math\n#from math import *\nimport numpy as np\nfrom copy import deepcopy\n\ndef generate_boundary(laygen, objectname_pfix, placement_grid,\n devname_bottom, devname_top, devname_left, devname_right,\n shape_bottom=None, shape_top=None, shape_left=None, shape_right=None,\n transform_bottom=None, transform_top=None, transform_left=None, transform_right=None,\n origin=np.array([0, 0])):\n \"\"\"generate a boundary structure to resolve boundary design rules\"\"\"\n pg = placement_grid\n #parameters\n if shape_bottom == None:\n shape_bottom = [np.array([1, 1]) for d in devname_bottom]\n if shape_top == None:\n shape_top = [np.array([1, 1]) for d in devname_top]\n if shape_left == None:\n shape_left = [np.array([1, 1]) for d in devname_left]\n if shape_right == None:\n shape_right = [np.array([1, 1]) for d in devname_right]\n if transform_bottom == None:\n transform_bottom = ['R0' for d in devname_bottom]\n if transform_top == None:\n transform_top = ['R0' for d in devname_top]\n if transform_left == None:\n transform_left = ['R0' for d in devname_left]\n if transform_right == None:\n transform_right = ['R0' for d in devname_right]\n\n #bottom\n dev_bottom=[]\n dev_bottom.append(laygen.place(\"I\" + objectname_pfix + 'BNDBTM0', devname_bottom[0], pg, xy=origin,\n shape=shape_bottom[0], transform=transform_bottom[0]))\n for i, d in enumerate(devname_bottom[1:]):\n dev_bottom.append(laygen.relplace(\"I\" + objectname_pfix + 'BNDBTM'+str(i+1), d, pg, dev_bottom[-1].name,\n shape=shape_bottom[i+1], transform=transform_bottom[i+1]))\n dev_left=[]\n dev_left.append(laygen.relplace(\"I\" + objectname_pfix + 'BNDLFT0', devname_left[0], pg, dev_bottom[0].name, direction='top',\n 
shape=shape_left[0], transform=transform_left[0]))\n for i, d in enumerate(devname_left[1:]):\n dev_left.append(laygen.relplace(\"I\" + objectname_pfix + 'BNDLFT'+str(i+1), d, pg, dev_left[-1].name, direction='top',\n shape=shape_left[i+1], transform=transform_left[i+1]))\n dev_right=[]\n dev_right.append(laygen.relplace(\"I\" + objectname_pfix + 'BNDRHT0', devname_right[0], pg, dev_bottom[-1].name, direction='top',\n shape=shape_right[0], transform=transform_right[0]))\n for i, d in enumerate(devname_right[1:]):\n dev_right.append(laygen.relplace(\"I\" + objectname_pfix + 'BNDRHT'+str(i+1), d, pg, dev_right[-1].name, direction='top',\n shape=shape_right[i+1], transform=transform_right[i+1]))\n dev_top=[]\n dev_top.append(laygen.relplace(\"I\" + objectname_pfix + 'BNDTOP0', devname_top[0], pg, dev_left[-1].name, direction='top',\n shape=shape_top[0], transform=transform_top[0]))\n for i, d in enumerate(devname_top[1:]):\n dev_top.append(laygen.relplace(\"I\" + objectname_pfix + 'BNDTOP'+str(i+1), d, pg, dev_top[-1].name,\n shape=shape_top[i+1], transform=transform_top[i+1]))\n return [dev_bottom, dev_top, dev_left, dev_right]\n\ndef generate_power_rails(laygen, routename_tag, layer, gridname, netnames=['VDD', 'VSS'], direction='x', \n start_coord=0, end_coord=0, route_index=None, via_index=None, generate_pin=True): \n \"\"\"generate power rails\"\"\"\n rail_list=[]\n for netidx, netname in enumerate(netnames):\n rail_sub_list=[]\n for rcnt, ridx in enumerate(route_index[netidx]):\n if direction=='x': rxy0=np.array([[start_coord, ridx], [end_coord, ridx]]) \n if direction=='y': rxy0=np.array([[ridx, start_coord], [ridx, end_coord]]) \n if generate_pin == True:\n p=laygen.pin(name=netname + routename_tag + str(rcnt), layer=layer, xy=rxy0, gridname=gridname, netname=netname)\n rail_sub_list.append(p)\n else:\n r=laygen.route(None, layer, xy0=rxy0[0], xy1=rxy0[1], gridname0=gridname)\n rail_sub_list.append(r)\n if not via_index==None: #via generation\n for vidx in via_index[netidx]:\n if direction=='x': vxy0=np.array([vidx, ridx])\n else: vxy0=np.array([ridx, vidx])\n laygen.via(None, vxy0, gridname=gridname)\n rail_list.append(rail_sub_list)\n return rail_list\n\ndef generate_power_rails_from_rails_xy(laygen, routename_tag, layer, gridname, netnames=['VDD', 'VSS'], direction='x', \n input_rails_xy=None, generate_pin=True, \n overwrite_start_coord=None, overwrite_end_coord=None, \n overwrite_num_routes=None,\n offset_start_index=0, offset_end_index=0):\n \"\"\"generate power rails from pre-existing power rails in upper/lower layer. 
\n the pre-existing rail information is provided as xy array\n \"\"\"\n route_index=[]\n via_index=[]\n for netidx, netname in enumerate(netnames):\n sub_via_index=[]\n for i, irxy in enumerate(input_rails_xy[netidx]): \n if direction == 'x':\n #boundary estimation\n if netidx==0 and i==0: #initialize\n start_coord=irxy[0][0]\n end_coord=irxy[0][0]\n route_index_start=min((irxy[0][1], irxy[1][1]))\n route_index_end=max((irxy[0][1], irxy[1][1]))\n else:\n if start_coord > irxy[0][0]: start_coord=irxy[0][0]\n if end_coord < irxy[0][0]: end_coord=irxy[0][0]\n rist=min((irxy[0][1], irxy[1][1]))\n ried=max((irxy[0][1], irxy[1][1]))\n if route_index_start < rist: route_index_start = rist\n if route_index_end > ried: route_index_end = ried\n sub_via_index.append(irxy[0][0])\n else:\n #boundary estimation\n if netidx==0 and i==0: #initialize\n start_coord=irxy[0][1]\n end_coord=irxy[0][1]\n route_index_start=min((irxy[0][0], irxy[1][0]))\n route_index_end=max((irxy[0][0], irxy[1][0]))\n else:\n if start_coord > irxy[0][1]: start_coord=irxy[0][1]\n if end_coord < irxy[0][1]: end_coord=irxy[0][1]\n rist=min((irxy[0][0], irxy[1][0]))\n ried=max((irxy[0][0], irxy[1][0]))\n if route_index_start < rist: route_index_start = rist\n if route_index_end > ried: route_index_end = ried\n sub_via_index.append(irxy[0][1])\n via_index.append(np.array(sub_via_index))\n #number of routes and offset \n route_index_start+=offset_start_index\n if not overwrite_num_routes==None:\n route_index_end=route_index_start + overwrite_num_routes\n route_index_end+=offset_end_index\n #route index\n for netidx, netname in enumerate(netnames):\n sub_route_index=[]\n for ri in range(int((route_index_end - route_index_start + 1)/len(netnames))):\n sub_route_index += [route_index_start + netidx + len(netnames)*ri]\n route_index.append(np.array(sub_route_index))\n #overwrite start/end coordinates if necessary\n if not overwrite_start_coord==None:\n start_coord=overwrite_start_coord \n if not overwrite_end_coord==None:\n end_coord=overwrite_end_coord \n return generate_power_rails(laygen, routename_tag=routename_tag, layer=layer, gridname=gridname, netnames=netnames, direction=direction, \n start_coord=start_coord, end_coord=end_coord, route_index=route_index, via_index=via_index, generate_pin=generate_pin) \n\ndef generate_power_rails_from_rails_rect(laygen, routename_tag, layer, gridname, netnames=['VDD', 'VSS'], direction='x', \n input_rails_rect=None, generate_pin=True, \n overwrite_start_coord=None, overwrite_end_coord=None, overwrite_num_routes=None,\n offset_start_index=0, offset_end_index=0):\n \"\"\"generate power rails from pre-existing power rails in upper/lower layer. 
\n the pre-existing rail information is provided as rect\n \"\"\"\n xy=[]\n for netidx, netname in enumerate(netnames):\n sub_xy=[]\n for i, ir in enumerate(input_rails_rect[netidx]): \n sub_xy.append(laygen.get_rect_xy(ir.name, gridname))\n xy.append(np.array(sub_xy))\n return generate_power_rails_from_rails_xy(laygen, routename_tag, layer, gridname, netnames=netnames, direction=direction, \n input_rails_xy=xy, generate_pin=generate_pin, \n overwrite_start_coord=overwrite_start_coord, overwrite_end_coord=overwrite_end_coord,\n overwrite_num_routes=overwrite_num_routes,\n offset_start_index=offset_start_index, offset_end_index=offset_end_index)\n\ndef generate_power_rails_from_rails_inst(laygen, routename_tag, layer, gridname, netnames=['VDD', 'VSS'], direction='x', \n input_rails_instname=None, input_rails_pin_prefix=['VDD', 'VSS'], generate_pin=True, \n overwrite_start_coord=None, overwrite_end_coord=None, overwrite_num_routes=None,\n offset_start_index=0, offset_end_index=0):\n \"\"\"generate power rails from pre-existing power rails in upper/lower layer. \n the pre-existing rail information is provided as inst / pin prefix\n \"\"\"\n xy=[]\n pdict=laygen.get_inst_pin_coord(None, None, gridname)\n iname=input_rails_instname\n for pfix in input_rails_pin_prefix:\n sub_xy=[]\n for pn, p in pdict[iname].items():\n if pn.startswith(pfix):\n sub_xy.append(p)\n xy.append(sub_xy)\n return generate_power_rails_from_rails_xy(laygen, routename_tag, layer, gridname, netnames=netnames, direction=direction, \n input_rails_xy=xy, generate_pin=generate_pin, \n overwrite_start_coord=overwrite_start_coord, overwrite_end_coord=overwrite_end_coord,\n overwrite_num_routes=overwrite_num_routes,\n offset_start_index=offset_start_index, offset_end_index=offset_end_index)\n\ndef generate_grids_from_xy(laygen, gridname_input, gridname_output, xy, xy_grid_type=None):\n \"\"\"generate route grids combining a pre-existing grid and xy-array\n it will create a new array by copying the given grid and update part of entries from xy-lists\n \"\"\"\n #copy original database\n gi=laygen.get_grid(gridname_input)\n bnd=deepcopy(gi.xy)\n xgrid = deepcopy(gi.get_xgrid())\n ygrid = deepcopy(gi.get_ygrid())\n xwidth = deepcopy(gi.get_xwidth())\n ywidth = deepcopy(gi.get_ywidth())\n _viamap = gi.get_viamap()\n vianame = list(_viamap.keys())[0] #just pickig one via; should be fixed\n #figure out routing direction\n if xy_grid_type==None:\n if abs(xy[0][0][0]-xy[0][1][0]) > abs(xy[0][0][1]-xy[0][1][1]): #aspect ratio\n xy_grid_type = 'ygrid'\n else:\n xy_grid_type = 'xgrid'\n #extract grid information from xy list\n if xy_grid_type== 'xgrid':\n xgrid=[]\n xwidth=[]\n for xy0 in xy:\n xgrid.append(0.5 * (xy0[0][0] + xy0[1][0]))\n xwidth.append(abs(xy0[0][0] - xy0[1][0]))\n #sort\n xwidth = [x for (y, x) in sorted(zip(xgrid, xwidth))]\n xgrid.sort()\n xgrid = np.array(xgrid)\n xwidth = np.array(xwidth)\n bnd[1][0] = max(xgrid)+min(ygrid)\n if xy_grid_type== 'ygrid':\n ygrid=[]\n ywidth=[]\n for xy0 in xy:\n ygrid.append(0.5 * (xy0[0][1] + xy0[1][1]))\n ywidth.append(abs(xy0[0][1] - xy0[1][1]))\n #sort\n ywidth = [x for (y, x) in sorted(zip(ygrid, ywidth))]\n ygrid.sort()\n ygrid=np.array(ygrid)\n ywidth=np.array(ywidth)\n bnd[1][1]=max(ygrid)+min(ygrid)\n # viamap\n viamap = {vianame: []}\n for x in range(len(xgrid)):\n for y in range(len(ygrid)):\n viamap[vianame].append([x, y])\n viamap[vianame] = np.array(viamap[vianame])\n # add grid information\n laygen.grids.add_route_grid(name=gridname_output, libname=None, xy=bnd, 
xgrid=xgrid, ygrid=ygrid, xwidth=xwidth,\n ywidth=ywidth, viamap=viamap)\n #laygen.grids.display()\n\ndef generate_grids_from_inst(laygen, gridname_input, gridname_output, instname, template_libname,\n inst_pin_prefix=['VDD', 'VSS'], xy_grid_type=None):\n \"\"\"generate route grids combining a pre-existing grid and inst pins\n it will create a new array by copying the given grid and update part of entries from xy coordinates of pins\n \"\"\"\n inst = laygen.get_inst(name=instname)\n t = laygen.templates.get_template(inst.cellname, libname=template_libname)\n xy0 = inst.xy\n xy = []\n for p in t.pins:\n for pfix in inst_pin_prefix:\n if p.startswith(pfix):\n xy.append(xy0 + t.pins[p]['xy'])\n generate_grids_from_xy(laygen, gridname_input, gridname_output, xy, xy_grid_type=xy_grid_type)\n\ndef generate_grids_from_template(laygen, gridname_input, gridname_output, template_name, template_libname,\n template_pin_prefix=['VDD', 'VSS'], xy_grid_type=None, offset=np.array([0, 0])):\n \"\"\"generate route grids combining a pre-existing grid and template pins\n it will create a new array by copying the given grid and update part of entries from xy coordinates of pins\n \"\"\"\n t = laygen.templates.get_template(template_name, libname=template_libname)\n xy = []\n for p in t.pins:\n for pfix in template_pin_prefix:\n if p.startswith(pfix):\n xy.append(offset+t.pins[p]['xy'])\n generate_grids_from_xy(laygen, gridname_input, gridname_output, xy, xy_grid_type=xy_grid_type)\n"
] |
[
[
"numpy.array"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
DalhousieAI/pytorch-minimize
|
[
"0fa4a3fdd4fd396ef82c3fdb3bd7880644462b91"
] |
[
"tests/test_pytorch_minimize.py"
] |
[
"from __future__ import print_function\nimport argparse\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom pytorch_minimize.optim import MinimizeWrapper\nimport numpy as np\nfrom sklearn.datasets import make_classification\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.metrics import log_loss\n\n\nn_samples = 120\nn_features = 20\nn_classes = 10\n\n\nclass LogReg(nn.Module):\n def __init__(self):\n super(LogReg, self).__init__()\n self.fc = nn.Linear(n_features, n_classes)\n\n def forward(self, x):\n n = x.size(0)\n x = self.fc(x.view(n,-1))\n output = F.log_softmax(x, dim=1)\n return output\n\ndef main(method, disp=True, floatX='float32', cuda=False):\n # only run tests on CPU\n if cuda:\n device = torch.device('cuda')\n else:\n device = torch.device('cpu')\n\n # seed everything\n torch.manual_seed(0)\n np.random.seed(0)\n\n # generate classification dataset\n X, y = make_classification(n_samples=n_samples,\n n_informative=10,\n n_features=n_features,\n n_classes=n_classes)\n # split into training and test\n X_train, X_test, y_train, y_test = train_test_split(X, y,\n test_size=(2./12.), random_state=0)\n def torchify(X, y):\n return torch.from_numpy(X).float(), torch.from_numpy(y).long()\n train_dataset = torchify(X_train, y_train)\n test_dataset = torchify(X_test, y_test)\n\n # test sklearn\n # clf = LogisticRegression(penalty='none').fit(X_train, y_train)\n # print(clf.score(X_train, y_train))\n # print(log_loss(y_train, clf.predict_proba(X_train)))\n\n # instance model\n model = LogReg().to(device)\n\n # instance optimizer\n minimizer_args = dict(method=method, options={'disp':True, 'maxiter':10000})\n if floatX == 'float64':\n model = model.double()\n optimizer = MinimizeWrapper(model.parameters(), minimizer_args)\n\n # train\n model.train()\n data, target = train_dataset\n data, target = data.to(device), target.to(device)\n if floatX == 'float64':\n data = data.double()\n class Closure():\n def __init__(self, model):\n self.model = model\n \n @staticmethod\n def loss(model):\n output = model(data)\n return F.nll_loss(output, target) \n\n def __call__(self):\n optimizer.zero_grad()\n loss = self.loss(self.model)\n loss.backward()\n self._loss = loss.item()\n return loss\n closure = Closure(model)\n optimizer.step(closure)\n\n # check if train loss is zero (overfitting)\n assert abs(closure._loss) < 1e-1, f\"Train loss not near zero with {method}: {closure._loss}\"\n return optimizer.res, closure._loss\n\ndef test_jac_methods():\n # test methods that require only the jacobian and not the hessian\n methods = [\"CG\", \"BFGS\", \"L-BFGS-B\", \"SLSQP\", \"TNC\"]\n failing_combinations = [(\"L-BFGS-B\", \"float32\"), (\"TNC\", \"float32\")]\n for method in methods:\n for floatX in [\"float32\", \"float64\"]:\n if (method, floatX) not in failing_combinations:\n _ = main(method, disp=False, floatX=floatX)\n\ndef test_hess_methods():\n methods = [\"Newton-CG\", \"trust-ncg\", \"trust-krylov\", \"trust-exact\", \"trust-constr\"]\n failing_methods = [\"dogleg\"]\n for method in methods:\n for floatX in ['float32', 'float64']:\n _ = main(method, disp=False, floatX=floatX)\n\ndef test_gpu():\n # if there's a GPU, run this test (so this won't run on travis)\n if torch.cuda.is_available():\n for method in [\"CG\", \"Newtom-CG\"]:\n main(method, disp=False, floatX='float32', cuda=True)\n\nif __name__ == \"__main__\":\n res, loss = main(\"Newton-CG\", floatX='float64', cuda=True)\n #res, loss = 
main(\"TNC\", floatX='float32')\n # print(res)\n print(f\"Train Loss: {loss:.2f}\")\n\n"
] |
[
[
"sklearn.datasets.make_classification",
"numpy.random.seed",
"torch.nn.functional.log_softmax",
"torch.nn.functional.nll_loss",
"torch.manual_seed",
"sklearn.model_selection.train_test_split",
"torch.from_numpy",
"torch.nn.Linear",
"torch.cuda.is_available",
"torch.device"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
sandysa/Environment_Shaping_NSE
|
[
"29db4ae6302cc8c6f546fae22ff7eea2588db94f",
"29db4ae6302cc8c6f546fae22ff7eea2588db94f"
] |
[
"src/shaping_multiple_actors.py",
"src/plotresults.py"
] |
[
"########################################################################################\r\n# Author: Sandhya Saisubramanian\r\n# Description: Implements environment shaping and actor-designer coordination\r\n# \t\t\t for driving domain (multiple actor setting)\r\n########################################################################################\r\nimport numpy as np\r\nimport sys\r\nimport os \r\nimport random\r\nimport time\r\n\r\nfrom driving import Driving\r\nfrom designer import Designer\r\nfrom domain_helper import *\r\nfrom sklearn.ensemble import RandomForestClassifier\r\n\r\ncurrent_file_path = os.path.dirname(os.path.realpath(__file__))\r\nsys.path.append(os.path.join(current_file_path, '..'))\r\nDRIVING_MAP_PATH = os.path.join(current_file_path, '..', 'maps','driving')\r\n\r\nrandom.seed(100)\r\n\r\ndef Predict(x_train,y_train,x_test):\r\n\tfinal_model = RandomForestClassifier(n_estimators=100)\r\n\tx=np.array(x_train)\r\n\ty=np.array(y_train)\r\n\tfinal_model.fit(x_train,y_train)\r\n\r\n\ttest_label = final_model.predict(x_test)\r\n\r\n\treturn test_label\r\n\r\ndef actor_final_NSE_driving(map, start, goal,policy):\r\n\tdr = Driving(map,start,goal)\r\n\treturn dr.calculate_final_cost_NSE(policy)\r\n\r\ndef actor_driving(map, start, goal,trajectory_budget=-1):\r\n\tdr = Driving(map,start,goal)\r\n\tif trajectory_budget > -1:\r\n\t\treturn dr.solve_simulate(trajectory_budget)\r\n\treturn dr.solve()\r\n\r\ndef solve_disapproved_actions(filename,start,goal,disapproved_actions={}):\r\n\tdr = Driving(filename,start,goal)\r\n\treturn dr.solve_feedback(disapproved_actions)\r\n\r\ndef getE0():\r\n\tE0 = os.path.join(DRIVING_MAP_PATH,'grid-1.dr')\r\n\treturn E0\r\n\r\ndef setup_driving():\r\n\tstart_locations = []\r\n\tgoal_locations = []\r\n\tmaps = [os.path.join(DRIVING_MAP_PATH,f) for f in os.listdir(DRIVING_MAP_PATH) if os.path.isfile(os.path.join(DRIVING_MAP_PATH,f))]\r\n\tE0 = os.path.join(DRIVING_MAP_PATH,'grid-1.dr')\r\n\tmodifications = {\r\n\t\t\t'null': E0,\r\n\t\t\t'reduce_speed_all': os.path.join(DRIVING_MAP_PATH,'grid-1_reduced_speed.dr'),\r\n\t\t\t'fill_potholes': os.path.join(DRIVING_MAP_PATH,'grid-1_fill.dr'),\r\n\t\t\t'fill_deep_potholes': os.path.join(DRIVING_MAP_PATH,'grid-1_fill_deep.dr'),\r\n\t\t\t'reduce_speed_fill_deep': os.path.join(DRIVING_MAP_PATH,'grid-1_fill_reduce.dr'), #reduces speed at all shallow potholes and fills deep potholes\r\n\t\t\t'reduce_speed_zone1': os.path.join(DRIVING_MAP_PATH,'grid-1_reduced_speed1.dr'),\r\n\t\t\t'reduce_speed_zone2': os.path.join(DRIVING_MAP_PATH,'grid-1_reduced_speed2.dr'),\r\n\t\t\t'reduce_speed_zone3': os.path.join(DRIVING_MAP_PATH,'grid-1_reduced_speed3.dr'),\r\n\t\t\t'reduce_speed_zone4': os.path.join(DRIVING_MAP_PATH,'grid-1_reduced_speed4.dr')\r\n\t\t\t}\r\n\r\n\t# Cost of each modification proportional to pothole area in E0\r\n\t# Alternatively, this can be defined as per unit cost and \r\n\t# the exact value can be extracted for each configuration.\r\n\tcost_modifications = {\r\n\t\t'null': 0,\r\n\t\t'reduce_speed_zone1': 10,\r\n\t\t'reduce_speed_zone2': 16,\r\n\t\t'reduce_speed_zone3': 30,\r\n\t\t'reduce_speed_zone4': 28,\r\n\t\t'reduce_speed_all': 84, #cost/unit=2\r\n\t\t'fill_potholes': 168, #cost/unit=4\r\n\t\t'fill_deep_potholes': 76,\r\n\t\t'reduce_speed_fill_deep': 122\r\n\t\t}\r\n\r\n\tdummy_start = (1,1)\r\n\tdummy_goal = (13,22)\r\n\tdr = Driving(E0,dummy_start, dummy_goal)\r\n\tstates = dr.getStates()\r\n\r\n\tfor a in range(number_actors):\r\n\t\ts,g = 
get_start_goal_driving(states)\r\n\t\tstart_locations.append(s)\r\n\t\tgoal_locations.append(g)\r\n\r\n\treturn maps, modifications, cost_modifications, E0, states, start_locations,goal_locations\r\n\r\ndef solve_driving(trajectory_budget_list, start_locations=[], goal_locations=[],cluster=False):\r\n\tk = 10000 # NSE penalty when the actor is unable to reach the goal\r\n\ttrajectory_budget_modifications = []\r\n\ttrajectory_budget_NSE = []\r\n\ttrajectory_budget_costs = []\r\n\r\n\tmaps, modifications, cost_modifications, E0, states,start_loc,goal_loc = setup_driving()\r\n\tif start_locations == []:\r\n\t\tstart_locations = start_loc\r\n\t\tgoal_locations = goal_loc\r\n\r\n\tfor trajectory_budget in trajectory_budget_list:\r\n\t\tvisited = []\r\n\t\tdelta_actor_arr = []\r\n\t\tcurr_policies = []\r\n\t\tcurrent_NSE = []\r\n\r\n\t\tbest_total_nse = 0\r\n\t\tviolation = False\r\n\t\tbest_modification = 'null'\r\n\t\texpected_costs = []\r\n\t\tnse_val = 0\r\n\t\tnum_tested = 0\r\n\r\n\t\tdesigner = Designer(maps, modifications, cost_modifications, E0, delta_designer_percentage, domain_name, states)\r\n\t\t\r\n\t\tstart = time.time()\r\n\t\tfor actor in range(number_actors):\r\n\t\t\tpolicy,visitation_freq, expected_cost = actor_driving(E0, start_locations[actor], goal_locations[actor],trajectory_budget)\r\n\t\t\tcurr_policies.append(policy)\r\n\t\t\tdelta_actor = delta_actor_percentage * expected_cost\r\n\t\t\tdelta_actor_arr.append(delta_actor)\r\n\t\t\texpected_costs.append(expected_cost)\r\n\t\t\t\r\n\t\t\tdesigner.populateParameters(policy,visitation_freq)\r\n\t\t\tnse_val += designer.getNSE(E0, policy)\r\n\t\t\tcurrent_NSE.append(nse_val)\r\n\t\t\r\n\t\t# Computes modifications that do not require testing since they are too \r\n\t\t# similar to other modifications with better utility.\r\n\t\tif(cluster):\r\n\t\t\tprint(\"Shaping budget = \",shaping_budget)\r\n\t\t\tvisited = designer.cluster_best_design(curr_policies,shaping_budget,number_actors)\r\n\r\n\t\tstart1 = time.time()\r\n\r\n\t\tdelta_designer = delta_designer_percentage * nse_val\r\n\t\tif nse_val > delta_designer:\r\n\t\t\tviolation = True\r\n\t\tbest_total_nse = nse_val\r\n\t\t\r\n\t\twhile violation:\r\n\t\t\tviolation = False\r\n\t\t\tcurrent_NSE = []\r\n\t\t\tutility, modification, updated_NSE_actors = designer.best_design_multiple_actors(visited, curr_policies)\r\n\t\t\tvisited.append(modification)\r\n\t\t\tnum_tested += 1\r\n\t\t\tif modification == 'null':\r\n\t\t\t\tbreak\r\n\t\t\t\r\n\t\t\t\r\n\t\t\tfor actor in range(number_actors):\r\n\t\t\t\tupdated_policy, visitation_freq, updated_cost = actor_driving(modifications[modification], start_locations[actor],\\\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t goal_locations[actor],trajectory_budget)\r\n\t\t\t\t\r\n\t\t\t\tif updated_cost - expected_costs[actor] > delta_actor_arr[actor]:\r\n\t\t\t\t\tupdated_NSE_actors[actor] = k\t\r\n\t\t\t\telse:\r\n\t\t\t\t\tpolicy = updated_policy\r\n\r\n\t\t\tif sum(updated_NSE_actors) > delta_designer:\r\n\t\t\t\tviolation = True\r\n\r\n\t\t\tcurrent_NSE = updated_NSE_actors\r\n\r\n\t\t\tif sum(current_NSE) < best_total_nse:\r\n\t\t\t\tbest_modification = modification\r\n\t\t\t\tbest_config = modifications[modification]\r\n\t\t\t\tbest_NSE = current_NSE\r\n\r\n\t\tprint(\"***************************************************************************\")\r\n\t\tprint(\"Time taken (s) =%s time taken (wo similarity calculation) = %s num_tested=%s\\n\"%((time.time() -start),(time.time()-start1),num_tested))\r\n\t\tprint(\"Best modification = 
%s\\n\"%best_modification)\r\n\t\texpected_costs = []\r\n\t\tfinal_nse = []\r\n\t\ttrajectory_budget_modifications.append(best_modification)\r\n\t\tfor actor in range(number_actors):\r\n\t\t\tpolicy, expected_cost = actor_driving(modifications[best_modification], start_locations[actor], goal_locations[actor])\r\n\t\t\texpected_costs.append(expected_cost)\r\n\t\t\tavg_cost, std_cost, avg_nse, std_nse = actor_final_NSE_driving(modifications[best_modification],\\\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\tstart_locations[actor], goal_locations[actor],policy)\r\n\t\t\tfinal_nse.append(avg_nse)\r\n\t\t\r\n\t\ttrajectory_budget_NSE.append(final_nse)\r\n\t\ttrajectory_budget_costs.append(expected_costs)\r\n\r\n\treturn trajectory_budget_NSE, trajectory_budget_costs, trajectory_budget_modifications, start_locations, goal_locations\r\n\r\n\r\ndef get_start_goal_driving(states):\r\n\tE0 = os.path.join(DRIVING_MAP_PATH,'grid-1.dr')\r\n\tgrid = readmap(E0)\r\n\twalls = WallLoc_driving(grid)\r\n\tvalid_state = False\r\n\twhile(valid_state == False):\r\n\t\tstart_state = random.choice(states)\r\n\t\tgoal_state = random.choice(states)\r\n\t\tif start_state and goal_state not in walls:\r\n\t\t\tvalid_state = True\r\n\treturn start_state, goal_state\r\n\r\ndef noShaping_driving(start_locations, goal_locations):\r\n\tbaseline_nse = []\r\n\tbaseline_costs = []\r\n\r\n\tE0 = os.path.join(DRIVING_MAP_PATH,'grid-1.dr')\r\n\tfor actor in range(number_actors):\r\n\t\tpolicy,expected_cost = actor_driving(E0, start_locations[actor], goal_locations[actor])\r\n\t\tavg_cost, std_cost, avg_nse, std_nse = actor_final_NSE_driving(E0, start_locations[actor], \\\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tgoal_locations[actor],policy)\r\n\t\tbaseline_nse.append(avg_nse)\r\n\t\tbaseline_costs.append(expected_cost)\r\n\r\n\tbaseline_avg_nse = sum(baseline_nse)/number_actors\r\n\tbaseline_std_nse = np.std(np.array(baseline_nse))\r\n\tbaseline_avg_cost = sum(baseline_costs)/number_actors\r\n\r\n\treturn baseline_avg_nse, baseline_std_nse, baseline_avg_cost\r\n\r\ndef DisapprovedActions(approval,all_states,sa_list,all_actions,generalize_feedback=False):\r\n\tx_train = []\r\n\ty_train = []\r\n\tx_test = []\r\n\ttesting_sa = []\r\n\tdisapproved_actions = {s:[] for s,state in enumerate(all_states)}\r\n\t# Based on gathered feedback\r\n\tfor info in approval:\r\n\t\ttemp=[]\r\n\t\ts = info[0]\r\n\t\ttemp.append(s[0])\r\n\t\ttemp.append(s[1])\r\n\t\ttemp.append(all_actions.index(info[1]))\r\n\t\tx_train.append(temp)\r\n\t\ty_train.append(int(info[2]))\r\n\t\t\r\n\t\tstate_index = all_states.index(info[0])\r\n\t\tif int(info[2]) == 0:\r\n\t\t\tdisapproved_actions[state_index].append(info[1])\r\n\r\n\t# Generalize the gathered data to unseen states:\t\r\n\tif generalize_feedback == True:\r\n\t\tfor sa in sa_list:\r\n\t\t\tsa_val = sa[1:]\r\n\t\t\ttemp=[]\r\n\t\t\tif sa_val not in x_train:\r\n\t\t\t\ts = info[0]\r\n\t\t\t\ttemp.append(s[0])\r\n\t\t\t\ttemp.append(s[1])\r\n\t\t\t\ttemp.append(all_actions.index(sa_val[1]))\r\n\t\t\t\tx_test.append(temp)\t\r\n\t\t\t\ttesting_sa.append(sa_val)\r\n\r\n\t\ty_label = Predict(x_train, y_train, x_test)\r\n\r\n\t\tfor i in range(len(y_label)):\r\n\t\t\tif y_label[i] == 0:\r\n\t\t\t\tsa_val = testing_sa[i]\r\n\t\t\t\tstate_index = all_states.index(sa_val[0])\r\n\t\t\t\tdisapproved_actions[state_index].append(sa_val[1])\r\n\r\n\treturn disapproved_actions\r\n\r\ndef feedback(trajectory_budget_list,start_locations, goal_locations,generalize_feedback=False):\r\n\tfeedback_budget = 500\r\n\tE0 = getE0()\r\n\r\n\tgrid 
= readmap(E0)\r\n\tdr = Driving(E0,start_locations[0], goal_locations[0])\r\n\tall_states = dr.getStates()\r\n\tall_actions = dr.getActions()\r\n\tsa_list = dr.generate_state_actions()\r\n\tNSE_locations = PotholeLoc_driving(grid)\r\n\tNSE_values = []\r\n\tstd_NSE = 0\r\n\r\n\tfor trajectory_budget in trajectory_budget_list:\r\n\t\ttotal_nse = []\r\n\t\tfor actor in range(number_actors):\r\n\t\t\tpolicy,visitation_freq, expected_cost = actor_driving(E0, start_locations[actor], goal_locations[actor],trajectory_budget)\r\n\t\t\tnse_penalty = NSE_penalty_driving(all_states, policy,NSE_locations,visitation_freq)\r\n\t\t\tdelta_designer = delta_designer_percentage * nse_penalty\r\n\t\t\tfeedback_count = 0\r\n\t\t\r\n\t\t\tapproval = []\r\n\t\t\tif nse_penalty > delta_designer:\r\n\t\t\t\tfor s,state in enumerate(all_states):\r\n\t\t\t\t\tif feedback_count >= feedback_budget:\r\n\t\t\t\t\t\tbreak\r\n\t\t\t\t\tif s in policy:\r\n\t\t\t\t\t\ttemp = []\r\n\t\t\t\t\t\taction = policy[s]\r\n\t\t\t\t\t\tif mild_NSE_driving(state,action, NSE_locations) or severe_NSE_driving(state,action, NSE_locations):\t\t\t\r\n\t\t\t\t\t\t\ttemp.append(state)\r\n\t\t\t\t\t\t\ttemp.append(action)\r\n\t\t\t\t\t\t\ttemp.append(0)\r\n\t\t\t\t\t\telse:\r\n\t\t\t\t\t\t\ttemp.append(state)\r\n\t\t\t\t\t\t\ttemp.append(action)\r\n\t\t\t\t\t\t\ttemp.append(1)\r\n\t\t\t\t\t\tapproval.append(temp)\r\n\t\t\t\t\t\tfeedback_count += 1\r\n\t\t\t\tdisapproved_actions = DisapprovedActions(approval,all_states,sa_list,all_actions,generalize_feedback)\r\n\t\t\t\tupdated_policy,updated_expected_cost = solve_disapproved_actions(E0,start_locations[actor], \\\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tgoal_locations[actor],disapproved_actions)\r\n\t\t\t\tif updated_policy != None and updated_expected_cost - expected_cost <= delta_actor_percentage * expected_cost:\r\n\t\t\t\t\tavg_cost, std_cost, avg_nse, std_nse = actor_final_NSE_driving(E0, start_locations[actor], \\\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tgoal_locations[actor],updated_policy)\r\n\t\t\t\r\n\t\t\t\t\ttotal_nse.append(avg_nse)\r\n\t\t\t\telse:\r\n\t\t\t\t\tpolicy,expected_cost = actor_driving(E0, start_locations[actor], goal_locations[actor])\r\n\t\t\t\t\tavg_cost, std_cost, avg_nse, std_nse = actor_final_NSE_driving(E0, start_locations[actor], \\\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tgoal_locations[actor],policy)\r\n\t\t\t\t\ttotal_nse.append(avg_nse)\r\n\t\t\telse:\r\n\t\t\t\tpolicy,expected_cost = actor_driving(E0, start_locations[actor], goal_locations[actor])\r\n\t\t\t\tavg_cost, std_cost, avg_nse, std_nse = actor_final_NSE_driving(E0, start_locations[actor], \\\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tgoal_locations[actor],policy)\r\n\t\t\t\r\n\t\t\t\ttotal_nse.append(avg_nse)\r\n\t\t\tstd_NSE = np.std(np.array(total_nse))\r\n\t\tNSE_values.append(sum(total_nse)/number_actors)\r\n\r\n\treturn NSE_values, std_NSE\r\n\r\n\r\n\r\ndef main():\r\n\tavg_nse = []\r\n\tstd_nse = []\r\n\tavg_cost = []\r\n\r\n\tshaping_budget_avg_nse = []\r\n\tshaping_budget_std_nse = []\r\n\tshaping_budget_avg_cost = []\r\n\r\n\ttrajectory_budget_list = [100]\r\n\r\n\top_file = \"../results/multiple_actors_driving_trials.txt\"\r\n\tprint(\"Shaping with exhaustive search..\\n\")\r\n\tarr_NSE, arr_cost,best_modification, start_locations, goal_locations = solve_driving(trajectory_budget_list)\r\n\r\n\r\n\tprint(\"Shaping with budget..\\n\")\r\n\tcluster_arr_NSE, cluster_arr_cost,cluster_best_modification, \\\r\n\tstart_locations, goal_locations = 
solve_driving(trajectory_budget_list,start_locations, goal_locations,True)\r\n\r\n\t\r\n\tbaseline_avg_nse, baseline_std_nse, baseline_avg_cost = noShaping_driving(start_locations, goal_locations)\r\n\t\r\n\tfeedback_nse, feedback_std_nse = feedback(trajectory_budget_list,start_locations, goal_locations)\r\n\r\n\tfeedback_gen_nse, feedback_gen_std_nse = feedback(trajectory_budget_list,start_locations, goal_locations,True)\r\n\t\t\t\r\n\tfor t in range(len(trajectory_budget_list)):\r\n\t\tavg_nse.append(sum(arr_NSE[t])/number_actors)\r\n\t\tstd_nse.append(np.std(np.array(arr_NSE[t])))\r\n\t\tavg_cost.append(sum(arr_cost[t])/number_actors)\r\n\t\t\r\n\t\tshaping_budget_avg_nse.append(sum(cluster_arr_NSE[t])/number_actors)\r\n\t\tshaping_budget_std_nse.append(np.std(np.array(cluster_arr_NSE[t])))\r\n\t\tshaping_budget_avg_cost.append(sum(cluster_arr_cost[t])/number_actors)\r\n\r\n\r\n\tfile = open(op_file,\"a+\")\r\n\tfile.write(\"#Actors=%s\\n\"%number_actors)\r\n\tfile.write(\"Designer slack percentage=%s\\n\"%delta_designer_percentage)\r\n\tfile.write(\"Actor slack percentage=25\\n\")\r\n\tfile.write(\"Baseline NSE =%s\\n\"%baseline_avg_nse)\r\n\tfile.write(\"Baseline_std_NSE =%s\\n\"%baseline_std_nse)\r\n\tfile.write(\"Baseline costs =%s\\n\"%baseline_avg_cost)\r\n\r\n\tfile.write(\"Shaping_NSE=%s\\n\"%avg_nse)\r\n\tfile.write(\"Shaping_std_NSE=%s\\n\"%std_nse)\r\n\tfile.write(\"Shaping_actor_costs=%s\\n\"%avg_cost)\r\n\r\n\tfile.write(\"Shaping_budget_NSE=%s\\n\"%shaping_budget_avg_nse)\r\n\tfile.write(\"Shaping_budget_std_NSE=%s\\n\"%shaping_budget_std_nse)\r\n\tfile.write(\"Shaping_budget_actor_costs=%s\\n\"%shaping_budget_avg_cost)\r\n\r\n\tfile.write(\"Feedback_NSE=%s\\n\"%feedback_nse)\r\n\tfile.write(\"Feedback_std_NSE=%s\\n\"%feedback_std_nse)\r\n\tfile.write(\"Feedback_gen_NSE=%s\\n\"%feedback_gen_nse)\r\n\tfile.write(\"Feedback_gen_std_NSE=%s\\n\"%feedback_gen_std_nse)\r\n\tfile.write(\"***********************************\\n\")\r\n\tfile.close()\r\n\r\n\r\nif __name__ == '__main__':\r\n\tdomain_name = \"driving\"\r\n\tshaping_budget = 4\r\n\tdelta_designer_percentage = 0\r\n\tdelta_actor_percentage = 0.25\r\n\tnumber_actors = 10\r\n\tif len(sys.argv) > 1: \r\n\t\tnumber_actors = int(sys.argv[1])\r\n\tmain()\r\n\t\r\n",
"##########################################################################\r\n# Author: Sandhya Saisubramanian\r\n# Description: Generates plots for single actor, single designer setting\r\n##########################################################################\r\nimport numpy as np\r\nimport sys\r\nimport matplotlib.pyplot as plt\r\nimport matplotlib.cm as cmap\r\nimport seaborn as sns\r\nfrom collections import OrderedDict\r\n\r\ndef readDesignResults(filename):\r\n\tf = open(filename,\"r\")\r\n\tavg = []\r\n\tstd= []\r\n\tshaping_budget_NSE = []\r\n\tshaping_budget_std_NSE = []\r\n\tcosts = []\r\n\tshaping_budget_costs = []\r\n\tbudget = []\r\n\tbaseline_NSE = []\r\n\tbaseline_costs = []\r\n\tstd_baseline_nse = []\r\n\r\n\tfor line in f:\r\n\t\tif \"Budget\" in line:\r\n\t\t\ttemp = line.strip().replace(\"[\",\"\").replace(\"]\",\"\").split(\"=\")\r\n\t\t\tval = temp[1].strip().split(\",\")\r\n\t\t\tfor v in val:\r\n\t\t\t\tbudget.append(v)\r\n\t\tif \"Shaping_Average NSE\" in line:\r\n\t\t\ttemp = line.strip().replace(\"[\",\"\").replace(\"]\",\"\").split(\"=\")\r\n\t\t\tval = temp[1].strip().split(\",\")\r\n\t\t\tfor v in val:\r\n\t\t\t\tif v!= \"\":\r\n\t\t\t\t\tavg.append(float(v))\r\n\t\tif \"Shaping_budget_NSE\" in line:\r\n\t\t\ttemp = line.strip().replace(\"[\",\"\").replace(\"]\",\"\").split(\"=\")\r\n\t\t\tval = temp[1].strip().split(\",\")\r\n\t\t\tfor v in val:\r\n\t\t\t\tif v!= \"\":\r\n\t\t\t\t\tshaping_budget_NSE.append(float(v))\r\n\t\tif \"Shaping_budget_Std_nse\" in line:\r\n\t\t\ttemp = line.strip().replace(\"[\",\"\").replace(\"]\",\"\").split(\"=\")\r\n\t\t\tval = temp[1].strip().split(\",\")\r\n\t\t\tfor v in val:\r\n\t\t\t\tif v!= \"\":\r\n\t\t\t\t\tshaping_budget_std_NSE.append(float(v))\r\n\r\n\t\tif \"Shaping_Std_dev_nse\" in line:\r\n\t\t\ttemp = line.strip().replace(\"[\",\"\").replace(\"]\",\"\").split(\"=\")\r\n\t\t\tval = temp[1].strip().split(\",\")\r\n\t\t\tfor v in val:\r\n\t\t\t\tif v!= \"\":\r\n\t\t\t\t\tstd.append(float(v))\r\n\t\t\r\n\t\tif \"Baseline_NSE\" in line:\r\n\t\t\ttemp = line.strip().replace(\"[\",\"\").replace(\"]\",\"\").split(\"=\")\r\n\t\t\tval = temp[1].strip().split(\",\")\r\n\t\t\tfor v in val:\r\n\t\t\t\tif v!= \"\":\r\n\t\t\t\t\tbaseline_NSE.append(float(v))\r\n\r\n\t\tif \"Std_dev_baseline_nse\" in line:\r\n\t\t\ttemp = line.strip().replace(\"[\",\"\").replace(\"]\",\"\").split(\"=\")\r\n\t\t\tval = temp[1].strip().split(\",\")\r\n\t\t\tfor v in val:\r\n\t\t\t\tif v!= \"\":\r\n\t\t\t\t\tstd_baseline_nse.append(float(v))\r\n\r\n\t\tif \"Baseline_costs\" in line:\r\n\t\t\ttemp = line.strip().replace(\"[\",\"\").replace(\"]\",\"\").split(\"=\")\r\n\t\t\tval = temp[1].strip().split(\",\")\r\n\t\t\tfor v in val:\r\n\t\t\t\tif v!= \"\":\r\n\t\t\t\t\tbaseline_costs.append(float(v))\r\n\r\n\t\tif \"Shaping_Average cost\" in line:\r\n\t\t\ttemp = line.strip().replace(\"[\",\"\").replace(\"]\",\"\").split(\"=\")\r\n\t\t\tval = temp[1].strip().split(\",\")\r\n\t\t\tfor v in val:\r\n\t\t\t\tif v!= \"\":\r\n\t\t\t\t\tcosts.append(float(v))\r\n\t\tif \"Shaping_budget_cost\" in line:\r\n\t\t\ttemp = line.strip().replace(\"[\",\"\").replace(\"]\",\"\").split(\"=\")\r\n\t\t\tval = temp[1].strip().split(\",\")\r\n\t\t\tfor v in val:\r\n\t\t\t\tif v!= \"\":\r\n\t\t\t\t\tshaping_budget_costs.append(float(v))\r\n\r\n\tf.close()\r\n\r\n\treturn budget, avg,std, costs, baseline_NSE, std_baseline_nse, baseline_costs, shaping_budget_costs,\\\r\n\t\t\tshaping_budget_NSE,shaping_budget_std_NSE\r\n\r\ndef readFeedbackResults(filename):\r\n\tf = 
open(filename,\"r\")\r\n\tavg = []\r\n\tstd= []\r\n\tcosts = []\r\n\tfor line in f:\r\n\t\tif \"Average\" in line:\r\n\t\t\ttemp = line.strip().replace(\"[\",\"\").replace(\"]\",\"\").split(\"=\")\r\n\t\t\tval = temp[1].strip().split(\",\")\r\n\t\t\tfor v in val:\r\n\t\t\t\tif v!= \"\":\r\n\t\t\t\t\tif(float(v) > 100):\r\n\t\t\t\t\t\tavg.append(100)\r\n\t\t\t\t\telse:\r\n\t\t\t\t\t\tavg.append(float(v))\r\n\t\tif \"Std_dev_NSE\" in line:\r\n\t\t\ttemp = line.strip().replace(\"[\",\"\").replace(\"]\",\"\").split(\"=\")\r\n\t\t\tval = temp[1].strip().split(\",\")\r\n\t\t\tfor v in val:\r\n\t\t\t\tif v!= \"\":\r\n\t\t\t\t\tstd.append(float(v))\r\n\t\tif \"Actor costs\" in line:\r\n\t\t\ttemp = line.strip().replace(\"[\",\"\").replace(\"]\",\"\").split(\"=\")\r\n\t\t\tval = temp[1].strip().split(\",\")\r\n\t\t\tfor v in val:\r\n\t\t\t\tif v!= \"\":\r\n\t\t\t\t\tif(float(v) > 100):\r\n\t\t\t\t\t\tcosts.append(100)\r\n\t\t\t\t\telse:\r\n\t\t\t\t\t\tcosts.append(float(v))\r\n\tf.close()\r\n\treturn avg, std, costs\r\n\r\n\r\ndesign_filename = sys.argv[1]\r\nfeedback_filename = sys.argv[2]\r\ngen_feedback_filename = sys.argv[3]\r\nop_file = sys.argv[4]\r\n\r\nbudget, avg,std, costs,baseline_NSE,\\\r\n std_baseline_nse, baseline_costs, shaping_budget_costs,\\\r\n\t\t\tshaping_budget_NSE,shaping_budget_std_NSE = readDesignResults(design_filename)\r\n\r\nfeedback_nse, feedback_std, feedback_costs = readFeedbackResults(feedback_filename)\r\ngen_nse, gen_std, gen_costs = readFeedbackResults(gen_feedback_filename)\r\n\r\n\r\n# Converting standard deviation to standard error for plots\r\nstd = np.array(std)/10\r\nfeedback_std = np.array(feedback_std)/10\r\ngen_std = np.array(gen_std)/10\r\nstd_baseline_nse = np.array(std_baseline_nse)/10\r\nshaping_budget_std_NSE = np.array(shaping_budget_std_NSE)/10\r\n\r\n\r\nN = np.arange(len(budget))\r\nfig = plt.figure(figsize=(6,4))\r\nax = fig.add_subplot(111)\r\nplt.ylabel(\"Average NSE penalty\",fontsize='14')\r\nplt.xlabel(\"#Observed actor trajectories\",fontsize='14')\r\nax.set_xticks(N)\r\nax.set_xticklabels(budget,fontsize='14')\r\nplt.yticks(fontsize='16')\r\nplt.plot(baseline_NSE,linewidth=2, linestyle=\"-\", color=\"green\",marker=\"p\", label=\"Initial\")\r\nplt.plot(feedback_nse,linewidth=2, linestyle=\"-.\", color=\"red\",marker=\"*\", label=\"Feedback\")\r\nplt.plot(gen_nse,linewidth=2, linestyle=\"-.\", color=\"brown\",marker=\"p\", label=\"Feedback w/ generalization\")\r\nplt.plot(avg,linewidth=2, linestyle=\"--\", color=\"blue\",marker=\"o\", label=\"Shaping\")\r\nplt.plot(shaping_budget_NSE,linewidth=2, linestyle=\"--\", color=\"purple\",marker=\"+\", label=\"Shaping w/ budget\")\r\n\r\n\r\nax.fill_between(N,np.array(gen_nse)+ np.array(gen_std), np.array(gen_nse)-np.array(gen_std), color='peachpuff')\r\nax.fill_between(N,np.array(baseline_NSE)+ np.array(std_baseline_nse), np.array(baseline_NSE)-np.array(std_baseline_nse), color='lightgreen')\r\nax.fill_between(N,np.array(avg)+ np.array(std), np.array(avg)-np.array(std), color='cyan')\r\nax.fill_between(N,np.array(shaping_budget_NSE)+ np.array(shaping_budget_std_NSE), np.array(shaping_budget_NSE)-np.array(shaping_budget_std_NSE), color='pink')\r\nax.fill_between(N,np.array(feedback_nse)+ np.array(feedback_std), np.array(feedback_nse)-np.array(feedback_std), color='salmon')\r\nplt.legend(fontsize='14')\r\nbox = ax.get_position()\r\nax.legend(loc ='upper left',ncol=3,handletextpad=0.1,columnspacing=0.8,bbox_to_anchor=(-0.12, 
box.height+0.5),fancybox=False,fontsize='14')\r\nplt.savefig(op_file+\"_trials_NSE.png\",bbox_inches='tight')\r\n\r\n\r\n# Plot actor's expected costs\r\nfig = plt.figure(figsize=(6,4))\r\nax = fig.add_subplot(111)\r\nplt.ylabel(\"Cost\",fontsize='14')\r\nplt.xlabel(\"#Observed actor trajectories\",fontsize='14')\r\nax.set_xticks(N)\r\nax.set_xticklabels(budget,fontsize='14')\r\nplt.yticks(fontsize='14')\r\nplt.plot(baseline_costs,linewidth=3, linestyle=\"-\", color=\"green\",marker=\"p\", label=\"Initial\")\r\nplt.plot(gen_costs,linewidth=3, linestyle=\"-.\", color=\"brown\",marker=\"p\", label=\"Feedback w/ generalization\")\r\nplt.plot(costs,linewidth=3, linestyle=\"--\", color=\"blue\",marker=\"o\", label=\"Shaping\")\r\nplt.plot(feedback_costs,linewidth=3, linestyle=\"-.\", color=\"red\",marker=\"*\", label=\"Feedback\")\r\n\r\nplt.plot(shaping_budget_costs,linewidth=3, linestyle=\"--\", color=\"purple\",marker=\"+\", label=\"Shaping w/ Budget\")\r\n\r\nplt.legend(fontsize='14')\r\nbox = ax.get_position()\r\nax.legend(loc ='upper left',ncol=3,handletextpad=0.1,columnspacing=0.8,bbox_to_anchor=(-0.12, box.height+0.5),fancybox=False,fontsize='14')\r\nplt.savefig(op_file+\"_trials_primary.png\",bbox_inches='tight')\r\n"
] |
[
[
"numpy.array",
"sklearn.ensemble.RandomForestClassifier"
],
[
"matplotlib.pyplot.legend",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.yticks",
"numpy.array",
"matplotlib.pyplot.ylabel"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
ayansengupta17/agents
|
[
"c5a2f1f57d4fd0070eb75204aa0b1663de3e2c0a",
"c727141f67051b86d2564c4bd5fbc080623bfe19",
"c727141f67051b86d2564c4bd5fbc080623bfe19",
"c5a2f1f57d4fd0070eb75204aa0b1663de3e2c0a",
"c5a2f1f57d4fd0070eb75204aa0b1663de3e2c0a"
] |
[
"tf_agents/policies/tf_policy.py",
"tf_agents/environments/batched_py_environment_test.py",
"tf_agents/policies/actor_policy_test.py",
"tf_agents/networks/encoding_network_test.py",
"tf_agents/policies/policy_saver.py"
] |
[
"# coding=utf-8\n# Copyright 2018 The TF-Agents Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"TensorFlow Policies API.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\n# Using Type Annotations.\nfrom __future__ import print_function\n\nimport abc\nfrom typing import Optional, Text, Sequence\n\nimport six\nimport tensorflow as tf # pylint: disable=g-explicit-tensorflow-version-import\nimport tensorflow_probability as tfp\n\nfrom tf_agents.distributions import reparameterized_sampling\nfrom tf_agents.specs import tensor_spec\nfrom tf_agents.trajectories import policy_step\nfrom tf_agents.trajectories import time_step as ts\nfrom tf_agents.trajectories import trajectory\nfrom tf_agents.typing import types\nfrom tf_agents.utils import common\nfrom tf_agents.utils import nest_utils\n\n\ntfd = tfp.distributions\n\n\[email protected]_metaclass(abc.ABCMeta)\nclass TFPolicy(tf.Module):\n \"\"\"Abstract base class for TF Policies.\n\n The Policy represents a mapping from `time_steps` recieved from the\n environment to `actions` that can be applied to the environment.\n\n Agents expose two policies. A `policy` meant for deployment and evaluation,\n and a `collect_policy` for collecting data from the environment. The\n `collect_policy` is usually stochastic for exploring the environment better\n and may log auxilliary information such as log probabilities required for\n training as well. `Policy` objects can also be created directly by the users\n without using an `Agent`.\n\n The main methods of TFPolicy are:\n\n * `action`: Maps a `time_step` from the environment to an action.\n * `distribution`: Maps a `time_step` to a distribution over actions.\n * `get_initial_state`: Generates the initial state for stateful policies, e.g.\n RNN/LSTM policies.\n\n Example usage:\n\n ```\n env = SomeTFEnvironment()\n policy = TFRandomPolicy(env.time_step_spec(), env.action_spec())\n # Or policy = agent.policy or agent.collect_policy\n\n policy_state = policy.get_initial_state(env.batch_size)\n time_step = env.reset()\n\n while not time_step.is_last():\n policy_step = policy.action(time_step, policy_state)\n time_step = env.step(policy_step.action)\n\n policy_state = policy_step.state\n # policy_step.info may contain side info for logging, such as action log\n # probabilities.\n ```\n\n Policies can be saved to disk as SavedModels (see policy_saver.py and\n policy_loader.py) or as TF Checkpoints.\n\n A `PyTFEagerPolicy` can be used to wrap a `TFPolicy` so that it works with\n `PyEnvironment`s.\n\n\n **NOTE**: For API consistency, subclasses are not allowed to override public\n methods of `TFPolicy` class. Instead, they may implement the protected methods\n including `_get_initial_state`, `_action`, and `_distribution`. 
This\n public-calls-private convention allowed this base class to do things like\n properly add `spec` and shape checks, which provide users an easier experience\n when debugging their environments and networks.\n\n For researchers, and those developing new Policies, the `TFPolicy` base class\n constructor also accept a `validate_args` parameter. If `False`, this\n disables all spec structure, dtype, and shape checks in the public methods of\n these classes. It allows algorithm developers to iterate and try different\n input and output structures without worrying about overly restrictive\n requirements, or input and output states being in a certain format. However,\n *disabling argument validation* can make it very hard to identify structural\n input or algorithmic errors; and should not be done for final, or\n production-ready, Policies. In addition to having implementations that may\n disagree with specs, this mean that the resulting Policy may no longer\n interact well with other parts of TF-Agents. Examples include impedance\n mismatches with Actor/Learner APIs, replay buffers, and the model export\n functionality in `PolicySaver.\n \"\"\"\n\n # TODO(b/127327645) Remove this attribute.\n # This attribute allows subclasses to back out of automatic tf.function\n # attribute inside TF1 (for autodeps).\n _enable_functions = True\n\n def __init__(\n self,\n time_step_spec: ts.TimeStep,\n action_spec: types.NestedTensorSpec,\n policy_state_spec: types.NestedTensorSpec = (),\n info_spec: types.NestedTensorSpec = (),\n clip: bool = True,\n emit_log_probability: bool = False,\n automatic_state_reset: bool = True,\n observation_and_action_constraint_splitter: Optional[\n types.Splitter] = None,\n validate_args: bool = True,\n name: Optional[Text] = None):\n \"\"\"Initialization of TFPolicy class.\n\n Args:\n time_step_spec: A `TimeStep` spec of the expected time_steps. Usually\n provided by the user to the subclass.\n action_spec: A nest of BoundedTensorSpec representing the actions. Usually\n provided by the user to the subclass.\n policy_state_spec: A nest of TensorSpec representing the policy_state.\n Provided by the subclass, not directly by the user.\n info_spec: A nest of TensorSpec representing the policy info. Provided by\n the subclass, not directly by the user.\n clip: Whether to clip actions to spec before returning them. Default\n True. Most policy-based algorithms (PCL, PPO, REINFORCE) use unclipped\n continuous actions for training.\n emit_log_probability: Emit log-probabilities of actions, if supported. If\n True, policy_step.info will have CommonFields.LOG_PROBABILITY set.\n Please consult utility methods provided in policy_step for setting and\n retrieving these. When working with custom policies, either provide a\n dictionary info_spec or a namedtuple with the field 'log_probability'.\n automatic_state_reset: If `True`, then `get_initial_policy_state` is used\n to clear state in `action()` and `distribution()` for for time steps\n where `time_step.is_first()`.\n observation_and_action_constraint_splitter: A function used to process\n observations with action constraints. These constraints can indicate,\n for example, a mask of valid/invalid actions for a given state of the\n environment. The function takes in a full observation and returns a\n tuple consisting of 1) the part of the observation intended as input to\n the network and 2) the constraint. 
An example\n `observation_and_action_constraint_splitter` could be as simple as: ```\n def observation_and_action_constraint_splitter(observation): return\n observation['network_input'], observation['constraint'] ```\n *Note*: when using `observation_and_action_constraint_splitter`, make\n sure the provided `q_network` is compatible with the network-specific\n half of the output of the\n `observation_and_action_constraint_splitter`. In particular,\n `observation_and_action_constraint_splitter` will be called on the\n observation before passing to the network. If\n `observation_and_action_constraint_splitter` is None, action\n constraints are not applied.\n validate_args: Python bool. Whether to verify inputs to, and outputs of,\n functions like `action` and `distribution` against spec structures,\n dtypes, and shapes.\n\n Research code may prefer to set this value to `False` to allow iterating\n on input and output structures without being hamstrung by overly\n rigid checking (at the cost of harder-to-debug errors).\n\n See also `TFAgent.validate_args`.\n name: A name for this module. Defaults to the class name.\n \"\"\"\n super(TFPolicy, self).__init__(name=name)\n common.check_tf1_allowed()\n common.tf_agents_gauge.get_cell('TFAPolicy').set(True)\n common.assert_members_are_not_overridden(base_cls=TFPolicy, instance=self)\n if not isinstance(time_step_spec, ts.TimeStep):\n raise ValueError(\n 'The `time_step_spec` must be an instance of `TimeStep`, but is `{}`.'\n .format(type(time_step_spec)))\n\n self._time_step_spec = time_step_spec\n self._action_spec = action_spec\n self._policy_state_spec = policy_state_spec\n self._emit_log_probability = emit_log_probability\n self._validate_args = validate_args\n\n if emit_log_probability:\n log_probability_spec = tensor_spec.BoundedTensorSpec(\n shape=(),\n dtype=tf.float32,\n maximum=0,\n minimum=-float('inf'),\n name='log_probability')\n log_probability_spec = tf.nest.map_structure(\n lambda _: log_probability_spec, action_spec)\n info_spec = policy_step.set_log_probability(info_spec,\n log_probability_spec)\n\n self._info_spec = info_spec\n self._setup_specs()\n self._clip = clip\n self._action_fn = common.function_in_tf1()(self._action)\n self._automatic_state_reset = automatic_state_reset\n self._observation_and_action_constraint_splitter = (\n observation_and_action_constraint_splitter)\n\n def _setup_specs(self):\n self._policy_step_spec = policy_step.PolicyStep(\n action=self._action_spec,\n state=self._policy_state_spec,\n info=self._info_spec)\n self._trajectory_spec = trajectory.from_transition(self._time_step_spec,\n self._policy_step_spec,\n self._time_step_spec)\n\n def variables(self) -> Sequence[tf.Variable]:\n \"\"\"Returns the list of Variables that belong to the policy.\"\"\"\n # Ignore self._variables() in favor of using tf.Module's tracking.\n return super(TFPolicy, self).variables\n\n @property\n def observation_and_action_constraint_splitter(self) -> types.Splitter:\n return self._observation_and_action_constraint_splitter\n\n @property\n def validate_args(self) -> bool:\n \"\"\"Whether `action` & `distribution` validate input and output args.\"\"\"\n return self._validate_args\n\n def get_initial_state(self,\n batch_size: Optional[types.Int]) -> types.NestedTensor:\n \"\"\"Returns an initial state usable by the policy.\n\n Args:\n batch_size: Tensor or constant: size of the batch dimension. 
Can be None\n in which case no dimensions gets added.\n\n Returns:\n A nested object of type `policy_state` containing properly\n initialized Tensors.\n \"\"\"\n return self._get_initial_state(batch_size)\n\n def _maybe_reset_state(self, time_step, policy_state):\n if policy_state is (): # pylint: disable=literal-comparison\n return policy_state\n\n batch_size = tf.compat.dimension_value(time_step.discount.shape[0])\n if batch_size is None:\n batch_size = tf.shape(time_step.discount)[0]\n\n # Make sure we call this with a kwarg as it may be wrapped in tf.function\n # which would expect a tensor if it was not a kwarg.\n zero_state = self.get_initial_state(batch_size=batch_size)\n condition = time_step.is_first()\n # When experience is a sequence we only reset automatically for the first\n # time_step in the sequence as we can't easily generalize how the policy is\n # unrolled over the sequence.\n if nest_utils.get_outer_rank(time_step, self._time_step_spec) > 1:\n condition = time_step.is_first()[:, 0, ...]\n return nest_utils.where(condition, zero_state, policy_state)\n\n def action(self,\n time_step: ts.TimeStep,\n policy_state: types.NestedTensor = (),\n seed: Optional[types.Seed] = None) -> policy_step.PolicyStep:\n \"\"\"Generates next action given the time_step and policy_state.\n\n Args:\n time_step: A `TimeStep` tuple corresponding to `time_step_spec()`.\n policy_state: A Tensor, or a nested dict, list or tuple of Tensors\n representing the previous policy_state.\n seed: Seed to use if action performs sampling (optional).\n\n Returns:\n A `PolicyStep` named tuple containing:\n `action`: An action Tensor matching the `action_spec`.\n `state`: A policy state tensor to be fed into the next call to action.\n `info`: Optional side information such as action log probabilities.\n\n Raises:\n RuntimeError: If subclass __init__ didn't call super().__init__.\n ValueError or TypeError: If `validate_args is True` and inputs or\n outputs do not match `time_step_spec`, `policy_state_spec`,\n or `policy_step_spec`.\n \"\"\"\n if self._enable_functions and getattr(self, '_action_fn', None) is None:\n raise RuntimeError(\n 'Cannot find _action_fn. Did %s.__init__ call super?' %\n type(self).__name__)\n if self._enable_functions:\n action_fn = self._action_fn\n else:\n action_fn = self._action\n\n if self._validate_args:\n time_step = nest_utils.prune_extra_keys(self._time_step_spec, time_step)\n policy_state = nest_utils.prune_extra_keys(\n self._policy_state_spec, policy_state)\n nest_utils.assert_same_structure(\n time_step,\n self._time_step_spec,\n message='time_step and time_step_spec structures do not match')\n # TODO(b/158804957): Use literal comparison because in some strange cases\n # (tf.function? autograph?) 
the expression \"x not in (None, (), [])\" gets\n # converted to a tensor.\n if not (policy_state is None or policy_state is () or policy_state is []): # pylint: disable=literal-comparison\n nest_utils.assert_same_structure(\n policy_state,\n self._policy_state_spec,\n message=('policy_state and policy_state_spec '\n 'structures do not match'))\n\n if self._automatic_state_reset:\n policy_state = self._maybe_reset_state(time_step, policy_state)\n step = action_fn(time_step=time_step, policy_state=policy_state, seed=seed)\n\n def clip_action(action, action_spec):\n if isinstance(action_spec, tensor_spec.BoundedTensorSpec):\n return common.clip_to_spec(action, action_spec)\n return action\n\n if self._validate_args:\n nest_utils.assert_same_structure(\n step.action, self._action_spec,\n message='action and action_spec structures do not match')\n\n if self._clip:\n clipped_actions = tf.nest.map_structure(clip_action,\n step.action,\n self._action_spec)\n step = step._replace(action=clipped_actions)\n\n if self._validate_args:\n nest_utils.assert_same_structure(\n step,\n self._policy_step_spec,\n message='action output and policy_step_spec structures do not match')\n\n def compare_to_spec(value, spec):\n return value.dtype.is_compatible_with(spec.dtype)\n\n compatibility = [\n compare_to_spec(v, s) for (v, s)\n in zip(tf.nest.flatten(step.action),\n tf.nest.flatten(self.action_spec))]\n\n if not all(compatibility):\n get_dtype = lambda x: x.dtype\n action_dtypes = tf.nest.map_structure(get_dtype, step.action)\n spec_dtypes = tf.nest.map_structure(get_dtype, self.action_spec)\n\n raise TypeError('Policy produced an action with a dtype that doesn\\'t '\n 'match its action_spec. Got action:\\n %s\\n with '\n 'action_spec:\\n %s' % (action_dtypes, spec_dtypes))\n\n return step\n\n def distribution(\n self, time_step: ts.TimeStep, policy_state: types.NestedTensor = ()\n ) -> policy_step.PolicyStep:\n \"\"\"Generates the distribution over next actions given the time_step.\n\n Args:\n time_step: A `TimeStep` tuple corresponding to `time_step_spec()`.\n policy_state: A Tensor, or a nested dict, list or tuple of Tensors\n representing the previous policy_state.\n\n Returns:\n A `PolicyStep` named tuple containing:\n\n `action`: A tf.distribution capturing the distribution of next actions.\n `state`: A policy state tensor for the next call to distribution.\n `info`: Optional side information such as action log probabilities.\n\n Raises:\n ValueError or TypeError: If `validate_args is True` and inputs or\n outputs do not match `time_step_spec`, `policy_state_spec`,\n or `policy_step_spec`.\n \"\"\"\n if self._validate_args:\n time_step = nest_utils.prune_extra_keys(self._time_step_spec, time_step)\n policy_state = nest_utils.prune_extra_keys(\n self._policy_state_spec, policy_state)\n nest_utils.assert_same_structure(\n time_step,\n self._time_step_spec,\n message='time_step and time_step_spec structures do not match')\n nest_utils.assert_same_structure(\n policy_state,\n self._policy_state_spec,\n message='policy_state and policy_state_spec structures do not match')\n if self._automatic_state_reset:\n policy_state = self._maybe_reset_state(time_step, policy_state)\n step = self._distribution(time_step=time_step, policy_state=policy_state)\n if self.emit_log_probability:\n # This here is set only for compatibility with info_spec in constructor.\n info = policy_step.set_log_probability(\n step.info,\n tf.nest.map_structure(\n lambda _: tf.constant(0., dtype=tf.float32),\n 
policy_step.get_log_probability(self._info_spec)))\n step = step._replace(info=info)\n if self._validate_args:\n nest_utils.assert_same_structure(\n step,\n self._policy_step_spec,\n message=('distribution output and policy_step_spec structures '\n 'do not match'))\n return step\n\n def update(self,\n policy,\n tau: float = 1.0,\n tau_non_trainable: Optional[float] = None,\n sort_variables_by_name: bool = False) -> tf.Operation:\n \"\"\"Update the current policy with another policy.\n\n This would include copying the variables from the other policy.\n\n Args:\n policy: Another policy it can update from.\n tau: A float scalar in [0, 1]. When tau is 1.0 (the default), we do a hard\n update. This is used for trainable variables.\n tau_non_trainable: A float scalar in [0, 1] for non_trainable variables.\n If None, will copy from tau.\n sort_variables_by_name: A bool, when True would sort the variables by name\n before doing the update.\n\n Returns:\n An TF op to do the update.\n \"\"\"\n if self.variables():\n return common.soft_variables_update(\n policy.variables(),\n self.variables(),\n tau=tau,\n tau_non_trainable=tau_non_trainable,\n sort_variables_by_name=sort_variables_by_name)\n else:\n return tf.no_op()\n\n @property\n def emit_log_probability(self) -> bool:\n \"\"\"Whether this policy instance emits log probabilities or not.\"\"\"\n return self._emit_log_probability\n\n @property\n def time_step_spec(self) -> ts.TimeStep:\n \"\"\"Describes the `TimeStep` tensors returned by `step()`.\n\n Returns:\n A `TimeStep` namedtuple with `TensorSpec` objects instead of Tensors,\n which describe the shape, dtype and name of each tensor returned by\n `step()`.\n \"\"\"\n return self._time_step_spec\n\n @property\n def action_spec(self) -> types.NestedTensorSpec:\n \"\"\"Describes the TensorSpecs of the Tensors expected by `step(action)`.\n\n `action` can be a single Tensor, or a nested dict, list or tuple of\n Tensors.\n\n Returns:\n An single BoundedTensorSpec, or a nested dict, list or tuple of\n `BoundedTensorSpec` objects, which describe the shape and\n dtype of each Tensor expected by `step()`.\n \"\"\"\n return self._action_spec\n\n @property\n def policy_state_spec(self) -> types.NestedTensorSpec:\n \"\"\"Describes the Tensors expected by `step(_, policy_state)`.\n\n `policy_state` can be an empty tuple, a single Tensor, or a nested dict,\n list or tuple of Tensors.\n\n Returns:\n An single TensorSpec, or a nested dict, list or tuple of\n `TensorSpec` objects, which describe the shape and\n dtype of each Tensor expected by `step(_, policy_state)`.\n \"\"\"\n return self._policy_state_spec\n\n @property\n def info_spec(self) -> types.NestedTensorSpec:\n \"\"\"Describes the Tensors emitted as info by `action` and `distribution`.\n\n `info` can be an empty tuple, a single Tensor, or a nested dict,\n list or tuple of Tensors.\n\n Returns:\n An single TensorSpec, or a nested dict, list or tuple of\n `TensorSpec` objects, which describe the shape and\n dtype of each Tensor expected by `step(_, policy_state)`.\n \"\"\"\n return self._info_spec\n\n @property\n def policy_step_spec(self) -> policy_step.PolicyStep:\n \"\"\"Describes the output of `action()`.\n\n Returns:\n A nest of TensorSpec which describe the shape and dtype of each Tensor\n emitted by `action()`.\n \"\"\"\n return self._policy_step_spec\n\n # TODO(kbanoop, ebrevdo): Should this be collect_data_spec to mirror agents?\n @property\n def trajectory_spec(self) -> trajectory.Trajectory:\n \"\"\"Describes the Tensors written when using 
this policy with an environment.\n\n Returns:\n A `Trajectory` containing all tensor specs associated with the\n observation_spec, action_spec, policy_state_spec, and info_spec of\n this policy.\n \"\"\"\n return self._trajectory_spec\n\n @property\n def collect_data_spec(self) -> trajectory.Trajectory:\n \"\"\"Describes the Tensors written when using this policy with an environment.\n\n Returns:\n A nest of TensorSpec which describe the shape and dtype of each Tensor\n required to train the agent which generated this policy.\n \"\"\"\n return self._trajectory_spec\n\n # Subclasses MAY optionally override _action.\n def _action(self, time_step: ts.TimeStep,\n policy_state: types.NestedTensor,\n seed: Optional[types.Seed]) -> policy_step.PolicyStep:\n \"\"\"Implementation of `action`.\n\n Args:\n time_step: A `TimeStep` tuple corresponding to `time_step_spec()`.\n policy_state: A Tensor, or a nested dict, list or tuple of Tensors\n representing the previous policy_state.\n seed: Seed to use if action performs sampling (optional).\n\n Returns:\n A `PolicyStep` named tuple containing:\n `action`: An action Tensor matching the `action_spec`.\n `state`: A policy state tensor to be fed into the next call to action.\n `info`: Optional side information such as action log probabilities.\n \"\"\"\n seed_stream = tfp.util.SeedStream(seed=seed, salt='tf_agents_tf_policy')\n distribution_step = self._distribution(time_step, policy_state)\n actions = tf.nest.map_structure(\n lambda d: reparameterized_sampling.sample(d, seed=seed_stream()),\n distribution_step.action)\n info = distribution_step.info\n if self.emit_log_probability:\n try:\n log_probability = tf.nest.map_structure(lambda a, d: d.log_prob(a),\n actions,\n distribution_step.action)\n info = policy_step.set_log_probability(info, log_probability)\n except:\n raise TypeError('%s does not support emitting log-probabilities.' %\n type(self).__name__)\n\n return distribution_step._replace(action=actions, info=info)\n\n ## Subclasses MUST implement these.\n def _distribution(\n self, time_step: ts.TimeStep,\n policy_state: types.NestedTensorSpec) -> policy_step.PolicyStep:\n \"\"\"Implementation of `distribution`.\n\n Args:\n time_step: A `TimeStep` tuple corresponding to `time_step_spec()`.\n policy_state: A Tensor, or a nested dict, list or tuple of Tensors\n representing the previous policy_state.\n\n Returns:\n A `PolicyStep` named tuple containing:\n `action`: A (optionally nested) of tfp.distribution.Distribution\n capturing the distribution of next actions.\n `state`: A policy state tensor for the next call to distribution.\n `info`: Optional side information such as action log probabilities.\n \"\"\"\n raise NotImplementedError()\n\n # Subclasses MAY optionally overwrite _get_initial_state.\n def _get_initial_state(self, batch_size: int) -> types.NestedTensor:\n \"\"\"Returns the initial state of the policy network.\n\n Args:\n batch_size: A constant or Tensor holding the batch size. Can be None, in\n which case the state will not have a batch dimension added.\n\n Returns:\n A nest of zero tensors matching the spec of the policy network state.\n \"\"\"\n return tensor_spec.zero_spec_nest(\n self._policy_state_spec,\n outer_dims=None if batch_size is None else [batch_size])\n",
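The `tf_policy.TFPolicy` source above states that subclasses must implement `_distribution`, while sampling, clipping, and spec validation are handled by the base class's `action()`. As a rough illustration of that contract, here is a minimal sketch of a hypothetical subclass; the class name `UniformTFPolicy` and the specs are invented for the example, and the defaults `clip=True` / `validate_args=True` are assumed.

```python
# Hypothetical subclass sketch; only `_distribution` is supplied, while
# `action()` (sampling, clipping, spec validation) comes from TFPolicy above.
import tensorflow as tf
import tensorflow_probability as tfp

from tf_agents.policies import tf_policy
from tf_agents.specs import tensor_spec
from tf_agents.trajectories import policy_step
from tf_agents.trajectories import time_step as ts


class UniformTFPolicy(tf_policy.TFPolicy):
  """Emits a uniform distribution over a bounded float action spec."""

  def _distribution(self, time_step, policy_state):
    # Broadcast the spec bounds over the observation's batch dimension.
    batch_size = tf.shape(time_step.observation)[0]
    full_shape = tf.concat([[batch_size], self.action_spec.shape], axis=0)
    low = tf.fill(full_shape, self.action_spec.minimum)
    high = tf.fill(full_shape, self.action_spec.maximum)
    return policy_step.PolicyStep(
        action=tfp.distributions.Uniform(low=low, high=high),
        state=policy_state,
        info=())


obs_spec = tensor_spec.TensorSpec([2], tf.float32)
act_spec = tensor_spec.BoundedTensorSpec([1], tf.float32, -1.0, 1.0)
policy = UniformTFPolicy(ts.time_step_spec(obs_spec), act_spec)

time_step = ts.restart(tf.constant([[0.2, 0.4]]), batch_size=1)
action_step = policy.action(time_step)  # base class samples, clips, validates
```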
"# coding=utf-8\n# Copyright 2018 The TF-Agents Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Tests for the parallel environment.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport collections\nimport functools\n\nfrom absl.testing import parameterized\nimport numpy as np\nimport tensorflow as tf # pylint: disable=g-explicit-tensorflow-version-import\n\nfrom tf_agents.environments import batched_py_environment\nfrom tf_agents.environments import random_py_environment\nfrom tf_agents.specs import array_spec\nfrom tf_agents.trajectories import time_step as ts\n\nCOMMON_PARAMETERS = (dict(multithreading=False), dict(multithreading=True))\n\n\nclass GymWrapperEnvironmentMock(random_py_environment.RandomPyEnvironment):\n\n def __init__(self, *args, **kwargs):\n super(GymWrapperEnvironmentMock, self).__init__(*args, **kwargs)\n self._info = {}\n\n def get_info(self):\n return self._info\n\n def _step(self, action):\n self._info['last_action'] = action\n return super(GymWrapperEnvironmentMock, self)._step(action)\n\n\nclass BatchedPyEnvironmentTest(tf.test.TestCase, parameterized.TestCase):\n\n @property\n def action_spec(self):\n return array_spec.BoundedArraySpec(\n [7], dtype=np.float32, minimum=-1.0, maximum=1.0)\n\n @property\n def observation_spec(self):\n return array_spec.ArraySpec((3, 3), np.float32)\n\n def _make_batched_py_environment(self, multithreading, num_envs=3):\n self.time_step_spec = ts.time_step_spec(self.observation_spec)\n constructor = functools.partial(random_py_environment.RandomPyEnvironment,\n self.observation_spec, self.action_spec)\n return batched_py_environment.BatchedPyEnvironment(\n envs=[constructor() for _ in range(num_envs)],\n multithreading=multithreading)\n\n def _make_batched_mock_gym_py_environment(self, multithreading, num_envs=3):\n self.time_step_spec = ts.time_step_spec(self.observation_spec)\n constructor = functools.partial(GymWrapperEnvironmentMock,\n self.observation_spec, self.action_spec)\n return batched_py_environment.BatchedPyEnvironment(\n envs=[constructor() for _ in range(num_envs)],\n multithreading=multithreading)\n\n @parameterized.parameters(*COMMON_PARAMETERS)\n def test_close_no_hang_after_init(self, multithreading):\n env = self._make_batched_py_environment(multithreading)\n env.close()\n\n @parameterized.parameters(*COMMON_PARAMETERS)\n def test_get_specs(self, multithreading):\n env = self._make_batched_py_environment(multithreading)\n self.assertEqual(self.observation_spec, env.observation_spec())\n self.assertEqual(self.time_step_spec, env.time_step_spec())\n self.assertEqual(self.action_spec, env.action_spec())\n\n env.close()\n\n @parameterized.parameters(*COMMON_PARAMETERS)\n def test_get_info_gym_env(self, multithreading):\n num_envs = 5\n rng = np.random.RandomState()\n gym_env = self._make_batched_mock_gym_py_environment(\n multithreading, num_envs=num_envs)\n gym_env.reset()\n info = gym_env.get_info()\n 
self.assertEqual(info, {})\n action = np.stack([\n array_spec.sample_bounded_spec(self.action_spec, rng)\n for _ in range(num_envs)\n ])\n gym_env.step(action)\n info = gym_env.get_info()\n self.assertAllEqual(info['last_action'], action)\n gym_env.close()\n\n @parameterized.parameters(*COMMON_PARAMETERS)\n def test_step(self, multithreading):\n num_envs = 5\n env = self._make_batched_py_environment(multithreading, num_envs=num_envs)\n action_spec = env.action_spec()\n observation_spec = env.observation_spec()\n rng = np.random.RandomState()\n action = np.stack([\n array_spec.sample_bounded_spec(action_spec, rng)\n for _ in range(num_envs)\n ])\n env.reset()\n\n # Take one step and assert observation is batched the right way.\n time_step = env.step(action)\n self.assertEqual(num_envs, time_step.observation.shape[0])\n self.assertAllEqual(observation_spec.shape, time_step.observation.shape[1:])\n self.assertEqual(num_envs, action.shape[0])\n self.assertAllEqual(action_spec.shape, action.shape[1:])\n\n # Take another step and assert that observations have the same shape.\n time_step2 = env.step(action)\n self.assertAllEqual(time_step.observation.shape,\n time_step2.observation.shape)\n env.close()\n\n def test_unstack_actions(self):\n num_envs = 5\n action_spec = self.action_spec\n rng = np.random.RandomState()\n batched_action = np.array([\n array_spec.sample_bounded_spec(action_spec, rng)\n for _ in range(num_envs)\n ])\n\n # Test that actions are correctly unstacked when just batched in np.array.\n unstacked_actions = batched_py_environment.unstack_actions(batched_action)\n for action in unstacked_actions:\n self.assertAllEqual(action_spec.shape, action.shape)\n\n def test_unstack_nested_actions(self):\n num_envs = 5\n action_spec = self.action_spec\n rng = np.random.RandomState()\n batched_action = np.array([\n array_spec.sample_bounded_spec(action_spec, rng)\n for _ in range(num_envs)\n ])\n\n # Test that actions are correctly unstacked when nested in namedtuple.\n class NestedAction(\n collections.namedtuple('NestedAction', ['action', 'other_var'])):\n pass\n\n nested_action = NestedAction(\n action=batched_action, other_var=np.array([13.0] * num_envs))\n unstacked_actions = batched_py_environment.unstack_actions(nested_action)\n for nested_action in unstacked_actions:\n self.assertAllEqual(action_spec.shape, nested_action.action.shape)\n self.assertEqual(13.0, nested_action.other_var)\n\n\nif __name__ == '__main__':\n tf.test.main()\n",
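The test file above builds a `BatchedPyEnvironment` from several `RandomPyEnvironment` instances and steps it with a stacked action. A condensed usage sketch of that same pattern, with arbitrary spec shapes, might look like:

```python
# Condensed usage sketch mirroring the test setup; spec shapes are arbitrary.
import numpy as np

from tf_agents.environments import batched_py_environment
from tf_agents.environments import random_py_environment
from tf_agents.specs import array_spec

observation_spec = array_spec.ArraySpec((3, 3), np.float32)
action_spec = array_spec.BoundedArraySpec(
    [7], dtype=np.float32, minimum=-1.0, maximum=1.0)

num_envs = 4
envs = [
    random_py_environment.RandomPyEnvironment(observation_spec, action_spec)
    for _ in range(num_envs)
]
batched_env = batched_py_environment.BatchedPyEnvironment(
    envs=envs, multithreading=True)

batched_env.reset()
rng = np.random.RandomState(0)
action = np.stack(
    [array_spec.sample_bounded_spec(action_spec, rng) for _ in range(num_envs)])
time_step = batched_env.step(action)  # observation shape: (num_envs, 3, 3)
batched_env.close()
```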
"# coding=utf-8\n# Copyright 2018 The TF-Agents Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Tests for tf_agents.policies.actor_policy.\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom absl.testing import parameterized\nimport numpy as np\nimport tensorflow as tf # pylint: disable=g-explicit-tensorflow-version-import\nimport tensorflow_probability as tfp\nfrom tf_agents.networks import actor_distribution_network\nfrom tf_agents.networks import network\nfrom tf_agents.policies import actor_policy\nfrom tf_agents.specs import tensor_spec\nfrom tf_agents.trajectories import time_step as ts\nfrom tf_agents.utils import test_utils\n\n\nclass DummyActionNet(network.Network):\n\n def __init__(self, input_tensor_spec, output_tensor_spec):\n super(DummyActionNet, self).__init__(\n input_tensor_spec=input_tensor_spec,\n state_spec=(),\n name='DummyActionNet')\n single_action_spec = tf.nest.flatten(output_tensor_spec)[0]\n self._output_tensor_spec = output_tensor_spec\n self._sub_layers = [\n tf.keras.layers.Dense(\n single_action_spec.shape.num_elements(),\n activation=tf.nn.tanh,\n kernel_initializer=tf.compat.v1.initializers.constant([2, 1]),\n bias_initializer=tf.compat.v1.initializers.constant([5]),\n ),\n ]\n\n def call(self, observations, step_type, network_state):\n del step_type\n\n states = tf.cast(tf.nest.flatten(observations)[0], tf.float32)\n for layer in self._sub_layers:\n states = layer(states)\n\n single_action_spec = tf.nest.flatten(self._output_tensor_spec)[0]\n means = tf.reshape(states, [-1] + single_action_spec.shape.as_list())\n spec_means = (single_action_spec.maximum + single_action_spec.minimum) / 2.0\n spec_ranges = (\n single_action_spec.maximum - single_action_spec.minimum) / 2.0\n action_means = spec_means + spec_ranges * means\n\n return (tf.nest.pack_sequence_as(self._output_tensor_spec, [action_means]),\n network_state)\n\n\nclass DummyActionDistributionNet(DummyActionNet):\n\n def call(self, observations, step_type, network_state):\n action_means, network_state = super(DummyActionDistributionNet, self).call(\n observations, step_type, network_state)\n\n def _action_distribution(action_mean):\n action_std = tf.ones_like(action_mean)\n return tfp.distributions.Normal(action_mean, action_std)\n\n return tf.nest.map_structure(_action_distribution,\n action_means), network_state\n\n\ndef test_cases():\n return parameterized.named_parameters({\n 'testcase_name': 'SimpleNet',\n 'network_ctor': DummyActionNet,\n }, {\n 'testcase_name': 'DistributionNet',\n 'network_ctor': DummyActionDistributionNet,\n })\n\n\nclass ActorPolicyTest(parameterized.TestCase, test_utils.TestCase):\n\n def setUp(self):\n super(ActorPolicyTest, self).setUp()\n self._obs_spec = tensor_spec.TensorSpec([2], tf.float32)\n self._time_step_spec = ts.time_step_spec(self._obs_spec)\n self._action_spec = tensor_spec.BoundedTensorSpec([1], tf.float32, 2, 3)\n\n @property\n def _time_step(self):\n return 
ts.restart(tf.constant([1, 2], dtype=tf.float32))\n\n @property\n def _time_step_batch(self):\n return ts.TimeStep(\n tf.constant(\n ts.StepType.FIRST, dtype=tf.int32, shape=[2], name='step_type'),\n tf.constant(0.0, dtype=tf.float32, shape=[2], name='reward'),\n tf.constant(1.0, dtype=tf.float32, shape=[2], name='discount'),\n tf.constant([[1, 2], [3, 4]], dtype=tf.float32, name='observation'))\n\n @test_cases()\n def testBuild(self, network_ctor):\n actor_network = network_ctor(self._obs_spec, self._action_spec)\n policy = actor_policy.ActorPolicy(\n self._time_step_spec, self._action_spec, actor_network=actor_network)\n\n self.assertEqual(policy.time_step_spec, self._time_step_spec)\n self.assertEqual(policy.action_spec, self._action_spec)\n self.assertLen(policy.variables(), 2)\n\n @test_cases()\n def testActionBatch(self, network_ctor):\n actor_network = network_ctor(self._obs_spec, self._action_spec)\n policy = actor_policy.ActorPolicy(\n self._time_step_spec, self._action_spec, actor_network=actor_network)\n\n action_step = policy.action(self._time_step_batch)\n self.assertEqual(action_step.action.shape.as_list(), [2, 1])\n self.assertEqual(action_step.action.dtype, tf.float32)\n self.evaluate(tf.compat.v1.global_variables_initializer())\n actions_ = self.evaluate(action_step.action)\n self.assertTrue(np.all(actions_ >= self._action_spec.minimum))\n self.assertTrue(np.all(actions_ <= self._action_spec.maximum))\n\n def testUpdate(self):\n tf.compat.v1.set_random_seed(1)\n actor_network = DummyActionNet(self._obs_spec, self._action_spec)\n policy = actor_policy.ActorPolicy(\n self._time_step_spec, self._action_spec, actor_network=actor_network)\n self.assertLen(policy.variables(), 2)\n new_policy = actor_policy.ActorPolicy(\n self._time_step_spec, self._action_spec, actor_network=actor_network)\n\n action_step = policy.action(self._time_step_batch)\n self.assertLen(policy.variables(), 2)\n new_action_step = new_policy.action(self._time_step_batch)\n self.assertLen(new_policy.variables(), 2)\n\n self.assertEqual(action_step.action.shape, new_action_step.action.shape)\n self.assertEqual(action_step.action.dtype, new_action_step.action.dtype)\n\n self.evaluate(tf.compat.v1.global_variables_initializer())\n self.evaluate(new_policy.update(policy))\n actions_, new_actions_ = self.evaluate(\n [action_step.action, new_action_step.action])\n self.assertAllEqual(actions_, new_actions_)\n\n def testDeterministicDistribution(self):\n actor_network = DummyActionNet(self._obs_spec, self._action_spec)\n policy = actor_policy.ActorPolicy(\n self._time_step_spec, self._action_spec, actor_network=actor_network)\n\n action_step = policy.action(self._time_step_batch)\n distribution_step = policy.distribution(self._time_step_batch)\n self.assertIsInstance(distribution_step.action,\n tfp.distributions.Deterministic)\n distribution_mean = distribution_step.action.mean()\n self.evaluate(tf.compat.v1.global_variables_initializer())\n actions_ = self.evaluate(action_step.action)\n distribution_mean_ = self.evaluate(distribution_mean)\n self.assertNear(actions_[0], distribution_mean_[0], 1e-6)\n\n def testGaussianDistribution(self):\n actor_network = DummyActionDistributionNet(self._obs_spec,\n self._action_spec)\n policy = actor_policy.ActorPolicy(\n self._time_step_spec, self._action_spec, actor_network=actor_network)\n\n distribution_step = policy.distribution(self._time_step_batch)\n self.assertIsInstance(distribution_step.action, tfp.distributions.Normal)\n\n\nclass 
ActorPolicyDiscreteActionsTest(test_utils.TestCase):\n\n def setUp(self):\n super(ActorPolicyDiscreteActionsTest, self).setUp()\n self._obs_spec = tensor_spec.TensorSpec([2], tf.float32)\n self._time_step_spec = ts.time_step_spec(self._obs_spec)\n self._action_spec = tensor_spec.BoundedTensorSpec([1], tf.int32, 0, 7)\n\n @property\n def _time_step(self):\n return ts.restart(tf.constant([1, 2], dtype=tf.float32))\n\n @property\n def _time_step_batch(self):\n return ts.TimeStep(\n tf.constant(\n ts.StepType.FIRST, dtype=tf.int32, shape=[2], name='step_type'),\n tf.constant(0.0, dtype=tf.float32, shape=[2], name='reward'),\n tf.constant(1.0, dtype=tf.float32, shape=[2], name='discount'),\n tf.constant([[1, 2], [3, 4]], dtype=tf.float32, name='observation'))\n\n def testBuild(self):\n actor_network = actor_distribution_network.ActorDistributionNetwork(\n self._obs_spec, self._action_spec, fc_layer_params=(2, 1))\n policy = actor_policy.ActorPolicy(\n self._time_step_spec, self._action_spec, actor_network=actor_network)\n\n self.assertEqual(policy.time_step_spec, self._time_step_spec)\n self.assertEqual(policy.action_spec, self._action_spec)\n\n def testActionBatch(self):\n actor_network = actor_distribution_network.ActorDistributionNetwork(\n self._obs_spec, self._action_spec, fc_layer_params=(2, 1))\n policy = actor_policy.ActorPolicy(\n self._time_step_spec, self._action_spec, actor_network=actor_network)\n\n action_step = policy.action(self._time_step_batch)\n self.assertEqual(action_step.action.shape.as_list(), [2, 1])\n self.assertEqual(action_step.action.dtype, self._action_spec.dtype)\n self.evaluate(tf.compat.v1.global_variables_initializer())\n actions_ = self.evaluate(action_step.action)\n self.assertTrue(np.all(actions_ >= self._action_spec.minimum))\n self.assertTrue(np.all(actions_ <= self._action_spec.maximum))\n\n def testActionDistribution(self):\n actor_network = actor_distribution_network.ActorDistributionNetwork(\n self._obs_spec, self._action_spec, fc_layer_params=(2, 1))\n policy = actor_policy.ActorPolicy(\n self._time_step_spec, self._action_spec, actor_network=actor_network)\n\n # Force creation of variables before global_variables_initializer.\n policy.variables()\n self.evaluate(tf.compat.v1.global_variables_initializer())\n\n distribution = policy.distribution(self._time_step_batch)\n actions_ = self.evaluate(distribution.action.sample())\n self.assertTrue(np.all(actions_ >= self._action_spec.minimum))\n self.assertTrue(np.all(actions_ <= self._action_spec.maximum))\n\n def testMasking(self):\n batch_size = 1000\n num_state_dims = 5\n num_actions = 8\n observations = tf.random.uniform([batch_size, num_state_dims])\n time_step = ts.restart(observations, batch_size=batch_size)\n input_tensor_spec = tensor_spec.TensorSpec([num_state_dims], tf.float32)\n time_step_spec = ts.time_step_spec(input_tensor_spec)\n action_spec = tensor_spec.BoundedTensorSpec(\n [1], tf.int32, 0, num_actions - 1)\n\n # We create a fixed mask here for testing purposes. 
Normally the mask would\n # be part of the observation.\n mask = [0, 1, 0, 1, 0, 0, 1, 0]\n np_mask = np.array(mask)\n tf_mask = tf.constant([mask for _ in range(batch_size)])\n actor_network = actor_distribution_network.ActorDistributionNetwork(\n input_tensor_spec, action_spec, fc_layer_params=(2, 1))\n policy = actor_policy.ActorPolicy(\n time_step_spec, action_spec, actor_network=actor_network,\n observation_and_action_constraint_splitter=(\n lambda observation: (observation, tf_mask)))\n\n # Force creation of variables before global_variables_initializer.\n policy.variables()\n self.evaluate(tf.compat.v1.global_variables_initializer())\n\n # Sample from the policy 1000 times, and ensure that actions considered\n # invalid according to the mask are never chosen.\n action_step = policy.action(time_step)\n action = self.evaluate(action_step.action)\n self.assertEqual(action.shape, (batch_size, 1))\n self.assertAllEqual(np_mask[action], np.ones([batch_size, 1]))\n\n\nif __name__ == '__main__':\n tf.test.main()\n",
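The actor-policy tests above construct an `ActorPolicy` around an `ActorDistributionNetwork` and query batched actions and distributions. A stripped-down sketch of that flow (layer sizes and observation values are arbitrary) could be:

```python
# Stripped-down sketch of the test flow above; layer sizes and observation
# values are arbitrary.
import tensorflow as tf

from tf_agents.networks import actor_distribution_network
from tf_agents.policies import actor_policy
from tf_agents.specs import tensor_spec
from tf_agents.trajectories import time_step as ts

obs_spec = tensor_spec.TensorSpec([2], tf.float32)
time_step_spec = ts.time_step_spec(obs_spec)
action_spec = tensor_spec.BoundedTensorSpec([1], tf.int32, 0, 7)

actor_net = actor_distribution_network.ActorDistributionNetwork(
    obs_spec, action_spec, fc_layer_params=(16, 8))
policy = actor_policy.ActorPolicy(
    time_step_spec, action_spec, actor_network=actor_net)

observations = tf.constant([[1.0, 2.0], [3.0, 4.0]])
time_step = ts.restart(observations, batch_size=2)

action_step = policy.action(time_step)  # action shape [2, 1], dtype int32
distribution_step = policy.distribution(time_step)  # distribution over 0..7
```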
"# coding=utf-8\n# Copyright 2018 The TF-Agents Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# Lint as: python2, python3\n\"\"\"Tests for tf_agents.networks.encoding_network.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom absl.testing import parameterized\nimport numpy as np\nimport tensorflow as tf # pylint: disable=g-explicit-tensorflow-version-import\nfrom tf_agents.keras_layers import sequential_layer\nfrom tf_agents.networks import encoding_network\nfrom tf_agents.specs import tensor_spec\nfrom tf_agents.utils import test_utils\n\n\nclass EncodingNetworkTest(test_utils.TestCase, parameterized.TestCase):\n\n def test_empty_layers(self):\n input_spec = tensor_spec.TensorSpec((2, 3), tf.float32)\n network = encoding_network.EncodingNetwork(input_spec,)\n\n with self.assertRaises(ValueError):\n network.variables # pylint: disable=pointless-statement\n\n # Only one layer to flatten input.\n self.assertLen(network.layers, 1)\n config = network.layers[0].get_config()\n self.assertEqual('flatten', config['name'])\n\n out, _ = network(tf.ones((1, 2, 3)))\n self.assertAllEqual(out, [[1, 1, 1, 1, 1, 1]])\n self.assertEmpty(network.variables)\n\n def test_non_preprocessing_layers_2d(self):\n input_spec = tensor_spec.TensorSpec((32, 32, 3), tf.float32)\n network = encoding_network.EncodingNetwork(\n input_spec,\n conv_layer_params=((16, 2, 1), (15, 2, 1)),\n fc_layer_params=(10, 5, 2),\n activation_fn=tf.keras.activations.tanh,\n )\n\n network.create_variables()\n\n variables = network.variables\n self.assertLen(variables, 10)\n self.assertLen(network.layers, 6)\n\n # Validate first conv layer.\n config = network.layers[0].get_config()\n self.assertEqual('tanh', config['activation'])\n self.assertEqual((2, 2), config['kernel_size'])\n self.assertEqual(16, config['filters'])\n self.assertEqual((1, 1), config['strides'])\n self.assertTrue(config['trainable'])\n\n # Validate second conv layer.\n config = network.layers[1].get_config()\n self.assertEqual('tanh', config['activation'])\n self.assertEqual((2, 2), config['kernel_size'])\n self.assertEqual(15, config['filters'])\n self.assertEqual((1, 1), config['strides'])\n self.assertTrue(config['trainable'])\n\n # Validate flatten layer.\n config = network.layers[2].get_config()\n self.assertEqual('flatten', config['name'])\n\n # Validate dense layers.\n self.assertEqual(10, network.layers[3].get_config()['units'])\n self.assertEqual(5, network.layers[4].get_config()['units'])\n self.assertEqual(2, network.layers[5].get_config()['units'])\n\n def test_non_preprocessing_layers_1d(self):\n input_spec = tensor_spec.TensorSpec((32, 3), tf.float32)\n network = encoding_network.EncodingNetwork(\n input_spec,\n conv_layer_params=((16, 2, 1), (15, 2, 1)),\n fc_layer_params=(10, 5, 2),\n activation_fn=tf.keras.activations.tanh,\n conv_type='1d',\n )\n\n network.create_variables()\n\n variables = network.variables\n self.assertLen(variables, 10)\n 
self.assertLen(network.layers, 6)\n\n # Validate first conv layer.\n config = network.layers[0].get_config()\n self.assertEqual('tanh', config['activation'])\n self.assertEqual((2,), config['kernel_size'])\n self.assertEqual(16, config['filters'])\n self.assertEqual((1,), config['strides'])\n self.assertTrue(config['trainable'])\n\n # Validate second conv layer.\n config = network.layers[1].get_config()\n self.assertEqual('tanh', config['activation'])\n self.assertEqual((2,), config['kernel_size'])\n self.assertEqual(15, config['filters'])\n self.assertEqual((1,), config['strides'])\n self.assertTrue(config['trainable'])\n\n def test_conv_raise_error(self):\n input_spec = tensor_spec.TensorSpec((32, 3), tf.float32)\n with self.assertRaises(ValueError):\n _ = encoding_network.EncodingNetwork(\n input_spec,\n conv_layer_params=((16, 2, 1), (15, 2, 1)),\n fc_layer_params=(10, 5, 2),\n activation_fn=tf.keras.activations.tanh,\n conv_type='3d')\n\n def test_conv_dilation_params(self):\n with self.subTest(name='no dilations'):\n input_spec = tensor_spec.TensorSpec((32, 32, 3), tf.float32)\n network = encoding_network.EncodingNetwork(\n input_spec,\n conv_layer_params=((16, 2, 1), (15, 2, 1)),\n )\n\n network.create_variables()\n variables = network.variables\n\n self.assertLen(variables, 4)\n self.assertLen(network.layers, 3)\n\n # Validate dilation rates\n config = network.layers[0].get_config()\n self.assertEqual((1, 1), config['dilation_rate'])\n config = network.layers[1].get_config()\n self.assertEqual((1, 1), config['dilation_rate'])\n\n with self.subTest(name='dilations'):\n input_spec = tensor_spec.TensorSpec((32, 32, 3), tf.float32)\n network = encoding_network.EncodingNetwork(\n input_spec,\n conv_layer_params=((16, 2, 1, 2), (15, 2, 1, (2, 4))),\n )\n\n network.create_variables()\n variables = network.variables\n\n self.assertLen(variables, 4)\n self.assertLen(network.layers, 3)\n\n # Validate dilation rates\n config = network.layers[0].get_config()\n self.assertEqual((2, 2), config['dilation_rate'])\n config = network.layers[1].get_config()\n self.assertEqual((2, 4), config['dilation_rate'])\n\n with self.subTest(name='failing conv spec'):\n input_spec = tensor_spec.TensorSpec((32, 32, 3), tf.float32)\n with self.assertRaises(ValueError):\n network = encoding_network.EncodingNetwork(\n input_spec,\n conv_layer_params=((16, 2, 1, 2, 4), (15, 2, 1)),\n )\n with self.assertRaises(ValueError):\n network = encoding_network.EncodingNetwork(\n input_spec,\n conv_layer_params=((16, 2, 1), (15, 2)),\n )\n\n def test_preprocessing_layer_no_combiner(self):\n network = encoding_network.EncodingNetwork(\n input_tensor_spec=tensor_spec.TensorSpec([5], tf.float32),\n preprocessing_layers=tf.keras.layers.Lambda(lambda x: x),\n preprocessing_combiner=None,\n fc_layer_params=(2,))\n out, _ = network(tf.ones((3, 5)))\n self.assertAllEqual(out.shape.as_list(), [3, 2])\n\n def test_preprocessing_layers_no_combiner_error(self):\n with self.assertRaisesRegex(ValueError, 'required'):\n encoding_network.EncodingNetwork(\n input_tensor_spec=[\n tensor_spec.TensorSpec([5], tf.float32),\n tensor_spec.TensorSpec([5], tf.float32)\n ],\n preprocessing_layers=[\n tf.keras.layers.Lambda(lambda x: x),\n tf.keras.layers.Lambda(lambda x: x)\n ],\n preprocessing_combiner=None,\n fc_layer_params=(2,))\n\n def test_error_raised_if_missing_preprocessing_layer(self):\n with self.assertRaisesRegex(ValueError, 'sequence length'):\n encoding_network.EncodingNetwork(\n input_tensor_spec=[\n tensor_spec.TensorSpec([5], 
tf.float32),\n tensor_spec.TensorSpec([5], tf.float32)\n ],\n preprocessing_layers=[\n tf.keras.layers.Lambda(lambda x: x),\n ],\n preprocessing_combiner=None,\n fc_layer_params=(2,))\n\n def test_error_raised_extra_preprocessing_layer(self):\n with self.assertRaisesRegex(ValueError, 'sequence length'):\n encoding_network.EncodingNetwork(\n input_tensor_spec=tensor_spec.TensorSpec([5], tf.float32),\n preprocessing_layers=[\n tf.keras.layers.Lambda(lambda x: x),\n tf.keras.layers.Lambda(lambda x: x)\n ],\n preprocessing_combiner=None,\n fc_layer_params=(2,))\n\n def test_dict_spec_and_pre_processing(self):\n input_spec = {\n 'a': tensor_spec.TensorSpec((32, 32, 3), tf.float32),\n 'b': tensor_spec.TensorSpec((32, 32, 3), tf.float32)\n }\n network = encoding_network.EncodingNetwork(\n input_spec,\n preprocessing_layers={\n 'a':\n sequential_layer.SequentialLayer([\n tf.keras.layers.Dense(4, activation='tanh'),\n tf.keras.layers.Flatten()\n ]),\n 'b':\n tf.keras.layers.Flatten()\n },\n fc_layer_params=(),\n preprocessing_combiner=tf.keras.layers.Concatenate(axis=-1),\n activation_fn=tf.keras.activations.tanh,\n )\n\n sample_input = tensor_spec.sample_spec_nest(input_spec)\n output, _ = network(sample_input)\n # 6144 is the shape from a concat of flat (32, 32, 3) x2.\n self.assertEqual((7168,), output.shape)\n\n def test_layers_buildable(self):\n input_spec = {\n 'a': tensor_spec.TensorSpec((32, 32, 3), tf.float32),\n 'b': tensor_spec.TensorSpec((32, 32, 3), tf.float32)\n }\n network = encoding_network.EncodingNetwork(\n input_spec,\n preprocessing_layers={\n 'a':\n sequential_layer.SequentialLayer([\n tf.keras.layers.Dense(4, activation='tanh'),\n tf.keras.layers.Flatten()\n ]),\n 'b':\n tf.keras.layers.Flatten()\n },\n fc_layer_params=(),\n preprocessing_combiner=tf.keras.layers.Concatenate(axis=-1),\n activation_fn=tf.keras.activations.tanh,\n )\n network.create_variables()\n self.assertNotEmpty(network.variables)\n\n def testDenseFeaturesV1RaisesError(self):\n key = 'feature_key'\n state_dims = 5\n column = tf.feature_column.numeric_column(key, [state_dims])\n input_spec = {key: tensor_spec.TensorSpec([state_dims], tf.int32)}\n dense_features = tf.compat.v1.keras.layers.DenseFeatures([column])\n with self.assertRaisesRegex(ValueError, 'DenseFeatures'):\n encoding_network.EncodingNetwork(\n input_spec, preprocessing_combiner=dense_features)\n\n def testNumericFeatureColumnInput(self):\n key = 'feature_key'\n batch_size = 3\n state_dims = 5\n input_shape = (batch_size, state_dims)\n column = tf.feature_column.numeric_column(key, [state_dims])\n state = {key: tf.ones(input_shape, tf.int32)}\n input_spec = {key: tensor_spec.TensorSpec([state_dims], tf.int32)}\n\n dense_features = tf.compat.v2.keras.layers.DenseFeatures([column])\n network = encoding_network.EncodingNetwork(\n input_spec, preprocessing_combiner=dense_features)\n\n output, _ = network(state)\n self.assertEqual(input_shape, output.shape)\n\n def testIndicatorFeatureColumnInput(self):\n key = 'feature_key'\n vocab_list = [2, 3, 4]\n column = tf.feature_column.categorical_column_with_vocabulary_list(\n key, vocab_list)\n column = tf.feature_column.indicator_column(column)\n\n state_input = [3, 2, 2, 4, 3]\n state = {key: tf.expand_dims(state_input, -1)}\n input_spec = {key: tensor_spec.TensorSpec([1], tf.int32)}\n\n dense_features = tf.compat.v2.keras.layers.DenseFeatures([column])\n network = encoding_network.EncodingNetwork(\n input_spec, preprocessing_combiner=dense_features)\n\n output, _ = network(state)\n expected_shape = 
(len(state_input), len(vocab_list))\n self.assertEqual(expected_shape, output.shape)\n\n def testCombinedFeatureColumnInput(self):\n columns = {}\n tensors = {}\n specs = {}\n expected_dim = 0\n\n indicator_key = 'indicator_key'\n vocab_list = [2, 3, 4]\n column1 = tf.feature_column.categorical_column_with_vocabulary_list(\n indicator_key, vocab_list)\n columns[indicator_key] = tf.feature_column.indicator_column(column1)\n state_input = [3, 2, 2, 4, 3]\n tensors[indicator_key] = tf.expand_dims(state_input, -1)\n specs[indicator_key] = tensor_spec.TensorSpec([1], tf.int32)\n expected_dim += len(vocab_list)\n\n # TODO(b/134950354): Test embedding column for non-eager mode only for now.\n if not tf.executing_eagerly():\n embedding_key = 'embedding_key'\n embedding_dim = 3\n vocab_list = [2, 3, 4]\n column2 = tf.feature_column.categorical_column_with_vocabulary_list(\n embedding_key, vocab_list)\n columns[embedding_key] = tf.feature_column.embedding_column(\n column2, embedding_dim)\n state_input = [3, 2, 2, 4, 3]\n tensors[embedding_key] = tf.expand_dims(state_input, -1)\n specs[embedding_key] = tensor_spec.TensorSpec([1], tf.int32)\n expected_dim += embedding_dim\n\n numeric_key = 'numeric_key'\n batch_size = 5\n state_dims = 3\n input_shape = (batch_size, state_dims)\n columns[numeric_key] = tf.feature_column.numeric_column(\n numeric_key, [state_dims])\n tensors[numeric_key] = tf.ones(input_shape, tf.int32)\n specs[numeric_key] = tensor_spec.TensorSpec([state_dims], tf.int32)\n expected_dim += state_dims\n\n dense_features = tf.compat.v2.keras.layers.DenseFeatures(\n list(columns.values()))\n network = encoding_network.EncodingNetwork(\n specs, preprocessing_combiner=dense_features)\n\n output, _ = network(tensors)\n expected_shape = (batch_size, expected_dim)\n self.assertEqual(expected_shape, output.shape)\n\n @parameterized.named_parameters(\n ('TrainingTrue', True,),\n ('TrainingFalse', False))\n def testDropoutFCLayers(self, training):\n batch_size = 3\n num_obs_dims = 5\n obs_spec = tensor_spec.TensorSpec([num_obs_dims], tf.float32)\n network = encoding_network.EncodingNetwork(\n obs_spec,\n fc_layer_params=[20],\n dropout_layer_params=[0.5])\n obs = tf.random.uniform([batch_size, num_obs_dims])\n output1, _ = network(obs, training=training)\n output2, _ = network(obs, training=training)\n self.evaluate(tf.compat.v1.global_variables_initializer())\n output1, output2 = self.evaluate([output1, output2])\n if training:\n self.assertGreater(np.linalg.norm(output1 - output2), 0)\n else:\n self.assertAllEqual(output1, output2)\n\n def testWeightDecay(self):\n batch_size = 3\n num_obs_dims = 5\n obs_spec = tensor_spec.TensorSpec([num_obs_dims], tf.float32)\n network = encoding_network.EncodingNetwork(\n obs_spec,\n fc_layer_params=[20],\n weight_decay_params=[0.5])\n obs = tf.random.uniform([batch_size, num_obs_dims])\n network(obs)\n self.evaluate(tf.compat.v1.global_variables_initializer())\n regularization_loss = self.evaluate(network.losses[0])\n self.assertGreater(regularization_loss, 0)\n\n\nif __name__ == '__main__':\n tf.test.main()\n",
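The encoding-network tests above exercise dict observations with per-key preprocessing layers and a `Concatenate` combiner feeding the fully connected trunk. A small sketch of that configuration, with made-up keys, shapes, and layer sizes, might be:

```python
# Small sketch of the dict-observation configuration used in the tests above;
# keys, shapes, and layer sizes are made up.
import tensorflow as tf

from tf_agents.networks import encoding_network
from tf_agents.specs import tensor_spec

input_spec = {
    'image': tensor_spec.TensorSpec((8, 8, 3), tf.float32),
    'state': tensor_spec.TensorSpec((5,), tf.float32),
}
network = encoding_network.EncodingNetwork(
    input_spec,
    preprocessing_layers={
        'image': tf.keras.layers.Flatten(),
        'state': tf.keras.layers.Lambda(lambda x: x),
    },
    preprocessing_combiner=tf.keras.layers.Concatenate(axis=-1),
    fc_layer_params=(32, 16))

batch = {
    'image': tf.ones((4, 8, 8, 3)),
    'state': tf.ones((4, 5)),
}
output, _ = network(batch)  # output shape: (4, 16)
```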
"# coding=utf-8\n# Copyright 2018 The TF-Agents Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"TF-Agents SavedModel API.\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\n# Using Type Annotations.\nfrom __future__ import print_function\n\nimport copy\nimport functools\nimport os\nfrom typing import Callable, Dict, Tuple, Optional, Text\n\nfrom absl import logging\nimport tensorflow as tf # pylint: disable=g-explicit-tensorflow-version-import\nimport tensorflow_probability as tfp\n\nfrom tf_agents.policies import tf_policy\nfrom tf_agents.specs import tensor_spec\nfrom tf_agents.trajectories import time_step as ts\nfrom tf_agents.typing import types\nfrom tf_agents.utils import common\nfrom tf_agents.utils import nest_utils\n\n\nPOLICY_SPECS_PBTXT = 'policy_specs.pbtxt'\n\n\ndef _true_if_missing_or_collision(spec, spec_names):\n if not spec.name or spec.name in spec_names:\n return True\n spec_names.add(spec.name)\n return False\n\n\ndef _rename_spec_with_nest_paths(spec):\n renamed_spec = [\n tf.TensorSpec(shape=s.shape, name=path, dtype=s.dtype)\n for path, s in nest_utils.flatten_with_joined_paths(spec)\n ]\n return tf.nest.pack_sequence_as(spec, renamed_spec)\n\n\ndef _check_spec(spec):\n \"\"\"Checks for missing or colliding names in specs.\"\"\"\n spec_names = set()\n checked = [\n _true_if_missing_or_collision(s, spec_names)\n for s in tf.nest.flatten(spec)\n ]\n if any(checked):\n raise ValueError(\n 'Specs contain either a missing name or a name collision.\\n '\n 'Spec names: %s\\n' %\n (tf.nest.map_structure(lambda s: s.name or '<MISSING>', spec),))\n\n\nInputFnType = Callable[[types.NestedTensor], Tuple[types.NestedTensor,\n types.NestedTensor]]\nInputFnAndSpecType = Tuple[InputFnType, types.NestedTensorSpec]\n\n\nclass PolicySaver(object):\n \"\"\"A `PolicySaver` allows you to save a `tf_policy.Policy` to `SavedModel`.\n\n The `save()` method exports a saved model to the requested export location.\n The SavedModel that is exported can be loaded via\n `tf.compat.v2.saved_model.load` (or `tf.saved_model.load` in TF2). 
It\n will have available signatures (concrete functions): `action`,\n `get_initial_state`, `get_train_step.\n\n The attribute `model_variables` is also available when the saved_model is\n loaded which gives access to model variables in order to update them if\n needed.\n\n Usage:\n ```python\n\n my_policy = agent.collect_policy\n saver = PolicySaver(my_policy, batch_size=None)\n\n for i in range(...):\n agent.train(...)\n if i % 100 == 0:\n saver.save('policy_%d' % global_step)\n ```\n\n To load and use the saved policy directly:\n\n ```python\n saved_policy = tf.compat.v2.saved_model.load('policy_0')\n policy_state = saved_policy.get_initial_state(batch_size=3)\n time_step = ...\n while True:\n policy_step = saved_policy.action(time_step, policy_state)\n policy_state = policy_step.state\n time_step = f(policy_step.action)\n ...\n ```\n\n or to use the distributional form, e.g.:\n\n ```python\n batch_size = 3\n saved_policy = tf.compat.v2.saved_model.load('policy_0')\n policy_state = saved_policy.get_initial_state(batch_size=batch_size)\n time_step = ...\n while True:\n policy_step = saved_policy.distribution(time_step, policy_state)\n policy_state = policy_step.state\n time_step = f(policy_step.action.sample(batch_size))\n ...\n ```\n\n If using the flattened (signature) version, you will be limited to using\n dicts keyed by the specs' name fields.\n\n ```python\n saved_policy = tf.compat.v2.saved_model.load('policy_0')\n get_initial_state_fn = saved_policy.signatures['get_initial_state']\n action_fn = saved_policy.signatures['action']\n\n policy_state_dict = get_initial_state_fn(batch_size=3)\n time_step_dict = ...\n while True:\n time_step_state = dict(time_step_dict)\n time_step_state.update(policy_state_dict)\n policy_step_dict = action_fn(time_step_state)\n policy_state_dict = extract_policy_state_fields(policy_step_dict)\n action_dict = extract_action_fields(policy_step_dict)\n time_step_dict = f(action_dict)\n ...\n ```\n \"\"\"\n\n def __init__(\n self,\n policy: tf_policy.TFPolicy,\n batch_size: Optional[int] = None,\n use_nest_path_signatures: bool = True,\n seed: Optional[types.Seed] = None,\n train_step: Optional[tf.Variable] = None,\n input_fn_and_spec: Optional[InputFnAndSpecType] = None,\n metadata: Optional[Dict[Text, tf.Variable]] = None\n ):\n \"\"\"Initialize PolicySaver for TF policy `policy`.\n\n Args:\n policy: A TF Policy.\n batch_size: The number of batch entries the policy will process at a time.\n This must be either `None` (unknown batch size) or a python integer.\n use_nest_path_signatures: SavedModel spec signatures will be created based\n on the sructure of the specs. Otherwise all specs must have unique\n names.\n seed: Random seed for the `policy.action` call, if any (this should\n usually be `None`, except for testing).\n train_step: Variable holding the train step for the policy. The value\n saved will be set at the time `saver.save` is called. If not provided,\n train_step defaults to -1. Note since the train step must be a variable\n it is not safe to create it directly in TF1 so in that case this is a\n required parameter.\n input_fn_and_spec: A `(input_fn, tensor_spec)` tuple where input_fn is a\n function that takes inputs according to tensor_spec and converts them to\n the `(time_step, policy_state)` tuple that is used as the input to the\n action_fn. When `input_fn_and_spec` is set, `tensor_spec` is the input\n for the action signature. 
When `input_fn_and_spec is None`, the action\n signature takes as input `(time_step, policy_state)`.\n metadata: A dictionary of `tf.Variables` to be saved along with the\n policy.\n\n Raises:\n TypeError: If `policy` is not an instance of TFPolicy.\n TypeError: If `metadata` is not a dictionary of tf.Variables.\n ValueError: If use_nest_path_signatures is not used and any of the\n following `policy` specs are missing names, or the names collide:\n `policy.time_step_spec`, `policy.action_spec`,\n `policy.policy_state_spec`, `policy.info_spec`.\n ValueError: If `batch_size` is not either `None` or a python integer > 0.\n \"\"\"\n if not isinstance(policy, tf_policy.TFPolicy):\n raise TypeError('policy is not a TFPolicy. Saw: %s' % type(policy))\n if (batch_size is not None and\n (not isinstance(batch_size, int) or batch_size < 1)):\n raise ValueError(\n 'Expected batch_size == None or python int > 0, saw: %s' %\n (batch_size,))\n\n action_fn_input_spec = (policy.time_step_spec, policy.policy_state_spec)\n if use_nest_path_signatures:\n action_fn_input_spec = _rename_spec_with_nest_paths(action_fn_input_spec)\n else:\n _check_spec(action_fn_input_spec)\n\n # Make a shallow copy as we'll be making some changes in-place.\n saved_policy = tf.Module()\n saved_policy.collect_data_spec = copy.copy(policy.collect_data_spec)\n saved_policy.policy_state_spec = copy.copy(policy.policy_state_spec)\n\n if train_step is None:\n if not common.has_eager_been_enabled():\n raise ValueError('train_step is required in TF1 and must be a '\n '`tf.Variable`: %s' % train_step)\n train_step = tf.Variable(\n -1,\n trainable=False,\n dtype=tf.int64,\n aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA,\n shape=())\n elif not isinstance(train_step, tf.Variable):\n raise ValueError('train_step must be a TensorFlow variable: %s' %\n train_step)\n\n # We will need the train step for the Checkpoint object.\n self._train_step = train_step\n saved_policy.train_step = self._train_step\n\n self._metadata = metadata or {}\n for key, value in self._metadata.items():\n if not isinstance(key, str):\n raise TypeError('Keys of metadata must be strings: %s' % key)\n if not isinstance(value, tf.Variable):\n raise TypeError('Values of metadata must be tf.Variable: %s' % value)\n saved_policy.metadata = self._metadata\n\n if batch_size is None:\n get_initial_state_fn = policy.get_initial_state\n get_initial_state_input_specs = (tf.TensorSpec(\n dtype=tf.int32, shape=(), name='batch_size'),)\n else:\n get_initial_state_fn = functools.partial(\n policy.get_initial_state, batch_size=batch_size)\n get_initial_state_input_specs = ()\n\n get_initial_state_fn = common.function()(get_initial_state_fn)\n\n original_action_fn = policy.action\n\n if seed is not None:\n\n def action_fn(time_step, policy_state):\n return original_action_fn(time_step, policy_state, seed=seed)\n else:\n action_fn = original_action_fn\n\n def distribution_fn(time_step, policy_state):\n \"\"\"Wrapper for policy.distribution() in the SavedModel.\"\"\"\n try:\n outs = policy.distribution(\n time_step=time_step, policy_state=policy_state)\n return tf.nest.map_structure(_composite_distribution, outs)\n except (TypeError, NotImplementedError) as e:\n # TODO(b/156526399): Move this to just the policy.distribution() call\n # once tfp.experimental.as_composite() properly handles LinearOperator*\n # components as well as TransformedDistributions.\n logging.warn(\n 'WARNING: Could not serialize policy.distribution() for policy '\n '\"%s\". 
Calling saved_model.distribution() will raise the following '\n 'assertion error: %s', policy, e)\n @common.function()\n def _raise():\n tf.Assert(False, [str(e)])\n return ()\n outs = _raise()\n\n # We call get_concrete_function() for its side effect: to ensure the proper\n # ConcreteFunction is stored in the SavedModel.\n get_initial_state_fn.get_concrete_function(*get_initial_state_input_specs)\n\n train_step_fn = common.function(\n lambda: saved_policy.train_step).get_concrete_function()\n get_metadata_fn = common.function(\n lambda: saved_policy.metadata).get_concrete_function()\n\n def add_batch_dim(spec):\n return tf.TensorSpec(\n shape=tf.TensorShape([batch_size]).concatenate(spec.shape),\n name=spec.name,\n dtype=spec.dtype)\n\n batched_time_step_spec = tf.nest.map_structure(add_batch_dim,\n policy.time_step_spec)\n batched_policy_state_spec = tf.nest.map_structure(add_batch_dim,\n policy.policy_state_spec)\n\n policy_step_spec = policy.policy_step_spec\n policy_state_spec = policy.policy_state_spec\n\n if use_nest_path_signatures:\n batched_time_step_spec = _rename_spec_with_nest_paths(\n batched_time_step_spec)\n batched_policy_state_spec = _rename_spec_with_nest_paths(\n batched_policy_state_spec)\n policy_step_spec = _rename_spec_with_nest_paths(policy_step_spec)\n policy_state_spec = _rename_spec_with_nest_paths(policy_state_spec)\n else:\n _check_spec(batched_time_step_spec)\n _check_spec(batched_policy_state_spec)\n _check_spec(policy_step_spec)\n _check_spec(policy_state_spec)\n\n if input_fn_and_spec is not None:\n # Store a signature based on input_fn_and_spec\n @common.function()\n def polymorphic_action_fn(example):\n action_inputs = input_fn_and_spec[0](example)\n tf.nest.map_structure(\n lambda spec, t: tf.Assert(spec.is_compatible_with(t[0]), [t]),\n action_fn_input_spec, action_inputs)\n return action_fn(*action_inputs)\n\n @common.function()\n def polymorphic_distribution_fn(example):\n action_inputs = input_fn_and_spec[0](example)\n tf.nest.map_structure(\n lambda spec, t: tf.Assert(spec.is_compatible_with(t[0]), [t]),\n action_fn_input_spec, action_inputs)\n return distribution_fn(*action_inputs)\n\n batched_input_spec = tf.nest.map_structure(add_batch_dim,\n input_fn_and_spec[1])\n # We call get_concrete_function() for its side effect: to ensure the\n # proper ConcreteFunction is stored in the SavedModel.\n polymorphic_action_fn.get_concrete_function(example=batched_input_spec)\n polymorphic_distribution_fn.get_concrete_function(\n example=batched_input_spec)\n\n action_input_spec = (input_fn_and_spec[1],)\n\n else:\n action_input_spec = action_fn_input_spec\n if batched_policy_state_spec:\n # Store the signature with a required policy state spec\n polymorphic_action_fn = common.function()(action_fn)\n polymorphic_action_fn.get_concrete_function(\n time_step=batched_time_step_spec,\n policy_state=batched_policy_state_spec)\n\n polymorphic_distribution_fn = common.function()(distribution_fn)\n polymorphic_distribution_fn.get_concrete_function(\n time_step=batched_time_step_spec,\n policy_state=batched_policy_state_spec)\n else:\n # Create a polymorphic action_fn which you can call as\n # restored.action(time_step)\n # or\n # restored.action(time_step, ())\n # (without retracing the inner action twice)\n @common.function()\n def polymorphic_action_fn(time_step,\n policy_state=batched_policy_state_spec):\n return action_fn(time_step, policy_state)\n\n polymorphic_action_fn.get_concrete_function(\n time_step=batched_time_step_spec,\n 
policy_state=batched_policy_state_spec)\n polymorphic_action_fn.get_concrete_function(\n time_step=batched_time_step_spec)\n\n @common.function()\n def polymorphic_distribution_fn(time_step,\n policy_state=batched_policy_state_spec):\n return distribution_fn(time_step, policy_state)\n\n polymorphic_distribution_fn.get_concrete_function(\n time_step=batched_time_step_spec,\n policy_state=batched_policy_state_spec)\n polymorphic_distribution_fn.get_concrete_function(\n time_step=batched_time_step_spec)\n\n signatures = {\n # CompositeTensors aren't well supported by old-style signature\n # mechanisms, so we do not have a signature for policy.distribution.\n 'action':\n _function_with_flat_signature(\n polymorphic_action_fn,\n input_specs=action_input_spec,\n output_spec=policy_step_spec,\n include_batch_dimension=True,\n batch_size=batch_size),\n 'get_initial_state':\n _function_with_flat_signature(\n get_initial_state_fn,\n input_specs=get_initial_state_input_specs,\n output_spec=policy_state_spec,\n include_batch_dimension=False),\n 'get_train_step':\n _function_with_flat_signature(\n train_step_fn,\n input_specs=(),\n output_spec=train_step.dtype,\n include_batch_dimension=False),\n 'get_metadata':\n _function_with_flat_signature(\n get_metadata_fn,\n input_specs=(),\n output_spec=tf.nest.map_structure(lambda v: v.dtype,\n self._metadata),\n include_batch_dimension=False),\n }\n\n saved_policy.action = polymorphic_action_fn\n saved_policy.distribution = polymorphic_distribution_fn\n saved_policy.get_initial_state = get_initial_state_fn\n saved_policy.get_train_step = train_step_fn\n saved_policy.get_metadata = get_metadata_fn\n # Adding variables as an attribute to facilitate updating them.\n saved_policy.model_variables = policy.variables()\n\n # TODO(b/156779400): Move to a public API for accessing all trackable leaf\n # objects (once it's available). For now, we have no other way of tracking\n # objects like Tables, Vocabulary files, etc.\n try:\n saved_policy._all_assets = policy._unconditional_checkpoint_dependencies # pylint: disable=protected-access\n except AttributeError as e:\n if '_self_unconditional' in str(e):\n logging.warn(\n 'Unable to capture all trackable objects in policy \"%s\". This '\n 'may be okay. 
Error: %s', policy, e)\n else:\n raise e\n\n self._policy = saved_policy\n self._signatures = signatures\n self._action_input_spec = action_input_spec\n self._policy_step_spec = policy_step_spec\n self._policy_state_spec = policy_state_spec\n\n @property\n def action_input_spec(self) -> types.NestedTensorSpec:\n \"\"\"Tuple `(time_step_spec, policy_state_spec)` for feeding `action`.\n\n This describes the input of `action` in the SavedModel.\n\n This may differ from the original policy if `use_nest_path_signatures` was\n enabled.\n\n Returns:\n A nest of specs.\n \"\"\"\n return self._action_input_spec\n\n @property\n def policy_step_spec(self) -> types.NestedTensorSpec:\n \"\"\"Spec that describes the output of `action` in the SavedModel.\n\n This may differ from the original policy if `use_nest_path_signatures` was\n enabled.\n\n Returns:\n A nest of specs.\n \"\"\"\n return self._policy_step_spec\n\n @property\n def policy_state_spec(self) -> types.NestedTensorSpec:\n \"\"\"Spec that describes the output of `get_initial_state` in the SavedModel.\n\n This may differ from the original policy if `use_nest_path_signatures` was\n enabled.\n\n Returns:\n A nest of specs.\n \"\"\"\n return self._policy_state_spec\n\n @property\n def signatures(self) -> Dict[Text, Callable]: # pylint: disable=g-bare-generic\n \"\"\"Get the (flat) signatures used when exporting the `SavedModel`.\n\n Returns:\n A `dict` mapping each of \"action\", \"get_initial_state\", \"get_train_step\"\n and \"get_metadata\" to their respective flat signatures.\n \"\"\"\n return self._signatures\n\n def get_train_step(self) -> types.Int:\n \"\"\"Returns the train step of the policy.\n\n Returns:\n An integer.\n \"\"\"\n if tf.executing_eagerly():\n return self._train_step.numpy()\n else:\n return tf.identity(self._train_step)\n\n def get_metadata(self) -> Dict[Text, tf.Variable]:\n \"\"\"Returns the metadata of the policy.\n\n Returns:\n An a dictionary of tf.Variable.\n \"\"\"\n if tf.executing_eagerly():\n return {k: self._metadata[k].numpy() for k in self._metadata}\n else:\n return self._metadata\n\n def save(self,\n export_dir: Text,\n options: Optional[tf.saved_model.SaveOptions] = None):\n \"\"\"Save the policy to the given `export_dir`.\n\n Args:\n export_dir: Directory to save the policy to.\n options: Optional `tf.saved_model.SaveOptions` object.\n \"\"\"\n tf.compat.v2.saved_model.save(\n self._policy, export_dir, signatures=self._signatures, options=options)\n\n temp_spec_file_name = '{}_temp'.format(POLICY_SPECS_PBTXT)\n temp_spec_output_path = os.path.join(export_dir, temp_spec_file_name)\n specs = {\n 'collect_data_spec': self._policy.collect_data_spec,\n 'policy_state_spec': self._policy.policy_state_spec\n }\n tensor_spec.to_pbtxt_file(temp_spec_output_path, specs)\n spec_output_path = os.path.join(export_dir, POLICY_SPECS_PBTXT)\n # By moving the file to its final location makes it safer to wait for the\n # file (e.g. from a separate binary). The parameter `overwrite=True`\n # reproduces the exact previous behavior.\n tf.io.gfile.rename(temp_spec_output_path, spec_output_path, overwrite=True)\n\n def save_checkpoint(self,\n export_dir: Text,\n options: Optional[tf.train.CheckpointOptions] = None):\n \"\"\"Saves the policy as a checkpoint to the given `export_dir`.\n\n This will only work with checkpoints generated in TF2.x.\n\n For the checkpoint to be useful users should first call `save` to generate a\n saved_model of the policy. 
Checkpoints can then be used to update the policy\n without having to reload the saved_model, or saving multiple copies of the\n `saved_model.pb` file.\n\n The checkpoint is always created in the sub-directory 'variables/' and the\n checkpoint file prefix used is 'variables'. The checkpoint files are as\n follows:\n * export_dir/variables/variables.index\n * export_dir/variables/variables-xxxxx-of-xxxxx\n\n This makes the files compatible with the checkpoint part of full saved\n models, which enables you to load a saved model made up from the graph part\n of a full saved model and the variables part of a checkpoint.\n\n Args:\n export_dir: Directory to save the checkpoint to.\n options: Optional `tf.train.CheckpointOptions` object.\n \"\"\"\n # In addition to the policy, also list dependencies on model_variables and\n # train_step so the checkpoint can be combined with a saved graph from a\n # full saved model.\n checkpoint = tf.compat.v2.train.Checkpoint(\n policy=self._policy,\n model_variables=self._policy.model_variables,\n train_step=self._train_step)\n # Use write() to make sure that the file prefix is not modified by appending\n # a save counter value.\n file_prefix = os.path.join(export_dir, tf.saved_model.VARIABLES_DIRECTORY,\n tf.saved_model.VARIABLES_FILENAME)\n checkpoint.write(file_prefix, options=options)\n\n\ndef _function_with_flat_signature(function,\n input_specs,\n output_spec,\n include_batch_dimension,\n batch_size=None):\n \"\"\"Create a tf.function with a given signature for export.\n\n Args:\n function: A callable that can be wrapped in tf.function.\n input_specs: A tuple nested specs declaring ordered arguments to function.\n output_spec: The nested spec describing the output of the function.\n include_batch_dimension: Python bool, whether to prepend a batch dimension\n to inputs and outputs.\n batch_size: Known batch size, or `None` for unknown. Ignored if\n `include_batch_dimension == False`.\n\n Returns:\n A `tf.function` with the given input spec that returns a `dict` mapping\n output spec keys to corresponding output values.\n \"\"\"\n\n def _with_batch(spec):\n if include_batch_dimension:\n return tf.TensorSpec(\n shape=tf.TensorShape([batch_size]).concatenate(spec.shape),\n name=spec.name,\n dtype=spec.dtype)\n else:\n return spec\n\n flat_input_spec = [_with_batch(spec) for spec in tf.nest.flatten(input_specs)]\n\n def as_dict(outputs, output_spec):\n nest_utils.assert_same_structure(outputs, output_spec)\n flat_outputs = tf.nest.flatten(outputs)\n flat_names = [s.name for s in tf.nest.flatten(output_spec)]\n return dict(zip(flat_names, flat_outputs))\n\n @common.function(input_signature=flat_input_spec)\n def function_with_signature(*input_list):\n inputs_ = tf.nest.pack_sequence_as(input_specs, input_list)\n outputs_ = function(*inputs_)\n dict_outputs_ = as_dict(outputs_, output_spec)\n return dict_outputs_\n\n return function_with_signature\n\n\ndef specs_from_collect_data_spec(\n loaded_policy_specs: types.NestedTensorSpec\n) -> Dict[types.NestedSpec, types.NestedSpec]:\n \"\"\"Creates policy specs from specs loaded from disk.\n\n The PolicySaver saves policy specs next to the saved model as\n a `struct.StructuredValue` proto. 
This recreates the\n original specs from the proto.\n\n Pass the proto loaded from the file with `tensor_spec.from_pbtxt_file()`\n to this function.\n\n Args:\n loaded_policy_specs: `struct.StructuredValue` proto that had been\n previously created by PolicySaver as a pbtxt.\n\n Returns:\n A dict with specs extracted from the proto. The dict contains the following\n keys and values. Except `time_step_spec` all the specs are nests of\n `ArraySpecs`.\n * `collect_data_spec`: Collect data spec for the policy.\n * `time_step_spec`: `TimeStepSpec` for the policy.\n * `action_spec`: Action spec for the policy\n * `policy_state_spec`: State spec for the policy.\n * `info_spec`: Info spec for the policy.\n \"\"\"\n policy_specs = tensor_spec.to_nest_array_spec(loaded_policy_specs)\n collect_data_spec = policy_specs['collect_data_spec']\n policy_state_spec = policy_specs['policy_state_spec']\n time_step_spec = ts.TimeStep(\n step_type=collect_data_spec.step_type,\n reward=collect_data_spec.reward,\n discount=collect_data_spec.discount,\n observation=collect_data_spec.observation)\n action_spec = collect_data_spec.action\n info_spec = collect_data_spec.policy_info\n return dict(\n collect_data_spec=collect_data_spec,\n time_step_spec=time_step_spec,\n action_spec=action_spec,\n policy_state_spec=policy_state_spec,\n info_spec=info_spec)\n\n\ndef _composite_distribution(d):\n \"\"\"Converts tfp Distributions to CompositeTensors.\"\"\"\n return (tfp.experimental.as_composite(d)\n if isinstance(d, tfp.distributions.Distribution)\n else d)\n"
] |
[
[
"tensorflow.constant",
"tensorflow.shape",
"tensorflow.compat.dimension_value",
"tensorflow.no_op",
"tensorflow.nest.flatten",
"tensorflow.nest.map_structure"
],
[
"numpy.array",
"numpy.random.RandomState",
"tensorflow.test.main"
],
[
"tensorflow.constant",
"tensorflow.random.uniform",
"tensorflow.ones_like",
"tensorflow.test.main",
"tensorflow.nest.flatten",
"tensorflow.compat.v1.global_variables_initializer",
"numpy.all",
"numpy.ones",
"tensorflow.compat.v1.initializers.constant",
"tensorflow.compat.v1.set_random_seed",
"numpy.array",
"tensorflow.nest.pack_sequence_as",
"tensorflow.nest.map_structure"
],
[
"tensorflow.feature_column.embedding_column",
"tensorflow.keras.layers.Concatenate",
"tensorflow.feature_column.categorical_column_with_vocabulary_list",
"tensorflow.executing_eagerly",
"tensorflow.keras.layers.Lambda",
"tensorflow.compat.v2.keras.layers.DenseFeatures",
"tensorflow.keras.layers.Dense",
"tensorflow.compat.v1.keras.layers.DenseFeatures",
"tensorflow.random.uniform",
"tensorflow.test.main",
"tensorflow.expand_dims",
"tensorflow.ones",
"tensorflow.compat.v1.global_variables_initializer",
"numpy.linalg.norm",
"tensorflow.feature_column.numeric_column",
"tensorflow.feature_column.indicator_column",
"tensorflow.keras.layers.Flatten"
],
[
"tensorflow.TensorShape",
"tensorflow.executing_eagerly",
"tensorflow.Variable",
"tensorflow.identity",
"tensorflow.nest.flatten",
"tensorflow.Module",
"tensorflow.io.gfile.rename",
"tensorflow.compat.v2.saved_model.save",
"tensorflow.nest.pack_sequence_as",
"tensorflow.compat.v2.train.Checkpoint",
"tensorflow.TensorSpec",
"tensorflow.nest.map_structure"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [
"1.10",
"1.12",
"1.11",
"1.19",
"1.24",
"1.13",
"1.16",
"1.9",
"1.18",
"1.23",
"1.21",
"1.22",
"1.20",
"1.7",
"1.15",
"1.14",
"1.17",
"1.8"
],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
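The saved-model policy code above builds its flat signatures by flattening nested input specs with `tf.nest.flatten` and restoring the structure with `tf.nest.pack_sequence_as` inside `_function_with_flat_signature`. A minimal sketch of that round trip on a toy nest; the structure below is illustrative and not taken from the original file:

```python
import tensorflow as tf

# Toy nested structure standing in for (time_step_spec, policy_state_spec).
nested = {'time_step': (tf.constant(1.0), tf.constant([2.0, 3.0])),
          'state': tf.constant(0)}

# Flatten the nest into an ordered list of leaves, as
# _function_with_flat_signature does with its input specs.
flat = tf.nest.flatten(nested)

# ... a tf.function with a flat input_signature would receive `flat` ...

# Rebuild the original structure from the flat list.
restored = tf.nest.pack_sequence_as(nested, flat)
tf.nest.assert_same_structure(nested, restored)
print(restored)
```

The same pair of calls appears in `function_with_signature` above, which packs the flat argument list back into `input_specs` before invoking the wrapped function.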
multimodallearning/weight-estimation-under-cover
|
[
"d79ef9f9e66ebcc20bdce0b20297f2e902c1de67"
] |
[
"models/unet3d_cnn3d_e2e.py"
] |
[
"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom models.cnn3d import CNN3D\n\n\nclass UNet3D_CNN3D(nn.Module):\n def __init__(self, cfg):\n super(UNet3D_CNN3D, self).__init__()\n\n self.down1a = nn.Sequential(nn.Conv3d(in_channels=1, out_channels=32, kernel_size=3, stride=1, padding=1),\n nn.BatchNorm3d(32),\n nn.ReLU())\n self.down1b = nn.Sequential(nn.Conv3d(in_channels=32, out_channels=64, kernel_size=3, stride=1, padding=1),\n nn.BatchNorm3d(64),\n nn.ReLU())\n self.max1 = nn.MaxPool3d(2)\n\n self.down2a = nn.Sequential(nn.Conv3d(in_channels=64, out_channels=64, kernel_size=3, stride=1, padding=1),\n nn.BatchNorm3d(64),\n nn.ReLU())\n self.down2b = nn.Sequential(nn.Conv3d(in_channels=64, out_channels=128, kernel_size=3, stride=1, padding=1),\n nn.BatchNorm3d(128),\n nn.ReLU())\n self.max2 = nn.MaxPool3d(2)\n\n self.down3a = nn.Sequential(nn.Conv3d(in_channels=128, out_channels=128, kernel_size=3, stride=1, padding=1),\n nn.BatchNorm3d(128),\n nn.ReLU())\n self.down3b = nn.Sequential(nn.Conv3d(in_channels=128, out_channels=256, kernel_size=3, stride=1, padding=1),\n nn.BatchNorm3d(256),\n nn.ReLU())\n self.max3 = nn.MaxPool3d(2)\n\n self.down4a = nn.Sequential(nn.Conv3d(in_channels=256, out_channels=256, kernel_size=3, stride=1, padding=1),\n nn.BatchNorm3d(256),\n nn.ReLU())\n self.down4b = nn.Sequential(nn.Conv3d(in_channels=256, out_channels=512, kernel_size=3, stride=1, padding=1),\n nn.BatchNorm3d(512),\n nn.ReLU())\n\n self.up1 = nn.ConvTranspose3d(in_channels=512, out_channels=512, kernel_size=2, stride=2)\n self.up2 = nn.ConvTranspose3d(in_channels=256, out_channels=256, kernel_size=2, stride=2)\n self.up3 = nn.ConvTranspose3d(in_channels=128, out_channels=128, kernel_size=2, stride=2)\n\n self.trans1a = nn.Sequential(nn.Conv3d(in_channels=768, out_channels=256, kernel_size=3, stride=1, padding=1),\n nn.BatchNorm3d(256),\n nn.ReLU())\n self.trans1b = nn.Sequential(nn.Conv3d(in_channels=256, out_channels=256, kernel_size=3, stride=1, padding=1),\n nn.BatchNorm3d(256),\n nn.ReLU())\n\n self.trans2a = nn.Sequential(nn.Conv3d(in_channels=384, out_channels=128, kernel_size=3, stride=1, padding=1),\n nn.BatchNorm3d(128),\n nn.ReLU())\n self.trans2b = nn.Sequential(nn.Conv3d(in_channels=128, out_channels=128, kernel_size=3, stride=1, padding=1),\n nn.BatchNorm3d(128),\n nn.ReLU())\n\n self.trans3a = nn.Sequential(nn.Conv3d(in_channels=192, out_channels=64, kernel_size=3, stride=1, padding=1),\n nn.BatchNorm3d(64),\n nn.ReLU(),\n nn.Conv3d(in_channels=64, out_channels=64, kernel_size=3, stride=1, padding=1),\n nn.BatchNorm3d(64),\n nn.ReLU(),\n nn.Conv3d(in_channels=64, out_channels=2, kernel_size=1, stride=1, padding=0))\n\n self.predictor = CNN3D(cfg)\n\n def forward(self, x):\n\n x = self.down1a(x)\n x = self.down1b(x)\n x1 = x\n x = self.max1(x)\n x = self.down2a(x)\n x = self.down2b(x)\n x2 = x\n x = self.max2(x)\n x = self.down3a(x)\n x = self.down3b(x)\n x3 = x\n x = self.max3(x)\n x = self.down4a(x)\n x = self.down4b(x)\n x = self.up1(x)\n x = torch.cat((x, x3), dim=1)\n x = self.trans1a(x)\n x = self.trans1b(x)\n x = self.up2(x)\n x = torch.cat((x, x2), dim=1)\n x = self.trans2a(x)\n x = self.trans2b(x)\n x = self.up3(x)\n x = torch.cat((x, x1), dim=1)\n x = self.trans3a(x)\n\n x = F.softmax(x, dim=1)[:, 1:, :, :, :]\n\n x = self.predictor(x)\n return x\n"
] |
[
[
"torch.nn.functional.softmax",
"torch.cat",
"torch.nn.ConvTranspose3d",
"torch.nn.MaxPool3d",
"torch.nn.Conv3d",
"torch.nn.ReLU",
"torch.nn.BatchNorm3d"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
TianchengShu/pydata-revised
|
[
"df6382f1e5f54aeb15cbd8122533adfdd569efbc"
] |
[
"examples/cprof_example.py"
] |
[
"import numpy as np\nfrom numpy.linalg import eigvals\n\ndef run_experiment(niter=100):\n K = 100\n results = []\n for _ in range(niter):\n mat = np.random.randn(K, K)\n max_eigenvalue = np.abs(eigvals(mat)).max()\n results.append(max_eigenvalue)\n return results\nsome_results = run_experiment()\nprint('Largest one we saw: %s' % np.max(some_results))\n"
] |
[
[
"numpy.max",
"numpy.linalg.eigvals",
"numpy.random.randn"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
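Given its name, `cprof_example.py` above is presumably meant to be run under a profiler; one way to do that from inside Python (the `sort='cumulative'` choice is a common default, not something the file specifies) is:

```python
import cProfile
import numpy as np
from numpy.linalg import eigvals

def run_experiment(niter=100, K=100):
    # Same workload as cprof_example.py: largest |eigenvalue| of random K x K matrices.
    return [np.abs(eigvals(np.random.randn(K, K))).max() for _ in range(niter)]

if __name__ == '__main__':
    # Print per-function timings, heaviest cumulative time first.
    cProfile.run('run_experiment()', sort='cumulative')
```

The command-line equivalent is `python -m cProfile -s cumulative cprof_example.py`.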
SeokjaeLIM/DSLR-release
|
[
"861429482faf50ee3d6570948af8c48df1fc7f43"
] |
[
"test.py"
] |
[
"#from Python\nimport time\nimport csv\nimport os\nimport math\nimport numpy as np\nimport sys\nfrom shutil import copyfile\n\n#from Pytorch\nimport torch\nimport torchvision\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.autograd import Variable\nfrom torchvision import datasets\nfrom torchvision import transforms\nfrom torchvision.utils import save_image\nimport torch.nn.utils as torch_utils\nfrom torch.optim.lr_scheduler import StepLR\n\n#from this project\nfrom data_loader import get_loader\nimport data_loader as dl\nimport VisionOP\nimport model\nimport param as p\nimport utils \nimport pytorch_ssim\n\n#local function\ndef to_var(x):\n if torch.cuda.is_available():\n x = x.cuda()\n return Variable(x)\n\ndef denorm(x):\n out = (x + 1) / 2\n return out.clamp(0, 1)\n\ndef norm(x):\n out = (x - 0.5) * 2\n return out.clamp(-1,1)\n\n################ Hyper Parameters ################\n# VERSION\nversion = '2019-12-19(LPGnet-with-LRblock)'\nsubversion = '1_1'\n\n# data Set\ndataSetName = p.dataSetName\ndataSetMode = p.dataSetMode\ndataPath = p.dataPath\n\nmaxDataNum = p.maxDataNum #in fact, 4500\nbatchSize = p.batchSize\n\nMaxCropWidth = p.MaxCropWidth\nMinCropWidth = p.MinCropWidth\nMaxCropHeight = p.MaxCropHeight\nMinCropHeight = p.MinCropHeight\n\n# model\nNOF = p.NOF\n\n# train\nMaxEpoch = p.MaxEpoch\nlearningRate = p.learningRate\n\n# save\nnumberSaveImage = p.numberSaveImage\n\n############################################\n\n\n############################################\n############################################\nprint(\"\")\nprint(\" _____ ______ _______ _____ _ _ ________ __ \")\nprint(\" | __ \\\\| ____|__ __|_ _| \\\\ | | ____\\\\ \\\\ / / \")\nprint(\" | |__) | |__ | | | | | \\\\| | |__ \\\\ V / \")\nprint(\" | _ /| __| | | | | | . ` | __| > < \")\nprint(\" | | \\\\ \\\\| |____ | | _| |_| |\\\\ | |____ / . 
\\\\\")\nprint(\" |_| \\\\_\\\\______| |_| |_____|_| \\\\_|______/_/ \\\\_\\\\ \") \nprint(\"\")\nprint(\"Retinex model\")\nprint(\"main Version : \" + version)\nprint(\"sub Version : \" + subversion)\nprint(\"\")\n############################################\n############################################\n\ntorch.backends.cudnn.benchmark = True\n\n# system setting\nMODE = sys.argv[1]\n\ndataSetMode = 'test'\ndataPath = './data/test/'\n\ndata_loader = get_loader(dataPath,MaxCropWidth,MinCropWidth,MaxCropHeight,MinCropHeight,batchSize,dataSetName,dataSetMode)\n\n\n#init model\nRetinex = model.LMSN()\nRetinex = nn.DataParallel(Retinex).cuda() \n\n\n\n#model load\nstartEpoch = 0\nprint(\"Load below models\")\n\nif (MODE != 'n'):\n checkpoint_rt = torch.load('./data/model/Retinex' + '.pkl')\n Retinex.load_state_dict(checkpoint_rt['model'])\n print(\"All model loaded.\")\n\ndef psnr(input, target):\n #print(torch.min(input))\n #print(torch.max(input))\n input = torch.abs(input - target).cuda()\n\n MSE = torch.mean(input * input)\n\n PSNR = 10 * math.log10(1 / MSE)\n\n return PSNR\n\n\n#a = time.perf_counter()\n\nfor epoch in range(0, 1):\n\n # ============= Train Retinex & Adjust module =============#\n finali = 0\n ssim = 0\n psnr2 = 0\n\n torch.set_grad_enabled(False)\n\n# rt_scheduler.step(epoch)\n #total_time = 0\n j=0\n avg_in = 0\n avg_out = 0\n for i, (images) in enumerate(data_loader):\n\n \n b,c,h,w_ = images.size()\n w = int(w_/2)\n if i == 0:\n total_time = 0\n \n with torch.no_grad():\n torch.cuda.synchronize()\n Input = to_var(images).contiguous()\n if i >= 0:\n a = time.perf_counter()\n\n Scale1,Scale2,Scale3,res2,res3 = Retinex(Input)\n\n olda = a\n a = time.perf_counter()\n\n total_time = total_time + a - olda\n\n\n print('%d/500, time: %.5f sec ' % ((j+1),total_time / (j+1)), end=\"\\n\")\n j=j+1\n else:\n Scale1,Scale2,Scale3,res2,res3 = Retinex(Input)\n\n\n save_image(Scale3.data, './data/result/%d.png' % (i + 1))\n\n\n"
] |
[
[
"torch.mean",
"torch.abs",
"torch.cuda.synchronize",
"torch.load",
"torch.set_grad_enabled",
"torch.no_grad",
"torch.cuda.is_available",
"torch.nn.DataParallel",
"torch.autograd.Variable"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
huhuhang/yolov3
|
[
"6c254b3f453c394046381e1c00cb0908b8f97b3a"
] |
[
"yolo_layer.py"
] |
[
"import math\nimport numpy as np\nimport sys\nimport time\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nfrom .utils import bbox_iou, multi_bbox_ious\n\n\nclass YoloLayer(nn.Module):\n def __init__(self, anchors, stride, num_classes): \n super().__init__()\n self.anchors, self.stride = np.array(anchors), stride\n self.num_classes = num_classes\n\n def get_masked_anchors(self):\n return self.anchors/self.stride\n\n def get_region_boxes(self, output, conf_thresh):\n if output.dim() == 3: output = output.unsqueeze(0) \n device = output.device # torch.device(torch_device)\n anchors = torch.from_numpy(self.get_masked_anchors().astype(np.float32)).to(device)\n \n nB = output.size(0)\n nA = len(anchors)\n nC = self.num_classes\n nH = output.size(2)\n nW = output.size(3)\n cls_anchor_dim = nB*nA*nH*nW\n\n assert output.size(1) == (5+nC)*nA\n\n # if you want to debug this is how you get the indexes where objectness is high\n #output = output.view(nB, nA, 5+nC, nH, nW)\n #inds = torch.nonzero((torch.sigmoid(output.view(nB, nA, 5+nC, nH, nW)[:,:,4,:,:]) > conf_thresh))\n\n output = output.view(nB*nA, 5+nC, nH*nW).transpose(0,1).contiguous().view(5+nC, cls_anchor_dim)\n\n grid_x = torch.linspace(0, nW-1, nW).repeat(nB*nA, nH, 1).view(cls_anchor_dim).to(device)\n grid_y = torch.linspace(0, nH-1, nH).repeat(nW,1).t().repeat(nB*nA, 1, 1).view(cls_anchor_dim).to(device)\n ix = torch.LongTensor(range(0,2)).to(device)\n anchor_w = anchors.index_select(1, ix[0]).repeat(1, nB, nH*nW).view(cls_anchor_dim)\n anchor_h = anchors.index_select(1, ix[1]).repeat(1, nB, nH*nW).view(cls_anchor_dim)\n\n xs, ys = torch.sigmoid(output[0]) + grid_x, torch.sigmoid(output[1]) + grid_y\n ws, hs = torch.exp(output[2]) * anchor_w.detach(), torch.exp(output[3]) * anchor_h.detach()\n det_confs = torch.sigmoid(output[4])\n\n cls_confs = torch.nn.Softmax(dim=1)(output[5:5+nC].transpose(0,1)).detach()\n cls_max_confs, cls_max_ids = torch.max(cls_confs, 1)\n cls_max_confs = cls_max_confs.view(-1)\n cls_max_ids = cls_max_ids.view(-1)\n\n\n det_confs = det_confs.to('cpu') #, non_blocking=True for torch 4.1?\n cls_max_confs = cls_max_confs.to('cpu')\n cls_max_ids = cls_max_ids.to('cpu')\n xs, ys = xs.to('cpu'), ys.to('cpu')\n ws, hs = ws.to('cpu'), hs.to('cpu')\n\n\n all_boxes = [[] for i in range(nB)]\n \n inds = torch.LongTensor(range(0,len(det_confs)))\n for ind in inds[det_confs > conf_thresh]:\n bcx = xs[ind]\n bcy = ys[ind]\n bw = ws[ind]\n bh = hs[ind]\n # box = [bcx/nW, bcy/nH, bw/nW, bh/nH, det_confs[ind], cls_max_confs[ind], cls_max_ids[ind]]\n box = [bcx/nW, bcy/nH, bw/nW, bh/nH, det_confs[ind], cls_max_confs[ind], cls_max_ids[ind]]\n box = [i.item() for i in box]\n\n batch = math.ceil(ind/(nA*nH*nW))\n all_boxes[batch].append(box)\n\n return all_boxes\n\n\n def build_targets(self, pred_boxes, target, anchors, nH, nW):\n self.ignore_thresh = 0.5\n self.truth_thresh = 1.\n\n # Works faster on CPU than on GPU.\n devi = torch.device('cpu')\n pred_boxes = pred_boxes.to(devi)\n target = target.to(devi)\n anchors = anchors.to(devi)\n\n #max_targets = target[0].view(-1,5).size(0) # 50\n nB = target.size(0)\n nA = len(anchors)\n\n anchor_step = anchors.size(1) # anchors[nA][anchor_step]\n conf_mask = torch.ones (nB, nA, nH, nW)\n coord_mask = torch.zeros(nB, nA, nH, nW)\n cls_mask = torch.zeros(nB, nA, nH, nW)\n tcoord = torch.zeros( 4, nB, nA, nH, nW)\n tconf = torch.zeros(nB, nA, nH, nW)\n tcls = torch.zeros(nB, nA, nH, nW)\n #twidth, theight = self.net_width/self.stride, self.net_height/self.stride\n 
twidth, theight = nW, nH\n nAnchors = nA*nH*nW\n\n for b in range(nB):\n cur_pred_boxes = pred_boxes[b*nAnchors:(b+1)*nAnchors].t()\n cur_ious = torch.zeros(nAnchors)\n tbox = target[b].view(-1,5)\n\n # If the bounding box prior is not the best but does overlap a ground truth object by\n # more than some threshold we ignore the prediction (conf_mask)\n for t in range(tbox.size(0)):\n if tbox[t][1] == 0:\n break\n gx, gy = tbox[t][1] * nW, tbox[t][2] * nH\n gw, gh = tbox[t][3] * twidth, tbox[t][4] * theight\n cur_gt_boxes = torch.FloatTensor([gx, gy, gw, gh]).repeat(nAnchors,1).t()\n cur_ious = torch.max(cur_ious, multi_bbox_ious(cur_pred_boxes, cur_gt_boxes, x1y1x2y2=False))\n ignore_ix = cur_ious>self.ignore_thresh\n conf_mask[b][ignore_ix.view(nA,nH,nW)] = 0\n\n for t in range(tbox.size(0)):\n if tbox[t][1] == 0:\n break\n # nGT += 1\n gx, gy = tbox[t][1] * nW, tbox[t][2] * nH\n gw, gh = tbox[t][3] * twidth, tbox[t][4] * theight\n gw, gh = gw.float(), gh.float()\n gi, gj = int(gx), int(gy)\n\n tmp_gt_boxes = torch.FloatTensor([0, 0, gw, gh]).repeat(nA,1).t()\n anchor_boxes = torch.cat((torch.zeros(nA, anchor_step), anchors),1).t()\n _, best_n = torch.max(multi_bbox_ious(tmp_gt_boxes, anchor_boxes, x1y1x2y2=False), 0)\n\n coord_mask[b][best_n][gj][gi] = 1\n cls_mask [b][best_n][gj][gi] = 1\n conf_mask [b][best_n][gj][gi] = 1\n tcoord [0][b][best_n][gj][gi] = gx - gi\n tcoord [1][b][best_n][gj][gi] = gy - gj\n tcoord [2][b][best_n][gj][gi] = math.log(gw/anchors[best_n][0])\n tcoord [3][b][best_n][gj][gi] = math.log(gh/anchors[best_n][1])\n tcls [b][best_n][gj][gi] = tbox[t][0]\n tconf [b][best_n][gj][gi] = 1 # yolov1 would have used iou-value here\n\n return coord_mask, conf_mask, cls_mask, tcoord, tconf, tcls\n\n\n def get_loss(self, output, target, return_single_value=True):\n device = output.device\n\n anchors = torch.from_numpy(self.get_masked_anchors().astype(np.float32)).to(device)\n\n nB = output.data.size(0) # batch size\n nA = len(anchors)\n nC = self.num_classes\n nH = output.data.size(2)\n nW = output.data.size(3)\n cls_anchor_dim = nB*nA*nH*nW\n\n output = output.view(nB, nA, (5+nC), nH, nW)\n\n ix = torch.LongTensor(range(0,5)).to(device)\n coord = output.index_select(2, ix[0:4]).view(nB*nA, -1, nH*nW).transpose(0,1).contiguous().view(4,cls_anchor_dim) # x, y, w, h\n coord[0:2] = coord[0:2].sigmoid() # x, y: bx = σ(tx) (+ cx)\n conf = output.index_select(2, ix[4]).view(nB, nA, nH, nW).sigmoid()\n\n grid_x = torch.linspace(0, nW-1, nW).repeat(nB*nA, nH, 1).view(cls_anchor_dim).to(device)\n grid_y = torch.linspace(0, nH-1, nH).repeat(nW,1).t().repeat(nB*nA, 1, 1).view(cls_anchor_dim).to(device)\n anchor_w = anchors.index_select(1, ix[0]).repeat(1, nB*nH*nW).view(cls_anchor_dim)\n anchor_h = anchors.index_select(1, ix[1]).repeat(1, nB*nH*nW).view(cls_anchor_dim)\n\n pred_boxes = torch.FloatTensor(4, cls_anchor_dim).to(device)\n pred_boxes[0] = coord[0] + grid_x # bx = σ(tx) + cx\n pred_boxes[1] = coord[1] + grid_y\n pred_boxes[2] = coord[2].exp() * anchor_w # pw*e(tw)\n pred_boxes[3] = coord[3].exp() * anchor_h \n pred_boxes = pred_boxes.transpose(0,1).contiguous().view(-1,4)\n\n coord_mask, conf_mask, cls_mask, tcoord, tconf, tcls = \\\n self.build_targets(pred_boxes.detach(), target.detach(), anchors.detach(), nH, nW)\n\n cls_grid = torch.linspace(5,5+nC-1,nC).long().to(device)\n cls = output.index_select(2, cls_grid)\n cls = cls.view(nB*nA, nC, nH*nW).transpose(1,2).contiguous().view(cls_anchor_dim, nC)\n cls_mask = (cls_mask == 1)\n tcls = tcls[cls_mask].long().view(-1)\n 
cls_mask = cls_mask.view(-1, 1).repeat(1,nC).to(device)\n cls = cls[cls_mask].view(-1, nC)\n \n tcoord = tcoord.view(4, cls_anchor_dim).to(device)\n tconf, tcls = tconf.to(device), tcls.to(device)\n coord_mask, conf_mask = coord_mask.view(cls_anchor_dim).to(device), conf_mask.to(device)\n\n loss_coord = nn.MSELoss(size_average=False)(coord*coord_mask, tcoord*coord_mask)/2\n loss_conf = nn.MSELoss(size_average=False)(conf*conf_mask, tconf*conf_mask)\n loss_cls = nn.CrossEntropyLoss(size_average=False)(cls, tcls) if cls.size(0) > 0 else 0\n loss = loss_coord + loss_conf + loss_cls\n\n if math.isnan(loss.item()):\n print(conf, tconf) \n raise ValueError('YoloLayer has isnan in loss')\n #sys.exit(0)\n \n if return_single_value: return loss\n else: return [loss, loss_coord, loss_conf, loss_cls]\n"
] |
[
[
"torch.nn.Softmax",
"torch.sigmoid",
"torch.nn.CrossEntropyLoss",
"torch.ones",
"torch.max",
"torch.linspace",
"torch.zeros",
"torch.exp",
"torch.FloatTensor",
"torch.device",
"numpy.array",
"torch.nn.MSELoss"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
sositon/IML.HUJI
|
[
"aaf32089d0ccf4212e33d306bab6ac19b6275e8d"
] |
[
"IMLearn/learners/regressors/linear_regression.py"
] |
[
"from __future__ import annotations\nfrom typing import NoReturn\nfrom ...base import BaseEstimator\nimport numpy as np\nfrom numpy.linalg import pinv\nfrom ...metrics.loss_functions import mean_square_error\n\n\nclass LinearRegression(BaseEstimator):\n \"\"\"\n Linear Regression Estimator\n\n Solving Ordinary Least Squares optimization problem\n \"\"\"\n\n def __init__(self, include_intercept: bool = True) -> LinearRegression:\n \"\"\"\n Instantiate a linear regression estimator\n\n Parameters\n ----------\n include_intercept: bool, default=True\n Should fitted model include an intercept or not\n\n Attributes\n ----------\n include_intercept_: bool\n Should fitted model include an intercept or not\n\n coefs_: ndarray of shape (n_features,) or (n_features+1,)\n Coefficients vector fitted by linear regression. To be set in\n `LinearRegression.fit` function.\n \"\"\"\n super().__init__()\n self.include_intercept_, self.coefs_ = include_intercept, None\n\n def _fit(self, X: np.ndarray, y: np.ndarray) -> NoReturn:\n \"\"\"\n Fit Least Squares model to given samples\n\n Parameters\n ----------\n X : ndarray of shape (n_samples, n_features)\n Input data to fit an estimator for\n\n y : ndarray of shape (n_samples, )\n Responses of input data to fit to\n\n Notes\n -----\n Fits model with or without an intercept depending on value of `self.include_intercept_`\n \"\"\"\n # todo maybe I should add X.reshape(-1,1) inside this function?\n X = self.reshape_samples_with_intercept(X)\n self.coefs_ = pinv(X)@y\n\n def _predict(self, X: np.ndarray) -> np.ndarray:\n \"\"\"\n Predict responses for given samples using fitted estimator\n\n Parameters\n ----------\n X : ndarray of shape (n_samples, n_features)\n Input data to predict responses for\n\n Returns\n -------\n responses : ndarray of shape (n_samples, )\n Predicted responses of given samples\n \"\"\"\n X = self.reshape_samples_with_intercept(X)\n return X @ self.coefs_\n\n def _loss(self, X: np.ndarray, y: np.ndarray) -> float:\n \"\"\"\n Evaluate performance under MSE loss function\n\n Parameters\n ----------\n X : ndarray of shape (n_samples, n_features)\n Test samples\n\n y : ndarray of shape (n_samples, )\n True labels of test samples\n\n Returns\n -------\n loss : float\n Performance under MSE loss function\n \"\"\"\n y_pred = self.predict(X)\n return mean_square_error(y, y_pred)\n\n def reshape_samples_with_intercept(self, X):\n \"\"\"\n add ones column (from the left) if include_intercept = True\n \"\"\"\n # X.reshape(-1, 1)\n return np.append(np.ones_like(X), X, axis=1) if self.include_intercept_ else X"
] |
[
[
"numpy.linalg.pinv",
"numpy.ones_like"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
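`_fit` above solves ordinary least squares with the Moore-Penrose pseudo-inverse, `coefs_ = pinv(X) @ y`. One detail worth noting: `reshape_samples_with_intercept` prepends `np.ones_like(X)`, i.e. a whole matrix of ones, so for multi-feature inputs the coefficient vector comes out longer than the documented `(n_features+1,)`. A standalone sketch of the usual single-ones-column form, on synthetic data (all values below are illustrative):

```python
import numpy as np
from numpy.linalg import pinv

rng = np.random.default_rng(0)
X = rng.normal(size=(200, 3))                    # n_samples x n_features
y = X @ np.array([1.5, -2.0, 0.5]) + 3.0         # known weights, intercept 3.0

# Prepend a single column of ones for the intercept term.
X1 = np.hstack([np.ones((X.shape[0], 1)), X])

# Ordinary least squares via the pseudo-inverse, as in _fit.
coefs = pinv(X1) @ y                             # ~[3.0, 1.5, -2.0, 0.5]

mse = np.mean((y - X1 @ coefs) ** 2)             # ~0 for this noiseless data
print(coefs, mse)
```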
Cam2337/RecycleNet-DCGAN
|
[
"a6691d1e3e03e286192a1791fd323bd2f442ad9f"
] |
[
"src/train.py"
] |
[
"#!/usr/bin/env python3\n\"\"\"Training module for DCGAN.\"\"\"\n\nimport argparse\nimport logging\nlogging.root.setLevel(logging.INFO)\nimport os\nfrom typing import Any, Dict, List, Tuple\n\nimport model\nimport utils\nimport math\nimport numpy as np\nimport torch\nimport torch.cuda\nimport torch.nn as nn\nimport torch.optim as optim\nimport torchvision.datasets as dset\nimport torchvision.transforms as transforms\nimport torchvision.utils as vutils\n\nfrom torchvision.utils import save_image\n\n# Constants #\n\nRESULTS_DIR = 'results'\nNUM_CHANNELS = 3\nOPTIMAL_D_SCORE = 0.5\n\nFAKE_LABEL = 0\nREAL_LABEL = 1\nSOFT_COEFF = 0.25\n\nMIN_LR = 10e-5\nMAX_LR = 1.0\n\nCHECKPOINT_FREQ = 500\nIMG_SAVE_COEF = 0.98\nGAN_ERROR_THRESHOLD = 0.98\nGRID_SIZE = 64\nLOGGING_FREQ = 10\nNUM_FAKES = 500\n\n# Create figures directory\nFIGURES_DIR = os.path.join(RESULTS_DIR, 'figures')\nMODEL_DIR = os.path.join(RESULTS_DIR, 'model')\n\nos.makedirs(FIGURES_DIR, exist_ok=True)\nos.makedirs(MODEL_DIR, exist_ok=True)\n\n# Public Functions #\n\ndef parse_args():\n \"\"\"Parses command-line arguments.\"\"\"\n parser = argparse.ArgumentParser()\n parser.add_argument(\n 'dataroot',\n help='The root of the directory whose image data to process.',\n type=str,\n )\n parser.add_argument(\n 'name',\n help='The base name of this batch of synthesized images, e.g. \"metal\".',\n type=str,\n )\n parser.add_argument(\n '--batch-size',\n help='The batch size for batch training.',\n type=int,\n default=128,\n )\n parser.add_argument(\n '--beta1',\n help='The Beta1 parameter for Adam Optimization.',\n type=float,\n default=0.5,\n )\n parser.add_argument(\n '--beta2',\n help='The Beta2 parameter for Adam Optimization.',\n type=float,\n default=0.999,\n )\n parser.add_argument(\n '--image-size',\n help='The size of the images.',\n type=int,\n default=64,\n )\n parser.add_argument(\n '--learning-rate',\n help='The learning rate to apply during parameter updates.',\n type=float,\n default=0.0002,\n )\n parser.add_argument(\n '--netD-checkpoint',\n help='Initializes the Discriminator from the specified checkpoint.',\n type=str,\n )\n parser.add_argument(\n '--netG-checkpoint',\n help='Initializes the Generator from the specified checkpoint.',\n type=str,\n )\n parser.add_argument(\n '--num-epochs',\n help='The number of training epochs to run.',\n type=int,\n default=5,\n )\n parser.add_argument(\n '--num-gpus',\n help='The number of GPUs available for training. 
Use 0 for CPU.',\n type=int,\n default=1,\n )\n parser.add_argument(\n '--num-trials',\n help='The number of trials to use during hyperparameter searching.',\n type=int,\n default=10,\n )\n parser.add_argument(\n '--num-trial-cpus',\n help='The number of CPUs available during hyperparameter searching.',\n type=int,\n default=1,\n )\n parser.add_argument(\n '--num-trial-gpus',\n help='The number of GPUs available during hyperparameter searching.',\n type=int,\n default=1,\n )\n parser.add_argument(\n '--num-workers',\n help='The number of parallel workers for the DataLoader.',\n type=int,\n default=2,\n )\n\n # Perform some basic argument validation\n args = parser.parse_args()\n if not os.path.exists(args.dataroot) and os.path.isdir(args.dataroot):\n raise ValueError(f'{args.dataroot} is not a valid directory.')\n return args\n\ndef synthesize_training_data(\n netG: model.Generator, fixed_noise: torch.Tensor, epoch: int):\n \"\"\"Saves synthesized images given a latent vector and a generator model.\"\"\"\n with torch.no_grad():\n fake = netG(fixed_noise).detach().cpu()\n for s in range(NUM_FAKES):\n save_image(\n fake[s,:,:,:],\n os.path.join(FIGURES_DIR, f'fake_out_{epoch}_{s}.png'))\n\ndef load_checkpoint(\n model: nn.Module,\n optimizer: optim.Optimizer,\n filepath: str) -> Tuple[int, float]:\n \"\"\"Loads model and optimizer state from the provided .model file.\"\"\"\n if not os.path.exists(filepath):\n raise ValueError(f'Filepath: {filepath} does not exist!')\n\n logging.info(f'Loading checkpoint: {filepath}...')\n checkpoint = torch.load(filepath)\n model.load_state_dict(checkpoint['model_state_dict'])\n optimizer.load_state_dict(checkpoint['optimizer_state_dict'])\n\n return (checkpoint['epoch'], checkpoint['loss'])\n\ndef save_checkpoint(\n model: nn.Module,\n optimizer: optim.Optimizer,\n epoch: int,\n loss: float,\n filepath: str):\n \"\"\"Saves model and optimizer state to a filepath.\"\"\"\n torch.save({\n 'epoch': epoch,\n 'model_state_dict': model.state_dict(),\n 'loss': loss,\n 'optimizer_state_dict': optimizer.state_dict(),\n }, filepath)\n\ndef train(config: Dict[str, Any]) -> Tuple[List[float], List[float], List[torch.Tensor]]:\n \"\"\"The primary function for DCGAN training.\n\n Note: Per GANHacks, Discriminator training is conducted in *two* separate\n batch training sessions: one with all-real data, and one with all-fake data.\n See more at: https://github.com/soumith/ganhacks.forward\n\n Args:\n config: A dict with the following parameters:\n * netG: The Generator to train.\n * netG_checkpoint: An optional .model checkpoint to load from.\n * netD: The Discriminator to train.\n * netD_checkpoint: An optional .model checkpoint to load from.\n * dataloader: The PyTorch DataLoader used to iterate through data.\n * device: The device that the models are loaded onto.\n * learning_rate: The learning rate to apply during updates.\n * num_epochs: The number of training epochs.\n * beta1: The Beta1 parameter of Adam optimization.\n * beta2: The Beta2 parameter of Adam optimization.\n * pre_epoch: An optional hook for processing prior-to the epoch.\n * post_epoch: An optional hook for processing post-epoch.\n Returns:\n A tuple of lists containing the loss of the Generator and the\n Discriminator, respectively, from each training iteration, along with\n a list of images.\n \"\"\"\n\n # Set parameters\n netG = config['netG']\n netD = config['netD']\n dataloader = config['dataloader']\n device = config['device']\n learning_rate = config['learning_rate']\n num_epochs = 
config['num_epochs']\n beta1 = config['beta1']\n beta2 = config['beta2']\n\n # Retrieve optional configuration parameters\n pre_epoch = config.get('pre_epoch')\n post_epoch = config.get('post_epoch')\n netG_checkpoint = config.get('netG_checkpoint')\n netD_checkpoint = config.get('netD_checkpoint')\n\n # Batch of input latent vectors\n fixed_noise = torch.randn(\n NUM_FAKES, netG.latent_vector_size, 1, 1, device=device)\n\n # Setup loss function and optimizers\n lossF = nn.BCELoss()\n optD = optim.Adam(netD.parameters(), lr=learning_rate, betas=(beta1, beta2))\n optG = optim.Adam(netG.parameters(), lr=learning_rate, betas=(beta1, beta2))\n\n # Load from saved state, if provided\n checkpoint_epochs = []\n if netG_checkpoint is not None:\n G_epoch, _ = load_checkpoint(netG, optG, netG_checkpoint)\n checkpoint_epochs.append(G_epoch)\n if netD_checkpoint is not None:\n D_epoch, _ = load_checkpoint(netD, optD, netD_checkpoint)\n checkpoint_epochs.append(D_epoch)\n\n # Dump model configuration\n logging.info(f'Generator:\\n{netG}')\n logging.info(f'Discriminator:\\n{netD}')\n\n # Main training loop\n img_list = []\n G_losses = []\n D_losses = []\n D_batch_scores = []\n iters = 0\n\n logging.info('Starting training...')\n epoch = min(checkpoint_epochs) if checkpoint_epochs else 0\n while epoch < num_epochs:\n\n logging.info(f'Starting epoch: {epoch}...')\n\n # Call into pre-epoch handler, if present\n if pre_epoch is not None:\n pre_epoch(\n epoch=epoch,\n G_losses=G_losses,\n D_losses=D_losses,\n D_batch_scores=D_batch_scores)\n\n for i, data in enumerate(dataloader, 0):\n ############################\n # (1) Update D network: maximize log(D(x)) + log(1 - D(G(z)))\n ###########################\n ## Real data\n netD.zero_grad()\n\n ## Format batch\n real_cpu = data[0].to(device)\n b_size = real_cpu.size(0)\n label = torch.full((b_size,), REAL_LABEL, device=device)\n utils.add_label_noise(label, p_flip=0.05)\n\n r_label_soft = (\n REAL_LABEL +\n (torch.randn((b_size,), device=device)*SOFT_COEFF))\n r_label_noisy_soft = torch.mul(label, r_label_soft)\n\n ## Forward pass real data through discriminator\n output = netD(real_cpu).view(-1)\n\n ## Calculate loss on all-real batch; calculate gradients\n errD_real = lossF(output, r_label_noisy_soft)\n errD_real.backward()\n D_x = output.mean().item()\n\n ## Fake data\n noise = torch.randn(\n b_size, netG.latent_vector_size, 1, 1, device=device)\n\n ## Generate fake image batch with G\n fake = netG(noise)\n label.fill_(FAKE_LABEL)\n utils.add_label_noise(label, p_flip=0.05)\n f_label_noisy_soft = (\n label +\n torch.abs(torch.randn((b_size,), device=device))*SOFT_COEFF)\n\n ## Classify all fake batch with D\n output = netD(fake.detach()).view(-1)\n\n ## Calculate D's loss on the all-fake batch\n errD_fake = lossF(output, f_label_noisy_soft)\n\n ## Calculate the gradients for this batch\n errD_fake.backward()\n D_G_z1 = output.mean().item()\n\n ## Add the gradients from the all-real and all-fake batches; Update\n errD = errD_real + errD_fake\n optD.step()\n\n ############################\n # (2) Update G network: maximize log(D(G(z)))\n ###########################\n netG.zero_grad()\n label.fill_(REAL_LABEL) # fake labels are real for generator cost\n\n # Since we just updated D, perform another forward pass of all-fake\n # batch through D\n output = netD(fake).view(-1)\n\n # Calculate G's loss based on this output\n errG = lossF(output, label)\n\n # Calculate gradients for G; Update\n errG.backward()\n D_G_z2 = output.mean().item()\n optG.step()\n\n # Save 
losses for plotting\n G_losses.append(errG.item())\n D_losses.append(errD.item())\n\n # Save discriminator output\n D_batch_scores.append(D_x)\n\n # Output training stats\n if i % LOGGING_FREQ == 0:\n logging.info(f'[{epoch}/{num_epochs}][{i}/{len(dataloader)}]\\t'\n f'Loss_D: {errD.item():.4f}\\tLoss_G: '\n f'{errG.item():.4f}\\t'\n f'D(x): {D_x:.4f}\\tD(G(z)): '\n f'{D_G_z1:.4f} / {D_G_z2:.4f}')\n\n\n # Save checkpoint; dump model and optimizer states along with grid\n if ((iters % CHECKPOINT_FREQ == 0) or\n ((epoch == num_epochs - 1) and (i == len(dataloader) - 1))):\n img_list.append(\n vutils.make_grid(\n fake[0:GRID_SIZE], padding=2, normalize=True))\n save_checkpoint(\n netG, optG, epoch, errG.item(),\n os.path.join(MODEL_DIR, f'modelG_{epoch}.model'))\n save_checkpoint(\n netD, optD, epoch, errD.item(),\n os.path.join(MODEL_DIR, f'modelD_{epoch}.model'))\n\n # If we're sufficiently late into training, and the generator is having\n # success fooling the discriminator, synthesize training images\n if ((epoch >= math.floor(IMG_SAVE_COEF * num_epochs)) and\n (errG.item() <= GAN_ERROR_THRESHOLD)):\n synthesize_training_data(fixed_noise, epoch)\n\n iters += 1\n\n # Call into post-epoch handler, if present\n epoch += 1\n if post_epoch is not None:\n post_epoch(\n epoch=epoch,\n G_losses=G_losses,\n D_losses=D_losses,\n avg_D_batch_scores=D_batch_scores)\n\n return (G_losses, D_losses, img_list)\n\ndef main():\n \"\"\"main.\"\"\"\n args = parse_args()\n\n device = torch.device((\n 'cuda:0' if torch.cuda.is_available and args.num_gpus > 0 else 'cpu'))\n logging.info(f'Running with device: {device}')\n\n # Initialize models\n netG = model.Generator().to(device)\n netD = model.Discriminator().to(device)\n if device.type == 'cuda' and args.num_gpus > 1:\n netG = nn.DataParallel(netG, list(range(args.num_gpus)))\n netD = nn.DataParallel(netD, list(range(args.num_gpus)))\n\n # Apply DCGAN paper weight-reinitialization\n # See more: https://arxiv.org/pdf/1511.06434.pdf\n netG.apply(utils.dcgan_weights_reinit)\n netD.apply(utils.dcgan_weights_reinit)\n\n # Load dataset and resize\n dataset = utils.data_synthesis(\n os.path.abspath(args.dataroot),\n image_size=(args.image_size, args.image_size, NUM_CHANNELS),\n custom_transforms=[\n transforms.ColorJitter(\n brightness=0.05,\n contrast=0.05,\n saturation=0.05,\n hue=0.03,\n ),\n transforms.RandomCrop(size=args.image_size),\n transforms.RandomHorizontalFlip(p=0.9),\n transforms.RandomVerticalFlip(p=0.9),\n transforms.Lambda(lambd=lambda img: img) # Identity transform\n ]\n )\n\n dataloader = torch.utils.data.DataLoader(\n dataset,\n batch_size=args.batch_size,\n shuffle=True,\n num_workers=args.num_workers,\n )\n\n config = {\n 'netG': netG,\n 'netG_checkpoint': args.netG_checkpoint,\n 'netD': netD,\n 'netD_checkpoint': args.netD_checkpoint,\n 'dataloader': dataloader,\n 'device': device,\n 'learning_rate': args.learning_rate,\n 'num_epochs': args.num_epochs,\n 'beta1': args.beta1,\n 'beta2': args.beta2,\n }\n\n logging.info('Beginning training loop...')\n G_losses, D_losses, img_list = train(config)\n utils.plot_results(\n device=device,\n dataloader=dataloader,\n G_losses=G_losses,\n D_losses=D_losses,\n img_list=img_list,\n name=args.name,\n outdir=FIGURES_DIR,\n )\n\nif __name__ == '__main__':\n main()\n"
] |
[
[
"torch.full",
"torch.load",
"torch.randn",
"torch.utils.data.DataLoader",
"torch.nn.BCELoss",
"torch.mul",
"torch.no_grad",
"torch.device"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
hertzsprung/seamless-wave-uq
|
[
"10a9b2e18d11cf3f4e711a90523f85758e5fb531"
] |
[
"swepc.python/swepc/test/topography.py"
] |
[
"import numpy as np\nimport sys\n\nclass Bump:\n def __init__(self, a_mean, a_stddev, halfWidth):\n self.a_mean = a_mean\n self.a_stddev = a_stddev\n self.halfWidth = halfWidth\n\n def z0(self, x):\n return self.a_mean*(1/np.cosh(np.pi/self.halfWidth*x))**2\n\n def z1(self, x):\n return np.sqrt(self.__dzda(x)**2 * self.a_stddev**2)\n\n def __dzda(self, x):\n return (1/np.cosh(np.pi/self.halfWidth*x))**2\n\nclass TwoBumps:\n def __init__(self, a_mean, a_stddev, halfWidth):\n self.bump = Bump(a_mean, a_stddev, halfWidth)\n\n def z0(self, x):\n z = 0.0\n\n if 30 < x and x <= 40:\n z = self.bump.a_mean\n\n return z + self.bump.z0(x)\n\n def z1(self, x):\n return self.bump.z1(x)\n\nclass RandomSmoothBump:\n def __init__(self, mesh, a_mean, a_stddev, halfWidth, a_min, a_max):\n self.C = mesh.C\n self.a_mean = a_mean\n self.a_stddev = a_stddev\n self.halfWidth = halfWidth\n self.a_min = a_min\n self.a_max = a_max\n\n def sample(self):\n a = np.random.normal(self.a_mean, self.a_stddev)\n while a < self.a_min or a > self.a_max:\n print(\"# rejecting a =\", a, file=sys.stderr)\n a = np.random.normal(self.a_mean, self.a_stddev)\n bump = Bump(a, 0.0, self.halfWidth)\n return [bump.z0(x) for x in self.C], a\n\nclass TsengTopography:\n def __init__(self, offset = 0.0):\n self.xs = [0.0, 50.0, 100.0, 150.0, 200.0, 250.0, 300.0, 350.0, 400.0,\n 425.0, 450.0, 470.0, 475.0, 500.0, 505.0, 530.0, 550.0, 565.0,\n 575.0, 600.0, 650.0, 700.0, 750.0, 800.0, 820.0, 900.0, 950.0,\n 1500.0]\n self.zs = np.array([0.0, 0.0, 2.5, 5.0, 5.0, 3.0, 5.0, 5.0, 7.5, 8.0,\n 9.0, 9.0, 9.1, 9.0, 9.0, 6.0, 5.5, 5.5, 5.0, 4.0, 3.0, 3.0,\n 2.3, 2.0, 1.2, 0.4, 0.0, 0.0])\n self.zs = self.zs + offset\n\n def z0(self, x):\n target_x = x\n i1, x1 = [(i, x) for i, x in enumerate(self.xs) if x < target_x][-1]\n i2, x2 = [(i, x) for i, x in enumerate(self.xs) if x >= target_x][0]\n z1, z2 = self.zs[i1], self.zs[i2]\n m = (z2 - z1)/(x2 - x1)\n c = z1 - m*x1\n return m*target_x + c\n\n def z1(self, x):\n return 0.0\n\nclass RandomTsengTopography:\n def __init__(self, mesh, stddev):\n self.C = mesh.C\n self.stddev = stddev \n\n def sample(self):\n offset = np.random.normal(0.0, self.stddev)\n deterministic = TsengTopography(offset)\n return [deterministic.z0(x) for x in self.C], offset\n"
] |
[
[
"numpy.random.normal",
"numpy.array",
"numpy.cosh"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
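The `Bump` topography above is the profile z0(x) = a·sech(πx/L)², with its 1-σ band obtained by first-order propagation, z1(x) = |∂z/∂a|·σ_a. A quick numerical check of both expressions (parameter values below are arbitrary):

```python
import numpy as np

a_mean, a_stddev, half_width = 0.8, 0.1, 10.0    # arbitrary illustration values

def sech2(x):
    # sech(pi * x / L) squared, the shape factor shared by z0 and dz/da.
    return (1.0 / np.cosh(np.pi / half_width * x)) ** 2

x = np.array([-20.0, -10.0, 0.0, 10.0, 20.0])
z0 = a_mean * sech2(x)                           # peaks at a_mean for x = 0
z1 = np.sqrt(sech2(x) ** 2 * a_stddev ** 2)      # peaks at a_stddev for x = 0
print(z0)
print(z1)
```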
shubham-shinde/Machine-Learning
|
[
"e49863c49171fd8b44f72f37112a744ea63df228"
] |
[
"naive bias/naive.py"
] |
[
"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Nov 13 16:50:52 2018\n\n@author: shubham\n\"\"\"\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\n\n#import the data\ndataset = pd.read_csv('cancer.csv')\n#index location = iloc\n#dataset is a 2d matrix\n#select all row in first column\nprint(dataset.head())\nX = dataset.iloc[:, :-1].values\ny = dataset.iloc[:,-1].values\n#data preprocessing\n\n#import knn function form sklearn\nfrom sklearn.naive_bayes import GaussianNB\n\n#create object\nnaive = GaussianNB()\n\n#train the model\nnaive.fit(X,y)\n\n#prepare the test data\nX_test = [[12, 70, 12], [13, 20, 13]]\n\n#test the model (returns the class)\nprediction = naive.predict(X_test)\n\nprint(prediction)"
] |
[
[
"pandas.read_csv",
"sklearn.naive_bayes.GaussianNB"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
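The `naive.py` script above fits `GaussianNB` on every row of `cancer.csv` and then predicts two hand-typed samples. A slightly more careful variant with a held-out test split and an accuracy score; the split ratio and random seed are arbitrary choices, and the CSV layout is assumed to match the original (features first, label in the last column):

```python
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.naive_bayes import GaussianNB
from sklearn.metrics import accuracy_score

dataset = pd.read_csv('cancer.csv')              # same file as the original script
X = dataset.iloc[:, :-1].values
y = dataset.iloc[:, -1].values

# Hold out a quarter of the rows so the reported accuracy is not in-sample.
X_train, X_test, y_train, y_test = train_test_split(
    X, y, test_size=0.25, random_state=0)

naive = GaussianNB()
naive.fit(X_train, y_train)
print('test accuracy:', accuracy_score(y_test, naive.predict(X_test)))
```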
LukasHedegaard/domain-adaptation-datasets
|
[
"44f6ae66743b4a855644a770093e34cf0ede9ce3"
] |
[
"tests/datasetops/testing_utils.py"
] |
[
"from pathlib import Path\nfrom typing import NamedTuple\nfrom datasetops.loaders import Loader\nimport numpy as np\n\n\nclass DatasetPaths(NamedTuple):\n FOLDER_DATA: str = \"folder_dataset_class_data/amazon/back_pack\"\n FOLDER_CLASS_DATA: str = \"folder_dataset_class_data/amazon\"\n FOLDER_DATASET_CLASS_DATA: str = \"folder_dataset_class_data\"\n MAT_SINGLE_WITH_MULTI_DATA: str = \"mat_single_with_multi_data\"\n\n\nDATASET_PATHS = DatasetPaths()\n\n\ndef get_test_dataset_path(dataset_path: str) -> str:\n return str((Path(__file__).parent.parent / \"recourses\" / dataset_path).absolute())\n\n\ndef from_dummy_data(num_total=11, with_label=False) -> Loader:\n a_ids = list(range(5))\n b_ids = list(range(5, num_total))\n\n def get_data(i):\n return (i,)\n\n def get_labelled_data(i):\n nonlocal a_ids\n return i, \"a\" if i < len(a_ids) else \"b\"\n\n ds = Loader(get_labelled_data if with_label else get_data)\n ds.extend(a_ids)\n ds.extend(b_ids)\n return ds\n\n\nDUMMY_NUMPY_DATA_SHAPE_1D = (18,)\nDUMMY_NUMPY_DATA_SHAPE_2D = (6, 3)\nDUMMY_NUMPY_DATA_SHAPE_3D = (2, 3, 3)\n\n\ndef from_dummy_numpy_data() -> Loader:\n a_ids = list(range(5))\n b_ids = list(range(5, 11))\n labels = [*[1 for _ in a_ids], *[2 for _ in b_ids]]\n\n num_samples = len(a_ids) + len(b_ids)\n data = np.arange(num_samples * DUMMY_NUMPY_DATA_SHAPE_1D[0]).reshape(\n (num_samples, DUMMY_NUMPY_DATA_SHAPE_1D[0])\n )\n # data = data / data.max()\n\n def get_data(idx):\n return data[idx], labels[idx]\n\n ds = Loader(get_data)\n ds.extend(a_ids)\n ds.extend(b_ids)\n return ds\n"
] |
[
[
"numpy.arange"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
jiangwei221/pointnet.pytorch
|
[
"a6cf0f2bf49355e2e81ef3f22ab87c500a7db6a7"
] |
[
"train_segmentation.py"
] |
[
"from __future__ import print_function\nimport argparse\nimport os\nimport random\nimport numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.nn.parallel\nimport torch.backends.cudnn as cudnn\nimport torch.optim as optim\nimport torch.utils.data\nimport torchvision.datasets as dset\nimport torchvision.transforms as transforms\nimport torchvision.utils as vutils\nfrom torch.autograd import Variable\nfrom datasets import PartDataset\nfrom pointnet import PointNetDenseCls\nimport torch.nn.functional as F\nimport json\nwith open('global_config.json') as f:\n global_config = json.load(f)\n\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--batchSize', type=int, default=32, help='input batch size')\nparser.add_argument('--workers', type=int, help='number of data loading workers', default=4)\nparser.add_argument('--nepoch', type=int, default=25, help='number of epochs to train for')\nparser.add_argument('--outf', type=str, default='seg', help='output folder')\nparser.add_argument('--model', type=str, default = '', help='model path')\nparser.add_argument('--dataset_path', type=str, default = global_config['dataset_path'], help='dataset path')\n\nopt = parser.parse_args()\nprint (opt)\n\nopt.manualSeed = random.randint(1, 10000) # fix seed\nprint(\"Random Seed: \", opt.manualSeed)\nrandom.seed(opt.manualSeed)\ntorch.manual_seed(opt.manualSeed)\n\ndataset = PartDataset(root = opt.dataset_path, classification = False, class_choice = ['Chair'])\ndataloader = torch.utils.data.DataLoader(dataset, batch_size=opt.batchSize,\n shuffle=True, num_workers=int(opt.workers))\n\ntest_dataset = PartDataset(root = opt.dataset_path, classification = False, class_choice = ['Chair'], train = False)\ntestdataloader = torch.utils.data.DataLoader(test_dataset, batch_size=opt.batchSize,\n shuffle=True, num_workers=int(opt.workers))\n\nprint(len(dataset), len(test_dataset))\nnum_classes = dataset.num_seg_classes\nprint('classes', num_classes)\ntry:\n os.makedirs(opt.outf)\nexcept OSError:\n pass\n\nblue = lambda x:'\\033[94m' + x + '\\033[0m'\n\n\nclassifier = PointNetDenseCls(k = num_classes)\n\nif opt.model != '':\n classifier.load_state_dict(torch.load(opt.model))\n\noptimizer = optim.SGD(classifier.parameters(), lr=0.01, momentum=0.9)\nclassifier.cuda()\n\nnum_batch = len(dataset)/opt.batchSize\n\nfor epoch in range(opt.nepoch):\n for i, data in enumerate(dataloader, 0):\n points, target = data\n points, target = Variable(points), Variable(target)\n points = points.transpose(2,1) \n points, target = points.cuda(), target.cuda() \n optimizer.zero_grad()\n classifier = classifier.train()\n pred, _ = classifier(points)\n pred = pred.view(-1, num_classes)\n target = target.view(-1,1)[:,0] - 1\n #print(pred.size(), target.size())\n loss = F.nll_loss(pred, target)\n loss.backward()\n optimizer.step()\n pred_choice = pred.data.max(1)[1]\n correct = pred_choice.eq(target.data).cpu().sum()\n print('[%d: %d/%d] train loss: %f accuracy: %f' %(epoch, i, num_batch, loss.item(), correct.item()/float(opt.batchSize * 2500)))\n \n if i % 10 == 0:\n j, data = next(enumerate(testdataloader, 0))\n points, target = data\n points, target = Variable(points), Variable(target)\n points = points.transpose(2,1) \n points, target = points.cuda(), target.cuda()\n classifier = classifier.eval()\n pred, _ = classifier(points)\n pred = pred.view(-1, num_classes)\n target = target.view(-1,1)[:,0] - 1\n\n loss = F.nll_loss(pred, target)\n pred_choice = pred.data.max(1)[1]\n correct = pred_choice.eq(target.data).cpu().sum()\n 
print('[%d: %d/%d] %s loss: %f accuracy: %f' %(epoch, i, num_batch, blue('test'), loss.item(), correct.item()/float(opt.batchSize * 2500)))\n \n torch.save(classifier.state_dict(), '%s/seg_model_%d.pth' % (opt.outf, epoch))"
] |
[
[
"torch.autograd.Variable",
"torch.manual_seed",
"torch.nn.functional.nll_loss",
"torch.load"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
OMS-NetZero/FAIR
|
[
"1d89c23096ae583c06581b1a2e1cb59f1aa44e13"
] |
[
"fair/forcing/aerosols.py"
] |
[
"from __future__ import division\n\nimport numpy as np\nfrom ..constants import molwt\nfrom ..RCPs import rcp45\n\nr45e = rcp45.Emissions\n\ndef Stevens(emissions, stevens_params=np.array([0.001875, 0.634, 60.]),\n ref_isSO2=True, E_pi=0):\n \"\"\"Calculates aerosol forcing based on Stevens (2015) that relates sulphate\n aerosol forcing to SOx emissions in a logarithmic fashion.\n\n Input:\n emissions: anthropogenic emissions database\n Keywords:\n stevens_params: 3 element array\n 0. scaling parameter for ERFari (alpha)\n 1. scaling parameter for ERFaci (beta)\n 2. natural emissions of SOx in Mt/yr\n ref_isSO2: True if E_SOx_nat is in units of SO2 rather than S.\n E_pi: pre-industrial/reference emissions of SO2 (or S).\n Output:\n ERFari, ERFaci: aerosol effective radiative forcing due to \n aerosol-radiation interactions and aerosol-cloud interactions.\n \"\"\"\n\n alpha, beta, E_SOx_nat = stevens_params\n\n factor = 1\n if ref_isSO2:\n factor = molwt.SO2/molwt.S\n em_SOx = emissions[:,5] * factor\n em_pi = E_pi * factor\n\n ERFari = -alpha * (em_SOx-em_pi)\n# ERFaci = (\n# (-beta * np.log(em_SOx/E_SOx_nat + 1)) - \n# (-beta * np.log(em_pi/E_SOx_nat + 1)) )\n ERFaci = (-beta * np.log((em_SOx-em_pi)/E_SOx_nat + 1))\n return ERFari, ERFaci\n\n\ndef aerocom_direct(emissions,\n beta = np.array(\n [-6.2227e-3, 0.0, -3.8392e-4, -1.16551e-3, 1.601537e-2, -1.45339e-3,\n -1.55605e-3]), E_pi=np.zeros(40), diagnostics=None\n ):\n\n \"\"\"Calculates direct aerosol forcing based on linear relationships between\n emissions and forcing in Aerocom models.\n\n Reference: Myhre et al., 2013: https://www.atmos-chem-phys.net/13/1853/2013\n\n If inputs from an RCPs SCEN file are used, the units will be correct.\n\n Inputs:\n emissions: (nt x 40) emissions array\n Keywords:\n beta: 7-element array of forcing efficiencies in W m-2 (Mt yr-1)-1 for\n SOx, CO, NMVOC, NOx, BC, OC, NH3 (in that order)\n E_pi: pre-industrial emissions (40 element array)\n diagnostics: if 'AR6', give split of direct aerosol effect by species\n Outputs:\n Forcing time series\n \"\"\"\n\n em_SOx, em_CO, em_NMVOC, em_NOx, em_BC, em_OC, em_NH3 = \\\n emissions[:,[5, 6, 7, 8, 9, 10, 11]].T\n\n F_SOx = beta[0] * (em_SOx - E_pi[5])\n F_CO = beta[1] * (em_CO - E_pi[6])\n F_NMVOC = beta[2] * (em_NMVOC - E_pi[7])\n F_NOx = beta[3] * (em_NOx - E_pi[8])\n F_BC = beta[4] * (em_BC - E_pi[9])\n F_OC = beta[5] * (em_OC - E_pi[10])\n F_NH3 = beta[6] * (em_NH3 - E_pi[11])\n\n if diagnostics == 'AR6':\n ERFari = np.column_stack([F_SOx, F_CO, F_NMVOC, F_NOx, F_BC, F_OC, F_NH3])\n else:\n ERFari = F_SOx+F_CO+F_NMVOC+F_NOx+F_BC+F_OC+F_NH3\n \n return ERFari\n\n\ndef ghan_indirect(emissions, fix_pre1850_RCP=True, scale_AR5=False,\n ghan_params=np.array([-1.95011431, 0.01107147, 0.01387492]),\n E_pi=np.zeros(40)):\n \"\"\"Estimates the aerosol indirect effect based on the simple model in\n Ghan et al., (2013), doi:10.1002/jgrd.50567.\n\n This function is just an emulator - a full implementation in Python of the\n Ghan routine (originally coded in Fortran) exists, but will require\n optimisation before it can be used in FaIR. I hope to make the full version\n available in a future version.\n\n A 500-member Latin Hypercube sample of emissions of SOx, NMVOC, BC and OC\n was prepared offline and run through the Ghan simple model and a functional\n relationship fit to the output. 
SOA aerosol (based on NMVOC emissions) is\n sometimes unreliable and does not exert a strong dependence on the ERF, and\n OC+BC is parameterised as primary organic matter, so the resulting output\n is a function of SOx and (BC+OC) emissions.\n\n Inputs:\n emissions: (nt x 40) numpy emissions array\n Keywords:\n fix_pre1850_RCP: Use different relationship for 1750/65 to 1850 based\n on anthropogenic emissions from Skeie et al (2011)\n for 1750 (atmos-chem-phys.net/11/11827/2011)\n scale_AR5: If True, scale the forcing output so that the best\n estimate forcing in 2011 is -0.45 W/m2 based on 2011\n emissions from the RCPs. The Ghan emulator is built on\n results from the CAM5 GCM. As reported in AR5 WG1 Ch7,\n GCMs tend to overestimate forcing from aerosol-cloud\n interactions.\n ghan_params: 3-element numpy array\n 0: scale factor\n 1: sensitivity to SOx emissions\n 2: sensitivity to BC+OC emissions\n Outputs:\n Forcing timeseries\n \"\"\"\n\n year, em_SOx, em_BC, em_OC = emissions[:,[0, 5, 9, 10]].T\n\n def _ERFaci(em,\n ghan_params=np.array([-1.95011431, 0.01107147, 0.01387492])):\n scale = ghan_params[0]\n b_SOx = ghan_params[1]\n b_POM = ghan_params[2]\n return scale*np.log(1+b_SOx*em[0]+b_POM*em[1])\n\n # PI forcing was not zero as there were some emissions. Use estimates\n # from Skeie et al, 2011 for 1750 forcing.\n E_1765 = np.array([1.0, 11.2])\n nt = len(year)\n F_pd = np.zeros(nt)\n for i in range(nt):\n if year[i]>=1850 or fix_pre1850_RCP==False:\n F_pd[i] = _ERFaci([em_SOx[i], em_BC[i]+em_OC[i]],\n ghan_params=ghan_params)\n else:\n # linearly interpolate between 1765 and 1850\n E_1850 = np.array([r45e.sox[85], r45e.bc[85]+r45e.oc[85]])\n F_pd[i] = _ERFaci((year[i]-1765)/85.*E_1850 +\n (1850-year[i])/85.*E_1765,\n ghan_params=ghan_params)\n\n # 1765 emissions = zero forcing\n F_1765 = -0.3002836449793625\n F_2011 = -1.5236182344467388\n\n # are we rescaling to AR5 best estimate with the default parameters?\n if scale_AR5:\n scale=-0.45/(F_2011-F_1765)\n else:\n scale=1.0\n\n ERFaci = (F_pd - F_1765) * scale\n return ERFaci\n\n\ndef ghan2(emissions, E_pi, ghan_params):\n \"\"\"temphack for fair1.6\"\"\"\n beta, n_so2, n_pom = ghan_params\n pd_re = -beta * np.log(1 + emissions[:,5]/n_so2 + emissions[:,9:11].sum(axis=1)/n_pom)\n pi_re = -beta * np.log(1 + E_pi[5]/n_so2 + E_pi[9:11].sum()/n_pom)\n return pd_re - pi_re\n"
] |
[
[
"numpy.log",
"numpy.array",
"numpy.zeros",
"numpy.column_stack"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
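With the default `stevens_params`, the `Stevens` routine above reduces to two closed-form curves, ERFari = -α(E - E_pi) and ERFaci = -β·ln((E - E_pi)/E_nat + 1). A quick numeric check (the SOx emission values are arbitrary, and the SO2-vs-S unit conversion handled by `ref_isSO2` is ignored here):

```python
import numpy as np

alpha, beta, E_nat = 0.001875, 0.634, 60.0       # default stevens_params
E_pi = 0.0                                       # reference emissions

E_SOx = np.array([0.0, 30.0, 60.0, 120.0])       # arbitrary SOx emissions, Mt/yr

ERFari = -alpha * (E_SOx - E_pi)                 # linear in emissions
ERFaci = -beta * np.log((E_SOx - E_pi) / E_nat + 1.0)

# At E_SOx == E_nat, ERFaci = -beta * ln(2) ~ -0.44 W m-2.
print(ERFari)
print(ERFaci)
```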
MarcFig/python-control
|
[
"bad993e49155b11fb0aff1abc1cb12a92545d2c0",
"bad993e49155b11fb0aff1abc1cb12a92545d2c0",
"bad993e49155b11fb0aff1abc1cb12a92545d2c0"
] |
[
"control/lti.py",
"control/bdalg.py",
"control/modelsimp.py"
] |
[
"\"\"\"lti.py\n\nThe lti module contains the LTI parent class to the child classes StateSpace\nand TransferFunction. It is designed for use in the python-control library.\n\nRoutines in this module:\n\nLTI.__init__\nisdtime()\nisctime()\ntimebase()\ntimebaseEqual()\n\"\"\"\n\nimport numpy as np\nfrom numpy import absolute, real\n\n__all__ = ['issiso', 'timebase', 'timebaseEqual', 'isdtime', 'isctime',\n 'pole', 'zero', 'damp', 'evalfr', 'freqresp', 'dcgain']\n\nclass LTI:\n \"\"\"LTI is a parent class to linear time-invariant (LTI) system objects.\n\n LTI is the parent to the StateSpace and TransferFunction child\n classes. It contains the number of inputs and outputs, and the\n timebase (dt) for the system.\n\n The timebase for the system, dt, is used to specify whether the\n system is operating in continuous or discrete time. It can have\n the following values:\n\n * dt = None No timebase specified\n * dt = 0 Continuous time system\n * dt > 0 Discrete time system with sampling time dt\n * dt = True Discrete time system with unspecified sampling time\n\n When two LTI systems are combined, their timebases much match. A system\n with timebase None can be combined with a system having a specified\n timebase, and the result will have the timebase of the latter system.\n\n \"\"\"\n\n def __init__(self, inputs=1, outputs=1, dt=None):\n \"\"\"Assign the LTI object's numbers of inputs and ouputs.\"\"\"\n\n # Data members common to StateSpace and TransferFunction.\n self.inputs = inputs\n self.outputs = outputs\n self.dt = dt\n\n def isdtime(self, strict=False):\n \"\"\"\n Check to see if a system is a discrete-time system\n\n Parameters\n ----------\n strict: bool, optional\n If strict is True, make sure that timebase is not None. Default \n is False. \n \"\"\"\n\n # If no timebase is given, answer depends on strict flag\n if self.dt == None:\n return True if not strict else False\n\n # Look for dt > 0 (also works if dt = True)\n return self.dt > 0\n\n def isctime(self, strict=False):\n \"\"\"\n Check to see if a system is a continuous-time system\n\n Parameters\n ----------\n sys : LTI system\n System to be checked\n strict: bool, optional\n If strict is True, make sure that timebase is not None. Default \n is False. 
\n \"\"\"\n # If no timebase is given, answer depends on strict flag\n if self.dt is None:\n return True if not strict else False\n return self.dt == 0\n\n def issiso(self):\n '''Check to see if a system is single input, single output'''\n return self.inputs == 1 and self.outputs == 1\n\n def damp(self):\n '''Natural frequency, damping ratio of system poles\n\n Returns\n -------\n wn : array\n Natural frequencies for each system pole\n zeta : array\n Damping ratio for each system pole\n poles : array\n Array of system poles\n '''\n poles = self.pole()\n\n if isdtime(self, strict=True):\n splane_poles = np.log(poles)/self.dt\n else:\n splane_poles = poles\n wn = absolute(splane_poles)\n Z = -real(splane_poles)/wn\n return wn, Z, poles\n\n def dcgain(self):\n \"\"\"Return the zero-frequency gain\"\"\"\n raise NotImplementedError(\"dcgain not implemented for %s objects\" %\n str(self.__class__))\n\n# Test to see if a system is SISO\ndef issiso(sys, strict=False):\n \"\"\"\n Check to see if a system is single input, single output\n\n Parameters\n ----------\n sys : LTI system\n System to be checked\n strict: bool (default = False)\n If strict is True, do not treat scalars as SISO\n \"\"\"\n if isinstance(sys, (int, float, complex, np.number)) and not strict:\n return True\n elif not isinstance(sys, LTI):\n raise ValueError(\"Object is not an LTI system\")\n\n # Done with the tricky stuff...\n return sys.issiso()\n\n# Return the timebase (with conversion if unspecified)\ndef timebase(sys, strict=True):\n \"\"\"Return the timebase for an LTI system\n\n dt = timebase(sys)\n\n returns the timebase for a system 'sys'. If the strict option is\n set to False, dt = True will be returned as 1.\n \"\"\"\n # System needs to be either a constant or an LTI system\n if isinstance(sys, (int, float, complex, np.number)):\n return None\n elif not isinstance(sys, LTI):\n raise ValueError(\"Timebase not defined\")\n\n # Return the sample time, with converstion to float if strict is false\n if (sys.dt == None):\n return None\n elif (strict):\n return float(sys.dt)\n\n return sys.dt\n\n# Check to see if two timebases are equal\ndef timebaseEqual(sys1, sys2):\n \"\"\"Check to see if two systems have the same timebase\n\n timebaseEqual(sys1, sys2)\n\n returns True if the timebases for the two systems are compatible. By\n default, systems with timebase 'None' are compatible with either\n discrete or continuous timebase systems. 
If two systems have a discrete\n timebase (dt > 0) then their timebases must be equal.\n \"\"\"\n\n if (type(sys1.dt) == bool or type(sys2.dt) == bool):\n # Make sure both are unspecified discrete timebases\n return type(sys1.dt) == type(sys2.dt) and sys1.dt == sys2.dt\n elif (sys1.dt is None or sys2.dt is None):\n # One or the other is unspecified => the other can be anything\n return True\n else:\n return sys1.dt == sys2.dt\n\n# Check to see if a system is a discrete time system\ndef isdtime(sys, strict=False):\n \"\"\"\n Check to see if a system is a discrete time system\n\n Parameters\n ----------\n sys : LTI system\n System to be checked\n strict: bool (default = False)\n If strict is True, make sure that timebase is not None\n \"\"\"\n\n # Check to see if this is a constant\n if isinstance(sys, (int, float, complex, np.number)):\n # OK as long as strict checking is off\n return True if not strict else False\n\n # Check for a transfer function or state-space object\n if isinstance(sys, LTI):\n return sys.isdtime(strict)\n\n # Got passed something we don't recognize\n return False\n\n# Check to see if a system is a continuous time system\ndef isctime(sys, strict=False):\n \"\"\"\n Check to see if a system is a continuous-time system\n\n Parameters\n ----------\n sys : LTI system\n System to be checked\n strict: bool (default = False)\n If strict is True, make sure that timebase is not None\n \"\"\"\n\n # Check to see if this is a constant\n if isinstance(sys, (int, float, complex, np.number)):\n # OK as long as strict checking is off\n return True if not strict else False\n\n # Check for a transfer function or state space object\n if isinstance(sys, LTI):\n return sys.isctime(strict)\n\n # Got passed something we don't recognize\n return False\n\ndef pole(sys):\n \"\"\"\n Compute system poles.\n\n Parameters\n ----------\n sys: StateSpace or TransferFunction\n Linear system\n\n Returns\n -------\n poles: ndarray\n Array that contains the system's poles.\n\n Raises\n ------\n NotImplementedError\n when called on a TransferFunction object\n\n See Also\n --------\n zero\n TransferFunction.pole\n StateSpace.pole\n\n \"\"\"\n\n return sys.pole()\n\n\ndef zero(sys):\n \"\"\"\n Compute system zeros.\n\n Parameters\n ----------\n sys: StateSpace or TransferFunction\n Linear system\n\n Returns\n -------\n zeros: ndarray\n Array that contains the system's zeros.\n\n Raises\n ------\n NotImplementedError\n when called on a MIMO system\n\n See Also\n --------\n pole\n StateSpace.zero\n TransferFunction.zero\n\n \"\"\"\n\n return sys.zero()\n\ndef damp(sys, doprint=True):\n \"\"\"\n Compute natural frequency, damping ratio, and poles of a system\n\n The function takes 1 or 2 parameters\n\n Parameters\n ----------\n sys: LTI (StateSpace or TransferFunction)\n A linear system object\n doprint:\n if true, print table with values\n\n Returns\n -------\n wn: array\n Natural frequencies of the poles\n damping: array\n Damping values\n poles: array\n Pole locations\n\n Algorithm\n ---------\n If the system is continuous,\n wn = abs(poles)\n Z = -real(poles)/poles.\n\n If the system is discrete, the discrete poles are mapped to their\n equivalent location in the s-plane via\n\n s = log10(poles)/dt\n\n and\n\n wn = abs(s)\n Z = -real(s)/wn.\n\n See Also\n --------\n pole\n \"\"\"\n wn, damping, poles = sys.damp()\n if doprint:\n print('_____Eigenvalue______ Damping___ Frequency_')\n for p, d, w in zip(poles, damping, wn) :\n if abs(p.imag) < 1e-12:\n print(\"%10.4g %10.4g %10.4g\" %\n (p.real, 1.0, 
-p.real))\n else:\n print(\"%10.4g%+10.4gj %10.4g %10.4g\" %\n (p.real, p.imag, d, w))\n return wn, damping, poles\n\ndef evalfr(sys, x):\n \"\"\"\n Evaluate the transfer function of an LTI system for a single complex\n number x.\n\n To evaluate at a frequency, enter x = omega*j, where omega is the\n frequency in radians\n\n Parameters\n ----------\n sys: StateSpace or TransferFunction\n Linear system\n x: scalar\n Complex number\n\n Returns\n -------\n fresp: ndarray\n\n See Also\n --------\n freqresp\n bode\n\n Notes\n -----\n This function is a wrapper for StateSpace.evalfr and\n TransferFunction.evalfr.\n\n Examples\n --------\n >>> sys = ss(\"1. -2; 3. -4\", \"5.; 7\", \"6. 8\", \"9.\")\n >>> evalfr(sys, 1j)\n array([[ 44.8-21.4j]])\n >>> # This is the transfer function matrix evaluated at s = i.\n\n .. todo:: Add example with MIMO system\n \"\"\"\n if issiso(sys):\n return sys.horner(x)[0][0]\n return sys.horner(x)\n\ndef freqresp(sys, omega):\n \"\"\"\n Frequency response of an LTI system at multiple angular frequencies.\n\n Parameters\n ----------\n sys: StateSpace or TransferFunction\n Linear system\n omega: array_like\n List of frequencies\n\n Returns\n -------\n mag: ndarray\n phase: ndarray\n omega: list, tuple, or ndarray\n\n See Also\n --------\n evalfr\n bode\n\n Notes\n -----\n This function is a wrapper for StateSpace.freqresp and\n TransferFunction.freqresp. The output omega is a sorted version of the\n input omega.\n\n Examples\n --------\n >>> sys = ss(\"1. -2; 3. -4\", \"5.; 7\", \"6. 8\", \"9.\")\n >>> mag, phase, omega = freqresp(sys, [0.1, 1., 10.])\n >>> mag\n array([[[ 58.8576682 , 49.64876635, 13.40825927]]])\n >>> phase\n array([[[-0.05408304, -0.44563154, -0.66837155]]])\n\n .. todo::\n Add example with MIMO system\n\n #>>> sys = rss(3, 2, 2)\n #>>> mag, phase, omega = freqresp(sys, [0.1, 1., 10.])\n #>>> mag[0, 1, :]\n #array([ 55.43747231, 42.47766549, 1.97225895])\n #>>> phase[1, 0, :]\n #array([-0.12611087, -1.14294316, 2.5764547 ])\n #>>> # This is the magnitude of the frequency response from the 2nd\n #>>> # input to the 1st output, and the phase (in radians) of the\n #>>> # frequency response from the 1st input to the 2nd output, for\n #>>> # s = 0.1i, i, 10i.\n \"\"\"\n\n return sys.freqresp(omega)\n\ndef dcgain(sys):\n \"\"\"Return the zero-frequency (or DC) gain of the given system\n\n Returns\n -------\n gain : ndarray\n The zero-frequency gain, or np.nan if the system has a pole\n at the origin\n \"\"\"\n return sys.dcgain()\n",
"\"\"\"bdalg.py\n\nThis file contains some standard block diagram algebra.\n\nRoutines in this module:\n\nappend\nseries\nparallel\nnegate\nfeedback\nconnect\n\n\"\"\"\n\n\"\"\"Copyright (c) 2010 by California Institute of Technology\nAll rights reserved.\n\nRedistribution and use in source and binary forms, with or without\nmodification, are permitted provided that the following conditions\nare met:\n\n1. Redistributions of source code must retain the above copyright\n notice, this list of conditions and the following disclaimer.\n\n2. Redistributions in binary form must reproduce the above copyright\n notice, this list of conditions and the following disclaimer in the\n documentation and/or other materials provided with the distribution.\n\n3. Neither the name of the California Institute of Technology nor\n the names of its contributors may be used to endorse or promote\n products derived from this software without specific prior\n written permission.\n\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n\"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\nLIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS\nFOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL CALTECH\nOR THE CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\nSPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\nLIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF\nUSE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND\nON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\nOR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT\nOF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF\nSUCH DAMAGE.\n\nAuthor: Richard M. Murray\nDate: 24 May 09\nRevised: Kevin K. Chen, Dec 10\n\n$Id$\n\n\"\"\"\n\nimport scipy as sp\nimport numpy as np\nfrom . import xferfcn as tf\nfrom . import statesp as ss\nfrom . import frdata as frd\n\n__all__ = ['series', 'parallel', 'negate', 'feedback', 'append', 'connect']\n\ndef series(sys1, *sysn):\n \"\"\"Return the series connection (... \\* sys3 \\*) sys2 \\* sys1\n\n Parameters\n ----------\n sys1: scalar, StateSpace, TransferFunction, or FRD\n sysn: other scalars, StateSpaces, TransferFunctions, or FRDs\n\n Returns\n -------\n out: scalar, StateSpace, or TransferFunction\n\n Raises\n ------\n ValueError\n if `sys2.inputs` does not equal `sys1.outputs`\n if `sys1.dt` is not compatible with `sys2.dt`\n\n See Also\n --------\n parallel\n feedback\n\n Notes\n -----\n This function is a wrapper for the __mul__ function in the StateSpace and\n TransferFunction classes. The output type is usually the type of `sys2`.\n If `sys2` is a scalar, then the output type is the type of `sys1`.\n\n If both systems have a defined timebase (dt = 0 for continuous time,\n dt > 0 for discrete time), then the timebase for both systems must\n match. 
If only one of the system has a timebase, the return\n timebase will be set to match it.\n\n Examples\n --------\n >>> sys3 = series(sys1, sys2) # Same as sys3 = sys2 * sys1\n\n >>> sys5 = series(sys1, sys2, sys3, sys4) # More systems\n\n \"\"\"\n from functools import reduce\n return reduce(lambda x, y:y*x, sysn, sys1)\n\ndef parallel(sys1, *sysn):\n \"\"\"\n Return the parallel connection sys1 + sys2 (+ sys3 + ...)\n\n Parameters\n ----------\n sys1: scalar, StateSpace, TransferFunction, or FRD\n *sysn: other scalars, StateSpaces, TransferFunctions, or FRDs\n\n Returns\n -------\n out: scalar, StateSpace, or TransferFunction\n\n Raises\n ------\n ValueError\n if `sys1` and `sys2` do not have the same numbers of inputs and outputs\n\n See Also\n --------\n series\n feedback\n\n Notes\n -----\n This function is a wrapper for the __add__ function in the\n StateSpace and TransferFunction classes. The output type is usually\n the type of `sys1`. If `sys1` is a scalar, then the output type is\n the type of `sys2`.\n\n If both systems have a defined timebase (dt = 0 for continuous time,\n dt > 0 for discrete time), then the timebase for both systems must\n match. If only one of the system has a timebase, the return\n timebase will be set to match it.\n\n Examples\n --------\n >>> sys3 = parallel(sys1, sys2) # Same as sys3 = sys1 + sys2\n\n >>> sys5 = parallel(sys1, sys2, sys3, sys4) # More systems\n\n \"\"\"\n from functools import reduce\n return reduce(lambda x, y:x+y, sysn, sys1)\n\ndef negate(sys):\n \"\"\"\n Return the negative of a system.\n\n Parameters\n ----------\n sys: StateSpace, TransferFunction or FRD\n\n Returns\n -------\n out: StateSpace or TransferFunction\n\n Notes\n -----\n This function is a wrapper for the __neg__ function in the StateSpace and\n TransferFunction classes. The output type is the same as the input type.\n\n If both systems have a defined timebase (dt = 0 for continuous time,\n dt > 0 for discrete time), then the timebase for both systems must\n match. If only one of the system has a timebase, the return\n timebase will be set to match it.\n\n Examples\n --------\n >>> sys2 = negate(sys1) # Same as sys2 = -sys1.\n\n \"\"\"\n\n return -sys;\n\n#! TODO: expand to allow sys2 default to work in MIMO case?\ndef feedback(sys1, sys2=1, sign=-1):\n \"\"\"\n Feedback interconnection between two I/O systems.\n\n Parameters\n ----------\n sys1: scalar, StateSpace, TransferFunction, FRD\n The primary plant.\n sys2: scalar, StateSpace, TransferFunction, FRD\n The feedback plant (often a feedback controller).\n sign: scalar\n The sign of feedback. `sign` = -1 indicates negative feedback, and\n `sign` = 1 indicates positive feedback. `sign` is an optional\n argument; it assumes a value of -1 if not specified.\n\n Returns\n -------\n out: StateSpace or TransferFunction\n\n Raises\n ------\n ValueError\n if `sys1` does not have as many inputs as `sys2` has outputs, or if\n `sys2` does not have as many inputs as `sys1` has outputs\n NotImplementedError\n if an attempt is made to perform a feedback on a MIMO TransferFunction\n object\n\n See Also\n --------\n series\n parallel\n\n Notes\n -----\n This function is a wrapper for the feedback function in the StateSpace and\n TransferFunction classes. It calls TransferFunction.feedback if `sys1` is a\n TransferFunction object, and StateSpace.feedback if `sys1` is a StateSpace\n object. If `sys1` is a scalar, then it is converted to `sys2`'s type, and\n the corresponding feedback function is used. 
If `sys1` and `sys2` are both\n scalars, then TransferFunction.feedback is used.\n\n \"\"\"\n\n # Check for correct input types.\n if not isinstance(sys1, (int, float, complex, np.number,\n tf.TransferFunction, ss.StateSpace, frd.FRD)):\n raise TypeError(\"sys1 must be a TransferFunction, StateSpace \" +\n \"or FRD object, or a scalar.\")\n if not isinstance(sys2, (int, float, complex, np.number,\n tf.TransferFunction, ss.StateSpace, frd.FRD)):\n raise TypeError(\"sys2 must be a TransferFunction, StateSpace \" +\n \"or FRD object, or a scalar.\")\n\n # If sys1 is a scalar, convert it to the appropriate LTI type so that we can\n # its feedback member function.\n if isinstance(sys1, (int, float, complex, np.number)):\n if isinstance(sys2, tf.TransferFunction):\n sys1 = tf._convert_to_transfer_function(sys1)\n elif isinstance(sys2, ss.StateSpace):\n sys1 = ss._convertToStateSpace(sys1)\n elif isinstance(sys2, frd.FRD):\n sys1 = frd._convertToFRD(sys1, sys2.omega)\n else: # sys2 is a scalar.\n sys1 = tf._convert_to_transfer_function(sys1)\n sys2 = tf._convert_to_transfer_function(sys2)\n\n return sys1.feedback(sys2, sign)\n\ndef append(*sys):\n '''append(sys1, sys2, ..., sysn)\n\n Group models by appending their inputs and outputs\n\n Forms an augmented system model, and appends the inputs and\n outputs together. The system type will be the type of the first\n system given; if you mix state-space systems and gain matrices,\n make sure the gain matrices are not first.\n\n Parameters\n ----------\n sys1, sys2, ... sysn: StateSpace or Transferfunction\n LTI systems to combine\n\n\n Returns\n -------\n sys: LTI system\n Combined LTI system, with input/output vectors consisting of all\n input/output vectors appended\n\n Examples\n --------\n >>> sys1 = ss(\"1. -2; 3. -4\", \"5.; 7\", \"6. 8\", \"9.\")\n >>> sys2 = ss(\"-1.\", \"1.\", \"1.\", \"0.\")\n >>> sys = append(sys1, sys2)\n\n .. todo::\n also implement for transfer function, zpk, etc.\n '''\n s1 = sys[0]\n for s in sys[1:]:\n s1 = s1.append(s)\n return s1\n\ndef connect(sys, Q, inputv, outputv):\n '''\n Index-base interconnection of system\n\n The system sys is a system typically constructed with append, with\n multiple inputs and outputs. The inputs and outputs are connected\n according to the interconnection matrix Q, and then the final\n inputs and outputs are trimmed according to the inputs and outputs\n listed in inputv and outputv.\n\n Note: to have this work, inputs and outputs start counting at 1!!!!\n\n Parameters\n ----------\n sys: StateSpace Transferfunction\n System to be connected\n Q: 2d array\n Interconnection matrix. First column gives the input to be connected\n second column gives the output to be fed into this input. Negative\n values for the second column mean the feedback is negative, 0 means\n no connection is made\n inputv: 1d array\n list of final external inputs\n outputv: 1d array\n list of final external outputs\n\n Returns\n -------\n sys: LTI system\n Connected and trimmed LTI system\n\n Examples\n --------\n >>> sys1 = ss(\"1. -2; 3. 
-4\", \"5.; 7\", \"6, 8\", \"9.\")\n >>> sys2 = ss(\"-1.\", \"1.\", \"1.\", \"0.\")\n >>> sys = append(sys1, sys2)\n >>> Q = sp.mat([ [ 1, 2], [2, -1] ]) # basically feedback, output 2 in 1\n >>> sysc = connect(sys, Q, [2], [1, 2])\n '''\n # first connect\n K = sp.zeros( (sys.inputs, sys.outputs) )\n for r in sp.array(Q).astype(int):\n inp = r[0]-1\n for outp in r[1:]:\n if outp > 0 and outp <= sys.outputs:\n K[inp,outp-1] = 1.\n elif outp < 0 and -outp >= -sys.outputs:\n K[inp,-outp-1] = -1.\n sys = sys.feedback(sp.matrix(K), sign=1)\n\n # now trim\n Ytrim = sp.zeros( (len(outputv), sys.outputs) )\n Utrim = sp.zeros( (sys.inputs, len(inputv)) )\n for i,u in enumerate(inputv):\n Utrim[u-1,i] = 1.\n for i,y in enumerate(outputv):\n Ytrim[i,y-1] = 1.\n return sp.matrix(Ytrim)*sys*sp.matrix(Utrim)\n",
"#! TODO: add module docstring\n# modelsimp.py - tools for model simplification\n#\n# Author: Steve Brunton, Kevin Chen, Lauren Padilla\n# Date: 30 Nov 2010\n#\n# This file contains routines for obtaining reduced order models\n#\n# Copyright (c) 2010 by California Institute of Technology\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions\n# are met:\n#\n# 1. Redistributions of source code must retain the above copyright\n# notice, this list of conditions and the following disclaimer.\n#\n# 2. Redistributions in binary form must reproduce the above copyright\n# notice, this list of conditions and the following disclaimer in the\n# documentation and/or other materials provided with the distribution.\n#\n# 3. Neither the name of the California Institute of Technology nor\n# the names of its contributors may be used to endorse or promote\n# products derived from this software without specific prior\n# written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n# \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS\n# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL CALTECH\n# OR THE CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF\n# USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND\n# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\n# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT\n# OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF\n# SUCH DAMAGE.\n#\n# $Id$\n\n# Python 3 compatibility\nfrom __future__ import print_function\n\n# External packages and modules\nimport numpy as np\nfrom .exception import ControlSlycot\nfrom .lti import isdtime, isctime\nfrom .statesp import StateSpace\nfrom .statefbk import gram\n\n__all__ = ['hsvd', 'balred', 'modred', 'era', 'markov', 'minreal']\n\n# Hankel Singular Value Decomposition\n# The following returns the Hankel singular values, which are singular values\n#of the matrix formed by multiplying the controllability and observability\n#grammians\ndef hsvd(sys):\n \"\"\"Calculate the Hankel singular values.\n\n Parameters\n ----------\n sys : StateSpace\n A state space system\n\n Returns\n -------\n H : Matrix\n A list of Hankel singular values\n\n See Also\n --------\n gram\n\n Notes\n -----\n The Hankel singular values are the singular values of the Hankel operator.\n In practice, we compute the square root of the eigenvalues of the matrix\n formed by taking the product of the observability and controllability\n gramians. 
There are other (more efficient) methods based on solving the\n Lyapunov equation in a particular way (more details soon).\n\n Examples\n --------\n >>> H = hsvd(sys)\n\n \"\"\"\n # TODO: implement for discrete time systems\n if (isdtime(sys, strict=True)):\n raise NotImplementedError(\"Function not implemented in discrete time\")\n\n Wc = gram(sys,'c')\n Wo = gram(sys,'o')\n WoWc = np.dot(Wo, Wc)\n w, v = np.linalg.eig(WoWc)\n\n hsv = np.sqrt(w)\n hsv = np.matrix(hsv)\n hsv = np.sort(hsv)\n hsv = np.fliplr(hsv)\n # Return the Hankel singular values\n return hsv\n\ndef modred(sys, ELIM, method='matchdc'):\n \"\"\"\n Model reduction of `sys` by eliminating the states in `ELIM` using a given\n method.\n\n Parameters\n ----------\n sys: StateSpace\n Original system to reduce\n ELIM: array\n Vector of states to eliminate\n method: string\n Method of removing states in `ELIM`: either ``'truncate'`` or\n ``'matchdc'``.\n\n Returns\n -------\n rsys: StateSpace\n A reduced order model\n\n Raises\n ------\n ValueError\n Raised under the following conditions:\n\n * if `method` is not either ``'matchdc'`` or ``'truncate'``\n\n * if eigenvalues of `sys.A` are not all in left half plane\n (`sys` must be stable)\n\n Examples\n --------\n >>> rsys = modred(sys, ELIM, method='truncate')\n \"\"\"\n\n #Check for ss system object, need a utility for this?\n\n #TODO: Check for continous or discrete, only continuous supported right now\n # if isCont():\n # dico = 'C'\n # elif isDisc():\n # dico = 'D'\n # else:\n if (isctime(sys)):\n dico = 'C'\n else:\n raise NotImplementedError(\"Function not implemented in discrete time\")\n\n\n #Check system is stable\n if np.any(np.linalg.eigvals(sys.A).real >= 0.0):\n raise ValueError(\"Oops, the system is unstable!\")\n\n ELIM = np.sort(ELIM)\n # Create list of elements not to eliminate (NELIM)\n NELIM = [i for i in range(len(sys.A)) if i not in ELIM]\n # A1 is a matrix of all columns of sys.A not to eliminate\n A1 = sys.A[:,NELIM[0]]\n for i in NELIM[1:]:\n A1 = np.hstack((A1, sys.A[:,i]))\n A11 = A1[NELIM,:]\n A21 = A1[ELIM,:]\n # A2 is a matrix of all columns of sys.A to eliminate\n A2 = sys.A[:,ELIM[0]]\n for i in ELIM[1:]:\n A2 = np.hstack((A2, sys.A[:,i]))\n A12 = A2[NELIM,:]\n A22 = A2[ELIM,:]\n\n C1 = sys.C[:,NELIM]\n C2 = sys.C[:,ELIM]\n B1 = sys.B[NELIM,:]\n B2 = sys.B[ELIM,:]\n\n if method=='matchdc':\n # if matchdc, residualize\n\n # Check if the matrix A22 is invertible\n if np.linalg.matrix_rank(A22) != len(ELIM):\n raise ValueError(\"Matrix A22 is singular to working precision.\")\n\n # Now precompute A22\\A21 and A22\\B2 (A22I = inv(A22))\n # We can solve two linear systems in one pass, since the\n # coefficients matrix A22 is the same. 
Thus, we perform the LU\n # decomposition (cubic runtime complexity) of A22 only once!\n # The remaining back substitutions are only quadratic in runtime.\n A22I_A21_B2 = np.linalg.solve(A22, np.concatenate((A21, B2), axis=1))\n A22I_A21 = A22I_A21_B2[:, :A21.shape[1]]\n A22I_B2 = A22I_A21_B2[:, A21.shape[1]:]\n\n Ar = A11 - A12*A22I_A21\n Br = B1 - A12*A22I_B2\n Cr = C1 - C2*A22I_A21\n Dr = sys.D - C2*A22I_B2\n elif method=='truncate':\n # if truncate, simply discard state x2\n Ar = A11\n Br = B1\n Cr = C1\n Dr = sys.D\n else:\n raise ValueError(\"Oops, method is not supported!\")\n\n rsys = StateSpace(Ar,Br,Cr,Dr)\n return rsys\n\ndef balred(sys, orders, method='truncate', alpha=None):\n \"\"\"\n Balanced reduced order model of sys of a given order.\n States are eliminated based on Hankel singular value.\n If sys has unstable modes, they are removed, the\n balanced realization is done on the stable part, then\n reinserted in accordance with the reference below.\n\n Reference: Hsu,C.S., and Hou,D., 1991,\n Reducing unstable linear control systems via real Schur transformation.\n Electronics Letters, 27, 984-986.\n\n Parameters\n ----------\n sys: StateSpace\n Original system to reduce\n orders: integer or array of integer\n Desired order of reduced order model (if a vector, returns a vector\n of systems)\n method: string\n Method of removing states, either ``'truncate'`` or ``'matchdc'``.\n alpha: float\n Redefines the stability boundary for eigenvalues of the system matrix A.\n By default for continuous-time systems, alpha <= 0 defines the stability\n boundary for the real part of A's eigenvalues and for discrete-time\n systems, 0 <= alpha <= 1 defines the stability boundary for the modulus\n of A's eigenvalues. See SLICOT routines AB09MD and AB09ND for more\n information.\n\n Returns\n -------\n rsys: StateSpace\n A reduced order model or a list of reduced order models if orders is a list\n\n Raises\n ------\n ValueError\n * if `method` is not ``'truncate'`` or ``'matchdc'``\n ImportError\n if slycot routine ab09ad, ab09md, or ab09nd is not found\n\n ValueError\n if there are more unstable modes than any value in orders\n\n Examples\n --------\n >>> rsys = balred(sys, orders, method='truncate')\n\n \"\"\"\n if method!='truncate' and method!='matchdc':\n raise ValueError(\"supported methods are 'truncate' or 'matchdc'\")\n elif method=='truncate':\n try:\n from slycot import ab09md, ab09ad\n except ImportError:\n raise ControlSlycot(\"can't find slycot subroutine ab09md or ab09ad\")\n elif method=='matchdc':\n try:\n from slycot import ab09nd\n except ImportError:\n raise ControlSlycot(\"can't find slycot subroutine ab09nd\")\n\n #Check for ss system object, need a utility for this?\n\n #TODO: Check for continous or discrete, only continuous supported right now\n # if isCont():\n # dico = 'C'\n # elif isDisc():\n # dico = 'D'\n # else:\n dico = 'C'\n\n job = 'B' # balanced (B) or not (N)\n equil = 'N' # scale (S) or not (N)\n if alpha is None:\n if dico == 'C':\n alpha = 0.\n elif dico == 'D':\n alpha = 1.\n\n rsys = [] #empty list for reduced systems\n\n #check if orders is a list or a scalar\n try:\n order = iter(orders)\n except TypeError: #if orders is a scalar\n orders = [orders]\n\n for i in orders:\n n = np.size(sys.A,0)\n m = np.size(sys.B,1)\n p = np.size(sys.C,0)\n if method == 'truncate':\n #check system stability\n if np.any(np.linalg.eigvals(sys.A).real >= 0.0):\n #unstable branch\n Nr, Ar, Br, Cr, Ns, hsv = 
ab09md(dico,job,equil,n,m,p,sys.A,sys.B,sys.C,alpha=alpha,nr=i,tol=0.0)\n else:\n #stable branch\n Nr, Ar, Br, Cr, hsv = ab09ad(dico,job,equil,n,m,p,sys.A,sys.B,sys.C,nr=i,tol=0.0)\n rsys.append(StateSpace(Ar, Br, Cr, sys.D))\n\n elif method == 'matchdc':\n Nr, Ar, Br, Cr, Dr, Ns, hsv = ab09nd(dico,job,equil,n,m,p,sys.A,sys.B,sys.C,sys.D,alpha=alpha,nr=i,tol1=0.0,tol2=0.0)\n rsys.append(StateSpace(Ar, Br, Cr, Dr))\n\n #if orders was a scalar, just return the single reduced model, not a list\n if len(orders) == 1:\n return rsys[0]\n #if orders was a list/vector, return a list/vector of systems\n else:\n return rsys\n\ndef minreal(sys, tol=None, verbose=True):\n '''\n Eliminates uncontrollable or unobservable states in state-space\n models or cancelling pole-zero pairs in transfer functions. The\n output sysr has minimal order and the same response\n characteristics as the original model sys.\n\n Parameters\n ----------\n sys: StateSpace or TransferFunction\n Original system\n tol: real\n Tolerance\n verbose: bool\n Print results if True\n\n Returns\n -------\n rsys: StateSpace or TransferFunction\n Cleaned model\n '''\n sysr = sys.minreal(tol)\n if verbose:\n print(\"{nstates} states have been removed from the model\".format(\n nstates=len(sys.pole()) - len(sysr.pole())))\n return sysr\n\ndef era(YY, m, n, nin, nout, r):\n \"\"\"\n Calculate an ERA model of order `r` based on the impulse-response data `YY`.\n\n .. note:: This function is not implemented yet.\n\n Parameters\n ----------\n YY: array\n `nout` x `nin` dimensional impulse-response data\n m: integer\n Number of rows in Hankel matrix\n n: integer\n Number of columns in Hankel matrix\n nin: integer\n Number of input variables\n nout: integer\n Number of output variables\n r: integer\n Order of model\n\n Returns\n -------\n sys: StateSpace\n A reduced order model sys=ss(Ar,Br,Cr,Dr)\n\n Examples\n --------\n >>> rsys = era(YY, m, n, nin, nout, r)\n \"\"\"\n raise NotImplementedError('This function is not implemented yet.')\n\ndef markov(Y, U, M):\n \"\"\"\n Calculate the first `M` Markov parameters [D CB CAB ...]\n from input `U`, output `Y`.\n\n Parameters\n ----------\n Y: array_like\n Output data\n U: array_like\n Input data\n M: integer\n Number of Markov parameters to output\n\n Returns\n -------\n H: matrix\n First M Markov parameters\n\n Notes\n -----\n Currently only works for SISO\n\n Examples\n --------\n >>> H = markov(Y, U, M)\n \"\"\"\n\n # Convert input parameters to matrices (if they aren't already)\n Ymat = np.mat(Y)\n Umat = np.mat(U)\n n = np.size(U)\n\n # Construct a matrix of control inputs to invert\n UU = Umat\n for i in range(1, M-1):\n newCol = np.vstack((0, UU[0:n-1,i-2]))\n UU = np.hstack((UU, newCol))\n Ulast = np.vstack((0, UU[0:n-1,M-2]))\n for i in range(n-1,0,-1):\n Ulast[i] = np.sum(Ulast[0:i-1])\n UU = np.hstack((UU, Ulast))\n\n # Invert and solve for Markov parameters\n H = np.linalg.lstsq(UU, Y)[0]\n\n return H\n"
] |
[
[
"numpy.log",
"numpy.absolute",
"numpy.real"
],
[
"scipy.zeros",
"scipy.array",
"scipy.matrix"
],
[
"numpy.matrix",
"numpy.dot",
"numpy.hstack",
"numpy.linalg.eigvals",
"numpy.sqrt",
"numpy.linalg.matrix_rank",
"numpy.fliplr",
"numpy.linalg.eig",
"numpy.sort",
"numpy.concatenate",
"numpy.linalg.lstsq",
"numpy.size",
"numpy.mat",
"numpy.sum",
"numpy.vstack"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
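
# Aside (not part of the dataset row above): a minimal sketch of the discrete-time
# pole mapping used by LTI.damp() in the control/lti.py source of the preceding row
# (s = log(z)/dt, wn = |s|, zeta = -Re(s)/wn). The sampling time and pole value
# below are made-up example numbers, not values from the dataset.
import numpy as np

dt = 0.1                          # hypothetical sampling time
z_pole = 0.9 * np.exp(1j * 0.3)   # hypothetical discrete-time pole

s_pole = np.log(z_pole) / dt      # map the z-plane pole to the s-plane
wn = np.abs(s_pole)               # natural frequency
zeta = -s_pole.real / wn          # damping ratio
print(wn, zeta)
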
scentini/tensorflow
|
[
"204ed332c0886a0e0ab10b22ba8d67b97e1c83c4",
"169124c0c9630b719e7f0e55722c38c7ecd6c5ac",
"169124c0c9630b719e7f0e55722c38c7ecd6c5ac",
"169124c0c9630b719e7f0e55722c38c7ecd6c5ac",
"169124c0c9630b719e7f0e55722c38c7ecd6c5ac",
"169124c0c9630b719e7f0e55722c38c7ecd6c5ac"
] |
[
"tensorflow/python/ops/math_grad.py",
"tensorflow/python/distribute/one_device_strategy.py",
"tensorflow/python/ops/gradients_util.py",
"tensorflow/python/distribute/saved_model_test_base.py",
"tensorflow/python/ops/linalg_grad.py",
"tensorflow/python/distribute/cluster_resolver/cluster_resolver.py"
] |
[
"# Copyright 2015 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Gradients for operators defined in math_ops.py.\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport numpy as np\n\nfrom tensorflow.python import pywrap_tensorflow as c_api\nfrom tensorflow.python.compat import compat\nfrom tensorflow.python.eager import context\nfrom tensorflow.python.framework import constant_op\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.framework import tensor_util\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import gen_array_ops\nfrom tensorflow.python.ops import gen_math_ops\nfrom tensorflow.python.ops import math_ops\n\n\ndef _safe_shape_div(x, y):\n \"\"\"Divides `x / y` assuming `x, y >= 0`, treating `0 / 0 = 0`.\"\"\"\n return x // math_ops.maximum(y, 1)\n\n\[email protected](\"ArgMax\")\ndef _ArgMaxGrad(op, grad):\n del op, grad\n return [None, None]\n\n\[email protected](\"ArgMin\")\ndef _ArgMinGrad(op, grad):\n del op, grad\n return [None, None]\n\n\[email protected](\"EuclideanNorm\")\ndef _EuclideanNormGrad(op, grad):\n \"\"\"Gradient for EuclideanNorm.\"\"\"\n\n output = op.outputs[0]\n\n if not op.get_attr(\"keep_dims\"):\n output_shape_kept_dims = math_ops.reduced_shape(\n array_ops.shape(op.inputs[0]), op.inputs[1])\n output = array_ops.reshape(output, output_shape_kept_dims)\n grad = array_ops.reshape(grad, output_shape_kept_dims)\n\n return math_ops.truediv(op.inputs[0], output / grad), None\n\n\ndef SmartBroadcastGradientArgs(x, y, grad):\n \"\"\"Optimized version of `broadcast_gradient_args` that caches results.\n\n This implementation avoids creating `broadcast_gradient_args` ops in the case\n that the input shapes are fully defined, and provides hints to the calling\n code that can be used to avoid creating reduction and reshaping ops.\n\n Args:\n x: The left input tensor to a broadcasting binary op.\n y: The right input tensor to a broadcasting binary op.\n grad: The incoming gradient tensor for a broadcasting binary op.\n\n Returns:\n A pair of tuples, containing:\n * A 3-tuple of broadcast information for x, containing:\n * The shape of x (as a tuple or Tensor).\n * The reduction indices for x (as a tuple or Tensor).\n * A boolean, which if True, indicates that x's shape differs from grad's\n shape (and so x's gradient must be reduced and/or reshaped).\n * A 3-tuple of broadcast information for y, containing the respective\n details for y.\n \"\"\"\n # NOTE: It may be productive to apply these optimizations in the eager case\n # as well.\n if context.executing_eagerly() or not (\n isinstance(x, ops.Tensor) and isinstance(y, ops.Tensor)\n and isinstance(grad, ops.Tensor)):\n sx = array_ops.shape(x)\n sy = array_ops.shape(y)\n rx, ry = gen_array_ops.broadcast_gradient_args(sx, 
sy)\n return (sx, rx, True), (sy, ry, True)\n\n # pylint: disable=protected-access\n x_shape_tuple = x._shape_tuple()\n y_shape_tuple = y._shape_tuple()\n grad_shape_tuple = grad._shape_tuple()\n # pylint: enable=protected-access\n\n if (x_shape_tuple is None or None in x_shape_tuple or\n y_shape_tuple is None or None in y_shape_tuple):\n sx = array_ops.shape_internal(x, optimize=False)\n sy = array_ops.shape_internal(y, optimize=False)\n rx, ry = gen_array_ops.broadcast_gradient_args(sx, sy)\n return (sx, rx, True), (sy, ry, True)\n\n x_needs_reduction = x_shape_tuple != grad_shape_tuple\n y_needs_reduction = y_shape_tuple != grad_shape_tuple\n\n # Get the default graph rather than relying on `x.graph`, `y.graph`, or\n # `grad.graph`, because these may be eager tensors.\n g = ops.get_default_graph()\n\n try:\n rx, ry = g._bcast_grad_args_cache[(x_shape_tuple, y_shape_tuple)] # pylint: disable=protected-access\n return (x_shape_tuple, rx, x_needs_reduction), (\n y_shape_tuple, ry, y_needs_reduction)\n except KeyError:\n rx, ry = array_ops.broadcast_gradient_args(x_shape_tuple, y_shape_tuple)\n # TODO(mrry): If this becomes a bottleneck, add a multi-output version of\n # `TF_TryEvaluateConstant()`.\n rx_value = tuple(c_api.TF_TryEvaluateConstant_wrapper(\n rx.graph._c_graph, rx._as_tf_output())) # pylint: disable=protected-access\n assert rx_value is not None\n ry_value = tuple(c_api.TF_TryEvaluateConstant_wrapper(\n ry.graph._c_graph, ry._as_tf_output())) # pylint: disable=protected-access\n assert ry_value is not None\n g._bcast_grad_args_cache[(x_shape_tuple, y_shape_tuple)] = ( # pylint: disable=protected-access\n rx_value, ry_value)\n\n return (x_shape_tuple, rx_value, x_needs_reduction), (\n y_shape_tuple, ry_value, y_needs_reduction)\n\n\n_empty_tuple = ()\n\n\ndef _IsScalar(x):\n return x._shape_tuple() is _empty_tuple # pylint: disable=protected-access\n\n\[email protected](\"Sum\")\ndef _SumGrad(op, grad):\n \"\"\"Gradient for Sum.\"\"\"\n # Fast path for when reducing to a scalar and ndims is known: adds only\n # Reshape and Tile ops (and possibly a Shape).\n input_0_shape = op.inputs[0]._shape_tuple() # pylint: disable=protected-access\n if input_0_shape is not None:\n axes = tensor_util.constant_value(op.inputs[1])\n if axes is not None:\n rank = len(input_0_shape)\n if np.array_equal(axes, np.arange(rank)): # Reduce all dims.\n if context.executing_eagerly():\n ctx = context.context()\n new_shape = ctx.ones_rank_cache().get(rank)\n if new_shape is None:\n new_shape = constant_op.constant([1] * rank, dtype=dtypes.int32)\n ctx.ones_rank_cache().put(rank, new_shape)\n else:\n new_shape = [1] * rank\n grad = array_ops.reshape(grad, new_shape)\n # If shape is not fully defined (but rank is), we use Shape.\n if None not in input_0_shape:\n input_shape = constant_op.constant(input_0_shape, dtype=dtypes.int32)\n else:\n input_shape = array_ops.shape(op.inputs[0])\n return [array_ops.tile(grad, input_shape), None]\n elif None not in input_0_shape and not context.executing_eagerly():\n # The shape and reduction indices are statically known, so we use a\n # graph-level cache to avoid recomputing `reduced_shape()` for each\n # invocation.\n graph = ops.get_default_graph()\n\n # Canonicalize `axes` to be a tuple of indices. 
The incoming\n # value may be a scalar or a vector, and may include negative indices.\n axes = tuple(axes.reshape(-1))\n\n try:\n output_shape_kept_dims, tile_scaling = graph._reduced_shape_cache[ # pylint: disable=protected-access\n (input_0_shape, axes)]\n except KeyError:\n\n # Compute and cache `output_shape_kept_dims` and `tile_scaling`.\n def EvaluateAsTuple(t):\n value = c_api.TF_TryEvaluateConstant_wrapper(\n t.graph._c_graph, t._as_tf_output()) # pylint: disable=protected-access\n assert value is not None\n return tuple(value)\n\n output_shape_kept_dims = EvaluateAsTuple(\n math_ops.reduced_shape(input_0_shape, axes))\n tile_scaling = EvaluateAsTuple(\n _safe_shape_div(input_0_shape, output_shape_kept_dims))\n graph._reduced_shape_cache[(input_0_shape, axes)] = ( # pylint:disable=protected-access\n output_shape_kept_dims, tile_scaling)\n\n grad = array_ops.reshape(grad, output_shape_kept_dims)\n return [array_ops.tile(grad, tile_scaling), None]\n\n input_shape = array_ops.shape(op.inputs[0])\n\n if compat.forward_compatible(2019, 10, 23):\n if not op.get_attr(\"keep_dims\"):\n with ops.colocate_with(input_shape):\n # TODO(apassos) remove this once device placement for eager ops makes\n # more sense.\n output_shape_kept_dims = math_ops.reduced_shape(input_shape,\n op.inputs[1])\n grad = array_ops.reshape(grad, output_shape_kept_dims)\n return [array_ops.broadcast_to(grad, input_shape), None]\n with ops.colocate_with(input_shape):\n output_shape_kept_dims = math_ops.reduced_shape(input_shape, op.inputs[1])\n tile_scaling = _safe_shape_div(input_shape, output_shape_kept_dims)\n grad = array_ops.reshape(grad, output_shape_kept_dims)\n return [array_ops.tile(grad, tile_scaling), None]\n\n\ndef _MinOrMaxGrad(op, grad):\n \"\"\"Gradient for Min or Max. Amazingly it's precisely the same code.\"\"\"\n input_shape = array_ops.shape(op.inputs[0])\n y = op.outputs[0]\n if not op.get_attr(\"keep_dims\"):\n output_shape_kept_dims = math_ops.reduced_shape(input_shape, op.inputs[1])\n y = array_ops.reshape(y, output_shape_kept_dims)\n grad = array_ops.reshape(grad, output_shape_kept_dims)\n else:\n output_shape_kept_dims = array_ops.shape(y)\n\n # Compute the number of selected (maximum or minimum) elements in each\n # reduction dimension. 
If there are multiple minimum or maximum elements\n # then the gradient will be divided between them.\n indicators = math_ops.cast(math_ops.equal(y, op.inputs[0]), grad.dtype)\n num_selected = array_ops.reshape(\n math_ops.reduce_sum(indicators, op.inputs[1]), output_shape_kept_dims)\n\n return [math_ops.divide(indicators, num_selected) * grad, None]\n\n\[email protected](\"Max\")\ndef _MaxGrad(op, grad):\n \"\"\"Gradient for Max.\"\"\"\n return _MinOrMaxGrad(op, grad)\n\n\[email protected](\"Min\")\ndef _MinGrad(op, grad):\n return _MinOrMaxGrad(op, grad)\n\n\[email protected](\"Mean\")\ndef _MeanGrad(op, grad):\n \"\"\"Gradient for Mean.\"\"\"\n sum_grad = _SumGrad(op, grad)[0]\n input_shape = op.inputs[0]._shape_tuple() # pylint: disable=protected-access\n output_shape = op.outputs[0]._shape_tuple() # pylint: disable=protected-access\n if (input_shape is not None and output_shape is not None and\n None not in input_shape and None not in output_shape):\n input_size = np.prod(input_shape)\n output_size = np.prod(output_shape)\n factor = input_size // max(output_size, 1)\n factor = constant_op.constant(factor, dtype=sum_grad.dtype)\n else:\n input_shape = array_ops.shape(op.inputs[0])\n output_shape = array_ops.shape(op.outputs[0])\n factor = _safe_shape_div(\n math_ops.reduce_prod(input_shape), math_ops.reduce_prod(output_shape))\n return math_ops.truediv(sum_grad, math_ops.cast(factor, sum_grad.dtype)), None\n\n\[email protected](\"Prod\")\ndef _ProdGrad(op, grad):\n \"\"\"Gradient for Prod.\"\"\"\n # The gradient can be expressed by dividing the product by each entry of the\n # input tensor, but this approach can't deal with zeros in the input.\n # Here, we avoid this problem by composing the output as a product of two\n # cumprod operations.\n\n input_shape = array_ops.shape(op.inputs[0])\n # Reshape reduction indices for the case where the parameter is a scalar\n reduction_indices = array_ops.reshape(op.inputs[1], [-1])\n\n # Expand grad to full input shape\n if not op.get_attr(\"keep_dims\"):\n output_shape_kept_dims = math_ops.reduced_shape(input_shape, op.inputs[1])\n grad = array_ops.reshape(grad, output_shape_kept_dims)\n\n grad = array_ops.broadcast_to(grad, input_shape)\n\n # Pack all reduced dimensions into a single one, so we can perform the\n # cumprod ops. If the reduction dims list is empty, it defaults to float32,\n # so we need to cast here. 
We put all the shape-related ops on CPU to avoid\n # copying back and forth, and since listdiff is CPU only.\n with ops.device(\"/cpu:0\"):\n rank = array_ops.rank(op.inputs[0])\n reduction_indices = (reduction_indices + rank) % rank\n reduced = math_ops.cast(reduction_indices, dtypes.int32)\n idx = math_ops.range(0, rank)\n other, _ = array_ops.setdiff1d(idx, reduced)\n perm = array_ops.concat([reduced, other], 0)\n reduced_num = math_ops.reduce_prod(array_ops.gather(input_shape, reduced))\n other_num = math_ops.reduce_prod(array_ops.gather(input_shape, other))\n permuted = array_ops.transpose(op.inputs[0], perm)\n permuted_shape = array_ops.shape(permuted)\n reshaped = array_ops.reshape(permuted, (reduced_num, other_num))\n\n # Calculate product, leaving out the current entry\n left = math_ops.cumprod(reshaped, axis=0, exclusive=True)\n right = math_ops.cumprod(reshaped, axis=0, exclusive=True, reverse=True)\n # For complex inputs, the gradient is in the conjugate direction.\n y = array_ops.reshape(\n math_ops.conj(left) * math_ops.conj(right), permuted_shape)\n\n # Invert the transpose and reshape operations.\n # Make sure to set the statically known shape information through a reshape.\n out = grad * array_ops.transpose(y, array_ops.invert_permutation(perm))\n return array_ops.reshape(out, input_shape), None\n\n\[email protected](\"SegmentSum\")\ndef _SegmentSumGrad(op, grad):\n \"\"\"Gradient for SegmentSum.\"\"\"\n return array_ops.gather(grad, op.inputs[1]), None\n\n\[email protected](\"SegmentMean\")\ndef _SegmentMeanGrad(op, grad):\n \"\"\"Gradient for SegmentMean.\"\"\"\n input_rank = array_ops.rank(op.inputs[0])\n ones_shape = array_ops.concat([\n array_ops.shape(op.inputs[1]),\n array_ops.fill(array_ops.expand_dims(input_rank - 1, 0), 1)\n ], 0)\n ones = array_ops.fill(ones_shape, constant_op.constant(1, dtype=grad.dtype))\n scaled_grad = math_ops.divide(grad, math_ops.segment_sum(ones, op.inputs[1]))\n return array_ops.gather(scaled_grad, op.inputs[1]), None\n\n\[email protected](\"SparseSegmentSum\")\ndef _SparseSegmentSumGrad(op, grad):\n \"\"\"Gradient for SparseSegmentSum.\"\"\"\n input_rows = array_ops.shape(op.inputs[0])[0]\n return (math_ops.unsorted_segment_sum(\n array_ops.gather(grad, op.inputs[2]), op.inputs[1], input_rows), None,\n None)\n\n\[email protected](\"SparseSegmentSumWithNumSegments\")\ndef _SparseSegmentSumWithNumSegmentsGrad(op, grad):\n \"\"\"Gradient for SparseSegmentSumWithNumSegments.\"\"\"\n input_rows = array_ops.shape(op.inputs[0])[0]\n return (math_ops.unsorted_segment_sum(\n array_ops.gather(grad, op.inputs[2]), op.inputs[1], input_rows), None,\n None, None)\n\n\[email protected](\"SparseSegmentMean\")\ndef _SparseSegmentMeanGrad(op, grad):\n \"\"\"Gradient for SparseSegmentMean.\"\"\"\n dim0 = array_ops.shape(op.inputs[0])[0]\n return (math_ops.sparse_segment_mean_grad(grad, op.inputs[1], op.inputs[2],\n dim0), None, None)\n\n\[email protected](\"SparseSegmentMeanWithNumSegments\")\ndef _SparseSegmentMeanWithNumSegmentsGrad(op, grad):\n \"\"\"Gradient for SparseSegmentMeanWithNumSegments.\"\"\"\n dim0 = array_ops.shape(op.inputs[0])[0]\n return (math_ops.sparse_segment_mean_grad(grad, op.inputs[1], op.inputs[2],\n dim0), None, None, None)\n\n\[email protected](\"SparseSegmentSqrtN\")\ndef _SparseSegmentSqrtNGrad(op, grad):\n \"\"\"Gradient for SparseSegmentSqrtN.\"\"\"\n dim0 = array_ops.shape(op.inputs[0])[0]\n return (math_ops.sparse_segment_sqrt_n_grad(grad, op.inputs[1], op.inputs[2],\n dim0), None, None)\n\n\[email 
protected](\"SparseSegmentSqrtNWithNumSegments\")\ndef _SparseSegmentSqrtNWithNumSegmentsGrad(op, grad):\n \"\"\"Gradient for SparseSegmentSqrtNWithNumSegments.\"\"\"\n dim0 = array_ops.shape(op.inputs[0])[0]\n return (math_ops.sparse_segment_sqrt_n_grad(grad, op.inputs[1], op.inputs[2],\n dim0), None, None, None)\n\n\ndef _SegmentMinOrMaxGrad(op, grad):\n \"\"\" Gradient for SegmentMin and SegmentMax. \"\"\"\n zeros = array_ops.zeros_like(op.inputs[0], dtype=op.inputs[0].dtype)\n # Get the number of selected (minimum or maximum) elements in each segment.\n gathered_outputs = array_ops.gather(op.outputs[0], op.inputs[1])\n is_selected = math_ops.equal(op.inputs[0], gathered_outputs)\n num_selected = math_ops.segment_sum(\n math_ops.cast(is_selected, grad.dtype), op.inputs[1])\n # Compute the gradient for each segment. The gradient for the ith segment is\n # divided evenly among the selected elements in that segment.\n weighted_grads = math_ops.divide(grad, num_selected)\n gathered_grads = array_ops.gather(weighted_grads, op.inputs[1])\n return array_ops.where_v2(is_selected, gathered_grads, zeros), None\n\n\[email protected](\"SegmentMin\")\ndef _SegmentMinGrad(op, grad):\n \"\"\"Gradient for SegmentMin.\"\"\"\n return _SegmentMinOrMaxGrad(op, grad)\n\n\[email protected](\"SegmentMax\")\ndef _SegmentMaxGrad(op, grad):\n \"\"\"Gradient for SegmentMax.\"\"\"\n return _SegmentMinOrMaxGrad(op, grad)\n\n\ndef _GatherDropNegatives(params,\n ids,\n zero_clipped_indices=None,\n is_positive=None):\n \"\"\" Helper function for unsorted segment ops.\n\n Gathers params for\n positive segment ids and gathers 0 for inputs with negative segment id.\n Also returns the clipped indices and a boolean mask with the same shape\n as ids where a positive id is masked as true. With this, the latter two\n can be passed as arguments to this function to reuse them.\n \"\"\"\n if zero_clipped_indices is None:\n zero_clipped_indices = math_ops.maximum(ids, array_ops.zeros_like(ids))\n gathered = array_ops.gather(params, zero_clipped_indices)\n if is_positive is None:\n is_positive = math_ops.greater_equal(ids, 0)\n # tf.where(condition, x, y) requires condition to have the same shape as x\n # and y.\n is_positive_shape = array_ops.shape(is_positive)\n broadcastable_shape = array_ops.concat(\n [is_positive_shape,\n array_ops.ones([array_ops.rank(gathered)\n - array_ops.rank(is_positive)],\n dtype=is_positive_shape.dtype)],\n axis=0)\n is_positive = array_ops.reshape(is_positive, broadcastable_shape)\n is_positive = (\n is_positive & array_ops.ones_like(gathered, dtype=dtypes.bool))\n # replace gathered params of negative indices with 0\n zero_slice = array_ops.zeros_like(gathered)\n return (array_ops.where_v2(is_positive, gathered,\n zero_slice), zero_clipped_indices, is_positive)\n\n\ndef _UnsortedSegmentMinOrMaxGrad(op, grad):\n \"\"\" Gradient for UnsortedSegmentMin and UnsortedSegmentMax. \"\"\"\n # Get the number of selected (minimum or maximum) elements in each segment.\n gathered_outputs, zero_clipped_indices, is_positive = \\\n _GatherDropNegatives(op.outputs[0], op.inputs[1])\n is_selected = math_ops.equal(op.inputs[0], gathered_outputs)\n is_selected = math_ops.logical_and(is_selected, is_positive)\n num_selected = math_ops.unsorted_segment_sum(\n math_ops.cast(is_selected, grad.dtype), op.inputs[1], op.inputs[2])\n # Compute the gradient for each segment. 
The gradient for the ith segment is\n # divided evenly among the selected elements in that segment.\n weighted_grads = math_ops.divide(grad, num_selected)\n gathered_grads, _, _ = _GatherDropNegatives(weighted_grads, None,\n zero_clipped_indices, is_positive)\n zeros = array_ops.zeros_like(gathered_grads)\n return array_ops.where_v2(is_selected, gathered_grads, zeros), None, None\n\n\[email protected](\"UnsortedSegmentSum\")\ndef _UnsortedSegmentSumGrad(op, grad):\n \"\"\"Gradient for UnsortedSegmentSum.\"\"\"\n return _GatherDropNegatives(grad, op.inputs[1])[0], None, None\n\n\[email protected](\"UnsortedSegmentMax\")\ndef _UnsortedSegmentMaxGrad(op, grad):\n \"\"\" Gradient for UnsortedSegmentMax. \"\"\"\n return _UnsortedSegmentMinOrMaxGrad(op, grad)\n\n\[email protected](\"UnsortedSegmentMin\")\ndef _UnsortedSegmentMinGrad(op, grad):\n \"\"\" Gradient for UnsortedSegmentMin. \"\"\"\n return _UnsortedSegmentMinOrMaxGrad(op, grad)\n\n\[email protected](\"UnsortedSegmentProd\")\ndef _UnsortedSegmentProdGrad(op, grad):\n \"\"\" Gradient for UnsortedSegmentProd.\n\n The gradient can be expressed for each segment by dividing the segment's\n product by each element of the segment input tensor, but this approach can't\n deal with zeros in the input.\n Unlike reduce_prod we can't use cumsum here as individual segments may have\n a different number of elements. Therefore we consider three cases:\n 1) A segment input contains no zeros and we can safely divide by the input\n tensor.\n 2) A segment contains exactly one zero. Then the gradient of each input of\n the segment is zero except for the 0-input, there the gradient is\n the product of the remaining segment entries.\n 3) A segment contains at least two zeros. The gradient is zero for all\n segment inputs.\n \"\"\"\n # Note that unsorted_segment_sum will filter out the negative indices,\n # so we don't need to do a logical_and with is_positive here\n is_zero = math_ops.equal(op.inputs[0], 0)\n num_zeros = gen_math_ops.unsorted_segment_sum(\n math_ops.cast(is_zero, dtype=dtypes.int32), op.inputs[1], op.inputs[2])\n # handle case 3 and set the gradient to 0 for segments with more than one\n # 0 as input\n grad = array_ops.where_v2(\n math_ops.greater(num_zeros, 1), array_ops.zeros_like(grad), grad)\n # replace all zeros with ones and compute the unsorted_segment_prod\n non_zero_data = array_ops.where_v2(is_zero, array_ops.ones_like(op.inputs[0]),\n op.inputs[0])\n non_zero_prod = gen_math_ops.unsorted_segment_prod(non_zero_data,\n op.inputs[1], op.inputs[2])\n # clip the indices for gather to be positive\n zero_clipped_indices = math_ops.maximum(op.inputs[1],\n array_ops.zeros_like(op.inputs[1]))\n gathered_prod = array_ops.gather(op.outputs[0], zero_clipped_indices)\n gathered_non_zero_prod = array_ops.gather(non_zero_prod, zero_clipped_indices)\n prod_divided_by_el = gathered_prod / op.inputs[0] # May contain nan/inf.\n # Now fetch the individual results for segments containing 0 and those that\n # don't. 
is_zero will also fetch results for entries with negative index\n # but the following gather_drop_negatives sets the corresponding entry in\n # grad to 0 for these\n partial_derivative = array_ops.where_v2(is_zero, gathered_non_zero_prod,\n prod_divided_by_el)\n gathered_grad = _GatherDropNegatives(grad, op.inputs[1],\n zero_clipped_indices)[0]\n return gathered_grad * partial_derivative, None, None\n\n\[email protected](\"Abs\")\ndef _AbsGrad(op, grad):\n x = op.inputs[0]\n return grad * math_ops.sign(x)\n\n\[email protected](\"Neg\")\ndef _NegGrad(_, grad):\n \"\"\"Returns -grad.\"\"\"\n return -grad\n\n\[email protected](\"Inv\")\ndef _InvGrad(op, grad):\n \"\"\"Returns -grad * (1 / x^2).\"\"\"\n y = op.outputs[0] # y = 1 / x\n return gen_math_ops.reciprocal_grad(y, grad)\n\n\[email protected](\"Reciprocal\")\ndef _ReciprocalGrad(op, grad):\n \"\"\"Returns -grad * (1 / x^2).\"\"\"\n y = op.outputs[0] # y = 1 / x\n return gen_math_ops.reciprocal_grad(y, grad)\n\n\[email protected](\"InvGrad\")\ndef _InvGradGrad(op, grad):\n b = op.inputs[1]\n # op.output[0]: y = -b * conj(a)^2\n with ops.control_dependencies([grad]):\n ca = math_ops.conj(op.inputs[0])\n cg = math_ops.conj(grad)\n return cg * -2.0 * b * ca, gen_math_ops.reciprocal_grad(ca, grad)\n\n\[email protected](\"ReciprocalGrad\")\ndef _ReciprocalGradGrad(op, grad):\n b = op.inputs[1]\n # op.output[0]: y = -b * conj(a)^2\n with ops.control_dependencies([grad]):\n ca = math_ops.conj(op.inputs[0])\n cg = math_ops.conj(grad)\n return cg * -2.0 * b * ca, gen_math_ops.reciprocal_grad(ca, grad)\n\n\[email protected](\"Square\")\ndef _SquareGrad(op, grad):\n x = op.inputs[0]\n # Added control dependencies to prevent 2*x from being computed too early.\n with ops.control_dependencies([grad]):\n x = math_ops.conj(x)\n y = constant_op.constant(2.0, dtype=x.dtype)\n return math_ops.multiply(grad, math_ops.multiply(x, y))\n\n\[email protected](\"Sqrt\")\ndef _SqrtGrad(op, grad):\n y = op.outputs[0] # y = x^(1/2)\n return gen_math_ops.sqrt_grad(y, grad)\n\n\[email protected](\"SqrtGrad\")\ndef _SqrtGradGrad(op, grad):\n a = op.inputs[0]\n y = op.outputs[0] # y = 0.5 * b / conj(a)\n with ops.control_dependencies([grad]):\n if compat.forward_compatible(2019, 12, 14):\n ga = gen_math_ops.xdivy(grad, a)\n return -gen_math_ops.mul_no_nan(y, math_ops.conj(ga)), 0.5 * ga\n else:\n ga = grad / a\n return -math_ops.conj(ga) * y, 0.5 * ga\n\n\[email protected](\"Rsqrt\")\ndef _RsqrtGrad(op, grad):\n \"\"\"Returns -0.5 * grad * conj(y)^3.\"\"\"\n y = op.outputs[0] # y = x^(-1/2)\n return gen_math_ops.rsqrt_grad(y, grad)\n\n\[email protected](\"RsqrtGrad\")\ndef _RsqrtGradGrad(op, grad):\n \"\"\"Returns backprop gradient for f(a,b) = -0.5 * b * conj(a)^3.\"\"\"\n a = op.inputs[0] # a = x^{-1/2}\n b = op.inputs[1] # backprop gradient for a\n with ops.control_dependencies([grad]):\n ca = math_ops.conj(a)\n cg = math_ops.conj(grad)\n grad_a = -1.5 * cg * b * math_ops.square(ca)\n grad_b = gen_math_ops.rsqrt_grad(ca, grad)\n return grad_a, grad_b\n\n\[email protected](\"Exp\")\ndef _ExpGrad(op, grad):\n \"\"\"Returns grad * exp(x).\"\"\"\n y = op.outputs[0] # y = e^x\n with ops.control_dependencies([grad]):\n y = math_ops.conj(y)\n if compat.forward_compatible(2019, 12, 14):\n return math_ops.mul_no_nan(y, grad)\n else:\n return grad * y\n\n\[email protected](\"Expm1\")\ndef _Expm1Grad(op, grad):\n \"\"\"Returns grad * exp(x).\"\"\"\n x = op.inputs[0]\n with ops.control_dependencies([grad]):\n x = math_ops.conj(x)\n y = math_ops.exp(x)\n if 
compat.forward_compatible(2019, 12, 14):\n return math_ops.mul_no_nan(y, grad)\n else:\n return grad * y\n\n\[email protected](\"Log\")\ndef _LogGrad(op, grad):\n \"\"\"Returns grad * (1/x).\"\"\"\n x = op.inputs[0]\n with ops.control_dependencies([grad]):\n x = math_ops.conj(x)\n if compat.forward_compatible(2019, 12, 14):\n return gen_math_ops.xdivy(grad, x)\n else:\n return grad * math_ops.reciprocal(x)\n\n\[email protected](\"Log1p\")\ndef _Log1pGrad(op, grad):\n \"\"\"Returns grad * (1/(1 + x)).\"\"\"\n x = op.inputs[0]\n with ops.control_dependencies([grad]):\n x = math_ops.conj(x)\n if compat.forward_compatible(2019, 12, 14):\n return gen_math_ops.xdivy(grad, 1 + x)\n else:\n return grad * math_ops.reciprocal(1 + x)\n\n\[email protected](\"Xlogy\")\ndef _XLogyGrad(op, grad):\n \"\"\"Returns gradient of xlogy(x, y) with respect to x and y.\"\"\"\n x = op.inputs[0]\n y = op.inputs[1]\n sx = array_ops.shape(x)\n sy = array_ops.shape(y)\n rx, ry = gen_array_ops.broadcast_gradient_args(sx, sy)\n with ops.control_dependencies([grad]):\n not_zero_x = math_ops.cast(\n math_ops.not_equal(x, math_ops.cast(0., dtype=x.dtype)), dtype=x.dtype)\n partial_x = gen_math_ops.xlogy(not_zero_x, y)\n partial_y = gen_math_ops.xdivy(x, y)\n return (array_ops.reshape(math_ops.reduce_sum(partial_x * grad, rx), sx),\n array_ops.reshape(math_ops.reduce_sum(partial_y * grad, ry), sy))\n\n\[email protected](\"Xdivy\")\ndef _XDivyGrad(op, grad):\n \"\"\"Returns gradient of xdivy(x, y) with respect to x and y.\"\"\"\n x = op.inputs[0]\n y = op.inputs[1]\n sx = array_ops.shape(x)\n sy = array_ops.shape(y)\n rx, ry = gen_array_ops.broadcast_gradient_args(sx, sy)\n with ops.control_dependencies([grad]):\n not_zero_x = math_ops.cast(\n math_ops.not_equal(x, math_ops.cast(0., dtype=x.dtype)), dtype=x.dtype)\n partial_x = gen_math_ops.xdivy(not_zero_x, y)\n partial_y = gen_math_ops.xdivy(math_ops.negative(x), y**2)\n return (array_ops.reshape(math_ops.reduce_sum(partial_x * grad, rx), sx),\n array_ops.reshape(math_ops.reduce_sum(partial_y * grad, ry), sy))\n\n\[email protected](\"Sinh\")\ndef _SinhGrad(op, grad):\n \"\"\"Returns grad * cosh(x).\"\"\"\n x = op.inputs[0]\n with ops.control_dependencies([grad]):\n x = math_ops.conj(x)\n return grad * math_ops.cosh(x)\n\n\[email protected](\"Cosh\")\ndef _CoshGrad(op, grad):\n \"\"\"Returns grad * sinh(x).\"\"\"\n x = op.inputs[0]\n with ops.control_dependencies([grad]):\n x = math_ops.conj(x)\n return grad * math_ops.sinh(x)\n\n\[email protected](\"Tanh\")\ndef _TanhGrad(op, grad):\n \"\"\"Returns grad * (1 - tanh(x) * tanh(x)).\"\"\"\n y = op.outputs[0] # y = tanh(x)\n with ops.control_dependencies([grad]):\n y = math_ops.conj(y)\n return gen_math_ops.tanh_grad(y, grad)\n\n\[email protected](\"Asinh\")\ndef _AsinhGrad(op, grad):\n \"\"\"Returns grad * 1/cosh(y).\"\"\"\n y = op.outputs[0]\n with ops.control_dependencies([grad]):\n y = math_ops.conj(y)\n return grad / math_ops.cosh(y)\n\n\[email protected](\"Acosh\")\ndef _AcoshGrad(op, grad):\n \"\"\"Returns grad * 1/sinh(y).\"\"\"\n y = op.outputs[0]\n with ops.control_dependencies([grad]):\n y = math_ops.conj(y)\n if compat.forward_compatible(2019, 12, 14):\n return math_ops.xdivy(grad, math_ops.sinh(y))\n else:\n return grad / math_ops.sinh(y)\n\n\[email protected](\"Atanh\")\ndef _AtanhGrad(op, grad):\n \"\"\"Returns grad * 1/ (1 - x^2).\"\"\"\n x = op.inputs[0]\n with ops.control_dependencies([grad]):\n x = math_ops.conj(x)\n x2 = math_ops.square(x)\n one = constant_op.constant(1, dtype=grad.dtype)\n inv = 
math_ops.reciprocal(math_ops.subtract(one, x2))\n return grad * inv\n\n\[email protected](\"TanhGrad\")\ndef _TanhGradGrad(op, grad):\n with ops.control_dependencies([grad]):\n a = math_ops.conj(op.inputs[0])\n b = math_ops.conj(op.inputs[1])\n return grad * -2.0 * b * a, gen_math_ops.tanh_grad(a, grad)\n\n\[email protected](\"Erf\")\ndef _ErfGrad(op, grad):\n \"\"\"Returns grad * 2/sqrt(pi) * exp(-x**2).\"\"\"\n x = op.inputs[0]\n two_over_root_pi = constant_op.constant(2 / np.sqrt(np.pi), dtype=grad.dtype)\n with ops.control_dependencies([grad]):\n x = math_ops.conj(x)\n return grad * two_over_root_pi * math_ops.exp(-math_ops.square(x))\n\n\[email protected](\"Erfc\")\ndef _ErfcGrad(op, grad):\n \"\"\"Returns -grad * 2/sqrt(pi) * exp(-x**2).\"\"\"\n x = op.inputs[0]\n minus_two_over_root_pi = constant_op.constant(\n -2 / np.sqrt(np.pi), dtype=grad.dtype)\n with ops.control_dependencies([grad]):\n x = math_ops.conj(x)\n return grad * minus_two_over_root_pi * math_ops.exp(-math_ops.square(x))\n\n\[email protected](\"Erfinv\")\ndef _ErfinvGrad(op, grad):\n \"\"\"Returns grad * sqrt(pi) / 2 * exp(erfinv(x)**2).\"\"\"\n root_pi_over_two = constant_op.constant(np.sqrt(np.pi) / 2, dtype=grad.dtype)\n with ops.control_dependencies([grad]):\n return grad * root_pi_over_two * math_ops.exp(\n math_ops.square(op.outputs[0]))\n\n\[email protected](\"Ndtri\")\ndef _NdtriGrad(op, grad):\n \"\"\"Returns grad * sqrt(2 * pi) * exp(ndtri(x)**2 / 2).\"\"\"\n root_two_pi = constant_op.constant(np.sqrt(2 * np.pi), dtype=grad.dtype)\n with ops.control_dependencies([grad]):\n return grad * root_two_pi * math_ops.exp(\n math_ops.square(op.outputs[0]) / 2.)\n\n\[email protected](\"Lgamma\")\ndef _LgammaGrad(op, grad):\n \"\"\"Returns grad * digamma(x).\"\"\"\n x = op.inputs[0]\n with ops.control_dependencies([grad]):\n x = math_ops.conj(x)\n if compat.forward_compatible(2019, 12, 14):\n return math_ops.mul_no_nan(math_ops.digamma(x), grad)\n else:\n return grad * math_ops.digamma(x)\n\n\[email protected](\"Digamma\")\ndef _DigammaGrad(op, grad):\n \"\"\"Compute gradient of the digamma function with respect to its argument.\"\"\"\n x = op.inputs[0]\n with ops.control_dependencies([grad]):\n x = math_ops.conj(x)\n partial_x = math_ops.polygamma(array_ops.constant(1, dtype=x.dtype), x)\n if compat.forward_compatible(2019, 12, 14):\n return math_ops.mul_no_nan(partial_x, grad)\n else:\n return grad * partial_x\n\n\[email protected](\"BesselI0e\")\ndef _BesselI0eGrad(op, grad):\n \"\"\"Compute gradient of bessel_i0e(x) with respect to its argument.\"\"\"\n x = op.inputs[0]\n y = op.outputs[0]\n with ops.control_dependencies([grad]):\n partial_x = (math_ops.bessel_i1e(x) - math_ops.sign(x) * y)\n if compat.forward_compatible(2019, 12, 14):\n return math_ops.mul_no_nan(partial_x, grad)\n else:\n return grad * partial_x\n\n\[email protected](\"BesselI1e\")\ndef _BesselI1eGrad(op, grad):\n \"\"\"Compute gradient of bessel_i1e(x) with respect to its argument.\"\"\"\n x = op.inputs[0]\n y = op.outputs[0]\n with ops.control_dependencies([grad]):\n # For x = 0, the correct gradient is 0.5.\n # However, the main branch gives NaN because of the division by x, so\n # we impute the gradient manually.\n # An alternative solution is to express the gradient via bessel_i0e and\n # bessel_i2e, but the latter is not yet implemented in Eigen.\n eps = np.finfo(x.dtype.as_numpy_dtype).eps\n zeros = array_ops.zeros_like(x)\n x_is_not_tiny = math_ops.abs(x) > eps\n safe_x = array_ops.where_v2(x_is_not_tiny, x, eps + zeros)\n dy_dx = 
math_ops.bessel_i0e(safe_x) - y * (\n math_ops.sign(safe_x) + math_ops.reciprocal(safe_x))\n dy_dx = array_ops.where_v2(x_is_not_tiny, dy_dx, 0.5 + zeros)\n if compat.forward_compatible(2019, 12, 14):\n return math_ops.mul_no_nan(dy_dx, grad)\n else:\n return grad * dy_dx\n\n\[email protected](\"Igamma\")\ndef _IgammaGrad(op, grad):\n \"\"\"Returns gradient of igamma(a, x) with respect to a and x.\"\"\"\n a = op.inputs[0]\n x = op.inputs[1]\n sa = array_ops.shape(a)\n sx = array_ops.shape(x)\n ra, rx = gen_array_ops.broadcast_gradient_args(sa, sx)\n\n with ops.control_dependencies([grad]):\n partial_a = gen_math_ops.igamma_grad_a(a, x)\n # Perform operations in log space before summing, because Gamma(a)\n # and Gamma'(a) can grow large.\n partial_x = math_ops.exp(-x + (a - 1) * math_ops.log(x) -\n math_ops.lgamma(a))\n if compat.forward_compatible(2019, 12, 14):\n return (array_ops.reshape(\n math_ops.reduce_sum(math_ops.mul_no_nan(partial_a, grad), ra), sa),\n array_ops.reshape(\n math_ops.reduce_sum(math_ops.mul_no_nan(partial_x, grad), rx),\n sx))\n else:\n return (array_ops.reshape(math_ops.reduce_sum(partial_a * grad, ra), sa),\n array_ops.reshape(math_ops.reduce_sum(partial_x * grad, rx), sx))\n\n\[email protected](\"Igammac\")\ndef _IgammacGrad(op, grad):\n \"\"\"Returns gradient of igammac(a, x) = 1 - igamma(a, x) w.r.t. a and x.\"\"\"\n igamma_grad_a, igamma_grad_x = _IgammaGrad(op, grad)\n return (-igamma_grad_a, -igamma_grad_x)\n\n\[email protected](\"Betainc\")\ndef _BetaincGrad(op, grad):\n \"\"\"Returns gradient of betainc(a, b, x) with respect to x.\"\"\"\n # TODO(ebrevdo): Perhaps add the derivative w.r.t. a, b\n a, b, x = op.inputs\n\n # two cases: x is a scalar and a/b are same-shaped tensors, or vice\n # versa; so its sufficient to check against shape(a).\n sa = array_ops.shape(a)\n sx = array_ops.shape(x)\n _, rx = gen_array_ops.broadcast_gradient_args(sa, sx)\n\n # Perform operations in log space before summing, because terms\n # can grow large.\n log_beta = (\n gen_math_ops.lgamma(a) + gen_math_ops.lgamma(b) -\n gen_math_ops.lgamma(a + b))\n partial_x = math_ops.exp((b - 1) * math_ops.log(1 - x) +\n (a - 1) * math_ops.log(x) - log_beta)\n\n # TODO(b/36815900): Mark None return values as NotImplemented\n if compat.forward_compatible(2019, 12, 14):\n return (\n None, # da\n None, # db\n array_ops.reshape(\n math_ops.reduce_sum(math_ops.mul_no_nan(partial_x, grad), rx), sx))\n else:\n return (\n None, # da\n None, # db\n array_ops.reshape(math_ops.reduce_sum(partial_x * grad, rx), sx))\n\n\[email protected](\"Zeta\")\ndef _ZetaGrad(op, grad):\n \"\"\"Returns gradient of zeta(x, q) with respect to x and q.\"\"\"\n # TODO(tillahoffmann): Add derivative with respect to x\n x = op.inputs[0]\n q = op.inputs[1]\n # Broadcast gradients\n sx = array_ops.shape(x)\n sq = array_ops.shape(q)\n unused_rx, rq = gen_array_ops.broadcast_gradient_args(sx, sq)\n # Evaluate gradient\n with ops.control_dependencies([grad]):\n x = math_ops.conj(x)\n q = math_ops.conj(q)\n partial_q = -x * math_ops.zeta(x + 1, q)\n # TODO(b/36815900): Mark None return values as NotImplemented\n if compat.forward_compatible(2019, 12, 14):\n return (None,\n array_ops.reshape(\n math_ops.reduce_sum(math_ops.mul_no_nan(partial_q, grad), rq),\n sq))\n else:\n return (None,\n array_ops.reshape(math_ops.reduce_sum(partial_q * grad, rq), sq))\n\n\[email protected](\"Polygamma\")\ndef _PolygammaGrad(op, grad):\n \"\"\"Returns gradient of psi(n, x) with respect to n and x.\"\"\"\n # TODO(tillahoffmann): Add derivative 
with respect to n\n n = op.inputs[0]\n x = op.inputs[1]\n # Broadcast gradients\n sn = array_ops.shape(n)\n sx = array_ops.shape(x)\n unused_rn, rx = gen_array_ops.broadcast_gradient_args(sn, sx)\n # Evaluate gradient\n with ops.control_dependencies([grad]):\n n = math_ops.conj(n)\n x = math_ops.conj(x)\n partial_x = math_ops.polygamma(n + 1, x)\n # TODO(b/36815900): Mark None return values as NotImplemented\n if compat.forward_compatible(2019, 12, 14):\n return (None,\n array_ops.reshape(\n math_ops.reduce_sum(math_ops.mul_no_nan(partial_x, grad), rx),\n sx))\n else:\n return (None,\n array_ops.reshape(math_ops.reduce_sum(partial_x * grad, rx), sx))\n\n\[email protected](\"Sigmoid\")\ndef _SigmoidGrad(op, grad):\n \"\"\"Returns grad * sigmoid(x) * (1 - sigmoid(x)).\"\"\"\n y = op.outputs[0] # y = sigmoid(x)\n with ops.control_dependencies([grad]):\n y = math_ops.conj(y)\n return gen_math_ops.sigmoid_grad(y, grad)\n\n\[email protected](\"SigmoidGrad\")\ndef _SigmoidGradGrad(op, grad):\n with ops.control_dependencies([grad]):\n a = math_ops.conj(op.inputs[0])\n b = math_ops.conj(op.inputs[1])\n gb = grad * b\n return gb - 2.0 * gb * a, gen_math_ops.sigmoid_grad(a, grad)\n\n\[email protected](\"Sign\")\ndef _SignGrad(op, _):\n \"\"\"Returns 0.\"\"\"\n x = op.inputs[0]\n return array_ops.zeros(array_ops.shape(x), dtype=x.dtype)\n\n\[email protected](\"Sin\")\ndef _SinGrad(op, grad):\n \"\"\"Returns grad * cos(x).\"\"\"\n x = op.inputs[0]\n with ops.control_dependencies([grad]):\n x = math_ops.conj(x)\n return grad * math_ops.cos(x)\n\n\[email protected](\"Cos\")\ndef _CosGrad(op, grad):\n \"\"\"Returns grad * -sin(x).\"\"\"\n x = op.inputs[0]\n with ops.control_dependencies([grad]):\n x = math_ops.conj(x)\n return -grad * math_ops.sin(x)\n\n\[email protected](\"Tan\")\ndef _TanGrad(op, grad):\n \"\"\"Returns grad * 1/sec^2(x).\"\"\"\n x = op.inputs[0]\n with ops.control_dependencies([grad]):\n x = math_ops.conj(x)\n secx = math_ops.reciprocal(math_ops.cos(x))\n secx2 = math_ops.square(secx)\n if compat.forward_compatible(2019, 12, 14):\n return math_ops.mul_no_nan(secx2, grad)\n else:\n return secx2 * grad\n\n\[email protected](\"Asin\")\ndef _AsinGrad(op, grad):\n \"\"\"Returns grad * 1/sqrt(1-x^2).\"\"\"\n x = op.inputs[0]\n with ops.control_dependencies([grad]):\n x = math_ops.conj(x)\n x2 = math_ops.square(x)\n one = constant_op.constant(1, dtype=grad.dtype)\n den = math_ops.sqrt(math_ops.subtract(one, x2))\n if compat.forward_compatible(2019, 12, 14):\n return math_ops.xdivy(grad, den)\n else:\n inv = math_ops.reciprocal(den)\n return grad * inv\n\n\[email protected](\"Acos\")\ndef _AcosGrad(op, grad):\n \"\"\"Returns grad * -1/sqrt(1-x^2).\"\"\"\n x = op.inputs[0]\n with ops.control_dependencies([grad]):\n x = math_ops.conj(x)\n x2 = math_ops.square(x)\n one = constant_op.constant(1, dtype=grad.dtype)\n den = math_ops.sqrt(math_ops.subtract(one, x2))\n if compat.forward_compatible(2019, 12, 14):\n return -math_ops.xdivy(grad, den)\n else:\n inv = math_ops.reciprocal(den)\n return -grad * inv\n\n\[email protected](\"Atan\")\ndef _AtanGrad(op, grad):\n \"\"\"Returns grad * 1/ (1 + x^2).\"\"\"\n x = op.inputs[0]\n with ops.control_dependencies([grad]):\n x = math_ops.conj(x)\n x2 = math_ops.square(x)\n one = constant_op.constant(1, dtype=grad.dtype)\n inv = math_ops.reciprocal(math_ops.add(one, x2))\n return grad * inv\n\n\[email protected](\"Atan2\")\ndef _Atan2Grad(op, grad):\n \"\"\"Returns grad * x / (x^2 + y^2), grad * -y / (x^2 + y^2).\"\"\"\n y = op.inputs[0]\n x = op.inputs[1]\n 
with ops.control_dependencies([grad]):\n if compat.forward_compatible(2019, 12, 14):\n grad_inv = math_ops.xdivy(grad, (math_ops.square(x) + math_ops.square(y)))\n else:\n grad_inv = grad / (math_ops.square(x) + math_ops.square(y))\n return x * grad_inv, -y * grad_inv\n\n\[email protected](\"AddN\")\ndef _AddNGrad(op, grad):\n \"\"\"Copies the gradient to all inputs.\"\"\"\n # Not broadcasting.\n return [grad] * len(op.inputs)\n\n\ndef _ShapesFullySpecifiedAndEqual(x, y, grad):\n # pylint: disable=protected-access\n x_shape = x._shape_tuple()\n y_shape = y._shape_tuple()\n grad_shape = grad._shape_tuple()\n # pylint: enable=protected-access\n return (x_shape == y_shape and x_shape == grad_shape and\n x_shape is not None and None not in x_shape)\n\n\[email protected](\"Add\")\[email protected](\"AddV2\")\ndef _AddGrad(op, grad):\n \"\"\"Gradient for Add.\"\"\"\n y = op.inputs[1]\n skip_input_indices = None\n try:\n skip_input_indices = op.skip_input_indices\n if skip_input_indices is not None and 1 in skip_input_indices and _IsScalar(\n y):\n return grad, None\n except AttributeError:\n # No gradient skipping, so do the full gradient computation\n pass\n x = op.inputs[0]\n if (isinstance(grad, ops.Tensor) and\n _ShapesFullySpecifiedAndEqual(x, y, grad)):\n return grad, grad\n (sx, rx, must_reduce_x), (sy, ry, must_reduce_y) = (\n SmartBroadcastGradientArgs(x, y, grad))\n if skip_input_indices is not None and 0 in skip_input_indices:\n gx = None\n elif not must_reduce_x:\n gx = grad\n else:\n gx = array_ops.reshape(math_ops.reduce_sum(grad, rx), sx)\n if skip_input_indices is not None and 1 in skip_input_indices:\n gy = None\n elif not must_reduce_y:\n gy = grad\n else:\n gy = array_ops.reshape(math_ops.reduce_sum(grad, ry), sy)\n return (gx, gy)\n\n\[email protected](\"Sub\")\ndef _SubGrad(op, grad):\n \"\"\"Gradient for Sub.\"\"\"\n y = op.inputs[1]\n skip_input_indices = None\n try:\n skip_input_indices = op.skip_input_indices\n if skip_input_indices is not None and 1 in skip_input_indices and _IsScalar(\n y):\n return grad, None\n except AttributeError:\n # No gradient skipping, so do the full gradient computation\n pass\n x = op.inputs[0]\n if (isinstance(grad, ops.Tensor) and\n _ShapesFullySpecifiedAndEqual(x, y, grad)):\n return grad, -grad\n (sx, rx, must_reduce_x), (sy, ry, must_reduce_y) = (\n SmartBroadcastGradientArgs(x, y, grad))\n if skip_input_indices is not None and 0 in skip_input_indices:\n gx = None\n elif not must_reduce_x:\n gx = grad\n else:\n gx = array_ops.reshape(math_ops.reduce_sum(grad, rx), sx)\n if skip_input_indices is not None and 1 in skip_input_indices:\n gy = None\n elif not must_reduce_y:\n gy = -grad\n else:\n gy = array_ops.reshape(math_ops.reduce_sum(-grad, ry), sy)\n return (gx, gy)\n\n\[email protected](\"Mul\")\ndef _MulGrad(op, grad):\n \"\"\"The gradient of scalar multiplication.\"\"\"\n y = op.inputs[1]\n skip_input_indices = None\n try:\n skip_input_indices = op.skip_input_indices\n if skip_input_indices is not None and 1 in skip_input_indices and _IsScalar(\n y):\n return gen_math_ops.mul(grad, math_ops.conj(y)), None\n except AttributeError:\n # No gradient skipping, so do the full gradient computation\n pass\n x = op.inputs[0]\n if (isinstance(grad, ops.Tensor) and\n _ShapesFullySpecifiedAndEqual(x, y, grad) and\n grad.dtype in (dtypes.int32, dtypes.float32)):\n return gen_math_ops.mul(grad, y), gen_math_ops.mul(grad, x)\n assert x.dtype.base_dtype == y.dtype.base_dtype, (x.dtype, \" vs. 
\", y.dtype)\n\n (sx, rx, must_reduce_x), (sy, ry, must_reduce_y) = (\n SmartBroadcastGradientArgs(x, y, grad))\n x = math_ops.conj(x)\n y = math_ops.conj(y)\n if skip_input_indices is not None and 0 in skip_input_indices:\n gx = None\n elif not must_reduce_x:\n gx = gen_math_ops.mul(grad, y)\n else:\n gx = array_ops.reshape(\n math_ops.reduce_sum(gen_math_ops.mul(grad, y), rx), sx)\n if skip_input_indices is not None and 1 in skip_input_indices:\n gy = None\n elif not must_reduce_y:\n gy = gen_math_ops.mul(x, grad)\n else:\n gy = array_ops.reshape(\n math_ops.reduce_sum(gen_math_ops.mul(x, grad), ry), sy)\n return (gx, gy)\n\n\[email protected](\"MulNoNan\")\ndef _MulNoNanGrad(op, grad):\n \"\"\"The gradient of scalar multiplication with NaN-suppression.\"\"\"\n x = op.inputs[0]\n y = op.inputs[1]\n if (isinstance(grad, ops.Tensor) and\n _ShapesFullySpecifiedAndEqual(x, y, grad)):\n return gen_math_ops.mul_no_nan(grad, y), gen_math_ops.mul_no_nan(x, grad)\n assert x.dtype.base_dtype == y.dtype.base_dtype, (x.dtype, \" vs. \", y.dtype)\n sx = array_ops.shape(x)\n sy = array_ops.shape(y)\n rx, ry = gen_array_ops.broadcast_gradient_args(sx, sy)\n return (array_ops.reshape(\n math_ops.reduce_sum(gen_math_ops.mul_no_nan(grad, y), rx), sx),\n array_ops.reshape(\n math_ops.reduce_sum(gen_math_ops.mul_no_nan(x, grad), ry), sy))\n\n\[email protected](\"Div\")\ndef _DivGrad(op, grad):\n \"\"\"The gradient for the Div operator.\"\"\"\n x = op.inputs[0]\n y = op.inputs[1]\n sx = array_ops.shape(x)\n sy = array_ops.shape(y)\n rx, ry = gen_array_ops.broadcast_gradient_args(sx, sy)\n x = math_ops.conj(x)\n y = math_ops.conj(y)\n if compat.forward_compatible(2019, 12, 14):\n return (array_ops.reshape(\n math_ops.reduce_sum(math_ops.xdivy(grad, y), rx), sx),\n array_ops.reshape(\n math_ops.reduce_sum(\n math_ops.mul_no_nan(\n math_ops.divide(math_ops.divide(-x, y), y), grad), ry),\n sy))\n else:\n return (array_ops.reshape(\n math_ops.reduce_sum(math_ops.divide(grad, y), rx), sx),\n array_ops.reshape(\n math_ops.reduce_sum(\n grad * math_ops.divide(math_ops.divide(-x, y), y), ry), sy))\n\n\[email protected](\"FloorDiv\")\ndef _FloorDivGrad(_, unused_grad):\n \"\"\"The gradient for the FloorDiv operator.\"\"\"\n return None, None\n\n\[email protected](\"FloorMod\")\ndef _FloorModGrad(op, grad):\n \"\"\"Returns grad * (1, -floor(x/y)).\"\"\"\n x = math_ops.conj(op.inputs[0])\n y = math_ops.conj(op.inputs[1])\n\n sx = array_ops.shape(x)\n sy = array_ops.shape(y)\n rx, ry = gen_array_ops.broadcast_gradient_args(sx, sy)\n floor_xy = math_ops.floor_div(x, y)\n gx = array_ops.reshape(math_ops.reduce_sum(grad, rx), sx)\n gy = array_ops.reshape(\n math_ops.reduce_sum(grad * math_ops.negative(floor_xy), ry), sy)\n return gx, gy\n\n\[email protected](\"TruncateDiv\")\ndef _TruncateDivGrad(_, unused_grad):\n return None, None\n\n\[email protected](\"RealDiv\")\ndef _RealDivGrad(op, grad):\n \"\"\"RealDiv op gradient.\"\"\"\n x = op.inputs[0]\n y = op.inputs[1]\n sx = array_ops.shape(x)\n sy = array_ops.shape(y)\n rx, ry = gen_array_ops.broadcast_gradient_args(sx, sy)\n x = math_ops.conj(x)\n y = math_ops.conj(y)\n if compat.forward_compatible(2019, 12, 14):\n return (array_ops.reshape(\n math_ops.reduce_sum(math_ops.xdivy(grad, y), rx), sx),\n array_ops.reshape(\n math_ops.reduce_sum(\n math_ops.mul_no_nan(\n math_ops.realdiv(math_ops.realdiv(-x, y), y), grad),\n ry), sy))\n else:\n return (array_ops.reshape(\n math_ops.reduce_sum(math_ops.realdiv(grad, y), rx), sx),\n array_ops.reshape(\n math_ops.reduce_sum(\n grad 
* math_ops.realdiv(math_ops.realdiv(-x, y), y), ry),\n sy))\n\n\[email protected](\"DivNoNan\")\ndef _DivNoNanGrad(op, grad):\n \"\"\"DivNoNan op gradient.\"\"\"\n x = op.inputs[0]\n y = op.inputs[1]\n sx = array_ops.shape(x)\n sy = array_ops.shape(y)\n rx, ry = gen_array_ops.broadcast_gradient_args(sx, sy)\n x = math_ops.conj(x)\n y = math_ops.conj(y)\n if compat.forward_compatible(2019, 12, 14):\n return (array_ops.reshape(\n math_ops.reduce_sum(math_ops.div_no_nan(grad, y), rx), sx),\n array_ops.reshape(\n math_ops.reduce_sum(\n math_ops.mul_no_nan(\n math_ops.div_no_nan(math_ops.div_no_nan(-x, y), y),\n grad), ry), sy))\n else:\n return (array_ops.reshape(\n math_ops.reduce_sum(math_ops.div_no_nan(grad, y), rx), sx),\n array_ops.reshape(\n math_ops.reduce_sum(\n grad * math_ops.div_no_nan(math_ops.div_no_nan(-x, y), y),\n ry), sy))\n\n\[email protected](\"Pow\")\ndef _PowGrad(op, grad):\n \"\"\"Returns grad * (y*x^(y-1), z*log(x)).\"\"\"\n x = op.inputs[0]\n y = op.inputs[1]\n use_mul_no_nan = compat.forward_compatible(2019, 12, 14)\n skip_input_indices = None\n try:\n skip_input_indices = op.skip_input_indices\n # TODO(mrry): If `y` is a constant, we can combine `tf.sub()` and the\n # constant `1` into a single constant op.\n if skip_input_indices is not None and 1 in skip_input_indices and _IsScalar(\n y):\n x = math_ops.conj(x)\n y = math_ops.conj(y)\n if use_mul_no_nan:\n return gen_math_ops.mul_no_nan(y * math_ops.pow(x, y - 1), grad), None\n else:\n return grad * y * math_ops.pow(x, y - 1), None\n\n except AttributeError:\n # No gradient skipping, so do the full gradient computation\n pass\n\n (sx, rx, must_reduce_x), (sy, ry, must_reduce_y) = (\n SmartBroadcastGradientArgs(x, y, grad))\n x = math_ops.conj(x)\n y = math_ops.conj(y)\n\n if skip_input_indices is None or 0 not in skip_input_indices:\n if use_mul_no_nan:\n gx = gen_math_ops.mul_no_nan(y * math_ops.pow(x, y - 1), grad)\n else:\n gx = grad * y * math_ops.pow(x, y - 1)\n if must_reduce_x:\n gx = array_ops.reshape(math_ops.reduce_sum(gx, rx), sx)\n else:\n gx = None\n\n if skip_input_indices is None or 1 not in skip_input_indices:\n z = math_ops.conj(op.outputs[0])\n\n # Avoid false singularity at x = 0\n if x.dtype.is_complex:\n # real(x) < 0 is fine for the complex case\n mask = math_ops.not_equal(x, 0)\n else:\n # There's no sensible real value to return if x < 0, so return 0\n mask = x > 0\n safe_x = array_ops.where(mask, x, array_ops.ones_like(x))\n log_x = array_ops.where(mask, math_ops.log(safe_x), array_ops.zeros_like(x))\n if use_mul_no_nan:\n gy = gen_math_ops.mul_no_nan(z * log_x, grad)\n else:\n gy = grad * z * log_x\n if must_reduce_y:\n gy = array_ops.reshape(math_ops.reduce_sum(gy, ry), sy)\n else:\n gy = None\n\n return gx, gy\n\n\ndef _MaximumMinimumGradInputOnly(op, grad, selector_op):\n x = op.inputs[0]\n y = op.inputs[1]\n zeros = array_ops.zeros_like(grad)\n xmask = selector_op(x, y)\n xgrad = array_ops.where_v2(xmask, grad, zeros)\n ygrad = None # Return None for ygrad since the config allows that.\n return (xgrad, ygrad)\n\n\ndef _MaximumMinimumGrad(op, grad, selector_op):\n \"\"\"Factor out the code for the gradient of Maximum or Minimum.\"\"\"\n y = op.inputs[1]\n skip_input_indices = None\n try:\n skip_input_indices = op.skip_input_indices\n if skip_input_indices is not None and 1 in skip_input_indices and _IsScalar(\n y):\n # When we want to get gradients for the first input only, and the second\n # input tensor is a scalar, we can do a much simpler calculation\n return 
_MaximumMinimumGradInputOnly(op, grad, selector_op)\n except AttributeError:\n # No gradient skipping, so do the full gradient computation\n pass\n x = op.inputs[0]\n gdtype = grad.dtype\n sx = array_ops.shape(x)\n sy = array_ops.shape(y)\n gradshape = array_ops.shape(grad)\n zeros = array_ops.zeros(gradshape, gdtype)\n xmask = selector_op(x, y)\n rx, ry = gen_array_ops.broadcast_gradient_args(sx, sy)\n if skip_input_indices is not None and 0 in skip_input_indices:\n gx = None\n else:\n xgrad = array_ops.where_v2(xmask, grad, zeros)\n gx = array_ops.reshape(math_ops.reduce_sum(xgrad, rx), sx)\n\n if skip_input_indices is not None and 1 in skip_input_indices:\n gy = None\n else:\n ygrad = array_ops.where_v2(xmask, zeros, grad)\n gy = array_ops.reshape(math_ops.reduce_sum(ygrad, ry), sy)\n\n return (gx, gy)\n\n\[email protected](\"Maximum\")\ndef _MaximumGrad(op, grad):\n \"\"\"Returns grad*(x > y, x <= y) with type of grad.\"\"\"\n return _MaximumMinimumGrad(op, grad, math_ops.greater_equal)\n\n\[email protected](\"Minimum\")\ndef _MinimumGrad(op, grad):\n \"\"\"Returns grad*(x < y, x >= y) with type of grad.\"\"\"\n return _MaximumMinimumGrad(op, grad, math_ops.less_equal)\n\n\[email protected](\"SquaredDifference\")\ndef _SquaredDifferenceGrad(op, grad):\n \"\"\"Returns the gradient for (x-y)^2.\"\"\"\n x = op.inputs[0]\n y = op.inputs[1]\n skip_input_indices = None\n try:\n skip_input_indices = op.skip_input_indices\n except AttributeError:\n # No gradient skipping, so do the full gradient computation\n pass\n\n with ops.control_dependencies([grad]):\n # The parens ensure that if grad is IndexedSlices, it'll get multiplied by\n # Tensor (not a number like 2.0) which causes it to convert to Tensor.\n x_grad = math_ops.scalar_mul(2.0, grad) * (x - y)\n\n if (isinstance(grad, ops.Tensor) and\n _ShapesFullySpecifiedAndEqual(x, y, grad)):\n return x_grad, -x_grad\n\n (sx, rx, must_reduce_x), (sy, ry, must_reduce_y) = (\n SmartBroadcastGradientArgs(x, y, grad))\n\n if skip_input_indices is not None and 0 in skip_input_indices:\n gx = None\n elif must_reduce_x:\n gx = array_ops.reshape(math_ops.reduce_sum(x_grad, rx), sx)\n else:\n gx = x_grad\n\n if skip_input_indices is not None and 1 in skip_input_indices:\n gy = None\n elif must_reduce_y:\n gy = -array_ops.reshape(math_ops.reduce_sum(x_grad, ry), sy)\n else:\n gy = -x_grad\n return (gx, gy)\n\n\n# Logical operations have no gradients.\nops.NotDifferentiable(\"Less\")\nops.NotDifferentiable(\"LessEqual\")\nops.NotDifferentiable(\"Greater\")\nops.NotDifferentiable(\"GreaterEqual\")\nops.NotDifferentiable(\"Equal\")\nops.NotDifferentiable(\"ApproximateEqual\")\nops.NotDifferentiable(\"NotEqual\")\nops.NotDifferentiable(\"LogicalAnd\")\nops.NotDifferentiable(\"LogicalOr\")\nops.NotDifferentiable(\"LogicalNot\")\n\n\[email protected](\"Select\")\ndef _SelectGrad(op, grad):\n c = op.inputs[0]\n x = op.inputs[1]\n zeros = array_ops.zeros_like(x)\n return (None, array_ops.where(c, grad, zeros), array_ops.where(\n c, zeros, grad))\n\n\[email protected](\"SelectV2\")\ndef _SelectGradV2(op, grad):\n c = op.inputs[0]\n x = op.inputs[1]\n y = op.inputs[2]\n zeros = array_ops.zeros([], dtype=grad.dtype.base_dtype)\n gx = array_ops.where_v2(c, grad, zeros)\n x_shape = array_ops.shape(x)\n output_shape = array_ops.shape(op.outputs[0])\n # Reduce away broadcasted leading dims.\n reduce_x, _ = gen_array_ops.broadcast_gradient_args(x_shape, output_shape)\n gx = math_ops.reduce_sum(gx, keepdims=True, axis=reduce_x)\n gx = array_ops.reshape(gx, x_shape)\n\n gy 
= array_ops.where_v2(c, zeros, grad)\n y_shape = array_ops.shape(y)\n # Reduce away broadcasted leading dims.\n reduce_y, _ = gen_array_ops.broadcast_gradient_args(y_shape, output_shape)\n gy = math_ops.reduce_sum(gy, keepdims=True, axis=reduce_y)\n gy = array_ops.reshape(gy, y_shape)\n\n return (None, gx, gy)\n\n\ndef _MatMulGradAgainstFirstOnly(op, grad):\n \"\"\"Gradient for MatMul, only for the first input.\"\"\"\n t_a = op.get_attr(\"transpose_a\")\n t_b = op.get_attr(\"transpose_b\")\n b = math_ops.conj(op.inputs[1])\n if not t_a and not t_b:\n grad_a = gen_math_ops.mat_mul(grad, b, transpose_b=True)\n elif not t_a and t_b:\n grad_a = gen_math_ops.mat_mul(grad, b)\n elif t_a and not t_b:\n grad_a = gen_math_ops.mat_mul(b, grad, transpose_b=True)\n elif t_a and t_b:\n grad_a = gen_math_ops.mat_mul(b, grad, transpose_a=True, transpose_b=True)\n return grad_a, None\n\n\ndef _MatMulGradAgainstSecondOnly(op, grad):\n \"\"\"Gradient for MatMul, only for the second input.\"\"\"\n t_a = op.get_attr(\"transpose_a\")\n t_b = op.get_attr(\"transpose_b\")\n a = math_ops.conj(op.inputs[0])\n if not t_a and not t_b:\n grad_b = gen_math_ops.mat_mul(a, grad, transpose_a=True)\n elif not t_a and t_b:\n grad_b = gen_math_ops.mat_mul(grad, a, transpose_a=True)\n elif t_a and not t_b:\n grad_b = gen_math_ops.mat_mul(a, grad)\n elif t_a and t_b:\n grad_b = gen_math_ops.mat_mul(grad, a, transpose_a=True, transpose_b=True)\n return None, grad_b\n\n\[email protected](\"MatMul\")\ndef _MatMulGrad(op, grad):\n \"\"\"Gradient for MatMul.\"\"\"\n try:\n skip_input_indices = op.skip_input_indices\n if skip_input_indices is not None:\n if 1 in skip_input_indices:\n return _MatMulGradAgainstFirstOnly(op, grad)\n elif 0 in skip_input_indices:\n return _MatMulGradAgainstSecondOnly(op, grad)\n except AttributeError:\n # No gradient skipping, so do the full gradient computation\n pass\n\n t_a = op.get_attr(\"transpose_a\")\n t_b = op.get_attr(\"transpose_b\")\n a = math_ops.conj(op.inputs[0])\n b = math_ops.conj(op.inputs[1])\n if not t_a and not t_b:\n grad_a = gen_math_ops.mat_mul(grad, b, transpose_b=True)\n grad_b = gen_math_ops.mat_mul(a, grad, transpose_a=True)\n elif not t_a and t_b:\n grad_a = gen_math_ops.mat_mul(grad, b)\n grad_b = gen_math_ops.mat_mul(grad, a, transpose_a=True)\n elif t_a and not t_b:\n grad_a = gen_math_ops.mat_mul(b, grad, transpose_b=True)\n grad_b = gen_math_ops.mat_mul(a, grad)\n elif t_a and t_b:\n grad_a = gen_math_ops.mat_mul(b, grad, transpose_a=True, transpose_b=True)\n grad_b = gen_math_ops.mat_mul(grad, a, transpose_a=True, transpose_b=True)\n return grad_a, grad_b\n\n\[email protected](\"SparseMatMul\")\ndef _SparseMatMulGrad(op, grad):\n \"\"\"Gradient for SparseMatMul.\"\"\"\n\n t_a = op.get_attr(\"transpose_a\")\n t_b = op.get_attr(\"transpose_b\")\n is_sparse = {}\n is_sparse[op.inputs[0].experimental_ref()] = op.get_attr(\"a_is_sparse\")\n is_sparse[op.inputs[1].experimental_ref()] = op.get_attr(\"b_is_sparse\")\n # Use heuristic to figure out if grad might be sparse\n is_sparse[grad.experimental_ref()] = not context.executing_eagerly() and (\n grad.op.type == \"ReluGrad\")\n\n def _SparseMatMul(t1, t2, out_dtype, transpose_a=False, transpose_b=False):\n \"\"\"Helper function to create SparseMatMul op.\"\"\"\n\n assert t1.experimental_ref() in is_sparse and t2.experimental_ref(\n ) in is_sparse\n t1_sparse = is_sparse[t1.experimental_ref()]\n t2_sparse = is_sparse[t2.experimental_ref()]\n if transpose_b:\n t2 = array_ops.transpose(t2)\n transpose_b = False\n prod = 
math_ops.matmul(\n t1,\n t2,\n transpose_a=transpose_a,\n transpose_b=transpose_b,\n a_is_sparse=t1_sparse,\n b_is_sparse=t2_sparse)\n if prod.dtype != out_dtype:\n prod = math_ops.cast(prod, out_dtype)\n return prod\n\n dtype_a = op.inputs[0].dtype\n dtype_b = op.inputs[1].dtype\n if not t_a and not t_b:\n return (_SparseMatMul(grad, op.inputs[1], dtype_a, transpose_b=True),\n _SparseMatMul(op.inputs[0], grad, dtype_b, transpose_a=True))\n elif not t_a and t_b:\n return (_SparseMatMul(grad, op.inputs[1], dtype_a),\n _SparseMatMul(grad, op.inputs[0], dtype_b, transpose_a=True))\n elif t_a and not t_b:\n return (_SparseMatMul(op.inputs[1], grad, dtype_a, transpose_b=True),\n _SparseMatMul(op.inputs[0], grad, dtype_b))\n elif t_a and t_b:\n return (_SparseMatMul(\n op.inputs[1], grad, dtype_a, transpose_a=True, transpose_b=True),\n _SparseMatMul(\n grad, op.inputs[0], dtype_b, transpose_a=True,\n transpose_b=True))\n\n\[email protected](\"Floor\")\ndef _FloorGrad(_, unused_grad):\n return [None]\n\n\[email protected](\"Ceil\")\ndef _CeilGrad(_, unused_grad):\n return [None]\n\n\[email protected](\"Round\")\ndef _RoundGrad(_, unused_grad):\n return [None]\n\n\[email protected](\"Rint\")\ndef _RintGrad(_, unused_grad):\n # the gradient of Rint is zero\n return [None]\n\n\[email protected](\"BatchMatMul\")\ndef _BatchMatMul(op, grad):\n \"\"\"Returns the gradient of x and y given the gradient of x * y.\"\"\"\n x = op.inputs[0]\n y = op.inputs[1]\n adj_x = op.get_attr(\"adj_x\")\n adj_y = op.get_attr(\"adj_y\")\n\n if not adj_x:\n if not adj_y:\n grad_x = math_ops.matmul(grad, y, adjoint_a=False, adjoint_b=True)\n grad_y = math_ops.matmul(x, grad, adjoint_a=True, adjoint_b=False)\n else:\n grad_x = math_ops.matmul(grad, y, adjoint_a=False, adjoint_b=False)\n grad_y = math_ops.matmul(grad, x, adjoint_a=True, adjoint_b=False)\n else:\n if not adj_y:\n grad_x = math_ops.matmul(y, grad, adjoint_a=False, adjoint_b=True)\n grad_y = math_ops.matmul(x, grad, adjoint_a=False, adjoint_b=False)\n else:\n grad_x = math_ops.matmul(y, grad, adjoint_a=True, adjoint_b=True)\n grad_y = math_ops.matmul(grad, x, adjoint_a=True, adjoint_b=True)\n\n return grad_x, grad_y\n\n\[email protected](\"BatchMatMulV2\")\ndef _BatchMatMulV2(op, grad):\n \"\"\"Returns the gradient of x and y given the gradient of x * y.\"\"\"\n x = op.inputs[0]\n y = op.inputs[1]\n adj_x = op.get_attr(\"adj_x\")\n adj_y = op.get_attr(\"adj_y\")\n\n if not adj_x:\n if not adj_y:\n grad_x = math_ops.matmul(grad, y, adjoint_a=False, adjoint_b=True)\n grad_y = math_ops.matmul(x, grad, adjoint_a=True, adjoint_b=False)\n else:\n grad_x = math_ops.matmul(grad, y, adjoint_a=False, adjoint_b=False)\n grad_y = math_ops.matmul(grad, x, adjoint_a=True, adjoint_b=False)\n else:\n if not adj_y:\n grad_x = math_ops.matmul(y, grad, adjoint_a=False, adjoint_b=True)\n grad_y = math_ops.matmul(x, grad, adjoint_a=False, adjoint_b=False)\n else:\n grad_x = math_ops.matmul(y, grad, adjoint_a=True, adjoint_b=True)\n grad_y = math_ops.matmul(grad, x, adjoint_a=True, adjoint_b=True)\n\n # Reduce along the broadcasted batch dimensions, if broadcasting is required.\n shape_x_static = x.get_shape()\n shape_y_static = y.get_shape()\n if not (shape_x_static.is_fully_defined() and\n shape_y_static.is_fully_defined() and\n shape_x_static == shape_y_static):\n sx = array_ops.shape(x)\n sy = array_ops.shape(y)\n rx, ry = gen_array_ops.broadcast_gradient_args(sx[:-2], sy[:-2])\n grad_x = array_ops.reshape(math_ops.reduce_sum(grad_x, rx), sx)\n grad_y = 
array_ops.reshape(math_ops.reduce_sum(grad_y, ry), sy)\n\n return grad_x, grad_y\n\n\nops.NotDifferentiable(\"Range\")\nops.NotDifferentiable(\"LinSpace\")\n\n\[email protected](\"Complex\")\ndef _ComplexGrad(op, grad):\n \"\"\"Returns the real and imaginary components of 'grad', respectively.\"\"\"\n x = op.inputs[0]\n y = op.inputs[1]\n sx = array_ops.shape(x)\n sy = array_ops.shape(y)\n rx, ry = gen_array_ops.broadcast_gradient_args(sx, sy)\n return (array_ops.reshape(math_ops.reduce_sum(math_ops.real(grad), rx), sx),\n array_ops.reshape(math_ops.reduce_sum(math_ops.imag(grad), ry), sy))\n\n\[email protected](\"Real\")\ndef _RealGrad(_, grad):\n \"\"\"Returns 'grad' as the real part and set the imaginary part 0.\"\"\"\n zero = constant_op.constant(0, dtype=grad.dtype)\n return math_ops.complex(grad, zero)\n\n\[email protected](\"Imag\")\ndef _ImagGrad(_, grad):\n \"\"\"Returns 'grad' as the imaginary part and set the real part 0.\"\"\"\n zero = constant_op.constant(0, dtype=grad.dtype)\n return math_ops.complex(zero, grad)\n\n\[email protected](\"Angle\")\ndef _AngleGrad(op, grad):\n \"\"\"Returns -grad / (Im(x) + iRe(x))\"\"\"\n x = op.inputs[0]\n with ops.control_dependencies([grad]):\n re = math_ops.real(x)\n im = math_ops.imag(x)\n z = math_ops.reciprocal(math_ops.complex(im, re))\n zero = constant_op.constant(0, dtype=grad.dtype)\n complex_grad = math_ops.complex(grad, zero)\n return -complex_grad * z\n\n\[email protected](\"Conj\")\ndef _ConjGrad(_, grad):\n \"\"\"Returns the complex conjugate of grad.\"\"\"\n return math_ops.conj(grad)\n\n\[email protected](\"ComplexAbs\")\ndef _ComplexAbsGrad(op, grad):\n \"\"\"Returns the gradient of ComplexAbs.\"\"\"\n return math_ops.div_no_nan(\n math_ops.complex(\n grad, array_ops.zeros_like(grad)) * op.inputs[0],\n math_ops.complex(\n op.outputs[0], array_ops.zeros_like(op.outputs[0])))\n\n\[email protected](\"Cast\")\ndef _CastGrad(op, grad):\n t = [\n dtypes.float16, dtypes.float32, dtypes.float64, dtypes.bfloat16,\n dtypes.complex64, dtypes.complex128\n ]\n src_type = op.inputs[0].dtype.base_dtype\n dst_type = grad.dtype.base_dtype\n if src_type in t and dst_type in t:\n return math_ops.cast(grad, src_type)\n else:\n return None\n\n\[email protected](\"Cross\")\ndef _CrossGrad(op, grad):\n u = op.inputs[0]\n v = op.inputs[1]\n return (math_ops.cross(v, grad), math_ops.cross(grad, u))\n\n\[email protected](\"Cumsum\")\ndef _CumsumGrad(op, grad):\n axis = op.inputs[1]\n exclusive = op.get_attr(\"exclusive\")\n reverse = op.get_attr(\"reverse\")\n return [\n math_ops.cumsum(grad, axis, exclusive=exclusive, reverse=not reverse),\n None\n ]\n\n\[email protected](\"Cumprod\")\ndef _CumprodGrad(op, grad):\n x = op.inputs[0]\n axis = op.inputs[1]\n exclusive = op.get_attr(\"exclusive\")\n reverse = op.get_attr(\"reverse\")\n\n # TODO This fails when x contains 0 and should be fixed\n prod = math_ops.cumprod(x, axis, exclusive=exclusive, reverse=reverse)\n out = math_ops.cumsum(\n prod * grad, axis, exclusive=exclusive, reverse=not reverse)\n return [out / x, None]\n\n\[email protected](\"CumulativeLogsumexp\")\ndef _CumulativeLogsumexpGrad(op, grad):\n x = op.inputs[0]\n axis = op.inputs[1]\n cumulative_logsumexp = op.outputs[0]\n\n exclusive = op.get_attr(\"exclusive\")\n reverse = op.get_attr(\"reverse\")\n\n # Split the incoming gradient into positive and negative part\n # in order to take logs. 
This is required for stable results.\n log_grad_positive = array_ops.where_v2(\n math_ops.greater(grad, 0),\n math_ops.log(grad),\n grad.dtype.min)\n\n log_grad_negative = array_ops.where_v2(\n math_ops.less(grad, 0),\n math_ops.log(-grad),\n grad.dtype.min)\n\n output_pos = math_ops.exp(\n math_ops.cumulative_logsumexp(\n log_grad_positive - cumulative_logsumexp,\n axis=axis, reverse=not reverse, exclusive=exclusive) + x)\n\n output_neg = math_ops.exp(\n math_ops.cumulative_logsumexp(\n log_grad_negative - cumulative_logsumexp,\n axis=axis, reverse=not reverse, exclusive=exclusive) + x)\n\n return [output_pos - output_neg, None]\n\n\[email protected](\"NextAfter\")\ndef _NextAfterGrad(op, grad):\n \"\"\"Returns gradient of nextafter(x1, x2) with respect to x1 and x2.\"\"\"\n x1 = op.inputs[0]\n x2 = op.inputs[1]\n s_x1 = array_ops.shape(x1)\n s_x2 = array_ops.shape(x2)\n r_x1, r_x2 = gen_array_ops.broadcast_gradient_args(s_x1, s_x2)\n with ops.control_dependencies([grad]):\n partial_x1 = array_ops.ones(s_x1, dtype=x1.dtype)\n partial_x2 = array_ops.zeros(s_x2, dtype=x2.dtype)\n return (array_ops.reshape(\n math_ops.reduce_sum(partial_x1 * grad, r_x1), s_x1),\n array_ops.reshape(\n math_ops.reduce_sum(partial_x2 * grad, r_x2), s_x2))\n",
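The three cases enumerated in the _UnsortedSegmentProdGrad docstring above can be observed directly through tf.GradientTape. The following is only a minimal sketch with illustrative values (one segment containing a single zero, one segment containing none); it is not part of the serialized source above.

import tensorflow as tf

# Illustrative values: segment 0 = [2., 0.] has exactly one zero (case 2),
# segment 1 = [3., 4.] has none (case 1).
data = tf.constant([2.0, 0.0, 3.0, 4.0])
segment_ids = tf.constant([0, 0, 1, 1])

with tf.GradientTape() as tape:
  tape.watch(data)
  prod = tf.math.unsorted_segment_prod(data, segment_ids, num_segments=2)

# For the zero entry the gradient is the product of the remaining segment
# entries (2.0); for the non-zero entry of that segment it is 0. In the
# zero-free segment each gradient is the product of the other entries.
print(tape.gradient(prod, data))  # [0., 2., 4., 3.]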
"# Copyright 2018 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"A tf.distribute.Strategy for running on a single device.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom tensorflow.python.distribute import device_util\nfrom tensorflow.python.distribute import distribute_lib\nfrom tensorflow.python.distribute import input_lib\nfrom tensorflow.python.distribute import numpy_dataset\nfrom tensorflow.python.distribute import values\nfrom tensorflow.python.framework import constant_op\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import control_flow_ops\nfrom tensorflow.python.util import nest\nfrom tensorflow.python.util.tf_export import tf_export\n\n\n# TODO(josh11b): Do we wrap values in types to generate errors if you are\n# doing something that won't work with other DistributionStrategy\n# implementations?\n\n\n@tf_export(\"distribute.OneDeviceStrategy\", v1=[])\nclass OneDeviceStrategy(distribute_lib.Strategy):\n \"\"\"A distribution strategy for running on a single device.\n\n Using this strategy will place any variables created in its scope on the\n specified device. Input distributed through this strategy will be\n prefetched to the specified device. Moreover, any functions called via\n `strategy.experimental_run_v2` will also be placed on the specified device\n as well.\n\n Typical usage of this strategy could be testing your code with the\n tf.distribute.Strategy API before switching to other strategies which\n actually distribute to multiple devices/machines.\n\n For example:\n ```\n strategy = tf.distribute.OneDeviceStrategy(device=\"/gpu:0\")\n\n with strategy.scope():\n v = tf.Variable(1.0)\n print(v.device) # /job:localhost/replica:0/task:0/device:GPU:0\n\n def step_fn(x):\n return x * 2\n\n result = 0\n for i in range(10):\n result += strategy.experimental_run_v2(step_fn, args=(i,))\n print(result) # 90\n ```\n \"\"\"\n\n def __init__(self, device):\n \"\"\"Creates a `OneDeviceStrategy`.\n\n Args:\n device: Device string identifier for the device on which the variables\n should be placed. See class docs for more details on how the device is\n used. Examples: \"/cpu:0\", \"/gpu:0\", \"/device:CPU:0\", \"/device:GPU:0\"\n \"\"\"\n super(OneDeviceStrategy, self).__init__(OneDeviceExtended(self, device))\n\n def experimental_distribute_dataset(self, dataset): # pylint: disable=useless-super-delegation\n \"\"\"Distributes a tf.data.Dataset instance provided via dataset.\n\n In this case, there is only one device, so this is only a thin wrapper\n around the input dataset. It will, however, prefetch the input data to the\n specified device. 
The returned distributed dataset can be iterated over\n similar to how regular datasets can.\n\n NOTE: Currently, the user cannot add any more transformations to a\n distributed dataset.\n\n Example:\n ```\n strategy = tf.distribute.OneDeviceStrategy()\n dataset = tf.data.Dataset.range(10).batch(2)\n dist_dataset = strategy.experimental_distribute_dataset(dataset)\n for x in dist_dataset:\n print(x) # [0, 1], [2, 3],...\n ```\n Args:\n dataset: `tf.data.Dataset` to be prefetched to device.\n\n Returns:\n A \"distributed `Dataset`\" that the caller can iterate over.\n \"\"\"\n return super(OneDeviceStrategy, self).experimental_distribute_dataset(\n dataset)\n\n def experimental_distribute_datasets_from_function(self, dataset_fn): # pylint: disable=useless-super-delegation\n \"\"\"Distributes `tf.data.Dataset` instances created by calls to `dataset_fn`.\n\n `dataset_fn` will be called once for each worker in the strategy. In this\n case, we only have one worker and one device so `dataset_fn` is called\n once.\n\n The `dataset_fn` should take an `tf.distribute.InputContext` instance where\n information about batching and input replication can be accessed:\n\n ```\n def dataset_fn(input_context):\n batch_size = input_context.get_per_replica_batch_size(global_batch_size)\n d = tf.data.Dataset.from_tensors([[1.]]).repeat().batch(batch_size)\n return d.shard(\n input_context.num_input_pipelines, input_context.input_pipeline_id)\n\n inputs = strategy.experimental_distribute_datasets_from_function(dataset_fn)\n\n for batch in inputs:\n replica_results = strategy.experimental_run_v2(replica_fn, args=(batch,))\n ```\n\n IMPORTANT: The `tf.data.Dataset` returned by `dataset_fn` should have a\n per-replica batch size, unlike `experimental_distribute_dataset`, which uses\n the global batch size. This may be computed using\n `input_context.get_per_replica_batch_size`.\n\n Args:\n dataset_fn: A function taking a `tf.distribute.InputContext` instance and\n returning a `tf.data.Dataset`.\n\n Returns:\n A \"distributed `Dataset`\", which the caller can iterate over like regular\n datasets.\n \"\"\"\n return super(\n OneDeviceStrategy, self).experimental_distribute_datasets_from_function(\n dataset_fn)\n\n def experimental_local_results(self, value): # pylint: disable=useless-super-delegation\n \"\"\"Returns the list of all local per-replica values contained in `value`.\n\n In `OneDeviceStrategy`, the `value` is always expected to be a single\n value, so the result is just the value in a tuple.\n\n Args:\n value: A value returned by `experimental_run()`, `experimental_run_v2()`,\n `extended.call_for_each_replica()`, or a variable created in `scope`.\n\n Returns:\n A tuple of values contained in `value`. If `value` represents a single\n value, this returns `(value,).`\n \"\"\"\n return super(OneDeviceStrategy, self).experimental_local_results(value)\n\n def experimental_run_v2(self, fn, args=(), kwargs=None): # pylint: disable=useless-super-delegation\n \"\"\"Run `fn` on each replica, with the given arguments.\n\n In `OneDeviceStrategy`, `fn` is simply called within a device scope for the\n given device, with the provided arguments.\n\n Args:\n fn: The function to run. 
The output must be a `tf.nest` of `Tensor`s.\n args: (Optional) Positional arguments to `fn`.\n kwargs: (Optional) Keyword arguments to `fn`.\n\n Returns:\n Return value from running `fn`.\n \"\"\"\n return super(OneDeviceStrategy, self).experimental_run_v2(fn, args, kwargs)\n\n def reduce(self, reduce_op, value, axis): # pylint: disable=useless-super-delegation\n \"\"\"Reduce `value` across replicas.\n\n In `OneDeviceStrategy`, there is only one replica, so if axis=None, value\n is simply returned. If axis is specified as something other than None,\n such as axis=0, value is reduced along that axis and returned.\n\n Example:\n ```\n t = tf.range(10)\n\n result = strategy.reduce(tf.distribute.ReduceOp.SUM, t, axis=None).numpy()\n # result: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]\n\n result = strategy.reduce(tf.distribute.ReduceOp.SUM, t, axis=0).numpy()\n # result: 45\n ```\n\n Args:\n reduce_op: A `tf.distribute.ReduceOp` value specifying how values should\n be combined.\n value: A \"per replica\" value, e.g. returned by `experimental_run_v2` to\n be combined into a single tensor.\n axis: Specifies the dimension to reduce along within each\n replica's tensor. Should typically be set to the batch dimension, or\n `None` to only reduce across replicas (e.g. if the tensor has no batch\n dimension).\n\n Returns:\n A `Tensor`.\n \"\"\"\n return super(OneDeviceStrategy, self).reduce(reduce_op, value, axis)\n\n def scope(self): # pylint: disable=useless-super-delegation\n \"\"\"Returns a context manager selecting this Strategy as current.\n\n Inside a `with strategy.scope():` code block, this thread\n will use a variable creator set by `strategy`, and will\n enter its \"cross-replica context\".\n\n In `OneDeviceStrategy`, all variables created inside `strategy.scope()`\n will be on `device` specified at strategy construction time.\n See example in the docs for this class.\n\n Returns:\n A context manager to use for creating variables with this strategy.\n \"\"\"\n return super(OneDeviceStrategy, self).scope()\n\n\n@tf_export(v1=[\"distribute.OneDeviceStrategy\"]) # pylint: disable=missing-docstring\nclass OneDeviceStrategyV1(distribute_lib.StrategyV1):\n\n __doc__ = OneDeviceStrategy.__doc__.replace(\n \"For example:\\n ```\",\n \"For example:\\n ```\\n tf.enable_eager_execution()\")\n\n def __init__(self, device):\n super(OneDeviceStrategyV1, self).__init__(OneDeviceExtended(self, device))\n __init__.__doc__ = OneDeviceStrategy.__init__.__doc__\n\n\n# TODO(josh11b): Switch to V2 after callers have been updated to only V2 APIs.\nclass OneDeviceExtended(distribute_lib.StrategyExtendedV1):\n \"\"\"Implementation of OneDeviceStrategy.\"\"\"\n\n def __init__(self, container_strategy, device):\n super(OneDeviceExtended, self).__init__(container_strategy)\n self._device = device_util.resolve(device)\n suffix_loc = self._device.rfind(\"/\")\n self._input_device = self._device[:suffix_loc] + \"/device:CPU:0\"\n worker_device_pairs = [(self._input_device, [self._device])]\n device_map = values.SingleDeviceMap(self._device)\n self._input_workers = input_lib.InputWorkers(\n device_map, worker_device_pairs)\n\n def _create_variable(self, next_creator, *args, **kwargs):\n colocate_with = kwargs.pop(\"colocate_with\", None)\n if colocate_with is None:\n with ops.device(self._device):\n return next_creator(*args, **kwargs)\n elif isinstance(colocate_with, numpy_dataset.SingleDevice):\n with ops.device(colocate_with.device):\n return next_creator(*args, **kwargs)\n else:\n with ops.colocate_with(colocate_with):\n return 
next_creator(*args, **kwargs)\n\n def _validate_colocate_with_variable(self, colocate_with_variable):\n values.validate_colocate(colocate_with_variable, self)\n\n def _make_dataset_iterator(self, dataset):\n \"\"\"Make iterator from dataset without splitting the batch.\"\"\"\n # Note that split_batch_by argument is not passed because it is always 1 in\n # this strategy, and adding it adds unnecessary overhead to the dataset.\n return input_lib.DatasetIterator(dataset, self._input_workers,\n self._container_strategy())\n\n def _make_input_fn_iterator(\n self,\n input_fn,\n replication_mode=distribute_lib.InputReplicationMode.PER_WORKER):\n return input_lib.InputFunctionIterator(input_fn, self._input_workers,\n [distribute_lib.InputContext()],\n self._container_strategy())\n\n def _experimental_make_numpy_dataset(self, numpy_input, session):\n return numpy_dataset.one_host_numpy_dataset(\n numpy_input, numpy_dataset.SingleDevice(self._input_device), session)\n\n def _broadcast_to(self, tensor, destinations):\n del destinations\n return tensor\n\n def _experimental_distribute_dataset(self, dataset):\n # Note that split_batch_by argument is not passed because it is always 1 in\n # this strategy, and adding it adds unnecessary overhead to the dataset.\n return input_lib.get_distributed_dataset(dataset, self._input_workers,\n self._container_strategy())\n\n def _experimental_distribute_datasets_from_function(self, dataset_fn):\n return input_lib.get_distributed_datasets_from_function(\n dataset_fn,\n self._input_workers,\n [distribute_lib.InputContext()],\n self._container_strategy())\n\n # TODO(priyag): Deal with OutOfRange errors once b/111349762 is fixed.\n def _experimental_run_steps_on_iterator(self, fn, iterator, iterations,\n initial_loop_values=None):\n if initial_loop_values is None:\n initial_loop_values = {}\n initial_loop_values = nest.flatten(initial_loop_values)\n\n ctx = input_lib.MultiStepContext()\n def body(i, *args):\n \"\"\"A wrapper around `fn` to create the while loop body.\"\"\"\n del args\n fn_result = fn(ctx, iterator.get_next())\n flat_last_step_outputs = nest.flatten(ctx.last_step_outputs)\n with ops.control_dependencies([fn_result]):\n return [i + 1] + flat_last_step_outputs\n\n # We capture the control_flow_context at this point, before we run `fn`\n # inside a while_loop. This is useful in cases where we might need to exit\n # these contexts and get back to the outer context to do some things, for\n # e.g. create an op which should be evaluated only once at the end of the\n # loop on the host. 
One such usage is in creating metrics' value op.\n self._outer_control_flow_context = (\n ops.get_default_graph()._get_control_flow_context()) # pylint: disable=protected-access\n\n # TODO(priyag): Use max_iterations instead of an explicit counter.\n cond = lambda i, *args: i < iterations\n i = constant_op.constant(0)\n loop_result = control_flow_ops.while_loop(\n cond, body, [i] + initial_loop_values, name=\"\",\n parallel_iterations=1, back_prop=False, swap_memory=False,\n return_same_structure=True)\n del self._outer_control_flow_context\n\n ctx.run_op = control_flow_ops.group(loop_result)\n\n # Convert the last_step_outputs from a list to the original dict structure\n # of last_step_outputs.\n last_step_tensor_outputs = loop_result[1:]\n last_step_tensor_outputs_dict = nest.pack_sequence_as(\n ctx.last_step_outputs, last_step_tensor_outputs)\n\n ctx._set_last_step_outputs(last_step_tensor_outputs_dict) # pylint: disable=protected-access\n return ctx\n\n def _call_for_each_replica(self, fn, args, kwargs):\n strategy = self._container_strategy()\n with ops.device(self._device), _OneDeviceReplicaContext(strategy):\n return fn(*args, **kwargs)\n\n def _reduce_to(self, reduce_op, value, destinations):\n del reduce_op, destinations\n return value\n\n def _update(self, var, fn, args, kwargs, group):\n # The implementations of _update() and _update_non_slot() are identical\n # except _update() passes `var` as the first argument to `fn()`.\n return self._update_non_slot(var, fn, (var,) + tuple(args), kwargs, group)\n\n def _update_non_slot(self, colocate_with, fn, args, kwargs, group):\n del colocate_with\n with ops.device(self._device), distribute_lib.UpdateContext(self._device):\n result = fn(*args, **kwargs)\n if group:\n return result\n else:\n return nest.map_structure(self._local_results, result)\n\n def read_var(self, replica_local_var):\n \"\"\"Read the aggregate value of a replica-local variable.\"\"\"\n return array_ops.identity(replica_local_var)\n\n def _local_results(self, value):\n return (value,)\n\n def value_container(self, value):\n return value\n\n def _in_multi_worker_mode(self):\n \"\"\"Whether this strategy indicates working in multi-worker settings.\"\"\"\n return False\n\n @property\n def _num_replicas_in_sync(self):\n return 1\n\n @property\n def worker_devices(self):\n return (self._device,)\n\n @property\n def parameter_devices(self):\n return (self._device,)\n\n def non_slot_devices(self, var_list):\n del var_list\n return (self._device,)\n\n @property\n def experimental_should_init(self):\n return True\n\n @property\n def experimental_between_graph(self):\n return False\n\n @property\n def should_checkpoint(self):\n return True\n\n @property\n def should_save_summary(self):\n return True\n\n # TODO(priyag): Delete this once all strategies use global batch size.\n @property\n def _global_batch_size(self):\n \"\"\"Global and per-replica batching are equivalent for OneDeviceStrategy.\"\"\"\n return True\n\n @property\n def _support_per_replica_values(self):\n return False\n\n\nclass _OneDeviceReplicaContext(distribute_lib.ReplicaContext):\n \"\"\"ReplicaContext for OneDeviceStrategy.\"\"\"\n\n def __init__(self, strategy):\n zero = constant_op.constant(0, dtypes.int32)\n distribute_lib.ReplicaContext.__init__(\n self, strategy, replica_id_in_sync_group=zero)\n\n @property\n def devices(self):\n return self._strategy.extended.worker_devices\n",
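The OneDeviceStrategy pieces documented above (scope(), experimental_distribute_dataset(), experimental_run_v2() and reduce()) compose as in the following minimal sketch; the device string, dataset, and step function are placeholders chosen for illustration and are not taken from the serialized source above.

import tensorflow as tf

strategy = tf.distribute.OneDeviceStrategy(device="/cpu:0")  # placeholder device

with strategy.scope():
  v = tf.Variable(2.0)  # created in scope, so placed on the strategy's device

def step_fn(x):
  return v * tf.cast(x, tf.float32)

dataset = tf.data.Dataset.range(4).batch(2)
dist_dataset = strategy.experimental_distribute_dataset(dataset)

total = 0.0
for batch in dist_dataset:
  per_replica = strategy.experimental_run_v2(step_fn, args=(batch,))
  # Single replica, so reduce() here only folds the batch dimension (axis=0),
  # matching the axis semantics described in the reduce() docstring above.
  total += strategy.reduce(tf.distribute.ReduceOp.SUM, per_replica, axis=0)
print(total)  # 2 * (0 + 1 + 2 + 3) = 12.0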
"# Copyright 2015 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Implements the graph generation for computation of gradients.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport collections\nimport contextlib\n\nfrom six.moves import xrange, zip # pylint: disable=redefined-builtin\n\nfrom tensorflow.core.framework import attr_value_pb2\nfrom tensorflow.python.eager import backprop\nfrom tensorflow.python.eager import backprop_util\nfrom tensorflow.python.eager import context\nfrom tensorflow.python.framework import constant_op\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import function as framework_function\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.framework import tensor_shape\nfrom tensorflow.python.framework.func_graph import FuncGraph\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import control_flow_ops\nfrom tensorflow.python.ops import control_flow_state\nfrom tensorflow.python.ops import control_flow_util\nfrom tensorflow.python.ops import default_gradient\nfrom tensorflow.python.ops import functional_ops\nfrom tensorflow.python.ops import math_ops\nfrom tensorflow.python.ops import resource_variable_ops\nfrom tensorflow.python.ops.unconnected_gradients import UnconnectedGradients\nfrom tensorflow.python.platform import tf_logging as logging\nfrom tensorflow.python.util import compat\nfrom tensorflow.python.util import object_identity\nfrom tensorflow.python.util.compat import collections_abc\nfrom tensorflow.python.util.tf_export import tf_export\n\n\ndef _MarkReachedOps(from_ops, reached_ops, func_graphs):\n \"\"\"Mark all ops reached from \"from_ops\".\n\n Args:\n from_ops: list of Operations.\n reached_ops: set of Operations.\n func_graphs: list of FuncGraphs. This method will traverse through\n these functions if they capture from_ops or any reachable ops.\n \"\"\"\n queue = collections.deque()\n queue.extend(from_ops)\n while queue:\n op = queue.popleft()\n if op not in reached_ops:\n reached_ops.add(op)\n for output in op.outputs:\n if _IsBackpropagatable(output):\n queue.extend(_Consumers(output, func_graphs))\n\n\ndef _PendingCount(to_ops, from_ops, colocate_gradients_with_ops, func_graphs,\n xs_set):\n \"\"\"Initialize the pending count for ops between two lists of Operations.\n\n 'pending_count[op]' indicates the number of backprop inputs\n to this operation.\n\n Args:\n to_ops: list of Operations.\n from_ops: list of Operations.\n colocate_gradients_with_ops: Python bool. See docstring of gradients().\n func_graphs: list of FuncGraphs. This method will traverse through\n these functions if they capture from_ops or any reachable ops. 
This is\n useful if to_ops occur in a function and from_ops are in an outer function\n or graph.\n xs_set: ObjectIdentitySet of Tensors.\n\n Returns:\n A tuple containing: (1) the subset of to_ops reachable from from_ops by a\n path of zero or more backpropagatable tensors, (2) a mapping from operation\n to the number of backprop inputs to that op, and (3) a ControlFlowState\n object which is not None if the ops between from_ops and to_ops contain\n control flow loops.\n \"\"\"\n # Mark reachable ops from from_ops.\n reached_ops = set()\n _MarkReachedOps(from_ops, reached_ops, func_graphs)\n # X in reached_ops iff X is reachable from from_ops by a path of zero or more\n # backpropagatable tensors.\n\n reachable_to_ops = set(op for op in to_ops if op in reached_ops)\n\n # Mark between ops.\n between_ops = set()\n between_op_list = []\n queue = collections.deque()\n queue.extend(to_ops)\n while queue:\n op = queue.popleft()\n # We are interested in this op.\n if op in reached_ops:\n between_ops.add(op)\n between_op_list.append(op)\n # Clear the boolean so we won't add the inputs again.\n reached_ops.remove(op)\n for inp in _NonEagerInputs(op, xs_set):\n queue.append(inp.op)\n # X in between_ops iff X is on a path of zero or more backpropagatable tensors\n # between from_ops and to_ops\n\n # 'loop_state' is None if there are no while loops.\n loop_state = control_flow_state.MaybeCreateControlFlowState(\n between_op_list, between_ops, colocate_gradients_with_ops)\n\n # Initialize pending count for between ops.\n pending_count = collections.defaultdict(int)\n for op in between_op_list:\n for x in _NonEagerInputs(op, xs_set):\n if x.op in between_ops:\n pending_count[x.op] += 1\n\n return reachable_to_ops, pending_count, loop_state\n\n\ndef _AsList(x):\n return x if isinstance(x, (list, tuple)) else [x]\n\n\ndef _DefaultGradYs(grad_ys,\n ys,\n colocate_gradients_with_ops,\n gradient_uid=\"__unsupported__\"):\n \"\"\"Fill in default values for grad_ys.\n\n Args:\n grad_ys: List of gradients, can contain None.\n ys: List of tensors.\n colocate_gradients_with_ops: If True, try colocating gradients with\n the corresponding op.\n gradient_uid: A unique identifier within the graph indicating\n which invocation of gradients is being executed. 
Used to cluster\n ops for compilation.\n\n Returns:\n A list of gradients to use, without None.\n\n Raises:\n ValueError: If sizes of gradients and inputs don't match\n TypeError: If type of any gradient is not valid for its input.\n \"\"\"\n if len(grad_ys) != len(ys):\n raise ValueError(\"Passed %d grad_ys for %d ys\" % (len(grad_ys), len(ys)))\n grad_ys = ops.convert_n_to_tensor_or_indexed_slices(grad_ys, name=\"grad_y\")\n new_grad_ys = []\n for i, (y, grad_y) in enumerate(zip(ys, grad_ys)):\n with _maybe_colocate_with(y.op, gradient_uid, colocate_gradients_with_ops):\n if grad_y is None:\n if y.dtype.is_complex:\n raise TypeError(\n \"Gradients of complex tensors must set grad_ys (y.dtype = %r)\" %\n y.dtype)\n new_grad_ys.append(\n array_ops.fill(\n array_ops.shape(y),\n constant_op.constant(1, dtype=y.dtype, name=\"grad_ys_%d\" % i)))\n continue\n if y.dtype.is_floating or y.dtype.is_integer:\n if not grad_y.dtype.is_floating and not grad_y.dtype.is_integer:\n raise TypeError(\n \"Gradient type %s generated for real or \"\n \"integer-valued tensor %s with type %s must be \"\n \"real or integer\" % (dtypes.as_dtype(grad_y.dtype).name, y,\n dtypes.as_dtype(y.dtype).name))\n elif y.dtype.is_complex:\n if not grad_y.dtype.is_complex:\n raise TypeError(\n \"Gradient type %s generated for complex-valued \"\n \"tensor %s with type %s must be real\" % (dtypes.as_dtype(\n grad_y.dtype).name, y, dtypes.as_dtype(y.dtype).name))\n elif y.dtype == dtypes.variant:\n if grad_y.dtype != dtypes.variant:\n raise TypeError(\n \"Gradient type %s generated for variant \"\n \"tensor %s with type %s must be variant\" % (dtypes.as_dtype(\n grad_y.dtype).name, y, dtypes.as_dtype(y.dtype).name))\n elif y.dtype == dtypes.resource:\n # We assume y is the handle of a ResourceVariable. 
The gradient of a\n # ResourceVariable should be a numeric value, not another resource.\n if grad_y.dtype == dtypes.resource:\n raise TypeError(\"Input gradient %s for resource tensor %s should not \"\n \"be a resource\" % (grad_y, y))\n else:\n raise TypeError(\n \"Tensor %s with type %s must be numeric \"\n \"to obtain a default gradient\" % (y, dtypes.as_dtype(y.dtype).name))\n # Create a grad_y tensor in the name scope of the gradient.\n # Required for TensorArrays to identify which gradient call a\n # grad_y value is coming from.\n if isinstance(grad_y, ops.IndexedSlices):\n new_grad_ys.append(\n ops.IndexedSlices(\n indices=(array_ops.identity(\n grad_y.indices, name=\"grad_ys_%d_indices\" % i)\n if isinstance(grad_y.indices, ops.Tensor) else\n grad_y.indices),\n values=(array_ops.identity(\n grad_y.values, name=\"grad_ys_%d_values\" % i) if isinstance(\n grad_y.values, ops.Tensor) else grad_y.values),\n dense_shape=(array_ops.identity(\n grad_y.dense_shape, name=\"grad_ys_%d_shape\" % i)\n if isinstance(grad_y.dense_shape, ops.Tensor) else\n grad_y.dense_shape)))\n else:\n new_grad_ys.append(array_ops.identity(grad_y, name=\"grad_ys_%d\" % i))\n\n return new_grad_ys\n\n\ndef _IsBackpropagatable(tensor):\n if backprop_util.IsTrainable(tensor):\n return True\n dtype = dtypes.as_dtype(tensor.dtype)\n return dtype.base_dtype == dtypes.bfloat16\n\n\ndef _VerifyGeneratedGradients(grads, op):\n \"\"\"Verify that gradients are valid in number and type.\n\n Args:\n grads: List of generated gradients.\n op: Operation for which the gradients where generated.\n\n Raises:\n ValueError: if sizes of gradients and inputs don't match.\n TypeError: if type of any gradient is not valid for its input.\n \"\"\"\n # While ops have inputs added to them during the gradient computation, so we\n # skip the below check. See while_v2 for details.\n if op.type == \"While\" or op.type == \"StatelessWhile\":\n return\n\n if len(grads) != len(op.inputs):\n raise ValueError(\"Num gradients %d generated for op %s do not match num \"\n \"inputs %d\" % (len(grads), op.node_def, len(op.inputs)))\n\n\ndef _StopOps(from_ops, stop_gradient_ops, pending_count, xs_set):\n \"\"\"The set of ops that terminate the gradient computation.\n\n This computes the frontier of the forward graph *before* which backprop\n should stop. Operations in the returned set will not be differentiated.\n This set is defined as the subset of `from_ops` containing ops that have\n no predecessor in `from_ops`. `pending_count` is the result of\n `_PendingCount(xs, from_ops)`. 
An 'op' has predecessors in `from_ops`\n iff pending_count[op] > 0.\n\n In addition, none of `stop_gradient_ops` will be differentiated.\n\n Args:\n from_ops: list of Operations.\n stop_gradient_ops: list of Operations never to backprop through.\n pending_count: mapping from operation to number of backprop inputs.\n xs_set: ObjectIdentitySet of Tensors.\n\n Returns:\n The set of operations.\n \"\"\"\n stop_ops = set()\n for op in from_ops:\n is_stop_op = True\n for inp in _NonEagerInputs(op, xs_set):\n if pending_count[inp.op] > 0:\n is_stop_op = False\n break\n if is_stop_op:\n stop_ops.add(op)\n stop_ops.update(op for op in stop_gradient_ops)\n return stop_ops\n\n\[email protected]\ndef _maybe_colocate_with(op, gradient_uid, colocate_gradients_with_ops): # pylint: disable=invalid-name\n \"\"\"Context to colocate with `op` if `colocate_gradients_with_ops`.\"\"\"\n if colocate_gradients_with_ops:\n with ops._colocate_with_for_gradient(op, gradient_uid): # pylint: disable=protected-access\n yield\n else:\n yield\n\n\ndef _IsPartitionedCall(op):\n return op.type == \"PartitionedCall\" or op.type == \"StatefulPartitionedCall\"\n\n\ndef _SymGrad(op, out_grads):\n \"\"\"Backprop through a function call node op given its outputs' gradients.\"\"\"\n f_in = [x for x in op.inputs] + out_grads\n f_types = [default_gradient.get_zeros_dtype(x) for x in op.inputs]\n f = attr_value_pb2.NameAttrList()\n if _IsPartitionedCall(op):\n f.name = op.get_attr(\"f\").name\n else:\n f.name = op.type\n for k in op.node_def.attr:\n f.attr[k].CopyFrom(op.node_def.attr[k])\n in_grads = functional_ops.symbolic_gradient(input=f_in, Tout=f_types, f=f)\n return in_grads\n\n\ndef _MaybeCompile(scope, op, func, grad_fn):\n \"\"\"Compile the calculation in grad_fn if op was marked as compiled.\"\"\"\n scope = scope.rstrip(\"/\").replace(\"/\", \"_\")\n if func is not None:\n xla_compile = func.definition.attr[\"_XlaCompile\"].b\n xla_separate_compiled_gradients = func.definition.attr[\n \"_XlaSeparateCompiledGradients\"].b\n xla_scope = func.definition.attr[\"_XlaScope\"].s.decode()\n else:\n try:\n xla_compile = op.get_attr(\"_XlaCompile\")\n xla_separate_compiled_gradients = op.get_attr(\n \"_XlaSeparateCompiledGradients\")\n xla_scope = op.get_attr(\"_XlaScope\").decode()\n except ValueError:\n return grad_fn() # Exit early\n\n if not xla_compile:\n return grad_fn() # Exit early\n\n # If the gradients are supposed to be compiled separately, we give them a\n # _XlaScope name that is based on the name_scope of the gradients. 
Otherwise\n # they just inherit the existing _XlaScope name, which lets them be merged\n # together with the non-gradient computation.\n if xla_separate_compiled_gradients:\n xla_grad_scope = \"%s_grad_%s\" % (xla_scope, scope)\n else:\n xla_grad_scope = xla_scope\n\n attrs = {\n \"_XlaCompile\": attr_value_pb2.AttrValue(b=xla_compile),\n \"_XlaScope\": attr_value_pb2.AttrValue(s=xla_grad_scope.encode())\n }\n with ops.get_default_graph()._attr_scope(attrs): # pylint: disable=protected-access\n return grad_fn()\n\n\ndef _RaiseNoGradWrtInitialLoopValError(op, from_ops, xs_set):\n \"\"\"Raises an error if we backprop through a loop var.\"\"\"\n # Find the nearest 'to_op' reachable from 'op' to provide a more helpful error\n # message.\n target_op = None\n queue = collections.deque([op])\n visited = set()\n while queue:\n curr_op = queue.popleft()\n if curr_op in visited: continue\n visited.add(curr_op)\n if curr_op in from_ops:\n target_op = curr_op\n break\n queue.extend(t.op for t in _NonEagerInputs(curr_op, xs_set))\n assert target_op\n raise ValueError(\n \"Cannot compute gradient inside while loop with respect to op '%s'. \"\n \"We do not support taking the gradient wrt or through the initial value \"\n \"of a loop variable. Gradients can be computed through loop invariants \"\n \"or wrt the input parameters to the loop body.\"\n % target_op.name)\n\n\ndef _IsFunction(graph):\n return (isinstance(graph, FuncGraph) or\n isinstance(graph, framework_function._FuncGraph)) # pylint: disable=protected-access\n\n\ndef _Captures(func_graph):\n if isinstance(func_graph, FuncGraph):\n return func_graph.captures\n else:\n assert isinstance(func_graph, framework_function._FuncGraph) # pylint: disable=protected-access\n return func_graph.captures\n\n\ndef _MaybeCaptured(t):\n \"\"\"If t is a captured value placeholder, returns the original captured value.\n\n Args:\n t: Tensor\n\n Returns:\n A tensor, potentially from a different Graph/FuncGraph.\n \"\"\"\n # pylint: disable=protected-access\n if (not isinstance(t, ops.EagerTensor) and\n _IsFunction(t.op.graph) and t.op.type == \"Placeholder\"):\n for input_t, placeholder_t in _Captures(t.op.graph):\n if t is placeholder_t:\n return _MaybeCaptured(input_t)\n # pylint: enable=protected-access\n return t\n\n\ndef _NonEagerInputs(op, xs_set):\n \"\"\"Returns the inputs of op, crossing closure boundaries where necessary.\n\n Does not return any captured EagerTensors, i.e., the number of tensors\n returned may be less than than the actual number of inputs.\n\n Args:\n op: Operation\n xs_set: ObjectIdentitySet of Tensors we are differentiating w.r.t.\n\n Returns:\n A list of tensors. The tensors may be from multiple Graph/FuncGraphs if op\n is in a FuncGraph and has captured inputs.\n \"\"\"\n return [t for t in _Inputs(op, xs_set) if not isinstance(t, ops.EagerTensor)]\n\n\n# TODO(skyewm): plumbing xs through everywhere is ugly, consider making\n# _GradientsHelper a class with xs as a member variable.\ndef _Inputs(op, xs_set):\n \"\"\"Returns the inputs of op, crossing closure boundaries where necessary.\n\n Args:\n op: Operation\n xs_set: ObjectIdentitySet of Tensors we are differentiating w.r.t.\n\n Returns:\n A list of tensors. The tensors may be from multiple Graph/FuncGraphs if op\n is in a FuncGraph and has captured inputs.\n \"\"\"\n if _IsFunction(op.graph): # pylint: disable=protected-access\n inputs = []\n for t in op.inputs:\n # If we're differentiating w.r.t. `t`, do not attempt to traverse through\n # it to a captured value. 
The algorithm needs to \"see\" `t` in this case,\n # even if it's a function input for a captured value, whereas usually we'd\n # like to traverse through these closures as if the captured value was the\n # direct input to op.\n if t not in xs_set:\n t = _MaybeCaptured(t)\n inputs.append(t)\n return inputs\n else:\n return op.inputs\n\n\ndef _Consumers(t, func_graphs):\n \"\"\"Returns the consumers of t, crossing closure boundaries where necessary.\n\n Args:\n t: Tensor\n func_graphs: a list of FuncGraphs that may have captured t.\n\n Returns:\n A list of tensors. The tensors will be from the current graph and/or\n func_graphs.\n \"\"\"\n consumers = t.consumers()\n for func in func_graphs:\n for input_t, placeholder in _Captures(func):\n if input_t is t:\n consumers.extend(_Consumers(placeholder, func_graphs))\n return consumers\n\n\ndef _GradientsHelper(ys,\n xs,\n grad_ys=None,\n name=\"gradients\",\n colocate_gradients_with_ops=False,\n gate_gradients=False,\n aggregation_method=None,\n stop_gradients=None,\n unconnected_gradients=UnconnectedGradients.NONE,\n src_graph=None):\n \"\"\"Implementation of gradients().\"\"\"\n if context.executing_eagerly():\n raise RuntimeError(\"tf.gradients is not supported when eager execution \"\n \"is enabled. Use tf.GradientTape instead.\")\n if src_graph is None:\n src_graph = ops.get_default_graph()\n try:\n unconnected_gradients = UnconnectedGradients(unconnected_gradients)\n except ValueError:\n raise ValueError(\n \"Unknown value for unconnected_gradients: %r\" % unconnected_gradients)\n\n # If src_graph is a _FuncGraph (i.e. a function body), gather it and all\n # ancestor graphs. This is necessary for correctly handling captured values.\n func_graphs = []\n curr_graph = src_graph\n while _IsFunction(curr_graph):\n func_graphs.append(curr_graph)\n if isinstance(curr_graph, FuncGraph):\n curr_graph = curr_graph.outer_graph\n else:\n assert isinstance(curr_graph, framework_function._FuncGraph) # pylint: disable=protected-access\n curr_graph = curr_graph._outer_graph # pylint: disable=protected-access\n\n ys = _AsList(ys)\n xs = _AsList(xs)\n stop_gradients = [] if stop_gradients is None else _AsList(stop_gradients)\n if grad_ys is None:\n grad_ys = [None] * len(ys)\n else:\n grad_ys = _AsList(grad_ys)\n\n with ops.name_scope(\n name, \"gradients\",\n list(ys) + list(xs) + list(stop_gradients) + list(grad_ys)) as grad_scope:\n # Get a uid for this call to gradients that can be used to help\n # cluster ops for compilation.\n gradient_uid = ops.get_default_graph().unique_name(\"uid\")\n ys = ops.convert_n_to_tensor_or_indexed_slices(ys, name=\"y\")\n xs = [\n x.handle if resource_variable_ops.is_resource_variable(x) else x\n for x in xs\n ]\n xs = ops.internal_convert_n_to_tensor_or_indexed_slices(\n xs, name=\"x\", as_ref=True)\n xs_set = object_identity.ObjectIdentitySet(xs)\n grad_ys = _DefaultGradYs(grad_ys, ys, colocate_gradients_with_ops,\n gradient_uid)\n\n # The approach we take here is as follows: Create a list of all ops in the\n # subgraph between the ys and xs. Visit these ops in reverse order of ids\n # to ensure that when we visit an op the gradients w.r.t its outputs have\n # been collected. 
Then aggregate these gradients if needed, call the op's\n # gradient function, and add the generated gradients to the gradients for\n # its input.\n\n # Initialize the pending count for ops in the connected subgraph from ys\n # to the xs.\n to_ops = [t.op for t in ys]\n from_ops = [t.op for t in xs]\n stop_gradient_ops = [t.op for t in stop_gradients]\n reachable_to_ops, pending_count, loop_state = _PendingCount(\n to_ops, from_ops, colocate_gradients_with_ops, func_graphs, xs_set)\n\n # Iterate over the collected ops.\n #\n # grads: op => list of gradients received on each output endpoint of the\n # op. The gradients for each endpoint are initially collected as a list.\n # When it is time to call the op's gradient function, for each endpoint we\n # aggregate the list of received gradients into a Add() Operation if there\n # is more than one.\n grads = {}\n\n # Add the initial gradients for the ys.\n for y, grad_y in zip(ys, grad_ys):\n _SetGrad(grads, y, grad_y)\n\n # Initialize queue with to_ops.\n queue = collections.deque()\n # Add the ops in 'to_ops' into the queue.\n to_ops_set = set()\n for op in to_ops:\n # 'ready' handles the case where one output gradient relies on\n # another output's gradient.\n ready = (pending_count[op] == 0)\n if ready and op not in to_ops_set and op in reachable_to_ops:\n to_ops_set.add(op)\n queue.append(op)\n\n if loop_state:\n loop_exits = loop_state.ProcessUnusedLoopExits(pending_count, to_ops_set)\n for y in loop_exits:\n if backprop_util.IsTrainable(y):\n _SetGrad(grads, y, loop_state.ZerosLikeForExit(y))\n queue.append(y.op)\n\n stop_ops = _StopOps(from_ops, stop_gradient_ops, pending_count, xs_set)\n while queue:\n # generate gradient subgraph for op.\n op = queue.popleft()\n with _maybe_colocate_with(op, gradient_uid, colocate_gradients_with_ops):\n if loop_state:\n loop_state.EnterGradWhileContext(op, before=True)\n out_grads = _AggregatedGrads(grads, op, gradient_uid, loop_state,\n aggregation_method)\n if loop_state:\n loop_state.ExitGradWhileContext(op, before=True)\n\n grad_fn = None\n func_call = None\n is_partitioned_call = _IsPartitionedCall(op)\n # pylint: disable=protected-access\n is_func_call = (\n src_graph._is_function(op.type) or is_partitioned_call)\n # pylint: enable=protected-access\n has_out_grads = any(isinstance(g, ops.Tensor) or g for g in out_grads)\n if has_out_grads and (op not in stop_ops):\n try:\n grad_fn = ops.get_gradient_function(op)\n except LookupError:\n if is_func_call:\n if is_partitioned_call:\n func_call = src_graph._get_function( # pylint: disable=protected-access\n compat.as_bytes(op.get_attr(\"f\").name))\n else:\n func_call = src_graph._get_function(op.type) # pylint: disable=protected-access\n # Note that __defun is not set if the graph is\n # imported. If it's set, we prefer to access the original\n # defun.\n func_call = getattr(op, \"__defun\", func_call)\n grad_fn = func_call.python_grad_func\n else:\n raise LookupError(\n \"No gradient defined for operation '%s' (op type: %s)\" %\n (op.name, op.type))\n if loop_state:\n loop_state.EnterGradWhileContext(op, before=False)\n\n # NOTE(skyewm): We don't support computing gradients wrt a loop variable\n # unless it's within the context of a single iteration (i.e. the\n # gradient is wrt to the loop parameter in the body function, not wrt or\n # through the initial value). 
This means if we're in a while loop\n # context, we should never see a switch node from this context.\n # pylint: disable=protected-access\n if (control_flow_util.IsSwitch(op) and\n op._control_flow_context is not None and\n op._control_flow_context.IsWhileContext() and\n op._control_flow_context ==\n ops.get_default_graph()._get_control_flow_context()):\n _RaiseNoGradWrtInitialLoopValError(op, from_ops, xs_set)\n # pylint: enable=protected-access\n\n if (grad_fn or is_func_call) and has_out_grads:\n # NOTE: If _AggregatedGrads didn't compute a value for the i'th\n # output, it means that the cost does not depend on output[i],\n # therefore dC/doutput[i] is 0.\n for i, out_grad in enumerate(out_grads):\n if (not isinstance(out_grad, ops.Tensor) and not out_grad) and (\n (not grad_fn and is_func_call)\n or backprop_util.IsTrainable(op.outputs[i])):\n # Only trainable outputs or outputs for a function call that\n # will use SymbolicGradient get a zero gradient. Gradient\n # functions should ignore the gradient for other outputs.\n # TODO(apassos) gradients of resource handles might be an\n # issue here because of zeros.\n if loop_state:\n out_grads[i] = loop_state.ZerosLike(op, i)\n elif default_gradient.supports_default_grad(op.outputs[i]):\n # TODO(b/143286622): The supports_default_grad check is needed\n # because While op emits non-differentiable resource tensors\n # as outputs. Remove this check when that is not the case.\n out_grads[i] = control_flow_state.ZerosLikeOutsideLoop(op, i)\n with ops.name_scope(op.name + \"_grad\"):\n # pylint: disable=protected-access\n with src_graph._original_op(op):\n # pylint: enable=protected-access\n if grad_fn:\n # If grad_fn was found, do not use SymbolicGradient even for\n # functions.\n in_grads = _MaybeCompile(grad_scope, op, func_call,\n lambda: grad_fn(op, *out_grads))\n else:\n # For function call ops, we add a 'SymbolicGradient'\n # node to the graph to compute gradients.\n in_grads = _MaybeCompile(grad_scope, op, func_call,\n lambda: _SymGrad(op, out_grads))\n in_grads = _AsList(in_grads)\n _VerifyGeneratedGradients(in_grads, op)\n if gate_gradients and len([x for x in in_grads\n if x is not None]) > 1:\n with ops.device(None):\n with ops._colocate_with_for_gradient( # pylint: disable=protected-access\n None,\n gradient_uid,\n ignore_existing=True):\n in_grads = control_flow_ops.tuple(in_grads)\n _LogOpGradients(op, out_grads, in_grads)\n else:\n # If no grad_fn is defined or none of out_grads is available,\n # just propagate a list of None backwards.\n in_grads = [None] * len(_Inputs(op, xs_set))\n # Note: we don't filter out eager inputs here because the inputs need to\n # line up with in_grads.\n for i, (t_in, in_grad) in enumerate(zip(_Inputs(op, xs_set), in_grads)):\n if in_grad is not None:\n if (isinstance(in_grad, ops.Tensor) and\n t_in.dtype != dtypes.resource):\n try:\n in_grad.set_shape(t_in.get_shape())\n except ValueError:\n raise ValueError(\n \"Incompatible shapes between op input and calculated \"\n \"input gradient. Forward operation: %s. Input index: %d. \"\n \"Original input shape: %s. 
\"\n \"Calculated input gradient shape: %s\" %\n (op.name, i, t_in.shape, in_grad.shape))\n if not isinstance(t_in, ops.EagerTensor):\n _SetGrad(grads, t_in, in_grad)\n if loop_state:\n loop_state.ExitGradWhileContext(op, before=False)\n\n # Update pending count for the inputs of op and enqueue ready ops.\n _UpdatePendingAndEnqueueReady(grads, op, queue, pending_count, loop_state,\n xs_set)\n\n if loop_state:\n loop_state.PostProcessing()\n return [_GetGrad(grads, x, unconnected_gradients) for x in xs]\n\n\ndef _HasAnyNotNoneGrads(grads, op):\n \"\"\"Return true iff op has real gradient.\"\"\"\n out_grads = _GetGrads(grads, op)\n for out_grad in out_grads:\n if isinstance(out_grad, (ops.Tensor, ops.IndexedSlices)):\n return True\n if out_grad and isinstance(out_grad, collections_abc.Sequence):\n if any(g is not None for g in out_grad):\n return True\n return False\n\n\ndef _UpdatePendingAndEnqueueReady(grads, op, queue, pending_count, loop_state,\n xs_set):\n \"\"\"Update pending count for the inputs of op and enqueue ready ops.\"\"\"\n for x in _NonEagerInputs(op, xs_set):\n pending_count[x.op] -= 1\n ready = (pending_count[x.op] == 0)\n if loop_state and not ready:\n ready = pending_count[x.op] > 0 and control_flow_util.IsLoopSwitch(x.op)\n if ready:\n if control_flow_util.IsLoopExit(x.op):\n # if x is an exit without real gradient, defer processing them.\n grad_state = loop_state.GetGradState(x.op, before=False)\n grad_state.deferred_exits.append(x)\n grad_state.pending_exits_count -= 1\n if grad_state.pending_exits_count == 0:\n # We now have all the exits so process them.\n has_not_none_grad = False\n for y in grad_state.deferred_exits:\n if _HasAnyNotNoneGrads(grads, y.op):\n has_not_none_grad = True\n queue.append(y.op)\n else:\n grad_state.unused_exits.append(y)\n if has_not_none_grad:\n # For an unused exit, if it has trainable outputs, backprop\n # a zero gradient. 
Otherwise, just ignore it.\n for y in grad_state.unused_exits:\n if backprop_util.IsTrainable(y):\n _SetGrad(grads, y, loop_state.ZerosLikeForExit(y))\n queue.append(y.op)\n else:\n # All exits are \"unused\" so use None as gradient.\n for y in grad_state.unused_exits:\n queue.append(y.op)\n else:\n queue.append(x.op)\n\n\ndef _SetGrad(grads, t, grad):\n \"\"\"Sets gradient \"grad\" in \"grads\" for tensor \"t\".\"\"\"\n op = t.op\n op_grads = grads.get(op)\n if not op_grads:\n op_grads = [[] for _ in xrange(len(op.outputs))]\n grads[op] = op_grads\n t_grads = op_grads[t.value_index]\n if isinstance(t_grads, list):\n t_grads.append(grad)\n else:\n assert control_flow_util.IsLoopSwitch(op)\n op_grads[t.value_index] = grad\n\n\ndef _GetGrad(grads, t, unconnected_gradients):\n \"\"\"Gets gradient for tensor \"t\".\"\"\"\n op = t.op\n op_grads = grads.get(op)\n if not op_grads:\n if unconnected_gradients == UnconnectedGradients.ZERO:\n t_dtype = default_gradient.get_zeros_dtype(t)\n if t.dtype == dtypes.resource:\n return array_ops.zeros(\n resource_variable_ops.variable_shape(t), dtype=t_dtype)\n else:\n return array_ops.zeros_like(t, dtype=t_dtype)\n elif unconnected_gradients == UnconnectedGradients.NONE:\n return None\n else:\n raise ValueError(\n \"Unknown value for unconnected_gradients: %r\" % unconnected_gradients)\n\n t_grad = op_grads[t.value_index]\n assert not isinstance(\n t_grad, list), (\"gradients list should have been aggregated by now.\")\n return t_grad\n\n\ndef _GetGrads(grads, op):\n \"\"\"Gets all gradients for op.\"\"\"\n if op in grads:\n return grads[op]\n else:\n return [[] for _ in xrange(len(op.outputs))]\n\n\ndef _AccumulatorShape(inputs):\n shape = tensor_shape.unknown_shape()\n for i in inputs:\n if isinstance(i, ops.Tensor):\n shape = shape.merge_with(i.get_shape())\n return shape\n\n\ndef _LogOpGradients(op, out_grads, in_grads):\n \"\"\"Log the in and out grads of an op.\"\"\"\n logging.vlog(1, \"Gradient for '\" + op.name + \"'\")\n\n def _FilterGrad(x):\n if x is None:\n return False\n if isinstance(x, (list, tuple)):\n return bool(x)\n else:\n return True\n\n logging.vlog(1, \" in --> %s\",\n \", \".join([x.name for x in out_grads if _FilterGrad(x)]))\n logging.vlog(1, \" out --> %s\",\n \", \".join([x.name for x in in_grads if _FilterGrad(x)]))\n\n\ndef _MultiDeviceAddN(tensor_list, gradient_uid):\n \"\"\"Adds tensors from potentially multiple devices.\"\"\"\n # Basic function structure comes from control_flow_ops.group().\n # Sort tensors according to their devices.\n tensors_on_device = collections.defaultdict(lambda: [])\n for tensor in tensor_list:\n tensors_on_device[tensor.device].append(tensor)\n\n # For each device, add the tensors on that device first.\n # Then gather the partial sums from multiple devices.\n # TODO(sjhwang): Create hierarchical aggregation tree as pbar's suggestion.\n # E.g., aggregate per GPU, then per task, and so on.\n summands = []\n\n def DeviceKey(dev):\n return \"\" if dev is None else dev\n\n for dev in sorted(tensors_on_device, key=DeviceKey):\n tensors = tensors_on_device[dev]\n with ops._colocate_with_for_gradient( # pylint: disable=protected-access\n tensors[0].op,\n gradient_uid,\n ignore_existing=True):\n summands.append(math_ops.add_n(tensors))\n\n return math_ops.add_n(summands)\n\n\n@tf_export(\"AggregationMethod\")\nclass AggregationMethod(object):\n \"\"\"A class listing aggregation methods used to combine gradients.\n\n Computing partial derivatives can require aggregating gradient\n contributions. 
This class lists the various methods that can\n be used to combine gradients in the graph.\n\n The following aggregation methods are part of the stable API for\n aggregating gradients:\n\n * `ADD_N`: All of the gradient terms are summed as part of one\n operation using the \"AddN\" op (see `tf.add_n`). This\n method has the property that all gradients must be ready and\n buffered separately in memory before any aggregation is performed.\n * `DEFAULT`: The system-chosen default aggregation method.\n\n The following aggregation methods are experimental and may not\n be supported in future releases:\n\n * `EXPERIMENTAL_TREE`: Gradient terms are summed in pairs using\n using the \"AddN\" op. This method of summing gradients may reduce\n performance, but it can improve memory utilization because the\n gradients can be released earlier.\n\n \"\"\"\n ADD_N = 0\n DEFAULT = ADD_N\n # The following are experimental and may not be supported in future releases.\n EXPERIMENTAL_TREE = 1\n EXPERIMENTAL_ACCUMULATE_N = 2 # An alias for EXPERIMENTAL_ADD_N = 1\n\n\ndef _AggregatedGrads(grads,\n op,\n gradient_uid,\n loop_state,\n aggregation_method=None):\n \"\"\"Get the aggregated gradients for op.\n\n Args:\n grads: The map of memoized gradients.\n op: The op to get gradients for.\n gradient_uid: A unique identifier within the graph indicating\n which invocation of gradients is being executed. Used to cluster\n ops for compilation.\n loop_state: An object for maintaining the state of the while loops in the\n graph. It is of type ControlFlowState. None if the graph\n contains no while loops.\n aggregation_method: Specifies the method used to combine gradient terms.\n Accepted values are constants defined in the class `AggregationMethod`.\n\n Returns:\n A list of gradients, one per each output of `op`. 
If the gradients\n for a particular output is a list, this function aggregates it\n before returning.\n\n Raises:\n TypeError: if the incoming grads are not Tensors or IndexedSlices.\n ValueError: if the arguments are invalid.\n\n \"\"\"\n if aggregation_method is None:\n aggregation_method = AggregationMethod.DEFAULT\n if aggregation_method not in [\n AggregationMethod.ADD_N, AggregationMethod.EXPERIMENTAL_TREE,\n AggregationMethod.EXPERIMENTAL_ACCUMULATE_N\n ]:\n raise ValueError(\n \"Invalid aggregation_method specified %s.\" % aggregation_method)\n out_grads = _GetGrads(grads, op)\n for i, out_grad in enumerate(out_grads):\n if loop_state:\n if isinstance(out_grad, (ops.Tensor, ops.IndexedSlices)):\n assert control_flow_util.IsLoopSwitch(op)\n continue\n # Grads have to be Tensors or IndexedSlices\n if (isinstance(out_grad, collections_abc.Sequence) and not all(\n isinstance(g, (ops.Tensor, ops.IndexedSlices))\n for g in out_grad\n if g is not None)):\n raise TypeError(\"gradients have to be either all Tensors \"\n \"or all IndexedSlices\")\n # Aggregate multiple gradients, and convert [] to None.\n if out_grad:\n if len(out_grad) < 2:\n used = \"nop\"\n out_grads[i] = out_grad[0]\n elif all(isinstance(g, ops.Tensor) for g in out_grad if g is not None):\n tensor_shape = _AccumulatorShape(out_grad)\n if aggregation_method in [\n AggregationMethod.EXPERIMENTAL_TREE,\n AggregationMethod.EXPERIMENTAL_ACCUMULATE_N\n ]:\n # Aggregate all gradients by doing pairwise sums: this may\n # reduce performance, but it can improve memory because the\n # gradients can be released earlier.\n #\n # TODO(vrv): Consider replacing this with a version of\n # tf.AddN() that eagerly frees its inputs as soon as they are\n # ready, so the order of this tree does not become a problem.\n used = \"tree\"\n with ops.name_scope(op.name + \"_gradient_sum\"):\n running_sum = out_grad[0]\n for grad in out_grad[1:]:\n running_sum = math_ops.add_n([running_sum, grad])\n out_grads[i] = running_sum\n else:\n used = \"add_n\"\n out_grads[i] = _MultiDeviceAddN(out_grad, gradient_uid)\n logging.vlog(2, \" _AggregatedGrads %d x %s using %s\", len(out_grad),\n tensor_shape, used)\n else:\n out_grads[i] = backprop.aggregate_indexed_slices_gradients(out_grad) # pylint: disable=protected-access\n else: # not out_grad\n # out_grads[i] is [], thus its aggregation is simply None.\n out_grads[i] = None\n return out_grads\n",
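The row above is gradients_util.py, the graph-mode machinery behind tf.gradients (its _GradientsHelper raises under eager execution and points users to tf.GradientTape). Below is a short, hedged sketch of the public behaviors its docstrings describe, namely stop_gradients, unconnected_gradients, and gradient aggregation, using the tf.compat.v1 endpoints; the toy graph is an illustrative assumption.

import tensorflow as tf

graph = tf.Graph()
with graph.as_default():
    a = tf.constant(2.0)
    b = 2.0 * a
    y = a + b

    # Plain gradient: dy/da = 1 + d(2a)/da = 3.
    g_full = tf.compat.v1.gradients(ys=[y], xs=[a])

    # stop_gradients puts b's op on the _StopOps frontier, so backprop does
    # not continue through it: dy/da = 1.
    g_stop = tf.compat.v1.gradients(ys=[y], xs=[a], stop_gradients=[b])

    # An x unconnected to y yields zeros instead of None when
    # unconnected_gradients=ZERO, matching _GetGrad's handling.
    z = tf.constant(5.0)
    g_unconnected = tf.compat.v1.gradients(
        ys=[y], xs=[z], unconnected_gradients=tf.UnconnectedGradients.ZERO)

with tf.compat.v1.Session(graph=graph) as sess:
    print(sess.run(g_full))         # [3.0]
    print(sess.run(g_stop))         # [1.0]
    print(sess.run(g_unconnected))  # [0.0]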
"# Copyright 2019 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Base class for testing saving/loading with DS.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\nfrom absl.testing import parameterized\nimport numpy as np\n\nfrom tensorflow.python.data.ops import dataset_ops\nfrom tensorflow.python.distribute import combinations\nfrom tensorflow.python.distribute import model_combinations\nfrom tensorflow.python.distribute import strategy_combinations\nfrom tensorflow.python.eager import test\nfrom tensorflow.python.framework import random_seed\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.saved_model import saved_model\n\n_RANDOM_SEED = 1337\n_DEFAULT_FUNCTION_KEY = 'serving_default'\n\n_TOLERANCE = 1e-30\n# TPU uses bfloat16 for computation in hardware underlying, so it has less\n# precision than CPU/GPU.\n_TPU_TOLERANCE = 1e-7\n\nPREDICT_STEPS = 1\n\nsimple_models = [\n model_combinations.simple_functional_model,\n model_combinations.simple_sequential_model,\n\n # TODO(b/131715604): figure out why subclass model does not work\n # model_combinations.simple_subclass_model,\n]\n\n\nstrategies = [\n strategy_combinations.default_strategy,\n strategy_combinations.one_device_strategy,\n strategy_combinations.one_device_strategy_gpu,\n strategy_combinations.mirrored_strategy_with_one_cpu,\n strategy_combinations.mirrored_strategy_with_one_gpu,\n strategy_combinations.mirrored_strategy_with_gpu_and_cpu,\n strategy_combinations.mirrored_strategy_with_two_gpus,\n strategy_combinations.tpu_strategy\n]\n\n\ndef is_tpu_strategy(distribution):\n return (distribution is not None and\n distribution.__class__.__name__.startswith('TPUStrategy'))\n\n\ndef get_tolerance(save_distribution, restore_distribution):\n if is_tpu_strategy(save_distribution) or is_tpu_strategy(\n restore_distribution):\n return _TPU_TOLERANCE\n return _TOLERANCE\n\n\ndef simple_models_with_strategies():\n return combinations.combine(\n model_and_input=simple_models,\n distribution=strategies,\n mode=['eager'],\n experimental_run_tf_function=[True, False])\n\n\ndef simple_models_with_strategy_pairs():\n return combinations.combine(\n model_and_input=simple_models,\n distribution_for_saving=strategies,\n distribution_for_restoring=strategies,\n mode=['eager'],\n experimental_run_tf_function=[True, False])\n\n\ndef tfmodule_models_with_strategies():\n return combinations.combine(\n model_and_input=[model_combinations.simple_tfmodule_model],\n distribution=strategies,\n mode=['eager'],\n experimental_run_tf_function=[True])\n\n\ndef tfmodule_models_with_strategy_pairs():\n return combinations.combine(\n model_and_input=[model_combinations.simple_tfmodule_model],\n distribution_for_saving=strategies,\n distribution_for_restoring=strategies,\n mode=['eager'],\n 
experimental_run_tf_function=[True])\n\n\ndef load_and_run_with_saved_model_api(distribution, saved_dir, predict_dataset,\n output_name):\n \"\"\"Loads a saved_model using tf.saved_model API, and runs it.\"\"\"\n func = saved_model.load(saved_dir)\n if distribution:\n dist_predict_dataset = distribution.experimental_distribute_dataset(\n predict_dataset)\n per_replica_predict_data = next(iter(dist_predict_dataset))\n result = distribution.experimental_run_v2(\n func.signatures[_DEFAULT_FUNCTION_KEY],\n args=(per_replica_predict_data,))\n result = result[output_name]\n\n # Convert the per_replica value to a list, then concatenate them\n reduced = distribution.experimental_local_results(result)\n concat = array_ops.concat(reduced, 0)\n return concat\n else:\n result = func.signatures[_DEFAULT_FUNCTION_KEY](next(iter(predict_dataset)))\n return result[output_name]\n\n\nclass TestSavedModelBase(test.TestCase, parameterized.TestCase):\n \"\"\"Base class for testing saving/loading with DS.\"\"\"\n\n def setUp(self):\n np.random.seed(_RANDOM_SEED)\n random_seed.set_random_seed(_RANDOM_SEED)\n self._root_dir = 'base'\n super(TestSavedModelBase, self).setUp()\n\n def _save_model(self, model, saved_dir):\n \"\"\"Save the given model to the given saved_dir.\n\n This method needs to be implemeted by the subclasses.\n\n Args:\n model: a keras model object to save.\n saved_dir: a string representing the path to save the keras model\n \"\"\"\n raise NotImplementedError('must be implemented in descendants')\n\n def _load_and_run_model(self, distribution, saved_dir, predict_dataset,\n output_name, experimental_run_tf_function):\n \"\"\"Load the model and run 1 step of predict with it.\n\n This method must be implemented by the subclasses.\n\n Args:\n distribution: the distribution strategy used to load the model. 
None if no\n distribution strategy is used\n saved_dir: the string representing the path where the model is saved.\n predict_dataset: the data used to do the predict on the model for\n cross_replica context.\n output_name: the string representing the name of the output layer of the\n model.\n experimental_run_tf_function: Whether to use the single execution path\n for models.\n \"\"\"\n\n raise NotImplementedError('must be implemented in descendants')\n\n def _train_model(self, model, x_train, y_train, batch_size):\n training_dataset = dataset_ops.Dataset.from_tensor_slices(\n (x_train, y_train))\n training_dataset = training_dataset.repeat()\n training_dataset = training_dataset.batch(batch_size)\n\n # Train the model for 1 epoch\n model.fit(x=training_dataset, epochs=1, steps_per_epoch=100)\n\n def _predict_with_model(self, distribution, model, predict_dataset):\n return model.predict(predict_dataset, steps=PREDICT_STEPS)\n\n def _get_predict_dataset(self, x_predict, batch_size):\n predict_dataset = dataset_ops.Dataset.from_tensor_slices(x_predict)\n predict_dataset = predict_dataset.repeat()\n predict_dataset = predict_dataset.batch(batch_size)\n return predict_dataset\n\n def run_test_save_no_strategy_restore_strategy(self, model_and_input,\n distribution,\n experimental_run_tf_function):\n \"\"\"Save a model without DS, and restore it with DS.\"\"\"\n\n saved_dir = os.path.join(self.get_temp_dir(), '0')\n\n model, output_name = model_and_input.get_model(\n experimental_run_tf_function=experimental_run_tf_function)\n x_train, y_train, x_predict = model_and_input.get_data()\n batch_size = model_and_input.get_batch_size()\n predict_dataset = self._get_predict_dataset(x_predict, batch_size)\n\n self._train_model(model, x_train, y_train, batch_size)\n result_before_save = self._predict_with_model(None, model, predict_dataset)\n\n self._save_model(model, saved_dir)\n\n with distribution.scope():\n result_after_save = self._load_and_run_model(\n distribution=distribution,\n saved_dir=saved_dir,\n predict_dataset=predict_dataset,\n output_name=output_name,\n experimental_run_tf_function=experimental_run_tf_function)\n\n tolerance = get_tolerance(None, distribution)\n self.assertAllClose(result_before_save, result_after_save, atol=tolerance)\n\n def run_test_save_strategy_restore_no_strategy(self, model_and_input,\n distribution, save_in_scope,\n experimental_run_tf_function):\n \"\"\"Save a model with DS, and restore it without DS.\"\"\"\n\n saved_dir = os.path.join(self.get_temp_dir(), '1')\n\n with distribution.scope():\n model, output_name = model_and_input.get_model(\n experimental_run_tf_function=experimental_run_tf_function)\n x_train, y_train, x_predict = model_and_input.get_data()\n batch_size = model_and_input.get_batch_size()\n\n self._train_model(model, x_train, y_train, batch_size)\n predict_dataset = self._get_predict_dataset(x_predict, batch_size)\n result_before_save = self._predict_with_model(\n distribution, model, predict_dataset)\n\n if save_in_scope:\n with distribution.scope():\n self._save_model(model, saved_dir)\n else:\n self._save_model(model, saved_dir)\n\n load_result = self._load_and_run_model(\n distribution=None,\n saved_dir=saved_dir,\n predict_dataset=predict_dataset,\n output_name=output_name,\n experimental_run_tf_function=experimental_run_tf_function)\n\n tolerance = get_tolerance(distribution, None)\n self.assertAllClose(result_before_save, load_result, atol=tolerance)\n\n def run_test_save_strategy_restore_strategy(self, model_and_input,\n 
distribution_for_saving,\n distribution_for_restoring,\n save_in_scope,\n experimental_run_tf_function):\n \"\"\"Save a model with DS, and restore it with potentially different DS.\"\"\"\n saved_dir = os.path.join(self.get_temp_dir(), '2')\n\n with distribution_for_saving.scope():\n model, output_name = model_and_input.get_model(\n experimental_run_tf_function=experimental_run_tf_function)\n x_train, y_train, x_predict = model_and_input.get_data()\n batch_size = model_and_input.get_batch_size()\n\n self._train_model(model, x_train, y_train, batch_size)\n predict_dataset = self._get_predict_dataset(x_predict, batch_size)\n result_before_save = self._predict_with_model(\n distribution_for_saving, model, predict_dataset)\n\n if save_in_scope:\n with distribution_for_saving.scope():\n self._save_model(model, saved_dir)\n else:\n self._save_model(model, saved_dir)\n\n with distribution_for_restoring.scope():\n\n load_result = self._load_and_run_model(\n distribution=distribution_for_restoring,\n saved_dir=saved_dir,\n predict_dataset=predict_dataset,\n output_name=output_name,\n experimental_run_tf_function=experimental_run_tf_function)\n\n tolerance = get_tolerance(distribution_for_saving,\n distribution_for_restoring)\n self.assertAllClose(result_before_save, load_result, atol=tolerance)\n",
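The row above is the distribution-strategy saved-model test base (saved_model_test_base.py). The following is a hedged sketch of the round trip its load_and_run_with_saved_model_api helper performs: save a small Keras model, reload it with tf.saved_model.load inside a strategy scope, and run the 'serving_default' signature on a distributed batch. The model, directory, shapes, and the assumption that saving exports a 'serving_default' signature are illustrative, not taken from the test base.

import numpy as np
import tensorflow as tf

# A tiny model; input_shape builds it so a serving signature can be exported.
model = tf.keras.Sequential([tf.keras.layers.Dense(4, input_shape=(3,))])
tf.saved_model.save(model, "/tmp/simple_saved_model")

strategy = tf.distribute.MirroredStrategy()
predict_dataset = tf.data.Dataset.from_tensor_slices(
    np.random.rand(8, 3).astype(np.float32)).batch(4)

with strategy.scope():
    loaded = tf.saved_model.load("/tmp/simple_saved_model")
serving_fn = loaded.signatures["serving_default"]
# Assumed lookup: structured_outputs maps output names to the signature's
# output tensors, giving us the key the test base calls output_name.
output_name = list(serving_fn.structured_outputs.keys())[0]

# Mirror the helper above: distribute the dataset, run the signature per
# replica, then concatenate the per-replica results into one prediction batch.
dist_dataset = strategy.experimental_distribute_dataset(predict_dataset)
per_replica_batch = next(iter(dist_dataset))
per_replica_out = strategy.experimental_run_v2(
    serving_fn, args=(per_replica_batch,))
local = strategy.experimental_local_results(per_replica_out[output_name])
predictions = tf.concat(local, axis=0)
print(predictions.shape)  # one global batch of 4 examples, 4 units each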
"# Copyright 2015 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Gradients for operators defined in linalg_ops.py.\n\nUseful reference for derivative formulas is (Mike Giles, 2008).\n\nIonescu et al. (2015) provide a detailed derivation of formulas for\nbackpropagating through spectral layers (SVD and Eig).\n\nReferences:\n An extended collection of matrix derivative results for\n forward and reverse mode automatic differentiation:\n [Mike Giles, 2008]\n (https://ora.ox.ac.uk/objects/uuid:8d0c0a29-c92b-4153-a1d2-38b276e93124)\n ([pdf](http://eprints.maths.ox.ac.uk/1079/1/NA-08-01.pdf))\n Matrix Backpropagation for Deep Networks with Structured Layers\n [Ionescu et al., 2015]\n (https://www.cv-foundation.org/openaccess/content_iccv_2015/html/Ionescu_Matrix_Backpropagation_for_ICCV_2015_paper.html)\n ([pdf](https://www.cv-foundation.org/openaccess/content_iccv_2015/papers/Ionescu_Matrix_Backpropagation_for_ICCV_2015_paper.pdf))\n Training Deep Networks with Structured Layers by Matrix Backpropagation:\n [Ionescu et al., 2015](https://arxiv.org/abs/1509.07838)\n ([pdf](https://arxiv.org/pdf/1509.07838.pdf))\n\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import control_flow_ops\nfrom tensorflow.python.ops import gen_linalg_ops\nfrom tensorflow.python.ops import linalg_ops\nfrom tensorflow.python.ops import math_ops\nfrom tensorflow.python.ops.linalg import linalg_impl as _linalg\n\n\[email protected](\"MatrixInverse\")\ndef _MatrixInverseGrad(op, grad):\n \"\"\"Gradient for MatrixInverse.\"\"\"\n ainv = op.outputs[0]\n return -math_ops.matmul(\n ainv, math_ops.matmul(grad, ainv, adjoint_b=True), adjoint_a=True)\n\n\[email protected](\"Einsum\")\ndef _EinsumGrad(op, grad):\n \"\"\"Gradient for Einsum.\"\"\"\n ellipsis = \"...\"\n\n def _GetAxisFromLabel(subscripts, label):\n \"\"\"Returns the axis (possibly negative) corresponding to a label.\n\n Returns the axis index of the axis label if it is before an ellipsis (or if\n the ellipsis is not present), and the negative index if it occurs after the\n ellipsis. E.g. index of `b` in `ab...cd`, is `1`, but that of `c` is `-2`.\n\n For multiple occurrences, returns the leftmost one. If not found, returns\n None.\n\n Args:\n subscripts: A string denoting the einsum subscript (e.g. 
`ab...cd`)\n label: The single character axis label.\n \"\"\"\n splits = subscripts.split(ellipsis)\n index = splits[0].find(label)\n if index != -1:\n return index\n if len(splits) < 2:\n return None\n index = splits[1].find(label)\n if index != -1:\n return index - len(splits[1])\n return None\n\n def _GetBcastSubshape(subscripts):\n \"\"\"Returns a tuple denoting the slice mapping to ellipsis.\n\n For a given subscript, returns a tuple (start, end) denoting the start\n axis index and the (negative) end axis index respectively. For any input\n Tensor `x` described by the subscript, `x[start:end]` would be the slice\n represented by the ellipsis. E.g. For `ab...cd` returns `[1, -2]`.\n\n If ellipsis is not present in `subscripts`, returns `(0, 0)`.\n\n Args:\n subscripts: A string denoting the einsum subscript.\n \"\"\"\n start = subscripts.find(ellipsis)\n if start == -1:\n return 0, 0\n remaining = len(subscripts) - (start + len(ellipsis))\n end = -remaining if remaining > 0 else None\n return start, end\n\n def _GetReducedSubscripts(reduced_label_set, input_shape, subscripts):\n \"\"\"Returns reduced subscripts and their corresponding dimensions and axes.\n\n Given a set of axis labels, returns their concatenated subscript, their\n corresponding dimensions from input_shape, and their corresponding axes.\n Note that the concatenated subscript `reduced_subs` may have axis labels\n from `reduced_label_set` in any order. For example, for the reduced label\n set `{b, d}`, subscripts `aabbcd` and input shape `[2,2,5,5,3,4]`, returns\n subscripts `bd`, dimensions `[5,4]` and axes `[2,5]`.\n\n Args:\n reduced_label_set: Set of axis labels which appear in `subscripts`.\n input_shape: A `Tensor` representing the shape of the einsum operand\n corresponding to `subscripts`.\n subscripts: A string denoting the einsum subscript.\n\n Returns:\n reduced_subs: Subscripts formed by a concatenation of labels in\n `reduced_label_set`.\n reduced_dims: Dimensions from `input_shape` corresponding to each label\n in `reduced_subs`.\n reduced_axes: Axes described by `subscripts` corresponding to each label\n in `reduced_subs`. If there are multiple occurrences in `subscripts`,\n we consider only the leftmost one.\n\n \"\"\"\n # Concatenate the sequence of reduced axis labels.\n reduced_subs = \"\".join(list(reduced_label_set))\n # Get the axis (may be positive, negative or zero) for each of the reduced\n # labels. If the same label appears multiple times, get the left-most axis.\n reduced_axes = [_GetAxisFromLabel(subscripts, s) for s in reduced_subs]\n # Get the corresponding dimensions for each reduced axis.\n reduced_dims = array_ops.stack([input_shape[ax] for ax in reduced_axes])\n return reduced_subs, reduced_dims, reduced_axes\n\n def _GetGradReduced(output_grad, output_subs, input_subs, input_shape,\n reduced_label_set):\n \"\"\"Returns the gradient wrt input for a unary einsum with reductions.\n\n Args:\n output_grad: The gradient wrt the output of a unary einsum operation.\n output_subs: The output subscript. (E.g. `ac` for equation `abc->ac`).\n input_subs: The input subscript. (E.g. `abc` for equation `abc->ac`).\n input_shape: A `Tensor` representing the shape of the input operand.\n reduced_label_set: The set of axis labels appearing in `input_subs` but\n not in `output_subs`.\n \"\"\"\n # Let's say the einsum operation was \"aabbcd->ca\", where axis labels 'b' and\n # 'd' are reduced with input_shape [2,2,5,5,3,4]. 
Then obtain the reduced\n # subscripts \"bd\", corresponding dimensions [5,4] and axes [2,5].\n reduced_subs, reduced_dims, reduced_axes = _GetReducedSubscripts(\n reduced_label_set, input_shape, input_subs)\n # Whether either the input or the output subscripts have a repeated label.\n # This is true for \"aabbcd->ca\" or \"abd->cca\" but false for \"abcd->ca\".\n has_repeated_labels = (\n len(set(input_subs)) + len(set(output_subs)) <\n len(input_subs) + len(output_subs))\n # Compute the input subscripts without the reduced axis labels, e.g. \"aac\"\n # for the equation \"aabbcd->ca\".\n input_subs_without_reduced_labels = \"\".join(\n [s for s in input_subs if s not in reduced_label_set])\n\n # The gradient wrt the input for the equation \"abc->ac\" (or, equivalently\n # reduce_sum(..., axis=1)) is just the gradient of the output tiled N times\n # along axis 1, where label 'b' represents a dimension of size N.\n #\n # If we're not dealing with repeated labels, and the non-reduced labels\n # doesn't need to be transposed, then just tiling is enough and there is no\n # need to call another einsum. For example, tiling is sufficient for\n # \"abcd->ac\". But for equations like \"aabbcd->ac\" (generalized traces) or\n # \"abc->ca\" (transpose), we'd need another einsum operation after tiling.\n if (not has_repeated_labels and\n input_subs_without_reduced_labels == output_subs):\n # Obtain the shape of the output, as if keepdims=True on reduce sum. E.g.\n # for the equation \"abcd->ac\" with input shape [2,5,3,4], we get the\n # reduced shape [2,1,3,1].\n reduced_shape = math_ops.reduced_shape(\n input_shape, ops.convert_to_tensor(reduced_axes))\n # Reshaping the gradient (wrt \"ac\") to [2,1,3,1] and broadcasting it to\n # the shape [2,5,3,4] results in the gradient wrt \"abcd\".\n return array_ops.broadcast_to(\n array_ops.reshape(output_grad, reduced_shape), input_shape)\n\n # If we *do* have traces or transpose operations, then prepend the extra\n # reduced dimensions to the front. E.g. Given the equation \"aabbcd->ca\" we'd\n # first obtain the VJP for \"bdca->ca\", and then the VJP for \"aabbcd->bdca\".\n #\n # Obtain the input shape with reduced dimensions prepended, viz. [5,4,3,2].\n # This is the shape of the intermediate \"bdca\".\n grad_shape_with_reduced_labels = array_ops.concat(\n [reduced_dims, array_ops.shape(output_grad)], axis=0)\n # Obtain the output shape of the reduction-only equation \"bdca->ca\" as if\n # keepdims=True; viz. [1,1,3,2]. Since we prepended the reduced labels, we\n # just have to prepend that many 1s to the output shape.\n reduced_shape = (\n array_ops.concat([\n array_ops.ones(len(reduced_label_set), dtype=dtypes.int32),\n array_ops.shape(output_grad)\n ],\n axis=0))\n # Compute the VJP for the intermediate (viz. \"bdca->ca\") for which\n # broadcasting is sufficient.\n broadcasted_grad = array_ops.broadcast_to(\n array_ops.reshape(output_grad, reduced_shape),\n grad_shape_with_reduced_labels)\n # Compute the VJP for the final step (viz. \"aabbcd->bdca\"). We can use\n # einsum with the input and output subscripts reversed (viz. \"bdca->aabbcd\")\n # since the output axis labels now appear in the input subscripts.\n return gen_linalg_ops.einsum([broadcasted_grad],\n \"{}->{}\".format(reduced_subs + output_subs,\n input_subs))\n\n def _GetGradWrt(output_grad, other_operand, input_shape, input_subs,\n other_subs, output_subs):\n \"\"\"Returns the gradient wrt an input operand for a binary einsum.\n\n This function does not handle (un)broadcasting. 
This must be done separately\n on the returned gradient.\n\n Args:\n output_grad: The gradient wrt the output of a binary einsum operation.\n other_operand: The complementary `Tensor` operand i.e. which is not the\n input operand.\n input_shape: A `Tensor` representing the shape of input operand.\n input_subs: The subscripts of the input operand.\n other_subs: The subscripts of the complementary operand.\n output_subs: The output subscripts.\n \"\"\"\n # Claim: For the einsum operation z = einsum(\"{eq_x},{eq_y}->{eq_z}\", x, y),\n # where the equation involves only Tensor contractions, generalized traces\n # and transposes, the input gradients are given by the vector-jacobian\n # products (VJPs):\n #\n # grad_wrt_x = einsum(\"{eq_y},{eq_z}->{eq_x}\", y, grad_wrt_z)\n # grad_wrt_y = einsum(\"{eq_x},{eq_z}->{eq_y}\", x, grad_wrt_z}\n #\n # where grad_wrt_x and grad_wrt_y are the gradients with respect to inputs\n # x and y and grad_wrt_z is the given gradient with respect to output z.\n #\n # Proof: For unary einsum equations involving only transpose (\"ij->ji\") and\n # traces (\"ii->i\"), the linear mapping's Jacobian at input x is given\n # by the function itself. We can verify that the linear map given by the\n # VJP are einsums with the equations \"ji->ij\" and \"i->ii\" respectively,\n # where the latter represents 'un-tracing', or filling the diagonal with\n # the input axis and non-diagonal entries are zeros.\n # Furthermore, recall that matrix multiplication, which is\n # represented by the equation \"ab,bc->ac\", has its VJPs given by the\n # einsum equations \"ac,bc->ab\" and \"ab,ac->bc\" (see, for example\n # https://math.stackexchange.com/a/2755680). Combined with transposes and\n # traces we can rewrite Tensor contractions as regular matrix\n # multiplication. Since each of these operations have their VJPs described\n # by einsums of the required pattern, the result follows.\n #\n # Accordingly, einsum operations except for those with reductions, e.g.\n # \"abc,cd->ad\" have their VJPs defined by:\n # \"{output_subs},{other_subs}->{input_subs}\".\n #\n # But if there is a reduction, this would lead to the equation \"ad,cd->abc\"\n # which is invalid because the reduced axis label 'b' is present in the\n # output but not in any of the inputs. Therefore, we compute the VJP in two\n # steps: first we obtain VJP for \"ac,cd->ad\" and then we compute the VJP of\n # \"abc->ac\" or, equivalently, reduce_sum(..., axis=1).\n #\n # Compute the set of input axis labels which doesn't appear in either the\n # output subscripts or the other operand's subscript. E.g. the set {'b'} for\n # the equation \"abc,cd->ad\".\n reduced_label_set = set(input_subs).difference(\n set(output_subs + other_subs + \".\"))\n # Obtain the input subscripts with the reduced axis labels removed. E.g.\n # \"ac\" in the above example.\n left_subs = \"\".join([s for s in input_subs if s not in reduced_label_set])\n\n # Compute the gradient wrt the input, without accounting for the operation\n # \"abc->ac\". So, now we have the VJP of the operation \"ac,cd->ad\".\n grad_reduced = gen_linalg_ops.einsum([output_grad, other_operand],\n \"{},{}->{}\".format(\n output_subs, other_subs,\n left_subs))\n # If the reduced_label_set is empty, then we already have the gradient\n # wrt the input.\n if not reduced_label_set:\n return grad_reduced\n # Otherwise, we currently have the gradient wrt the output of the reduction\n # operation \"abc->ac\". 
Invoke the subroutine for the gradient for unary\n # einsum with reductions.\n return _GetGradReduced(grad_reduced, left_subs, input_subs, input_shape,\n reduced_label_set)\n\n equation = op.get_attr(\"equation\")\n if isinstance(equation, bytes):\n equation = equation.decode()\n input_subs, output_subs = equation.split(\"->\")\n\n if len(op.inputs) == 1:\n # For the unary einsum z = einsum(\"{eq_x}->{eq_z}\", x), the gradient wrt the\n # input (VJP) is given by the reversed equation:\n # grad_wrt_x = einsum(\"{eq_z}->{eq_x}\", grad_wrt_z)\n # (See the justification in _GetGradWrt). This is valid unless there are\n # reduced axis labels; i.e. axis labels appearing in the input but not in\n # the output subscripts.\n input_shape = array_ops.shape(op.inputs[0])\n # Find the axis labels which appear only in the input.\n reduced_label_set = set(input_subs).difference(set(output_subs + ellipsis))\n if not reduced_label_set:\n # Return the einsum given by the reversed equation, since we don't have\n # reduced axes.\n return gen_linalg_ops.einsum([grad],\n \"{}->{}\".format(output_subs, input_subs))\n # We do have reduced axes, so we invoke the subroutine for reduced unary\n # einsums.\n return _GetGradReduced(grad, output_subs, input_subs, input_shape,\n reduced_label_set)\n\n x_subs, y_subs = input_subs.split(\",\")\n # Add ellipsis for broadcasted dimensions if any operand does not have it.\n # This is because the equation \"...ij,jk->ik\" may be valid if the 0th input's\n # batch shape is empty, but the VJP equation \"jk,ik->...ij\" is not valid\n # because only the output subscripts contain ellipsis.\n if ellipsis in output_subs:\n if ellipsis not in x_subs:\n x_subs += ellipsis\n if ellipsis not in y_subs:\n y_subs += ellipsis\n\n # Obtain the gradients wrt the inputs x and y, without taking into account\n # the unbroadcasting.\n x, y = op.inputs[0], op.inputs[1]\n x_shape = array_ops.shape(x)\n y_shape = array_ops.shape(y)\n grad_x = _GetGradWrt(grad, y, x_shape, x_subs, y_subs, output_subs)\n grad_y = _GetGradWrt(grad, x, y_shape, y_subs, x_subs, output_subs)\n\n if ellipsis not in output_subs:\n # If no ellipsis in the output; then no need to unbroadcast.\n return grad_x, grad_y\n\n # Below we handle the case that broadcasting between x and y was necessary,\n # with x and y having possibly different batch shapes.\n\n # Obtain the range of axes which map to ellipsis. E.g. 
for subscripts 'ab...c'\n # and shape of rank 10; the range [3:-1] denotes the broadcasted axes.\n bx_start, bx_end = _GetBcastSubshape(x_subs)\n by_start, by_end = _GetBcastSubshape(y_subs)\n # If the static batch shapes are equal, we don't need to unbroadcast.\n x_shape_static = x.get_shape()\n y_shape_static = y.get_shape()\n if (x_shape_static.is_fully_defined() and\n y_shape_static.is_fully_defined() and\n x_shape_static[bx_start:bx_end] == y_shape_static[by_start:by_end]):\n return grad_x, grad_y\n\n # Sum the gradient across the broadcasted axes.\n rx, ry = array_ops.broadcast_gradient_args(x_shape[bx_start:bx_end],\n y_shape[by_start:by_end])\n grad_x = array_ops.reshape(\n math_ops.reduce_sum(grad_x, bx_start + rx), x_shape)\n grad_y = array_ops.reshape(\n math_ops.reduce_sum(grad_y, by_start + ry), y_shape)\n return grad_x, grad_y\n\n\[email protected](\"MatrixDeterminant\")\ndef _MatrixDeterminantGrad(op, grad):\n \"\"\"Gradient for MatrixDeterminant.\"\"\"\n a = op.inputs[0]\n c = op.outputs[0]\n a_adj_inv = linalg_ops.matrix_inverse(a, adjoint=True)\n multipliers = array_ops.reshape(grad * c,\n array_ops.concat([array_ops.shape(c), [1, 1]],\n 0))\n return multipliers * a_adj_inv\n\n\[email protected](\"MatrixSquareRoot\")\ndef _MatrixSquareRootGrad(op, grad):\n \"\"\"Gradient for MatrixSquareRoot.\"\"\"\n\n # Let A be an m x m square matrix (or batch of matrices)\n # Let R = sqrtm(A)\n # By definition, A = RR\n # Take the differential: dA = d(RR) = RdR + dRR\n # Solve the resulting Sylvester equation for dR\n\n # Used to find Kronecker products within the Sylvester equation\n def _KroneckerProduct(b1, b2):\n \"\"\"Computes the Kronecker product of two batches of square matrices.\"\"\"\n b1_shape = array_ops.shape(b1)\n b2_shape = array_ops.shape(b2)\n b1_order = b1_shape[-1]\n b2_order = b2_shape[-1]\n\n shape_slice_size = [math_ops.subtract(array_ops.size(b1_shape), 2)]\n shape_slice = array_ops.slice(b1_shape, [0],\n shape_slice_size) # Same for both batches\n b1_reshape_shape = array_ops.concat(\n [shape_slice, [b1_order], [1], [b1_order], [1]], 0)\n b2_reshape_shape = array_ops.concat(\n [shape_slice, [1], [b2_order], [1], [b2_order]], 0)\n\n b1_reshape = array_ops.reshape(b1, b1_reshape_shape)\n b2_reshape = array_ops.reshape(b2, b2_reshape_shape)\n\n order_prod = b1_order * b2_order\n kprod_shape = array_ops.concat([shape_slice, [order_prod], [order_prod]], 0)\n return array_ops.reshape(b1_reshape * b2_reshape, kprod_shape)\n\n sqrtm = op.outputs[0] # R\n shape = array_ops.shape(sqrtm)\n order = shape[-1] # m\n matrix_count = math_ops.reduce_prod(shape[0:-2])\n\n # Get batch of m x m identity matrices\n eye = linalg_ops.eye(order, dtype=sqrtm.dtype) # m x m identity matrix\n eye_flat = array_ops.reshape(eye, [-1])\n eye_tiled = array_ops.tile(eye_flat, [matrix_count])\n eye_batch = array_ops.reshape(eye_tiled, shape)\n\n # The transpose of R is taken in the k1 term instead of k2 in\n # order to prevent redundant transposition of R (i.e. 
(R')' = R)\n sqrtm_transpose = array_ops.matrix_transpose(sqrtm)\n k1 = _KroneckerProduct(eye_batch, sqrtm_transpose)\n k2 = _KroneckerProduct(sqrtm, eye_batch)\n ksum = math_ops.add(k1, k2)\n\n # Vectorize dA\n shape_slice_size = [math_ops.subtract(array_ops.size(shape), 2)]\n shape_slice = array_ops.slice(shape, [0], shape_slice_size)\n shape_vec_da = array_ops.concat([shape_slice, [order * order], [1]], 0)\n vec_da = array_ops.reshape(array_ops.matrix_transpose(grad), shape_vec_da)\n\n # Solve for vec(dR)\n vec_dsqrtm = linalg_ops.matrix_solve(ksum, vec_da)\n\n # Solve for dR by inverse vectorizing vec(dR)\n dsqrtm_transpose = array_ops.reshape(vec_dsqrtm, shape)\n return array_ops.matrix_transpose(dsqrtm_transpose)\n\n\[email protected](\"LogMatrixDeterminant\")\ndef _LogMatrixDeterminantGrad(op, _, grad_b):\n \"\"\"Gradient for LogMatrixDeterminant.\"\"\"\n a = op.inputs[0]\n c = op.outputs[1]\n a_adj_inv = linalg_ops.matrix_inverse(a, adjoint=True)\n multipliers = array_ops.reshape(\n grad_b, array_ops.concat([array_ops.shape(c), [1, 1]], 0))\n return multipliers * a_adj_inv\n\n\[email protected](\"Cholesky\")\ndef _CholeskyGrad(op, grad):\n \"\"\"Gradient for Cholesky.\"\"\"\n\n # Gradient is l^{-H} @ ((l^{H} @ grad) * (tril(ones)-1/2*eye)) @ l^{-1}\n l = op.outputs[0]\n num_rows = array_ops.shape(l)[-1]\n batch_shape = array_ops.shape(l)[:-2]\n l_inverse = linalg_ops.matrix_triangular_solve(l,\n linalg_ops.eye(\n num_rows,\n batch_shape=batch_shape,\n dtype=l.dtype))\n\n middle = math_ops.matmul(l, grad, adjoint_a=True)\n middle = array_ops.matrix_set_diag(middle,\n 0.5 * array_ops.matrix_diag_part(middle))\n middle = array_ops.matrix_band_part(middle, -1, 0)\n\n grad_a = math_ops.matmul(\n math_ops.matmul(l_inverse, middle, adjoint_a=True), l_inverse)\n\n grad_a += _linalg.adjoint(grad_a)\n return grad_a * 0.5\n\n\[email protected](\"Qr\")\ndef _QrGrad(op, dq, dr):\n \"\"\"Gradient for Qr.\"\"\"\n q, r = op.outputs\n if q.dtype.is_complex:\n raise NotImplementedError(\"QrGrad not implemented for dtype: %s\" % q.dtype)\n if (r.shape.ndims is None or r.shape.as_list()[-2] is None or\n r.shape.as_list()[-1] is None):\n raise NotImplementedError(\"QrGrad not implemented with dynamic shapes.\")\n if r.shape.dims[-2].value != r.shape.dims[-1].value:\n raise NotImplementedError(\"QrGrad not implemented when ncols > nrows \"\n \"or full_matrices is true and ncols != nrows.\")\n\n qdq = math_ops.matmul(q, dq, adjoint_a=True)\n qdq_ = qdq - _linalg.adjoint(qdq)\n rdr = math_ops.matmul(r, dr, adjoint_b=True)\n rdr_ = rdr - _linalg.adjoint(rdr)\n tril = array_ops.matrix_band_part(qdq_ + rdr_, -1, 0)\n\n def _TriangularSolve(x, r):\n \"\"\"Equiv to matmul(x, adjoint(matrix_inverse(r))) if r is upper-tri.\"\"\"\n return _linalg.adjoint(\n linalg_ops.matrix_triangular_solve(\n r, _linalg.adjoint(x), lower=False, adjoint=False))\n\n grad_a = math_ops.matmul(q, dr + _TriangularSolve(tril, r))\n grad_b = _TriangularSolve(dq - math_ops.matmul(q, qdq), r)\n return grad_a + grad_b\n\n\[email protected](\"MatrixSolve\")\ndef _MatrixSolveGrad(op, grad):\n \"\"\"Gradient for MatrixSolve.\"\"\"\n a = op.inputs[0]\n adjoint_a = op.get_attr(\"adjoint\")\n c = op.outputs[0]\n grad_b = linalg_ops.matrix_solve(a, grad, adjoint=not adjoint_a)\n if adjoint_a:\n grad_a = -math_ops.matmul(c, grad_b, adjoint_b=True)\n else:\n grad_a = -math_ops.matmul(grad_b, c, adjoint_b=True)\n return (grad_a, grad_b)\n\n\[email protected](\"MatrixSolveLs\")\ndef _MatrixSolveLsGrad(op, grad):\n \"\"\"Gradients for 
MatrixSolveLs.\"\"\"\n\n # TODO(rmlarsen): The implementation could be more efficient:\n # a) Output the Cholesky factorization from forward op instead of\n # recomputing it here.\n # b) Implement a symmetric rank-k update op instead of computing\n # x*z + transpose(x*z). This pattern occurs other places in TensorFlow.\n\n def _Overdetermined(op, grad):\n \"\"\"Gradients for the overdetermined case of MatrixSolveLs.\n\n This is the backprop for the solution to the normal equations of the first\n kind:\n X = F(A, B) = (A^T * A + lambda * I)^{-1} * A^T * B\n which solve the least squares problem\n min ||A * X - B||_F^2 + lambda ||X||_F^2.\n \"\"\"\n a = op.inputs[0]\n b = op.inputs[1]\n x = op.outputs[0]\n l2_regularizer = math_ops.cast(op.inputs[2], a.dtype.base_dtype)\n # pylint: disable=protected-access\n chol = linalg_ops._RegularizedGramianCholesky(\n a, l2_regularizer=l2_regularizer, first_kind=True)\n # pylint: enable=protected-access\n # Temporary z = (A^T * A + lambda * I)^{-1} * grad.\n z = linalg_ops.cholesky_solve(chol, grad)\n xzt = math_ops.matmul(x, z, adjoint_b=True)\n zx_sym = xzt + array_ops.matrix_transpose(xzt)\n grad_a = -math_ops.matmul(a, zx_sym) + math_ops.matmul(b, z, adjoint_b=True)\n grad_b = math_ops.matmul(a, z)\n return (grad_a, grad_b, None)\n\n def _Underdetermined(op, grad):\n \"\"\"Gradients for the underdetermined case of MatrixSolveLs.\n\n This is the backprop for the solution to the normal equations of the second\n kind:\n X = F(A, B) = A * (A*A^T + lambda*I)^{-1} * B\n that (for lambda=0) solve the least squares problem\n min ||X||_F subject to A*X = B.\n \"\"\"\n a = op.inputs[0]\n b = op.inputs[1]\n l2_regularizer = math_ops.cast(op.inputs[2], a.dtype.base_dtype)\n # pylint: disable=protected-access\n chol = linalg_ops._RegularizedGramianCholesky(\n a, l2_regularizer=l2_regularizer, first_kind=False)\n # pylint: enable=protected-access\n grad_b = linalg_ops.cholesky_solve(chol, math_ops.matmul(a, grad))\n # Temporary tmp = (A * A^T + lambda * I)^{-1} * B.\n tmp = linalg_ops.cholesky_solve(chol, b)\n a1 = math_ops.matmul(tmp, a, adjoint_a=True)\n a1 = -math_ops.matmul(grad_b, a1)\n a2 = grad - math_ops.matmul(a, grad_b, adjoint_a=True)\n a2 = math_ops.matmul(tmp, a2, adjoint_b=True)\n grad_a = a1 + a2\n return (grad_a, grad_b, None)\n\n fast = op.get_attr(\"fast\")\n if fast is False:\n raise ValueError(\"Gradient not defined for fast=False\")\n matrix_shape = op.inputs[0].get_shape()[-2:]\n if matrix_shape.is_fully_defined():\n if matrix_shape[-2] >= matrix_shape[-1]:\n return _Overdetermined(op, grad)\n else:\n return _Underdetermined(op, grad)\n else:\n # We have to defer determining the shape to runtime and use\n # conditional execution of the appropriate graph.\n matrix_shape = array_ops.shape(op.inputs[0])[-2:]\n return control_flow_ops.cond(matrix_shape[-2] >= matrix_shape[-1],\n lambda: _Overdetermined(op, grad),\n lambda: _Underdetermined(op, grad))\n\n\[email protected](\"MatrixTriangularSolve\")\ndef _MatrixTriangularSolveGrad(op, grad):\n \"\"\"Gradient for MatrixTriangularSolve.\"\"\"\n a = op.inputs[0]\n adjoint_a = op.get_attr(\"adjoint\")\n lower_a = op.get_attr(\"lower\")\n c = op.outputs[0]\n grad_b = linalg_ops.matrix_triangular_solve(\n a, grad, lower=lower_a, adjoint=not adjoint_a)\n if adjoint_a:\n grad_a = -math_ops.matmul(c, grad_b, adjoint_b=True)\n else:\n grad_a = -math_ops.matmul(grad_b, c, adjoint_b=True)\n if lower_a:\n grad_a = array_ops.matrix_band_part(grad_a, -1, 0)\n else:\n grad_a = array_ops.matrix_band_part(grad_a, 
0, -1)\n return (grad_a, grad_b)\n\n\[email protected](\"SelfAdjointEigV2\")\ndef _SelfAdjointEigV2Grad(op, grad_e, grad_v):\n \"\"\"Gradient for SelfAdjointEigV2.\"\"\"\n e = op.outputs[0]\n compute_v = op.get_attr(\"compute_v\")\n # a = op.inputs[0], which satisfies\n # a[...,:,:] * v[...,:,i] = e[...,i] * v[...,i]\n with ops.control_dependencies([grad_e, grad_v]):\n if compute_v:\n v = op.outputs[1]\n # Construct the matrix f(i,j) = (i != j ? 1 / (e_i - e_j) : 0).\n # Notice that because of the term involving f, the gradient becomes\n # infinite (or NaN in practice) when eigenvalues are not unique.\n # Mathematically this should not be surprising, since for (k-fold)\n # degenerate eigenvalues, the corresponding eigenvectors are only defined\n # up to arbitrary rotation in a (k-dimensional) subspace.\n f = array_ops.matrix_set_diag(\n math_ops.reciprocal(\n array_ops.expand_dims(e, -2) - array_ops.expand_dims(e, -1)),\n array_ops.zeros_like(e))\n grad_a = math_ops.matmul(\n v,\n math_ops.matmul(\n array_ops.matrix_diag(grad_e) +\n f * math_ops.matmul(v, grad_v, adjoint_a=True),\n v,\n adjoint_b=True))\n else:\n _, v = linalg_ops.self_adjoint_eig(op.inputs[0])\n grad_a = math_ops.matmul(v,\n math_ops.matmul(\n array_ops.matrix_diag(grad_e),\n v,\n adjoint_b=True))\n # The forward op only depends on the lower triangular part of a, so here we\n # symmetrize and take the lower triangle\n grad_a = array_ops.matrix_band_part(grad_a + _linalg.adjoint(grad_a), -1, 0)\n grad_a = array_ops.matrix_set_diag(grad_a,\n 0.5 * array_ops.matrix_diag_part(grad_a))\n return grad_a\n\n\[email protected](\"Svd\")\ndef _SvdGrad(op, grad_s, grad_u, grad_v):\n \"\"\"Gradient for the singular value decomposition.\"\"\"\n\n # The derivation for the compute_uv=False case, and most of\n # the derivation for the full_matrices=True case, are in\n # Giles' paper (see reference at top of file). 
A derivation for\n # the full_matrices=False case is available at\n # https://j-towns.github.io/papers/svd-derivative.pdf\n # The derivation for complex valued SVD can be found in\n # https://re-ra.xyz/misc/complexsvd.pdf or\n # https://giggleliu.github.io/2019/04/02/einsumbp.html\n a = op.inputs[0]\n a_shape = a.get_shape().with_rank_at_least(2)\n grad_s = math_ops.cast(grad_s, a.dtype)\n grad_s_mat = array_ops.matrix_diag(grad_s)\n\n if not op.get_attr(\"compute_uv\"):\n s, u, v = linalg_ops.svd(a, compute_uv=True)\n grad_a = math_ops.matmul(u, math_ops.matmul(grad_s_mat, v, adjoint_b=True))\n grad_a.set_shape(a_shape)\n return grad_a\n\n full_matrices = op.get_attr(\"full_matrices\")\n\n grad_u_shape = grad_u.get_shape().with_rank_at_least(2)\n grad_v_shape = grad_v.get_shape().with_rank_at_least(2)\n m = a_shape.dims[-2].merge_with(grad_u_shape[-2])\n n = a_shape.dims[-1].merge_with(grad_v_shape[-2])\n batch_shape = a_shape[:-2].merge_with(grad_u_shape[:-2]).merge_with(\n grad_v_shape[:-2])\n a_shape = batch_shape.concatenate([m, n])\n\n m = a_shape.dims[-2].value\n n = a_shape.dims[-1].value\n # TODO(rmlarsen): Make this work with placeholders.\n if m is None or n is None:\n raise NotImplementedError(\n \"SVD gradient has not been implemented for input with unknown \"\n \"inner matrix shape.\")\n\n s = op.outputs[0]\n u = op.outputs[1]\n v = op.outputs[2]\n s = math_ops.cast(s, a.dtype)\n\n use_adjoint = False\n if m > n:\n # Compute the gradient for A^H = V * S^T * U^H, and (implicitly) take the\n # Hermitian transpose of the gradient at the end.\n use_adjoint = True\n m, n = n, m\n u, v = v, u\n grad_u, grad_v = grad_v, grad_u\n\n with ops.control_dependencies([grad_s, grad_u, grad_v]):\n if full_matrices and abs(m - n) > 1:\n raise NotImplementedError(\n \"svd gradient is not implemented for abs(m - n) > 1 \"\n \"when full_matrices is True\")\n s_mat = array_ops.matrix_diag(s)\n s2 = math_ops.square(s)\n\n # NOTICE: Because of the term involving f, the gradient becomes\n # infinite (or NaN in practice) when singular values are not unique.\n # Mathematically this should not be surprising, since for (k-fold)\n # degenerate singular values, the corresponding singular vectors are\n # only defined up a (k-dimensional) subspace. 
In practice, this can\n # lead to numerical instability when singular values are close but not\n # exactly equal.\n # To avoid nan in cases with degenrate sigular values or zero sigular values\n # in calculating f and s_inv_mat, we introduce a Lorentz brodening.\n\n def _SafeReciprocal(x, epsilon=1E-20):\n return x * math_ops.reciprocal(x * x + epsilon)\n\n s_shape = array_ops.shape(s)\n f = array_ops.matrix_set_diag(\n _SafeReciprocal(\n array_ops.expand_dims(s2, -2) - array_ops.expand_dims(s2, -1)),\n array_ops.zeros_like(s))\n s_inv_mat = array_ops.matrix_diag(_SafeReciprocal(s))\n\n v1 = v[..., :, :m]\n grad_v1 = grad_v[..., :, :m]\n\n u_gu = math_ops.matmul(u, grad_u, adjoint_a=True)\n v_gv = math_ops.matmul(v1, grad_v1, adjoint_a=True)\n\n f_u = f * u_gu\n f_v = f * v_gv\n\n term1_nouv = (\n grad_s_mat + math_ops.matmul(f_u + _linalg.adjoint(f_u), s_mat) +\n math_ops.matmul(s_mat, f_v + _linalg.adjoint(f_v)))\n\n term1 = math_ops.matmul(u, math_ops.matmul(term1_nouv, v1, adjoint_b=True))\n\n if m == n:\n grad_a_before_transpose = term1\n else:\n gv1t = array_ops.matrix_transpose(grad_v1, conjugate=True)\n gv1t_v1 = math_ops.matmul(gv1t, v1)\n term2_nous = gv1t - math_ops.matmul(gv1t_v1, v1, adjoint_b=True)\n\n if full_matrices:\n v2 = v[..., :, m:n]\n grad_v2 = grad_v[..., :, m:n]\n\n v1t_gv2 = math_ops.matmul(v1, grad_v2, adjoint_a=True)\n term2_nous -= math_ops.matmul(v1t_gv2, v2, adjoint_b=True)\n\n u_s_inv = math_ops.matmul(u, s_inv_mat)\n term2 = math_ops.matmul(u_s_inv, term2_nous)\n\n grad_a_before_transpose = term1 + term2\n\n if a.dtype.is_complex:\n eye = _linalg.eye(s_shape[-1], batch_shape=s_shape[:-1], dtype=a.dtype)\n l = eye * v_gv\n term3_nouv = math_ops.matmul(s_inv_mat, _linalg.adjoint(l) - l)\n term3 = 1 / 2. * math_ops.matmul(\n u, math_ops.matmul(term3_nouv, v1, adjoint_b=True))\n\n grad_a_before_transpose += term3\n\n if use_adjoint:\n grad_a = array_ops.matrix_transpose(\n grad_a_before_transpose, conjugate=True)\n else:\n grad_a = grad_a_before_transpose\n\n grad_a.set_shape(a_shape)\n return grad_a\n\n\ndef _LeftShift(x):\n \"\"\"Shifts next-to-last dimension to the left, adding zero on the right.\"\"\"\n rank = array_ops.rank(x)\n zeros = array_ops.zeros((rank - 2, 2), dtype=dtypes.int32)\n pad = array_ops.concat([zeros, array_ops.constant([[0, 1], [0, 0]])], axis=0)\n return array_ops.pad(x[..., 1:, :], pad)\n\n\ndef _RightShift(x):\n \"\"\"Shifts next-to-last dimension to the right, adding zero on the left.\"\"\"\n rank = array_ops.rank(x)\n zeros = array_ops.zeros((rank - 2, 2), dtype=dtypes.int32)\n pad = array_ops.concat([zeros, array_ops.constant([[1, 0], [0, 0]])], axis=0)\n return array_ops.pad(x[..., :-1, :], pad)\n\n\[email protected](\"TridiagonalMatMul\")\ndef _TridiagonalMatMulGrad(op, grad):\n \"\"\"Gradient for TridiagonalMatMul.\"\"\"\n superdiag_conj = array_ops.matrix_transpose(op.inputs[0], conjugate=True)\n maindiag_conj = array_ops.matrix_transpose(op.inputs[1], conjugate=True)\n subdiag_conj = array_ops.matrix_transpose(op.inputs[2], conjugate=True)\n rhs_conj = math_ops.conj(op.inputs[3])\n\n superdiag_grad = math_ops.reduce_sum(_LeftShift(rhs_conj) * grad, axis=-1)\n maindiag_grad = math_ops.reduce_sum(rhs_conj * grad, axis=-1)\n subdiag_grad = math_ops.reduce_sum(_RightShift(rhs_conj) * grad, axis=-1)\n rhs_grad = _RightShift(superdiag_conj * grad) + \\\n maindiag_conj * grad + _LeftShift(subdiag_conj * grad)\n\n superdiag_grad = array_ops.expand_dims(superdiag_grad, -2)\n maindiag_grad = array_ops.expand_dims(maindiag_grad, -2)\n 
subdiag_grad = array_ops.expand_dims(subdiag_grad, -2)\n\n return superdiag_grad, maindiag_grad, subdiag_grad, rhs_grad\n\n\[email protected](\"TridiagonalSolve\")\ndef _TridiagonalSolveGrad(op, grad):\n \"\"\"Gradient for TridiagonalSolveGrad.\"\"\"\n diags = op.inputs[0]\n x = op.outputs[0]\n partial_pivoting = op.get_attr(\"partial_pivoting\")\n\n # Transposing the matrix within tridiagonal_solve kernel by interchanging\n # superdiagonal and subdiagonal wouldn't work on GPU due to mismatch with\n # paddings required by cusparse*gtsv routines.\n # So constructing the transposed matrix in Python.\n diags_transposed = _TransposeTridiagonalMatrix(diags)\n\n grad_rhs = linalg_ops.tridiagonal_solve(diags_transposed, grad,\n partial_pivoting=partial_pivoting)\n grad_diags = -_MatmulExtractingThreeDiagonals(grad_rhs, x)\n return grad_diags, grad_rhs\n\n\ndef _TransposeTridiagonalMatrix(diags):\n \"\"\"Transposes a tridiagonal matrix.\n\n Args:\n diags: the diagonals of the input matrix in the compact form (see\n linalg_ops.tridiagonal_solve).\n\n Returns:\n Diagonals of the transposed matrix in the compact form.\n \"\"\"\n\n diag = diags[..., 1, :]\n\n if diags.shape.is_fully_defined():\n # For fully defined tensor we can concat with a tensor of zeros, which is\n # faster than using array_ops.pad().\n zeros = array_ops.zeros(list(diags.shape[:-2]) + [1], dtype=diags.dtype)\n superdiag = array_ops.concat((diags[..., 2, 1:], zeros), axis=-1)\n subdiag = array_ops.concat((zeros, diags[..., 0, :-1]), axis=-1)\n else:\n rank = array_ops.rank(diags)\n zeros = array_ops.zeros((rank - 2, 2), dtype=dtypes.int32)\n superdiag_pad = array_ops.concat((zeros, array_ops.constant([[0, 1]])),\n axis=0)\n superdiag = array_ops.pad(diags[..., 2, 1:], superdiag_pad)\n subdiag_pad = array_ops.concat((zeros, array_ops.constant([[1, 0]])),\n axis=0)\n subdiag = array_ops.pad(diags[..., 0, :-1], subdiag_pad)\n return array_ops.stack([superdiag, diag, subdiag], axis=-2)\n\n\ndef _MatmulExtractingThreeDiagonals(x, y_tr):\n \"\"\"Multiplies matrices and extracts three diagonals from the product.\n\n With sizes M x K and K x M, this function takes O(MK) time and O(M) space,\n while using math_ops.matmul, and then extracting the diagonals would take\n O(M^2 K) time and O(M^2) space.\n\n Args:\n x: first matrix\n y_tr: second matrix transposed\n\n Returns:\n Diagonals of the product in compact format (see\n linalg_ops.tridiagonal_solve)\n\n \"\"\"\n diag = math_ops.reduce_sum(x * y_tr, axis=-1)\n\n if y_tr.shape.is_fully_defined():\n zeros = array_ops.zeros(\n list(x.shape[:-2]) + [1, x.shape[-1]], dtype=x.dtype)\n superdiag = math_ops.reduce_sum(\n x * array_ops.concat((y_tr[..., 1:, :], zeros), axis=-2), axis=-1)\n subdiag = math_ops.reduce_sum(\n x * array_ops.concat((zeros, y_tr[..., :-1, :]), axis=-2), axis=-1)\n else:\n rank = array_ops.rank(y_tr)\n zeros = array_ops.zeros((rank - 2, 2), dtype=dtypes.int32)\n superdiag_pad = array_ops.concat(\n (zeros, array_ops.constant([[0, 1], [0, 0]])), axis=0)\n superdiag = math_ops.reduce_sum(\n x * array_ops.pad(y_tr[..., 1:, :], superdiag_pad), axis=-1)\n subdiag_pad = array_ops.concat(\n (zeros, array_ops.constant([[1, 0], [0, 0]])), axis=0)\n subdiag = math_ops.reduce_sum(\n x * array_ops.pad(y_tr[..., :-1, :], subdiag_pad), axis=-1)\n return array_ops.stack([superdiag, diag, subdiag], axis=-2)\n",
"# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Cluster Resolvers are used for dynamic cluster IP/hostname resolution.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport abc\n\nimport collections\nimport six\n\nfrom tensorflow.python.client import session\nfrom tensorflow.python.eager import context\nfrom tensorflow.python.framework import config\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.training.server_lib import ClusterSpec\nfrom tensorflow.python.util.tf_export import tf_export\n\n\ndef format_master_url(master, rpc_layer=None):\n if rpc_layer:\n return '%s://%s' % (rpc_layer, master)\n else:\n return master\n\n\ndef get_accelerator_devices(master, config_proto):\n \"\"\"Returns accelerator devices given a master and a configuration.\"\"\"\n if context.executing_eagerly():\n logical_devices = config.list_logical_devices()\n devices = []\n for d in logical_devices:\n if d.device_type == 'CPU' or d.device_type == 'XLA_CPU': # Filter CPUs\n continue\n devices.append(session._DeviceAttributes(d.name, d.device_type, 0, 0)) # pylint: disable=protected-access\n return devices\n else:\n with ops.Graph().as_default():\n with session.Session(master, config=config_proto) as s:\n devices = s.list_devices()\n return devices\n\n\n@tf_export('distribute.cluster_resolver.ClusterResolver')\[email protected]_metaclass(abc.ABCMeta)\nclass ClusterResolver(object):\n \"\"\"Abstract class for all implementations of ClusterResolvers.\n\n This defines the skeleton for all implementations of ClusterResolvers.\n ClusterResolvers are a way for TensorFlow to communicate with various cluster\n management systems (e.g. GCE, AWS, etc...).\n\n By letting TensorFlow communicate with these systems, we will be able to\n automatically discover and resolve IP addresses for various TensorFlow\n workers. This will eventually allow us to automatically recover from\n underlying machine failures and scale TensorFlow worker clusters up and down.\n\n Note to Implementors: In addition to these abstract methods, you must also\n implement the task_type, task_id, and rpc_layer attributes. You may choose\n to implement them either as properties with getters or setters or directly\n set the attributes.\n\n - task_type is the name of the server's current named job (e.g. 
'worker',\n 'ps' in a distributed parameterized training job).\n - task_id is the ordinal index of the server within the task type.\n - rpc_layer is the protocol used by TensorFlow to communicate with other\n TensorFlow servers in a distributed environment.\n \"\"\"\n\n @abc.abstractmethod\n def cluster_spec(self):\n \"\"\"Retrieve the current state of the cluster and return a ClusterSpec.\n\n Returns:\n A ClusterSpec representing the state of the cluster at the moment this\n function is called.\n\n Implementors of this function must take care in ensuring that the\n ClusterSpec returned is up-to-date at the time of calling this function.\n This usually means retrieving the information from the underlying cluster\n management system every time this function is invoked and reconstructing\n a cluster_spec, rather than attempting to cache anything.\n \"\"\"\n raise NotImplementedError()\n\n @abc.abstractmethod\n def master(self, task_type=None, task_id=None, rpc_layer=None):\n \"\"\"Retrieves the name or URL of the session master.\n\n Args:\n task_type: (Optional) The type of the TensorFlow task of the master.\n task_id: (Optional) The index of the TensorFlow task of the master.\n rpc_layer: (Optional) The RPC protocol for the given cluster.\n\n Returns:\n The name or URL of the session master.\n\n Implementors of this function must take care in ensuring that the master\n returned is up-to-date at the time to calling this function. This usually\n means retrieving the master every time this function is invoked.\n \"\"\"\n raise NotImplementedError()\n\n def num_accelerators(self,\n task_type=None,\n task_id=None,\n config_proto=None):\n \"\"\"Returns the number of accelerator cores per worker.\n\n This returns the number of accelerator cores (such as GPUs and TPUs)\n available per worker.\n\n Optionally, we allow callers to specify the task_type, and task_id, for\n if they want to target a specific TensorFlow process to query\n the number of accelerators. 
This is to support heterogenous environments,\n where the number of accelerators cores per host is different.\n\n Args:\n task_type: (Optional) The type of the TensorFlow task of the machine we\n want to query.\n task_id: (Optional) The index of the TensorFlow task of the machine we\n want to query.\n config_proto: (Optional) Configuration for starting a new session to\n query how many accelerator cores it has.\n\n Returns:\n A map of accelerator types to number of cores.\n \"\"\"\n master = self.master(task_type, task_id)\n devices = get_accelerator_devices(master, config_proto)\n mapping = collections.defaultdict(int)\n for device in devices:\n if task_type is not None and task_id is not None:\n job_path = '/job:%s' % task_type\n task_path = '/task:%s' % task_id\n if job_path not in device.name or task_path not in device.name:\n continue\n mapping[device.device_type] += 1\n return mapping\n\n @property\n def environment(self):\n \"\"\"Returns the current environment which TensorFlow is running in.\n\n There are two possible return values, \"google\" (when TensorFlow is running\n in a Google-internal environment) or an empty string (when TensorFlow is\n running elsewhere).\n\n If you are implementing a ClusterResolver that works in both the Google\n environment and the open-source world (for instance, a TPU ClusterResolver\n or similar), you will have to return the appropriate string depending on the\n environment, which you will have to detect.\n\n Otherwise, if you are implementing a ClusterResolver that will only work\n in open-source TensorFlow, you do not need to implement this property.\n \"\"\"\n return ''\n\n\n@tf_export('distribute.cluster_resolver.SimpleClusterResolver')\nclass SimpleClusterResolver(ClusterResolver):\n \"\"\"Simple implementation of ClusterResolver that accepts a ClusterSpec.\"\"\"\n\n def __init__(self, cluster_spec, master='', task_type=None, task_id=None,\n environment='', num_accelerators=None,\n rpc_layer=None):\n \"\"\"Creates a SimpleClusterResolver from a ClusterSpec.\"\"\"\n super(SimpleClusterResolver, self).__init__()\n\n self._task_type = task_type\n self._task_id = task_id\n self._environment = environment\n\n self._num_accelerators = num_accelerators\n self._rpc_layer = rpc_layer\n\n if not isinstance(cluster_spec, ClusterSpec):\n raise TypeError('cluster_spec must be a ClusterSpec.')\n self._cluster_spec = cluster_spec\n\n if not isinstance(master, str):\n raise TypeError('master must be a string.')\n self._master = master\n\n def cluster_spec(self):\n \"\"\"Returns the ClusterSpec passed into the constructor.\"\"\"\n return self._cluster_spec\n\n def master(self, task_type=None, task_id=None, rpc_layer=None):\n \"\"\"Returns the master address to use when creating a session.\n\n Args:\n task_type: (Optional) The type of the TensorFlow task of the master.\n task_id: (Optional) The index of the TensorFlow task of the master.\n rpc_layer: (Optional) The RPC used by distributed TensorFlow.\n\n Returns:\n The name or URL of the session master.\n\n If a task_type and task_id is given, this will override the `master`\n string passed into the initialization function.\n \"\"\"\n if task_type is not None and task_id is not None:\n master = self.cluster_spec().task_address(task_type, task_id)\n else:\n master = self._master\n\n return format_master_url(master, rpc_layer=rpc_layer or self._rpc_layer)\n\n @property\n def task_type(self):\n return self._task_type\n\n @property\n def task_id(self):\n return self._task_id\n\n @task_type.setter\n def 
task_type(self, task_type):\n self._task_type = task_type\n\n @task_id.setter\n def task_id(self, task_id):\n self._task_id = task_id\n\n @property\n def environment(self):\n return self._environment\n\n def num_accelerators(self,\n task_type=None,\n task_id=None,\n config_proto=None):\n \"\"\"Returns the number of accelerator cores per worker.\n\n The SimpleClusterResolver does not do automatic detection of accelerators,\n so a TensorFlow session will never be created, and thus all arguments are\n unused and we simply assume that the type of accelerator is a GPU and return\n the value in provided to us in the constructor.\n\n Args:\n task_type: Unused.\n task_id: Unused.\n config_proto: Unused.\n \"\"\"\n # Unused\n del task_type, task_id, config_proto\n if self._num_accelerators is None:\n return {}\n return self._num_accelerators\n\n @property\n def rpc_layer(self):\n return self._rpc_layer\n\n @rpc_layer.setter\n def rpc_layer(self, rpc_layer):\n self._rpc_layer = rpc_layer\n\n\n@tf_export('distribute.cluster_resolver.UnionResolver')\nclass UnionClusterResolver(ClusterResolver):\n \"\"\"Performs a union on underlying ClusterResolvers.\n\n This class performs a union given two or more existing ClusterResolvers. It\n merges the underlying ClusterResolvers, and returns one unified ClusterSpec\n when cluster_spec is called. The details of the merge function is\n documented in the cluster_spec function.\n\n For additional ClusterResolver properties such as task type, task index,\n rpc layer, environment, etc..., we will return the value from the first\n ClusterResolver in the union.\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n \"\"\"Initializes a UnionClusterResolver with other ClusterResolvers.\n\n Args:\n *args: `ClusterResolver` objects to be unionized.\n **kwargs:\n rpc_layer - (Optional) Override value for the RPC layer used by\n TensorFlow.\n task_type - (Optional) Override value for the current task type.\n task_id - (Optional) Override value for the current task index.\n\n Raises:\n TypeError: If any argument is not a subclass of `ClusterResolvers`.\n ValueError: If there are no arguments passed.\n \"\"\"\n super(UnionClusterResolver, self).__init__()\n\n self._rpc_layer = kwargs.pop('rpc_layer', None)\n self._task_type = kwargs.pop('task_type', None)\n self._task_id = kwargs.pop('task_id', None)\n\n if kwargs:\n raise ValueError('Unexpected kwargs provided {!r}'.format(kwargs))\n\n if not args:\n raise ValueError('At least one ClusterResolver is required.')\n\n for cluster_resolver in args:\n if not isinstance(cluster_resolver, ClusterResolver):\n raise TypeError('All arguments must be a sub-class of '\n '`ClusterResolver.`')\n self._cluster_resolvers = args\n\n def cluster_spec(self):\n \"\"\"Returns a union of all the ClusterSpecs from the ClusterResolvers.\n\n Returns:\n A ClusterSpec containing host information merged from all the underlying\n ClusterResolvers.\n\n Raises:\n KeyError: If there are conflicting keys detected when merging two or\n more dictionaries, this exception is raised.\n\n Note: If there are multiple ClusterResolvers exposing ClusterSpecs with the\n same job name, we will merge the list/dict of workers.\n\n If *all* underlying ClusterSpecs expose the set of workers as lists, we will\n concatenate the lists of workers, starting with the list of workers from\n the first ClusterResolver passed into the constructor.\n\n If *any* of the ClusterSpecs expose the set of workers as a dict, we will\n treat all the sets of workers as dicts (even if they are 
returned as lists)\n and will only merge them into a dict if there is no conflicting keys. If\n there is a conflicting key, we will raise a `KeyError`.\n \"\"\"\n\n merged_cluster = {}\n\n # We figure out whether it is all lists for a particular job, or whether\n # there are dicts inside.\n for cluster_resolver in self._cluster_resolvers:\n cluster_spec = cluster_resolver.cluster_spec()\n cluster_dict = cluster_spec.as_dict()\n\n for job_name, tasks in cluster_dict.items():\n if job_name in merged_cluster:\n # If we see a dict, then we write a dict out regardless.\n if isinstance(tasks, dict):\n merged_cluster[job_name] = {}\n else:\n # We take whichever type is present.\n if isinstance(tasks, list):\n merged_cluster[job_name] = []\n else:\n merged_cluster[job_name] = {}\n\n # We then do the merge as appropriate in merged_cluster[job].\n for cluster_resolver in self._cluster_resolvers:\n cluster_spec = cluster_resolver.cluster_spec()\n cluster_dict = cluster_spec.as_dict()\n\n for job_name, tasks in cluster_dict.items():\n if isinstance(merged_cluster[job_name], list):\n # We all have lists, we can just concatenate and be done.\n merged_cluster[job_name].extend(tasks)\n else:\n if isinstance(tasks, list):\n # We convert to a dictionary if the type is a list.\n task_dict = dict(zip(range(0, len(tasks)), tasks))\n else:\n # We can simply make a copy (for update) and be done.\n task_dict = tasks.copy()\n\n # We detect if there are duplicates, and raise an error if so.\n task_keys = set(task_dict)\n merged_keys = set(merged_cluster[job_name].keys())\n intersected_keys = task_keys.intersection(merged_keys)\n if intersected_keys:\n raise KeyError('Duplicate keys detected when merging two '\n 'ClusterSpecs: %s' % repr(intersected_keys))\n\n # We do the merge after all the processing.\n merged_cluster[job_name].update(task_dict)\n\n return ClusterSpec(merged_cluster)\n\n def master(self, task_type=None, task_id=None, rpc_layer=None):\n \"\"\"Returns the master address to use when creating a session.\n\n This usually returns the master from the first ClusterResolver passed in,\n but you can override this by specifying the task_type and task_id.\n\n Args:\n task_type: (Optional) The type of the TensorFlow task of the master.\n task_id: (Optional) The index of the TensorFlow task of the master.\n rpc_layer: (Optional) The RPC protocol for the given cluster.\n\n Returns:\n The name or URL of the session master.\n \"\"\"\n if task_type is not None and task_id is not None:\n master = self.cluster_spec().task_address(task_type, task_id)\n return format_master_url(master, rpc_layer or self._rpc_layer)\n\n return self._cluster_resolvers[0].master(rpc_layer=rpc_layer)\n\n @property\n def task_type(self):\n return self._task_type or self._cluster_resolvers[0].task_type\n\n @property\n def task_id(self):\n return self._task_id or self._cluster_resolvers[0].task_id\n\n @task_type.setter\n def task_type(self, task_type):\n self._task_type = task_type\n\n @task_id.setter\n def task_id(self, task_id):\n self._task_id = task_id\n\n @property\n def environment(self):\n return self._cluster_resolvers[0].environment\n\n def num_accelerators(self,\n task_type=None,\n task_id=None,\n config_proto=None):\n return self._cluster_resolvers[0].num_accelerators(\n task_type, task_id, config_proto)\n\n @property\n def rpc_layer(self):\n return self._rpc_layer or self._cluster_resolvers[0].rpc_layer\n\n @rpc_layer.setter\n def rpc_layer(self, rpc_layer):\n self._rpc_layer = rpc_layer\n"
] |
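The inline comments in the linalg_grad.py sample above derive the einsum VJP rule grad_wrt_x = einsum("{eq_y},{eq_z}->{eq_x}", y, grad_wrt_z). As an illustrative check (not part of the dataset row; the shapes and tensor names below are arbitrary assumptions), the identity can be verified numerically with tf.GradientTape:

```python
# Numerically verify the einsum VJP identity described in the quoted comments:
# for z = einsum("ab,bc->ac", x, y), grad wrt x equals einsum("ac,bc->ab", dz, y).
import tensorflow as tf

x = tf.random.normal([2, 3])
y = tf.random.normal([3, 4])
dz = tf.random.normal([2, 4])  # assumed upstream gradient wrt z

with tf.GradientTape() as tape:
    tape.watch(x)
    z = tf.einsum("ab,bc->ac", x, y)

grad_x = tape.gradient(z, x, output_gradients=dz)
manual = tf.einsum("ac,bc->ab", dz, y)
print(tf.reduce_max(tf.abs(grad_x - manual)).numpy())  # ~0 up to float error
```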
[
[
"tensorflow.python.ops.math_ops.imag",
"tensorflow.python.ops.math_ops.log",
"tensorflow.python.ops.math_ops.subtract",
"numpy.sqrt",
"tensorflow.python.compat.compat.forward_compatible",
"tensorflow.python.eager.context.context",
"tensorflow.python.ops.math_ops.real",
"tensorflow.python.framework.ops.RegisterGradient",
"tensorflow.python.ops.math_ops.abs",
"tensorflow.python.ops.math_ops.mul_no_nan",
"tensorflow.python.ops.math_ops.divide",
"tensorflow.python.ops.math_ops.less",
"tensorflow.python.ops.math_ops.div_no_nan",
"tensorflow.python.ops.math_ops.matmul",
"tensorflow.python.ops.array_ops.tile",
"tensorflow.python.ops.math_ops.cumprod",
"tensorflow.python.ops.math_ops.negative",
"tensorflow.python.ops.math_ops.equal",
"tensorflow.python.ops.math_ops.truediv",
"tensorflow.python.ops.gen_math_ops.reciprocal_grad",
"tensorflow.python.ops.gen_math_ops.unsorted_segment_prod",
"tensorflow.python.ops.math_ops.range",
"tensorflow.python.ops.math_ops.sparse_segment_sqrt_n_grad",
"tensorflow.python.ops.math_ops.zeta",
"tensorflow.python.ops.math_ops.sparse_segment_mean_grad",
"tensorflow.python.ops.math_ops.greater_equal",
"tensorflow.python.ops.math_ops.polygamma",
"tensorflow.python.ops.math_ops.exp",
"tensorflow.python.ops.math_ops.cross",
"tensorflow.python.ops.math_ops.bessel_i1e",
"tensorflow.python.ops.math_ops.logical_and",
"tensorflow.python.ops.array_ops.transpose",
"tensorflow.python.ops.math_ops.bessel_i0e",
"tensorflow.python.ops.math_ops.cosh",
"tensorflow.python.ops.array_ops.where",
"tensorflow.python.ops.gen_math_ops.xlogy",
"tensorflow.python.ops.math_ops.add",
"tensorflow.python.ops.gen_math_ops.tanh_grad",
"tensorflow.python.ops.math_ops.reduced_shape",
"tensorflow.python.ops.math_ops.square",
"tensorflow.python.ops.math_ops.pow",
"tensorflow.python.framework.ops.colocate_with",
"tensorflow.python.ops.array_ops.broadcast_gradient_args",
"tensorflow.python.ops.array_ops.shape_internal",
"tensorflow.python.ops.array_ops.concat",
"tensorflow.python.ops.math_ops.cos",
"tensorflow.python.ops.math_ops.scalar_mul",
"tensorflow.python.ops.array_ops.constant",
"tensorflow.python.ops.math_ops.sin",
"tensorflow.python.ops.array_ops.shape",
"tensorflow.python.ops.gen_math_ops.mat_mul",
"tensorflow.python.ops.math_ops.cumulative_logsumexp",
"tensorflow.python.eager.context.executing_eagerly",
"tensorflow.python.framework.ops.NotDifferentiable",
"tensorflow.python.ops.math_ops.realdiv",
"tensorflow.python.ops.math_ops.reduce_prod",
"tensorflow.python.ops.array_ops.gather",
"tensorflow.python.framework.tensor_util.constant_value",
"tensorflow.python.ops.array_ops.ones",
"tensorflow.python.ops.gen_math_ops.rsqrt_grad",
"tensorflow.python.ops.array_ops.setdiff1d",
"tensorflow.python.ops.gen_math_ops.mul_no_nan",
"tensorflow.python.ops.math_ops.digamma",
"tensorflow.python.ops.array_ops.zeros_like",
"tensorflow.python.ops.math_ops.conj",
"tensorflow.python.ops.array_ops.broadcast_to",
"tensorflow.python.ops.gen_math_ops.sigmoid_grad",
"tensorflow.python.ops.math_ops.cumsum",
"tensorflow.python.framework.ops.get_default_graph",
"tensorflow.python.ops.array_ops.reshape",
"tensorflow.python.ops.gen_math_ops.sqrt_grad",
"tensorflow.python.ops.math_ops.multiply",
"tensorflow.python.ops.math_ops.maximum",
"tensorflow.python.ops.array_ops.invert_permutation",
"tensorflow.python.ops.array_ops.where_v2",
"tensorflow.python.ops.gen_math_ops.mul",
"tensorflow.python.ops.math_ops.greater",
"tensorflow.python.ops.array_ops.zeros",
"tensorflow.python.ops.math_ops.not_equal",
"tensorflow.python.framework.ops.device",
"tensorflow.python.ops.array_ops.rank",
"numpy.arange",
"tensorflow.python.ops.gen_math_ops.xdivy",
"numpy.finfo",
"tensorflow.python.ops.math_ops.lgamma",
"tensorflow.python.ops.math_ops.xdivy",
"tensorflow.python.framework.ops.control_dependencies",
"tensorflow.python.ops.math_ops.cast",
"tensorflow.python.ops.math_ops.sinh",
"tensorflow.python.ops.math_ops.complex",
"tensorflow.python.ops.math_ops.segment_sum",
"tensorflow.python.ops.gen_math_ops.lgamma",
"tensorflow.python.ops.gen_array_ops.broadcast_gradient_args",
"tensorflow.python.ops.array_ops.ones_like",
"tensorflow.python.ops.math_ops.reciprocal",
"tensorflow.python.ops.math_ops.sign",
"numpy.prod",
"tensorflow.python.ops.gen_math_ops.igamma_grad_a",
"tensorflow.python.ops.math_ops.floor_div",
"tensorflow.python.ops.array_ops.expand_dims",
"tensorflow.python.ops.math_ops.reduce_sum",
"tensorflow.python.framework.constant_op.constant"
],
[
"tensorflow.python.ops.control_flow_ops.while_loop",
"tensorflow.python.distribute.distribute_lib.UpdateContext",
"tensorflow.python.distribute.numpy_dataset.SingleDevice",
"tensorflow.python.distribute.values.validate_colocate",
"tensorflow.python.framework.ops.device",
"tensorflow.python.ops.array_ops.identity",
"tensorflow.python.distribute.values.SingleDeviceMap",
"tensorflow.python.util.tf_export.tf_export",
"tensorflow.python.distribute.distribute_lib.InputContext",
"tensorflow.python.framework.ops.control_dependencies",
"tensorflow.python.distribute.input_lib.MultiStepContext",
"tensorflow.python.util.nest.map_structure",
"tensorflow.python.distribute.device_util.resolve",
"tensorflow.python.distribute.distribute_lib.ReplicaContext.__init__",
"tensorflow.python.framework.ops.colocate_with",
"tensorflow.python.util.nest.pack_sequence_as",
"tensorflow.python.ops.control_flow_ops.group",
"tensorflow.python.framework.ops.get_default_graph",
"tensorflow.python.distribute.input_lib.InputWorkers",
"tensorflow.python.util.nest.flatten",
"tensorflow.python.framework.constant_op.constant"
],
[
"tensorflow.python.ops.array_ops.shape",
"tensorflow.python.ops.control_flow_util.IsSwitch",
"tensorflow.python.framework.ops.convert_n_to_tensor_or_indexed_slices",
"tensorflow.python.framework.ops.device",
"tensorflow.python.framework.ops.get_gradient_function",
"tensorflow.python.eager.context.executing_eagerly",
"tensorflow.python.ops.array_ops.identity",
"tensorflow.python.ops.default_gradient.supports_default_grad",
"tensorflow.python.framework.ops.internal_convert_n_to_tensor_or_indexed_slices",
"tensorflow.python.ops.control_flow_util.IsLoopSwitch",
"tensorflow.python.framework.ops._colocate_with_for_gradient",
"tensorflow.python.util.tf_export.tf_export",
"tensorflow.python.ops.control_flow_state.MaybeCreateControlFlowState",
"tensorflow.python.ops.resource_variable_ops.variable_shape",
"tensorflow.python.ops.resource_variable_ops.is_resource_variable",
"tensorflow.python.platform.tf_logging.vlog",
"tensorflow.python.ops.array_ops.zeros_like",
"tensorflow.python.framework.dtypes.as_dtype",
"tensorflow.core.framework.attr_value_pb2.AttrValue",
"tensorflow.python.ops.control_flow_ops.tuple",
"tensorflow.core.framework.attr_value_pb2.NameAttrList",
"tensorflow.python.eager.backprop.aggregate_indexed_slices_gradients",
"tensorflow.python.ops.functional_ops.symbolic_gradient",
"tensorflow.python.ops.control_flow_util.IsLoopExit",
"tensorflow.python.ops.unconnected_gradients.UnconnectedGradients",
"tensorflow.python.ops.math_ops.add_n",
"tensorflow.python.util.object_identity.ObjectIdentitySet",
"tensorflow.python.framework.ops.get_default_graph",
"tensorflow.python.framework.ops.name_scope",
"tensorflow.python.eager.backprop_util.IsTrainable",
"tensorflow.python.ops.control_flow_state.ZerosLikeOutsideLoop",
"tensorflow.python.ops.default_gradient.get_zeros_dtype",
"tensorflow.python.framework.constant_op.constant"
],
[
"tensorflow.python.ops.array_ops.concat",
"numpy.random.seed",
"tensorflow.python.data.ops.dataset_ops.Dataset.from_tensor_slices",
"tensorflow.python.distribute.combinations.combine",
"tensorflow.python.saved_model.saved_model.load",
"tensorflow.python.framework.random_seed.set_random_seed"
],
[
"tensorflow.python.ops.array_ops.constant",
"tensorflow.python.ops.array_ops.shape",
"tensorflow.python.ops.linalg_ops.eye",
"tensorflow.python.ops.array_ops.zeros",
"tensorflow.python.ops.linalg_ops.matrix_triangular_solve",
"tensorflow.python.ops.linalg_ops.self_adjoint_eig",
"tensorflow.python.ops.linalg_ops.tridiagonal_solve",
"tensorflow.python.ops.array_ops.rank",
"tensorflow.python.framework.ops.RegisterGradient",
"tensorflow.python.ops.linalg_ops.cholesky_solve",
"tensorflow.python.ops.math_ops.reduce_prod",
"tensorflow.python.ops.math_ops.add",
"tensorflow.python.ops.array_ops.size",
"tensorflow.python.ops.math_ops.matmul",
"tensorflow.python.framework.ops.control_dependencies",
"tensorflow.python.ops.linalg_ops.matrix_inverse",
"tensorflow.python.ops.linalg_ops.svd",
"tensorflow.python.ops.math_ops.cast",
"tensorflow.python.ops.array_ops.matrix_diag_part",
"tensorflow.python.ops.array_ops.tile",
"tensorflow.python.ops.array_ops.slice",
"tensorflow.python.ops.linalg_ops.matrix_solve",
"tensorflow.python.ops.math_ops.square",
"tensorflow.python.ops.array_ops.zeros_like",
"tensorflow.python.ops.array_ops.broadcast_gradient_args",
"tensorflow.python.ops.array_ops.matrix_transpose",
"tensorflow.python.ops.array_ops.matrix_band_part",
"tensorflow.python.framework.ops.convert_to_tensor",
"tensorflow.python.ops.math_ops.conj",
"tensorflow.python.ops.array_ops.stack",
"tensorflow.python.ops.linalg_ops._RegularizedGramianCholesky",
"tensorflow.python.ops.linalg.linalg_impl.adjoint",
"tensorflow.python.ops.array_ops.concat",
"tensorflow.python.ops.array_ops.reshape",
"tensorflow.python.ops.array_ops.matrix_diag",
"tensorflow.python.ops.linalg.linalg_impl.eye",
"tensorflow.python.ops.math_ops.reciprocal",
"tensorflow.python.ops.array_ops.pad",
"tensorflow.python.ops.array_ops.expand_dims",
"tensorflow.python.ops.math_ops.reduce_sum"
],
[
"tensorflow.python.framework.config.list_logical_devices",
"tensorflow.python.framework.ops.Graph",
"tensorflow.python.client.session._DeviceAttributes",
"tensorflow.python.training.server_lib.ClusterSpec",
"tensorflow.python.util.tf_export.tf_export",
"tensorflow.python.client.session.Session",
"tensorflow.python.eager.context.executing_eagerly"
]
] |
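The second file in this row, cluster_resolver.py, defines SimpleClusterResolver. A minimal usage sketch is below, assuming the public tf.distribute.cluster_resolver export shown in the quoted tf_export decorator and using made-up worker/ps host addresses:

```python
# Hypothetical SimpleClusterResolver usage; host names are placeholders.
import tensorflow as tf

spec = tf.train.ClusterSpec({
    "worker": ["worker0.example.com:2222", "worker1.example.com:2222"],
    "ps": ["ps0.example.com:2222"],
})
resolver = tf.distribute.cluster_resolver.SimpleClusterResolver(
    spec, task_type="worker", task_id=0, rpc_layer="grpc")

print(resolver.cluster_spec().as_dict())               # job name -> task address list
print(resolver.master(task_type="worker", task_id=1))  # "grpc://worker1.example.com:2222"
```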
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"2.8",
"2.7",
"2.6",
"2.4",
"2.3",
"2.9",
"2.5",
"2.2",
"2.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"2.8",
"2.7",
"2.6",
"2.4",
"2.3",
"2.9",
"2.5",
"2.2",
"2.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"2.8",
"2.7",
"2.6",
"2.4",
"2.3",
"2.9",
"2.5",
"2.2",
"2.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"2.8",
"2.7",
"2.6",
"2.4",
"2.3",
"2.9",
"2.5",
"2.2",
"2.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"2.8",
"1.10",
"1.12",
"2.7",
"2.6",
"1.4",
"1.13",
"2.3",
"2.4",
"2.9",
"1.5",
"1.7",
"2.5",
"0.12",
"1.0",
"2.2",
"1.2",
"2.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.7"
]
}
] |
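Among the gradients registered in this row's linalg_grad.py sample is _MatrixDeterminantGrad, which returns grad * det(A) * inv(A)^H (Jacobi's formula). A small numerical sanity check of that formula, assuming a random well-conditioned real matrix, might look like:

```python
# Check d(det A)/dA == det(A) * inv(A)^T for a real matrix, via autodiff.
import tensorflow as tf

a = tf.random.normal([3, 3], dtype=tf.float64)
with tf.GradientTape() as tape:
    tape.watch(a)
    det = tf.linalg.det(a)

grad_a = tape.gradient(det, a)                    # upstream gradient is implicitly 1
manual = det * tf.linalg.inv(a, adjoint=True)     # det(A) * (A^{-1})^T for real A
print(float(tf.reduce_max(tf.abs(grad_a - manual))))  # ~0 up to float error
```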
Borrk/DeepLearning-Engine
|
[
"54f6cdb8a76e76d9f439f8562652f545e4dbc02e"
] |
[
"source/engine/steps/prepare.py"
] |
[
"from engine.steps.IStep import IStep\nfrom sklearn.preprocessing import LabelBinarizer\nimport numpy as np\nimport os\n\nclass step_prepare(IStep):\n \"\"\"Calculate or parse the config file to determine the count of classess\"\"\"\n def __init__(self, output_channel, name=None ):\n super().__init__(self, output_channel, name)\n\n def IRun(self):\n # binarize the labels\n lb = LabelBinarizer()\n encoded_labels = np.array(self.labels)\n encoded_labels = lb.fit_transform(encoded_labels)\n\n self.output_channel['n_classes'] = self.n_classes\n self.output_channel['train_path'] = self.train_path\n self.output_channel['labels'] = self.labels\n self.output_channel['encoded_labels'] = encoded_labels\n self.output_channel['labelBinarizer'] = lb\n\n def IParseConfig( self, config_json ):\n self.train_path = config_json['train_path']\n if 'test_path' in config_json:\n self.test_path = config_json['test_path']\n if 'test_split_ratio' in config_json:\n self.test_split_ratio = config_json['test_split_ratio']\n\n self.labels = self.pick_labels( self, config_json['labels'] )\n self.n_classes = len(self.labels)\n \n def IDispose( self ):\n pass\n\n def pick_labels( self, label_source ):\n if label_source == \"extract_from_train_data_path\":\n from utils.data_preparer_splitdataset import data_preparer_splitdataset\n labels = data_preparer_splitdataset.pick_labels_from_folders( self.train_path )\n elif isinstance(label_source, list):\n labels = label_source\n elif os.path.isfile( label_source): # read from label file\n labels = data_preparer_splitdataset.pick_labels_from_file( label_source )\n else:\n raise Exception( \"Unknown label source\" )\n\n return labels # add later"
] |
[
[
"numpy.array",
"sklearn.preprocessing.LabelBinarizer"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
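The prepare.py step above binarizes string labels with sklearn's LabelBinarizer. A minimal standalone sketch of that transformation, using hypothetical labels rather than the repo's data, is:

```python
# One-hot encode string labels, as step_prepare.IRun does in the row above.
import numpy as np
from sklearn.preprocessing import LabelBinarizer

labels = np.array(["cat", "dog", "bird", "cat"])  # assumed example labels
lb = LabelBinarizer()
encoded = lb.fit_transform(labels)

print(lb.classes_)  # ['bird' 'cat' 'dog']
print(encoded)      # one row per sample, one column per class
```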
AllenChen1998/QueryNet
|
[
"1ab74d7f4cc9d25af30abe0631581cf7be81a07f",
"1ab74d7f4cc9d25af30abe0631581cf7be81a07f",
"1ab74d7f4cc9d25af30abe0631581cf7be81a07f"
] |
[
"models/cifar/pyramidnet.py",
"models/cifar/gdas/lib/nas/SE_Module.py",
"models/cifar/gdas/lib/datasets/LanguageDataset.py"
] |
[
"import torch\r\nimport torch.nn as nn\r\nimport math\r\n\r\n__all__ = ['pyramidnet272']\r\n\r\n\r\ndef conv3x3(in_planes, out_planes, stride=1):\r\n \"3x3 convolution with padding\"\r\n return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,\r\n padding=1, bias=False)\r\n\r\n\r\ndef calc_prob(curr_layer, total_layers, p_l):\r\n \"\"\"Calculates drop prob depending on the current layer.\"\"\"\r\n return 1 - (float(curr_layer) / total_layers) * p_l\r\n\r\n\r\nclass Bottleneck(nn.Module):\r\n outchannel_ratio = 4\r\n\r\n def __init__(self, inplanes, planes, stride=1, downsample=None, prob=1.):\r\n super(Bottleneck, self).__init__()\r\n self.bn1 = nn.BatchNorm2d(inplanes)\r\n self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)\r\n self.bn2 = nn.BatchNorm2d(planes)\r\n if stride == 1:\r\n self.conv2 = nn.Conv2d(planes, (planes * 1), kernel_size=3, stride=stride,\r\n padding=1, bias=False)\r\n else:\r\n self.conv2 = nn.Sequential(nn.ZeroPad2d((0, 1, 0, 1)),\r\n nn.Conv2d(planes, (planes * 1), kernel_size=3, stride=stride,\r\n padding=0, bias=False))\r\n self.bn3 = nn.BatchNorm2d((planes * 1))\r\n self.conv3 = nn.Conv2d((planes * 1), planes * Bottleneck.outchannel_ratio, kernel_size=1, bias=False)\r\n self.bn4 = nn.BatchNorm2d(planes * Bottleneck.outchannel_ratio)\r\n self.relu = nn.ReLU(inplace=True)\r\n self.downsample = downsample\r\n self.stride = stride\r\n self.prob = prob\r\n self.padding = None\r\n\r\n def forward(self, x):\r\n\r\n out = self.bn1(x)\r\n out = self.conv1(out)\r\n\r\n out = self.bn2(out)\r\n out = self.relu(out)\r\n out = self.conv2(out)\r\n\r\n out = self.bn3(out)\r\n out = self.relu(out)\r\n out = self.conv3(out)\r\n\r\n out = self.bn4(out)\r\n\r\n # shake drop inference\r\n # we may support shake drop training in a future version\r\n assert not self.training\r\n out = out * self.prob\r\n\r\n if self.downsample is not None:\r\n shortcut = self.downsample(x)\r\n featuremap_size = shortcut.size()[2:4]\r\n else:\r\n shortcut = x\r\n featuremap_size = out.size()[2:4]\r\n\r\n batch_size = out.size()[0]\r\n residual_channel = out.size()[1]\r\n shortcut_channel = shortcut.size()[1]\r\n if residual_channel != shortcut_channel:\r\n self.padding = torch.zeros(batch_size, residual_channel - shortcut_channel,\r\n featuremap_size[0], featuremap_size[1])\r\n self.padding = self.padding.to(x.device)\r\n out += torch.cat((shortcut, self.padding), 1)\r\n else:\r\n out += shortcut\r\n\r\n return out\r\n\r\n\r\nclass PyramidNet(nn.Module):\r\n\r\n def __init__(self, depth, alpha, num_classes):\r\n super(PyramidNet, self).__init__()\r\n self.inplanes = 16\r\n n = int((depth - 2) / 9)\r\n block = Bottleneck\r\n\r\n self.addrate = alpha / (3 * n * 1.0)\r\n\r\n self.input_featuremap_dim = self.inplanes\r\n self.conv1 = nn.Conv2d(3, self.input_featuremap_dim, kernel_size=3, stride=1, padding=1, bias=False)\r\n self.bn1 = nn.BatchNorm2d(self.input_featuremap_dim)\r\n\r\n self.featuremap_dim = self.input_featuremap_dim\r\n\r\n self.p_l = 0.5\r\n self.layer_num = 1\r\n self.total_layers = n * 3\r\n\r\n self.layer1 = self.pyramidal_make_layer(block, n)\r\n self.layer2 = self.pyramidal_make_layer(block, n, stride=2)\r\n self.layer3 = self.pyramidal_make_layer(block, n, stride=2)\r\n\r\n self.final_featuremap_dim = self.input_featuremap_dim\r\n self.bn_final = nn.BatchNorm2d(self.final_featuremap_dim)\r\n self.relu_final = nn.ReLU(inplace=True)\r\n self.avgpool = nn.AvgPool2d(8)\r\n self.fc = nn.Linear(self.final_featuremap_dim, num_classes)\r\n\r\n for m in 
self.modules():\r\n if isinstance(m, nn.Conv2d):\r\n n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels\r\n m.weight.data.normal_(0, math.sqrt(2. / n))\r\n elif isinstance(m, nn.BatchNorm2d):\r\n m.weight.data.fill_(1)\r\n m.bias.data.zero_()\r\n\r\n def pyramidal_make_layer(self, block, block_depth, stride=1):\r\n downsample = None\r\n if stride != 1: # or self.inplanes != int(round(featuremap_dim_1st)) * block.outchannel_ratio:\r\n downsample = nn.AvgPool2d((2, 2), stride=(2, 2), ceil_mode=True)\r\n\r\n layers = []\r\n self.featuremap_dim = self.featuremap_dim + self.addrate\r\n prob = calc_prob(self.layer_num, self.total_layers, self.p_l)\r\n layers.append(block(self.input_featuremap_dim, int(round(self.featuremap_dim)), stride, downsample, prob))\r\n self.layer_num += 1\r\n for i in range(1, block_depth):\r\n temp_featuremap_dim = self.featuremap_dim + self.addrate\r\n prob = calc_prob(self.layer_num, self.total_layers, self.p_l)\r\n layers.append(\r\n block(int(round(self.featuremap_dim)) * block.outchannel_ratio, int(round(temp_featuremap_dim)), 1,\r\n prob=prob))\r\n self.layer_num += 1\r\n self.featuremap_dim = temp_featuremap_dim\r\n self.input_featuremap_dim = int(round(self.featuremap_dim)) * block.outchannel_ratio\r\n\r\n return nn.Sequential(*layers)\r\n\r\n def forward(self, x):\r\n x = self.conv1(x)\r\n x = self.bn1(x)\r\n\r\n x = self.layer1(x)\r\n x = self.layer2(x)\r\n x = self.layer3(x)\r\n\r\n x = self.bn_final(x)\r\n x = self.relu_final(x)\r\n x = self.avgpool(x)\r\n x = x.view(x.size(0), -1)\r\n x = self.fc(x)\r\n\r\n return x\r\n\r\n\r\ndef pyramidnet272(**kwargs):\r\n return PyramidNet(depth=272, alpha=200, **kwargs)\r\n",
"import torch\r\nimport torch.nn as nn\r\n# Squeeze and Excitation module\r\n\r\nclass SqEx(nn.Module):\r\n\r\n def __init__(self, n_features, reduction=16):\r\n super(SqEx, self).__init__()\r\n\r\n if n_features % reduction != 0:\r\n raise ValueError('n_features must be divisible by reduction (default = 16)')\r\n\r\n self.linear1 = nn.Linear(n_features, n_features // reduction, bias=True)\r\n self.nonlin1 = nn.ReLU(inplace=True)\r\n self.linear2 = nn.Linear(n_features // reduction, n_features, bias=True)\r\n self.nonlin2 = nn.Sigmoid()\r\n\r\n def forward(self, x):\r\n\r\n y = F.avg_pool2d(x, kernel_size=x.size()[2:4])\r\n y = y.permute(0, 2, 3, 1)\r\n y = self.nonlin1(self.linear1(y))\r\n y = self.nonlin2(self.linear2(y))\r\n y = y.permute(0, 3, 1, 2)\r\n y = x * y\r\n return y\r\n\r\n",
"import os\r\nimport torch\r\n\r\nfrom collections import Counter\r\n\r\n\r\nclass Dictionary(object):\r\n def __init__(self):\r\n self.word2idx = {}\r\n self.idx2word = []\r\n self.counter = Counter()\r\n self.total = 0\r\n\r\n def add_word(self, word):\r\n if word not in self.word2idx:\r\n self.idx2word.append(word)\r\n self.word2idx[word] = len(self.idx2word) - 1\r\n token_id = self.word2idx[word]\r\n self.counter[token_id] += 1\r\n self.total += 1\r\n return self.word2idx[word]\r\n\r\n def __len__(self):\r\n return len(self.idx2word)\r\n\r\n\r\nclass Corpus(object):\r\n def __init__(self, path):\r\n self.dictionary = Dictionary()\r\n self.train = self.tokenize(os.path.join(path, 'train.txt'))\r\n self.valid = self.tokenize(os.path.join(path, 'valid.txt'))\r\n self.test = self.tokenize(os.path.join(path, 'test.txt'))\r\n\r\n def tokenize(self, path):\r\n \"\"\"Tokenizes a text file.\"\"\"\r\n assert os.path.exists(path)\r\n # Add words to the dictionary\r\n with open(path, 'r', encoding='utf-8') as f:\r\n tokens = 0\r\n for line in f:\r\n words = line.split() + ['<eos>']\r\n tokens += len(words)\r\n for word in words:\r\n self.dictionary.add_word(word)\r\n\r\n # Tokenize file content\r\n with open(path, 'r', encoding='utf-8') as f:\r\n ids = torch.LongTensor(tokens)\r\n token = 0\r\n for line in f:\r\n words = line.split() + ['<eos>']\r\n for word in words:\r\n ids[token] = self.dictionary.word2idx[word]\r\n token += 1\r\n\r\n return ids\r\n\r\nclass SentCorpus(object):\r\n def __init__(self, path):\r\n self.dictionary = Dictionary()\r\n self.train = self.tokenize(os.path.join(path, 'train.txt'))\r\n self.valid = self.tokenize(os.path.join(path, 'valid.txt'))\r\n self.test = self.tokenize(os.path.join(path, 'test.txt'))\r\n\r\n def tokenize(self, path):\r\n \"\"\"Tokenizes a text file.\"\"\"\r\n assert os.path.exists(path)\r\n # Add words to the dictionary\r\n with open(path, 'r', encoding='utf-8') as f:\r\n tokens = 0\r\n for line in f:\r\n words = line.split() + ['<eos>']\r\n tokens += len(words)\r\n for word in words:\r\n self.dictionary.add_word(word)\r\n\r\n # Tokenize file content\r\n sents = []\r\n with open(path, 'r', encoding='utf-8') as f:\r\n for line in f:\r\n if not line:\r\n continue\r\n words = line.split() + ['<eos>']\r\n sent = torch.LongTensor(len(words))\r\n for i, word in enumerate(words):\r\n sent[i] = self.dictionary.word2idx[word]\r\n sents.append(sent)\r\n\r\n return sents\r\n\r\nclass BatchSentLoader(object):\r\n def __init__(self, sents, batch_size, pad_id=0, cuda=False, volatile=False):\r\n self.sents = sents\r\n self.batch_size = batch_size\r\n self.sort_sents = sorted(sents, key=lambda x: x.size(0))\r\n self.cuda = cuda\r\n self.volatile = volatile\r\n self.pad_id = pad_id\r\n\r\n def __next__(self):\r\n if self.idx >= len(self.sort_sents):\r\n raise StopIteration\r\n\r\n batch_size = min(self.batch_size, len(self.sort_sents)-self.idx)\r\n batch = self.sort_sents[self.idx:self.idx+batch_size]\r\n max_len = max([s.size(0) for s in batch])\r\n tensor = torch.LongTensor(max_len, batch_size).fill_(self.pad_id)\r\n for i in range(len(batch)):\r\n s = batch[i]\r\n tensor[:s.size(0),i].copy_(s)\r\n if self.cuda:\r\n tensor = tensor.cuda()\r\n\r\n self.idx += batch_size\r\n\r\n return tensor\r\n \r\n next = __next__\r\n\r\n def __iter__(self):\r\n self.idx = 0\r\n return self\r\n"
] |
[
[
"torch.nn.ZeroPad2d",
"torch.nn.Sequential",
"torch.zeros",
"torch.cat",
"torch.nn.Conv2d",
"torch.nn.Linear",
"torch.nn.AvgPool2d",
"torch.nn.BatchNorm2d",
"torch.nn.ReLU"
],
[
"torch.nn.Linear",
"torch.nn.ReLU",
"torch.nn.Sigmoid"
],
[
"torch.LongTensor"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
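A minimal usage sketch for the squeeze-and-excitation block in the code column above, assuming the SqEx class as defined there (with the torch.nn.functional import added) is already in scope; the channel count 64 and the 8x8 spatial size are arbitrary example values, not anything taken from the source repo.

import torch

# Hypothetical smoke test, not part of the original file.
# n_features must be divisible by reduction (default 16), so 64 is a valid example.
se = SqEx(n_features=64)            # SqEx as defined in the code column above
x = torch.randn(2, 64, 8, 8)        # (batch, channels, height, width)
y = se(x)                           # x re-weighted channel-wise by gates in [0, 1]
assert y.shape == x.shape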
mscroggs/bemplot
|
[
"5ab1ae9f87d4d7d99251e682bdf48cefd0821fc3"
] |
[
"generic.py"
] |
[
"def plot_slice(evaluator, plot_type='squared', x=None, y=None, z=None,\n n=(151,151), extent=(-5,5,-5,5), cmap='coolwarm', filename=None, title=None,\n show=None, interior=None, mesh=None, **kwargs):\n import numpy as np\n if show is None:\n show = filename==None\n\n if x is not None and y is None and z is None:\n x_p, y_p, z_p = np.mgrid[x:x:1j, extent[0]:extent[1]:n[0]*1j, extent[2]:extent[3]:n[1] * 1j]\n elif x is None and y is not None and z is None:\n x_p, y_p, z_p = np.mgrid[extent[0]:extent[1]:n[0]*1j, y:y:1j, extent[2]:extent[3]:n[1] * 1j]\n elif x is None and y is None and z is not None:\n x_p, y_p, z_p = np.mgrid[extent[0]:extent[1]:n[0]*1j, extent[2]:extent[3]:n[1] * 1j, z:z:1j]\n else:\n raise TypeError(\"Exactly one of x, y and z must be set.\")\n points = np.vstack((x_p.ravel(), y_p.ravel(), z_p.ravel()))\n\n plot_me = evaluator(points)\n\n import matplotlib\n from matplotlib import pyplot as plt\n\n if plot_type == 'squared':\n plot_me = np.real(np.sum(plot_me * plot_me.conj(), axis=0))\n elif plot_type == 'x':\n plot_me = np.real(plot_me[0,:])\n elif plot_type == 'y':\n plot_me = np.real(plot_me[1,:])\n elif plot_type == 'z':\n plot_me = np.real(plot_me[2,:])\n elif plot_type == 'radial':\n plot_me = np.real(np.sum(points * plot_me,axis=0) / np.sum(points * points,axis=0))\n elif plot_type == 'tangential':\n plot_me = np.array([np.cross(p,v) / np.linalg.norm(p) for p,v in zip(points.T,plot_me.T)]).T\n plot_me = np.real(np.sum(plot_me*plot_me.conj(),axis=0))\n else:\n raise ValueError(\"plot_type value invalid\")\n\n if interior is not None:\n for i,p in enumerate(points.T):\n if interior(p):\n plot_me[i] = None\n\n plt.imshow(plot_me.reshape(n).T,\n cmap=cmap, origin='lower',\n extent=extent, **kwargs)\n plt.colorbar()\n if title is None:\n if x is not None:\n title = \"Plot at x=\"+str(x)\n if y is not None:\n title = \"Plot at y=\"+str(y)\n if z is not None:\n title = \"Plot at z=\"+str(z)\n plt.title(title)\n if x is None:\n plt.xlabel(\"x\")\n if y is None:\n plt.ylabel(\"y\")\n else:\n plt.ylabel(\"z\")\n else:\n plt.xlabel(\"y\")\n plt.ylabel(\"z\")\n\n if mesh is not None:\n xxx = mesh.leaf_view.vertices[0]\n yyy = mesh.leaf_view.vertices[1]\n zzz = mesh.leaf_view.vertices[2]\n if x is not None:\n plt.plot(yyy,zzz,\"k-\")\n if y is not None:\n plt.plot(xxx,zzz,\"k-\")\n if z is not None:\n plt.plot(xxx,yyy,\"k-\")\n\n if filename is not None:\n plt.savefig(filename)\n if show:\n plt.show()\n\n plt.clf()\n\ndef slices(file_name, evaluator, extent=(-5,5,-5,5), x=None,y=None,z=None, ndims=(300,300)):\n if x is not None:\n mode = \"yz\"\n ny,nz = ndims\n ps = x\n axis=\"x\"\n elif y is not None:\n mode = \"xz\"\n nx,nz = ndims\n ps = y\n axis=\"y\"\n elif z is not None:\n mode = \"xy\"\n nx,ny = ndims\n ps = z\n axis=\"z\"\n else:\n raise ValueError(\"One of x, y and z must be set\")\n import bempp.api\n import os\n\n fname, extension = os.path.splitext(file_name)\n\n ll = (extent[0],extent[2])\n ur = (extent[1],extent[3])\n\n node_offset = 1\n element_offset = 1\n\n for p in ps:\n grid = bempp.api.structured_grid(ll, ur, ndims, axis=mode, offset=p)\n nnodes = grid.leaf_view.entity_count(2)\n nelements = grid.leaf_view.entity_count(0)\n space = bempp.api.function_space(grid, \"P\", 1, domains=[0], closed=True)\n points = space.global_dof_interpolation_points\n vals = evaluator(points)\n output_fun = bempp.api.GridFunction(space, coefficients=vals)\n bempp.api.export(file_name=fname + \"_\" + axis + \"=\" + str(p) + extension, grid_function=output_fun,\n 
data_type='node',\n vertex_index_to_file_key_map=range(\n node_offset, node_offset + nnodes),\n element_index_to_file_key_map=range(\n element_offset, element_offset + nelements))\n node_offset += nnodes\n element_offset += nelements\n"
] |
[
[
"matplotlib.pyplot.title",
"numpy.linalg.norm",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.colorbar",
"numpy.real",
"matplotlib.pyplot.clf",
"numpy.cross",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.show",
"numpy.sum",
"matplotlib.pyplot.ylabel"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
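plot_slice above samples an evaluator on an np.mgrid plane and renders the result with imshow; the following self-contained sketch reproduces only that plotting pattern, with a made-up Gaussian field standing in for the BEM evaluator (grid size, extent, and colormap copied from the defaults above).

import numpy as np
from matplotlib import pyplot as plt

# Toy stand-in for evaluator(points): a scalar field sampled on a z = 0 slice.
extent = (-5, 5, -5, 5)
n = (151, 151)
x_p, y_p = np.mgrid[extent[0]:extent[1]:n[0] * 1j, extent[2]:extent[3]:n[1] * 1j]
plot_me = np.exp(-(x_p ** 2 + y_p ** 2) / 4.0)

plt.imshow(plot_me.reshape(n).T, cmap='coolwarm', origin='lower', extent=extent)
plt.colorbar()
plt.title("Plot at z=0")
plt.xlabel("x")
plt.ylabel("y")
plt.show()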
yucheng20170406/Paddle
|
[
"e543af14589e2311ae2f3f6c9887b537d2048666"
] |
[
"python/paddle/fluid/tests/unittests/op_test.py"
] |
[
"# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport unittest\nimport numpy as np\nimport random\nimport itertools\nimport paddle.fluid.core as core\nimport collections\nfrom paddle.fluid.backward import append_backward\nfrom paddle.fluid.op import Operator\nfrom paddle.fluid.executor import Executor\nfrom paddle.fluid.framework import Program, OpProtoHolder\n\n\ndef randomize_probability(batch_size, class_num, dtype='float32'):\n prob = np.random.uniform(\n 0.1, 1.0, size=(batch_size, class_num)).astype(dtype)\n prob_sum = prob.sum(axis=1)\n for i in xrange(len(prob)):\n prob[i] /= prob_sum[i]\n return prob\n\n\ndef create_op(scope, op_type, inputs, outputs, attrs):\n kwargs = dict()\n\n def __create_var__(name, var_name):\n scope.var(var_name).get_tensor()\n kwargs[name].append(var_name)\n\n for in_name, in_dup in Operator.get_op_inputs(op_type):\n if in_name in inputs:\n kwargs[in_name] = []\n if in_dup:\n sub_in = inputs[in_name]\n for item in sub_in:\n sub_in_name, _ = item[0], item[1]\n __create_var__(in_name, sub_in_name)\n else:\n __create_var__(in_name, in_name)\n\n for out_name, out_dup in Operator.get_op_outputs(op_type):\n if out_name in outputs:\n kwargs[out_name] = []\n if out_dup:\n sub_out = outputs[out_name]\n for item in sub_out:\n sub_out_name, _ = item[0], item[1]\n __create_var__(out_name, sub_out_name)\n else:\n __create_var__(out_name, out_name)\n\n for attr_name in Operator.get_op_attr_names(op_type):\n if attr_name in attrs:\n kwargs[attr_name] = attrs[attr_name]\n\n return Operator(op_type, **kwargs)\n\n\ndef set_input(scope, op, inputs, place):\n def __set_input__(var_name, var):\n if isinstance(var, tuple) or isinstance(var, np.ndarray):\n tensor = scope.find_var(var_name).get_tensor()\n if isinstance(var, tuple):\n tensor.set_lod(var[1])\n var = var[0]\n tensor.set_dims(var.shape)\n tensor.set(var, place)\n elif isinstance(var, float):\n scope.find_var(var_name).set_float(var)\n elif isinstance(var, int):\n scope.find_var(var_name).set_int(var)\n\n for in_name, in_dup in Operator.get_op_inputs(op.type()):\n if in_name in inputs:\n if in_dup:\n sub_in = inputs[in_name]\n for item in sub_in:\n sub_in_name, sub_in_val = item[0], item[1]\n __set_input__(sub_in_name, sub_in_val)\n else:\n __set_input__(in_name, inputs[in_name])\n\n\ndef get_numeric_gradient(place,\n scope,\n op,\n inputs,\n input_to_check,\n output_names,\n delta=0.005,\n in_place=False):\n # FIXME: change this method by compile time concepts\n set_input(scope, op, inputs, place)\n\n def product(dim):\n return reduce(lambda a, b: a * b, dim, 1)\n\n def get_output():\n sum = []\n for output_name in output_names:\n op.run(scope, place)\n sum.append(\n np.array(scope.find_var(output_name).get_tensor()).mean())\n return np.array(sum).mean()\n\n tensor_to_check = scope.find_var(input_to_check).get_tensor()\n tensor_size = product(tensor_to_check.get_dims())\n tensor_to_check_dtype = tensor_to_check.dtype()\n if tensor_to_check_dtype == 
core.VarDesc.VarType.FP32:\n tensor_to_check_dtype = np.float32\n elif tensor_to_check_dtype == core.VarDesc.VarType.FP64:\n tensor_to_check_dtype = np.float64\n else:\n raise ValueError(\"Not supported data type \" + str(\n tensor_to_check_dtype))\n\n gradient_flat = np.zeros(shape=(tensor_size, ), dtype=tensor_to_check_dtype)\n\n def __get_elem__(tensor, i):\n if tensor_to_check_dtype == np.float32:\n return tensor.get_float_element(i)\n else:\n return tensor.get_double_element(i)\n\n def __set_elem__(tensor, i, e):\n if tensor_to_check_dtype == np.float32:\n tensor.set_float_element(i, e)\n else:\n tensor.set_double_element(i, e)\n\n # we only compute gradient of one element each time.\n # we use a for loop to compute the gradient of every element.\n for i in xrange(tensor_size):\n if in_place:\n set_input(scope, op, inputs, place)\n\n # get one input element throw it's index i.\n origin = __get_elem__(tensor_to_check, i)\n # add delta to it, run op and then get the sum of the result tensor.\n x_pos = origin + delta\n __set_elem__(tensor_to_check, i, x_pos)\n y_pos = get_output()\n\n if in_place:\n set_input(scope, op, inputs, place)\n\n x_neg = origin - delta\n __set_elem__(tensor_to_check, i, x_neg)\n y_neg = get_output()\n\n __set_elem__(tensor_to_check, i, origin)\n gradient_flat[i] = (y_pos - y_neg) / delta / 2\n\n return gradient_flat.reshape(tensor_to_check.get_dims())\n\n\ndef append_input_output(block, op_proto, np_list, is_input):\n '''Insert VarDesc and generate Python variable instance'''\n proto_list = op_proto.inputs if is_input else op_proto.outputs\n\n def create_var(block, name, np_list, var_proto):\n if name not in np_list:\n assert var_proto.intermediate, \"{} not found\".format(name)\n shape = None\n lod_level = None\n else:\n np_value = np_list[name]\n if isinstance(np_value, tuple):\n shape = list(np_value[0].shape)\n lod_level = len(np_value[1])\n else:\n shape = list(np_value.shape)\n lod_level = 0\n return block.create_var(\n dtype=\"float32\", shape=shape, lod_level=lod_level, name=name)\n\n var_dict = {}\n for var_proto in proto_list:\n var_name = str(var_proto.name)\n if is_input:\n if (var_name not in np_list) and var_proto.dispensable:\n continue\n assert (var_name in np_list) or (var_proto.dispensable), \\\n \"Missing {} as input\".format(var_name)\n if var_proto.duplicable:\n assert isinstance(np_list[var_name], list), \\\n \"Duplicable {} should be set as list\".format(var_name)\n var_list = []\n for (name, np_value) in np_list[var_name]:\n var_list.append(\n create_var(block, name, {name: np_value}, var_proto))\n var_dict[var_name] = var_list\n else:\n var_dict[var_name] = create_var(block, var_name, np_list, var_proto)\n\n return var_dict\n\n\nclass OpTest(unittest.TestCase):\n @classmethod\n def setUpClass(cls):\n '''Fix random seeds to remove randomness from tests'''\n cls._np_rand_state = np.random.get_state()\n cls._py_rand_state = random.getstate()\n\n np.random.seed(123)\n random.seed(124)\n\n @classmethod\n def tearDownClass(cls):\n '''Restore random seeds'''\n np.random.set_state(cls._np_rand_state)\n random.setstate(cls._py_rand_state)\n\n def feed_var(self, input_vars, place):\n feed_map = {}\n for var_name in input_vars:\n if isinstance(input_vars[var_name], list):\n for name, np_value in self.inputs[var_name]:\n tensor = core.LoDTensor()\n if isinstance(np_value, tuple):\n tensor.set(np_value[0], place)\n tensor.set_lod(np_value[1])\n else:\n tensor.set(np_value, place)\n feed_map[name] = tensor\n else:\n tensor = core.LoDTensor()\n if 
isinstance(self.inputs[var_name], tuple):\n tensor.set(self.inputs[var_name][0], place)\n tensor.set_lod(self.inputs[var_name][1])\n else:\n tensor.set(self.inputs[var_name], place)\n feed_map[var_name] = tensor\n\n return feed_map\n\n def calc_output(self, place):\n outs, _ = self._calc_output(place)\n return outs\n\n def _calc_output(self, place):\n op_proto = OpProtoHolder.instance().get_op_proto(self.op_type)\n\n program = Program()\n block = program.global_block()\n\n inputs = append_input_output(block, op_proto, self.inputs, True)\n outputs = append_input_output(block, op_proto, self.outputs, False)\n op = block.append_op(\n type=self.op_type,\n inputs=inputs,\n outputs=outputs,\n attrs=self.attrs if hasattr(self, \"attrs\") else dict())\n # infer variable type and infer shape in compile-time\n op.desc.infer_var_type(block.desc)\n op.desc.infer_shape(block.desc)\n\n fetch_list = []\n for var_name, var in outputs.iteritems():\n if var_name in self.outputs:\n if isinstance(var, list):\n for v in var:\n fetch_list.append(v)\n else:\n fetch_list.append(var)\n\n feed_map = self.feed_var(inputs, place)\n\n exe = Executor(place)\n outs = exe.run(program,\n feed=feed_map,\n fetch_list=fetch_list,\n return_numpy=False)\n return outs, fetch_list\n\n def check_output_with_place(self, place, atol):\n outs, fetch_list = self._calc_output(place)\n for out_name, out_dup in Operator.get_op_outputs(self.op_type):\n if out_name not in self.outputs:\n continue\n\n def find_actual(target_name, fetch_list):\n found = [\n i for i, var in enumerate(fetch_list)\n if var.name == target_name\n ]\n self.assertTrue(\n len(found) == 1, \"Found {} {}\".format(\n len(found), target_name))\n return found[0]\n\n if out_dup:\n sub_out = self.outputs[out_name]\n if not isinstance(sub_out, list):\n raise AssertionError(\"sub_out type %s is not list\",\n type(sub_out))\n for item in sub_out:\n sub_out_name, expect = item[0], item[1]\n idx = find_actual(sub_out_name, fetch_list)\n actual = outs[idx]\n actual_t = np.array(actual)\n expect_t = expect[0] \\\n if isinstance(expect, tuple) else expect\n self.assertTrue(\n np.allclose(\n actual_t, expect_t, atol=atol),\n \"Output (\" + sub_out_name + \") has diff at \" +\n str(place))\n if isinstance(expect, tuple):\n self.assertListEqual(\n actual.lod(), expect[1], \"Output (\" + sub_out_name +\n \") has different lod at \" + str(place))\n else:\n idx = find_actual(out_name, fetch_list)\n actual = outs[idx]\n actual_t = np.array(actual)\n expect = self.outputs[out_name]\n expect_t = expect[0] if isinstance(expect, tuple) else expect\n self.assertTrue(\n np.allclose(\n actual_t, expect_t, atol=atol),\n \"Output (\" + out_name + \") has diff at \" + str(place) +\n str(actual_t) + str(expect_t))\n if isinstance(expect, tuple):\n self.assertListEqual(actual.lod(), expect[1],\n \"Output (\" + out_name +\n \") has different lod at \" + str(place))\n\n def check_output(self, atol=1e-5):\n places = [core.CPUPlace()]\n if core.is_compiled_with_cuda() and core.op_support_gpu(self.op_type):\n places.append(core.CUDAPlace(0))\n for place in places:\n self.check_output_with_place(place, atol)\n\n def check_output_customized(self, checker):\n places = [core.CPUPlace()]\n if core.is_compiled_with_cuda() and core.op_support_gpu(self.op_type):\n places.append(core.CUDAPlace(0))\n for place in places:\n outs = self.calc_output(place)\n outs = [np.array(out) for out in outs]\n checker(outs)\n\n def __assert_is_close(self, numeric_grads, analytic_grads, names,\n max_relative_error, 
msg_prefix):\n\n for a, b, name in itertools.izip(numeric_grads, analytic_grads, names):\n abs_a = np.abs(a)\n abs_a[abs_a < 1e-3] = 1\n\n diff_mat = np.abs(a - b) / abs_a\n max_diff = np.max(diff_mat)\n\n def err_msg():\n offset = np.argmax(diff_mat > max_relative_error)\n return (\"%s Variable %s max gradient diff %f over limit %f, \"\n \"the first error element is %d, %f, %f\") % (\n msg_prefix, name, max_diff, max_relative_error,\n offset, a.flatten()[offset], b.flatten()[offset])\n\n self.assertLessEqual(max_diff, max_relative_error, err_msg())\n\n def check_grad(self,\n inputs_to_check,\n output_names,\n no_grad_set=None,\n numeric_grad_delta=0.005,\n in_place=False,\n max_relative_error=0.005,\n user_defined_grads=None):\n places = [core.CPUPlace()]\n if core.is_compiled_with_cuda() and core.op_support_gpu(self.op_type):\n places.append(core.CUDAPlace(0))\n for place in places:\n self.check_grad_with_place(place, inputs_to_check, output_names,\n no_grad_set, numeric_grad_delta,\n in_place, max_relative_error,\n user_defined_grads)\n\n def check_grad_with_place(self,\n place,\n inputs_to_check,\n output_names,\n no_grad_set=None,\n numeric_grad_delta=0.005,\n in_place=False,\n max_relative_error=0.005,\n user_defined_grads=None):\n self.scope = core.Scope()\n op_inputs = self.inputs if hasattr(self, \"inputs\") else dict()\n op_outputs = self.outputs if hasattr(self, \"outputs\") else dict()\n op_attrs = self.attrs if hasattr(self, \"attrs\") else dict()\n self.op = create_op(self.scope, self.op_type, op_inputs, op_outputs,\n op_attrs)\n\n if no_grad_set is None:\n no_grad_set = set()\n\n if not type(output_names) is list:\n output_names = [output_names]\n\n numeric_grads = user_defined_grads or [\n get_numeric_gradient(\n place,\n self.scope,\n self.op,\n self.inputs,\n input_to_check,\n output_names,\n delta=numeric_grad_delta,\n in_place=in_place) for input_to_check in inputs_to_check\n ]\n analytic_grads = self._get_gradient(inputs_to_check, place,\n output_names, no_grad_set)\n\n self.__assert_is_close(numeric_grads, analytic_grads, inputs_to_check,\n max_relative_error,\n \"Gradient Check On %s\" % str(place))\n\n @staticmethod\n def _create_var_descs_(block, var_dict):\n # FIXME: Try unify with `append_input_output`\n for param_name in var_dict:\n var = var_dict[param_name]\n if not isinstance(var, list) and not isinstance(var, tuple):\n var = [(param_name, var, None)]\n if not isinstance(var[0], list) and not isinstance(var[0], tuple):\n var = [(param_name, var[0], var[1])]\n\n for i, item in enumerate(var):\n if not isinstance(item[0], basestring):\n item = [[param_name] + list(item)]\n if len(item) == 2:\n if isinstance(item[1], tuple):\n var[i] = [item[0], item[1][0], item[1][1]]\n else:\n # only set var name and value, set lod to None\n var[i] = list(item) + [None]\n var_descs = [(block.create_var(\n name=name, shape=each.shape, dtype=each.dtype), each, lod)\n for name, each, lod in var]\n\n yield param_name, var_descs\n\n @staticmethod\n def _merge_list(iterable):\n return reduce(lambda a, b: list(a) + list(b), iterable, [])\n\n @staticmethod\n def _numpy_to_lod_tensor(np_value, lod, place):\n tensor = core.LoDTensor()\n tensor.set(np_value, place)\n if lod is not None:\n tensor.set_lod(lod)\n return tensor\n\n def _get_gradient(self, input_to_check, place, output_names, no_grad_set):\n prog = Program()\n block = prog.global_block()\n inputs_with_np = {\n key: value\n for (key, value) in OpTest._create_var_descs_(\n block, getattr(self, 'inputs', {}))\n }\n 
outputs_with_np = {\n key: val\n for (key, val) in OpTest._create_var_descs_(\n block, getattr(self, 'outputs', {}))\n }\n inputs = {\n k: [item[0] for item in inputs_with_np[k]]\n for k in inputs_with_np\n }\n outputs = {\n k: [item[0] for item in outputs_with_np[k]]\n for k in outputs_with_np\n }\n\n op = block.append_op(\n type=self.op_type,\n inputs=inputs,\n outputs=outputs,\n attrs=getattr(self, 'attrs', {}))\n\n # infer variable type and infer shape in compile-time\n op.desc.infer_var_type(block.desc)\n op.desc.infer_shape(block.desc)\n\n mean_inputs = map(block.var, output_names)\n\n if len(mean_inputs) == 1:\n loss = block.create_var(dtype=mean_inputs[0].dtype, shape=[1])\n op = block.append_op(\n inputs={\"X\": mean_inputs}, outputs={\"Out\": loss}, type='mean')\n op.desc.infer_var_type(block.desc)\n op.desc.infer_shape(block.desc)\n else:\n avg_sum = []\n for cur_loss in mean_inputs:\n cur_avg_loss = block.create_var(dtype=cur_loss.dtype, shape=[1])\n op = block.append_op(\n inputs={\"X\": [cur_loss]},\n outputs={\"Out\": [cur_avg_loss]},\n type=\"mean\")\n op.desc.infer_var_type(block.desc)\n op.desc.infer_shape(block.desc)\n avg_sum.append(cur_avg_loss)\n\n loss_sum = block.create_var(dtype=avg_sum[0].dtype, shape=[1])\n op_sum = block.append_op(\n inputs={\"X\": avg_sum}, outputs={\"Out\": loss_sum}, type='sum')\n op_sum.desc.infer_var_type(block.desc)\n op_sum.desc.infer_shape(block.desc)\n\n loss = block.create_var(dtype=loss_sum.dtype, shape=[1])\n op_loss = block.append_op(\n inputs={\"X\": loss_sum},\n outputs={\"Out\": loss},\n type='scale',\n attrs={'scale': 1.0 / float(len(avg_sum))})\n op_loss.desc.infer_var_type(block.desc)\n op_loss.desc.infer_shape(block.desc)\n\n param_grad_list = append_backward(\n loss=loss, parameter_list=input_to_check, no_grad_set=no_grad_set)\n\n feed_dict = {\n item[0].name: OpTest._numpy_to_lod_tensor(item[1], item[2], place)\n for p_name in inputs_with_np for item in inputs_with_np[p_name]\n }\n\n fetch_list = [g for p, g in param_grad_list]\n executor = Executor(place)\n return map(\n np.array,\n executor.run(prog, feed_dict, fetch_list, return_numpy=False))\n"
] |
[
[
"numpy.random.get_state",
"numpy.abs",
"numpy.random.seed",
"numpy.allclose",
"numpy.max",
"numpy.random.set_state",
"numpy.argmax",
"numpy.random.uniform",
"numpy.array",
"numpy.zeros"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
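get_numeric_gradient above perturbs one tensor element at a time and applies a central difference, (y_pos - y_neg) / (2 * delta); a standalone NumPy sketch of the same idea, with a hypothetical scalar function f in place of an operator run, might look like this.

import numpy as np

def numeric_gradient(f, x, delta=0.005):
    # Central-difference estimate of d f(x) / d x, one element at a time,
    # mirroring the loop in get_numeric_gradient (f must return a scalar).
    grad = np.zeros_like(x, dtype=np.float64)
    x_flat, g_flat = x.reshape(-1), grad.reshape(-1)
    for i in range(x_flat.size):
        origin = x_flat[i]
        x_flat[i] = origin + delta
        y_pos = f(x)
        x_flat[i] = origin - delta
        y_neg = f(x)
        x_flat[i] = origin
        g_flat[i] = (y_pos - y_neg) / (2.0 * delta)
    return grad

# Example: the gradient of mean(x ** 2) is 2 * x / x.size.
x = np.array([[1.0, -2.0], [0.5, 3.0]])
print(numeric_gradient(lambda a: float((a ** 2).mean()), x))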
CopperWasp/CNNField
|
[
"41d0af17c8be6c0dcb3c701926ebdd6221584e05",
"41d0af17c8be6c0dcb3c701926ebdd6221584e05"
] |
[
"os_clf_pool.py",
"os_main.py"
] |
[
"import os_clf as clf\nimport numpy as np\nfrom sklearn.linear_model import SGDClassifier\n\n# SGD\nimg_dim = 230400\n\nclass pool:\n def __init__(self):\n self.classifiers = {}\n self.errors = {}\n self.occurrences = {}\n \n \n def add_classifier(self, new_class, index):\n #self.classifiers[new_class] = clf.olsf()\n #\n self.classifiers[new_class] = SGDClassifier(loss=\"hinge\", penalty=\"l2\", max_iter=5, tol=-5, average=10) # sgd\n self.classifiers[new_class].partial_fit(np.ones(img_dim).reshape(1,-1), [1], classes=[-1,1]) # initialization\n #\n self.errors[new_class] = 1.0\n self.occurrences[new_class] = index\n \n \n def predict(self, row):\n self.predictions = {}\n for key in self.classifiers.keys():\n c = self.classifiers[key]\n #result = c.predict(row)\n result = c.predict(row.reshape(1,-1))[0] # sgd\n self.predictions[key] = result\n return self.predictions\n \n \n def expand(self, y, index):\n for label in y.keys():\n if label not in self.predictions.keys():\n self.add_classifier(label, index) \n \n \n def fit(self, row, y, index):\n for key in self.classifiers.keys():\n c = self.classifiers[key]\n y_hat = np.sign(self.predictions[key])\n if key in y.keys():\n #is_false = c.fit(row, y_hat, y[key])\n c.partial_fit(row.reshape(1,-1), [np.sign(y[key])]) # sgd\n self.errors[key] += (y_hat == np.sign(y[key])) # sgd\n else:\n #is_false = c.fit(row, y_hat, -1) # sgd\n c.partial_fit(row.reshape(1,-1), [-1]) # sgd \n self.errors[key] += (y_hat == -1) # sgd\n\n\n self.expand(y, index)\n\n \n ",
"import cv2\nimport torch\nimport numpy as np\nimport os_cnnField as field\nfrom pydarknet import Detector, Image\n\ndata_path = '/home/ege/Desktop/CNNField/data/traffic/out'\ndata_extension = '.jpg'\nyolo_net = Detector(bytes(\"cfg/yolov3.cfg\", encoding=\"utf-8\"), \\\n bytes(\"weights/yolov3.weights\", encoding=\"utf-8\"), 0, \\\n bytes(\"cfg/coco.data\", encoding=\"utf-8\"))\n\n\ndef ask_yolo(index): # tested\n file = data_path + str(index) + data_extension\n img = cv2.imread(file)\n img_darknet = Image(img)\n results = yolo_net.detect(img_darknet)\n return {r[0]: r[1] for r in results} # list of object-confidence-coordinate triplets\n\n\ndef get_image(index): # tested\n img = cv2.imread(data_path + str(index) + data_extension)\n img = np.transpose(img, (2, 0, 1))\n img_tensor = torch.from_numpy(img).unsqueeze(0).type('torch.FloatTensor')\n return img_tensor/255.0\n\n\ncf = field.cnnField()\n\nfor i in range(30):\n print(\"Iteration: \"+str(i))\n img = get_image(i+1)\n supervision = ask_yolo(i+1)\n cf.updateField(img, supervision)\n\nprint(\"Correct, False, Yolo:\")\nprint(cf.correct_counter, cf.false_counter, cf.yolo_counter)\n"
] |
[
[
"numpy.sign",
"sklearn.linear_model.SGDClassifier",
"numpy.ones"
],
[
"torch.from_numpy",
"numpy.transpose"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
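The classifier pool above trains one scikit-learn SGDClassifier per label incrementally through partial_fit; a minimal sketch of that online-learning pattern, using small random feature vectors instead of the 230400-dimensional images (the feature size 8 and the labeling rule below are made up for illustration), could be:

import numpy as np
from sklearn.linear_model import SGDClassifier

rng = np.random.default_rng(0)
clf = SGDClassifier(loss="hinge", penalty="l2")

# The first partial_fit call must declare every class up front, as the pool does.
clf.partial_fit(np.ones((1, 8)), [1], classes=[-1, 1])

for _ in range(100):                      # stream of (features, label) pairs
    x = rng.normal(size=(1, 8))
    y = [1] if x[0, 0] > 0 else [-1]
    clf.partial_fit(x, y)

print(clf.predict(rng.normal(size=(1, 8))))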
parquette/ParFrame
|
[
"0522aa6afdf529b3e91505b70e918f1500aae886"
] |
[
"oss_src/unity/python/sframe/test/test_graph.py"
] |
[
"'''\nCopyright (C) 2015 Dato, Inc.\nAll rights reserved.\n\nThis software may be modified and distributed under the terms\nof the BSD license. See the LICENSE file for details.\n'''\n# from nose import with_setup\nfrom ..data_structures.sgraph import SGraph, Vertex, Edge, load_graph\nfrom ..data_structures.sframe import SFrame\nimport pandas as pd\nfrom pandas.util.testing import assert_frame_equal\nimport numpy as np\nimport unittest\nimport tempfile\nimport util\nimport json\nimport os\n\n\nclass GraphTests(unittest.TestCase):\n def setUp(self):\n self.vertices = pd.DataFrame({\n 'vid': ['1', '2', '3'],\n 'color': ['g', None, 'b'],\n 'vec': [[.1, .1, .1], [.1, .1, .1], [.1, .1, .1]]})\n self.edges = pd.DataFrame({\n 'src_id': ['1', '2', '3'],\n 'dst_id': ['2', '3', '4'],\n 'weight': [0., None, 1.]})\n\n def test_empty_graph(self):\n g = SGraph()\n self.assertEqual(g.summary(), {'num_vertices': 0, 'num_edges': 0})\n self.assertEqual(len(g.get_fields()), 3)\n self.assertTrue(g.get_vertices(format='sframe').shape, (0, 1))\n self.assertTrue(g.get_edges(format='sframe').shape, (0, 2))\n self.assertTrue(g.vertices.shape, (0, 1))\n self.assertTrue(g.edges.shape, (0, 2))\n self.assertTrue(len(g.get_vertices(format='list')) == 0)\n self.assertTrue(len(g.get_edges(format='list')) == 0)\n\n def test_graph_constructor(self):\n g = SGraph().add_vertices(self.vertices, 'vid').add_edges(self.edges, 'src_id', 'dst_id')\n g2 = SGraph(g.vertices, g.edges)\n g3 = SGraph(g.vertices, g.edges, src_field=\"__dst_id\", dst_field=\"__src_id\") #flip around src and dst\n assert_frame_equal(g.vertices.to_dataframe().sort('__id').reset_index(drop=True),\n g2.vertices.to_dataframe().sort('__id').reset_index(drop=True))\n assert_frame_equal(g.edges.to_dataframe().sort(['__src_id', '__dst_id']).reset_index(drop=True),\n g2.edges.to_dataframe().sort(['__src_id', '__dst_id']).reset_index(drop=True))\n self.assertRaises(ValueError, lambda: SGraph(SFrame(self.vertices), SFrame(self.edges)))\n self.assertRaises(ValueError, lambda: SGraph(SFrame(self.vertices), SFrame(self.edges), 'vid', '__src_id', '__dst_id'))\n self.assertRaises(ValueError, lambda: SGraph(SFrame(self.vertices), SFrame(self.edges), vid_field=None, src_field='src_id', dst_field='dst_id'))\n\n def test_simple_graph(self):\n for input_type in [pd.DataFrame, SFrame, list]:\n g = SGraph()\n if input_type is list:\n vertices = [Vertex(x[1]['vid'], {'color': x[1]['color'], 'vec': x[1]['vec']}) for x in self.vertices.iterrows()]\n edges = [Edge(x[1]['src_id'], x[1]['dst_id'], {'weight': x[1]['weight']}) for x in self.edges.iterrows()]\n g = g.add_vertices(vertices)\n g = g.add_edges(edges)\n else:\n g = g.add_vertices(input_type(self.vertices), vid_field='vid')\n g = g.add_edges(input_type(self.edges), src_field='src_id', dst_field='dst_id')\n self.assertEqual(g.summary(), {'num_vertices': 4, 'num_edges': 3})\n self.assertItemsEqual(g.get_fields(), ['__id', '__src_id', '__dst_id', 'color', 'vec', 'weight'])\n self.assertItemsEqual(g.get_vertices(format='dataframe').columns.values, ['color', 'vec'])\n self.assertItemsEqual(g.get_edges(format='dataframe').columns.values, ['__src_id', '__dst_id', 'weight'])\n self.assertTrue(g.get_edges(format='dataframe').shape, (3, 3))\n self.assertTrue(g.get_vertices(format='dataframe').shape, (4, 3))\n self.assertTrue(g.get_vertices(format='dataframe', fields={'color': 'g'}).shape, (1, 2))\n self.assertTrue(g.get_edges(format='dataframe', fields={'weight': 0.}).shape, (1, 3))\n\n 
self.assertItemsEqual(g.get_vertices(format='sframe').column_names(), ['__id', 'color', 'vec'])\n self.assertItemsEqual(g.get_edges(format='sframe').column_names(), ['__src_id', '__dst_id', 'weight'])\n self.assertTrue(g.get_edges(format='sframe').shape, (3, 3))\n self.assertTrue(g.get_vertices(format='sframe').shape, (4, 3))\n self.assertTrue(g.get_vertices(format='sframe', fields={'color': 'g'}).shape, (1, 2))\n self.assertTrue(g.get_edges(format='sframe', fields={'weight': 0.}).shape, (1, 3))\n\n vertices = g.get_vertices(format='list')\n edges = g.get_edges(format='list')\n self.assertEqual(len(vertices), 4)\n self.assertEqual(len(edges), 3)\n\n # get edges is lazy\n edges = g.get_edges()\n self.assertFalse(edges.__is_materialized__())\n\n def test_vertex_query(self):\n df = pd.DataFrame({'src': ['a', 'c', 'b', 'd', 'c', 'e', 'g', 'f'],\n 'dst': ['b', 'b', 'd', 'c', 'e', 'g', 'f', 'e']})\n g = SGraph().add_edges(df, src_field='src', dst_field='dst')\n\n # basic check\n g2 = g.get_neighborhood(ids=['b'], radius=1, full_subgraph=False)\n out = g2.get_edges(format='dataframe')\n out.sort(columns=['__src_id', '__dst_id'], axis=0, inplace=True)\n out.index = range(len(out))\n\n correct = pd.DataFrame.from_records([('b', 'd'),\n ('a', 'b'),\n ('c', 'b')],\n columns=['__src_id', '__dst_id'])\n correct.sort(columns=['__src_id', '__dst_id'], axis=0, inplace=True)\n correct.index = range(len(correct))\n assert_frame_equal(out, correct, check_dtype=False)\n\n # check larger radius, full subgraph, and multiple vertices\n g2 = g.get_neighborhood(ids=['a', 'g'], radius=2, full_subgraph=True)\n out = g2.get_edges(format='dataframe')\n out.sort(columns=['__src_id', '__dst_id'], axis=0, inplace=True)\n out.index = range(len(out))\n\n correct = pd.DataFrame.from_records([('a', 'b'),\n ('b', 'd'),\n ('c', 'b'),\n ('c', 'e'),\n ('d', 'c'),\n ('e', 'g'),\n ('f', 'e'),\n ('g', 'f')],\n columns=['__src_id', '__dst_id'])\n correct.sort(columns=['__src_id', '__dst_id'], axis=0, inplace=True)\n correct.index = range(len(correct))\n assert_frame_equal(out, correct, check_dtype=False)\n\n def test_select_query(self):\n g = SGraph()\n g = g.add_vertices(self.vertices, 'vid').add_edges(self.edges, 'src_id', 'dst_id')\n g2 = g.select_fields([\"color\", \"weight\"])\n self.assertSequenceEqual((g2.get_fields()), ['__id', 'color', '__src_id', '__dst_id', 'weight'])\n g2 = g.select_fields([\"color\"])\n self.assertSequenceEqual((g2.get_fields()), ['__id', 'color', '__src_id', '__dst_id'])\n del g.edges['weight']\n del g.vertices['vec']\n g.vertices['color2'] = g.vertices['color']\n self.assertSequenceEqual((g.get_fields()), ['__id', 'color', 'color2', '__src_id', '__dst_id'])\n g2 = g.select_fields([])\n self.assertSequenceEqual((g2.get_fields()), ['__id', '__src_id', '__dst_id'])\n\n def test_select_query_with_same_vertex_edge_field(self):\n vertices = SFrame({'__id': range(10)})\n edges = SFrame({'__src_id': range(10), '__dst_id': range(1, 11)})\n g = SGraph(vertices, edges)\n g.vertices['weight'] = 0\n g.vertices['v'] = 0\n g.edges['weight'] = 0\n g.edges['e'] = 0\n self.assertItemsEqual(g.get_fields(), ['v', 'e', 'weight', 'weight', '__id', '__src_id', '__dst_id'])\n g2 = g.select_fields('weight')\n self.assertItemsEqual(g2.get_fields(), ['weight', 'weight', '__id', '__src_id', '__dst_id'])\n\n def test_save_load(self):\n g = SGraph().add_vertices(self.vertices, 'vid').add_edges(self.edges, 'src_id', 'dst_id')\n with util.TempDirectory() as f:\n g.save(f)\n g2 = load_graph(f, 'binary')\n 
self.assertEqual(g2.summary(), {'num_vertices': 4, 'num_edges': 3})\n self.assertItemsEqual(g2.get_fields(), {'__id', '__src_id', '__dst_id', 'color', 'vec', 'weight'})\n\n with util.TempDirectory() as f:\n g.save(f, format='csv')\n vertices = SFrame.read_csv(f + \"/vertices.csv\")\n edges = SFrame.read_csv(f + \"/edges.csv\")\n g2 = SGraph().add_edges(edges, '__src_id', '__dst_id').add_vertices(vertices, '__id')\n self.assertEqual(g2.summary(), {'num_vertices': 4, 'num_edges': 3})\n self.assertItemsEqual(g2.get_fields(), {'__id', '__src_id', '__dst_id', 'color', 'vec', 'weight'})\n\n temp_fn = None\n # The delete=False is for Windows sake\n with tempfile.NamedTemporaryFile(suffix='.json', delete=False) as f:\n temp_fn = f.name\n g.save(f.name)\n with open(f.name, 'r') as f2:\n data = f2.read()\n g2 = json.loads(data)\n self.assertTrue(\"vertices\" in g2)\n self.assertTrue(\"edges\" in g2)\n if os.path.exists(temp_fn):\n os.remove(temp_fn)\n\n def test_load_graph_from_text(self):\n toy_graph_snap = \"\"\"#some comment string\n #some more comment string\n 1\\t2\n 1\\t3\n 2\\t3\n 2\\t1\n 3\\t1\n 3\\t2\"\"\"\n\n toy_graph_tsv = \"\"\"1\\t2\n 1\\t3\n 2\\t3\n 2\\t1\n 3\\t1\n 3\\t2\"\"\"\n toy_graph_csv = \"\"\"1,2\n 1,3\n 2,3\n 2,1\n 3,1\n 3,2\"\"\"\n\n temp_fnames = []\n with tempfile.NamedTemporaryFile(delete=False) as fsnap, tempfile.NamedTemporaryFile(delete=False) as ftsv, tempfile.NamedTemporaryFile(delete=False) as fcsv:\n fsnap.write(toy_graph_snap)\n fsnap.file.flush()\n ftsv.write(toy_graph_tsv)\n ftsv.file.flush()\n fcsv.write(toy_graph_csv)\n fcsv.file.flush()\n for (fname, fmt) in zip([fsnap.name, ftsv.name, fcsv.name], ['snap', 'tsv', 'csv']):\n g = load_graph('remote://' + fname, fmt)\n self.assertEqual(g.summary(), {'num_vertices': 3, 'num_edges': 6})\n temp_fnames.append(fname)\n\n for name in temp_fnames:\n if os.path.exists(name):\n os.remove(name)\n\n def test_robust_parse(self):\n df = pd.DataFrame({'int': [1, 2, 3],\n 'float': [1., 2., 3.],\n 'str': ['one', 'two', 'three'],\n 'nan': [np.nan, np.nan, np.nan],\n 'sparse_int': [1, 2, np.nan],\n 'sparse_float': [np.nan, 2., 3.],\n 'sparse_str': [None, 'two', None]\n })\n g = SGraph().add_vertices(df)\n self.assertItemsEqual(g.get_fields(), df.columns.tolist() + ['__id', '__src_id', '__dst_id'])\n\n df2 = g.get_vertices(format='dataframe')\n sf = g.get_vertices(format='sframe')\n for col in df.columns:\n # potential bug: df2 is missing the 'nan' column.\n if (col != 'nan'):\n self.assertItemsEqual(sorted(list(df2[col].dropna())), sorted(list(df[col].dropna())))\n self.assertItemsEqual(sorted(list(sf[col].dropna())), sorted(list(df[col].dropna())))\n\n def test_missing_value_vids(self):\n vertices = SFrame()\n vertices['vid'] = [1, 2, 3, None]\n edges = SFrame()\n edges['src'] = [1, 2, 3, None]\n edges['dst'] = [4, 4, 4, 4]\n self.assertRaises(RuntimeError, lambda : SGraph().add_vertices(vertices, 'vid').summary())\n self.assertRaises(RuntimeError, lambda : SGraph().add_edges(edges, 'src', 'dst').summary())\n self.assertRaises(RuntimeError, lambda : SGraph().add_edges(edges, 'dst', 'src').summary())\n\n def test_gframe(self):\n g = SGraph()\n v = g.vertices\n self.assertSequenceEqual(v.column_names(), ['__id'])\n e = g.edges\n self.assertSequenceEqual(e.column_names(), ['__src_id', '__dst_id'])\n\n # Test vertices and edge attributes cannot be modified\n def set_vertices_empty(g):\n g.vertices = SFrame()\n\n def set_edges_empty(g):\n g.edges = SFrame()\n\n def remove_vertices(g):\n del g.vertices\n\n def remove_edges(g):\n del 
g.edges\n\n def remove_edge_column(gf, name):\n del gf[name]\n\n self.assertRaises(AttributeError, lambda: remove_vertices(g))\n self.assertRaises(AttributeError, lambda: remove_edges(g))\n self.assertRaises(AttributeError, lambda: set_vertices_empty(g))\n self.assertRaises(AttributeError, lambda: set_edges_empty(g))\n\n # Test gframe operations has the same effect as its sframe+graph equivalent\n g = SGraph().add_vertices(self.vertices, 'vid').add_edges(self.edges, 'src_id', 'dst_id')\n v = g.vertices\n v['id_col'] = v['__id']\n e = g.edges\n e['src_id_col'] = e['__src_id']\n e['dst_id_col'] = e['__dst_id']\n g2 = SGraph().add_vertices(self.vertices, 'vid').add_edges(self.edges, 'src_id', 'dst_id')\n new_vdata = g2.get_vertices()\n new_vdata['id_col'] = new_vdata['__id']\n new_edata = g2.get_edges()\n new_edata['src_id_col'] = new_edata['__src_id']\n new_edata['dst_id_col'] = new_edata['__dst_id']\n g2 = SGraph().add_vertices(new_vdata, '__id').add_edges(new_edata, '__src_id', '__dst_id')\n assert_frame_equal(g.get_vertices().to_dataframe().sort('__id').reset_index(drop=True),\n g2.get_vertices().to_dataframe().sort('__id').reset_index(drop=True))\n assert_frame_equal(g.get_edges().to_dataframe().sort(['__src_id', '__dst_id']).reset_index(drop=True),\n g2.get_edges().to_dataframe().sort(['__src_id', '__dst_id']).reset_index(drop=True))\n\n # check delete a column with exception, and edges is still in a valid state\n self.assertRaises(KeyError, lambda: remove_edge_column(g.edges, 'badcolumn'))\n g.edges.head()\n\n # test slicing\n assert_frame_equal(g.edges[:3].to_dataframe(), g.get_edges()[:3].to_dataframe())\n assert_frame_equal(g.vertices[:3].to_dataframe(), g.get_vertices()[:3].to_dataframe())\n\n # test add row number\n e_expected = g.get_edges().to_dataframe()\n v_expected = g.get_vertices().to_dataframe()\n e_expected['id'] = range(len(e_expected))\n v_expected['id'] = range(len(v_expected))\n\n def test_sframe_le_append_skip_row_bug_is_fixed(self):\n \"\"\"\n This test is actually for SFrame lazy evaluation.\n The reason it is here is because the repro can only be done in SGraph.\n\n The bug appears when the SFrame has lazy_append and when passing through\n the logical filter, skip_rows is not done correctly. So the edge_sframe\n is in a bad state when not materialized.\n\n This unit test stays here to ensure the bug is fixed until we can find\n a more clean repro.\n \"\"\"\n n = 12 # smallest n to repro the le_append bug\n\n # A graph with edge i -> i + 1\n g = SGraph().add_edges(SFrame({'src': range(n), 'dst': range(1, n + 1)}), 'src', 'dst')\n\n lazy_sf = g.get_edges()\n materialized_sf = g.get_edges()\n materialized_sf.__materialize__()\n assert_frame_equal(lazy_sf[lazy_sf['__dst_id'] == n].to_dataframe(), materialized_sf[materialized_sf['__dst_id'] == n].to_dataframe())\n"
] |
[
[
"pandas.DataFrame.from_records",
"pandas.util.testing.assert_frame_equal",
"pandas.DataFrame"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
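The graph tests above compare edge lists with pandas' frame-equality helper after sorting and re-indexing; a small standalone sketch of that comparison idiom, using the current pandas.testing import path and sort_values instead of the removed DataFrame.sort, with made-up edge data, would be:

import pandas as pd
from pandas.testing import assert_frame_equal

# Expected vs. observed edge lists, compared ignoring row order as in the tests.
out = pd.DataFrame({'__src_id': ['c', 'a', 'b'], '__dst_id': ['b', 'b', 'd']})
correct = pd.DataFrame.from_records(
    [('a', 'b'), ('b', 'd'), ('c', 'b')], columns=['__src_id', '__dst_id'])

out = out.sort_values(['__src_id', '__dst_id']).reset_index(drop=True)
correct = correct.sort_values(['__src_id', '__dst_id']).reset_index(drop=True)
assert_frame_equal(out, correct, check_dtype=False)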
goncamateus/pytorch-soft-actor-critic
|
[
"a810f2d2fc087521d0fcc020dac6268d8a121dee"
] |
[
"original_her.py"
] |
[
"import argparse\nimport datetime\nimport itertools\n\nimport gym\nimport numpy as np\nimport torch\nfrom torch.utils.tensorboard import SummaryWriter\n\nimport gym_line_follower\nimport wandb\nfrom replay_memory import ReplayGMemory, ReplayMemory\nfrom sac import SAC\n\nparser = argparse.ArgumentParser(description='PyTorch Soft Actor-Critic Args')\nparser.add_argument('--env-name', default='LineFollowerGoal-v0',\n help='Mujoco Gym environment (default: LineFollowerGoal-v0)')\nparser.add_argument('--policy', default=\"Gaussian\",\n help='Policy Type: Gaussian | Deterministic (default: Gaussian)')\nparser.add_argument('--eval', type=bool, default=True,\n help='Evaluates a policy a policy every 10 episode (default: True)')\nparser.add_argument('--gamma', type=float, default=0.99, metavar='G',\n help='discount factor for reward (default: 0.99)')\nparser.add_argument('--tau', type=float, default=0.005, metavar='G',\n help='target smoothing coefficient(τ) (default: 0.005)')\nparser.add_argument('--lr', type=float, default=0.0003, metavar='G',\n help='learning rate (default: 0.0003)')\nparser.add_argument('--alpha', type=float, default=0.2, metavar='G',\n help='Temperature parameter α determines the relative importance of the entropy\\\n term against the reward (default: 0.2)')\nparser.add_argument('--automatic_entropy_tuning', type=bool, default=True, metavar='G',\n help='Automaically adjust α (default: False)')\nparser.add_argument('--seed', type=int, default=123456, metavar='N',\n help='random seed (default: 123456)')\nparser.add_argument('--batch_size', type=int, default=256, metavar='N',\n help='batch size (default: 256)')\nparser.add_argument('--num_steps', type=int, default=1500001, metavar='N',\n help='maximum number of steps (default: 1000000)')\nparser.add_argument('--hidden_size', type=int, default=256, metavar='N',\n help='hidden size (default: 256)')\nparser.add_argument('--updates_per_step', type=int, default=1, metavar='N',\n help='model updates per simulator step (default: 1)')\nparser.add_argument('--start_steps', type=int, default=10000, metavar='N',\n help='Steps sampling random actions (default: 10000)')\nparser.add_argument('--target_update_interval', type=int, default=1, metavar='N',\n help='Value target update per no. 
of updates per step (default: 1)')\nparser.add_argument('--replay_size', type=int, default=1000000, metavar='N',\n help='size of replay buffer (default: 10000000)')\nparser.add_argument('--cuda', action=\"store_true\",\n help='run on CUDA (default: False)')\nargs = parser.parse_args()\n\nwandb.init(name=f\"{args.env_name}-HER\", project=\"Cadeira-RL\")\n# Environment\nenv = gym.make(args.env_name)\n\nenv.seed(args.seed)\n\ntorch.manual_seed(args.seed)\nnp.random.seed(args.seed)\n\nif args.env_name.startswith('Fetch'):\n env_space = env.observation_space.spaces\n agent = SAC(\n env_space['observation'].shape[0]+env_space['desired_goal'].shape[0],\n env.action_space, args)\n\nelse:\n agent = SAC(\n env.observation_space.shape[0]+2,\n env.action_space, args)\n # path = 'models/sac_CHANGE_LineFollowerGoal-v0_her'\n # agent.load_model(path.replace('CHANGE', 'actor'),\n # path.replace('CHANGE', 'critic'))\n\n# Memory\nmemory = ReplayGMemory(args.replay_size, args.seed)\n\n# Training Loop\ntotal_numsteps = 0\nupdates = 0\nfor i_episode in range(500):\n episode_reward = 0\n episode_steps = 0\n done = False\n episode = []\n status = env.reset()\n state = status['observation']\n goal = status['desired_goal']\n while not done:\n if args.start_steps > total_numsteps:\n action = env.action_space.sample() # Sample random action\n else:\n action = agent.select_action(np.concatenate([state, goal]))\n\n if len(memory) > args.batch_size:\n # Number of updates per step in environment\n for i in range(args.updates_per_step):\n # Update parameters of all the networks\n critic_1_loss, critic_2_loss, policy_loss, ent_loss, alpha = agent.update_parameters(\n memory, args.batch_size, updates)\n\n wandb.log({\"critic_1\": critic_1_loss,\n \"critic_2\": critic_2_loss,\n \"policy\": policy_loss,\n \"entropy_loss\": ent_loss,\n \"temp_alpha\": alpha})\n updates += 1\n\n her_goal = status['achieved_goal']\n status, reward, done, info = env.step(action) # Step\n next_state = status['observation']\n next_her_goal = status['achieved_goal']\n\n episode_steps += 1\n total_numsteps += 1\n episode_reward += reward\n\n # Ignore the \"done\" signal if it comes from hitting the time horizon.\n # (https://github.com/openai/spinningup/blob/master/spinup/algos/sac/sac.py)\n mask = float(not done)\n # Append transition to memory\n memory.push(state, action, reward, next_state, mask, goal)\n episode.append((state, action, reward, mask,\n next_state, goal, her_goal, next_her_goal))\n\n state = next_state\n her_goal = next_her_goal\n new_goals = 5\n for i, (state, action, reward, done,\n next_state, goal, her_goal, next_her_goal) in enumerate(episode):\n for t in np.random.choice(len(episode), new_goals):\n try:\n episode[t]\n except:\n continue\n new_goal = episode[t][-1]\n reward = env.compute_reward(next_her_goal, new_goal, None)\n memory.push(state, action, reward, next_state, done, new_goal)\n\n if total_numsteps > args.num_steps:\n break\n\n wandb.log({'reward_train': episode_reward})\n print(\"Episode: {}, total numsteps: {}, episode steps: {}, reward: {}\".format(\n i_episode, total_numsteps, episode_steps, round(episode_reward, 2)))\n\n if i_episode % 100 == 0 and args.eval is True:\n avg_reward = 0.\n episodes = 3\n for _ in range(episodes):\n status = env.reset()\n state = status['observation']\n goal = status['desired_goal']\n episode_reward = 0\n done = False\n while not done:\n action = agent.select_action(np.concatenate(\n [state, goal]), evaluate=True)\n status, reward, done, info = env.step(action) # Step\n next_state 
= status['observation']\n next_her_goal = status['achieved_goal']\n episode_reward += reward\n\n state = next_state\n her_goal = next_her_goal\n avg_reward += episode_reward\n avg_reward /= episodes\n\n wandb.log({'reward_test': avg_reward})\n\n print(\"----------------------------------------\")\n print(\"Test Episodes: {}, Avg. Reward: {}\".format(\n episodes, round(avg_reward, 2)))\n print(\"----------------------------------------\")\n agent.save_model(env_name=args.env_name, suffix='her')\n\nenv.close()\n"
] |
[
[
"numpy.concatenate",
"torch.manual_seed",
"numpy.random.seed"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
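The training loop above pushes each transition again with goals that were actually achieved later in the same episode (hindsight experience replay); a compact, framework-free sketch of just that relabeling step, with hypothetical compute_reward and k arguments, might be:

import random

def her_relabel(episode, compute_reward, k=5):
    # episode entries: (state, action, reward, next_state, done, goal, achieved, next_achieved)
    relabeled = []
    for (s, a, r, s2, d, g, ag, next_ag) in episode:
        for _ in range(k):
            # substitute a goal achieved somewhere in this episode
            new_goal = random.choice(episode)[-1]
            relabeled.append((s, a, compute_reward(next_ag, new_goal), s2, d, new_goal))
    return relabeled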
yifanc96/LearnViT
|
[
"ab50b7c269ceb95c9321045d5ffc193b492de768"
] |
[
"models/my_networks/ViT_Attn_MLP_separate.py"
] |
[
"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nmodified from https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/vision_transformer.py\n\"\"\"\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom functools import partial\nfrom collections import OrderedDict\nimport math\n\n# basic layers\nfrom ..copied_layers.patch_embed import PatchEmbed\nfrom ..copied_layers.drop import DropPath\nfrom ..copied_layers.mlp import Mlp\nfrom ..copied_layers.weight_init import trunc_normal_, lecun_normal_\n\nclass Attention(nn.Module):\n def __init__(self, dim, num_heads=8, qkv_bias=False, qk_scale=None, attn_drop=0., proj_drop=0.):\n super().__init__()\n self.num_heads = num_heads\n head_dim = dim // num_heads\n self.scale = qk_scale or head_dim ** -0.5\n\n self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)\n self.attn_drop = nn.Dropout(attn_drop)\n self.proj = nn.Linear(dim, dim)\n self.proj_drop = nn.Dropout(proj_drop)\n\n def forward(self, x):\n B, N, C = x.shape\n qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)\n q, k, v = qkv[0], qkv[1], qkv[2] # make torchscript happy (cannot use tensor as tuple)\n\n attn = (q @ k.transpose(-2, -1)) * self.scale\n attn = attn.softmax(dim=-1)\n attn = self.attn_drop(attn)\n\n x = (attn @ v).transpose(1, 2).reshape(B, N, C)\n x = self.proj(x)\n x = self.proj_drop(x)\n return x\n\nclass Block_attn(nn.Module):\n\n def __init__(self, dim, num_heads, mlp_ratio=4., qkv_bias=False, qk_scale=None, drop=0., attn_drop=0.,\n drop_path=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm, layerscale = 0.0):\n super().__init__()\n self.norm1 = norm_layer(dim)\n self.attn = Attention(\n dim, num_heads=num_heads, qkv_bias=qkv_bias, qk_scale=qk_scale, attn_drop=attn_drop, proj_drop=drop)\n # NOTE: drop path for stochastic depth, we shall see if this is better than dropout here\n self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()\n self.layerscale = layerscale\n if layerscale > 0.0: \n self.gamma = nn.Parameter(layerscale * torch.ones((dim)), requires_grad=True)\n \n def forward(self, x):\n if self.layerscale > 0.0: \n x = x + self.drop_path(self.gamma*self.attn(self.norm1(x)))\n else:\n x = x + self.drop_path(self.attn(self.norm1(x)))\n return x\n\nclass Block_MLP(nn.Module):\n def __init__(self, dim, num_heads, mlp_ratio=4., qkv_bias=False, qk_scale=None, drop=0., attn_drop=0.,\n drop_path=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm, layerscale = 0.0):\n super().__init__()\n self.drop_path = DropPath(drop_path) if drop_path > 0. 
else nn.Identity()\n self.norm2 = norm_layer(dim)\n mlp_hidden_dim = int(dim * mlp_ratio)\n self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop)\n self.layerscale = layerscale\n if layerscale > 0.0: \n self.gamma = nn.Parameter(layerscale * torch.ones((dim)), requires_grad=True)\n\n def forward(self, x):\n if self.layerscale > 0.0: \n x = x + self.drop_path(self.gamma*self.mlp(self.norm2(x)))\n else:\n x = x + self.drop_path(self.mlp(self.norm2(x)))\n return x \n\n\nclass VisionTransformer(nn.Module):\n \"\"\" Vision Transformer\n A PyTorch impl of : `An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale`\n - https://arxiv.org/abs/2010.11929\n Includes distillation token & head support for `DeiT: Data-efficient Image Transformers`\n - https://arxiv.org/abs/2012.12877\n \"\"\"\n\n def __init__(self, img_size=224, patch_size=16, in_chans=3, num_classes=1000, embed_dim=768, depth=12,\n num_heads=12, mlp_ratio=4., qkv_bias=True, qk_scale=None, representation_size=None, distilled=False,\n drop_rate=0., attn_drop_rate=0., drop_path_rate=0., layerscale = 0.0, embed_layer=PatchEmbed, norm_layer=None,\n act_layer=None, weight_init=''):\n \"\"\"\n Args:\n img_size (int, tuple): input image size\n patch_size (int, tuple): patch size\n in_chans (int): number of input channels\n num_classes (int): number of classes for classification head\n embed_dim (int): embedding dimension\n depth (int): depth of transformer\n num_heads (int): number of attention heads\n mlp_ratio (int): ratio of mlp hidden dim to embedding dim\n qkv_bias (bool): enable bias for qkv if True\n qk_scale (float): override default qk scale of head_dim ** -0.5 if set\n representation_size (Optional[int]): enable and set representation layer (pre-logits) to this value if set\n distilled (bool): model includes a distillation token and head as in DeiT models\n drop_rate (float): dropout rate\n attn_drop_rate (float): attention dropout rate\n drop_path_rate (float): stochastic depth rate\n embed_layer (nn.Module): patch embedding layer\n norm_layer: (nn.Module): normalization layer\n weight_init: (str): weight init scheme\n \"\"\"\n super().__init__()\n self.num_classes = num_classes\n self.num_features = self.embed_dim = embed_dim # num_features for consistency with other models\n self.num_tokens = 2 if distilled else 1\n norm_layer = norm_layer or partial(nn.LayerNorm, eps=1e-6)\n act_layer = act_layer or nn.GELU\n\n self.patch_embed = embed_layer(\n img_size=img_size, patch_size=patch_size, in_chans=in_chans, embed_dim=embed_dim)\n num_patches = self.patch_embed.num_patches\n\n self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim))\n self.dist_token = nn.Parameter(torch.zeros(1, 1, embed_dim)) if distilled else None\n self.pos_embed = nn.Parameter(torch.zeros(1, num_patches + self.num_tokens, embed_dim))\n self.pos_drop = nn.Dropout(p=drop_rate)\n self.depth = depth\n dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)] # stochastic depth decay rule\n \n \n self.blocks_attn = nn.ModuleList([\n Block_attn(\n dim=embed_dim, num_heads=num_heads, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, qk_scale=qk_scale,\n drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[i], norm_layer=norm_layer, act_layer=act_layer, layerscale = layerscale)\n for i in range(depth)])\n self.blocks_MLP = nn.ModuleList([\n Block_MLP(\n dim=embed_dim, num_heads=num_heads, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, qk_scale=qk_scale,\n drop=drop_rate, attn_drop=attn_drop_rate, 
drop_path=dpr[i], norm_layer=norm_layer, act_layer=act_layer, layerscale = layerscale)\n for i in range(depth)])\n self.norm = norm_layer(embed_dim)\n # Representation layer\n if representation_size and not distilled:\n self.num_features = representation_size\n self.pre_logits = nn.Sequential(OrderedDict([\n ('fc', nn.Linear(embed_dim, representation_size)),\n ('act', nn.Tanh())\n ]))\n else:\n self.pre_logits = nn.Identity()\n\n # Classifier head(s)\n self.head = nn.Linear(self.num_features, num_classes) if num_classes > 0 else nn.Identity()\n self.head_dist = None\n if distilled:\n self.head_dist = nn.Linear(self.embed_dim, self.num_classes) if num_classes > 0 else nn.Identity()\n\n # Weight init\n assert weight_init in ('jax', 'jax_nlhb', 'nlhb', '')\n head_bias = -math.log(self.num_classes) if 'nlhb' in weight_init else 0.\n trunc_normal_(self.pos_embed, std=.02)\n if self.dist_token is not None:\n trunc_normal_(self.dist_token, std=.02)\n if weight_init.startswith('jax'):\n # leave cls token as zeros to match jax impl\n for n, m in self.named_modules():\n _init_vit_weights(m, n, head_bias=head_bias, jax_impl=True)\n else:\n trunc_normal_(self.cls_token, std=.02)\n self.apply(_init_vit_weights)\n\n def _init_weights(self, m):\n # this fn left here for compat with downstream users\n _init_vit_weights(m)\n\n @torch.jit.ignore\n def no_weight_decay(self):\n return {'pos_embed', 'cls_token', 'dist_token'}\n\n def get_classifier(self):\n if self.dist_token is None:\n return self.head\n else:\n return self.head, self.head_dist\n\n def reset_classifier(self, num_classes, global_pool=''):\n self.num_classes = num_classes\n self.head = nn.Linear(self.embed_dim, num_classes) if num_classes > 0 else nn.Identity()\n if self.num_tokens == 2:\n self.head_dist = nn.Linear(self.embed_dim, self.num_classes) if num_classes > 0 else nn.Identity()\n\n def forward_features(self, x):\n x = self.patch_embed(x)\n cls_token = self.cls_token.expand(x.shape[0], -1, -1) # stole cls_tokens impl from Phil Wang, thanks\n if self.dist_token is None:\n x = torch.cat((cls_token, x), dim=1)\n else:\n x = torch.cat((cls_token, self.dist_token.expand(x.shape[0], -1, -1), x), dim=1)\n x = self.pos_drop(x + self.pos_embed)\n for i in range(self.depth):\n x = self.blocks_MLP[i](self.blocks_attn[i](x))\n x = self.norm(x)\n if self.dist_token is None:\n return self.pre_logits(x[:, 0])\n else:\n return x[:, 0], x[:, 1]\n\n def forward(self, x):\n x = self.forward_features(x)\n if self.head_dist is not None:\n x, x_dist = self.head(x[0]), self.head_dist(x[1]) # x must be a tuple\n if self.training and not torch.jit.is_scripting():\n # during inference, return the average of both classifier predictions\n return x, x_dist\n else:\n return (x + x_dist) / 2\n else:\n x = self.head(x)\n return x\n\n\ndef _init_vit_weights(m, n: str = '', head_bias: float = 0., jax_impl: bool = False):\n \"\"\" ViT weight initialization\n * When called without n, head_bias, jax_impl args it will behave exactly the same\n as my original init for compatibility with prev hparam / downstream use cases (ie DeiT).\n * When called w/ valid n (module name) and jax_impl=True, will (hopefully) match JAX impl\n \"\"\"\n if isinstance(m, nn.Linear):\n if n.startswith('head'):\n nn.init.zeros_(m.weight)\n nn.init.constant_(m.bias, head_bias)\n elif n.startswith('pre_logits'):\n lecun_normal_(m.weight)\n nn.init.zeros_(m.bias)\n else:\n if jax_impl:\n nn.init.xavier_uniform_(m.weight)\n if m.bias is not None:\n if 'mlp' in n:\n nn.init.normal_(m.bias, std=1e-6)\n 
else:\n nn.init.zeros_(m.bias)\n else:\n trunc_normal_(m.weight, std=.02)\n if m.bias is not None:\n nn.init.zeros_(m.bias)\n elif jax_impl and isinstance(m, nn.Conv2d):\n # NOTE conv was left to pytorch default in my original init\n lecun_normal_(m.weight)\n if m.bias is not None:\n nn.init.zeros_(m.bias)\n elif isinstance(m, nn.LayerNorm):\n nn.init.zeros_(m.bias)\n nn.init.ones_(m.weight)\n\n\ndef resize_pos_embed(posemb, posemb_new, num_tokens=1, gs_new=()):\n # Rescale the grid of position embeddings when loading from state_dict. Adapted from\n # https://github.com/google-research/vision_transformer/blob/00883dd691c63a6830751563748663526e811cee/vit_jax/checkpoint.py#L224\n # _logger.info('Resized position embedding: %s to %s', posemb.shape, posemb_new.shape)\n ntok_new = posemb_new.shape[1]\n if num_tokens:\n posemb_tok, posemb_grid = posemb[:, :num_tokens], posemb[0, num_tokens:]\n ntok_new -= num_tokens\n else:\n posemb_tok, posemb_grid = posemb[:, :0], posemb[0]\n gs_old = int(math.sqrt(len(posemb_grid)))\n if not len(gs_new): # backwards compatibility\n gs_new = [int(math.sqrt(ntok_new))] * 2\n assert len(gs_new) >= 2\n # _logger.info('Position embedding grid-size from %s to %s', [gs_old, gs_old], gs_new)\n posemb_grid = posemb_grid.reshape(1, gs_old, gs_old, -1).permute(0, 3, 1, 2)\n posemb_grid = F.interpolate(posemb_grid, size=gs_new, mode='bilinear')\n posemb_grid = posemb_grid.permute(0, 2, 3, 1).reshape(1, gs_new[0] * gs_new[1], -1)\n posemb = torch.cat([posemb_tok, posemb_grid], dim=1)\n return posemb\n\n\ndef checkpoint_filter_fn(state_dict, model):\n \"\"\" convert patch embedding weight from manual patchify + linear proj to conv\"\"\"\n out_dict = {}\n if 'model' in state_dict:\n # For deit models\n state_dict = state_dict['model']\n for k, v in state_dict.items():\n if 'patch_embed.proj.weight' in k and len(v.shape) < 4:\n # For old models that I trained prior to conv based patchification\n O, I, H, W = model.patch_embed.proj.weight.shape\n v = v.reshape(O, -1, H, W)\n elif k == 'pos_embed' and v.shape != model.pos_embed.shape:\n # To resize pos embedding when using model at different size from pretrained weights\n v = resize_pos_embed(\n v, model.pos_embed, getattr(model, 'num_tokens', 1), model.patch_embed.grid_size)\n out_dict[k] = v\n return out_dict\n\n\"\"\"\ndef _create_vision_transformer(variant, pretrained=False, default_cfg=None, **kwargs):\n default_cfg = default_cfg or default_cfgs[variant]\n if kwargs.get('features_only', None):\n raise RuntimeError('features_only not implemented for Vision Transformer models.')\n\n # NOTE this extra code to support handling of repr size for in21k pretrained models\n default_num_classes = default_cfg['num_classes']\n num_classes = kwargs.get('num_classes', default_num_classes)\n repr_size = kwargs.pop('representation_size', None)\n if repr_size is not None and num_classes != default_num_classes:\n # Remove representation layer if fine-tuning. This may not always be the desired action,\n # but I feel better than doing nothing by default for fine-tuning. Perhaps a better interface?\n _logger.warning(\"Removing representation layer for fine-tuning.\")\n repr_size = None\n\n model = build_model_with_cfg(\n VisionTransformer, variant, pretrained,\n default_cfg=default_cfg,\n representation_size=repr_size,\n pretrained_filter_fn=checkpoint_filter_fn,\n **kwargs)\n return model\n\"\"\""
] |
[
[
"torch.nn.Dropout",
"torch.linspace",
"torch.ones",
"torch.cat",
"torch.zeros",
"torch.nn.init.constant_",
"torch.nn.Tanh",
"torch.nn.Linear",
"torch.nn.Identity",
"torch.nn.init.ones_",
"torch.nn.init.xavier_uniform_",
"torch.jit.is_scripting",
"torch.nn.functional.interpolate",
"torch.nn.init.zeros_",
"torch.nn.init.normal_"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
vladmandic/tf-cnn-classification
|
[
"af6c9cfe3edc4455d40430ed87a5a77f8ebc3548"
] |
[
"src/predict.py"
] |
[
"import tensorflow as tf\nimport sys\nfrom configuration import save_model_dir, test_image_dir\nfrom prepare_data import load_and_preprocess_image\nfrom train import get_model\n\n\ndef get_single_picture_prediction(model, picture_dir):\n image_tensor = load_and_preprocess_image(tf.io.read_file(filename=picture_dir), data_augmentation=True)\n image = tf.expand_dims(image_tensor, axis=0)\n prediction = model(image, training=False)\n pred_class = tf.math.argmax(prediction, axis=-1)\n return pred_class\n\n\nif __name__ == '__main__':\n # GPU settings\n gpus = tf.config.list_physical_devices('GPU')\n if gpus:\n for gpu in gpus:\n tf.config.experimental.set_memory_growth(gpu, True)\n\n # load the model\n model = get_model()\n model.load_weights(filepath=save_model_dir+\"model\")\n\n pred_class = get_single_picture_prediction(model, sys.argv[1])\n print(pred_class)\n"
] |
[
[
"tensorflow.math.argmax",
"tensorflow.config.experimental.set_memory_growth",
"tensorflow.expand_dims",
"tensorflow.config.list_physical_devices",
"tensorflow.io.read_file"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
}
] |
OFR-IIASA/ixmp
|
[
"8b68c61e4d9a794c725ca6ffa572fd9e36a65855",
"8b68c61e4d9a794c725ca6ffa572fd9e36a65855"
] |
[
"ixmp/tests/core/test_platform.py",
"ixmp/core/scenario.py"
] |
[
"\"\"\"Tests of :class:`ixmp.Platform`.\"\"\"\nimport logging\nimport re\nfrom sys import getrefcount\nfrom weakref import getweakrefcount\n\nimport pandas as pd\nimport pytest\nfrom pandas.testing import assert_frame_equal\nfrom pytest import raises\n\nimport ixmp\nfrom ixmp.backend import FIELDS\nfrom ixmp.testing import DATA, assert_logs, models\n\n\nclass TestPlatform:\n def test_init(self):\n with pytest.raises(\n ValueError, match=re.escape(\"backend class 'foo' not among ['jdbc']\")\n ):\n ixmp.Platform(backend=\"foo\")\n\n # name=\"default\" is used, referring to \"local\"\n mp = ixmp.Platform()\n assert \"local\" == mp.name\n\n def test_getattr(self, test_mp):\n \"\"\"Test __getattr__.\"\"\"\n with pytest.raises(AttributeError):\n test_mp.not_a_direct_backend_method\n\n\[email protected]\ndef log_level_mp(test_mp):\n \"\"\"A fixture that preserves the log level of *test_mp*.\"\"\"\n tmp = test_mp.get_log_level()\n yield test_mp\n test_mp.set_log_level(tmp)\n\n\[email protected](\n \"level, exc\",\n [\n (\"CRITICAL\", None),\n (\"ERROR\", None),\n (\"WARNING\", None),\n (\"INFO\", None),\n (\"DEBUG\", None),\n (\"NOTSET\", None),\n # An unknown string fails\n (\"FOO\", ValueError),\n ],\n)\ndef test_log_level(log_level_mp, level, exc):\n \"\"\"Log level can be set and retrieved.\"\"\"\n if exc is None:\n log_level_mp.set_log_level(level)\n assert log_level_mp.get_log_level() == level\n else:\n with pytest.raises(exc):\n log_level_mp.set_log_level(level)\n\n\ndef test_scenario_list(mp):\n scenario = mp.scenario_list(model=\"Douglas Adams\")[\"scenario\"]\n assert scenario[0] == \"Hitchhiker\"\n\n\ndef test_export_timeseries_data(test_mp, tmp_path):\n path = tmp_path / \"export.csv\"\n test_mp.export_timeseries_data(\n path, model=\"Douglas Adams\", unit=\"???\", region=\"World\"\n )\n\n obs = pd.read_csv(path, index_col=False, header=0)\n\n exp = (\n DATA[0]\n .assign(**models[\"h2g2\"], version=1, subannual=\"Year\", meta=0)\n .rename(columns=lambda c: c.upper())\n .reindex(columns=FIELDS[\"write_file\"])\n )\n\n assert_frame_equal(exp, obs)\n\n\ndef test_export_ts_wrong_params(test_mp, tmp_path):\n \"\"\"Platform.export_timeseries_data to raise error with wrong parameters.\"\"\"\n path = tmp_path / \"export.csv\"\n with raises(ValueError, match=\"Invalid arguments\"):\n test_mp.export_timeseries_data(\n path,\n model=\"Douglas Adams\",\n unit=\"???\",\n region=\"World\",\n export_all_runs=True,\n )\n\n\ndef test_export_ts_of_all_runs(mp, tmp_path):\n \"\"\"Export timeseries of all runs.\"\"\"\n path = tmp_path / \"export.csv\"\n\n # Add a new version of a run\n ts = ixmp.TimeSeries(mp, **models[\"h2g2\"], version=\"new\", annotation=\"fo\")\n ts.add_timeseries(DATA[0])\n ts.commit(\"create a new version\")\n ts.set_as_default()\n\n # Export all default model+scenario runs\n mp.export_timeseries_data(\n path, unit=\"???\", region=\"World\", default=True, export_all_runs=True\n )\n\n obs = pd.read_csv(path, index_col=False, header=0)\n exp = (\n DATA[0]\n .assign(**models[\"h2g2\"], version=2, subannual=\"Year\", meta=0)\n .rename(columns=lambda c: c.upper())\n .reindex(columns=FIELDS[\"write_file\"])\n )\n\n assert_frame_equal(exp, obs)\n\n # Export all model+scenario run versions (including non-default)\n mp.export_timeseries_data(\n path, unit=\"???\", region=\"World\", default=False, export_all_runs=True\n )\n obs = pd.read_csv(path, index_col=False, header=0)\n assert 4 == len(obs)\n\n\ndef test_export_timeseries_data_empty(mp, tmp_path):\n \"\"\"Dont export data if given 
models/scenarios do not have any runs.\"\"\"\n path = tmp_path / \"export.csv\"\n model = \"model-no-run\"\n mp.add_model_name(model)\n mp.add_scenario_name(\"scenario-no-run\")\n\n mp.export_timeseries_data(path, model=model, unit=\"???\", region=\"World\")\n\n assert 0 == len(pd.read_csv(path, index_col=False, header=0))\n\n\ndef test_unit_list(test_mp):\n units = test_mp.units()\n assert (\"cases\" in units) is True\n\n\ndef test_add_unit(test_mp):\n test_mp.add_unit(\"test\", \"just testing\")\n\n\ndef test_regions(test_mp):\n regions = test_mp.regions()\n\n # Result has the expected columns\n columns = [\"region\", \"mapped_to\", \"parent\", \"hierarchy\"]\n assert all(regions.columns == columns)\n\n # One row is as expected\n obs = regions[regions.region == \"World\"]\n assert all([list(obs.loc[0]) == [\"World\", None, \"World\", \"common\"]])\n\n\ndef test_add_region(test_mp):\n # Region can be added\n test_mp.add_region(\"foo\", \"bar\", \"World\")\n\n # Region can be retrieved\n regions = test_mp.regions()\n obs = regions[regions[\"region\"] == \"foo\"].reset_index(drop=True)\n assert all([list(obs.loc[0]) == [\"foo\", None, \"World\", \"bar\"]])\n\n\ndef test_add_region_synonym(test_mp):\n test_mp.add_region(\"foo\", \"bar\", \"World\")\n test_mp.add_region_synonym(\"foo2\", \"foo\")\n regions = test_mp.regions()\n obs = regions[regions.region.isin([\"foo\", \"foo2\"])].reset_index(drop=True)\n\n exp = pd.DataFrame(\n [\n [\"foo\", None, \"World\", \"bar\"],\n [\"foo2\", \"foo\", \"World\", \"bar\"],\n ],\n columns=[\"region\", \"mapped_to\", \"parent\", \"hierarchy\"],\n )\n assert_frame_equal(obs, exp)\n\n\ndef test_timeslices(test_mp):\n timeslices = test_mp.timeslices()\n obs = timeslices[timeslices.category == \"Common\"]\n # result has all attributes of time slice\n assert all(obs.columns == [\"name\", \"category\", \"duration\"])\n # result contains pre-defined YEAR time slice\n assert all([list(obs.iloc[0]) == [\"Year\", \"Common\", 1.0]])\n\n\ndef test_add_timeslice(test_mp):\n test_mp.add_timeslice(\"January, 1st\", \"Days\", 1.0 / 366)\n timeslices = test_mp.timeslices()\n obs = timeslices[timeslices.category == \"Days\"]\n # return only added time slice\n assert len(obs) == 1\n # returned time slice attributes have expected values\n assert all([list(obs.iloc[0]) == [\"January, 1st\", \"Days\", 1.0 / 366]])\n\n\ndef test_add_timeslice_duplicate(caplog, test_mp):\n test_mp.add_timeslice(\"foo_slice\", \"foo_category\", 0.2)\n\n # Adding same name with different duration raises an error\n msg = \"timeslice `foo_slice` already defined with duration 0.2\"\n with raises(ValueError, match=re.escape(msg)):\n test_mp.add_timeslice(\"foo_slice\", \"bar_category\", 0.3)\n\n # Re-adding with the same duration only logs a message\n with assert_logs(caplog, msg, at_level=logging.INFO):\n test_mp.add_timeslice(\"foo_slice\", \"bar_category\", 0.2)\n\n\ndef test_weakref():\n \"\"\"Weak references allow Platforms to be del'd while Scenarios live.\"\"\"\n mp = ixmp.Platform(\n backend=\"jdbc\",\n driver=\"hsqldb\",\n url=\"jdbc:hsqldb:mem:test_weakref\",\n )\n\n # There is one reference to the Platform, and zero weak references\n assert getrefcount(mp) - 1 == 1\n assert getweakrefcount(mp) == 0\n\n # Create a single Scenario\n s = ixmp.Scenario(mp, \"foo\", \"bar\", version=\"new\")\n\n # Still one reference to the Platform\n assert getrefcount(mp) - 1 == 1\n # …but additionally one weak reference\n assert getweakrefcount(mp) == 1\n\n # Make a local reference to the backend\n backend = 
mp._backend\n\n # Delete the Platform. Note that this only has an effect if there are no existing\n # references to it\n del mp\n\n # s.platform is a dead weak reference, so it can't be accessed\n with pytest.raises(ReferenceError):\n s.platform._backend\n\n # There is only one remaining reference to the backend: the *backend* name in the\n # local scope\n assert getrefcount(backend) - 1 == 1\n\n # The backend is garbage-collected at this point\n\n # The Scenario object still lives, but can't be used for anything\n assert s.model == \"foo\"\n\n # *s* is garbage-collected at this point\n\n\ndef test_add_model_name(test_mp):\n test_mp.add_model_name(\"new_model_name\")\n assert \"new_model_name\" in test_mp.get_model_names()\n\n\ndef test_add_scenario_name(test_mp):\n test_mp.add_scenario_name(\"new_scenario_name\")\n assert \"new_scenario_name\" in test_mp.get_scenario_names()\n",
"import logging\nfrom functools import partial\nfrom itertools import repeat, zip_longest\nfrom numbers import Real\nfrom os import PathLike\nfrom pathlib import Path\nfrom typing import Any, Callable, Dict, Iterable, List, Optional, Sequence, Tuple, Union\nfrom warnings import warn\n\nimport pandas as pd\n\nfrom ixmp.backend import ItemType\nfrom ixmp.core.platform import Platform\nfrom ixmp.core.timeseries import TimeSeries\nfrom ixmp.model import get_model\nfrom ixmp.utils import as_str_list, check_year\n\nlog = logging.getLogger(__name__)\n\n\nclass Scenario(TimeSeries):\n \"\"\"Collection of model-related data.\n\n See :class:`.TimeSeries` for the meaning of parameters `mp`, `model`, `scenario`,\n `version`, and `annotation`.\n\n Parameters\n ----------\n scheme : str, optional\n Use an explicit scheme to initialize the new scenario. The\n :meth:`~.base.Model.initialize` method of the corresponding :class:`.Model`\n class in :data:`.MODELS` is used to initialize items in the Scenario.\n cache:\n .. deprecated:: 3.0\n The `cache` keyword argument to :class:`Scenario` has no effect and raises a\n warning. Use `cache` as one of the `backend_args` to :class:`Platform` to\n disable/enable caching for storage backends that support it. Use\n :meth:`load_scenario_data` to load all data in the Scenario into an in-memory\n cache.\n \"\"\"\n\n #: Scheme of the Scenario.\n scheme = None\n\n def __init__(\n self,\n mp: Platform,\n model: str,\n scenario: str,\n version: Optional[Union[int, str]] = None,\n scheme: Optional[str] = None,\n annotation: Optional[str] = None,\n **model_init_args,\n ) -> None:\n # Check arguments\n if version == \"new\" and scheme is None:\n log.info(f\"No scheme for new Scenario {model}/{scenario}\")\n scheme = \"\"\n\n if \"cache\" in model_init_args:\n warn(\n \"Scenario(…, cache=…) is deprecated; use Platform(…, cache=…) instead\",\n DeprecationWarning,\n )\n model_init_args.pop(\"cache\")\n\n # Call the parent constructor\n super().__init__(\n mp=mp,\n model=model,\n scenario=scenario,\n version=version,\n scheme=scheme,\n annotation=annotation,\n )\n\n if self.scheme == \"MESSAGE\" and self.__class__ is Scenario:\n # Loaded scenario has an improper scheme\n raise RuntimeError(\n f\"{model}/{scenario} is a MESSAGE-scheme scenario; use \"\n \"message_ix.Scenario()\"\n )\n\n # Retrieve the Model class correlating to the *scheme*\n model_class = get_model(self.scheme).__class__\n\n # Use the model class to initialize the Scenario\n model_class.initialize(self, **model_init_args)\n\n def check_out(self, timeseries_only: bool = False) -> None:\n \"\"\"Check out the Scenario.\n\n Raises\n ------\n ValueError\n If :meth:`has_solution` is :obj:`True`.\n\n See Also\n --------\n TimeSeries.check_out\n utils.maybe_check_out\n \"\"\"\n if not timeseries_only and self.has_solution():\n raise ValueError(\n \"This Scenario has a solution, \"\n \"use `Scenario.remove_solution()` or \"\n \"`Scenario.clone(..., keep_solution=False)`\"\n )\n super().check_out(timeseries_only)\n\n def load_scenario_data(self) -> None:\n \"\"\"Load all Scenario data into memory.\n\n Raises\n ------\n ValueError\n If the Scenario was instantiated with ``cache=False``.\n \"\"\"\n if not getattr(self.platform._backend, \"cache_enabled\", False):\n raise ValueError(\"Cache must be enabled to load scenario data\")\n\n for ix_type in \"equ\", \"par\", \"set\", \"var\":\n log.debug(f\"Cache {repr(ix_type)} data\")\n get_func = getattr(self, ix_type)\n for name in getattr(self, \"{}_list\".format(ix_type))():\n 
get_func(name)\n\n def idx_sets(self, name: str) -> List[str]:\n \"\"\"Return the list of index sets for an item (set, par, var, equ).\n\n Parameters\n ----------\n name : str\n name of the item\n \"\"\"\n return self._backend(\"item_index\", name, \"sets\")\n\n def idx_names(self, name: str) -> List[str]:\n \"\"\"Return the list of index names for an item (set, par, var, equ).\n\n Parameters\n ----------\n name : str\n name of the item\n \"\"\"\n return self._backend(\"item_index\", name, \"names\")\n\n def _keys(self, name, key_or_keys):\n if isinstance(key_or_keys, (list, pd.Series)):\n return as_str_list(key_or_keys)\n elif isinstance(key_or_keys, (pd.DataFrame, dict)):\n if isinstance(key_or_keys, dict):\n key_or_keys = pd.DataFrame.from_dict(key_or_keys, orient=\"columns\")\n idx_names = self.idx_names(name)\n return [as_str_list(row, idx_names) for _, row in key_or_keys.iterrows()]\n else:\n return [str(key_or_keys)]\n\n def set_list(self) -> List[str]:\n \"\"\"List all defined sets.\"\"\"\n return self._backend(\"list_items\", \"set\")\n\n def has_set(self, name: str) -> bool:\n \"\"\"Check whether the scenario has a set *name*.\"\"\"\n return name in self.set_list()\n\n def init_set(\n self, name: str, idx_sets: Sequence[str] = None, idx_names: Sequence[str] = None\n ) -> None:\n \"\"\"Initialize a new set.\n\n Parameters\n ----------\n name : str\n Name of the set.\n idx_sets : sequence of str or str, optional\n Names of other sets that index this set.\n idx_names : sequence of str or str, optional\n Names of the dimensions indexed by `idx_sets`.\n\n Raises\n ------\n ValueError\n If the set (or another object with the same *name*) already exists.\n RuntimeError\n If the Scenario is not checked out (see :meth:`~TimeSeries.check_out`).\n \"\"\"\n idx_sets = as_str_list(idx_sets) or []\n idx_names = as_str_list(idx_names)\n return self._backend(\"init_item\", \"set\", name, idx_sets, idx_names)\n\n def set(\n self, name: str, filters: Dict[str, Sequence[str]] = None, **kwargs\n ) -> Union[List[str], pd.DataFrame]:\n \"\"\"Return the (filtered) elements of a set.\n\n Parameters\n ----------\n name : str\n Name of the set.\n filters : dict\n Mapping of `dimension_name` → `elements`, where `dimension_name` is one of\n the `idx_names` given when the set was initialized (see :meth:`init_set`),\n and `elements` is an iterable of labels to include in the return value.\n\n Returns\n -------\n :class:`pandas.DataFrame`\n \"\"\"\n return self._backend(\"item_get_elements\", \"set\", name, filters)\n\n def add_set(\n self,\n name: str,\n key: Union[str, Sequence[str], Dict, pd.DataFrame],\n comment: str = None,\n ) -> None:\n \"\"\"Add elements to an existing set.\n\n Parameters\n ----------\n name : str\n Name of the set.\n key : str or iterable of str or dict or :class:`pandas.DataFrame`\n Element(s) to be added. If `name` exists, the elements are appended to\n existing elements.\n comment : str or iterable of str, optional\n Comment describing the element(s). If given, there must be the same number\n of comments as elements.\n\n Raises\n ------\n KeyError\n If the set `name` does not exist. 
:meth:`init_set` must be called before\n :meth:`add_set`.\n ValueError\n For invalid forms or combinations of `key` and `comment`.\n \"\"\"\n # TODO expand docstring (here or in doc/source/api.rst) with examples, per\n # test_scenario.test_add_set.\n\n if isinstance(key, list) and len(key) == 0:\n return # No elements to add\n\n # Get index names for set *name*, may raise KeyError\n idx_names = self.idx_names(name)\n\n # Check arguments and convert to two lists: keys and comments\n if len(idx_names) == 0:\n # Basic set. Keys must be strings.\n if isinstance(key, (dict, pd.DataFrame)):\n raise TypeError(\n f\"keys for basic set {repr(name)} must be str or list of str; got \"\n f\"{type(key)}\"\n )\n\n # Ensure keys is a list of str\n keys = as_str_list(key)\n else:\n # Set defined over 1+ other sets\n\n # Check for ambiguous arguments\n if comment and isinstance(key, (dict, pd.DataFrame)) and \"comment\" in key:\n raise ValueError(\"ambiguous; both key['comment'] and comment= given\")\n\n if isinstance(key, pd.DataFrame):\n # DataFrame of key values and perhaps comments\n try:\n # Pop a 'comment' column off the DataFrame, convert to list\n comment = key.pop(\"comment\").to_list()\n except KeyError:\n pass\n\n # Convert key to list of list of key values\n keys = []\n for row in key.to_dict(orient=\"records\"):\n keys.append(as_str_list(row, idx_names=idx_names))\n elif isinstance(key, dict):\n # Dict of lists of key values\n\n # Pop a 'comment' list from the dict\n comment = key.pop(\"comment\", None)\n\n # Convert to list of list of key values\n keys = list(map(as_str_list, zip(*[key[i] for i in idx_names])))\n elif isinstance(key[0], str):\n # List of key values; wrap\n keys = [as_str_list(key)]\n elif isinstance(key[0], list):\n # List of lists of key values; convert to list of list of str\n keys = list(map(as_str_list, key))\n elif isinstance(key, str) and len(idx_names) == 1:\n # Bare key given for a 1D set; wrap for convenience\n keys = [[key]]\n else:\n # Other, invalid value\n raise ValueError(key)\n\n # Process comments to a list of str, or let them all be None\n comments = as_str_list(comment) if comment else repeat(None, len(keys))\n\n # Combine iterators to tuples. 
If the lengths are mismatched, the sentinel\n # value 'False' is filled in\n to_add = list(zip_longest(keys, comments, fillvalue=False))\n\n # Check processed arguments\n for e, c in to_add:\n # Check for sentinel values\n if e is False:\n raise ValueError(f\"Comment {repr(c)} without matching key\")\n elif c is False:\n raise ValueError(f\"Key {repr(e)} without matching comment\")\n elif len(idx_names) and len(idx_names) != len(e):\n raise ValueError(\n f\"{len(e)}-D key {repr(e)} invalid for \"\n f\"{len(idx_names)}-D set {name}{repr(idx_names)}\"\n )\n\n # Send to backend\n elements = ((kc[0], None, None, kc[1]) for kc in to_add)\n self._backend(\"item_set_elements\", \"set\", name, elements)\n\n def remove_set(\n self, name: str, key: Union[str, Sequence[str], Dict, pd.DataFrame] = None\n ) -> None:\n \"\"\"Delete set elements or an entire set.\n\n Parameters\n ----------\n name : str\n Name of the set to remove (if `key` is :obj:`None`) or from which to remove\n elements.\n key : :class:`pandas.DataFrame` or list of str, optional\n Elements to be removed from set `name`.\n \"\"\"\n if key is None:\n self._backend(\"delete_item\", \"set\", name)\n else:\n self._backend(\"item_delete_elements\", \"set\", name, self._keys(name, key))\n\n def par_list(self) -> List[str]:\n \"\"\"List all defined parameters.\"\"\"\n return self._backend(\"list_items\", \"par\")\n\n def has_par(self, name: str) -> bool:\n \"\"\"Check whether the scenario has a parameter with that name.\"\"\"\n return name in self.par_list()\n\n def init_par(\n self, name: str, idx_sets: Sequence[str], idx_names: Sequence[str] = None\n ) -> None:\n \"\"\"Initialize a new parameter.\n\n Parameters\n ----------\n name : str\n Name of the parameter.\n idx_sets : sequence of str or str, optional\n Names of sets that index this parameter.\n idx_names : sequence of str or str, optional\n Names of the dimensions indexed by `idx_sets`.\n \"\"\"\n idx_sets = as_str_list(idx_sets) or []\n idx_names = as_str_list(idx_names)\n return self._backend(\"init_item\", \"par\", name, idx_sets, idx_names)\n\n def par(\n self, name: str, filters: Dict[str, Sequence[str]] = None, **kwargs\n ) -> pd.DataFrame:\n \"\"\"Return parameter data.\n\n If `filters` is provided, only a subset of data, matching the filters, is\n returned.\n\n Parameters\n ----------\n name : str\n Name of the parameter\n filters : dict (str -> list of str), optional\n Index names mapped to lists of index set elements. Elements not appearing\n in the respective index set(s) are silently ignored.\n \"\"\"\n if len(kwargs):\n warn(\n \"ignored kwargs to Scenario.par(); will raise TypeError in 4.0\",\n DeprecationWarning,\n )\n return self._backend(\"item_get_elements\", \"par\", name, filters)\n\n def items(\n self, type: ItemType = ItemType.PAR, filters: Dict[str, Sequence[str]] = None\n ) -> Iterable[Tuple[str, Any]]:\n \"\"\"Iterate over model data items.\n\n Parameters\n ----------\n type : ItemType, optional\n Types of items to iterate, e.g. 
:data:`ItemType.PAR` for parameters, the\n only value currently supported.\n filters : dict, optional\n Filters for values along dimensions; same as the `filters` argument to\n :meth:`par`.\n\n Yields\n ------\n (str, object)\n Tuples of item name and data.\n \"\"\"\n if type != ItemType.PAR:\n raise NotImplementedError(\n f\"Scenario.items(type={type}); only ItemType.PAR is supported\"\n )\n\n filters = filters or dict()\n\n names = sorted(self.par_list())\n\n for name in sorted(names):\n idx_names = set(self.idx_names(name))\n if len(filters) and not set(filters.keys()) & idx_names:\n # No overlap between the filters and this item's dimensions\n continue\n\n # Retrieve the data, reducing the filters to only the dimensions of the item\n yield name, self.par(\n name, filters={k: v for k, v in filters.items() if k in idx_names}\n )\n\n def add_par(\n self,\n name: str,\n key_or_data: Union[str, Sequence[str], Dict, pd.DataFrame] = None,\n value=None,\n unit: str = None,\n comment: str = None,\n ) -> None:\n \"\"\"Set the values of a parameter.\n\n Parameters\n ----------\n name : str\n Name of the parameter.\n key_or_data : str or iterable of str or range or dict or\n :class:`pandas.DataFrame`\n Element(s) to be added.\n value : numeric or iterable of numeric, optional\n Values.\n unit : str or iterable of str, optional\n Unit symbols.\n comment : str or iterable of str, optional\n Comment(s) for the added values.\n \"\"\"\n # Number of dimensions in the index of *name*\n idx_names = self.idx_names(name)\n N_dim = len(idx_names)\n\n # Convert valid forms of arguments to pd.DataFrame\n if isinstance(key_or_data, dict):\n # dict containing data\n data = pd.DataFrame.from_dict(key_or_data, orient=\"columns\")\n elif isinstance(key_or_data, pd.DataFrame):\n data = key_or_data.copy()\n if value is not None:\n if \"value\" in data.columns:\n raise ValueError(\"both key_or_data.value and value supplied\")\n else:\n data[\"value\"] = value\n else:\n # One or more keys; convert to a list of strings\n if isinstance(key_or_data, range):\n key_or_data = list(key_or_data)\n keys = self._keys(name, key_or_data)\n\n # Check the type of value\n if isinstance(value, (float, int)):\n # Single value\n\n if N_dim > 1 and len(keys) == N_dim:\n # Ambiguous case: ._key() above returns ['dim_0', 'dim_1'], when we\n # really want [['dim_0', 'dim_1']]\n keys = [keys]\n\n # Use the same value for all keys\n values = [float(value)] * len(keys)\n else:\n # Multiple values\n values = value\n\n data = pd.DataFrame(zip(keys, values), columns=[\"key\", \"value\"])\n if data.isna().any(axis=None):\n raise ValueError(\"Length mismatch between keys and values\")\n\n # Column types\n types = {\n \"key\": str if N_dim == 1 else object,\n \"value\": float,\n \"unit\": str,\n \"comment\": str,\n }\n\n # Further handle each column\n if \"key\" not in data.columns:\n # Form the 'key' column from other columns\n if N_dim > 1 and len(data):\n data[\"key\"] = data.apply(\n partial(as_str_list, idx_names=idx_names), axis=1\n )\n else:\n data[\"key\"] = data[idx_names[0]]\n\n if \"value\" not in data.columns:\n raise ValueError(\"no parameter values supplied\")\n\n if \"unit\" not in data.columns:\n # Broadcast single unit across all values. pandas raises ValueError\n # if *unit* is iterable but the wrong length\n data[\"unit\"] = unit or \"???\"\n\n if \"comment\" not in data.columns:\n if comment:\n # Broadcast single comment across all values. 
pandas raises\n # ValueError if *comment* is iterable but the wrong length\n data[\"comment\"] = comment\n else:\n # Store a 'None' comment\n data[\"comment\"] = None\n types.pop(\"comment\")\n\n # Convert types, generate tuples\n elements = map(\n lambda e: (e.key, e.value, e.unit, e.comment),\n data.astype(types).itertuples(),\n )\n\n # Store\n self._backend(\"item_set_elements\", \"par\", name, elements)\n\n def init_scalar(self, name: str, val: Real, unit: str, comment=None) -> None:\n \"\"\"Initialize a new scalar.\n\n Parameters\n ----------\n name : str\n Name of the scalar\n val : number\n Initial value of the scalar.\n unit : str\n Unit of the scalar.\n comment : str, optional\n Description of the scalar.\n \"\"\"\n self.init_par(name, [], [])\n self.change_scalar(name, val, unit, comment)\n\n def scalar(self, name: str) -> Dict[str, Union[Real, str]]:\n \"\"\"Return the value and unit of a scalar.\n\n Parameters\n ----------\n name : str\n Name of the scalar.\n\n Returns\n -------\n {'value': value, 'unit': unit}\n \"\"\"\n return self._backend(\"item_get_elements\", \"par\", name, None)\n\n def change_scalar(\n self, name: str, val: Real, unit: str, comment: str = None\n ) -> None:\n \"\"\"Set the value and unit of a scalar.\n\n Parameters\n ----------\n name : str\n Name of the scalar.\n val : number\n New value of the scalar.\n unit : str\n New unit of the scalar.\n comment : str, optional\n Description of the change.\n \"\"\"\n self._backend(\n \"item_set_elements\", \"par\", name, [(None, float(val), unit, comment)]\n )\n\n def remove_par(self, name: str, key=None) -> None:\n \"\"\"Remove parameter values or an entire parameter.\n\n Parameters\n ----------\n name : str\n Name of the parameter.\n key : dataframe or key list or concatenated string, optional\n Elements to be removed\n \"\"\"\n if key is None:\n self._backend(\"delete_item\", \"par\", name)\n else:\n self._backend(\"item_delete_elements\", \"par\", name, self._keys(name, key))\n\n def var_list(self) -> List[str]:\n \"\"\"List all defined variables.\"\"\"\n return self._backend(\"list_items\", \"var\")\n\n def has_var(self, name: str) -> bool:\n \"\"\"Check whether the scenario has a variable with that name.\"\"\"\n return name in self.var_list()\n\n def init_var(\n self, name: str, idx_sets: Sequence[str] = None, idx_names: Sequence[str] = None\n ) -> None:\n \"\"\"Initialize a new variable.\n\n Parameters\n ----------\n name : str\n Name of the variable.\n idx_sets : sequence of str or str, optional\n Name(s) of index sets for a 1+-dimensional variable.\n idx_names : sequence of str or str, optional\n Names of the dimensions indexed by `idx_sets`.\n \"\"\"\n idx_sets = as_str_list(idx_sets) or []\n idx_names = as_str_list(idx_names)\n return self._backend(\"init_item\", \"var\", name, idx_sets, idx_names)\n\n def var(self, name: str, filters=None, **kwargs):\n \"\"\"Return a dataframe of (filtered) elements for a specific variable.\n\n Parameters\n ----------\n name : str\n name of the variable\n filters : dict\n index names mapped list of index set elements\n \"\"\"\n return self._backend(\"item_get_elements\", \"var\", name, filters)\n\n def equ_list(self) -> List[str]:\n \"\"\"List all defined equations.\"\"\"\n return self._backend(\"list_items\", \"equ\")\n\n def init_equ(self, name: str, idx_sets=None, idx_names=None) -> None:\n \"\"\"Initialize a new equation.\n\n Parameters\n ----------\n name : str\n Name of the equation.\n idx_sets : sequence of str or str, optional\n Name(s) of index sets for a 
1+-dimensional variable.\n idx_names : sequence of str or str, optional\n Names of the dimensions indexed by `idx_sets`.\n \"\"\"\n idx_sets = as_str_list(idx_sets) or []\n idx_names = as_str_list(idx_names)\n return self._backend(\"init_item\", \"equ\", name, idx_sets, idx_names)\n\n def has_equ(self, name: str) -> bool:\n \"\"\"Check whether the scenario has an equation with that name.\"\"\"\n return name in self.equ_list()\n\n def equ(self, name: str, filters=None, **kwargs) -> pd.DataFrame:\n \"\"\"Return a dataframe of (filtered) elements for a specific equation.\n\n Parameters\n ----------\n name : str\n name of the equation\n filters : dict\n index names mapped list of index set elements\n \"\"\"\n return self._backend(\"item_get_elements\", \"equ\", name, filters)\n\n def clone(\n self,\n model: str = None,\n scenario: str = None,\n annotation: str = None,\n keep_solution: bool = True,\n shift_first_model_year: int = None,\n platform: Platform = None,\n ) -> \"Scenario\":\n \"\"\"Clone the current scenario and return the clone.\n\n If the (`model`, `scenario`) given already exist on the :class:`.Platform`, the\n `version` for the cloned Scenario follows the last existing version. Otherwise,\n the `version` for the cloned Scenario is 1.\n\n .. note::\n :meth:`clone` does not set or alter default versions. This means that a\n clone to new (`model`, `scenario`) names has no default version, and will\n not be returned by :meth:`Platform.scenario_list` unless `default=False` is\n given.\n\n Parameters\n ----------\n model : str, optional\n New model name. If not given, use the existing model name.\n scenario : str, optional\n New scenario name. If not given, use the existing scenario name.\n annotation : str, optional\n Explanatory comment for the clone commit message to the database.\n keep_solution : bool, optional\n If :py:const:`True`, include all timeseries data and the solution (vars and\n equs) from the source scenario in the clone. If :py:const:`False`, only\n include timeseries data marked `meta=True` (see :meth:`.add_timeseries`).\n shift_first_model_year: int, optional\n If given, all timeseries data in the Scenario is omitted from the clone for\n years from `first_model_year` onwards. 
Timeseries data with the `meta` flag\n (see :meth:`.add_timeseries`) are cloned for all years.\n platform : :class:`Platform`, optional\n Platform to clone to (default: current platform)\n \"\"\"\n if shift_first_model_year is not None:\n if keep_solution:\n log.warning(\"Override keep_solution=True for shift_first_model_year\")\n keep_solution = False\n\n platform = platform or self.platform\n model = model or self.model\n scenario = scenario or self.scenario\n\n args = [platform, model, scenario, annotation, keep_solution]\n if check_year(shift_first_model_year, \"first_model_year\"):\n args.append(shift_first_model_year)\n\n return self._backend(\"clone\", *args)\n\n def has_solution(self) -> bool:\n \"\"\"Return :obj:`True` if the Scenario contains model solution data.\"\"\"\n return self._backend(\"has_solution\")\n\n def remove_solution(self, first_model_year: int = None) -> None:\n \"\"\"Remove the solution from the scenario.\n\n This function removes the solution (variables and equations) and timeseries\n data marked as `meta=False` from the scenario (see :meth:`.add_timeseries`).\n\n Parameters\n ----------\n first_model_year: int, optional\n If given, timeseries data marked as `meta=False` is removed only for years\n from `first_model_year` onwards.\n\n Raises\n ------\n ValueError\n If Scenario has no solution or if `first_model_year` is not `int`.\n \"\"\"\n if self.has_solution():\n check_year(first_model_year, \"first_model_year\")\n self._backend(\"clear_solution\", first_model_year)\n else:\n raise ValueError(\"This Scenario does not have a solution!\")\n\n def solve(\n self,\n model: str = None,\n callback: Callable = None,\n cb_kwargs: Dict[str, Any] = {},\n **model_options,\n ) -> None:\n \"\"\"Solve the model and store output.\n\n ixmp 'solves' a model by invoking the run() method of a :class:`.Model`\n subclass—for instance, :meth:`.GAMSModel.run`. Depending on the underlying\n model code, different steps are taken; see each model class for details. In\n general:\n\n 1. Data from the Scenario are written to a **model input file**.\n 2. Code or an external program is invoked to perform calculations or\n optimizations, **solving the model**.\n 3. Data representing the model outputs or solution are read from a **model\n output file** and stored in the Scenario.\n\n If the optional argument `callback` is given, additional steps are performed:\n\n 4. Execute the `callback` with the Scenario as an argument. The Scenario has an\n `iteration` attribute that stores the number of times the underlying model\n has been solved (#2).\n 5. If the `callback` returns :obj:`False` or similar, iterate by repeating from\n step #1. Otherwise, exit.\n\n Parameters\n ----------\n model : str\n model (e.g., MESSAGE) or GAMS file name (excluding '.gms')\n callback : callable, optional\n Method to execute arbitrary non-model code. Must accept a single argument:\n the Scenario. Must return a non-:obj:`False` value to indicate convergence.\n cb_kwargs : dict, optional\n Keyword arguments to pass to `callback`.\n model_options :\n Keyword arguments specific to the `model`. See :class:`.GAMSModel`.\n\n Warns\n -----\n UserWarning\n If `callback` is given and returns :obj:`None`. 
This may indicate that the\n user has forgotten a ``return`` statement, in which case the iteration will\n continue indefinitely.\n\n Raises\n ------\n ValueError\n If the Scenario has already been solved.\n \"\"\"\n if self.has_solution():\n raise ValueError(\n \"Scenario contains a model solution; call .remove_solution() before \"\n \"solve()\"\n )\n\n # Instantiate a model\n model_obj = get_model(model or self.scheme, **model_options)\n\n # Validate `callback`\n if callback is not None:\n if not callable(callback):\n raise ValueError(f\"callback={repr(callback)} is not callable\")\n cb = callback\n else:\n\n def cb(scenario, **kwargs):\n return True\n\n # Flag to warn if the *callback* appears not to return anything\n warn_none = True\n\n # Iterate until convergence\n while True:\n model_obj.run(self)\n\n # Store an iteration number to help the callback\n if not hasattr(self, \"iteration\"):\n self.iteration = 0\n\n self.iteration += 1\n\n # Invoke the callback\n cb_result = cb(self, **cb_kwargs)\n\n if cb_result is None and warn_none:\n warn(\n \"solve(callback=...) argument returned None; will loop \"\n \"indefinitely unless True is returned.\"\n )\n # Don't repeat the warning\n warn_none = False\n\n if cb_result:\n # Callback indicates convergence is reached\n break\n\n # Input and output\n def to_excel(\n self,\n path: PathLike,\n items: ItemType = ItemType.SET | ItemType.PAR,\n filters: Dict[str, Union[Sequence[str], \"Scenario\"]] = None,\n max_row: int = None,\n ) -> None:\n \"\"\"Write Scenario to a Microsoft Excel file.\n\n Parameters\n ----------\n path : os.PathLike\n File to write. Must have suffix :file:`.xlsx`.\n items : ItemType, optional\n Types of items to write. Either :attr:`.SET` | :attr:`.PAR` (i.e. only sets\n and parameters), or :attr:`.MODEL` (also variables and equations, i.e.\n model solution data).\n filters : dict, optional\n Filters for values along dimensions; same as the `filters` argument to\n :meth:`par`.\n max_row: int, optional\n Maximum number of rows in each sheet. If the number of elements in an item\n exceeds this number or :data:`.EXCEL_MAX_ROWS`, then an item is written to\n multiple sheets named, e.g. 'foo', 'foo(2)', 'foo(3)', etc.\n\n See also\n --------\n :ref:`excel-data-format`\n read_excel\n \"\"\"\n # Default filters: empty dict\n filters = filters or dict()\n\n # Select the current scenario\n filters[\"scenario\"] = self\n\n # Invoke the backend method\n self.platform._backend.write_file(\n Path(path), items, filters=filters, max_row=max_row\n )\n\n def read_excel(\n self,\n path: PathLike,\n add_units: bool = False,\n init_items: bool = False,\n commit_steps: bool = False,\n ) -> None:\n \"\"\"Read a Microsoft Excel file into the Scenario.\n\n Parameters\n ----------\n path : os.PathLike\n File to read. Must have suffix '.xlsx'.\n add_units : bool, optional\n Add missing units, if any, to the Platform instance.\n init_items : bool, optional\n Initialize sets and parameters that do not already exist in the Scenario.\n commit_steps : bool, optional\n Commit changes after every data addition.\n\n See also\n --------\n :ref:`excel-data-format`\n .TimeSeries.read_file\n to_excel\n \"\"\"\n self.platform._backend.read_file(\n Path(path),\n ItemType.MODEL,\n filters=dict(scenario=self),\n add_units=add_units,\n init_items=init_items,\n commit_steps=commit_steps,\n )\n"
] |
[
[
"pandas.read_csv",
"pandas.testing.assert_frame_equal",
"pandas.DataFrame"
],
[
"pandas.DataFrame.from_dict"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
r-sendra/SpikeSwarmSim
|
[
"a5bd71cb93df0963588640c5d44b3891fa07457c"
] |
[
"spike_swarm_sim/utils/decorators.py"
] |
[
"import time\r\nimport logging \r\nfrom functools import wraps\r\nfrom collections import deque\r\nimport numpy as np\r\nfrom spike_swarm_sim.globals import global_states\r\n\r\ndef time_elapsed(func):\r\n \"\"\" Computes the amount of time that a function elapses.\r\n Only works in DEBUG mode.\r\n \"\"\"\r\n @wraps(func)\r\n def wrapper(*args, **kwargs):\r\n t0 = time.time()\r\n result = func(*args, **kwargs)\r\n logging.info('Function {} elapsed {}.'.format(func.__qualname__, round(time.time() - t0, 4)))\r\n return result\r\n return wrapper\r\n\r\n\r\ndef mov_average_timeit(func):\r\n \"\"\" Computes the mean time that a function elapses using 50 buffered samples.\r\n It logs the mean every 50 executions and only works in DEBUG mode. \r\n \"\"\"\r\n times_buffer = deque([])\r\n @wraps(func)\r\n def wrapper(*args, **kwargs):\r\n t0 = time.time()\r\n result = func(*args, **kwargs)\r\n if global_states.DEBUG:\r\n if len(times_buffer) == 50:\r\n times_buffer.appendleft(time.time() - t0)\r\n mean_time = np.mean(times_buffer)\r\n std_time = np.std(times_buffer)\r\n logging.debug('Function {} mean elapsed time is {} (50 samples).'\\\r\n .format(func.__qualname__, round(mean_time, 4)))\r\n times_buffer.clear()\r\n else:\r\n times_buffer.appendleft(time.time() - t0)\r\n return result\r\n return wrapper\r\n\r\ndef increase_time(func):\r\n \"\"\" Decorator that increases a time or counter variable of a class. \r\n The variable name must be called t.\r\n \"\"\"\r\n @wraps(func)\r\n def wrapper(*args, **kwargs):\r\n result = func(*args, **kwargs)\r\n args[0].t += 1\r\n return result\r\n return wrapper"
] |
[
[
"numpy.std",
"numpy.mean"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
natasasdj/Deep-SVDD-PyTorch
|
[
"0c9e0003acd4c3fe42505984c0d92b8dec46e9fc"
] |
[
"src/utils/collect_results.py"
] |
[
"import json\nimport numpy as np\n\n\nbase_path = '/Users/lukasruff/Repos/Deep-SVDD-PyTorch/log/mnist/test/mnist/soft_deepSVDD'\nn_exps = 3\nn_seeds = 3\n\nexps = range(n_exps)\nseeds = range(1, n_seeds)\n\nfor exp in exps:\n\n exp_folder = str(exp) + 'vsall'\n aucs = np.zeros(n_seeds, dtype=np.float32)\n\n for seed in seeds:\n\n seed_folder = 'seed_' + str(seed)\n file_name = 'results.json'\n file_path = base_path + '/' + exp_folder + '/' + seed_folder + '/' + file_name\n\n with open(file_path, 'r') as fp:\n results = json.load(fp)\n\n aucs[seed - 1] = results['test_auc']\n\n mean = np.mean(aucs[aucs > 0])\n std = np.std(aucs[aucs > 0])\n\n # Write results\n log_file = '{}/result.txt'.format(base_path)\n log = open(log_file, 'a')\n log.write('Experiment: {}\\n'.format(exp_folder))\n log.write('Test Set AUC [mean]: {} %\\n'.format(round(float(mean * 100), 4)))\n log.write('Test Set AUC [std]: {} %\\n'.format(round(float(std * 100), 4)))\n log.write('\\n')\n\nlog.write('\\n')\nlog.close()\n"
] |
[
[
"numpy.std",
"numpy.zeros",
"numpy.mean"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
rpwils/data-analytics-tools
|
[
"ed68648b2a8698a473e656de4149c15dce64663f"
] |
[
"data_analytics_tools/tests/conftest.py"
] |
[
"import pytest\nimport pandas as pd\nimport os\nfrom distutils import dir_util\n\n\[email protected]()\ndef valid_df():\n check_data = [[\"jjety\", \"AB9960\", \"response1\", \"response2\", \"Sun Nov 19 23:59:59 2018\", pd.to_datetime(\"2018-11-20 00:00:00\")],\n [\"jjety\", \"AB9958\", \"response1\", \"response2\", \"Sun Nov 19 23:56:59 2018\", pd.to_datetime(\"2018-11-19 23:55:00\")],\n [\"hqoh\", \"AB9953\", \"response1\", \"response2\", \"Sun Nov 19 23:29:59 2018\", pd.to_datetime(\"2018-11-19 23:30:00\")],\n [\"hhawe\", \"AB8769\", \"response1\", \"response2\", \"Sun Nov 19 23:20:01 2018\", pd.to_datetime(\"2018-11-19 23:20:00\")]]\n\n return pd.DataFrame(check_data, columns=['userID', 'studyID', \"getData.response1\", \"getData.response2\", \"time\", \"roundedTime\"])\n\n\[email protected]()\ndef current_path_load():\n return os.path.dirname(os.path.realpath(__file__)) + \"/load\"\n\[email protected]()\ndef current_path_clean():\n return os.path.dirname(os.path.realpath(__file__)) + \"/clean\"\n"
] |
[
[
"pandas.to_datetime",
"pandas.DataFrame"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"1.3",
"0.19",
"1.1",
"1.5",
"0.24",
"0.20",
"1.0",
"0.25",
"1.2"
],
"scipy": [],
"tensorflow": []
}
] |
Bob620/xraylarch
|
[
"f8d38e6122cc0e8c990b0f024db3b503a5fbf057"
] |
[
"larch/xrd/cifdb.py"
] |
[
"#!/usr/bin/env python\n'''\nbuild American Mineralogist Crystal Structure Databse (amcsd)\n'''\n\nimport os\nimport requests\n\nimport numpy as np\nimport six\n\nfrom itertools import groupby\nfrom distutils.version import StrictVersion\n\nimport larch\nfrom .xrd_fitting import peaklocater\nfrom .xrd_cif import create_cif, SPACEGROUPS\nfrom .xrd_tools import lambda_from_E\n\nimport json\nfrom larch.utils.jsonutils import encode4js, decode4js\n\nfrom sqlalchemy import (create_engine, MetaData, Table, Column, Integer,\n String, Unicode, PrimaryKeyConstraint,\n ForeignKeyConstraint, ForeignKey, Numeric, func,\n and_, or_, not_, tuple_)\n\nfrom sqlalchemy.ext.declarative import declarative_base\nfrom sqlalchemy.orm import sessionmaker, relationship\nfrom sqlalchemy.pool import SingletonThreadPool\n\nSYMMETRIES = ['triclinic', 'monoclinic', 'orthorhombic', 'tetragonal',\n 'trigonal', 'hexagonal', 'cubic']\n\nELEMENTS = [['1', 'Hydrogen', 'H'], ['2', 'Helium', 'He'], ['3', 'Lithium', 'Li'],\n ['4', 'Beryllium', 'Be'], ['5', 'Boron', 'B'], ['6', 'Carbon', 'C'],\n ['7', 'Nitrogen', 'N'], ['8', 'Oxygen', 'O'], ['9', 'Fluorine', 'F'],\n ['10', 'Neon', 'Ne'], ['11', 'Sodium', 'Na'], ['12', 'Magnesium', 'Mg'],\n ['13', 'Aluminum', 'Al'], ['14', 'Silicon', 'Si'], ['15', 'Phosphorus', 'P'],\n ['16', 'Sulfur', 'S'], ['17', 'Chlorine', 'Cl'], ['18', 'Argon', 'Ar'],\n ['19', 'Potassium', 'K'], ['20', 'Calcium', 'Ca'], ['21', 'Scandium', 'Sc'],\n ['22', 'Titanium', 'Ti'], ['23', 'Vanadium', 'V'], ['24', 'Chromium', 'Cr'],\n ['25', 'Manganese', 'Mn'], ['26', 'Iron', 'Fe'], ['27', 'Cobalt', 'Co'],\n ['28', 'Nickel', 'Ni'], ['29', 'Copper', 'Cu'], ['30', 'Zinc', 'Zn'],\n ['31', 'Gallium', 'Ga'], ['32', 'Germanium', 'Ge'], ['33', 'Arsenic', 'As'],\n ['34', 'Selenium', 'Se'], ['35', 'Bromine', 'Br'], ['36', 'Krypton', 'Kr'],\n ['37', 'Rubidium', 'Rb'], ['38', 'Strontium', 'Sr'], ['39', 'Yttrium', 'Y'],\n ['40', 'Zirconium', 'Zr'], ['41', 'Niobium', 'Nb'], ['42', 'Molybdenum', 'Mo'],\n ['43', 'Technetium', 'Tc'], ['44', 'Ruthenium', 'Ru'], ['45', 'Rhodium', 'Rh'],\n ['46', 'Palladium', 'Pd'], ['47', 'Silver', 'Ag'], ['48', 'Cadmium', 'Cd'],\n ['49', 'Indium', 'In'], ['50', 'Tin', 'Sn'], ['51', 'Antimony', 'Sb'],\n ['52', 'Tellurium', 'Te'], ['53', 'Iodine', 'I'], ['54', 'Xenon', 'Xe'],\n ['55', 'Cesium', 'Cs'], ['56', 'Barium', 'Ba'], ['57', 'Lanthanum', 'La'],\n ['58', 'Cerium', 'Ce'], ['59', 'Praseodymium', 'Pr'], ['60', 'Neodymium', 'Nd'],\n ['61', 'Promethium', 'Pm'], ['62', 'Samarium', 'Sm'], ['63', 'Europium', 'Eu'],\n ['64', 'Gadolinium', 'Gd'], ['65', 'Terbium', 'Tb'], ['66', 'Dysprosium', 'Dy'],\n ['67', 'Holmium', 'Ho'], ['68', 'Erbium', 'Er'], ['69', 'Thulium', 'Tm'],\n ['70', 'Ytterbium', 'Yb'], ['71', 'Lutetium', 'Lu'], ['72', 'Hafnium', 'Hf'],\n ['73', 'Tantalum', 'Ta'], ['74', 'Tungsten', 'W'], ['75', 'Rhenium', 'Re'],\n ['76', 'Osmium', 'Os'], ['77', 'Iridium', 'Ir'], ['78', 'Platinum', 'Pt'],\n ['79', 'Gold', 'Au'], ['80', 'Mercury', 'Hg'], ['81', 'Thallium', 'Tl'],\n ['82', 'Lead', 'Pb'], ['83', 'Bismuth', 'Bi'], ['84', 'Polonium', 'Po'],\n ['85', 'Astatine', 'At'], ['86', 'Radon', 'Rn'], ['87', 'Francium', 'Fr'],\n ['88', 'Radium', 'Ra'], ['89', 'Actinium', 'Ac'], ['90', 'Thorium', 'Th'],\n ['91', 'Protactinium', 'Pa'], ['92', 'Uranium', 'U'], ['93', 'Neptunium', 'Np'],\n ['94', 'Plutonium', 'Pu'], ['95', 'Americium', 'Am'], ['96', 'Curium', 'Cm'],\n ['97', 'Berkelium', 'Bk'], ['98', 'Californium', 'Cf'], ['99', 'Einsteinium', 'Es'],\n ['100', 'Fermium', 'Fm'], ['101', 'Mendelevium', 'Md'], 
['102', 'Nobelium', 'No'],\n ['103', 'Lawrencium', 'Lr'], ['104', 'Rutherfordium', 'Rf'], ['105', 'Dubnium', 'Db'],\n ['106', 'Seaborgium', 'Sg'], ['107', 'Bohrium', 'Bh'], ['108', 'Hassium', 'Hs'],\n ['109', 'Meitnerium', 'Mt'], ['110', 'Darmstadtium', 'Ds'], ['111', 'Roentgenium', 'Rg'],\n ['112', 'Ununbium', 'Uub'], ['113', 'Ununtrium', 'Uut'], ['114', 'Ununquadium', 'Uuq'],\n ['115', 'Ununpentium', 'Uup'], ['116', 'Ununhexium', 'Uuh'], ['117', 'Ununseptium', 'Uus'],\n ['118', 'Ununoctium', 'Uuo']]\n\nCATEGORIES = ['soil',\n 'salt',\n 'clay']\n\nQMIN = 0.2\nQMAX = 10.0\nQSTEP = 0.01\nQAXIS = np.arange(QMIN, QMAX+QSTEP, QSTEP)\n\nENERGY = 19000 ## units eV\n_cifdb = None\n\ndef get_cifdb(dbname='amcsd_cif.db', _larch=None):\n global _cifdb\n if _cifdb is None:\n _cifdb = cifDB(dbname=dbname)\n if _larch is not None:\n symname = '_xray._cifdb'\n if not _larch.symtable.has_symbol(symname):\n _larch.symtable.set_symbol(symname, _cifdb)\n return _cifdb\n\ndef make_engine(dbname):\n return create_engine('sqlite:///%s' % (dbname),\n poolclass=SingletonThreadPool)\n\ndef iscifDB(dbname):\n '''\n test if a file is a valid scan database:\n must be a sqlite db file, with tables named according to _tables\n '''\n _tables = ('ciftbl',\n 'elemtbl',\n 'nametbl',\n #'formtbl',\n 'spgptbl',\n 'symtbl',\n 'authtbl',\n 'qtbl',\n 'cattbl',\n 'symref',\n #'compref',\n #'qref',\n 'authref',\n 'catref')\n result = False\n try:\n engine = make_engine(dbname)\n meta = MetaData(engine)\n meta.reflect()\n result = all([t in meta.tables for t in _tables])\n except:\n pass\n return result\n\n\nclass cifDB(object):\n '''\n interface to the American Mineralogist Crystal Structure Database\n '''\n def __init__(self, dbname=None, read_only=True,verbose=False):\n\n ## This needs to be modified for creating new if does not exist.\n self.version = '0.0.2'\n self.dbname = dbname\n if verbose:\n print('\\n\\n================ %s ================\\n' % self.dbname)\n if not os.path.exists(self.dbname):\n parent, child = os.path.split(__file__)\n self.dbname = os.path.join(parent, self.dbname)\n if not os.path.exists(self.dbname):\n print(\"File '%s' not found; building a new database!\" % self.dbname)\n self.create_cifdb(name=self.dbname)\n else:\n if not iscifDB(self.dbname):\n raise ValueError(\"'%s' is not a valid cif database file!\" % self.dbname)\n\n self.dbname = self.dbname\n self.engine = make_engine(self.dbname)\n self.conn = self.engine.connect()\n kwargs = {}\n if read_only:\n kwargs = {'autoflush': True, 'autocommit':False}\n def readonly_flush(*args, **kwargs):\n return\n self.session = sessionmaker(bind=self.engine, **kwargs)()\n self.session.flush = readonly_flush\n else:\n self.session = sessionmaker(bind=self.engine, **kwargs)()\n\n self.metadata = MetaData(self.engine)\n self.metadata.reflect()\n tables = self.tables = self.metadata.tables\n\n ## Load tables\n self.elemtbl = Table('elemtbl', self.metadata)\n self.nametbl = Table('nametbl', self.metadata)\n self.formtbl = Table('formtbl', self.metadata)\n self.spgptbl = Table('spgptbl', self.metadata)\n self.symtbl = Table('symtbl', self.metadata)\n self.authtbl = Table('authtbl', self.metadata)\n self.qtbl = Table('qtbl', self.metadata)\n self.cattbl = Table('cattbl', self.metadata)\n\n self.symref = Table('symref', self.metadata)\n self.compref = Table('compref', self.metadata)\n self.qref = Table('qref', self.metadata)\n self.authref = Table('authref', self.metadata)\n self.catref = Table('catref', self.metadata)\n\n self.ciftbl = Table('ciftbl', 
self.metadata)\n\n self.axis = np.array([float(q[0]) for q in self.query(self.qtbl.c.q).all()])\n\n\n def query(self, *args, **kws):\n \"generic query\"\n return self.session.query(*args, **kws)\n\n def close(self):\n \"close session\"\n self.session.flush()\n self.session.close()\n\n def create_cifdb(self,name=None,verbose=False):\n\n if name is None:\n self.dbname = 'amcsd%02d.db'\n counter = 0\n while os.path.exists(self.dbname % counter):\n counter += 1\n self.dbname = self.dbname % counter\n else:\n self.dbname = name\n\n self.open_database()\n\n ###################################################\n ## Look up tables\n elemtbl = Table('elemtbl', self.metadata,\n Column('z', Integer, primary_key=True),\n Column('element_name', String(40), unique=True, nullable=True),\n Column('element_symbol', String(2), unique=True, nullable=False)\n )\n nametbl = Table('nametbl', self.metadata,\n Column('mineral_id', Integer, primary_key=True),\n Column('mineral_name', String(30), unique=True, nullable=True)\n )\n formtbl = Table('formtbl', self.metadata,\n Column('formula_id', Integer, primary_key=True),\n Column('formula_name', String(30), unique=True, nullable=True)\n )\n spgptbl = Table('spgptbl', self.metadata,\n Column('iuc_id', Integer),\n Column('hm_notation', String(16), unique=True, nullable=True),\n PrimaryKeyConstraint('iuc_id', 'hm_notation')\n )\n symtbl = Table('symtbl', self.metadata,\n Column('symmetry_id', Integer, primary_key=True),\n Column('symmetry_name', String(16), unique=True, nullable=True)\n )\n authtbl = Table('authtbl', self.metadata,\n Column('author_id', Integer, primary_key=True),\n Column('author_name', String(40), unique=True, nullable=True)\n )\n qtbl = Table('qtbl', self.metadata,\n Column('q_id', Integer, primary_key=True),\n #Column('q', Float()) ## how to make this work? 
mkak 2017.02.14\n Column('q', String())\n )\n cattbl = Table('cattbl', self.metadata,\n Column('category_id', Integer, primary_key=True),\n Column('category_name', String(16), unique=True, nullable=True)\n )\n ###################################################\n ## Cross-reference tables\n symref = Table('symref', self.metadata,\n Column('iuc_id', None, ForeignKey('spgptbl.iuc_id')),\n Column('symmetry_id', None, ForeignKey('symtbl.symmetry_id')),\n PrimaryKeyConstraint('iuc_id', 'symmetry_id')\n )\n compref = Table('compref', self.metadata,\n Column('z', None, ForeignKey('elemtbl.z')),\n Column('amcsd_id', None, ForeignKey('ciftbl.amcsd_id')),\n PrimaryKeyConstraint('z', 'amcsd_id')\n )\n qref = Table('qref', self.metadata,\n Column('q_id', None, ForeignKey('qtbl.q_id')),\n Column('amcsd_id', None, ForeignKey('ciftbl.amcsd_id')),\n PrimaryKeyConstraint('q_id', 'amcsd_id')\n )\n authref = Table('authref', self.metadata,\n Column('author_id', None, ForeignKey('authtbl.author_id')),\n Column('amcsd_id', None, ForeignKey('ciftbl.amcsd_id')),\n PrimaryKeyConstraint('author_id', 'amcsd_id')\n )\n catref = Table('catref', self.metadata,\n Column('category_id', None, ForeignKey('cattbl.category_id')),\n Column('amcsd_id', None, ForeignKey('ciftbl.amcsd_id')),\n PrimaryKeyConstraint('category_id', 'amcsd_id')\n )\n ###################################################\n ## Main table\n ciftbl = Table('ciftbl', self.metadata,\n Column('amcsd_id', Integer, primary_key=True),\n Column('mineral_id', Integer),\n Column('formula_id', Integer),\n Column('iuc_id', ForeignKey('spgptbl.iuc_id')),\n Column('a', String(5)),\n Column('b', String(5)),\n Column('c', String(5)),\n Column('alpha', String(5)),\n Column('beta', String(5)),\n Column('gamma', String(5)),\n Column('cif', String(25)), ## , nullable=True\n Column('zstr',String(25)),\n Column('qstr',String(25)),\n Column('url',String(25))\n )\n ###################################################\n ## Add all to file\n self.metadata.create_all() ## if not exists function (callable when exists)\n\n ###################################################\n ## Define 'add/insert' functions for each table\n def_elem = elemtbl.insert()\n def_name = nametbl.insert()\n def_form = formtbl.insert()\n def_spgp = spgptbl.insert()\n def_sym = symtbl.insert()\n def_auth = authtbl.insert()\n def_q = qtbl.insert()\n def_cat = cattbl.insert()\n\n add_sym = symref.insert()\n add_comp = compref.insert()\n add_q = qref.insert()\n add_auth = authref.insert()\n add_cat = catref.insert()\n\n new_cif = ciftbl.insert()\n\n\n ###################################################\n ## Populate the fixed tables of the database\n\n ## Adds all elements into database\n for element in ELEMENTS:\n z, name, symbol = element\n def_elem.execute(z=int(z), element_name=name, element_symbol=symbol)\n\n ## Adds all crystal symmetries\n for symmetry_id,symmetry in enumerate(SYMMETRIES):\n def_sym.execute(symmetry_name=symmetry.strip())\n if symmetry.strip() == 'triclinic': ## triclinic : 1 - 2\n for iuc_id in range(1,2+1):\n add_sym.execute(iuc_id=iuc_id,symmetry_id=(symmetry_id+1))\n elif symmetry.strip() == 'monoclinic': ## monoclinic : 3 - 15\n for iuc_id in range(3,15+1):\n add_sym.execute(iuc_id=iuc_id,symmetry_id=(symmetry_id+1))\n elif symmetry.strip() == 'orthorhombic': ## orthorhombic : 16 - 74\n for iuc_id in range(16,74+1):\n add_sym.execute(iuc_id=iuc_id,symmetry_id=(symmetry_id+1))\n elif symmetry.strip() == 'tetragonal': ## tetragonal : 75 - 142\n for iuc_id in range(75,142+1):\n 
add_sym.execute(iuc_id=iuc_id,symmetry_id=(symmetry_id+1))\n elif symmetry.strip() == 'trigonal': ## trigonal : 143 - 167\n for iuc_id in range(143,167+1):\n add_sym.execute(iuc_id=iuc_id,symmetry_id=(symmetry_id+1))\n elif symmetry.strip() == 'hexagonal': ## hexagonal : 168 - 194\n for iuc_id in range(168,194+1):\n add_sym.execute(iuc_id=iuc_id,symmetry_id=(symmetry_id+1))\n elif symmetry.strip() == 'cubic': ## cubic : 195 - 230\n for iuc_id in range(195,230+1):\n add_sym.execute(iuc_id=iuc_id,symmetry_id=(symmetry_id+1))\n\n for cat in CATEGORIES:\n def_cat.execute(category_name=cat)\n\n ## Adds qrange\n for q in QAXIS:\n def_q.execute(q='%0.2f' % q)\n\n ## Adds all space groups\n for spgrp_no in SPACEGROUPS.keys():\n for spgrp_name in SPACEGROUPS[spgrp_no]:\n try:\n def_spgp.execute(iuc_id=spgrp_no,hm_notation=spgrp_name)\n except:\n if verbose:\n print('Duplicate: %s %s' % (spgrp_no,spgrp_name))\n pass\n\n\n def __add_space_groups(self):\n ## Add missing space groups\n for spgrp_no in SPACEGROUPS.keys():\n for spgrp_name in SPACEGROUPS[spgrp_no]:\n match = False\n search_spgrp = self.spgptbl.select(self.spgptbl.c.hm_notation == spgrp_name)\n for row in search_spgrp.execute():\n match = True\n if match is False:\n print('Adding: %s %s' % (spgrp_no,spgrp_name))\n self.spgptbl.insert().execute(iuc_id=spgrp_no,hm_notation=spgrp_name)\n\n def add_ciffile(self, ciffile, verbose=True, url=False, ijklm=1, file=None):\n '''\n ## Adds ciffile into database\n When reading in new CIF:\n i. put entire cif into field\n ii. read _database_code_amcsd; write 'amcsd_id' to 'cif data'\n iii. read _chemical_name_mineral; find/add in' minerallist'; write\n 'mineral_id' to 'cif data'\n iv. read _symmetry_space_group_name_H-M - find in 'spacegroup'; write\n iuc_id to 'cif data'\n v. read author name(s) - find/add in 'authorlist'; write 'author_id',\n 'amcsd_id' to 'authref'\n vi. read _chemical_formula_sum; write 'z' (atomic no.), 'amcsd_id'\n to 'compref'\n vii. calculate q - find each corresponding 'q_id' for all peaks; in write\n 'q_id','amcsd_id' to 'qpeak'\n '''\n\n if url:\n cifstr = requests.get(ciffile).text\n else:\n with open(ciffile,'r') as file:\n cifstr = str(file.read())\n cif = create_cif(text=cifstr)\n\n if cif.id_no is None:\n cif_no = 99999\n search_cif = self.query(self.ciftbl.c.amcsd_id).filter(self.ciftbl.c.amcsd_id == cif_no).all()\n cnt_lp = 0\n while len(search_cif) > 0:\n cif_no += 1\n cnt_lp += 1\n search_cif = self.query(self.ciftbl.c.amcsd_id).filter(self.ciftbl.c.amcsd_id == cif_no).all()\n if cnt_lp > 500: ## safe guards against infinite loop\n print(' *** too many loops to find unassigned AMCSD number.')\n return\n print(' *** Assigning unnumbered CIF to AMCSD %i' % cif_no)\n cif.id_no = cif_no\n\n ## check for amcsd in file already\n ## Find amcsd_id in database\n self.ciftbl = Table('ciftbl', self.metadata)\n search_cif = self.ciftbl.select(self.ciftbl.c.amcsd_id == cif.id_no)\n for row in search_cif.execute():\n if verbose:\n if url:\n print('AMCSD %i already exists in database.\\n' % cif.id_no)\n else:\n print('%s: AMCSD %i already exists in database %s.' 
%\n (os.path.split(ciffile)[-1],cif.id_no,self.dbname))\n return\n\n ## Define q-array for each entry at given energy\n qhkl = cif.calc_q(wvlgth=lambda_from_E(ENERGY), q_min=QMIN, q_max=QMAX)\n qarr = self.create_q_array(qhkl)\n\n ###################################################\n def_name = self.nametbl.insert()\n def_form = self.formtbl.insert()\n def_spgp = self.spgptbl.insert()\n def_sym = self.symtbl.insert()\n def_auth = self.authtbl.insert()\n def_q = self.qtbl.insert()\n def_cat = self.cattbl.insert()\n add_sym = self.symref.insert()\n add_comp = self.compref.insert()\n add_q = self.qref.insert()\n add_auth = self.authref.insert()\n add_cat = self.catref.insert()\n new_cif = self.ciftbl.insert()\n\n ## Find mineral_name\n match = False\n search_mineral = self.nametbl.select(self.nametbl.c.mineral_name == cif.label)\n for row in search_mineral.execute():\n mineral_id = row.mineral_id\n match = True\n if match is False:\n def_name.execute(mineral_name=cif.label)\n search_mineral = self.nametbl.select(self.nametbl.c.mineral_name == cif.label)\n for row in search_mineral.execute():\n mineral_id = row.mineral_id\n\n ## Find formula_name\n match = False\n search_formula = self.formtbl.select(self.formtbl.c.formula_name == cif.formula)\n for row in search_formula.execute():\n formula_id = row.formula_id\n match = True\n if match is False:\n def_form.execute(formula_name=cif.formula)\n search_formula = self.formtbl.select(self.formtbl.c.formula_name == cif.formula)\n for row in search_formula.execute():\n formula_id = row.formula_id\n\n ## Find composition (loop over all elements)\n z_list = []\n for element in set(cif.atom.label):\n search_elements = self.elemtbl.select(self.elemtbl.c.element_symbol == element)\n for row in search_elements.execute():\n z_list += [row.z]\n zarr = self.create_z_array(z_list)\n\n\n ## Save CIF entry into database\n new_cif.execute(amcsd_id=cif.id_no,\n mineral_id=int(mineral_id),\n formula_id=int(formula_id),\n iuc_id=cif.symmetry.no,\n a=str(cif.unitcell[0]),\n b=str(cif.unitcell[1]),\n c=str(cif.unitcell[2]),\n alpha=str(cif.unitcell[3]),\n beta=str(cif.unitcell[4]),\n gamma=str(cif.unitcell[5]),\n cif=cifstr,\n zstr=json.dumps(zarr.tolist(),default=str),\n qstr=json.dumps(qarr.tolist(),default=str),\n url=str(ciffile))\n\n ## Build q cross-reference table\n for q in qhkl:\n search_q = self.qtbl.select(self.qtbl.c.q == '%0.2f' % (int(q * 100) / 100.))\n for row in search_q.execute():\n q_id = row.q_id\n\n try:\n add_q.execute(q_id=q_id,amcsd_id=cif.id_no)\n except:\n pass\n\n\n ## Build composition cross-reference table\n for element in set(cif.atom.label):\n search_elements = self.elemtbl.select(self.elemtbl.c.element_symbol == element)\n for row in search_elements.execute():\n z = row.z\n\n try:\n add_comp.execute(z=z, amcsd_id=cif.id_no)\n except:\n print('could not find element: %s (amcsd: %i)' % (element,cif.id_no))\n pass\n\n ## Find author_name\n for author_name in cif.publication.author:\n match = False\n search_author = self.authtbl.select(self.authtbl.c.author_name == author_name)\n for row in search_author.execute():\n author_id = row.author_id\n match = True\n if match is False:\n def_auth.execute(author_name=author_name)\n search_author = self.authtbl.select(self.authtbl.c.author_name == author_name)\n for row in search_author.execute():\n author_id = row.author_id\n match = True\n if match == True:\n add_auth.execute(author_id=author_id,\n amcsd_id=cif.id_no)\n\n # ## not ready for defined categories\n # 
cif_category.execute(category_id='none',\n # amcsd_id=cif.id_no)\n\n if url:\n self.amcsd_info(cif.id_no, no_qpeaks=np.sum(qarr))\n else:\n self.amcsd_info(cif.id_no, no_qpeaks=np.sum(qarr),ciffile=ciffile)\n\n def url_to_cif(self, url=None, verbose=False, savecif=False, addDB=True,\n all=False, minval=None):\n\n maxi = 20573\n exceptions = [0,7271,10783,14748,15049,15050,15851,18368,\n 18449,18450,18451,18452,18453,20029]\n\n ## ALL CAUSE FAILURE IN CIFFILE FUNCTION:\n ## 7271 : author name doubled in cif\n ## 14748 : has label of amcsd code but no number (or anything) assigned\n ## 15049 : page number 'L24307 1' could not be parsed as number\n ## 15050 : page number 'L24307 1' could not be parsed as number\n ## 15851 : no first page number provided despite providing field label\n ## 18368 : non-numerical entries in B_iso fields\n ## 18449 : no first page number provided despite providing field label\n ## 18450 : no first page number provided despite providing field label\n ## 20029 : no volume number provided despite providing field label\n\n if url is None:\n url = 'http://rruff.geo.arizona.edu/AMS/download.php?id=%05d.cif&down=cif'\n\n ## Defines url range for searching and adding to cif database\n if all:\n iindex = range(99999) ## trolls whole database online\n elif minval is not None:\n iindex = np.arange(minval, 99999) ## starts at given min and counts up\n else:\n iindex = np.arange(13600, 13700) ## specifies small range including CeO2 match\n\n for i in iindex:\n if i not in exceptions and i < maxi:\n url_to_scrape = url % i\n r = requests.get(url_to_scrape)\n if r.text.split()[0] == \"Can't\" or '':\n if verbose:\n print('\\t---> ERROR on amcsd%05d.cif' % i)\n else:\n if verbose:\n print('Reading %s' % url_to_scrape)\n if savecif:\n file = 'amcsd%05d.cif' % i\n f = open(file,'w')\n f.write(r.text)\n f.close()\n if verbose:\n print('Saved %s' % file)\n if addDB:\n try:\n self.add_ciffile(url_to_scrape, url=True, verbose=verbose, ijklm=i)\n except:\n pass\n\n\n\n\n##################################################################################\n##################################################################################\n\n# usr_qry = self.query(self.ciftbl,\n# self.elemtbl,self.nametbl,self.spgptbl,self.symtbl,\n# self.authtbl,self.qtbl,self.cattbl,\n# self.authref,self.compref,self.catref,self.symref)\\\n# .filter(self.authref.c.amcsd_id == self.ciftbl.c.amcsd_id)\\\n# .filter(self.authtbl.c.author_id == self.authref.c.author_id)\\\n# .filter(self.compref.c.amcsd_id == self.ciftbl.c.amcsd_id)\\\n# .filter(self.compref.c.z == self.elemtbl.c.z)\\\n# .filter(self.catref.c.amcsd_id == self.ciftbl.c.amcsd_id)\\\n# .filter(self.catref.c.category_id == self.cattbl.c.category_id)\\\n# .filter(self.nametbl.c.mineral_id == self.ciftbl.c.mineral_id)\\\n# .filter(self.symref.c.symmetry_id == self.symtbl.c.symmetry_id)\\\n# .filter(self.symref.c.iuc_id == self.spgptbl.c.iuc_id)\\\n# .filter(self.spgptbl.c.iuc_id == self.ciftbl.c.iuc_id)\n\n##################################################################################\n##################################################################################\n\n\n##################################################################################\n\n def amcsd_info(self, amcsd_id, no_qpeaks=None, ciffile=None):\n\n mineral_id,iuc_id = self.cif_by_amcsd(amcsd_id,only_ids=True)\n\n mineral_name = self.search_for_mineral(minid=mineral_id)[0].mineral_name\n authors = self.author_by_amcsd(amcsd_id)\n\n ## ALLelements,mineral_name,iuc_id,authors 
= self.all_by_amcsd(amcsd_id)\n\n if ciffile:\n print(' ==== File : %s ====' % os.path.split(ciffile)[-1])\n else:\n print(' ===================== ')\n print(' AMCSD: %i' % amcsd_id)\n print(' Name: %s' % mineral_name)\n print(' %s' % self.composition_by_amcsd(amcsd_id,string=True))\n try:\n print(' Space Group No.: %s (%s)' % (iuc_id,self.symm_id(iuc_id)))\n except:\n print(' Space Group No.: %s' % iuc_id)\n if no_qpeaks:\n print(' No. q-peaks in range : %s' % no_qpeaks)\n\n authorstr = ' Author(s): '\n for author in authors:\n authorstr = '%s %s' % (authorstr,author.split()[0])\n print(authorstr)\n print(' ===================== ')\n\n def symm_id(sel, iuc_id):\n\n if not isinstance(iuc_id, int):\n iuc_id = int(iuc_id.split(':')[0])\n\n if iuc_id < 3 : return 'triclinic' ## 1 - 2 : Triclinic\n elif iuc_id < 16 : return 'monoclinic' ## 3 - 15 : Monoclinic\n elif iuc_id < 75 : return 'orthorhombic' ## 16 - 74 : Orthorhombic\n elif iuc_id < 143: return 'tetragonal' ## 75 - 142 : Tetragonal\n elif iuc_id < 168: return 'trigonal' ## 143 - 167 : Trigonal\n elif iuc_id < 195: return 'hexagonal' ## 168 - 194 : Hexagonal\n elif iuc_id < 231: return 'cubic' ## 195 - 230 : Cubic\n else:\n return\n\n def return_cif(self,amcsd_id):\n search_cif = self.ciftbl.select(self.ciftbl.c.amcsd_id == amcsd_id)\n for row in search_cif.execute():\n return row.cif\n\n##################################################################################\n\n def all_by_amcsd(self, amcsd_id):\n\n mineral_id,iuc_id = self.cif_by_amcsd(amcsd_id,only_ids=True)\n\n mineral_name = self.search_for_mineral(minid=mineral_id)[0].mineral_name\n ALLelements = self.composition_by_amcsd(amcsd_id)\n authors = self.author_by_amcsd(amcsd_id)\n\n return ALLelements, mineral_name, iuc_id, authors\n\n def q_by_amcsd(self,amcsd_id,qmin=QMIN,qmax=QMAX):\n\n q_results = self.query(self.ciftbl.c.qstr).filter(self.ciftbl.c.amcsd_id == amcsd_id).all()\n q_all = [json.loads(qrow[0]) for qrow in q_results]\n\n return [self.axis[i] for i,qi in enumerate(q_all[0]) if qi == 1 and self.axis[i] >= qmin and self.axis[i] <= qmax]\n\n def author_by_amcsd(self,amcsd_id):\n\n search_authors = self.authref.select(self.authref.c.amcsd_id == amcsd_id)\n authors = []\n for row in search_authors.execute():\n authors.append(self.search_for_author(row.author_id,id_no=False)[0][0])\n return authors\n\n def composition_by_amcsd(self, amcsd_id):\n q = self.query(self.compref).filter(self.compref.c.amcsd_id==amcsd_id)\n return [row.z for row in q.all()]\n\n def cif_by_amcsd(self,amcsd_id,only_ids=False):\n\n search_cif = self.ciftbl.select(self.ciftbl.c.amcsd_id == amcsd_id)\n for row in search_cif.execute():\n if only_ids:\n return row.mineral_id, row.iuc_id\n else:\n return row.cif\n\n def mineral_by_amcsd(self,amcsd_id):\n\n search_cif = self.ciftbl.select(self.ciftbl.c.amcsd_id == amcsd_id)\n for row in search_cif.execute():\n cifstr = row.cif\n mineral_id = row.mineral_id\n iuc_id = row.iuc_id\n\n search_mineralname = self.nametbl.select(self.nametbl.c.mineral_id == mineral_id)\n for row in search_mineralname.execute():\n mineral_name = row.mineral_name\n return mineral_name\n\n##################################################################################\n##################################################################################\n\n def amcsd_by_q(self, peaks, qmin=None, qmax=None, qstep=None, list=None,\n verbose=False):\n\n if qmin is None: qmin = QMIN\n if qmax is None: qmax = QMAX\n if qstep is None: qstep = QSTEP\n\n ## Defines min/max limits of 
q-range\n imin, imax = 0, len(self.axis)\n if qmax < np.max(self.axis):\n imax = abs(self.axis-qmax).argmin()\n if qmin > np.min(self.axis):\n imin = abs(self.axis-qmin).argmin()\n qaxis = self.axis[imin:imax]\n stepq = (qaxis[1]-qaxis[0])\n\n amcsd, q_amcsd = self.match_q(list=list, qmin=qmin, qmax=qmax)\n\n ## Re-bins data if different step size is specified\n if qstep > stepq:\n new_qaxis = np.arange(np.min(qaxis),np.max(qaxis)+stepq,qstep)\n new_q_amcsd = np.zeros((np.shape(q_amcsd)[0],np.shape(new_qaxis)[0]))\n for m,qrow in enumerate(q_amcsd):\n for n,qn in enumerate(qrow):\n if qn == 1:\n k = np.abs(new_qaxis-qaxis[n]).argmin()\n new_q_amcsd[m][k] = 1\n qaxis = new_qaxis\n q_amcsd = new_q_amcsd\n\n\n ## Create data array\n peaks_weighting = np.ones(len(qaxis),dtype=int)*-1\n peaks_true = np.zeros(len(qaxis),dtype=int)\n peaks_false = np.ones(len(qaxis),dtype=int)\n for p in peaks:\n i = np.abs(qaxis-p).argmin()\n peaks_weighting[i],peaks_true[i],peaks_false[i] = 1,1,0\n\n ## Calculate score/matches/etc.\n total_peaks = np.sum((q_amcsd),axis=1)\n match_peaks = np.sum((peaks_true*q_amcsd),axis=1)\n miss_peaks = np.sum((peaks_false*q_amcsd),axis=1)\n scores = np.sum((peaks_weighting*q_amcsd),axis=1)\n\n return sorted(zip(scores, amcsd, total_peaks, match_peaks, miss_peaks), reverse=True)\n\n\n def amcsd_by_chemistry(self, include=[], exclude=[]):\n\n amcsd_incld = []\n amcsd_excld = []\n z_incld = []\n z_excld = []\n\n if len(include) > 0:\n for element in include:\n z = self.get_element(element).z\n if z is not None and z not in z_incld:\n z_incld += [z]\n if isinstance(exclude,bool):\n if exclude:\n for element in ELEMENTS:\n z, name, symbol = element\n z = int(z)\n if z not in z_incld:\n z_excld += [z]\n else:\n if len(exclude) > 0:\n for element in exclude:\n z = self.get_element(element).z\n if z is not None and z not in z_excld:\n z_excld += [z]\n\n z_list_include = [1 if z in z_incld else 0 for z in np.arange(len(ELEMENTS)+1)]\n z_list_exclude = [1 if z in z_excld else 0 for z in np.arange(len(ELEMENTS)+1)]\n\n amcsd,z_amcsd = self.return_z_matches(list=list)\n\n ## Calculate score/matches/etc.\n match_z = np.sum((np.array(z_list_include)*np.array(z_amcsd)),axis=1)\n miss_z = np.sum((np.array(z_list_exclude)*np.array(z_amcsd)),axis=1)\n\n for i,amcsd_id in enumerate(amcsd):\n if match_z[i] == np.sum(z_list_include) and miss_z[i] <= 0:\n amcsd_incld += [amcsd_id]\n else:\n amcsd_excld += [amcsd_id]\n\n return amcsd_incld\n\n\n def amcsd_by_mineral(self, min_name, list=None, verbose=True):\n \"\"\"\n search by mineral name\n \"\"\"\n out = []\n minerals = self.search_for_mineral(name=min_name)\n\n q = self.query(self.ciftbl)\n if list is not None:\n q = q.filter(self.ciftbl.c.amcsd_id.in_(list))\n\n ## Searches mineral name for database entries\n if len(minerals) > 0:\n mids = [m.mineral_id for m in minerals]\n q = q.filter(self.ciftbl.c.mineral_id.in_(mids))\n for row in q.all():\n if row.amcsd_id not in out:\n out.append(row.amcsd_id)\n return out\n\n def amcsd_by_author(self,include=[''],list=None,verbose=True):\n\n amcsd_incld = []\n auth_id = []\n\n for author in include:\n id = self.search_for_author(author)\n auth_id += id\n\n ## Searches mineral name for database entries\n usr_qry = self.query(self.ciftbl,self.authtbl,self.authref)\\\n .filter(self.authref.c.amcsd_id == self.ciftbl.c.amcsd_id)\\\n .filter(self.authref.c.author_id == self.authtbl.c.author_id)\n if list is not None:\n usr_qry = usr_qry.filter(self.ciftbl.c.amcsd_id.in_(list))\n\n ## Searches author name in 
database entries\n if len(auth_id) > 0:\n fnl_qry = usr_qry.filter(self.authref.c.author_id.in_(auth_id))\n ## This currently works in an 'or' fashion, as each name in list\n ## can be matched to multiple auth_id values, so it is simpler to\n ## consider them all separately. Making a 2D list and restructuring\n ## query could improve this\n ## mkak 2017.02.24\n for row in fnl_qry.all():\n if row.amcsd_id not in amcsd_incld:\n amcsd_incld += [row.amcsd_id]\n\n return amcsd_incld\n\n\n def match_elements(self, elems, exclude=None):\n \"\"\"match structues containing all elements in a list\n\n Arguments:\n ----------\n elems list of elements to match\n exclude list of elements to exclude for match (default None)\n\n Returns:\n --------\n list of amcsd ids for structures\n\n \"\"\"\n matches = None\n q = self.query(self.compref)\n\n for elem in elems:\n elem = self.get_element(elem).z\n rows = q.filter(self.compref.c.z==elem).all()\n sids = [row.amcsd_id for row in rows]\n if matches is None:\n matches = sids\n else:\n matches = [s for s in sids if s in matches]\n\n if exclude is not None:\n for elem in exclude:\n elem = self.get_element(elem).z\n for row in q.filter(self.compref.c.z==elem).all():\n if row.amcsd_id in matches:\n matches.remove(row.amcsd_id)\n return matches\n\n def create_z_array(self,z):\n z_array = np.zeros((len(ELEMENTS)+1),dtype=int) ## + 1 gives index equal to z; z[0]:nothing\n for zn in z:\n z_array[zn] = 1\n return z_array\n\n\n##################################################################################\n##################################################################################\n def match_qc(self, list=None, qmin=QMIN, qmax=QMAX):\n\n if list is None:\n qqry = self.query(self.ciftbl.c.qstr).all()\n idqry = self.query(self.ciftbl.c.amcsd_id).all()\n else:\n qqry = self.query(self.ciftbl.c.qstr)\\\n .filter(self.ciftbl.c.amcsd_id.in_(list))\\\n .all()\n idqry = self.query(self.ciftbl.c.amcsd_id)\\\n .filter(self.ciftbl.c.amcsd_id.in_(list))\\\n .all()\n\n imin,imax = 0,len(self.axis)\n if qmax < QMAX: imax = abs(self.axis-qmax).argmin()\n if qmin > QMIN: imin = abs(self.axis-qmin).argmin()\n\n return [id[0] for id in idqry],[json.loads(q[0])[imin:imax] for q in qqry]\n\n def create_q_array(self, q):\n\n q_array = np.zeros(len(self.axis), dtype=int)\n for qn in q:\n i = np.abs(self.axis-qn).argmin()\n q_array[i] = 1\n return q_array\n\n##################################################################################\n\n def get_element(self, element):\n '''\n searches elements for match in symbol, name, or atomic number;\n match must be exact.\n\n returns row with attributes .z, .element_name, .element_symbol\n '''\n if isinstance(element, int):\n element = '%d' % element\n elif isinstance(element, six.string_types):\n element = element.title()\n q = self.query(self.elemtbl)\n row = q.filter(or_(self.elemtbl.c.z == element,\n self.elemtbl.c.element_symbol == element,\n self.elemtbl.c.element_name == element)).one()\n return row\n\n def search_for_author(self,name,exact=False,id_no=True,verbose=False):\n '''\n searches database for author matching criteria given in 'name'\n - if name is a string:\n - will match author name containing text\n - will match id number if integer given in string\n - will only look for exact match if exact flag is given\n - if name is an integer, will only match id number from database\n id_no: if True, will only return the id number of match(es)\n if False, returns name and id number\n e.g. 
as INTEGER\n >>> cif.search_for_author(6,id_no=False)\n ([u'Chao G Y'], [6])\n as STRING\n >>> cif.search_for_author('6',id_no=False)\n ([u'Chao G Y', u'Geology Team 654'], [6, 7770])\n '''\n\n authname = []\n authid = []\n\n id, name = filter_int_and_str(name,exact=exact)\n authrow = self.query(self.authtbl)\\\n .filter(or_(self.authtbl.c.author_name.like(name),\n self.authtbl.c.author_id == id))\n if len(authrow.all()) == 0:\n if verbose: print('%s not found in author database.' % name)\n else:\n for row in authrow.all():\n authname += [row.author_name]\n authid += [row.author_id]\n\n if id_no: return authid\n else: return authname,authid\n\n def search_for_mineral(self, name=None, minid=None, exact=False):\n '''\n searches database for mineral by name or by ID\n\n Arguments:\n ----------\n name (str or None): mineral name to match\n minid (int or None): mineral ID in database to match\n exact (bool): whether to match name exactly [False]\n\n\n Returns:\n --------\n list of matching rows\n\n # [row.mineral_name, row.mineral_id]\n '''\n\n rows = []\n q = self.query(self.nametbl)\n\n if name is not None:\n if not exact:\n name = '%%%s%%' % name\n rows = q.filter(self.nametbl.c.mineral_name.like(name)).all()\n elif minid is not None:\n rows = q.filter(self.nametbl.c.mineral_id == minid).all()\n return rows\n\n def cif_count(self):\n return self.query(self.ciftbl).count()\n\n def return_q(self):\n q = [float(row.q) for row in self.query(self.qtbl).all()]\n return np.array(q)\n\n def get_mineral_names(self):\n names = []\n for name in self.query(self.nametbl.c.mineral_name).all():\n if isinstance(name[0], six.string_types):\n names.append(name[0])\n return sorted(names)\n\n def return_author_names(self):\n\n authorqry = self.query(self.authtbl)\n names = []\n for row in authorqry.all():\n names += [row.author_name]\n\n return sorted(names)\n\ndef filter_int_and_str(s, exact=False):\n try:\n i = int(s)\n except:\n i = 0\n if not exact:\n try:\n s = '%'+s+'%'\n except:\n pass\n return i, s\n\n\ndef column(matrix, i):\n return [row[i] for row in matrix]\n\nclass RangeParameter(object):\n def __init__(self,min=None,max=None,unit=None):\n self.min = min\n self.max = max\n self.unit = unit\n\nclass SearchCIFdb(object):\n '''\n interface to the search the cif database\n '''\n def __init__(self, verbose=False):\n\n self.verbose = verbose\n\n ## running list of included amcsd id numbers\n self.amcsd_id = []\n\n ## tags for searching\n self.authors = []\n self.keywords = []\n self.categories = []\n self.amcsd = []\n self.qpks = []\n\n self.mnrlname = ''\n\n self.elem_incl = []\n self.elem_excl = []\n self.allelem = column(ELEMENTS, 2)\n\n self.lattice_keys = ['a', 'b', 'c', 'alpha', 'beta', 'gamma']\n\n self.sg = None\n self.a = RangeParameter()\n self.b = RangeParameter()\n self.c = RangeParameter()\n self.alpha = RangeParameter()\n self.beta = RangeParameter()\n self.gamma = RangeParameter()\n\n\n def show_all(self):\n for key in ['authors','mnrlname','keywords','categories','amcsd','qpks']:\n print('%s : %s' % (key,self.show_parameter(key=key)))\n print('chemistry : %s' % self.show_chemistry())\n print('geometry : %s' % self.show_geometry())\n\n def show_parameter(self, key='authors'):\n s = ''\n if len(self.__dict__[key]) > 0:\n for i,item in enumerate(self.__dict__[key]):\n item = item.split()[0]\n if i == 0:\n s = '%s' % (item)\n else:\n s = '%s, %s' % (s,item)\n return s\n\n\n def read_parameter(self,s,clear=True,key='authors'):\n '''\n This function works for keys:\n 'authors'\n 'mnrlname\n 
keywords','categories','amcsd','qpks'\n '''\n\n if clear:\n self.__dict__[key] = []\n if len(s) > 0:\n for a in s.split(','):\n try:\n self.__dict__[key] += [a.split()[0]]\n except:\n pass\n\n def read_chemistry(self,s,clear=True):\n\n if clear:\n self.elem_incl,self.elem_excl = [],[]\n chem_incl,chem_excl = [],[]\n\n chemstr = re.sub('[( )]','',s)\n ii = -1\n for i,s in enumerate(chemstr):\n if s == '-':\n ii = i\n if ii > 0:\n chem_incl = chemstr[0:ii].split(',')\n if len(chemstr)-ii == 1:\n for elem in self.allelem:\n if elem not in chem_incl:\n chem_excl += [elem]\n elif ii < len(chemstr)-1:\n chem_excl = chemstr[ii+1:].split(',')\n else:\n chem_incl = chemstr.split(',')\n\n for elem in chem_incl:\n elem = capitalize_string(elem)\n if elem in self.allelem and elem not in self.elem_incl:\n self.elem_incl += [elem]\n if elem in self.elem_excl:\n j = self.elem_excl.index(elem)\n self.elem_excl.pop(j)\n for elem in chem_excl:\n elem = capitalize_string(elem)\n if elem in self.allelem and elem not in self.elem_excl and elem not in self.elem_incl:\n self.elem_excl += [elem]\n\n def show_chemistry(self):\n\n s = ''\n for i,elem in enumerate(self.elem_incl):\n if i==0:\n s = '(%s' % elem\n else:\n s = '%s,%s' % (s,elem)\n if len(self.elem_incl) > 0:\n s = '%s) ' % s\n if len(self.elem_excl) > 0:\n s = '%s- ' % s\n # if all else excluded, don't list\n if (len(self.allelem)-20) > (len(self.elem_incl)+len(self.elem_excl)):\n for i,elem in enumerate(self.elem_excl):\n if i==0:\n s = '%s(%s' % (s,elem)\n else:\n s = '%s,%s' % (s,elem)\n if len(self.elem_excl) > 0:\n s = '%s)' % s\n return s\n\n def show_geometry(self,unit='A'):\n\n s = ''\n\n key = 'sg'\n if self.__dict__[key] is not None:\n s = '%s%s=%s,' % (s,key,self.__dict__[key])\n for i,key in enumerate(self.lattice_keys):\n if self.__dict__[key].min is not None:\n s = '%s%s=%0.2f' % (s,key,float(self.__dict__[key].min))\n if self.__dict__[key].max is not None:\n s = '%sto%0.2f' % (s,float(self.__dict__[key].max))\n s = '%s%s,' % (s,self.__dict__[key].unit)\n\n if len(s) > 1:\n if s[-1] == ',':\n s = s[:-1]\n\n return s\n\n def read_geometry(self,s):\n\n geostr = s.split(',')\n used = []\n for par in geostr:\n key = par.split('=')[0]\n val = par.split('=')[1]\n if key in 'sg':\n self.__dict__[key] = val\n used += [key]\n elif key in self.lattice_keys:\n values = [''.join(g) for _, g in groupby(val, str.isalpha)]\n self.__dict__[key].min = values[0]\n if len(values) > 1:\n self.__dict__[key].unit = values[-1]\n if len(values) > 2:\n self.__dict__[key].max = values[2]\n else:\n self.__dict__[key].max = None\n used += [key]\n\n ## Resets undefined to None\n for key in self.lattice_keys:\n if key not in used:\n self.__dict__[key] = RangeParameter()\n key = 'sg'\n if key not in used:\n self.__dict__[key] = None\n\ndef match_database(cifdb, peaks, minq=QMIN, maxq=QMAX, verbose=True):\n \"\"\"\n fracq : min. ratio of matched q to possible in q range, i.e. 'goodness gauge'\n pk_wid : maximum range in q which qualifies as a match between fitted and ideal\n \"\"\"\n stepq = 0.05\n scores,amcsd,total_peaks,match_peaks,miss_peaks = zip(*cifdb.amcsd_by_q(peaks,\n qmin=minq,qmax=maxq,qstep=stepq,\n list=None,verbose=False))\n\n MATCHES = [match for i,match in enumerate(amcsd) if scores[i] > 0]\n\n if verbose:\n print('\\n')\n if len(MATCHES) > 100:\n print('DISPLAYING TOP 100 of %i TOTAL MATCHES FOUND.' % len(MATCHES))\n else:\n print('%i TOTAL MATCHES FOUND.' 
% len(MATCHES))\n        j = 0\n        for i,id_no in enumerate(amcsd):\n            if j < 100:\n                if scores[i] > 0:\n                    j += 1\n                    str = 'AMCSD %5d, %s (score of %2d --> %i of %i peaks)' % (id_no,\n                              cifdb.mineral_by_amcsd(id_no),scores[i],\n                              match_peaks[i],total_peaks[i])\n                    print(str)\n        print('')\n\n    return MATCHES\n\n\ndef cif_match(peaks, qmin=None, qmax=None, verbose=False, _larch=None):\n    \"\"\"\n    fracq  : min. ratio of matched q to possible in q range, i.e. 'goodness gauge'\n    pk_wid : maximum range in q which qualifies as a match between fitted and ideal\n    \"\"\"\n    cifdb = get_cifdb(_larch=_larch)\n    qstep = 0.05\n\n    rows = cifdb.amcsd_by_q(peaks, qmin=qmin, qmax=qmax, qstep=qstep)\n\n    ## amcsd_by_q returns a sorted list of (score, amcsd, total, match, miss) tuples\n    scores, amcsd, total_peaks, match_peaks, miss_peaks = zip(*rows)\n\n    matches = []\n    for i, cdat in enumerate(amcsd):\n        if scores[i] > 0:\n            matches.append(cdat)\n\n    if verbose:\n        print('\\n')\n        if len(matches) > 100:\n            print('DISPLAYING TOP 100 of %i TOTAL MATCHES FOUND.' % len(matches))\n        else:\n            print('%i TOTAL MATCHES FOUND.' % len(matches))\n        matches = matches[:100]\n        j = 0\n        for i, id_no in enumerate(amcsd):\n            if j < 100:\n                if scores[i] > 0:\n                    j += 1\n                    str = 'AMCSD %5d, %s (score of %2d --> %i of %i peaks)' % (id_no,\n                              cifdb.mineral_by_amcsd(id_no),scores[i],\n                              match_peaks[i],total_peaks[i])\n                    print(str)\n        print('')\n\n    return matches\n\n\ndef read_cif(filename=None, amcsd_id=None, _larch=None):\n    \"\"\"make a representation of a CIF data structure\n    for crystallographic computations\n\n    Arguments:\n    ----------\n      filename  (str or None) name of CIF file\n      amcsd_id  (int or None) index of CIF in Am Min Crystal Structure database\n\n    Returns\n    -------\n      CIF representation\n    \"\"\"\n    cifdb = get_cifdb(_larch=_larch)\n    return create_cif(filename=filename, cifdb=cifdb, amcsd_id=amcsd_id)\n"
] |
[
[
"numpy.abs",
"numpy.min",
"numpy.arange",
"numpy.max",
"numpy.shape",
"numpy.array",
"numpy.sum"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
ChendiWang/MetaXcan
|
[
"1f0eb0660f9b2a0d25b7083a34e9418cd8ba9566"
] |
[
"software/metax/misc/GWASAndModels.py"
] |
[
"import logging\nimport os\nimport re\nimport pandas\n\nfrom .. import Constants\nfrom .. import PredictionModel\nfrom ..gwas import Utilities as GWASUtilities\n\ndef align_data_to_alleles(data, base, left_on, right_on):\n EA, NEA = Constants.EFFECT_ALLELE, Constants.NON_EFFECT_ALLELE\n EA_BASE, NEA_BASE = EA+\"_BASE\", NEA+\"_BASE\"\n merged = pandas.merge(data, base, left_on=left_on, right_on=right_on, suffixes=(\"\", \"_BASE\"))\n\n alleles_1 = pandas.Series([set(e) for e in zip(merged[EA], merged[NEA])])\n alleles_2 = pandas.Series([set(e) for e in zip(merged[EA_BASE], merged[NEA_BASE])])\n eq = alleles_1 == alleles_2\n merged = merged[eq]\n if eq.shape[0] == 0:\n return merged\n\n flipped = merged[EA] != merged[EA_BASE]\n Z = Constants.ZSCORE\n if Z in merged:\n merged.loc[flipped, Z] = - merged.loc[flipped, Z]\n B = Constants.BETA\n if B in merged:\n merged.loc[flipped, B] = - merged.loc[flipped, B]\n\n merged.loc[flipped, EA] = merged.loc[flipped, EA_BASE]\n merged.loc[flipped, NEA] = merged.loc[flipped, NEA_BASE]\n\n return merged\n\ndef gwas_model_intersection(args):\n gwas= GWASUtilities.load_plain_gwas_from_args(args)\n paths = PredictionModel._model_paths(args.models_folder, args.models_name_filter)\n PF = PredictionModel.WDBQF\n intersection = set()\n for db_path in sorted(paths):\n logging.log(9, \"loading %s\", db_path)\n model = PredictionModel.load_model(db_path)\n base = model.weights[[PF.K_RSID, PF.K_EFFECT_ALLELE, PF.K_NON_EFFECT_ALLELE]].drop_duplicates()\n b = align_data_to_alleles(gwas, base, Constants.SNP, PF.K_RSID)\n intersection.update(b[Constants.SNP])\n return intersection\n\n\n"
] |
[
[
"pandas.merge"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
kukalbriiwa7/COMP511_CS231n
|
[
"1537e98cdca43fad906e56a22f48d884523414b0",
"1537e98cdca43fad906e56a22f48d884523414b0"
] |
[
"assignment2/comp411/layers.py",
"assignment1/comp411/classifiers/linear_svm.py"
] |
[
"from builtins import range\nimport numpy as np\n\n\ndef affine_forward(x, w, b):\n \"\"\"\n Computes the forward pass for an affine (fully-connected) layer.\n\n The input x has shape (N, d_1, ..., d_k) and contains a minibatch of N\n examples, where each example x[i] has shape (d_1, ..., d_k). We will\n reshape each input into a vector of dimension D = d_1 * ... * d_k, and\n then transform it to an output vector of dimension M.\n\n Inputs:\n - x: A numpy array containing input data, of shape (N, d_1, ..., d_k)\n - w: A numpy array of weights, of shape (D, M)\n - b: A numpy array of biases, of shape (M,)\n\n Returns a tuple of:\n - out: output, of shape (N, M)\n - cache: (x, w, b)\n \"\"\"\n out = None\n ###########################################################################\n # TODO: Implement the affine forward pass. Store the result in out. You #\n # will need to reshape the input into rows. #\n ###########################################################################\n # *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****\n \n datapoints_dimension = x[0].shape\n X = x.reshape(x.shape[0], np.prod(datapoints_dimension))\n out = X.dot(w) + b\n \n # *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****\n ###########################################################################\n # END OF YOUR CODE #\n ###########################################################################\n cache = (x, w, b)\n return out, cache\n\n\ndef affine_backward(dout, cache):\n \"\"\"\n Computes the backward pass for an affine layer.\n\n Inputs:\n - dout: Upstream derivative, of shape (N, M)\n - cache: Tuple of:\n - x: Input data, of shape (N, d_1, ... d_k)\n - w: Weights, of shape (D, M)\n - b: Biases, of shape (M,)\n\n Returns a tuple of:\n - dx: Gradient with respect to x, of shape (N, d1, ..., d_k)\n - dw: Gradient with respect to w, of shape (D, M)\n - db: Gradient with respect to b, of shape (M,)\n \"\"\"\n x, w, b = cache\n dx, dw, db = None, None, None\n ###########################################################################\n # TODO: Implement the affine backward pass. #\n ###########################################################################\n # *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****\n \n datapoints_dimension_stretched = np.prod(x[0].shape)\n N = x.shape[0]\n X = x.reshape(N, datapoints_dimension_stretched)\n dx = dout.dot(w.T)\n dx = dx.reshape(x.shape)\n dw = X.T.dot(dout)\n db = dout.sum(axis=0)\n \n\n # *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****\n ###########################################################################\n # END OF YOUR CODE #\n ###########################################################################\n return dx, dw, db\n\n\ndef relu_forward(x):\n \"\"\"\n Computes the forward pass for a layer of rectified linear units (ReLUs).\n\n Input:\n - x: Inputs, of any shape\n\n Returns a tuple of:\n - out: Output, of the same shape as x\n - cache: x\n \"\"\"\n out = None\n ###########################################################################\n # TODO: Implement the ReLU forward pass. 
#\n ###########################################################################\n # *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****\n\n out = np.maximum(0, x)\n\n # *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****\n ###########################################################################\n # END OF YOUR CODE #\n ###########################################################################\n cache = x\n return out, cache\n\n\ndef relu_backward(dout, cache):\n \"\"\"\n Computes the backward pass for a layer of rectified linear units (ReLUs).\n\n Input:\n - dout: Upstream derivatives, of any shape\n - cache: Input x, of same shape as dout\n\n Returns:\n - dx: Gradient with respect to x\n \"\"\"\n dx, x = None, cache\n ###########################################################################\n # TODO: Implement the ReLU backward pass. #\n ###########################################################################\n # *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****\n\n dx = dout * (x > 0)\n\n # *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****\n ###########################################################################\n # END OF YOUR CODE #\n ###########################################################################\n return dx\n\n\ndef leaky_relu_forward(x, lrelu_param):\n \"\"\"\n Computes the forward pass for a layer of leaky rectified linear units (Leaky ReLUs).\n\n Input:\n - x: Inputs, of any shape\n - lrelu_param: Dictionary with the following key:\n - alpha: scalar value for negative slope\n\n Returns a tuple of:\n - out: Output, of the same shape as x\n - cache: (x, lrelu_param).\n Input x, of same shape as dout,\n lrelu_param, needed for backward pass.\n \"\"\"\n out = None\n alpha = lrelu_param.get('alpha', 2e-3)\n ###########################################################################\n # TODO: Implement the ReLU forward pass. #\n ###########################################################################\n # *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****\n\n out = np.maximum(alpha*x, x)\n\n # *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****\n ###########################################################################\n # END OF YOUR CODE #\n ###########################################################################\n cache = (x, lrelu_param)\n return out, cache\n\n\ndef leaky_relu_backward(dout, cache):\n \"\"\"\n Computes the backward pass for a layer of leaky rectified linear units (Leaky ReLUs).\n Note that, the negative slope parameter (i.e. alpha) is fixed in this implementation.\n Therefore, you should not calculate any gradient for alpha.\n Input:\n - dout: Upstream derivatives, of any shape\n - cache: (x, lr_param)\n\n Returns:\n - dx: Gradient with respect to x\n \"\"\"\n dx = None\n (x, lr_param) = cache\n alpha = lr_param[\"alpha\"]\n ###########################################################################\n # TODO: Implement the ReLU backward pass. 
#\n ###########################################################################\n # *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****\n \n dx = np.where(x>0, dout, dout * alpha)\n \n # *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****\n ###########################################################################\n # END OF YOUR CODE #\n ###########################################################################\n return dx\n\n\ndef dropout_forward(x, dropout_param):\n \"\"\"\n Performs the forward pass for (inverted) dropout.\n\n Inputs:\n - x: Input data, of any shape\n - dropout_param: A dictionary with the following keys:\n - p: Dropout parameter. We keep each neuron output with probability p.\n - mode: 'test' or 'train'. If the mode is train, then perform dropout;\n if the mode is test, then just return the input.\n - seed: Seed for the random number generator. Passing seed makes this\n function deterministic, which is needed for gradient checking but not\n in real networks.\n\n Outputs:\n - out: Array of the same shape as x.\n - cache: tuple (dropout_param, mask). In training mode, mask is the dropout\n mask that was used to multiply the input; in test mode, mask is None.\n\n NOTE: Please implement **inverted** dropout, not the vanilla version of dropout.\n See http://cs231n.github.io/neural-networks-2/#reg for more details.\n\n NOTE 2: Keep in mind that p is the probability of **keep** a neuron\n output; this might be contrary to some sources, where it is referred to\n as the probability of dropping a neuron output.\n \"\"\"\n p, mode = dropout_param['p'], dropout_param['mode']\n if 'seed' in dropout_param:\n np.random.seed(dropout_param['seed'])\n\n mask = None\n out = None\n\n if mode == 'train':\n #######################################################################\n # TODO: Implement training phase forward pass for inverted dropout. #\n # Store the dropout mask in the mask variable. #\n #######################################################################\n # *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****\n mask = ( np.random.rand(*x.shape) < p ) / p\n out = x * mask\n # *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****\n #######################################################################\n # END OF YOUR CODE #\n #######################################################################\n elif mode == 'test':\n #######################################################################\n # TODO: Implement the test phase forward pass for inverted dropout. 
#\n #######################################################################\n # *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****\n out = x\n # *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****\n #######################################################################\n # END OF YOUR CODE #\n #######################################################################\n\n cache = (dropout_param, mask)\n out = out.astype(x.dtype, copy=False)\n\n return out, cache\n\n\ndef dropout_backward(dout, cache):\n \"\"\"\n Perform the backward pass for (inverted) dropout.\n\n Inputs:\n - dout: Upstream derivatives, of any shape\n - cache: (dropout_param, mask) from dropout_forward.\n \"\"\"\n dropout_param, mask = cache\n mode = dropout_param['mode']\n\n dx = None\n if mode == 'train':\n #######################################################################\n # TODO: Implement training phase backward pass for inverted dropout #\n #######################################################################\n # *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****\n dx = dout * mask\n # *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****\n #######################################################################\n # END OF YOUR CODE #\n #######################################################################\n elif mode == 'test':\n dx = dout\n return dx\n\n\ndef conv_forward_naive(x, w, b, conv_param):\n \"\"\"\n A naive implementation of the forward pass for a convolutional layer.\n Note that, the filter is not flipped as in the regular convolution operation\n in signal processing domain. Therefore, technically this implementation\n is a cross-correlation.\n\n The input consists of N data points, each with C channels, height H and\n width W. We convolve each input with F different filters, where each filter\n spans all C channels and has height HH and width WW.\n\n Input:\n - x: Input data of shape (N, C, H, W)\n - w: Filter weights of shape (F, C, HH, WW)\n - b: Biases, of shape (F,)\n - conv_param: A dictionary with the following keys:\n - 'stride': The number of pixels between adjacent receptive fields in the\n horizontal and vertical directions.\n - 'pad': The number of pixels that will be used to zero-pad the input. \n \n\n During padding, 'pad' zeros should be placed symmetrically (i.e equally on both sides)\n along the height and width axes of the input. Be careful not to modfiy the original\n input x directly.\n\n Returns a tuple of:\n - out: Output data, of shape (N, F, H', W') where H' and W' are given by\n H' = 1 + (H + 2 * pad - HH) / stride\n W' = 1 + (W + 2 * pad - WW) / stride\n - cache: (x, w, b, conv_param)\n \"\"\"\n out = None\n ###########################################################################\n # TODO: Implement the convolutional forward pass. #\n # Hint: you can use the function np.pad for padding. 
#\n ###########################################################################\n # *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****\n N, C, H, W = x.shape\n F, _, HH, WW = w.shape\n stride = conv_param.get('stride', 1)\n pad = conv_param.get('pad', 0)\n assert (H + 2 * pad - HH) % stride == 0, 'Just to check whether the filter fits properly or not'\n assert (W + 2 * pad - WW) % stride == 0, 'Just to check whether the filter fits properly or not'\n H_prime = 1 + (H + 2 * pad - HH) // stride\n W_prime = 1 + (W + 2 * pad - WW) // stride\n x_pad = np.pad(x, ((0, 0), (0, 0), (pad, pad), (pad, pad)), 'constant', constant_values=0)\n out = np.zeros((N, F, H_prime, W_prime))\n for n in range(N):\n for f in range(F):\n for j in range(0, H_prime):\n for i in range(0, W_prime):\n out[n, f, j, i] = (x_pad[n, :, j*stride:j*stride+HH, i*stride:i*stride+WW] * w[f, :, :, :]).sum() + b[f]\n\n # *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****\n ###########################################################################\n # END OF YOUR CODE #\n ###########################################################################\n cache = (x, w, b, conv_param)\n return out, cache\n\n\ndef conv_backward_naive(dout, cache):\n \"\"\"\n A naive implementation of the backward pass for a convolutional layer.\n\n Inputs:\n - dout: Upstream derivatives.\n - cache: A tuple of (x, w, b, conv_param) as in conv_forward_naive\n\n Returns a tuple of:\n - dx: Gradient with respect to x\n - dw: Gradient with respect to w\n - db: Gradient with respect to b\n \"\"\"\n dx, dw, db = None, None, None\n ###########################################################################\n ###########################################################################\n x, w, b, conv_param = cache\n padding, stride = conv_param['pad'], conv_param['stride']\n f, c, filter_height, filter_width = w.shape\n n, _, output_height, output_width = dout.shape\n\n # pad x\n N, C, H, W = x.shape\n pad_horiz = np.zeros((N, C, H, padding))\n x_horiz_padded = np.concatenate((pad_horiz, x, pad_horiz), axis=3)\n pad_vert = np.zeros((N, C, padding, x_horiz_padded.shape[3]))\n x_padded = np.concatenate((pad_vert, x_horiz_padded, pad_vert), axis=2)\n\n dx_padded = np.zeros(x_padded.shape)\n dw = np.zeros(w.shape)\n db = np.zeros(b.shape)\n\n w_flat = w.reshape((f, -1))\n\n for i in range(output_height):\n for j in range(output_width):\n dout_slice = dout[:, :, i, j]\n\n dx_slice_flattened = dout_slice.dot(w_flat)\n dx_slice = dx_slice_flattened.reshape((n, c, filter_height, filter_width))\n dx_padded[:, :, i * stride: i * stride + filter_height, j * stride: j * stride + filter_width] += dx_slice\n\n x_padded_slice = x_padded[:, :, i * stride: i * stride + filter_height, j * stride: j * stride + filter_width]\n x_slice_flattened = x_padded_slice.reshape((n, -1))\n\n dw += dout_slice.T.dot(x_slice_flattened).reshape(dw.shape)\n db += dout_slice.sum(axis=0)\n\n # crop padding from dx\n dx = dx_padded[:, :, padding:-padding, padding:-padding]\n ###########################################################################\n ###########################################################################\n return dx, dw, db\n\n\ndef max_pool_forward_naive(x, pool_param):\n \"\"\"\n A naive implementation of the forward pass for a max-pooling layer.\n\n Inputs:\n - x: Input data, of shape (N, C, H, W)\n - pool_param: dictionary with the following keys:\n - 'pool_height': The height of each pooling region\n - 'pool_width': The width of each pooling region\n - 
'stride': The distance between adjacent pooling regions\n\n No padding is necessary here. Output size is given by \n\n Returns a tuple of:\n - out: Output data, of shape (N, C, H', W') where H' and W' are given by\n H' = 1 + (H - pool_height) / stride\n W' = 1 + (W - pool_width) / stride\n - cache: (x, pool_param)\n \"\"\"\n ###########################################################################\n # TODO: Implement the max-pooling forward pass #\n ###########################################################################\n # *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****\n N, C, H, W = x.shape\n HH = pool_param.get('pool_height', 2)\n WW = pool_param.get('pool_width', 2)\n stride = pool_param.get('stride', 2)\n assert (H - HH) % stride == 0, 'Sanity Check Status: Max Pool Failed in Height'\n assert (W - WW) % stride == 0, 'Sanity Check Status: Max Pool Failed in Width'\n H_prime = 1 + (H - HH) // stride\n W_prime = 1 + (W - WW) // stride\n out = np.zeros((N, C, H_prime, W_prime))\n for n in range(N):\n for j in range(H_prime):\n for i in range(W_prime):\n out[n, :, j, i] = np.amax(x[n, :, j*stride:j*stride+HH, i*stride:i*stride+WW], axis=(-1, -2))\n # *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****\n ###########################################################################\n # END OF YOUR CODE #\n ###########################################################################\n cache = (x, pool_param)\n return out, cache\n\n\ndef max_pool_backward_naive(dout, cache):\n \"\"\"\n A naive implementation of the backward pass for a max-pooling layer.\n\n Inputs:\n - dout: Upstream derivatives\n - cache: A tuple of (x, pool_param) as in the forward pass.\n\n Returns:\n - dx: Gradient with respect to x\n \"\"\"\n dx = None\n ###########################################################################\n # TODO: Implement the max-pooling backward pass #\n ###########################################################################\n # *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****\n # Extract constants and shapes\n x, pool_param = cache\n N, C, H, W = x.shape\n HH = pool_param.get('pool_height', 2)\n WW = pool_param.get('pool_width', 2)\n stride = pool_param.get('stride', 2)\n H_prime = 1 + (H - HH) // stride\n W_prime = 1 + (W - WW) // stride\n dx = np.zeros_like(x)\n for n in range(N):\n for c in range(C):\n for j in range(H_prime):\n for i in range(W_prime):\n ind = np.argmax(x[n, c, j*stride:j*stride+HH, i*stride:i*stride+WW])\n ind1, ind2 = np.unravel_index(ind, (HH, WW))\n dx[n, c, j*stride:j*stride+HH, i*stride:i*stride+WW][ind1, ind2] = dout[n, c, j, i]\n\n # *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****\n ###########################################################################\n # END OF YOUR CODE #\n ###########################################################################\n return dx\n\n\ndef svm_loss(x, y):\n \"\"\"\n Computes the loss and gradient using for multiclass SVM classification.\n\n Inputs:\n - x: Input data, of shape (N, C) where x[i, j] is the score for the jth\n class for the ith input.\n - y: Vector of labels, of shape (N,) where y[i] is the label for x[i] and\n 0 <= y[i] < C\n\n Returns a tuple of:\n - loss: Scalar giving the loss\n - dx: Gradient of the loss with respect to x\n \"\"\"\n N = x.shape[0]\n correct_class_scores = x[np.arange(N), y]\n margins = np.maximum(0, x - correct_class_scores[:, np.newaxis] + 1.0)\n margins[np.arange(N), y] = 0\n loss = np.sum(margins) / N\n num_pos = np.sum(margins > 0, 
axis=1)\n dx = np.zeros_like(x)\n dx[margins > 0] = 1\n dx[np.arange(N), y] -= num_pos\n dx /= N\n return loss, dx\n\n\ndef softmax_loss(x, y):\n \"\"\"\n Computes the loss and gradient for softmax classification.\n\n Inputs:\n - x: Input data, of shape (N, C) where x[i, j] is the score for the jth\n class for the ith input.\n - y: Vector of labels, of shape (N,) where y[i] is the label for x[i] and\n 0 <= y[i] < C\n\n Returns a tuple of:\n - loss: Scalar giving the loss\n - dx: Gradient of the loss with respect to x\n \"\"\"\n shifted_logits = x - np.max(x, axis=1, keepdims=True)\n Z = np.sum(np.exp(shifted_logits), axis=1, keepdims=True)\n log_probs = shifted_logits - np.log(Z)\n probs = np.exp(log_probs)\n N = x.shape[0]\n loss = -np.sum(log_probs[np.arange(N), y]) / N\n dx = probs.copy()\n dx[np.arange(N), y] -= 1\n dx /= N\n return loss, dx\n",
"from builtins import range\nimport numpy as np\nfrom random import shuffle\nfrom past.builtins import xrange\n\ndef svm_loss_naive(W, X, y, reg):\n \"\"\"\n Structured SVM loss function, naive implementation (with loops).\n\n Inputs have dimension D, there are C classes, and we operate on minibatches\n of N examples.\n\n Inputs:\n - W: A numpy array of shape (D, C) containing weights.\n - X: A numpy array of shape (N, D) containing a minibatch of data.\n - y: A numpy array of shape (N,) containing training labels; y[i] = c means\n that X[i] has label c, where 0 <= c < C.\n - reg: (float) regularization strength\n\n Returns a tuple of:\n - loss as single float\n - gradient with respect to weights W; an array of same shape as W\n \"\"\"\n dW = np.zeros(W.shape) # initialize the gradient as zero\n\n # compute the loss and the gradient\n num_classes = W.shape[1]\n num_train = X.shape[0]\n loss = 0.0\n for i in range(num_train):\n scores = X[i].dot(W)\n correct_class_score = scores[y[i]]\n for j in range(num_classes):\n if j == y[i]:\n continue\n margin = scores[j] - correct_class_score + 1 # note delta = 1\n if margin > 0:\n loss += margin\n dW[:, y[i]] = dW[:, y[i]] - X[i]\n dW[:,j] = dW[:,j] + X[i]\n\n # Right now the loss is a sum over all training examples, but we want it\n # to be an average instead so we divide by num_train.\n loss /= num_train\n dW = dW / num_train\n\n # Add regularization to the loss.\n loss += reg * np.sum(W * W)\n dW = dW + reg * 2 * W\n\n #############################################################################\n # TODO: #\n # Compute the gradient of the loss function and store it dW. #\n # Rather that first computing the loss and then computing the derivative, #\n # it may be simpler to compute the derivative at the same time that the #\n # loss is being computed. As a result you may need to modify some of the #\n # code above to compute the gradient. #\n #############################################################################\n # *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****\n\n # Implemented above\n\n # *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****\n \n return loss, dW\n\n\n\ndef svm_loss_vectorized(W, X, y, reg):\n \"\"\"\n Structured SVM loss function, vectorized implementation.\n\n Inputs and outputs are the same as svm_loss_naive.\n \"\"\"\n loss = 0.0\n dW = np.zeros(W.shape) # initialize the gradient as zero\n\n #############################################################################\n # TODO: #\n # Implement a vectorized version of the structured SVM loss, storing the #\n # result in loss. #\n #############################################################################\n # *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****\n\n num_classes = W.shape[1]\n num_train = X.shape[0]\n scores = X.dot(W)\n correct_class_scores = scores[ np.arange(num_train), y].reshape(num_train,1)\n margin = np.maximum(0, scores - correct_class_scores + 1)\n margin[ np.arange(num_train), y] = 0 # do not consider correct class in loss\n loss = margin.sum() / num_train\n\n # Add regularization to the loss.\n loss += reg * np.sum(W * W)\n\n # *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****\n\n #############################################################################\n # TODO: #\n # Implement a vectorized version of the gradient for the structured SVM #\n # loss, storing the result in dW. 
#\n # #\n # Hint: Instead of computing the gradient from scratch, it may be easier #\n # to reuse some of the intermediate values that you used to compute the #\n # loss. #\n #############################################################################\n # *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****\n\n # Compute gradient\n margin[margin > 0] = 1\n valid_margin_count = margin.sum(axis=1)\n # Subtract in correct class (-s_y)\n margin[np.arange(num_train),y ] -= valid_margin_count\n dW = (X.T).dot(margin) / num_train\n\n # Regularization gradient\n dW = dW + reg * 2 * W\n\n # *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****\n\n return loss, dW\n"
] |
[
[
"numpy.log",
"numpy.amax",
"numpy.maximum",
"numpy.pad",
"numpy.sum",
"numpy.random.seed",
"numpy.unravel_index",
"numpy.arange",
"numpy.concatenate",
"numpy.max",
"numpy.argmax",
"numpy.zeros_like",
"numpy.random.rand",
"numpy.prod",
"numpy.exp",
"numpy.zeros",
"numpy.where"
],
[
"numpy.arange",
"numpy.maximum",
"numpy.zeros",
"numpy.sum"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
joelyancey/neuroglancer
|
[
"5f267160167c65a3e5f285e5b74d32e7b0160a60"
] |
[
"python/examples/flood_filling_simulation.py"
] |
[
"#!/usr/bin/env python\n\n\"\"\"Example of display interactive flood-filling \"inference\" results.\n\nshift+mousedown0 triggers the simulated flood filling to start with an initial\nseed at the mouse position. The computed mask values are displayed as an image,\nwhile the seed points chosen are displayed as point annotations.\n\nkeyt causes the simulated flood filling to stop.\n\nIn this example, the mask values are actually just computed as a distance\ntransform of the ground truth segmentation, and the seed points are restricted\nto the ground truth segment and assign random priorities. In actual use, this\nsame visualization approach can be used to display the actual mask and seed\npoints computed by a flood filling TensorFlow model.\n\nThe cloudvolume library (https://github.com/seung-lab/cloud-volume) is used to\nretrieve patches of the ground truth volume.\n\nThe zarr library is used to represent the sparse in-memory array containing the\ncomputed inference results that are displayed in neuroglancer.\n\n\"\"\"\n\nimport argparse\nimport random\nimport time\nimport threading\n\nimport neuroglancer\nimport neuroglancer.cli\nimport cloudvolume\nimport zarr\nimport numpy as np\nimport scipy.ndimage\n\n\nclass InteractiveInference(object):\n def __init__(self):\n viewer = self.viewer = neuroglancer.Viewer()\n self.gt_vol = cloudvolume.CloudVolume(\n 'https://storage.googleapis.com/neuroglancer-public-data/flyem_fib-25/ground_truth',\n mip=0,\n bounded=True,\n progress=False,\n provenance={})\n viewer.actions.add('start-fill', self._start_fill_action)\n viewer.actions.add('stop-fill', self._stop_fill_action)\n self.dimensions = neuroglancer.CoordinateSpace(\n names=['x', 'y', 'z'],\n units='nm',\n scales=[8, 8, 8],\n )\n with viewer.config_state.txn() as s:\n s.input_event_bindings.data_view['shift+mousedown0'] = 'start-fill'\n s.input_event_bindings.data_view['keyt'] = 'stop-fill'\n\n with viewer.txn() as s:\n s.layers['image'] = neuroglancer.ImageLayer(\n source='precomputed://gs://neuroglancer-public-data/flyem_fib-25/image',\n )\n s.layers['ground_truth'] = neuroglancer.SegmentationLayer(\n source='precomputed://gs://neuroglancer-public-data/flyem_fib-25/ground_truth',\n )\n s.layers['ground_truth'].visible = False\n self.flood_fill_event = None\n\n def _do_flood_fill(self, initial_pos, inf_results, inf_volume, event):\n initial_pos = (int(initial_pos[0]), int(initial_pos[1]), int(initial_pos[2]))\n\n gt_vol_zarr = zarr.zeros(\n self.gt_vol.bounds.to_list()[3:], chunks=(64, 64, 64), dtype=np.uint64)\n\n gt_blocks_seen = set()\n\n block_size = np.array((64, 64, 64), np.int64)\n\n def fetch_gt_block(block):\n spos = block * block_size\n epos = spos + block_size\n slice_expr = np.s_[int(spos[0]):int(epos[0]),\n int(spos[1]):int(epos[1]),\n int(spos[2]):int(epos[2])]\n gt_data = self.gt_vol[slice_expr][..., 0]\n gt_vol_zarr[slice_expr] = gt_data\n\n def get_patch(spos, epos):\n spos = np.array(spos)\n epos = np.array(epos)\n sblock = spos // block_size\n eblock = (epos - 1) // block_size\n for blockoff in np.ndindex(tuple(eblock - sblock + 1)):\n block = np.array(blockoff) + sblock\n block_tuple = tuple(block)\n if block_tuple in gt_blocks_seen: continue\n gt_blocks_seen.add(block_tuple)\n fetch_gt_block(block)\n slice_expr = np.s_[int(spos[0]):int(epos[0]),\n int(spos[1]):int(epos[1]),\n int(spos[2]):int(epos[2])]\n result = gt_vol_zarr[slice_expr]\n return result\n\n segment_id = self.gt_vol[initial_pos][0]\n\n patch_size = np.array((33, ) * 3, np.int64)\n lower_bound = patch_size // 
2\n upper_bound = np.array(self.gt_vol.bounds.to_list()[3:]) - patch_size + patch_size // 2\n d = 8\n\n seen = set()\n q = []\n\n last_invalidate = [time.time()]\n invalidate_interval = 3\n\n def enqueue(pos):\n if np.any(pos < lower_bound) or np.any(pos >= upper_bound): return\n if pos in seen: return\n seen.add(pos)\n q.append(pos)\n\n def update_view():\n if event.is_set():\n return\n cur_time = time.time()\n if cur_time < last_invalidate[0] + invalidate_interval:\n return\n last_invalidate[0] = cur_time\n inf_volume.invalidate()\n with self.viewer.txn() as s:\n s.layers['points'].annotations = [\n neuroglancer.PointAnnotation(id=repr(pos), point=pos) for pos in list(seen)\n ]\n\n def process_pos(pos):\n spos = pos - patch_size // 2\n epos = spos + patch_size\n slice_expr = np.s_[int(spos[0]):int(epos[0]),\n int(spos[1]):int(epos[1]),\n int(spos[2]):int(epos[2])]\n gt_data = get_patch(spos, epos)\n mask = gt_data == segment_id\n for offset in ((0, 0, d), (0, 0, -d), (0, d, 0), (0, -d, 0), (d, 0, 0), (-d, 0, 0)):\n if not mask[tuple(patch_size // 2 + offset)[::-1]]: continue\n new_pos = np.array(pos) + np.array(offset)\n enqueue(tuple(new_pos))\n\n dist_transform = scipy.ndimage.morphology.distance_transform_edt(~mask)\n inf_results[slice_expr] = 1 + np.cast[np.uint8](\n np.minimum(dist_transform, 5) / 5.0 * 254)\n\n self.viewer.defer_callback(update_view)\n\n enqueue(initial_pos)\n\n while len(q) > 0 and not event.is_set():\n i = random.randint(0, len(q) - 1)\n pos = q[i]\n q[i] = q[-1]\n del q[-1]\n process_pos(pos)\n self.viewer.defer_callback(update_view)\n\n def _stop_flood_fill(self):\n if self.flood_fill_event is not None:\n self.flood_fill_event.set()\n self.flood_fill_event = None\n\n def _start_flood_fill(self, pos):\n self._stop_flood_fill()\n inf_results = zarr.zeros(\n self.gt_vol.bounds.to_list()[3:], chunks=(64, 64, 64), dtype=np.uint8)\n inf_volume = neuroglancer.LocalVolume(\n data=inf_results, dimensions=self.dimensions)\n\n with self.viewer.txn() as s:\n s.layers['points'] = neuroglancer.LocalAnnotationLayer(self.dimensions)\n s.layers['inference'] = neuroglancer.ImageLayer(\n source=inf_volume,\n shader='''\nvoid main() {\n float v = toNormalized(getDataValue(0));\n vec4 rgba = vec4(0,0,0,0);\n if (v != 0.0) {\n rgba = vec4(colormapJet(v), 1.0);\n }\n emitRGBA(rgba);\n}\n''',\n )\n self.flood_fill_event = threading.Event()\n t = threading.Thread(\n target=self._do_flood_fill,\n kwargs=dict(\n initial_pos=pos,\n inf_results=inf_results,\n inf_volume=inf_volume,\n event=self.flood_fill_event,\n ))\n t.daemon = True\n t.start()\n\n def _start_fill_action(self, action_state):\n pos = action_state.mouse_voxel_coordinates\n if pos is None:\n return\n self._start_flood_fill(pos)\n\n def _stop_fill_action(self, action_state):\n self._stop_flood_fill()\n\n\nif __name__ == '__main__':\n ap = argparse.ArgumentParser()\n neuroglancer.cli.add_server_arguments(ap)\n args = ap.parse_args()\n neuroglancer.cli.handle_server_arguments(args)\n\n inf = InteractiveInference()\n print(inf.viewer)\n\n while True:\n time.sleep(1000)\n"
] |
[
[
"numpy.array",
"numpy.minimum",
"numpy.any"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
kbdering/ncscli
|
[
"853055b390238111efb31cb466c857a5ea886896"
] |
[
"examples/neoload/plotAgentMap.py"
] |
[
"#!/usr/bin/env python3\n\"\"\"\nplots loadtest results produced by runBatchJMeter\n\"\"\"\n# standard library modules\nimport argparse\nimport csv\nimport json\nimport logging\nimport math\nimport os\nimport sys\nimport warnings\n# third-party modules\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nlogger = logging.getLogger(__name__)\nlogger.setLevel(logging.INFO)\n\n\ndef getColumn(inputList,column):\n return [inputList[i][column] for i in range(0,len(inputList))]\n\ndef scriptDirPath():\n '''returns the absolute path to the directory containing this script'''\n return os.path.dirname(os.path.realpath(__file__))\n\n\nif __name__ == \"__main__\":\n # configure logger formatting\n logFmt = '%(asctime)s %(levelname)s %(module)s %(funcName)s %(message)s'\n logDateFmt = '%Y/%m/%d %H:%M:%S'\n formatter = logging.Formatter(fmt=logFmt, datefmt=logDateFmt )\n logging.basicConfig(format=logFmt, datefmt=logDateFmt)\n\n # treat numpy deprecations as errors\n warnings.filterwarnings('error', category=np.VisibleDeprecationWarning)\n\n ap = argparse.ArgumentParser( description=__doc__, fromfile_prefix_chars='@', formatter_class=argparse.ArgumentDefaultsHelpFormatter )\n ap.add_argument( '--dataDirPath', required=True, help='the path to to directory for input and output data' )\n args = ap.parse_args()\n\n logger.info( 'plotting data in directory %s', os.path.realpath(args.dataDirPath) )\n\n outputDir = args.dataDirPath\n launchedJsonFilePath = outputDir + \"/startedAgents.json\"\n print(\"launchedJsonFilePath = %s\" % launchedJsonFilePath)\n\n if not os.path.isfile( launchedJsonFilePath ):\n logger.error( 'file not found: %s', launchedJsonFilePath )\n sys.exit( 1 )\n\n launchedInstances = []\n with open( launchedJsonFilePath, 'r') as jsonInFile:\n try:\n launchedInstances = json.load(jsonInFile) # an array\n except Exception as exc:\n sys.exit( 'could not load json (%s) %s' % (type(exc), exc) )\n\n print(\"number of launchedInstances = %d\" % len(launchedInstances))\n\n mappedFrameNumLocation = []\n mappedFrameNumLocationUnitedStates = []\n mappedFrameNumLocationRussia = []\n mappedFrameNumLocationOther = []\n \n for j in range(0,len(launchedInstances)):\n mappedFrameNumLocation.append([j,\n launchedInstances[j][\"device-location\"][\"latitude\"],\n launchedInstances[j][\"device-location\"][\"longitude\"],\n launchedInstances[j][\"device-location\"][\"display-name\"],\n launchedInstances[j][\"device-location\"][\"country\"]\n ])\n if launchedInstances[j][\"device-location\"][\"country\"] ==\"United States\":\n mappedFrameNumLocationUnitedStates.append([j,\n launchedInstances[j][\"device-location\"][\"latitude\"],\n launchedInstances[j][\"device-location\"][\"longitude\"],\n launchedInstances[j][\"device-location\"][\"display-name\"],\n launchedInstances[j][\"device-location\"][\"country\"]\n ])\n elif launchedInstances[j][\"device-location\"][\"country\"] == \"Russia\":\n mappedFrameNumLocationRussia.append([j,\n launchedInstances[j][\"device-location\"][\"latitude\"],\n launchedInstances[j][\"device-location\"][\"longitude\"],\n launchedInstances[j][\"device-location\"][\"display-name\"],\n launchedInstances[j][\"device-location\"][\"country\"]\n ])\n else:\n mappedFrameNumLocationOther.append([j,\n launchedInstances[j][\"device-location\"][\"latitude\"],\n launchedInstances[j][\"device-location\"][\"longitude\"],\n launchedInstances[j][\"device-location\"][\"display-name\"],\n launchedInstances[j][\"device-location\"][\"country\"]\n ])\n\n print(\"\\nLocations:\")\n 
for i in range(0,len(mappedFrameNumLocation)):\n print(\"%s\" % mappedFrameNumLocation[i][3])\n\n print(\"\\nReading World Map data\")\n mapFilePath = os.path.join( scriptDirPath(), \"WorldCountryBoundaries.csv\" )\n mapFile = open(mapFilePath, \"r\")\n mapLines = mapFile.readlines()\n mapFile.close()\n mapNumLines = len(mapLines) \n\n CountryData = []\n CountrySphericalData = []\n\n for i in range(1,mapNumLines) :\n firstSplitString = mapLines[i].split(\"\\\"\")\n nonCoordinateString = firstSplitString[2] \n noncoordinates = nonCoordinateString.split(\",\")\n countryString = noncoordinates[6]\n\n if firstSplitString[1].startswith('<Polygon><outerBoundaryIs><LinearRing><coordinates>') and firstSplitString[1].endswith('</coordinates></LinearRing></outerBoundaryIs></Polygon>'):\n coordinateString = firstSplitString[1].replace('<Polygon><outerBoundaryIs><LinearRing><coordinates>','').replace('</coordinates></LinearRing></outerBoundaryIs></Polygon>','').replace(',0 ',',0,')\n coordinates = [float(j) for j in coordinateString.split(\",\")] \n coordinateList = np.zeros([int(len(coordinates)/3),2])\n for j in range(0,len(coordinateList)) :\n coordinateList[j,:] = coordinates[j*3:j*3+2]\n coordinateSphericalList = np.zeros([int(len(coordinates)/3),3])\n for j in range(0,len(coordinateSphericalList)) :\n r = 1\n phi = 2*math.pi*coordinates[j*3]/360\n theta = 2*math.pi*(90-coordinates[j*3+1])/360\n coordinateSphericalList[j,0] = r * np.sin(theta) * np.cos(phi)\n coordinateSphericalList[j,1] = r * np.sin(theta) * np.sin(phi)\n coordinateSphericalList[j,2] = r * np.cos(theta)\n\n CountryData.append([countryString,coordinateList])\n CountrySphericalData.append([countryString,coordinateSphericalList])\n else :\n reducedCoordinateString = firstSplitString[1].replace('<MultiGeometry>','').replace('</MultiGeometry>','').replace('<Polygon>','').replace('</Polygon>','').replace('<outerBoundaryIs>','').replace('</outerBoundaryIs>','').replace('<innerBoundaryIs>','').replace('</innerBoundaryIs>','').replace('<LinearRing>','').replace('</LinearRing>','').replace('</coordinates>','').replace(',0 ',',0,')\n coordinateStringSets = reducedCoordinateString.split(\"<coordinates>\")\n coordinateSets= []\n for j in range(1,len(coordinateStringSets)) :\n coordinateSets.append([float(k) for k in coordinateStringSets[j].split(\",\")])\n coordinateList = []\n coordinateSphericalList = []\n for j in range(0,len(coordinateSets)) :\n coordinateList.append(np.zeros([int(len(coordinateSets[j])/3),2]))\n for k in range(0,len(coordinateList[j])) :\n coordinateList[j][k,:] = coordinateSets[j][k*3:k*3+2]\n coordinateSphericalList.append(np.zeros([int(len(coordinateSets[j])/3),3]))\n for k in range(0,len(coordinateSphericalList[j])) :\n r = 1\n phi = 2*math.pi*coordinateSets[j][k*3]/360\n theta = 2*math.pi*(90-coordinateSets[j][k*3+1])/360\n coordinateSphericalList[j][k,0] = r * np.sin(theta) * np.cos(phi)\n coordinateSphericalList[j][k,1] = r * np.sin(theta) * np.sin(phi)\n coordinateSphericalList[j][k,2] = r * np.cos(theta)\n\n CountryData.append([countryString,coordinateList])\n CountrySphericalData.append([countryString,coordinateSphericalList])\n\n print(\"Plotting\")\n figSize1 = (19.2, 10.8)\n fontFactor = 0.75\n mpl.rcParams.update({'font.size': 22})\n mpl.rcParams['axes.linewidth'] = 2 #set the value globally\n markerSize = 10\n\n # plot world map\n fig = plt.figure(3, figsize=figSize1)\n ax = fig.gca()\n # Turn off tick labels\n ax.set_yticklabels([])\n ax.set_xticklabels([])\n colorValue = 0.85\n edgeColor = 
(colorValue*.85, colorValue*.85, colorValue*.85)\n\n for i in range(0,len(CountryData)) :\n if isinstance( CountryData[i][1], np.ndarray ):\n ax.add_artist(plt.Polygon(CountryData[i][1],edgecolor=edgeColor,\n facecolor=(colorValue,colorValue,colorValue),aa=True))\n else :\n for j in range(0,len(CountryData[i][1])) :\n ax.add_artist(plt.Polygon(CountryData[i][1][j],edgecolor=edgeColor,\n facecolor=(colorValue,colorValue,colorValue),aa=True))\n\n plt.plot(getColumn(mappedFrameNumLocationUnitedStates,2),\n getColumn(mappedFrameNumLocationUnitedStates,1),\n linestyle='',color=(0.0, 0.5, 1.0),marker='o',markersize=markerSize,\n markeredgecolor='black', markeredgewidth=0.75)\n plt.plot(getColumn(mappedFrameNumLocationRussia,2),\n getColumn(mappedFrameNumLocationRussia,1),\n linestyle='', color=(1.0, 0.0, 0.0),marker='o',markersize=markerSize,\n markeredgecolor='black', markeredgewidth=0.75)\n plt.plot(getColumn(mappedFrameNumLocationOther,2),\n getColumn(mappedFrameNumLocationOther,1),\n linestyle='', color=(0.0, 0.9, 0.0),marker='o',markersize=markerSize,\n markeredgecolor='black', markeredgewidth=0.75)\n plt.xlim([-180,180])\n plt.ylim([-60,90])\n #plt.show()\n plt.savefig( outputDir+'/worldMap.png', bbox_inches='tight')\n plt.savefig( outputDir+'/worldMap.svg', bbox_inches='tight')\n"
] |
[
[
"matplotlib.pyplot.ylim",
"numpy.cos",
"matplotlib.pyplot.savefig",
"numpy.sin",
"matplotlib.pyplot.xlim",
"matplotlib.rcParams.update",
"matplotlib.pyplot.Polygon",
"matplotlib.pyplot.figure"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
GeorgiyDemo/FA
|
[
"641a29d088904302f5f2164c9b3e1f1c813849ec",
"641a29d088904302f5f2164c9b3e1f1c813849ec",
"641a29d088904302f5f2164c9b3e1f1c813849ec",
"641a29d088904302f5f2164c9b3e1f1c813849ec"
] |
[
"Course I/Алгоритмы Python/Part2/семинары/pract4/xlsx_module.py",
"Course I/Алгоритмы Python/Part2/семинары/pract5/task11.py",
"Course I/Алгоритмы Python/Part1/семинары/pract2/pract2.py",
"Course I/Алгоритмы Python/Part2/семинары/pract5/защита/computer_module.py"
] |
[
"\"\"\"Модуль для формирования отчетов в файл MS Excel\"\"\"\n\nimport pandas as pd\n\nfrom task_module import UtilClass\n\n\nclass XlsxClass:\n OUT_XLSX_FILE = \"отчет.xlsx\"\n\n def __init__(self, obj_list):\n self.obj_list = obj_list\n self.processing()\n\n def transpose(self, l):\n return [list(e) for e in list(zip(*l))]\n\n def report_processing(self, report_dict):\n \"\"\"Формирование главного отчета из словаря\"\"\"\n\n report_results_list = []\n for current_cert, subdict in report_dict.items():\n\n sublist = []\n # Список заголовков\n mainheaders_list = []\n mainsubheaders_list = []\n for student, value in subdict.items():\n buf_list = []\n headers_list = [\" \"]\n subheaders_list = [\"ФИО\"]\n\n buf_list.append(student)\n for work in value[\"works\"]:\n headers_list.append(\"{} ({})\".format(work[0], work[1]))\n work = work[2:]\n subheaders_list.extend(\n [\"Баллы\", \"Дата дедлайна\", \"Дата завершения\", \"Дата защиты\"]\n )\n headers_list.extend([\"\" for _ in range(len(work) - 1)])\n buf_list.extend(work)\n\n headers_list.extend([\"ИТОГИ\", \"\", \"\", \"\"])\n # Это след этап странности, но мне нужна стат последовательность, что dict.items() сделать не может\n for k, v in {\n \"cert_points\": \"Баллы за аттестацию\",\n \"exam_points\": \"Баллы за экзамен\",\n \"total_points\": \"Общее кол-во баллов\",\n \"total_mark\": \"Общая оценка\",\n }.items():\n buf_list.append(value[\"info\"][k])\n subheaders_list.append(v)\n\n # В случае, если у разных студентов разные хедеры - ориентируемся на того, у кого их больше\n # По-хорошему, тогда надо синхронить столбцы\n if mainheaders_list == []:\n mainheaders_list = headers_list\n elif len(mainheaders_list) < len(headers_list):\n mainheaders_list = headers_list\n\n if mainsubheaders_list == []:\n mainsubheaders_list = subheaders_list\n elif len(mainsubheaders_list) < len(subheaders_list):\n mainsubheaders_list = subheaders_list\n sublist.append(buf_list)\n\n sublist.insert(0, subheaders_list)\n report_results_list.append([current_cert, mainheaders_list, sublist])\n\n return report_results_list\n\n def processing(self):\n\n c = UtilClass.converter\n report_dict = {}\n student_list, work_list, exam_list, cert_list = [[] for _ in range(4)]\n for obj in self.obj_list:\n student_list.append([obj.name, obj.group, obj.course, obj.points, obj.mark])\n\n # Аттестации\n for cert in obj.cert_obj_list:\n cert_list.append(\n [\n obj.name,\n cert.name,\n cert.points,\n c(cert.date_begin),\n c(cert.date_end),\n ]\n )\n\n if report_dict.get(cert.name) == None:\n report_dict[cert.name] = {}\n report_dict[cert.name][obj.name] = {\n \"info\": {\n \"cert_points\": cert.points,\n \"exam_points\": obj.exam_obj.points,\n \"total_points\": obj.points,\n \"total_mark\": obj.mark,\n },\n \"works\": [],\n }\n\n for work in cert.work_obj_list:\n report_dict[cert.name][obj.name][\"works\"].append(\n [\n work.name,\n work.work_type,\n work.points,\n c(work.date_deadline),\n c(work.date_completed),\n c(work.date_protected),\n ]\n )\n work_list.append(\n [\n obj.name,\n cert.name,\n work.name,\n work.work_type,\n work.points,\n c(work.date_deadline),\n c(work.date_completed),\n c(work.date_protected),\n ]\n )\n\n for obj in self.obj_list:\n exam_list.append([obj.name, obj.exam_obj.name, obj.exam_obj.points])\n\n student_list = dict(\n zip(\n [\"ФИО\", \"Группа\", \"Курс\", \"Баллы\", \"Оценка\"],\n self.transpose(student_list),\n )\n )\n exam_list = dict(\n zip([\"Студент\", \"Название\", \"Баллы\"], self.transpose(exam_list))\n )\n cert_list = dict(\n zip(\n 
[\"Студент\", \"Название\", \"Баллы\", \"Дата начала\", \"Дата конца\"],\n self.transpose(cert_list),\n )\n )\n work_list = dict(\n zip(\n [\n \"Студент\",\n \"Аттестация\",\n \"Название работы\",\n \"Тип работы\",\n \"Баллы\",\n \"Дата дедлайна\",\n \"Дата завершения\",\n \"Дата защиты\",\n ],\n self.transpose(work_list),\n )\n )\n\n cert1, cert2 = self.report_processing(report_dict)\n df1 = pd.DataFrame(cert1[2])\n df2 = pd.DataFrame(cert2[2])\n df3 = pd.DataFrame(student_list)\n df4 = pd.DataFrame(work_list)\n df5 = pd.DataFrame(cert_list)\n df6 = pd.DataFrame(exam_list)\n\n writer = pd.ExcelWriter(XlsxClass.OUT_XLSX_FILE, engine=\"xlsxwriter\")\n\n df1.to_excel(writer, sheet_name=cert1[0], index=False, header=cert1[1])\n df2.to_excel(writer, sheet_name=cert2[0], index=False, header=cert2[1])\n df3.to_excel(writer, sheet_name=\"Студенты\", index=False)\n df4.to_excel(writer, sheet_name=\"Работы\", index=False)\n df5.to_excel(writer, sheet_name=\"Аттестации\", index=False)\n df6.to_excel(writer, sheet_name=\"Экзамены\", index=False)\n\n writer.save()\n",
"\"\"\"\nЗадача 11:\nДан список/массив. После каждого элемента добавьте предшествующую ему часть списка.\nnp.array([1, 2, 3]) -> [1, 1, 2, 1, 2, 3]\n\"\"\"\nimport numpy as np\n\n\nclass MainClass:\n def __init__(self, arr=None):\n\n self.result = None\n if arr is None:\n self.arr_input()\n else:\n self.arr = arr\n\n self.processing()\n self.printer()\n\n def digital_checker(self, n):\n try:\n return int(n)\n except ValueError:\n return n\n\n def arr_input(self):\n base_arr = np.array([])\n try:\n n = int(input(\"Введите размерноcть массива -> \"))\n except ValueError:\n print(\"Некорректный ввод данных!\")\n return\n\n print(\"\\n*Заполнение массива №1*\")\n for i in np.arange(n):\n base_arr = np.append(\n base_arr,\n self.digital_checker(input(\"Введите элемент №{} -> \".format(i + 1))),\n )\n\n self.arr = np.array(base_arr)\n\n def processing(self):\n arr_a = self.arr\n arr_b = np.array([])\n for i in range(len(arr_a)):\n for j in range(i + 1):\n arr_b = np.append(arr_b, arr_a[j])\n self.result = arr_b\n\n def printer(self):\n print(\"Исходный массив:\\n{}\".format(self.arr))\n print(\"Результат:\\n{}\".format(self.result))\n\n\nif __name__ == \"__main__\":\n MainClass(np.array([1, 2, 3]))\n MainClass()\n",
"\"\"\"\nДеменчук Г.М., вариант 6, задания со звёздочкой *\n\"\"\"\nimport matplotlib.pyplot as plt\n\n\ndef get_scale(x, y):\n \"\"\"\n Метод для возврата точки на основании c и d\n Для динамического масштаба\n \"\"\"\n x = abs(x)\n y = abs(y)\n if x > y:\n return x\n return y\n\n\nclass LineGraphClass:\n \"\"\"\n Функции y=y(x), заданные графически*\n \"\"\"\n\n def __init__(self, c, d):\n\n self.c = c\n self.d = d\n self.graph()\n\n def graph(self):\n c = self.c\n d = self.d\n\n # Необходимо для адекватного масштаба\n p = get_scale(c, d)\n\n # Координаты основной оси\n m = ([-p * 4, p * 4], [0, 0])\n # Исходные точки\n x = [-p * 2, -c, 0]\n y = [0, 0, d]\n # Аннотации к слевой линии\n n1 = [\"\", \"-c\", \"d\"]\n # Аннотации к правой линии\n n2 = [\"-d\", \"c\", \"\"]\n\n plt.style.use(\"seaborn-dark\")\n ax = plt.figure().gca()\n\n # Ось координат X\n ax.plot(m[0], m[1], linewidth=1, color=\"k\")\n # Ось координат Y\n ax.plot(m[1], m[0], linewidth=1, color=\"k\")\n\n # Линия левая\n ax.plot(x, y, linewidth=2, marker=\"o\", color=\"b\")\n for i in range(len(x)):\n ax.annotate(n1[i], (x[i], y[i] + 0.5))\n\n # Линия правая\n x = [-e for e in reversed(x)]\n y = [-e for e in reversed(y)]\n\n ax.plot(x, y, linewidth=2, marker=\"o\", color=\"b\")\n for i in range(len(x)):\n ax.annotate(n2[i], (x[i], y[i] + 0.5))\n plt.show()\n\n\ndef main():\n try:\n d = float(input(\"Введите d -> \"))\n c = float(input(\"Введите c -> \"))\n except:\n print(\"Проблема ввода данных!\")\n return\n\n LineGraphClass(c, d)\n\n\nif __name__ == \"__main__\":\n main()\n",
"import random\n\nimport numpy as np\nfrom user_module import UserAnalyserClass\nfrom util_module import UtilClass\n\n\nclass ComputerAnalyserClass(UserAnalyserClass):\n \"\"\"\n Класс ограничений и выявление некорректного хода со стороны компьютера\n - figure_detector - у UserAnalyserClass\n - backstep_detector - переписываем\n - diagonal_detector - переписываем\n - war_detector - переписываем\n - fieldtype_detector - у UserAnalyserClass\n \"\"\"\n\n def __init__(self, command_dict, board_obj, info=False):\n\n self.boolean_result = False\n self.results_list = []\n self.command_dict = command_dict\n self.board_obj = board_obj\n self.info = info\n\n self.figure_detector()\n self.backstep_detector()\n self.diagonal_detector()\n self.fieldtype_detector()\n if command_dict[\"mode\"] == \"war\":\n self.war_detector()\n\n if all(self.results_list):\n self.boolean_result = True\n\n def backstep_detector(self):\n \"\"\"Проверка на перемещение назад\"\"\"\n d = self.command_dict\n if d[\"from\"][\"x\"] < d[\"to\"][\"x\"]:\n self.results_list.append(False)\n else:\n self.results_list.append(True)\n\n def diagonal_detector(self):\n \"\"\"Проверка на осуществление перехода по диагонали\"\"\"\n # Возможные пути, куда может пойти шашка (их всего 4)\n board_obj = self.board_obj\n d = self.command_dict\n target_x = d[\"from\"][\"x\"]\n target_y = UtilClass.char2xint(d[\"from\"][\"y\"])\n # Возможные клетки, куда можно пойти и которые есть на доске\n\n # Т.к. использование \"коротких\" перемещений при атаке просто невозможно\n if d[\"mode\"] == \"war\":\n allowedfields_list = [\n [target_x - 2, target_y + 2],\n [target_x - 2, target_y - 2],\n ]\n # При тихом ходе возмодны только короткие перемещения\n else:\n allowedfields_list = [\n [target_x - 1, target_y + 1],\n [target_x - 1, target_y - 1],\n ]\n\n validated_points = [\n e for e in allowedfields_list if board_obj.detect_element(*e)\n ]\n\n if [d[\"to\"][\"x\"], UtilClass.char2xint(d[\"to\"][\"y\"])] in validated_points:\n self.results_list.append(True)\n else:\n self.results_list.append(False)\n\n def war_detector(self):\n \"\"\"\n Проверка на осуществление перехода с боем\n - Проверка на то, чтоб была фигура, которую мы атакуем\n - Поиск и установление координат фигуры, выставление в self.command_dict\n - Проверка на то, чтоб цвет фигуры был не наш\n \"\"\"\n d = self.command_dict\n board_obj = self.board_obj\n\n x_start = d[\"from\"][\"x\"]\n y_start = UtilClass.char2xint(d[\"from\"][\"y\"])\n\n x_finish = d[\"to\"][\"x\"]\n y_finish = UtilClass.char2xint(d[\"to\"][\"y\"])\n\n # Соседние точки относительно точки назначения\n middle_points = np.array(\n [\n e\n for e in [[x_finish + 1, y_finish - 1], [x_finish + 1, y_finish + 1]]\n if board_obj.detect_element(*e)\n ]\n )\n\n # Возможные точки, где стоит фигура\n validated_points = np.array(\n [\n e\n for e in [[x_start - 1, y_start + 1], [x_start - 1, y_start - 1]]\n if board_obj.detect_element(*e)\n ]\n )\n\n attack_points = []\n for i in np.arange(middle_points.shape[0]):\n for j in np.arange(validated_points.shape[0]):\n if (\n middle_points[i][0] == validated_points[j][0]\n and middle_points[i][1] == validated_points[j][1]\n ):\n attack_points = middle_points[i]\n break\n\n # Если нет точек пересечения\n if len(attack_points) == 0:\n self.results_list.append(False)\n return\n\n self.command_dict[\"enemy\"] = {}\n self.command_dict[\"enemy\"][\"x\"], self.command_dict[\"enemy\"][\"y\"] = attack_points\n attack_x, attack_y = attack_points\n\n # Выбрали точку, где располагается предполагаемый враг\n 
attack_field = board_obj.board[attack_x][attack_y]\n # Если есть чужая фигура на этой точке\n if (\n not attack_field.isfree()\n and attack_field.figure_obj.color != d[\"user_color\"]\n ):\n self.results_list.append(True)\n else:\n self.results_list.append(False)\n\n\nclass ComputerGameClass:\n \"\"\"\n Класс с основной логикой автоматического хода компьютера\n - Суть заключается просто в рандомном генерировани словаря dict относительно своих фигур по типу\n {'from': {'x': 2, 'y': 'c'}, 'to': {'x': 3, 'y': 'b'}, 'mode': 'peace', 'user_color': 'black'}\n \"\"\"\n\n def __init__(self, board_obj, user_color):\n # Используется для контроля тупикового хода со стороны компьютера\n self.result = True\n # Статистика шагов\n self.result_dict = {}\n\n self.board_obj = board_obj\n self.user_color = user_color\n self.processing()\n\n def processing(self):\n uc = self.user_color\n # Цвет, за который ходит компьютер\n reverse_uc = \"black\" if uc == \"white\" else \"white\"\n myfields_arr = np.array([])\n all_d = []\n board = self.board_obj.board\n\n for i in np.arange(board.shape[0]):\n for j in np.arange(board.shape[1]):\n if (\n not board[i][j].isfree()\n and board[i][j].figure_obj.color == reverse_uc\n ):\n myfields_arr = np.append(myfields_arr, board[i][j])\n\n # Для каждой шашки формируем возможные новые координаты, перемешиваем это и закидываем в ComputerAnalyserClass\n for field in myfields_arr:\n\n x, y = field.figure_obj.coord_x, field.figure_obj.coord_y\n y_char = UtilClass.xint2char(y)\n\n # Возможные короткие шаги\n # [x-1,y-1]\n if self.board_obj.detect_element(y - 1, x - 1):\n new_y, new_x = UtilClass.xint2char(y - 1), x - 1\n all_d.append(\n {\n \"from\": {\"x\": x, \"y\": y_char},\n \"to\": {\"x\": new_x, \"y\": new_y},\n \"mode\": \"peace\",\n \"user_color\": reverse_uc,\n }\n )\n\n # [x-1,y+1]\n if self.board_obj.detect_element(y + 1, x - 1):\n new_y, new_x = UtilClass.xint2char(y + 1), x - 1\n all_d.append(\n {\n \"from\": {\"x\": x, \"y\": y_char},\n \"to\": {\"x\": new_x, \"y\": new_y},\n \"mode\": \"peace\",\n \"user_color\": reverse_uc,\n }\n )\n\n # Длинные шаги\n # [x-2,y+2]\n if self.board_obj.detect_element(y + 2, x - 2):\n new_y, new_x = UtilClass.xint2char(y + 2), x - 2\n all_d.append(\n {\n \"from\": {\"x\": x, \"y\": y_char},\n \"to\": {\"x\": new_x, \"y\": new_y},\n \"mode\": \"war\",\n \"user_color\": reverse_uc,\n }\n )\n\n # [x-2,y-2]\n if self.board_obj.detect_element(y - 2, x - 2):\n new_y, new_x = UtilClass.xint2char(y - 2), x - 2\n all_d.append(\n {\n \"from\": {\"x\": x, \"y\": y_char},\n \"to\": {\"x\": new_x, \"y\": new_y},\n \"mode\": \"war\",\n \"user_color\": reverse_uc,\n }\n )\n\n random.shuffle(all_d)\n all_d.sort(key=lambda x: x[\"mode\"], reverse=True)\n\n for d in all_d:\n obj = ComputerAnalyserClass(d, self.board_obj)\n if obj.boolean_result:\n self.result_dict = obj.command_dict\n self.computer_mode()\n break\n else:\n self.result = False\n\n def computer_mode(self):\n \"\"\"\n Осуществление хода компьютера\n - Выбираются координаты ячейки из сформированного словаря\n - Берется объект фигуры, меняются координаты фигуры на обновлённые\n - Осуществляется привязка к новой ячейке фигуры\n - Из старой ячейки освобождается объект фигуры\n\n - Если осуществилась атака, то удаляем фигуру врага из ячейки enemy\n \"\"\"\n\n d = self.result_dict\n board = self.board_obj.board\n\n mode = d[\"mode\"]\n f1 = [d[\"from\"][\"x\"], UtilClass.char2xint(d[\"from\"][\"y\"])]\n f2 = [d[\"to\"][\"x\"], UtilClass.char2xint(d[\"to\"][\"y\"])]\n x1, y1 = f1\n x2, y2 = f2\n 
field_from = board[x1][y1]\n field_to = board[x2][y2]\n\n figure_obj = field_from.figure_obj\n figure_obj.coord_x, figure_obj.coord_y = f2\n\n # Присваиваем фигуру обновленной ячейке\n field_to.field_reserve(figure_obj)\n # Освобождаем из старой\n field_from.field_free()\n\n # Если мы кого-то бъём, то удаляем фигуру с той ячейки\n if mode == \"war\":\n attack_x, attack_y = d[\"enemy\"][\"x\"], d[\"enemy\"][\"y\"]\n board[attack_x][attack_y].field_free()\n\n self.board_obj.board = board\n"
] |
[
[
"pandas.DataFrame",
"pandas.ExcelWriter"
],
[
"numpy.arange",
"numpy.append",
"numpy.array"
],
[
"matplotlib.pyplot.show",
"matplotlib.pyplot.style.use",
"matplotlib.pyplot.figure"
],
[
"numpy.arange",
"numpy.append",
"numpy.array"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"1.3",
"0.19",
"1.1",
"1.5",
"0.24",
"0.20",
"1.0",
"0.25",
"1.2"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
chackoge/ERNIE_Plus
|
[
"454518f28b39a6f37ad8dde4f3be15d4dccc6f61"
] |
[
"P2_studies/theta_plus/Analysis/Mapping/get_rated_mcl_graclus_all_data.py"
] |
[
"import pandas as pd\nfrom sqlalchemy import create_engine\nfrom sys import argv\n\nuser_name = argv[1]\npassword = arv[2]\nschema = \"theta_plus\"\nsql_scheme = 'postgresql://' + user_name + ':' + password + '@localhost:5432/ernie'\nengine = create_engine(sql_scheme)\n\n\nmain_table_query = \"\"\"\nSELECT er.expert_rating, er.imm1985_1995_cluster_no, s.current_cluster_size AS imm1985_1995_cluster_size, \n s.match_year, s.match_cluster_no mcl_year_cluster_no\nFROM theta_plus.expert_ratings er\nJOIN theta_plus.imm1985_1995_mcl_size_30_350_match_to_year_slice s\n ON er.imm1985_1995_cluster_no = s.current_cluster_number;\n\"\"\"\n\n\nmain_table = pd.read_sql(main_table_query, con=engine)\nmain_table.name = 'rated_mcl_graclus_all_data'\n\nnew_columns = ['mcl_year_cluster_size', 'mcl_year_conductance','mcl_year_coherence', 'mcl_year_int_edges', 'mcl_year_boundary', 'mcl_year_sum_article_score', 'mcl_year_max_article_score','mcl_year_median_article_score', 'graclus_100_cluster_no', 'graclus_100_cluster_size', 'graclus_100_to_mcl_ratio', 'graclus_100_total_intersection', 'graclus_100_total_union', 'graclus_100_intersection_union_ratio', 'graclus_100_multiple_options','graclus_100_conductance','graclus_100_coherence', 'graclus_100_int_edges', 'graclus_100_boundary', 'graclus_100_sum_article_score', 'graclus_100_max_article_score', 'graclus_100_median_article_score', 'graclus_half_mclsize_cluster_no', 'graclus_half_mclsize_cluster_size', 'graclus_half_mclsize_to_mcl_ratio', 'graclus_half_mclsize_total_intersection', 'graclus_half_mclsize_total_union', 'graclus_half_mclsize_intersection_union_ratio', 'graclus_half_mclsize_multiple_options', 'graclus_half_mclsize_conductance', 'graclus_half_mclsize_coherence', 'graclus_half_mclsize_int_edges', 'graclus_half_mclsize_boundary', 'graclus_half_mclsize_sum_article_score','graclus_half_mclsize_max_article_score','graclus_half_mclsize_median_article_score']\n\nfor column in new_columns:\n main_table[column] = None\n\n\nprint(f'Working on table: {schema}.{main_table.name}')\nprint(f'The size of the table is {len(main_table)}')\nfor i in range(len(main_table)):\n \n match_year = 'imm' + str(main_table.at[ i, 'match_year'])\n \n mcl_year_table_name = match_year + \"_all_merged_unshuffled\"\n mcl_year_cluster_no = str(main_table.at[ i, 'mcl_year_cluster_no'])\n mcl_year_query = \"SELECT cluster_size, conductance, coherence, int_edges, boundary, sum_article_score, max_article_score, median_article_score FROM theta_plus.\" + mcl_year_table_name + \" WHERE cluster_no=\" + mcl_year_cluster_no + \";\"\n \n graclus_100_cluster_no_query = \"SELECT * FROM theta_plus.\" + match_year + \"_match_to_graclus WHERE mcl_cluster_no=\" + mcl_year_cluster_no + \";\"\n graclus_100_cluster_no_table = pd.read_sql(graclus_100_cluster_no_query, con=engine)\n \n graclus_100_cluster_no = str(graclus_100_cluster_no_table.at[0, 'graclus_cluster_no'])\n graclus_100_cluster_size = graclus_100_cluster_no_table.at[0, 'graclus_cluster_size']\n graclus_100_to_mcl_ratio = graclus_100_cluster_no_table.at[0, 'graclus_to_mcl_ratio']\n graclus_100_total_intersection = graclus_100_cluster_no_table.at[0, 'total_intersection']\n graclus_100_total_union = graclus_100_cluster_no_table.at[0, 'total_union']\n graclus_100_intersection_union_ratio = graclus_100_cluster_no_table.at[0, 'intersect_union_ratio']\n graclus_100_multiple_options = graclus_100_cluster_no_table.at[0, 'multiple_options']\n\n graclus_100_table_name = match_year + \"_all_merged_graclus\"\n graclus_100_query = \"SELECT coherence, 
conductance, int_edges, boundary, sum_article_score, max_article_score, median_article_score FROM theta_plus.\" + graclus_100_table_name + \" WHERE cluster_no=\" + graclus_100_cluster_no + \";\"\n \n graclus_half_mclsize_cluster_no_query = \"SELECT * FROM theta_plus.\" + match_year + \"_match_to_graclus_half_mclsize WHERE mcl_cluster_no=\" + mcl_year_cluster_no + \";\"\n graclus_half_mclsize_cluster_no_table = pd.read_sql(graclus_half_mclsize_cluster_no_query, con=engine)\n \n graclus_half_mclsize_cluster_no = str(graclus_half_mclsize_cluster_no_table.at[0, 'graclus_cluster_no'])\n graclus_half_mclsize_cluster_size = graclus_half_mclsize_cluster_no_table.at[0, 'graclus_cluster_size']\n graclus_half_mclsize_to_mcl_ratio = graclus_half_mclsize_cluster_no_table.at[0, 'graclus_to_mcl_ratio']\n graclus_half_mclsize_total_intersection = graclus_half_mclsize_cluster_no_table.at[0, 'total_intersection']\n graclus_half_mclsize_total_union = graclus_half_mclsize_cluster_no_table.at[0, 'total_union']\n graclus_half_mclsize_intersection_union_ratio = graclus_half_mclsize_cluster_no_table.at[0, 'intersect_union_ratio']\n graclus_half_mclsize_multiple_options = graclus_half_mclsize_cluster_no_table.at[0, 'multiple_options']\n\n graclus_half_mclsize_table_name = match_year + \"_all_merged_graclus_half_mclsize\"\n graclus_half_mclsize_query = \"SELECT coherence, conductance, int_edges, boundary, sum_article_score, max_article_score, median_article_score FROM theta_plus.\" + graclus_half_mclsize_table_name + \" WHERE cluster_no=\" + graclus_half_mclsize_cluster_no + \";\"\n \n mcl_year_table = pd.read_sql(mcl_year_query, con=engine)\n graclus_100_table = pd.read_sql(graclus_100_query, con=engine)\n graclus_half_mclsize_table = pd.read_sql(graclus_half_mclsize_query, con=engine)\n \n main_table.at[i, 'mcl_year_cluster_size'] = (mcl_year_table.at[0, 'cluster_size'])\n main_table.at[i, 'mcl_year_conductance'] = (mcl_year_table.at[0, 'conductance'])\n main_table.at[i,'mcl_year_coherence'] = mcl_year_table.at[0, 'coherence']\n main_table.at[i,'mcl_year_int_edges'] = mcl_year_table.at[0, 'int_edges']\n main_table.at[i,'mcl_year_boundary'] = mcl_year_table.at[0, 'boundary']\n main_table.at[i,'mcl_year_sum_article_score'] = mcl_year_table.at[0, 'sum_article_score']\n main_table.at[i,'mcl_year_max_article_score'] = mcl_year_table.at[0, 'max_article_score']\n main_table.at[i,'mcl_year_median_article_score'] = mcl_year_table.at[0, 'median_article_score']\n \n main_table.at[i,'graclus_100_cluster_no'] = graclus_100_cluster_no\n main_table.at[i,'graclus_100_cluster_size'] = graclus_100_cluster_size \n main_table.at[i,'graclus_100_to_mcl_ratio'] = graclus_100_to_mcl_ratio\n main_table.at[i,'graclus_100_total_intersection'] = graclus_100_total_intersection \n main_table.at[i,'graclus_100_total_union'] = graclus_100_total_union\n main_table.at[i,'graclus_100_intersection_union_ratio'] = graclus_100_intersection_union_ratio\n main_table.at[i,'graclus_100_multiple_options'] = graclus_100_multiple_options\n \n main_table.at[i,'graclus_100_conductance'] = graclus_100_table.at[0, 'conductance']\n main_table.at[i,'graclus_100_coherence'] = graclus_100_table.at[0, 'coherence']\n main_table.at[i,'graclus_100_int_edges'] = graclus_100_table.at[0, 'int_edges']\n main_table.at[i,'graclus_100_boundary'] = graclus_100_table.at[0, 'boundary']\n main_table.at[i,'graclus_100_sum_article_score'] = graclus_100_table.at[0, 'sum_article_score']\n main_table.at[i,'graclus_100_max_article_score'] = graclus_100_table.at[0, 
'max_article_score']\n main_table.at[i,'graclus_100_median_article_score'] = graclus_100_table.at[0, 'median_article_score']\n \n main_table.at[i,'graclus_half_mclsize_cluster_no'] = graclus_half_mclsize_cluster_no\n main_table.at[i,'graclus_half_mclsize_cluster_size'] = graclus_half_mclsize_cluster_size \n main_table.at[i,'graclus_half_mclsize_to_mcl_ratio'] = graclus_half_mclsize_to_mcl_ratio \n main_table.at[i,'graclus_half_mclsize_total_intersection'] = graclus_half_mclsize_total_intersection \n main_table.at[i,'graclus_half_mclsize_total_union'] = graclus_half_mclsize_total_union \n main_table.at[i,'graclus_half_mclsize_intersection_union_ratio'] = graclus_half_mclsize_intersection_union_ratio\n main_table.at[i,'graclus_half_mclsize_multiple_options'] = graclus_half_mclsize_multiple_options \n \n main_table.at[i,'graclus_half_mclsize_conductance'] = graclus_half_mclsize_table.at[0, 'conductance']\n main_table.at[i,'graclus_half_mclsize_coherence'] = graclus_half_mclsize_table.at[0, 'coherence']\n main_table.at[i,'graclus_half_mclsize_int_edges'] = graclus_half_mclsize_table.at[0, 'int_edges']\n main_table.at[i,'graclus_half_mclsize_boundary'] = graclus_half_mclsize_table.at[0, 'boundary']\n main_table.at[i,'graclus_half_mclsize_sum_article_score'] = graclus_half_mclsize_table.at[0, 'sum_article_score']\n main_table.at[i,'graclus_half_mclsize_max_article_score'] = graclus_half_mclsize_table.at[0, 'max_article_score']\n main_table.at[i,'graclus_half_mclsize_median_article_score'] = graclus_half_mclsize_table.at[0, 'median_article_score']\n\nprint(\"Done updating table.\")\nmain_table = main_table.astype(float)\nmain_table.to_sql('rated_mcl_graclus_all_data', schema=schema, con=engine, if_exists='replace', index=False)\nprint(\"All completed.\")"
] |
[
[
"pandas.read_sql"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
datawhalechina/machine-learning-toy-code
|
[
"75a3438e1ad2681413b102c2f257c4e011e0e310"
] |
[
"ml-with-sklearn/AdaBoost/AdaBoost.py"
] |
[
"from sklearn.ensemble import AdaBoostClassifier\nfrom sklearn import datasets\nfrom sklearn.model_selection import train_test_split\nfrom sklearn import metrics\n\niris = datasets.load_iris()\nX = iris.data\ny = iris.target\n\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)\n\nprint(\"X_train:\",len(X_train),\"; X_test:\",len(X_test),\"; y_train:\",len(y_train),\"; y_test:\",len(y_test))\n\n# Create adaboost object\nAdbc = AdaBoostClassifier(n_estimators=50,\n learning_rate=1.5)\n# Train Adaboost \nmodel = Adbc.fit(X_train, y_train)\n\n#Predict the response for test dataset\ny_pred = model.predict(X_test)\n\nprint(\"Accuracy:\",metrics.accuracy_score(y_test, y_pred))\n#('Accuracy:', 0.8888888888888888)"
] |
[
[
"sklearn.ensemble.AdaBoostClassifier",
"sklearn.datasets.load_iris",
"sklearn.model_selection.train_test_split",
"sklearn.metrics.accuracy_score"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
YOGESH-TECH/analyzing-weather-dataset
|
[
"16ae97cb4d51a71808985368d17afeb94a901b56"
] |
[
"code.py"
] |
[
"# --------------\n#Importing the modules\r\nimport pandas as pd\r\nimport numpy as np\r\nfrom scipy.stats import mode \r\n\r\n\r\n\r\n\r\n#Code for categorical variable\r\ndef categorical(df):\r\n \"\"\" Extract names of categorical column\r\n \r\n This function accepts a dataframe and returns categorical list,\r\n containing the names of categorical columns(categorical_var).\r\n \r\n Keyword arguments:\r\n df - Pandas dataframe from which the columns name will be extracted\r\n \r\n Returns:\r\n categorical_var - List of categorical features\r\n \"\"\"\r\n categorical_var=df.select_dtypes(include='object').columns.tolist()\r\n return categorical_var\r\n\r\n#Code for numerical variable\r\ndef numerical(df):\r\n \"\"\" Extract names of numerical column\r\n \r\n This function accepts a dataframe and returns numerical list,\r\n containing the names of numerical columns(numerical_var).\r\n \r\n Keyword arguments:\r\n df - Pandas dataframe from which the columns name will be extracted\r\n \r\n Returns:\r\n numerical_var - List of numerical features\r\n \"\"\"\r\n numerical_var=df.select_dtypes(include='number').columns.tolist()\r\n return numerical_var\r\n\r\n\r\n#code to check distribution of variable\r\ndef clear(df,col,val):\r\n \"\"\" Check distribution of variable\r\n \r\n This function accepts a dataframe,column(feature) and value which returns count of the value,\r\n containing the value counts of a variable(value_counts)\r\n \r\n Keyword arguments:\r\n df - Pandas dataframe\r\n col - Feature of the datagrame\r\n val - value of the feature\r\n \r\n Returns:\r\n value_counts - Value count of the feature \r\n \"\"\"\r\n value_counts=df[col].value_counts()[val]\r\n return value_counts\r\n\r\n\r\n#Code to check instances based on the condition\r\ndef instances_based_condition(df,col1,val1,col2,val2):\r\n \"\"\" Instances based on the condition\r\n \r\n This function accepts a dataframe, 2 columns(feature) and 2 values which returns the dataframe\r\n based on the condition.\r\n \r\n Keyword arguments:\r\n df - Pandas dataframe which has the data.\r\n col1 - First feature of the dataframe on which you want to apply the filter\r\n val1 - Value to be filtered on the first feature\r\n col2 - Second feature of the dataframe on which you want to apply the filter\r\n val2 - Value to be filtered on second feature\r\n \r\n Returns:\r\n instance - Generated dataframe\r\n \"\"\"\r\n instance=df[(df[col1]>val1) & (df[col2]==val2)]\r\n return instance\r\n \r\n\r\n\r\n\r\n# Code to calculate different aggreagted values according to month\r\n\r\ndef agg_values_ina_month(df,date_col,agg_col, agg):\r\n \"\"\" Aggregate values according to month\r\n \r\n This function accepts a dataframe, 2 columns(feature) and aggregated funcion(agg) which returns the Pivot \r\n table with different aggregated value of the feature with an index of the month.\r\n \r\n Keyword arguments:\r\n df - Pandas dataframe which has the data.\r\n date_col - Date feature of the dataframe on which you want to apply to_datetime conversion\r\n agg_col - Feature of the dataframe on which values will be aggregated.\r\n agg - The function to be used for aggregating the df (eg. 
'mean', 'min', 'max').\r\n \r\n Returns:\r\n aggregated_value - Generated pivot table\r\n \"\"\"\r\n df[date_col]=pd.to_datetime(df[date_col])\r\n aggregate={'mean':np.mean,'max':np.max,'min':np.min,'sum':np.sum,'len':len}\r\n aggregated_value=df.pivot_table(values=[agg_col],index=df[date_col].dt.month,aggfunc={agg_col:aggregate[agg]})\r\n return aggregated_value\r\n\r\n# Code to group values based on the feature\r\ndef group_values(df,col1,agg1):\r\n \"\"\" Agrregate values by grouping\r\n \r\n This function accepts a dataframe, 1 column(feature) and aggregated function(agg1) which groupby the \r\n datframe based on the column.\r\n \r\n Keyword arguments:\r\n df - Pandas dataframe which has the data.\r\n col1 - Feature of the dataframe on which values will be aggregated.\r\n agg1 - The function to be used for aggregating the df (eg. 'mean', 'min', 'max').\r\n \r\n Returns:\r\n grouping - Dataframe with all columns on which it is grouped on.\r\n \"\"\"\r\n aggregate={'mean':np.mean,'max':np.max,'min':np.min,'sum':np.sum,'len':len}\r\n grouping=df.groupby(col1).agg(aggregate[agg1])\r\n return grouping\r\n\r\n\r\n\r\n# function for conversion \r\ndef convert(df,celsius):\r\n \"\"\" Convert temperatures from celsius to fahrenhheit\r\n \r\n This function accepts a dataframe, 1 column(feature) which returns the dataframe with converted values from \r\n celsius to fahrenhheit.\r\n \r\n Keyword arguments:\r\n df - Pandas dataframe which has the data.\r\n celsius - Temperature feature of the dataframe which you want to convert to fahrenhheit\r\n \r\n Returns:\r\n converted_temp - Generated dataframe with Fahrenhheit temp.\r\n \r\n \"\"\"\r\n centigrade_temps = df[celsius]\r\n converted_temp = 1.8*centigrade_temps + 32\r\n return converted_temp\r\n\r\n\r\n# Load the weather_2012 data csv file and store it in weather variable. The path of the dataset has been stored in the variable `path` for you.\r\nweather=pd.read_csv(path)\r\nweather.head()\r\n\r\n\r\n# As you have now loaded the weather data you might want to check the categorical and numerical variables. You can check it by calling categorical and numerical function. \r\nprint(categorical(weather))\r\nprint(numerical(weather))\r\n\r\n\r\n\r\n#You might be interested in checking the distribution of a specific value like the number of times the weather was exactly Cloudy in the given column. Feel free to check on other values.\r\n#You can check it by calling the function clear with respective parameters.\r\n#By using index of the value or name of the value you can check the number of count\r\nprint(clear(weather,\"Weather\",'Clear'))\r\nprint(clear(weather,\"Wind Spd (km/h)\", 4))\r\n\r\n\r\n\r\n# Now suppose you want to check some instances based on a specific condition like when the wind speed was above 35 and visibility was 25. You can dicretly check it by calling the function instances_based_condition with respective parameters.\r\nwind_speed_35_vis_25 = instances_based_condition(weather,'Wind Spd (km/h)',35,'Visibility (km)',25)\r\n\r\n\r\n\r\n#You have temperature data and want to calculate the mean temperature recorded by month.You can generate a pivot table which contains the aggregated values(like mean, max ,min, sum, len) recoreded by month. \r\n#You can call the function agg_values_ina_month with respective parameters. 
\r\n\r\nagg_values_ina_month(weather,'Date/Time','Dew Point Temp (C)','mean')\r\n\r\n# To groupby based on a column like you want to groupby on Weather column and then aggregate the mean values of each column for different types of weather using mean. You can call the function group_values.\r\n# Feel free to try on diffrent aggregated functions like max, min, sum, len\r\nmean_weather = group_values(weather,\"Weather\",'mean')\r\n\r\n\r\n# You have a temperature data and wanted to convert celsius temperature into fahrehheit temperatures you can call the function convert.\r\n\r\n\r\n\r\nweather_fahrehheit = convert(weather,\"Temp (C)\")\n\n\n"
] |
[
[
"pandas.read_csv",
"pandas.to_datetime"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.3",
"1.1",
"1.5",
"1.2"
],
"scipy": [],
"tensorflow": []
}
] |
ArthurFDLR/race-track-generator
|
[
"3fc5eea60c3ea2f451a8be51344fabafed06ffb5"
] |
[
"src/get-tracks.py"
] |
[
"\nimport numpy as np\nimport cv2\nfrom matplotlib import pyplot as plt\nimport json\nimport sys\nfrom json_encoder import NoIndent, MyEncoder\n\nclass TracksExtraction:\n\n def __init__(self, img_path:str, input_data_path:str, display_factor:float=1.0):\n self.display_factor = display_factor\n with open(input_data_path) as f:\n self.data = json.load(f)\n self.img_raw = cv2.imread(img_path,cv2.IMREAD_GRAYSCALE)\n self.img_threshold = cv2.threshold(self.img_raw,20,1,cv2.THRESH_BINARY)[1]\n self.img_display = cv2.resize(self.img_raw, (int(self.img_raw.shape[1] * display_factor), int(self.img_raw.shape[0] * display_factor)))\n\n def save_data(self, path_save:str):\n with open(path_save, 'w') as outfile:\n json.dump(self.data, outfile, indent=4)\n print(\"Tracks data saved in\", path_save)\n\n def __find_upper_left_from(self, x=0, y=0):\n i=1\n while i:\n i+=1\n for j in range(i):\n if self.img_threshold[x+i-j-1, y+j] == 0:\n return (x+i-j-1, y+j)\n return None\n\n def __boundary_following(self, upper_left_corner=(0,0), stay_out=True, allow_diag=True):\n \n x_n = [0, -1, -1, -1, 0, 1, 1, 1] if allow_diag else [0, -1, 0, 1]\n y_n = [1, 1, 0, -1, -1, -1, 0, 1] if allow_diag else [1, 0, -1, 0]\n nbr_neighbore = len(x_n)\n angle = nbr_neighbore//2\n \n def get_neighbor(p,a):\n x, y = p[0] + x_n[a], p[1] + y_n[a]\n if (0 <= x < self.img_threshold.shape[0]) and (0 <= y < self.img_threshold.shape[1]):\n return self.img_threshold[x, y] == 0\n else: return None\n\n b = self.__find_upper_left_from(upper_left_corner[0], upper_left_corner[1])\n b_init = False\n coord_border = [b]\n chain_code = []\n\n while True:\n # Revolve around b until hit border\n while not get_neighbor(b, angle):\n angle = (angle - 1) if angle else (nbr_neighbore - 1)\n # Prefer direct neighbore\n if (not stay_out) and allow_diag and (angle%2 == 1) \\\n and get_neighbor(b, (angle - 1) if angle else 7):\n angle = (angle - 1) if angle else (nbr_neighbore - 1)\n # Update b <- n(k)\n b = (b[0] + x_n[angle], b[1] + y_n[angle])\n # End condition: two successive boundary pixels already visited\n if b_init:\n if b == coord_border[1]: break\n else: b_init = False\n if b == coord_border[0]: b_init = True\n # Store new border pixel\n chain_code.append(angle)\n coord_border.append(b)\n # Reset angle, c <- n(k−1)\n angle = (angle+angle%2+2)%8 if allow_diag else (angle+1)%4\n return coord_border, chain_code\n\n def capture_track_positions(self):\n def onClick(event, x, y, flags, param):\n nonlocal index_capture_track\n self.img_display\n if event == cv2.EVENT_RBUTTONDOWN:\n print(\"Pass track\")\n index_capture_track += 1\n print(\"No more tracks\" if index_capture_track>=len(self.data[\"tracks\"]) else self.data[\"tracks\"][index_capture_track][\"name\"])\n if event == cv2.EVENT_LBUTTONDOWN:\n if index_capture_track>=len(self.data[\"tracks\"]):\n print(\"No more tracks\")\n return\n x_scaled, y_scaled = int(x / self.display_factor), int(y / self.display_factor)\n print(x_scaled, y_scaled)\n self.data[\"tracks\"][index_capture_track]['upper-left-corner'] = (y_scaled,x_scaled)\n index_capture_track += 1\n\n self.img_display = cv2.circle(self.img_display, (x,y), 2, color=(0, 0, 255), thickness=-1)\n cv2.imshow('image', self.img_display)\n print(\"No more tracks\" if index_capture_track>=len(self.data[\"tracks\"]) else self.data[\"tracks\"][index_capture_track][\"name\"])\n \n index_capture_track = 0\n print(self.data[\"tracks\"][index_capture_track]['name'])\n cv2.imshow('image', self.img_display)\n cv2.setMouseCallback('image', onClick)\n 
cv2.waitKey(0)\n cv2.destroyAllWindows()\n\n def tracks_point_generation(self):\n max_coordinate = 0.0\n borders_centered = {}\n for track in self.data[\"tracks\"]:\n if 'upper-left-corner' in track:\n border, _ = self.__boundary_following(track['upper-left-corner'])\n border_centered = np.array(border)\n border_centered = border_centered - border_centered.mean(0)\n borders_centered[track['name']] = border_centered\n max_coordinate = max(max_coordinate, np.absolute(border_centered).max())\n \n for i in range(len(self.data[\"tracks\"])):\n if self.data[\"tracks\"][i]['name'] in borders_centered:\n self.data[\"tracks\"][i]['points'] = (borders_centered[self.data[\"tracks\"][i]['name']] / max_coordinate).tolist()\n \n def display_track(self, index, save:bool=False, path:str=\"./\"):\n fig = plt.figure()\n ax = fig.add_subplot(111)\n ax.scatter(*np.array(self.data[\"tracks\"][index]['points']).transpose(), s=0.2)\n ax.set_xlim(-1, 1)\n ax.set_ylim(-1, 1)\n ax.set_aspect('equal')\n ax.set_title(self.data[\"tracks\"][index]['name'])\n if save:\n fig.savefig(path+self.data[\"tracks\"][index]['name'].replace(' ', '_')+'.png', dpi=250)\n plt.close('all')\n else:\n fig.show()\n \n def generate_track_plot(self, folder_path:str):\n for i in range(len(self.data[\"tracks\"])):\n if 'points' in self.data[\"tracks\"][i]:\n print(self.data[\"tracks\"][i]['name'])\n self.display_track(i, True, folder_path)\n \n def save_data_normalized(self, path_save:str, points_order:int=0):\n clean_data = {}\n for track in self.data[\"tracks\"]:\n if 'points' in track:\n border_complex = np.array([c[1] + 1j * c[0] for c in track['points']])\n \n if points_order>0:\n border_complex = np.interp(np.linspace(0, len(border_complex), points_order), np.arange(0, len(border_complex)), border_complex)\n \n fourier_descriptors = np.fft.fft(border_complex, len(border_complex))\n clean_data[track['name']] = {\n 'points': NoIndent(track['points']),\n 'fourier-descriptors': {\n 'real' : NoIndent(fourier_descriptors.real.tolist()),\n 'imag' : NoIndent(fourier_descriptors.imag.tolist())\n }\n }\n with open(path_save, 'w') as outfile:\n outfile.write(json.dumps(clean_data, cls=MyEncoder, sort_keys=True, indent=4))\n #json.dump(clean_data, outfile, indent=4)\n print(\"Tracks data saved in\", path_save)\n \n\nif __name__==\"__main__\":\n if len(sys.argv) < 2 or sys.argv[1] not in ['1','2','3','4']:\n print(\"Choose action as argument:\")\n print(\"\\t1 - Capture tracks upper-left corners\")\n print(\"\\t2 - Generate normalized tracks\")\n print(\"\\t3 - Generate tracks plots\")\n print(\"\\t4 - Only export tracks points and Fourier Descriptors\")\n \n else:\n if sys.argv[1] == '1':\n tracks_extraction = TracksExtraction('./data/racetrackmap_raw.jpg', './data/tracks.json', .45)\n data_out = tracks_extraction.capture_track_positions()\n print(\"Tracks positionning completed.\")\n tracks_extraction.save_data(\"./data/tracks_positions.json\")\n\n elif sys.argv[1] == '2':\n tracks_extraction = TracksExtraction('./data/racetrackmap_raw.jpg', './data/tracks_positions.json')\n tracks_extraction.tracks_point_generation()\n print(\"Tracks points generation completed.\")\n tracks_extraction.save_data(\"./data/tracks_extracted.json\")\n tracks_extraction.display_track(0)\n\n elif sys.argv[1] == '3':\n tracks_extraction = TracksExtraction('./data/racetrackmap_raw.jpg', './data/tracks_extracted.json')\n tracks_extraction.generate_track_plot(\"./data/tracks_plots/\")\n \n elif sys.argv[1] == '4':\n tracks_extraction = 
TracksExtraction('./data/racetrackmap_raw.jpg', './data/tracks_extracted.json')\n tracks_extraction.save_data_normalized(\"./data/tracks_fourier.json\", 2**8)"
] |
[
[
"numpy.array",
"numpy.absolute",
"matplotlib.pyplot.close",
"matplotlib.pyplot.figure"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
gviejo/ColdPlay
|
[
"0fdab7e84b084ab67c1f29dd29a0188a69287077"
] |
[
"python/functions.py"
] |
[
"import numpy as np\nfrom numba import jit\nimport pandas as pd\nimport neuroseries as nts\nimport sys, os\nimport scipy\nfrom scipy import signal\nfrom itertools import combinations\n\n'''\nUtilities functions\nFeel free to add your own\n'''\n\n\n#########################################################\n# CORRELATION\n#########################################################\n@jit(nopython=True)\ndef crossCorr(t1, t2, binsize, nbins):\n\t''' \n\t\tFast crossCorr \n\t'''\n\tnt1 = len(t1)\n\tnt2 = len(t2)\n\tif np.floor(nbins/2)*2 == nbins:\n\t\tnbins = nbins+1\n\n\tm = -binsize*((nbins+1)/2)\n\tB = np.zeros(nbins)\n\tfor j in range(nbins):\n\t\tB[j] = m+j*binsize\n\n\tw = ((nbins/2) * binsize)\n\tC = np.zeros(nbins)\n\ti2 = 1\n\n\tfor i1 in range(nt1):\n\t\tlbound = t1[i1] - w\n\t\twhile i2 < nt2 and t2[i2] < lbound:\n\t\t\ti2 = i2+1\n\t\twhile i2 > 1 and t2[i2-1] > lbound:\n\t\t\ti2 = i2-1\n\n\t\trbound = lbound\n\t\tl = i2\n\t\tfor j in range(nbins):\n\t\t\tk = 0\n\t\t\trbound = rbound+binsize\n\t\t\twhile l < nt2 and t2[l] < rbound:\n\t\t\t\tl = l+1\n\t\t\t\tk = k+1\n\n\t\t\tC[j] += k\n\n\t# for j in range(nbins):\n\t# C[j] = C[j] / (nt1 * binsize)\n\tC = C/(nt1 * binsize/1000)\n\n\treturn C\n\ndef crossCorr2(t1, t2, binsize, nbins):\n\t'''\n\t\tSlow crossCorr\n\t'''\n\twindow = np.arange(-binsize*(nbins/2),binsize*(nbins/2)+2*binsize,binsize) - (binsize/2.)\n\tallcount = np.zeros(nbins+1)\n\tfor e in t1:\n\t\tmwind = window + e\n\t\t# need to add a zero bin and an infinite bin in mwind\n\t\tmwind = np.array([-1.0] + list(mwind) + [np.max([t1.max(),t2.max()])+binsize])\t\n\t\tindex = np.digitize(t2, mwind)\n\t\t# index larger than 2 and lower than mwind.shape[0]-1\n\t\t# count each occurences \n\t\tcount = np.array([np.sum(index == i) for i in range(2,mwind.shape[0]-1)])\n\t\tallcount += np.array(count)\n\tallcount = allcount/(float(len(t1))*binsize / 1000)\n\treturn allcount\n\ndef xcrossCorr_slow(t1, t2, binsize, nbins, nbiter, jitter, confInt):\t\t\n\ttimes \t\t\t= np.arange(0, binsize*(nbins+1), binsize) - (nbins*binsize)/2\n\tH0 \t\t\t\t= crossCorr(t1, t2, binsize, nbins)\t\n\tH1 \t\t\t\t= np.zeros((nbiter,nbins+1))\n\tt2j\t \t\t\t= t2 + 2*jitter*(np.random.rand(nbiter, len(t2)) - 0.5)\n\tt2j \t\t\t= np.sort(t2j, 1)\n\tfor i in range(nbiter):\t\t\t\n\t\tH1[i] \t\t= crossCorr(t1, t2j[i], binsize, nbins)\n\tHm \t\t\t\t= H1.mean(0)\n\ttmp \t\t\t= np.sort(H1, 0)\n\tHeI \t\t\t= tmp[int((1-confInt)/2*nbiter),:]\n\tHeS \t\t\t= tmp[int((confInt + (1-confInt)/2)*nbiter)]\n\tHstd \t\t\t= np.std(tmp, 0)\n\n\treturn (H0, Hm, HeI, HeS, Hstd, times)\n\ndef xcrossCorr_fast(t1, t2, binsize, nbins, nbiter, jitter, confInt):\t\t\n\ttimes \t\t\t= np.arange(0, binsize*(nbins*2+1), binsize) - (nbins*2*binsize)/2\n\t# need to do a cross-corr of double size to convolve after and avoid boundary effect\n\tH0 \t\t\t\t= crossCorr(t1, t2, binsize, nbins*2)\t\n\twindow_size \t= 2*jitter//binsize\n\twindow \t\t\t= np.ones(window_size)*(1/window_size)\n\tHm \t\t\t\t= np.convolve(H0, window, 'same')\n\tHstd\t\t\t= np.sqrt(np.var(Hm))\t\n\tHeI \t\t\t= np.NaN\n\tHeS \t\t\t= np.NaN\t\n\treturn (H0, Hm, HeI, HeS, Hstd, times)\t\n\ndef compute_AutoCorrs(spks, ep, binsize = 5, nbins = 200):\n\t# First let's prepare a pandas dataframe to receive the data\n\ttimes = np.arange(0, binsize*(nbins+1), binsize) - (nbins*binsize)/2\t\n\tautocorrs = pd.DataFrame(index = times, columns = list(spks.keys()))\n\tfiring_rates = pd.Series(index = list(spks.keys()))\n\n\t# Now we can iterate over the dictionnary of spikes\n\tfor i in 
spks:\n\t\t# First we extract the time of spikes in ms during wake\n\t\tspk_time = spks[i].restrict(ep).as_units('ms').index.values\n\t\t# Calling the crossCorr function\n\t\tautocorrs[i] = crossCorr(spk_time, spk_time, binsize, nbins)\n\t\t# Computing the mean firing rate\n\t\tfiring_rates[i] = len(spk_time)/ep.tot_length('s')\n\n\t# We can divide the autocorrs by the firing_rates\n\tautocorrs = autocorrs / firing_rates\n\n\t# And don't forget to replace the 0 ms for 0\n\tautocorrs.loc[0] = 0.0\n\treturn autocorrs, firing_rates\n\ndef compute_CrossCorrs(spks, ep, binsize=10, nbins = 2000, norm = False):\n\t\"\"\"\n\t\t\n\t\"\"\"\t\n\tneurons = list(spks.keys())\n\ttimes = np.arange(0, binsize*(nbins+1), binsize) - (nbins*binsize)/2\n\tcc = pd.DataFrame(index = times, columns = list(combinations(neurons, 2)))\n\t\t\n\tfor i,j in cc.columns:\t\t\n\t\tspk1 = spks[i].restrict(ep).as_units('ms').index.values\n\t\tspk2 = spks[j].restrict(ep).as_units('ms').index.values\t\t\n\t\ttmp = crossCorr(spk1, spk2, binsize, nbins)\t\t\n\t\tfr = len(spk2)/ep.tot_length('s')\n\t\tif norm:\n\t\t\tcc[(i,j)] = tmp/fr\n\t\telse:\n\t\t\tcc[(i,j)] = tmp\n\treturn cc\n\ndef compute_PairCrossCorr(spks, ep, pair, binsize=10, nbins = 2000, norm = False):\n\t\"\"\"\n\t\t\n\t\"\"\"\t\t\n\ttimes = np.arange(0, binsize*(nbins+1), binsize) - (nbins*binsize)/2\t\n\tspk1 = spks[pair[0]].restrict(ep).as_units('ms').index.values\n\tspk2 = spks[pair[1]].restrict(ep).as_units('ms').index.values\t\t\n\ttmp = crossCorr(spk1, spk2, binsize, nbins)\t\t\n\tfr = len(spk2)/ep.tot_length('s')\n\ttmp = pd.Series(index = times, data = tmp)\n\tif norm:\n\t\ttmp = tmp/fr\n\telse:\n\t\ttmp = tmp\n\treturn tmp\n\ndef compute_EventCrossCorr(spks, evt, ep, binsize = 5, nbins = 1000, norm=False):\n\t\"\"\"\n\t\"\"\"\n\tneurons = list(spks.keys())\n\ttimes = np.arange(0, binsize*(nbins+1), binsize) - (nbins*binsize)/2\n\tcc = pd.DataFrame(index = times, columns = neurons)\n\ttsd1 = evt.restrict(ep).as_units('ms').index.values\n\tfor i in neurons:\n\t\tspk2 = spks[i].restrict(ep).as_units('ms').index.values\n\t\ttmp = crossCorr(tsd1, spk2, binsize, nbins)\n\t\tfr = len(spk2)/ep.tot_length('s')\n\t\tif norm:\n\t\t\tcc[i] = tmp/fr\n\t\telse:\n\t\t\tcc[i] = tmp\n\treturn cc\n\t\t\ndef compute_ISI(spks, ep, maxisi, nbins, log_=False):\n\t\"\"\"\n\t\"\"\"\n\tneurons = list(spks.keys())\n\tif log_:\n\t\tbins = np.linspace(np.log10(1), np.log10(maxisi), nbins)\n\telse:\n\t\tbins = np.linspace(0, maxisi, nbins)\n\t\t\n\tisi = pd.DataFrame(index = bins[0:-1] + np.diff(bins)/2, columns = neurons)\n\tfor i in neurons:\n\t\ttmp = []\n\t\tfor j in ep.index:\n\t\t\ttmp.append(np.diff(spks[i].restrict(ep.loc[[j]]).as_units('ms').index.values))\n\t\ttmp = np.hstack(tmp)\n\t\tif log_:\n\t\t\tisi[i], _ = np.histogram(np.log10(tmp), bins)\n\t\telse:\n\t\t\tisi[i], _ = np.histogram(tmp, bins)\n\n\treturn isi\n\ndef compute_AllPairsCrossCorrs(spks, ep, binsize=10, nbins = 2000, norm = False):\n\t\"\"\"\n\t\t\n\t\"\"\"\t\n\tneurons = list(spks.keys())\n\ttimes = np.arange(0, binsize*(nbins+1), binsize) - (nbins*binsize)/2\n\tcc = pd.DataFrame(index = times, columns = list(combinations(neurons, 2)))\n\t\t\n\tfor i,j in cc.columns:\t\t\n\t\tspk1 = spks[i].restrict(ep).as_units('ms').index.values\n\t\tspk2 = spks[j].restrict(ep).as_units('ms').index.values\t\t\n\t\ttmp = crossCorr(spk1, spk2, binsize, nbins)\t\t\n\t\tfr = len(spk2)/ep.tot_length('s')\n\t\tif norm:\n\t\t\tcc[(i,j)] = tmp/fr\n\t\telse:\n\t\t\tcc[(i,j)] = tmp\n\treturn cc\n\ndef 
compute_AsyncCrossCorrs(spks, ep, binsize=10, nbins = 2000, norm = False, edge = 20):\n\t\"\"\"\n\t\t\n\t\"\"\"\t\n\tneurons = list(spks.keys())\n\ttimes = np.arange(0, binsize*(nbins+1), binsize) - (nbins*binsize)/2\n\tcc = pd.DataFrame(index = times, columns = list(combinations(neurons, 2)))\n\t\t\n\tfor i,j in cc.columns:\t\t\n\t\tspk1 = spks[i].restrict(ep).as_units('ms').index.values\n\t\tspk2 = spks[j].restrict(ep).as_units('ms').index.values\t\t\n\n\t\tspksync = []\n\t\tspkasync = []\n\t\tfor t in spk2:\t\t\t\n\t\t\tif np.sum(np.abs(t-spk1)<edge):\n\t\t\t\tspksync.append(t)\n\t\t\telse:\n\t\t\t\tspkasync.append(t)\n\n\n\t\t# tmp = crossCorr(spk1, spk2, binsize, nbins)\t\t\n\t\ttmp = crossCorr(spk1, np.array(spkasync), binsize, nbins)\n\t\tfr = len(spkasync)/ep.tot_length('s')\n\t\tif norm:\n\t\t\tcc[(i,j)] = tmp/fr\n\t\telse:\n\t\t\tcc[(i,j)] = tmp\n\treturn cc\n\ndef compute_RandomCrossCorrs(spks, ep, binsize=10, nbins = 2000, norm = False, percent = 0.5):\n\t\"\"\"\n\t\t\n\t\"\"\"\t\n\tneurons = list(spks.keys())\n\ttimes = np.arange(0, binsize*(nbins+1), binsize) - (nbins*binsize)/2\n\tcc = pd.DataFrame(index = times, columns = list(combinations(neurons, 2)))\n\t\t\n\tfor i,j in cc.columns:\t\t\n\t\tspk1 = spks[i].restrict(ep).as_units('ms').index.values\n\t\tspk2 = spks[j].restrict(ep).as_units('ms').index.values\t\t\n\n\t\tspk1_random = np.sort(np.random.choice(spk1, int(len(spk1)*percent), replace=False))\n\t\tspk2_random = np.sort(np.random.choice(spk2, int(len(spk2)*percent), replace=False))\n\n\t\t# tmp = crossCorr(spk1, spk2, binsize, nbins)\t\t\n\t\ttmp = crossCorr(spk1_random, spk2_random, binsize, nbins)\n\t\tfr = len(spk2_random)/ep.tot_length('s')\n\t\tif norm:\n\t\t\tcc[(i,j)] = tmp/fr\n\t\telse:\n\t\t\tcc[(i,j)] = tmp\n\treturn cc\n\n#########################################################\n# VARIOUS\n#########################################################\ndef computeLMNAngularTuningCurves(spikes, angle, ep, nb_bins = 180, frequency = 120.0, bin_size = 100):\n\ttmp \t\t\t= pd.Series(index = angle.index.values, data = np.unwrap(angle.values))\t\n\ttmp2 \t\t\t= tmp.rolling(window=50,win_type='gaussian',center=True,min_periods=1).mean(std=10.0)\t\n\tbin_size \t\t= bin_size * 1000\n\ttime_bins\t\t= np.arange(tmp.index[0], tmp.index[-1]+bin_size, bin_size) # assuming microseconds\n\tindex \t\t\t= np.digitize(tmp2.index.values, time_bins)\n\ttmp3 \t\t\t= tmp2.groupby(index).mean()\n\ttmp3.index \t\t= time_bins[np.unique(index)-1]+bin_size/2\n\ttmp3 \t\t\t= nts.Tsd(tmp3)\n\ttmp4\t\t\t= np.diff(tmp3.values)/np.diff(tmp3.as_units('s').index.values)\n\tnewangle \t\t= nts.Tsd(t = tmp3.index.values, d = tmp3.values%(2*np.pi))\n\tvelocity \t\t= nts.Tsd(t=tmp3.index.values[1:], d = tmp4)\n\tvelocity \t\t= velocity.restrict(ep)\t\n\tvelo_spikes \t= {}\t\n\tfor k in spikes: velo_spikes[k]\t= velocity.realign(spikes[k].restrict(ep))\n\t# bins_velocity\t= np.array([velocity.min(), -2*np.pi/3, -np.pi/6, np.pi/6, 2*np.pi/3, velocity.max()+0.001])\n\tbins_velocity\t= np.array([velocity.min(), -np.pi/6, np.pi/6, velocity.max()+0.001])\n\n\tidx_velocity \t= {k:np.digitize(velo_spikes[k].values, bins_velocity)-1 for k in spikes}\n\n\tbins \t\t\t= np.linspace(0, 2*np.pi, nb_bins)\n\tidx \t\t\t= bins[0:-1]+np.diff(bins)/2\n\ttuning_curves \t= {i:pd.DataFrame(index = idx, columns = list(spikes.keys())) for i in range(3)}\t\n\n\t# for i,j in zip(range(3),range(0,6,2)):\n\tfor i,j in zip(range(3),range(3)):\n\t\tfor k in spikes:\n\t\t\tspks \t\t\t= spikes[k].restrict(ep)\t\t\t\n\t\t\tspks 
\t\t\t= spks[idx_velocity[k] == j]\n\t\t\tangle_spike \t= newangle.restrict(ep).realign(spks)\n\t\t\tspike_count, bin_edges = np.histogram(angle_spike, bins)\n\t\t\ttmp \t\t\t= newangle.loc[velocity.index[np.logical_and(velocity.values>bins_velocity[j], velocity.values<bins_velocity[j+1])]]\n\t\t\toccupancy, _ \t= np.histogram(tmp, bins)\n\t\t\tspike_count \t= spike_count/occupancy\t\n\t\t\ttuning_curves[i][k] = spike_count*(1/(bin_size*1e-6))\n\n\treturn tuning_curves, velocity, bins_velocity\n\ndef computeAngularTuningCurves(spikes, angle, ep, nb_bins = 180, frequency = 120.0):\n\tbins \t\t\t= np.linspace(0, 2*np.pi, nb_bins)\n\tidx \t\t\t= bins[0:-1]+np.diff(bins)/2\n\ttuning_curves \t= pd.DataFrame(index = idx, columns = list(spikes.keys()))\t\n\tangle \t\t\t= angle.restrict(ep)\n\t# Smoothing the angle here\n\ttmp \t\t\t= pd.Series(index = angle.index.values, data = np.unwrap(angle.values))\n\ttmp2 \t\t\t= tmp.rolling(window=50,win_type='gaussian',center=True,min_periods=1).mean(std=10.0)\n\tangle\t\t\t= nts.Tsd(tmp2%(2*np.pi))\n\tfor k in spikes:\n\t\tspks \t\t\t= spikes[k]\n\t\t# true_ep \t\t= nts.IntervalSet(start = np.maximum(angle.index[0], spks.index[0]), end = np.minimum(angle.index[-1], spks.index[-1]))\t\t\n\t\tspks \t\t\t= spks.restrict(ep)\t\n\t\tangle_spike \t= angle.restrict(ep).realign(spks)\n\t\tspike_count, bin_edges = np.histogram(angle_spike, bins)\n\t\toccupancy, _ \t= np.histogram(angle, bins)\n\t\tspike_count \t= spike_count/occupancy\t\t\n\t\ttuning_curves[k] = spike_count*frequency\t\n\n\treturn tuning_curves\n\ndef findHDCells(tuning_curves, z = 50, p = 0.0001 , m = 1):\n\t\"\"\"\n\t\tPeak firing rate larger than 1\n\t\tand Rayleigh test p<0.001 & z > 100\n\t\"\"\"\n\tcond1 = tuning_curves.max()>m\n\tfrom pycircstat.tests import rayleigh\n\tstat = pd.DataFrame(index = tuning_curves.columns, columns = ['pval', 'z'])\n\tfor k in tuning_curves:\n\t\tstat.loc[k] = rayleigh(tuning_curves[k].index.values, tuning_curves[k].values)\n\tcond2 = np.logical_and(stat['pval']<p,stat['z']>z)\n\ttokeep = stat.index.values[np.where(np.logical_and(cond1, cond2))[0]]\n\treturn tokeep, stat\n\ndef decodeHD(tuning_curves, spikes, ep, bin_size = 200, px = None):\n\t\"\"\"\n\t\tSee : Zhang, 1998, Interpreting Neuronal Population Activity by Reconstruction: Unified Framework With Application to Hippocampal Place Cells\n\t\ttuning_curves: pd.DataFrame with angular position as index and columns as neuron\n\t\tspikes : dictionnary of spike times\n\t\tep : nts.IntervalSet, the epochs for decoding\n\t\tbin_size : in ms (default:200ms)\n\t\tpx : Occupancy. 
If None, px is uniform\n\t\"\"\"\t\t\n\tif len(ep) == 1:\n\t\tbins = np.arange(ep.as_units('ms').start.iloc[0], ep.as_units('ms').end.iloc[-1], bin_size)\n\telse:\n\t\t# ep2 = nts.IntervalSet(ep.copy().as_units('ms'))\n\t\t# ep2 = ep2.drop_short_intervals(bin_size*2)\n\t\t# bins = []\n\t\t# for i in ep2.index:\n\t\t# \tbins.append(np.arange())\n\t\t# bins = np.arange(ep2.start.iloc[0], ep.end.iloc[-1], bin_size)\n\t\tprint(\"TODO\")\n\t\tsys.exit()\n\n\n\torder = tuning_curves.columns.values\n\t# TODO CHECK MATCH\n\n\t# smoothing with a non-normalized gaussian\n\tw = scipy.signal.gaussian(51, 2)\n\t\n\tspike_counts = pd.DataFrame(index = bins[0:-1]+np.diff(bins)/2, columns = order)\n\tfor n in spike_counts:\t\t\n\t\tspks = spikes[n].restrict(ep).as_units('ms').index.values\n\t\ttmp = np.histogram(spks, bins)\n\t\tspike_counts[n] = np.convolve(tmp[0], w, mode = 'same')\n\t\t# spike_counts[k] = tmp[0]\n\n\ttcurves_array = tuning_curves.values\n\tspike_counts_array = spike_counts.values\n\tproba_angle = np.zeros((spike_counts.shape[0], tuning_curves.shape[0]))\n\n\tpart1 = np.exp(-(bin_size/1000)*tcurves_array.sum(1))\n\tif px is not None:\n\t\tpart2 = px\n\telse:\n\t\tpart2 = np.ones(tuning_curves.shape[0])\n\t#part2 = np.histogram(position['ry'], np.linspace(0, 2*np.pi, 61), weights = np.ones_like(position['ry'])/float(len(position['ry'])))[0]\n\t\n\tfor i in range(len(proba_angle)):\n\t\tpart3 = np.prod(tcurves_array**spike_counts_array[i], 1)\n\t\tp = part1 * part2 * part3\n\t\tproba_angle[i] = p/p.sum() # Normalization process here\n\n\tproba_angle = pd.DataFrame(index = spike_counts.index.values, columns = tuning_curves.index.values, data= proba_angle)\t\n\tproba_angle = proba_angle.astype('float')\n\tdecoded = nts.Tsd(t = proba_angle.index.values, d = proba_angle.idxmax(1).values, time_units = 'ms')\n\treturn decoded, proba_angle, spike_counts\n\ndef computePlaceFields(spikes, position, ep, nb_bins = 200, frequency = 120.0):\n\tplace_fields = {}\n\tposition_tsd = position.restrict(ep)\n\txpos = position_tsd.iloc[:,0]\n\typos = position_tsd.iloc[:,1]\n\txbins = np.linspace(xpos.min(), xpos.max()+1e-6, nb_bins+1)\n\tybins = np.linspace(ypos.min(), ypos.max()+1e-6, nb_bins+1)\n\tfor n in spikes:\n\t\tposition_spike = position_tsd.realign(spikes[n].restrict(ep))\n\t\tspike_count,_,_ = np.histogram2d(position_spike.iloc[:,1].values, position_spike.iloc[:,0].values, [ybins,xbins])\n\t\toccupancy, _, _ = np.histogram2d(ypos, xpos, [ybins,xbins])\n\t\tmean_spike_count = spike_count/(occupancy+1)\n\t\tplace_field = mean_spike_count*frequency \n\t\tplace_fields[n] = pd.DataFrame(index = ybins[0:-1][::-1],columns = xbins[0:-1], data = place_field)\n\t\t\n\textent = (xbins[0], xbins[-1], ybins[0], ybins[-1]) # USEFUL FOR MATPLOTLIB\n\treturn place_fields, extent\n\ndef computeOccupancy(position_tsd, nb_bins = 100):\n xpos = position_tsd.iloc[:,0]\n ypos = position_tsd.iloc[:,1] \n xbins = np.linspace(xpos.min(), xpos.max()+1e-6, nb_bins+1)\n ybins = np.linspace(ypos.min(), ypos.max()+1e-6, nb_bins+1)\n occupancy, _, _ = np.histogram2d(ypos, xpos, [ybins,xbins])\n return occupancy\n\ndef computeAngularVelocityTuningCurves(spikes, angle, ep, nb_bins = 61, bin_size = 10000, norm=True):\n\ttmp \t\t\t= pd.Series(index = angle.index.values, data = np.unwrap(angle.values))\n\ttmp2 \t\t\t= tmp.rolling(window=100,win_type='gaussian',center=True,min_periods=1).mean(std=10.0)\n\ttime_bins\t\t= np.arange(tmp.index[0], tmp.index[-1]+bin_size, bin_size) # assuming microseconds\n\tindex \t\t\t= 
np.digitize(tmp2.index.values, time_bins)\n\ttmp3 \t\t\t= tmp2.groupby(index).mean()\n\ttmp3.index \t\t= time_bins[np.unique(index)-1]+bin_size/2\n\ttmp3 \t\t\t= nts.Tsd(tmp3)\n\ttmp4\t\t\t= np.diff(tmp3.values)/np.diff(tmp3.as_units('s').index.values)\n\ttmp2 \t\t\t= nts.Tsd(tmp2)\n\ttmp4\t\t\t= np.diff(tmp2.values)/np.diff(tmp2.as_units('s').index.values)\t\n\tvelocity \t\t= nts.Tsd(t=tmp2.index.values[1:], d = tmp4)\n\tvelocity \t\t= velocity.restrict(ep)\t\n\tbins \t\t\t= np.linspace(-2*np.pi, 2*np.pi, nb_bins)\n\tidx \t\t\t= bins[0:-1]+np.diff(bins)/2\n\tvelo_curves\t\t= pd.DataFrame(index = idx, columns = list(spikes.keys()))\n\n\tfor k in spikes:\n\t\tspks \t\t= spikes[k]\n\t\tspks \t\t= spks.restrict(ep)\n\t\tspeed_spike = velocity.realign(spks)\n\t\tspike_count, bin_edges = np.histogram(speed_spike, bins)\n\t\toccupancy, _ = np.histogram(velocity.restrict(ep), bins)\n\t\tspike_count = spike_count/(occupancy+1)\n\t\tvelo_curves[k] = spike_count*(1/(bin_size*1e-6))\n\t\t# normalizing by firing rate \n\t\tif norm:\n\t\t\tvelo_curves[k] = velo_curves[k]/(len(spikes[k].restrict(ep))/ep.tot_length('s'))\n\n\treturn velo_curves\n\ndef smoothAngularTuningCurves(tuning_curves, window = 20, deviation = 3.0):\n\tnew_tuning_curves = {}\t\n\tfor i in tuning_curves.columns:\n\t\ttcurves = tuning_curves[i]\n\t\toffset = np.mean(np.diff(tcurves.index.values))\n\t\tpadded \t= pd.Series(index = np.hstack((tcurves.index.values-(2*np.pi)-offset,\n\t\t\t\t\t\t\t\t\t\t\t\ttcurves.index.values,\n\t\t\t\t\t\t\t\t\t\t\t\ttcurves.index.values+(2*np.pi)+offset)),\n\t\t\t\t\t\t\tdata = np.hstack((tcurves.values, tcurves.values, tcurves.values)))\n\t\tsmoothed = padded.rolling(window=window,win_type='gaussian',center=True,min_periods=1).mean(std=deviation)\t\t\n\t\tnew_tuning_curves[i] = smoothed.loc[tcurves.index]\n\n\tnew_tuning_curves = pd.DataFrame.from_dict(new_tuning_curves)\n\n\treturn new_tuning_curves\n\ndef computeMeanFiringRate(spikes, epochs, name):\n\tmean_frate = pd.DataFrame(index = spikes.keys(), columns = name)\n\tfor n, ep in zip(name, epochs):\n\t\tfor k in spikes:\n\t\t\tmean_frate.loc[k,n] = len(spikes[k].restrict(ep))/ep.tot_length('s')\n\treturn mean_frate\n\ndef computeSpeedTuningCurves(spikes, position, ep, bin_size = 0.1, nb_bins = 20, speed_max = 0.4):\n\ttime_bins \t= np.arange(position.index[0], position.index[-1]+bin_size*1e6, bin_size*1e6)\n\tindex \t\t= np.digitize(position.index.values, time_bins)\n\ttmp \t\t= position.groupby(index).mean()\n\ttmp.index \t= time_bins[np.unique(index)-1]+(bin_size*1e6)/2\n\tdistance\t= np.sqrt(np.power(np.diff(tmp['x']), 2) + np.power(np.diff(tmp['z']), 2))\n\tspeed \t\t= nts.Tsd(t = tmp.index.values[0:-1]+ bin_size/2, d = distance/bin_size)\n\tspeed \t\t= speed.restrict(ep)\n\tbins \t\t= np.linspace(0, speed_max, nb_bins)\n\tidx \t\t= bins[0:-1]+np.diff(bins)/2\n\tspeed_curves = pd.DataFrame(index = idx,columns = np.arange(len(spikes)))\n\tfor k in spikes:\n\t\tspks \t= spikes[k]\n\t\tspks \t= spks.restrict(ep)\n\t\tspeed_spike = speed.realign(spks)\n\t\tspike_count, bin_edges = np.histogram(speed_spike, bins)\n\t\toccupancy, _ = np.histogram(speed, bins)\n\t\tspike_count = spike_count/(occupancy+1)\n\t\tspeed_curves[k] = spike_count/bin_size\n\n\treturn speed_curves\n\ndef computeAccelerationTuningCurves(spikes, position, ep, bin_size = 0.1, nb_bins = 40):\n\ttime_bins \t= np.arange(position.index[0], position.index[-1]+bin_size*1e6, bin_size*1e6)\n\tindex \t\t= np.digitize(position.index.values, time_bins)\n\ttmp \t\t= 
position.groupby(index).mean()\n\ttmp.index \t= time_bins[np.unique(index)-1]+(bin_size*1e6)/2\n\tdistance\t= np.sqrt(np.power(np.diff(tmp['x']), 2) + np.power(np.diff(tmp['z']), 2))\n\tspeed \t\t= nts.Tsd(t = tmp.index.values[0:-1]+ bin_size/2, d = distance/bin_size)\n\tspeed \t\t= speed.restrict(ep)\n\tspeed \t\t= speed.as_series()\n\tspeed2 \t\t= speed.rolling(window=10, win_type='gaussian', center= True, min_periods=1).mean(std = 1.0)\n\taccel \t\t= nts.Tsd(t = speed2.index.values[0:-1] + np.diff(speed2.index.values)/2, d = np.diff(speed2.values))\t\n\tbins \t\t= np.linspace(accel.min(), accel.max(), nb_bins)\n\tidx \t\t= bins[0:-1]+np.diff(bins)/2\n\taccel_curves = pd.DataFrame(index = idx,columns = np.arange(len(spikes)))\n\tfor k in spikes:\n\t\tspks \t= spikes[k]\n\t\tspks \t= spks.restrict(ep)\n\t\taccel_spike = accel.realign(spks)\n\t\tspike_count, bin_edges = np.histogram(accel_spike, bins)\n\t\toccupancy, _ = np.histogram(accel, bins)\n\t\tspike_count = spike_count/(occupancy+1)\n\t\taccel_curves[k] = spike_count/bin_size\n\n\treturn accel_curves\n\ndef refineSleepFromAccel(acceleration, sleep_ep):\n\tvl = acceleration[0].restrict(sleep_ep)\n\tvl = vl.as_series().diff().abs().dropna()\t\n\ta, _ = scipy.signal.find_peaks(vl, 0.025)\n\tpeaks = nts.Tsd(vl.iloc[a])\n\tduration = np.diff(peaks.as_units('s').index.values)\n\tinterval = nts.IntervalSet(start = peaks.index.values[0:-1], end = peaks.index.values[1:])\n\n\tnewsleep_ep = interval.iloc[duration>15.0]\n\tnewsleep_ep = newsleep_ep.reset_index(drop=True)\n\tnewsleep_ep = newsleep_ep.merge_close_intervals(100000, time_units ='us')\n\n\tnewsleep_ep\t= sleep_ep.intersect(newsleep_ep)\n\n\treturn newsleep_ep\n\ndef splitWake(ep):\n\tif len(ep) != 1:\n\t\tprint('Cant split wake in 2')\n\t\tsys.exit()\n\ttmp = np.zeros((2,2))\n\ttmp[0,0] = ep.values[0,0]\n\ttmp[1,1] = ep.values[0,1]\n\ttmp[0,1] = tmp[1,0] = ep.values[0,0] + np.diff(ep.values[0])/2\n\treturn nts.IntervalSet(start = tmp[:,0], end = tmp[:,1])\n\ndef centerTuningCurves(tcurve):\n\t\"\"\"\n\tcenter tuning curves by peak\n\t\"\"\"\n\tpeak \t\t\t= pd.Series(index=tcurve.columns,data = np.array([circmean(tcurve.index.values, tcurve[i].values) for i in tcurve.columns]))\n\tnew_tcurve \t\t= []\n\tfor p in tcurve.columns:\t\n\t\tx = tcurve[p].index.values - tcurve[p].index[tcurve[p].index.get_loc(peak[p], method='nearest')]\n\t\tx[x<-np.pi] += 2*np.pi\n\t\tx[x>np.pi] -= 2*np.pi\n\t\ttmp = pd.Series(index = x, data = tcurve[p].values).sort_index()\n\t\tnew_tcurve.append(tmp.values)\n\tnew_tcurve = pd.DataFrame(index = np.linspace(-np.pi, np.pi, tcurve.shape[0]+1)[0:-1], data = np.array(new_tcurve).T, columns = tcurve.columns)\n\treturn new_tcurve\n\ndef offsetTuningCurves(tcurve, diffs):\n\t\"\"\"\n\toffseting tuning curves synced by diff\n\t\"\"\"\t\n\tnew_tcurve \t\t= []\n\tfor p in tcurve.columns:\t\n\t\tx = tcurve[p].index.values - tcurve[p].index[tcurve[p].index.get_loc(diffs[p], method='nearest')]\n\t\tx[x<-np.pi] += 2*np.pi\n\t\tx[x>np.pi] -= 2*np.pi\n\t\ttmp = pd.Series(index = x, data = tcurve[p].values).sort_index()\n\t\tnew_tcurve.append(tmp.values)\n\tnew_tcurve = pd.DataFrame(index = np.linspace(-np.pi, np.pi, tcurve.shape[0]+1)[0:-1], data = np.array(new_tcurve).T, columns = tcurve.columns)\n\treturn new_tcurve\n\ndef computeAngularHeadVelocity(angle, ep, bin_size = 10000):\n\t# COMPUTING AHV \n\ttmp \t\t\t= pd.Series(index = angle.index.values, data = np.unwrap(angle.values))\n\ttmp2 \t\t\t= 
tmp.rolling(window=100,win_type='gaussian',center=True,min_periods=1).mean(std=10.0)\n\ttime_bins\t\t= np.arange(tmp.index[0], tmp.index[-1]+bin_size, bin_size) # assuming microseconds\n\tindex \t\t\t= np.digitize(tmp2.index.values, time_bins)\n\ttmp3 \t\t\t= tmp2.groupby(index).mean()\n\ttmp3.index \t\t= time_bins[np.unique(index)-1]+bin_size/2\n\ttmp3 \t\t\t= nts.Tsd(tmp3)\n\ttmp4\t\t\t= np.diff(tmp3.values)/np.diff(tmp3.as_units('s').index.values)\n\ttmp2 \t\t\t= nts.Tsd(tmp2)\n\ttmp4\t\t\t= np.diff(tmp2.values)/np.diff(tmp2.as_units('s').index.values)\t\n\tvelocity \t\t= nts.Tsd(t=tmp2.index.values[1:], d = tmp4)\n\tvelocity \t\t= velocity.restrict(ep)\t\n\treturn velocity\n\n\n#########################################################\n# LFP FUNCTIONS\n#########################################################\ndef butter_bandpass(lowcut, highcut, fs, order=5):\n\tfrom scipy.signal import butter\n\tnyq = 0.5 * fs\n\tlow = lowcut / nyq\n\thigh = highcut / nyq\n\tb, a = butter(order, [low, high], btype='band')\n\treturn b, a\n\ndef butter_bandpass_filter(data, lowcut, highcut, fs, order=5):\n\tfrom scipy.signal import lfilter\n\tb, a = butter_bandpass(lowcut, highcut, fs, order=order)\n\ty = lfilter(b, a, data)\n\treturn y\n\ndef downsample(tsd, up, down):\n\timport scipy.signal\n\timport neuroseries as nts\n\tdtsd = scipy.signal.resample_poly(tsd.values, up, down)\n\tdt = tsd.as_units('s').index.values[np.arange(0, tsd.shape[0], down)]\n\tif len(tsd.shape) == 1:\t\t\n\t\treturn nts.Tsd(dt, dtsd, time_units = 's')\n\telif len(tsd.shape) == 2:\n\t\treturn nts.TsdFrame(dt, dtsd, time_units = 's', columns = list(tsd.columns))\n\ndef getPeaksandTroughs(lfp, min_points):\n\t\"\"\"\t \n\t\tAt 250Hz (1250/5), 2 troughs cannont be closer than 20 (min_points) points (if theta reaches 12Hz);\t\t\n\t\"\"\"\n\timport neuroseries as nts\n\timport scipy.signal\n\tif isinstance(lfp, nts.time_series.Tsd):\n\t\ttroughs \t\t= nts.Tsd(lfp.as_series().iloc[scipy.signal.argrelmin(lfp.values, order =min_points)[0]], time_units = 'us')\n\t\tpeaks \t\t\t= nts.Tsd(lfp.as_series().iloc[scipy.signal.argrelmax(lfp.values, order =min_points)[0]], time_units = 'us')\n\t\ttmp \t\t\t= nts.Tsd(troughs.realign(peaks, align = 'next').as_series().drop_duplicates('first')) # eliminate double peaks\n\t\tpeaks\t\t\t= peaks[tmp.index]\n\t\ttmp \t\t\t= nts.Tsd(peaks.realign(troughs, align = 'prev').as_series().drop_duplicates('first')) # eliminate double troughs\n\t\ttroughs \t\t= troughs[tmp.index]\n\t\treturn (peaks, troughs)\n\telif isinstance(lfp, nts.time_series.TsdFrame):\n\t\tpeaks \t\t\t= nts.TsdFrame(lfp.index.values, np.zeros(lfp.shape))\n\t\ttroughs\t\t\t= nts.TsdFrame(lfp.index.values, np.zeros(lfp.shape))\n\t\tfor i in lfp.keys():\n\t\t\tpeaks[i], troughs[i] = getPeaksandTroughs(lfp[i], min_points)\n\t\treturn (peaks, troughs)\n\ndef getPhase(lfp, fmin, fmax, nbins, fsamp, power = False):\n\t\"\"\" Continuous Wavelets Transform\n\t\treturn phase of lfp in a Tsd array\n\t\"\"\"\n\timport neuroseries as nts\n\tfrom Wavelets import MyMorlet as Morlet\n\tif isinstance(lfp, nts.time_series.TsdFrame):\n\t\tallphase \t\t= nts.TsdFrame(lfp.index.values, np.zeros(lfp.shape))\n\t\tallpwr \t\t\t= nts.TsdFrame(lfp.index.values, np.zeros(lfp.shape))\n\t\tfor i in lfp.keys():\n\t\t\tallphase[i], allpwr[i] = getPhase(lfp[i], fmin, fmax, nbins, fsamp, power = True)\n\t\tif power:\n\t\t\treturn allphase, allpwr\n\t\telse:\n\t\t\treturn allphase\t\t\t\n\n\telif isinstance(lfp, nts.time_series.Tsd):\n\t\tcw \t\t\t\t= 
Morlet(lfp.values, fmin, fmax, nbins, fsamp)\n\t\tcwt \t\t\t= cw.getdata()\n\t\tcwt \t\t\t= np.flip(cwt, axis = 0)\n\t\twave \t\t\t= np.abs(cwt)**2.0\n\t\tphases \t\t\t= np.arctan2(np.imag(cwt), np.real(cwt)).transpose()\t\n\t\tcwt \t\t\t= None\n\t\tindex \t\t\t= np.argmax(wave, 0)\n\t\t# memory problem here, need to loop\n\t\tphase \t\t\t= np.zeros(len(index))\t\n\t\tfor i in range(len(index)) : phase[i] = phases[i,index[i]]\n\t\tphases \t\t\t= None\n\t\tif power: \n\t\t\tpwrs \t\t= cw.getpower()\t\t\n\t\t\tpwr \t\t= np.zeros(len(index))\t\t\n\t\t\tfor i in range(len(index)):\n\t\t\t\tpwr[i] = pwrs[index[i],i]\t\n\t\t\treturn nts.Tsd(lfp.index.values, phase), nts.Tsd(lfp.index.values, pwr)\n\t\telse:\n\t\t\treturn nts.Tsd(lfp.index.values, phase)\n\n#########################################################\n# INTERPOLATION\n#########################################################\ndef interpolate(z, x, y, inter, bbox = None):\t\n\timport scipy.interpolate\n\txnew = np.arange(x.min(), x.max()+inter, inter)\n\tynew = np.arange(y.min(), y.max()+inter, inter)\n\tif bbox == None:\n\t\tf = scipy.interpolate.RectBivariateSpline(y, x, z)\n\telse:\n\t\tf = scipy.interpolate.RectBivariateSpline(y, x, z, bbox = bbox)\n\tznew = f(ynew, xnew)\n\treturn (xnew, ynew, znew)\n\ndef filter_(z, n):\n\tfrom scipy.ndimage import gaussian_filter\t\n\treturn gaussian_filter(z, n)\n\n\n#########################################################\n# HELPERS\n#########################################################\ndef writeNeuroscopeEvents(path, ep, name):\n\tf = open(path, 'w')\n\tfor i in range(len(ep)):\n\t\tf.writelines(str(ep.as_units('ms').iloc[i]['start']) + \" \"+name+\" start \"+ str(1)+\"\\n\")\n\t\tf.writelines(str(ep.as_units('ms').iloc[i]['end']) + \" \"+name+\" end \"+ str(1)+\"\\n\")\n\tf.close()\t\t\n\treturn\n\ndef getAllInfos(data_directory, datasets):\n\tallm = np.unique([\"/\".join(s.split(\"/\")[0:2]) for s in datasets])\n\tinfos = {}\n\tfor m in allm:\n\t\tpath = os.path.join(data_directory, m)\n\t\tcsv_file = list(filter(lambda x: '.csv' in x, os.listdir(path)))[0]\n\t\tinfos[m.split('/')[1]] = pd.read_csv(os.path.join(path, csv_file), index_col = 0)\n\treturn infos\n\ndef computeSpeed(position, ep, bin_size = 0.1):\n\ttime_bins \t= np.arange(position.index[0], position.index[-1]+bin_size*1e6, bin_size*1e6)\n\tindex \t\t= np.digitize(position.index.values, time_bins)\n\ttmp \t\t= position.groupby(index).mean()\n\ttmp.index \t= time_bins[np.unique(index)-1]+(bin_size*1e6)/2\n\tdistance\t= np.sqrt(np.power(np.diff(tmp['x']), 2) + np.power(np.diff(tmp['z']), 2))\n\tspeed \t\t= nts.Tsd(t = tmp.index.values[0:-1]+ bin_size/2, d = distance/bin_size)\n\tspeed \t\t= speed.restrict(ep)\n\treturn speed\n\n#######################################################\n# SYNCHRONY HELPERS\n#######################################################\n@jit(nopython=True, parallel=True)\ndef getSyncAsync(spk1, spk2):\n\tspksync = []\n\tspkasync = []\n\tfor t in spk2:\n\t\td = np.abs(t-spk1)\n\t\tidx = d<4\n\t\tif np.sum(idx):\n\t\t\tspksync.append(t)\n\t\telse:\n\t\t\tspkasync.append(t)\n\tspksync = np.array(spksync)\n\tspkasync = np.array(spkasync)\n\treturn spksync, spkasync\n\n\ndef getSpikesSyncAsync(spks, hd_neurons):\n\t\"\"\"\n\t\"\"\"\t\t\n\tallsync = {}\n\tallasync = {}\n\tn = len(spks)\n\tfor i in range(n):\n\t\tfor j in range(n):\n\t\t\tif i != j:\t\t\t\t\n\t\t\t\tspk1 = spks[i]\n\t\t\t\tspk2 = spks[j]\n\t\t\t\tspksync, spkasync = getSyncAsync(spk1, spk2)\n\t\t\t\tallsync[(hd_neurons[i],hd_neurons[j])] = 
nts.Ts(spksync, time_units = 'ms')\n\t\t\t\tallasync[(hd_neurons[i],hd_neurons[j])] = nts.Ts(spkasync, time_units = 'ms')\n\n\treturn allsync, allasync\n\n\t\ndef sample_frates(frates_value, time_index, hd_neurons, n = 30):\n\tspikes_random = {}\n\tfor i in range(len(hd_neurons)):\n\t\tl = frates_value[:,i]\n\t\ts = np.random.poisson(l, size=(int(n),int(len(frates_value))))\n\t\ttmp = {}\n\t\tfor j in range(len(s)):\n\t\t\ttmp[j] = nts.Ts(time_index[s[j]>0])\n\t\tspikes_random[hd_neurons[i]] = tmp\n\treturn spikes_random\n\ndef sampleSpikesFromAngularPosition(tcurve, angles, ep, bin_size = 1000):\n\t\"\"\"\n\t\"\"\"\n\thd_neurons = tcurve.columns.values\n\tbin_size = 1000 # us\n\tangles = pd.Series(index = angles.index, data = np.unwrap(angles.values))\n\tbins = np.arange(ep.loc[0,'start'], ep.loc[0, 'end'], bin_size)\n\ttmp = angles.groupby(np.digitize(angles.index.values, bins)-1).mean()\n\tangle2 = pd.Series(index = np.arange(len(bins)), data = np.nan)\n\tangle2.loc[tmp.index] = tmp.values\n\tangle2 = angle2.interpolate()\n\tangle2.index = bins + np.max(np.diff(bins)/2)\n\tangle2 = angle2%(2*np.pi)\n\n\tidx = np.argsort(np.abs(np.vstack(angle2.values) - tcurve.index.values), axis = 1)[:,0]\n\tidx = tcurve.index[idx]\n\tfrates = tcurve.loc[idx]\n\tfrates.index = angle2.index\n\n\ttime_index = frates.index.values\n\tfrates_value = frates.values*(bin_size*1e-6)\n\n\tspikes_random = sample_frates(frates_value, time_index, hd_neurons)\n\n\treturn spikes_random\n\ndef sampleSpikesFromAngularVelocity(ahvcurve, angles, ep, bin_size = 1000):\n\t\"\"\"\n\t\"\"\"\n\thd_neurons = ahvcurve.columns.values\n\tbin_size = 1000 # us\n\n\ttmp \t\t\t= pd.Series(index = angles.index.values, data = np.unwrap(angles.values))\n\ttmp2 \t\t\t= tmp.rolling(window=100,win_type='gaussian',center=True,min_periods=1).mean(std=10.0)\n\ttime_bins\t\t= np.arange(tmp.index[0], tmp.index[-1]+10000, 10000) # assuming microseconds\n\tindex \t\t\t= np.digitize(tmp2.index.values, time_bins)\n\ttmp3 \t\t\t= tmp2.groupby(index).mean()\n\ttmp3.index \t\t= time_bins[np.unique(index)-1]+bin_size/2\n\ttmp3 \t\t\t= nts.Tsd(tmp3)\n\ttmp4\t\t\t= np.diff(tmp3.values)/np.diff(tmp3.as_units('s').index.values)\n\ttmp2 \t\t\t= nts.Tsd(tmp2)\n\ttmp4\t\t\t= np.diff(tmp2.values)/np.diff(tmp2.as_units('s').index.values)\t\n\tvelocity \t\t= nts.Tsd(t=tmp2.index.values[1:], d = tmp4)\n\tvelocity \t\t= velocity.restrict(ep)\t\n\t\n\tbins = np.arange(ep.loc[0,'start'], ep.loc[0, 'end'], bin_size)\n\ttmp = velocity.groupby(np.digitize(velocity.index.values, bins)-1).mean()\n\tvelocity2 = pd.Series(index = np.arange(len(bins)), data = np.nan)\n\tvelocity2.loc[tmp.index] = tmp.values\n\tvelocity2 = velocity2.interpolate()\n\tvelocity2.index = bins + np.max(np.diff(bins)/2)\n\t\n\tidx = np.argsort(np.abs(np.vstack(velocity2.values) - ahvcurve.index.values), axis = 1)[:,0]\n\tidx = ahvcurve.index[idx]\n\tfrates = ahvcurve.loc[idx]\n\tfrates.index = velocity2.index\n\n\ttime_index = frates.index.values\n\tfrates_value = frates.values*(bin_size*1e-6)\n\n\tspikes_random = sample_frates(frates_value, time_index, hd_neurons)\n\n\treturn spikes_random\n\ndef loadShankStructure(generalinfo):\n\tshankStructure = {}\n\tfor k,i in zip(generalinfo['shankStructure'][0][0][0][0],range(len(generalinfo['shankStructure'][0][0][0][0]))):\n\t\tif len(generalinfo['shankStructure'][0][0][1][0][i]):\n\t\t\tshankStructure[k[0]] = generalinfo['shankStructure'][0][0][1][0][i][0]-1\n\t\telse :\n\t\t\tshankStructure[k[0]] = []\n\t\n\treturn shankStructure\t\n\n"
] |
[
[
"numpy.imag",
"scipy.signal.find_peaks",
"pandas.Series",
"numpy.linspace",
"numpy.vstack",
"pandas.DataFrame",
"numpy.var",
"numpy.digitize",
"numpy.histogram",
"numpy.hstack",
"numpy.unique",
"numpy.arange",
"numpy.std",
"scipy.signal.butter",
"numpy.diff",
"numpy.argmax",
"numpy.real",
"scipy.signal.lfilter",
"numpy.zeros",
"scipy.interpolate.RectBivariateSpline",
"scipy.signal.resample_poly",
"scipy.signal.argrelmax",
"numpy.log10",
"numpy.floor",
"pandas.DataFrame.from_dict",
"numpy.array",
"numpy.logical_and",
"numpy.sum",
"numpy.flip",
"numpy.histogram2d",
"numpy.convolve",
"scipy.signal.argrelmin",
"scipy.ndimage.gaussian_filter",
"numpy.abs",
"numpy.sort",
"numpy.ones",
"numpy.unwrap",
"numpy.prod",
"scipy.signal.gaussian"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"1.3",
"0.19",
"1.1",
"1.5",
"0.24",
"0.20",
"1.0",
"0.25",
"1.2"
],
"scipy": [],
"tensorflow": []
}
] |
xiafanzeng/Raman-Spectroscopy
|
[
"ba0b8b7ad0d9b9487a7602b0a09a41d970f70598"
] |
[
"api/classification/siamese/score.py"
] |
[
"from keras.utils import Sequence\nimport numpy as np\nimport keras.backend as K\nfrom tqdm import tqdm\n\nfrom config import spectral_shape\n\n\nclass FeatureGen(Sequence):\n def __init__(self, train, data, batch_size=64, verbose=1):\n super(FeatureGen, self).__init__()\n self.data = data\n self.train = train\n self.batch_size = batch_size\n self.verbose = verbose\n if self.verbose > 0: self.progress = tqdm(total=len(self), desc='Features')\n \n def __getitem__(self, index):\n start = self.batch_size*index\n size = min(len(self.data) - start, self.batch_size)\n a = np.zeros((size,) + spectral_shape, dtype=K.floatx())\n for i in range(size): a[i,:] = self.train[self.data[start + i]]\n if self.verbose > 0: \n self.progress.update()\n if self.progress.n >= len(self): self.progress.close()\n return a\n def __len__(self):\n return (len(self.data) + self.batch_size - 1)//self.batch_size\n \n \n\nclass ScoreGen(Sequence):\n def __init__(self, x, y=None, batch_size=2048, verbose=1):\n super(ScoreGen, self).__init__()\n self.x = x\n self.y = y\n self.batch_size = batch_size\n self.verbose = verbose\n if y is None:\n self.y = self.x\n self.ix, self.iy = np.triu_indices(x.shape[0],1)\n else:\n self.iy, self.ix = np.indices((y.shape[0],x.shape[0]))\n self.ix = self.ix.reshape((self.ix.size,))\n self.iy = self.iy.reshape((self.iy.size,))\n self.subbatch = (len(self.x) + self.batch_size - 1)//self.batch_size\n if self.verbose > 0: self.progress = tqdm(total=len(self), desc='Scores')\n def __getitem__(self, index):\n start = index*self.batch_size\n end = min(start + self.batch_size, len(self.ix))\n a = self.y[self.iy[start:end],:]\n b = self.x[self.ix[start:end],:]\n if self.verbose > 0: \n self.progress.update()\n if self.progress.n >= len(self): self.progress.close()\n return [a,b]\n def __len__(self):\n return (len(self.ix) + self.batch_size - 1)//self.batch_size\n \nif __name__ == '__main__':\n from utils import load_cache, group_label, shuffle_idxs, score_reshape\n \n train, y_, _, _ = load_cache('../../')\n score = np.random.random_sample(size=(len(train), len(train)))\n id2samples = group_label(y_)\n train_idx, _ = shuffle_idxs(train)\n \n \n from model import build_model\n model, branch_model, head_model = build_model(64e-5,0)\n \n inp = FeatureGen(train, train_idx)\n feats = branch_model.predict(inp[0])\n import ipdb; ipdb.set_trace()\n scoreGen = ScoreGen(feats)\n score = head_model.predict(scoreGen[0])\n res = score_reshape(score, feats)\n print(score.shape)\n "
] |
[
[
"numpy.indices",
"numpy.triu_indices"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
sgsdxzy/ppdd
|
[
"abd53bc2fa873fe40f6128c3ff4e46e55e971cf0"
] |
[
"fittools.py"
] |
[
"import numpy as np\nimport numexpr as ne\nfrom scipy.optimize import curve_fit, brentq\nfrom scipy.interpolate import interp1d\n\nclass Guess(object):\n \"\"\"\n Container of guesses for fitting, used on initial fit guesses and learning.\n \"\"\"\n def __init__(self, peak_ratio = 0.2, sigma_x0 = 0.01, sigma_y0 = 0.01, sigma_x1 = 1, sigma_y1 = 1, offset_ratio = 0.006, fx = 0.03, fy = 0):\n self.peak_ratio = peak_ratio\n self.sigma_x0 = sigma_x0\n self.sigma_y0 = sigma_y0\n self.sigma_x1 = sigma_x1\n self.sigma_y1 = sigma_y1\n self.offset_ratio = offset_ratio\n self.fx = fx\n self.fy = fy\n\ndef find_nearest(array, value):\n \"\"\"\n Find the index of nearest element in array to value.\n \"\"\"\n idx = (np.abs(array-value)).argmin()\n return idx\n\ndef gaussian(x, a, mu, sigma, c):\n \"\"\"\n Gaussian function\n\n :math:`f(x)=a e^{-(x - \\mu)^2 / (2 \\\\sigma^2)} + c`\n\n ref: https://en.wikipedia.org/wiki/Gaussian_function\n\n Parameters\n ----------\n x : 1D np.array\n coordinate\n\n a : float\n the height of the curve's peak\n\n mu : float\n the position of the center of the peak\n\n sigma : float\n the standard deviation, sometimes called the Gaussian RMS width\n\n c : float\n non-zero background\n\n Returns\n -------\n out : 1D np.array\n the Gaussian profile\n \"\"\"\n return ne.evaluate('a * exp(-((x - mu) ** 2) / 2 / sigma ** 2) + c')\n\ndef guss_gaussian(x):\n \"\"\"\n Find a set of better starting parameters for Gaussian function fitting\n\n Parameters\n ----------\n x : 1D np.array\n 1D profile of your data\n\n Returns\n -------\n out : tuple of float\n estimated value of (a, mu, sigma, c)\n \"\"\"\n c_guess = (x[0] + x[-1]) / 2\n a_guess = x.max() - c_guess\n mu_guess = x.argmax()\n x_inter = interp1d(np.arange(len(x)), x)\n\n def _(i):\n return x_inter(i) - a_guess / 2 - c_guess\n\n try:\n sigma_l_guess = brentq(_, 0, mu_guess)\n except:\n sigma_l_guess = len(x) / 4\n try:\n sigma_r_guess = brentq(_, mu_guess, len(x) - 1)\n except:\n sigma_r_guess = 3 * len(x) / 4\n return a_guess, mu_guess, (sigma_r_guess -\n sigma_l_guess) / 2.35482, c_guess\n\ndef fit_gaussian(x, xmin, xmax):\n \"\"\"\n Fit a Gaussian function to x and return its parameters, with mu in [xmin, xmax]\n\n Parameters\n ----------\n x : 1D np.array\n 1D profile of your data\n\n Returns\n -------\n out : tuple of float\n (a, mu, sigma, c)\n \"\"\"\n p, q = curve_fit(gaussian, np.arange(x.size), x, p0=guss_gaussian(x), bounds=([-np.inf, xmin, -np.inf, -np.inf], [np.inf, xmax, np.inf, np.inf]))\n return p\n\ndef find_center_by_gaussian_fit(IM, ymin, ymax):\n \"\"\"\n Find image center by fitting the summation along x and y axis of the data to two 1D Gaussian function\n \"\"\"\n y = np.sum(IM, axis=1)\n return fit_gaussian(y, ymin, ymax)[1]\n\ndef find_center_by_convolution(IM, ymin, ymax):\n \"\"\" Center the image by convolution of two projections along each axis.\n code from the ``linbasex`` juptyer notebook\n Parameter\n -------\n IM: numpy 2D array\n image data\n Returns\n -------\n y-center\n \"\"\"\n # projection along axis=0 of image (rows)\n QL_raw0 = IM.sum(axis=1)\n\n # autocorrelate projections\n conv_0 = np.convolve(QL_raw0, QL_raw0, mode='full')\n\n #Take the first max, should there be several equal maxima.\n # 10May16 - axes swapped - check this\n return np.argmax(conv_0[ymin*2:ymax*2])/2 + ymin\n\ndef find_symmetry_axis(phase, ymin, ymax):\n \"\"\"\n Find symmetry axis of phase spectrum in range [ymin, ymax]. 
It will try different methods in the following order:\n find_center_by_gaussian_fit\n find_center_by_convolution\n If none of the methods could find a valid symmetry axis, a RuntimeError will be raised.\n\n Return the y index of the symmetry axis.\n \"\"\"\n try :\n center = find_center_by_gaussian_fit(phase, ymin, ymax)\n return center\n except (RuntimeError, ValueError) :\n #find_center_by_gaussian_fit failed, just pass to use next method\n pass\n\n #find_center_by_convolution always succeeds\n center = find_center_by_convolution(phase, ymin, ymax)\n return center\n\ndef three_peaks_1d(x, a0, x0, sigma_x0, a1, x1, sigma_x1, offset):\n \"\"\"\n The 1D fitting function for fitting three peaks in projection on x axis.\n \"\"\"\n peak0 = gaussian(x, a0, x0, sigma_x0, 0)\n peak1 = gaussian(x, a1, x1, sigma_x1, 0)\n peakm1 = gaussian(x, a1, 2*x0-x1, sigma_x1, 0)\n return ne.evaluate('peak0 + peak1 + peakm1 + offset')\n\ndef find_peaks_1d(x, a0, x0, sigma_x0, a1, x1, sigma_x1, offset):\n length_x = x.shape[0]\n popt,_ = curve_fit(three_peaks_1d, np.arange(length_x), x, p0 = (a0, x0, sigma_x0, a1, x1, sigma_x1, offset),\n bounds = ([-np.inf, 0, 0, -np.inf, length_x//2, 0, -np.inf], [np.inf, length_x, np.inf, np.inf, length_x, max(0.01*length_x, 5), np.inf]))\n #needs to limit sigma to avoid unsense results\n return popt\n\ndef three_peaks(xy_tuple, a0, x0, y0, sigma_x0, sigma_y0, a1, x1, y1, sigma_x1, sigma_y1, offset):\n \"\"\"\n The fitting function of three peaks.\n \"\"\"\n (x, y) = xy_tuple\n formula = ('a0*exp((-(x-x0)**2)/(2*sigma_x0**2) + (-(y-y0)**2)/(2*sigma_y0**2))'\n '+ a1*exp((-(x-x1)**2)/(2*sigma_x1**2) + (-(y-y1)**2)/(2*sigma_y1**2))'\n '+ a1*exp((-(x+x1-2*x0)**2)/(2*sigma_x1**2) + (-(y+y1-2*y0)**2)/(2*sigma_y1**2))'\n '+ offset'\n )\n return ne.evaluate(formula).ravel()\n\ndef find_peaks(XYf2d_shifted, guess):\n \"\"\"\n Fit the three peaks in the shifted 2d amplitude spectrum XYf2d_shifted.\n Return the phase shift of the secondary peak in x and y direction.\n \"\"\"\n length_x = XYf2d_shifted.shape[1]\n length_y = XYf2d_shifted.shape[0]\n dXf = 1/length_x\n dYf = 1/length_y\n\n a0 = np.max(XYf2d_shifted) #compose initial fit condition from guess\n x0 = length_x//2\n y0 = length_y//2\n a1 = guess.peak_ratio*a0\n x1 = x0 + guess.fx/dXf\n y1 = y0 + guess.fy/dYf\n offset = guess.offset_ratio*a0\n initial_guess = (a0, x0, y0, guess.sigma_x0, guess.sigma_y0, a1, x1, y1, guess.sigma_x1, guess.sigma_y1, offset)\n x, y = np.meshgrid(np.arange(length_x), np.arange(length_y))\n popt,_ = curve_fit(three_peaks, (x, y), XYf2d_shifted.ravel(), p0=initial_guess,\n bounds = ([0, 0, 0, 0, 0, 0, length_x//2, 0, 0, 0, 0],\n [np.inf, length_x, length_y, np.inf, np.inf, np.inf, length_x, length_y, max(0.01*length_x, 5), max(0.01*length_y, 5), np.inf]))\n #needs to limit sigma to avoid unsense results\n\n fx = (popt[6]-popt[1])*dXf\n fy = (popt[7]-popt[2])*dYf\n\n newguess = Guess()\n newguess.peak_ratio = popt[5]/popt[0] #update guess\n newguess.sigma_x0 = popt[3]\n newguess.sigma_y0 = popt[4]\n newguess.sigma_x1 = popt[8]\n newguess.sigma_y1 = popt[9]\n newguess.offset_ratio = popt[10]/popt[0]\n newguess.fx = fx\n newguess.fy = fy\n\n #xband1 = 0.09#100*popt[3]*dXf/0.5 #not used\n #xband2 = 0.16#(popt[6]-popt[1]+30*popt[8])*dXf/0.5\n #yband = 0.12#80*popt[9]*dYf/0.5\n\n return fx, fy, newguess\n\ndef half_image(IM, xcenter):\n \"\"\"\n Generate half of image IM by the image center in the x direction. 
This function is used to prepare for abel transfrom.\n \"\"\"\n xcenter = int(np.rint(xcenter))\n new_width = min(IM.shape[1] - xcenter - 1, xcenter)\n left = IM[:, xcenter-new_width:xcenter+1][:, ::-1]\n right = IM[:, xcenter:xcenter+new_width+1]\n return (left + right) / 2\n"
] |
[
[
"numpy.convolve",
"numpy.abs",
"numpy.arange",
"numpy.rint",
"numpy.max",
"numpy.argmax",
"scipy.optimize.brentq",
"numpy.sum"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"1.7",
"1.0",
"0.10",
"1.2",
"0.14",
"0.19",
"1.5",
"0.12",
"0.17",
"0.13",
"1.6",
"1.4",
"1.9",
"1.3",
"1.10",
"0.15",
"0.18",
"0.16",
"1.8"
],
"tensorflow": []
}
] |
loganbyers/data-pre-processing
|
[
"bbed4a86752ab11575825ae05b6234d06b23ff68"
] |
[
"foo_015a_global_hunger_index/foo_015a_global_hunger_index_processing.py"
] |
[
"import os\nimport pandas as pd\nimport urllib.request\nimport tabula\nfrom carto.datasets import DatasetManager\nfrom carto.auth import APIKeyAuthClient\nimport boto3\nfrom botocore.exceptions import NoCredentialsError\nfrom zipfile import ZipFile\n\n# name of table on Carto where you want to upload data\n# this should be a table name that is not currently in use\ndataset_name = 'foo_015a_global_hunger_index' #check\n\n# first, set the directory that you are working in with the path variable\n# you can use an environmental variable, as we did, or directly enter the directory name as a string\n# example: path = '/home/foo_015a_global_hunger_index'\npath = os.path.join(os.getenv('PROCESSING_DIR'), dataset_name)\n#move to this directory\nos.chdir(path)\n\n# create a new sub-directory within your specified dir called 'data'\ndata_dir = 'data'\nif not os.path.exists(data_dir):\n os.mkdir(data_dir)\n\n'''\nDownload data and save to your data directory\n'''\n# insert the url used to download the data from the source website\nurl = 'https://www.globalhungerindex.org/pdf/en/2019.pdf' #check\n\n# download the data from the source\nraw_data_file = os.path.join(data_dir, os.path.basename(url))\nurllib.request.urlretrieve(url, raw_data_file)\n\n'''\nProcess data\n'''\n# read in data from Table 2.1 GLOBAL HUNGER INDEX SCORES BY 2019 GHI RANK, which is on page 17 of the report\ndf_raw=tabula.read_pdf(raw_data_file,pages=17) #check\n\n#remove headers and poorly formatted column names (rows 0, 1)\ndf_raw=df_raw.iloc[2:]\n\n#get first half of table (columns 1-5, do not include rank column)\ndf_a=df_raw.iloc[:, 1:6]\n#name columns\ncol_names = [\"Country\", \"2000\", \"2005\", \"2010\", \"2019\"] #check\ndf_a.columns = col_names\n#get second half of table (columns 7-11, do not include rank column) and drop empty rows at end\ndf_b=df_raw.iloc[:, 7:12].dropna(how='all')\n#name columns\ndf_b.columns = col_names\n\n#combine first and second half of table\ndf = pd.concat([df_a, df_b], ignore_index=True, sort=False)\n\n# clean the dataframe\n# replace <5 with 5\ndf= df.replace('<5', 5)\n#replace — in table with None\ndf = df.replace({'—': None})\n\n#convert table from wide form (each year is a column) to long form (a single column of years and a single column of values)\ndf_long = pd.melt (df, id_vars= ['Country'] , var_name = 'year', value_name = 'hunger_index_score')\n\n#convert year column from object to integer\ndf_long.year=df_long.year.astype('int64')\n#convert hunger_index_score column from object to number\ndf_long.hunger_index_score = df_long.hunger_index_score.astype('float64')\n#replace NaN in table with None\ndf_long=df_long.where((pd.notnull(df_long)), None)\n\n#add rows for countries with insuffient data, but significant concern, as noted here:\n# https://www.globalhungerindex.org/results.html#box-2-1\n\n#add a column to our dataframe to store this flag - it will be False for all the countries already in our table\ndf_long['sig_concern'] = False\n\n#make a list of countries for which there is insuffient data, but significant concern\nsig_concern = ['Burundi', 'Comoros', 'Democratic Republic of Congo', 'Eritrea', 'Libya', 'Papua New Guinea', 'Somalia',\n 'South Sudan', 'Syrian Arab Republic']\n\n#add a new row to the dataframe for each of these countries, there will be no index score, but we will mark the flag as True\nfor country in sig_concern:\n row = [country, 2019, None, True]\n df_long = df_long.append(pd.Series(row, index=df_long.columns), ignore_index=True)\n\n#save processed dataset to 
csv\nprocessed_data_file = os.path.join(data_dir, dataset_name+'_edit.csv')\ndf_long.to_csv(processed_data_file, index=False)\n\n\n'''\nUpload processed data to Carto\n'''\nprint('Uploading processed data to Carto.')\n#set up carto authentication using local variables for username (CARTO_WRI_RW_USER) and API key (CARTO_WRI_RW_KEY)\nauth_client = APIKeyAuthClient(api_key=os.getenv('CARTO_WRI_RW_KEY'), base_url=\"https://{user}.carto.com/\".format(user=os.getenv('CARTO_WRI_RW_USER')))\n#set up dataset manager with authentication\ndataset_manager = DatasetManager(auth_client)\n#upload dataset to carto\ndataset = dataset_manager.create(processed_data_file)\nprint('Carto table created: {}'.format(os.path.basename(processed_data_file).split('.')[0]))\n#set dataset privacy to 'Public with link'\ndataset.privacy = 'LINK'\ndataset.save()\nprint('Privacy set to public with link.')\n\n'''\nUpload original data and processed data to Amazon S3 storage\n'''\ndef upload_to_aws(local_file, bucket, s3_file):\n s3 = boto3.client('s3', aws_access_key_id=os.getenv('aws_access_key_id'), aws_secret_access_key=os.getenv('aws_secret_access_key'))\n try:\n s3.upload_file(local_file, bucket, s3_file)\n print(\"Upload Successful\")\n print(\"http://{}.s3.amazonaws.com/{}\".format(bucket, s3_file))\n return True\n except FileNotFoundError:\n print(\"The file was not found\")\n return False\n except NoCredentialsError:\n print(\"Credentials not available\")\n return False\n\nprint('Uploading original data to S3.')\n# Copy the raw data into a zipped file to upload to S3\nraw_data_dir = os.path.join(data_dir, dataset_name+'.zip')\nwith ZipFile(raw_data_dir,'w') as zip:\n zip.write(raw_data_file, os.path.basename(raw_data_file))\n\n# Upload raw data file to S3\nuploaded = upload_to_aws(raw_data_dir, 'wri-public-data', 'resourcewatch/'+os.path.basename(raw_data_dir))\n\nprint('Uploading processed data to S3.')\n# Copy the processed data into a zipped file to upload to S3\nprocessed_data_dir = os.path.join(data_dir, dataset_name+'_edit.zip')\nwith ZipFile(processed_data_dir,'w') as zip:\n zip.write(processed_data_file, os.path.basename(processed_data_file))\n\n# Upload processed data file to S3\nuploaded = upload_to_aws(processed_data_dir, 'wri-public-data', 'resourcewatch/'+os.path.basename(processed_data_dir))\n"
] |
[
[
"pandas.concat",
"pandas.notnull",
"pandas.melt",
"pandas.Series"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"1.3",
"0.19",
"1.1",
"1.5",
"0.24",
"0.20",
"1.0",
"0.25",
"1.2"
],
"scipy": [],
"tensorflow": []
}
] |
anoff/skikitlearn
|
[
"cf3254ddf8d5b22183a171c8ac3f65bef8a89e32"
] |
[
"lib/process.py"
] |
[
"import gpxpy\nimport numpy as np\nfrom collections import namedtuple\n\ndef smooth(y, box_pts=11):\n box = np.ones(box_pts)/box_pts\n y_smooth = np.convolve(y, box, mode='same')\n return y_smooth\n\n# load file and concat all tracks/segments\ndef load_points(filename):\n Point = namedtuple(\"Point\", [\"lon\", \"lat\", \"elevation\", \"distance\", \"time\"])\n gpx_file = open(filename, 'r')\n gpx = gpxpy.parse(gpx_file)\n gpx_file.close()\n # aggregate all points into one array\n points = []\n # print(dir(gpx.tracks[0].segments[0].points[0]))\n for track in gpx.tracks:\n for segment in track.segments:\n for index, point in enumerate(segment.points, start=0):\n new_point = Point(\n lon=point.longitude,\n lat=point.latitude,\n elevation=point.elevation,\n distance=point.distance_3d(segment.points[index-1]) if index > 0 else 0,\n time=point.time\n )\n points.append(new_point)\n return points\n\n# generate additional values\ndef calc_additional(points):\n times = [p.time for p in points]\n t_min = min(times)\n duration = [(t - t_min).total_seconds() for t in times]\n height = smooth([p.elevation for p in points], 5)\n d_height = np.append(0, np.diff(height))\n distance = smooth([p.distance for p in points], 5)\n d_distance = np.append(0, np.diff(distance))\n\n return duration, height, d_height, distance, d_distance\n\n# extract rides\n# consecutive points with decreasing elevation & no stops (change in elevation) > 60s\ndef extract_rides(points):\n duration, height, d_height, distance, d_distance = calc_additional(points)\n smooth_d_height = smooth(d_height, 20)\n indices = []\n index = {\"start\": 0, \"end\": 0}\n for ix in range(len(points)):\n if smooth_d_height[ix] < 0 and (ix == 0 or smooth_d_height[ix-1] > 0):\n index[\"start\"] = ix\n elif index[\"start\"] > 0 and smooth_d_height[ix] > 0:\n index[\"end\"] = ix\n indices.append(index)\n index = {\"start\": 0, \"end\": 0}\n rides = []\n for trk in indices:\n rides.append(points[trk[\"start\"]:trk[\"end\"]])\n return rides\n"
] |
[
[
"numpy.convolve",
"numpy.diff",
"numpy.ones"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
flandolfi/clockwork-rnn
|
[
"6d7d4fca128a1b344c291ed7a034e004973fa4da"
] |
[
"cudnnrnn.py"
] |
[
"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom keras.layers.cudnn_recurrent import _CuDNNRNN\nfrom keras import initializers\nfrom keras import regularizers\nfrom keras import constraints\n\nfrom collections import namedtuple\n\n\nclass CuDNNSimpleRNN(_CuDNNRNN):\n \"\"\"Fast SimpleRNN implementation backed by [CuDNN](https://developer.nvidia.com/cudnn).\n Can only be run on GPU, with the TensorFlow backend.\n\n # Arguments\n units: Positive integer, dimensionality of the output space.\n activation: Activation function to use. Can be either hyperbolic\n tangent ('tanh') or rectifier linear ('relu').\n Default: hyperbolic tangent (`tanh`).\n kernel_initializer: Initializer for the `kernel` weights matrix,\n used for the linear transformation of the inputs.\n (see [initializers](../initializers.md)).\n recurrent_initializer: Initializer for the `recurrent_kernel`\n weights matrix,\n used for the linear transformation of the recurrent state.\n (see [initializers](../initializers.md)).\n bias_initializer: Initializer for the bias vector\n (see [initializers](../initializers.md)).\n kernel_regularizer: Regularizer function applied to\n the `kernel` weights matrix\n (see [regularizer](../regularizers.md)).\n recurrent_regularizer: Regularizer function applied to\n the `recurrent_kernel` weights matrix\n (see [regularizer](../regularizers.md)).\n bias_regularizer: Regularizer function applied to the bias vector\n (see [regularizer](../regularizers.md)).\n activity_regularizer: Regularizer function applied to\n the output of the layer (its \"activation\").\n (see [regularizer](../regularizers.md)).\n kernel_constraint: Constraint function applied to\n the `kernel` weights matrix\n (see [constraints](../constraints.md)).\n recurrent_constraint: Constraint function applied to\n the `recurrent_kernel` weights matrix\n (see [constraints](../constraints.md)).\n bias_constraint: Constraint function applied to the bias vector\n (see [constraints](../constraints.md)).\n return_sequences: Boolean. Whether to return the last output.\n in the output sequence, or the full sequence.\n return_state: Boolean. Whether to return the last state\n in addition to the output.\n stateful: Boolean (default False). If True, the last state\n for each sample at index i in a batch will be used as initial\n state for the sample of index i in the following batch.\n \"\"\"\n\n def __init__(self, units,\n activation='tanh',\n kernel_initializer='glorot_uniform',\n recurrent_initializer='orthogonal',\n bias_initializer='zeros',\n kernel_regularizer=None,\n recurrent_regularizer=None,\n bias_regularizer=None,\n activity_regularizer=None,\n kernel_constraint=None,\n recurrent_constraint=None,\n bias_constraint=None,\n return_sequences=False,\n return_state=False,\n stateful=False,\n **kwargs):\n self.units = units\n super(CuDNNSimpleRNN, self).__init__(\n return_sequences=return_sequences,\n return_state=return_state,\n stateful=stateful,\n **kwargs)\n\n if activation != 'tanh' and activation != 'relu':\n raise ValueError(\"Activation must be either 'tanh' or 'relu'. 
\"\n \"Found '%s'\" % str(activation))\n\n self.activation = activation\n\n self.kernel_initializer = initializers.get(kernel_initializer)\n self.recurrent_initializer = initializers.get(recurrent_initializer)\n self.bias_initializer = initializers.get(bias_initializer)\n\n self.kernel_regularizer = regularizers.get(kernel_regularizer)\n self.recurrent_regularizer = regularizers.get(recurrent_regularizer)\n self.bias_regularizer = regularizers.get(bias_regularizer)\n self.activity_regularizer = regularizers.get(activity_regularizer)\n\n self.kernel_constraint = constraints.get(kernel_constraint)\n self.recurrent_constraint = constraints.get(recurrent_constraint)\n self.bias_constraint = constraints.get(bias_constraint)\n\n @property\n def cell(self):\n Cell = namedtuple('cell', 'state_size')\n cell = Cell(state_size=self.units)\n return cell\n\n def build(self, input_shape):\n super(CuDNNSimpleRNN, self).build(input_shape)\n if isinstance(input_shape, list):\n input_shape = input_shape[0]\n input_dim = input_shape[-1]\n\n from tensorflow.contrib.cudnn_rnn.python.ops import cudnn_rnn_ops\n\n if self.activation == 'tanh':\n _cudnn_rnn_op = cudnn_rnn_ops.CudnnRNNTanh \n else:\n _cudnn_rnn_op = cudnn_rnn_ops.CudnnRNNRelu \n\n self._cudnn_rnn = _cudnn_rnn_op(\n num_layers=1,\n num_units=self.units,\n input_size=input_dim,\n input_mode='linear_input')\n\n self.kernel = self.add_weight(shape=(input_dim, self.units),\n name='kernel',\n initializer=self.kernel_initializer,\n regularizer=self.kernel_regularizer,\n constraint=self.kernel_constraint)\n self.recurrent_kernel = self.add_weight(\n shape=(self.units, self.units),\n name='recurrent_kernel',\n initializer=self.recurrent_initializer,\n regularizer=self.recurrent_regularizer,\n constraint=self.recurrent_constraint)\n\n self.bias = self.add_weight(shape=(self.units,),\n name='bias',\n initializer=self.bias_initializer,\n regularizer=self.bias_regularizer,\n constraint=self.bias_constraint)\n\n self.built = True\n\n def _process_batch(self, inputs, initial_state):\n import tensorflow as tf\n inputs = tf.transpose(inputs, (1, 0, 2))\n input_h = initial_state[0]\n input_h = tf.expand_dims(input_h, axis=0)\n\n params = self._canonical_to_params(\n weights=[\n self.kernel,\n self.recurrent_kernel,\n ],\n biases=[\n self.bias,\n ],\n )\n outputs, h = self._cudnn_rnn(\n inputs,\n input_h=input_h,\n params=params,\n is_training=True)\n\n if self.stateful or self.return_state:\n h = h[0]\n if self.return_sequences:\n output = tf.transpose(outputs, (1, 0, 2))\n else:\n output = outputs[-1]\n return output, [h]\n\n def get_config(self):\n config = {\n 'units': self.units,\n 'activation': self.activation,\n 'kernel_initializer': initializers.serialize(self.kernel_initializer),\n 'recurrent_initializer':\n initializers.serialize(self.recurrent_initializer),\n 'bias_initializer': initializers.serialize(self.bias_initializer),\n 'kernel_regularizer': regularizers.serialize(self.kernel_regularizer),\n 'recurrent_regularizer':\n regularizers.serialize(self.recurrent_regularizer),\n 'bias_regularizer': regularizers.serialize(self.bias_regularizer),\n 'activity_regularizer':\n regularizers.serialize(self.activity_regularizer),\n 'kernel_constraint': constraints.serialize(self.kernel_constraint),\n 'recurrent_constraint':\n constraints.serialize(self.recurrent_constraint),\n 'bias_constraint': constraints.serialize(self.bias_constraint)}\n base_config = super(CuDNNSimpleRNN, self).get_config()\n return dict(list(base_config.items()) + list(config.items()))\n"
] |
[
[
"tensorflow.transpose",
"tensorflow.expand_dims"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"1.12",
"1.4",
"1.13",
"1.5",
"1.7",
"0.12",
"1.0",
"1.2"
]
}
] |
bhedayat/Neural-Nets
|
[
"6a3caea4121e9694be5853c972c8d0d18c2fa7aa"
] |
[
"lstm.py"
] |
[
"\n# coding: utf-8\n\n# In[1]:\n\nfrom __future__ import print_function\n\n\nimport numpy as np\nimport theano\nimport theano.tensor as T\nimport lasagne\nfrom process import load_data\n\n#x = T.tensor4()\n\n\nN_HIDDEN = 100\n\nLEARNING_RATE = .001\n\nGRAD_CLIP = 100\n\nNUM_EPOCHS = 20\n\nBATCH_SIZE = 200\n\nvocab_size = 9\n\ninp_t,inp_v,output_t,output_v = load_data()\nsli_l = 8\nsli = 64\n\n#y = T.ivector()\ndef gen_data():\n\n xx = np.zeros((BATCH_SIZE,512,512))\n rng_state = np.random.get_state()\n np.random.shuffle(inp_t)\n np.random.set_state(rng_state)\n np.random.shuffle(output_t)\n y = output_t[0:BATCH_SIZE]\n xx = inp_t[0:BATCH_SIZE,:,:]\n y_v = output_v\n\n x_v = np.zeros((936,sli,1,sli_l,512))\n for i in range(len(inp_v)):\n for j in range(0,512,sli_l):\n x_v[i,j:j+sli_l,:,:,:] = inp_v[i,j:j+sli_l,:]\n\n x = np.zeros((BATCH_SIZE,sli,1,sli_l,512))\n for i in range(len(xx)):\n for j in range(0,512,sli_l):\n x[i,j:j+sli_l,:,:,:] = xx[i,j:j+sli_l,:]\n return x, x_v, y, y_v\n#print(xx.shape)\n\ndef main(num_epochs=NUM_EPOCHS):\n\n #l_in = lasagne.layers.InputLayer((BATCH_SIZE,64,1,8,512),x,'input_layer')\n l_in = lasagne.layers.InputLayer((BATCH_SIZE,sli,1,sli_l,512))\n\n l_forward_1 = lasagne.layers.LSTMLayer(\n l_in, N_HIDDEN, grad_clipping=GRAD_CLIP,\n nonlinearity=lasagne.nonlinearities.tanh)\n\n\n l_forward_slice = lasagne.layers.SliceLayer(l_forward_1, -1, 1)\n\n\n l_out = lasagne.layers.DenseLayer(l_forward_slice, num_units=vocab_size, W = lasagne.init.GlorotUniform(),nonlinearity=lasagne.nonlinearities.softmax)\n\n target_values = T.ivector('target_output')\n\n network_output = lasagne.layers.get_output(l_out)\n\n cost = T.nnet.categorical_crossentropy(network_output,target_values).mean()\n\n all_params = lasagne.layers.get_all_params(l_out,trainable=True)\n\n updates = lasagne.updates.adagrad(cost, all_params, LEARNING_RATE)\n\n\n train = theano.function([l_in.input_var, target_values], cost, updates=updates, allow_input_downcast=True)\n compute_cost = theano.function([l_in.input_var, target_values], cost, allow_input_downcast=True)\n\n get_out = theano.function([l_in.input_var],lasagne.layers.get_output(l_out),allow_input_downcast=True)\n\n probs = theano.function([l_in.input_var],network_output,allow_input_downcast=True)\n for n in xrange(1000):\n inp_t,inp_v,output_t,output_v = load_data()\n x, x_v, y, y_v = gen_data()\n avg_cost = 0\n avg_cost += train(x,y)\n val_output = get_out(x_v)\n val_predictions = np.argmax(val_output, axis=1)\n #print(val_predictions)\n #print(y_v)\n accuracy = np.mean(val_predictions == y_v)\n print(accuracy)\n print(avg_cost)\nif __name__ == '__main__':\n main()\n\n"
] |
[
[
"numpy.random.get_state",
"numpy.random.shuffle",
"numpy.random.set_state",
"numpy.argmax",
"numpy.mean",
"numpy.zeros"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
BenjaminMey/nipype
|
[
"388f140fceaf55438a987e9cdfa2a8e995428afd",
"388f140fceaf55438a987e9cdfa2a8e995428afd"
] |
[
"nipype/interfaces/freesurfer/preprocess.py",
"nipype/pipeline/plugins/base.py"
] |
[
"# -*- coding: utf-8 -*-\n# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-\n# vi: set ft=python sts=4 ts=4 sw=4 et:\n\"\"\"Provides interfaces to various commands provided by FreeSurfer\n\"\"\"\nimport os\nimport os.path as op\nfrom glob import glob\nimport shutil\n\nimport numpy as np\nfrom nibabel import load\n\nfrom ... import logging, LooseVersion\nfrom ...utils.filemanip import fname_presuffix, check_depends\nfrom ..io import FreeSurferSource\nfrom ..base import (\n TraitedSpec,\n File,\n traits,\n Directory,\n InputMultiPath,\n OutputMultiPath,\n CommandLine,\n CommandLineInputSpec,\n isdefined,\n)\nfrom .base import FSCommand, FSTraitedSpec, FSTraitedSpecOpenMP, FSCommandOpenMP, Info\nfrom .utils import copy2subjdir\n\n__docformat__ = \"restructuredtext\"\niflogger = logging.getLogger(\"nipype.interface\")\n\n# Keeping this to avoid breaking external programs that depend on it, but\n# this should not be used internally\nFSVersion = Info.looseversion().vstring\n\n\nclass ParseDICOMDirInputSpec(FSTraitedSpec):\n dicom_dir = Directory(\n exists=True,\n argstr=\"--d %s\",\n mandatory=True,\n desc=\"path to siemens dicom directory\",\n )\n dicom_info_file = File(\n \"dicominfo.txt\",\n argstr=\"--o %s\",\n usedefault=True,\n desc=\"file to which results are written\",\n )\n sortbyrun = traits.Bool(argstr=\"--sortbyrun\", desc=\"assign run numbers\")\n summarize = traits.Bool(\n argstr=\"--summarize\", desc=\"only print out info for run leaders\"\n )\n\n\nclass ParseDICOMDirOutputSpec(TraitedSpec):\n dicom_info_file = File(exists=True, desc=\"text file containing dicom information\")\n\n\nclass ParseDICOMDir(FSCommand):\n \"\"\"Uses mri_parse_sdcmdir to get information from dicom directories\n\n Examples\n --------\n\n >>> from nipype.interfaces.freesurfer import ParseDICOMDir\n >>> dcminfo = ParseDICOMDir()\n >>> dcminfo.inputs.dicom_dir = '.'\n >>> dcminfo.inputs.sortbyrun = True\n >>> dcminfo.inputs.summarize = True\n >>> dcminfo.cmdline\n 'mri_parse_sdcmdir --d . 
--o dicominfo.txt --sortbyrun --summarize'\n\n \"\"\"\n\n _cmd = \"mri_parse_sdcmdir\"\n input_spec = ParseDICOMDirInputSpec\n output_spec = ParseDICOMDirOutputSpec\n\n def _list_outputs(self):\n outputs = self.output_spec().get()\n if isdefined(self.inputs.dicom_info_file):\n outputs[\"dicom_info_file\"] = os.path.join(\n os.getcwd(), self.inputs.dicom_info_file\n )\n return outputs\n\n\nclass UnpackSDICOMDirInputSpec(FSTraitedSpec):\n source_dir = Directory(\n exists=True,\n argstr=\"-src %s\",\n mandatory=True,\n desc=\"directory with the DICOM files\",\n )\n output_dir = Directory(\n argstr=\"-targ %s\", desc=\"top directory into which the files will be unpacked\"\n )\n run_info = traits.Tuple(\n traits.Int,\n traits.Str,\n traits.Str,\n traits.Str,\n mandatory=True,\n argstr=\"-run %d %s %s %s\",\n xor=(\"run_info\", \"config\", \"seq_config\"),\n desc=\"runno subdir format name : spec unpacking rules on cmdline\",\n )\n config = File(\n exists=True,\n argstr=\"-cfg %s\",\n mandatory=True,\n xor=(\"run_info\", \"config\", \"seq_config\"),\n desc=\"specify unpacking rules in file\",\n )\n seq_config = File(\n exists=True,\n argstr=\"-seqcfg %s\",\n mandatory=True,\n xor=(\"run_info\", \"config\", \"seq_config\"),\n desc=\"specify unpacking rules based on sequence\",\n )\n dir_structure = traits.Enum(\n \"fsfast\",\n \"generic\",\n argstr=\"-%s\",\n desc=\"unpack to specified directory structures\",\n )\n no_info_dump = traits.Bool(argstr=\"-noinfodump\", desc=\"do not create infodump file\")\n scan_only = File(\n exists=True,\n argstr=\"-scanonly %s\",\n desc=\"only scan the directory and put result in file\",\n )\n log_file = File(exists=True, argstr=\"-log %s\", desc=\"explicilty set log file\")\n spm_zeropad = traits.Int(\n argstr=\"-nspmzeropad %d\", desc=\"set frame number zero padding width for SPM\"\n )\n no_unpack_err = traits.Bool(\n argstr=\"-no-unpackerr\", desc=\"do not try to unpack runs with errors\"\n )\n\n\nclass UnpackSDICOMDir(FSCommand):\n \"\"\"Use unpacksdcmdir to convert dicom files\n\n Call unpacksdcmdir -help from the command line to see more information on\n using this command.\n\n Examples\n --------\n\n >>> from nipype.interfaces.freesurfer import UnpackSDICOMDir\n >>> unpack = UnpackSDICOMDir()\n >>> unpack.inputs.source_dir = '.'\n >>> unpack.inputs.output_dir = '.'\n >>> unpack.inputs.run_info = (5, 'mprage', 'nii', 'struct')\n >>> unpack.inputs.dir_structure = 'generic'\n >>> unpack.cmdline\n 'unpacksdcmdir -generic -targ . 
-run 5 mprage nii struct -src .'\n \"\"\"\n\n _cmd = \"unpacksdcmdir\"\n input_spec = UnpackSDICOMDirInputSpec\n\n\nclass MRIConvertInputSpec(FSTraitedSpec):\n read_only = traits.Bool(argstr=\"--read_only\", desc=\"read the input volume\")\n no_write = traits.Bool(argstr=\"--no_write\", desc=\"do not write output\")\n in_info = traits.Bool(argstr=\"--in_info\", desc=\"display input info\")\n out_info = traits.Bool(argstr=\"--out_info\", desc=\"display output info\")\n in_stats = traits.Bool(argstr=\"--in_stats\", desc=\"display input stats\")\n out_stats = traits.Bool(argstr=\"--out_stats\", desc=\"display output stats\")\n in_matrix = traits.Bool(argstr=\"--in_matrix\", desc=\"display input matrix\")\n out_matrix = traits.Bool(argstr=\"--out_matrix\", desc=\"display output matrix\")\n in_i_size = traits.Int(argstr=\"--in_i_size %d\", desc=\"input i size\")\n in_j_size = traits.Int(argstr=\"--in_j_size %d\", desc=\"input j size\")\n in_k_size = traits.Int(argstr=\"--in_k_size %d\", desc=\"input k size\")\n force_ras = traits.Bool(\n argstr=\"--force_ras_good\", desc=\"use default when orientation info absent\"\n )\n in_i_dir = traits.Tuple(\n traits.Float,\n traits.Float,\n traits.Float,\n argstr=\"--in_i_direction %f %f %f\",\n desc=\"<R direction> <A direction> <S direction>\",\n )\n in_j_dir = traits.Tuple(\n traits.Float,\n traits.Float,\n traits.Float,\n argstr=\"--in_j_direction %f %f %f\",\n desc=\"<R direction> <A direction> <S direction>\",\n )\n in_k_dir = traits.Tuple(\n traits.Float,\n traits.Float,\n traits.Float,\n argstr=\"--in_k_direction %f %f %f\",\n desc=\"<R direction> <A direction> <S direction>\",\n )\n _orientations = [\n \"LAI\",\n \"LIA\",\n \"ALI\",\n \"AIL\",\n \"ILA\",\n \"IAL\",\n \"LAS\",\n \"LSA\",\n \"ALS\",\n \"ASL\",\n \"SLA\",\n \"SAL\",\n \"LPI\",\n \"LIP\",\n \"PLI\",\n \"PIL\",\n \"ILP\",\n \"IPL\",\n \"LPS\",\n \"LSP\",\n \"PLS\",\n \"PSL\",\n \"SLP\",\n \"SPL\",\n \"RAI\",\n \"RIA\",\n \"ARI\",\n \"AIR\",\n \"IRA\",\n \"IAR\",\n \"RAS\",\n \"RSA\",\n \"ARS\",\n \"ASR\",\n \"SRA\",\n \"SAR\",\n \"RPI\",\n \"RIP\",\n \"PRI\",\n \"PIR\",\n \"IRP\",\n \"IPR\",\n \"RPS\",\n \"RSP\",\n \"PRS\",\n \"PSR\",\n \"SRP\",\n \"SPR\",\n ]\n # _orientations = [comb for comb in itertools.chain(*[[''.join(c) for c in itertools.permutations(s)] for s in [a+b+c for a in 'LR' for b in 'AP' for c in 'IS']])]\n in_orientation = traits.Enum(\n _orientations,\n argstr=\"--in_orientation %s\",\n desc=\"specify the input orientation\",\n )\n in_center = traits.List(\n traits.Float,\n maxlen=3,\n argstr=\"--in_center %s\",\n desc=\"<R coordinate> <A coordinate> <S coordinate>\",\n )\n sphinx = traits.Bool(argstr=\"--sphinx\", desc=\"change orientation info to sphinx\")\n out_i_count = traits.Int(\n argstr=\"--out_i_count %d\", desc=\"some count ?? in i direction\"\n )\n out_j_count = traits.Int(\n argstr=\"--out_j_count %d\", desc=\"some count ?? in j direction\"\n )\n out_k_count = traits.Int(\n argstr=\"--out_k_count %d\", desc=\"some count ?? 
in k direction\"\n )\n vox_size = traits.Tuple(\n traits.Float,\n traits.Float,\n traits.Float,\n argstr=\"-voxsize %f %f %f\",\n desc=\"<size_x> <size_y> <size_z> specify the size (mm) - useful for upsampling or downsampling\",\n )\n out_i_size = traits.Int(argstr=\"--out_i_size %d\", desc=\"output i size\")\n out_j_size = traits.Int(argstr=\"--out_j_size %d\", desc=\"output j size\")\n out_k_size = traits.Int(argstr=\"--out_k_size %d\", desc=\"output k size\")\n out_i_dir = traits.Tuple(\n traits.Float,\n traits.Float,\n traits.Float,\n argstr=\"--out_i_direction %f %f %f\",\n desc=\"<R direction> <A direction> <S direction>\",\n )\n out_j_dir = traits.Tuple(\n traits.Float,\n traits.Float,\n traits.Float,\n argstr=\"--out_j_direction %f %f %f\",\n desc=\"<R direction> <A direction> <S direction>\",\n )\n out_k_dir = traits.Tuple(\n traits.Float,\n traits.Float,\n traits.Float,\n argstr=\"--out_k_direction %f %f %f\",\n desc=\"<R direction> <A direction> <S direction>\",\n )\n out_orientation = traits.Enum(\n _orientations,\n argstr=\"--out_orientation %s\",\n desc=\"specify the output orientation\",\n )\n out_center = traits.Tuple(\n traits.Float,\n traits.Float,\n traits.Float,\n argstr=\"--out_center %f %f %f\",\n desc=\"<R coordinate> <A coordinate> <S coordinate>\",\n )\n out_datatype = traits.Enum(\n \"uchar\",\n \"short\",\n \"int\",\n \"float\",\n argstr=\"--out_data_type %s\",\n desc=\"output data type <uchar|short|int|float>\",\n )\n resample_type = traits.Enum(\n \"interpolate\",\n \"weighted\",\n \"nearest\",\n \"sinc\",\n \"cubic\",\n argstr=\"--resample_type %s\",\n desc=\"<interpolate|weighted|nearest|sinc|cubic> (default is interpolate)\",\n )\n no_scale = traits.Bool(argstr=\"--no_scale 1\", desc=\"dont rescale values for COR\")\n no_change = traits.Bool(\n argstr=\"--nochange\", desc=\"don't change type of input to that of template\"\n )\n tr = traits.Int(argstr=\"-tr %d\", desc=\"TR in msec\")\n te = traits.Int(argstr=\"-te %d\", desc=\"TE in msec\")\n ti = traits.Int(argstr=\"-ti %d\", desc=\"TI in msec (note upper case flag)\")\n autoalign_matrix = File(\n exists=True, argstr=\"--autoalign %s\", desc=\"text file with autoalign matrix\"\n )\n unwarp_gradient = traits.Bool(\n argstr=\"--unwarp_gradient_nonlinearity\", desc=\"unwarp gradient nonlinearity\"\n )\n apply_transform = File(\n exists=True, argstr=\"--apply_transform %s\", desc=\"apply xfm file\"\n )\n apply_inv_transform = File(\n exists=True,\n argstr=\"--apply_inverse_transform %s\",\n desc=\"apply inverse transformation xfm file\",\n )\n devolve_transform = traits.Str(argstr=\"--devolvexfm %s\", desc=\"subject id\")\n crop_center = traits.Tuple(\n traits.Int,\n traits.Int,\n traits.Int,\n argstr=\"--crop %d %d %d\",\n desc=\"<x> <y> <z> crop to 256 around center (x, y, z)\",\n )\n crop_size = traits.Tuple(\n traits.Int,\n traits.Int,\n traits.Int,\n argstr=\"--cropsize %d %d %d\",\n desc=\"<dx> <dy> <dz> crop to size <dx, dy, dz>\",\n )\n cut_ends = traits.Int(\n argstr=\"--cutends %d\", desc=\"remove ncut slices from the ends\"\n )\n slice_crop = traits.Tuple(\n traits.Int,\n traits.Int,\n argstr=\"--slice-crop %d %d\",\n desc=\"s_start s_end : keep slices s_start to s_end\",\n )\n slice_reverse = traits.Bool(\n argstr=\"--slice-reverse\", desc=\"reverse order of slices, update vox2ras\"\n )\n slice_bias = traits.Float(\n argstr=\"--slice-bias %f\", desc=\"apply half-cosine bias field\"\n )\n fwhm = traits.Float(argstr=\"--fwhm %f\", desc=\"smooth input volume by fwhm mm\")\n _filetypes = [\n \"cor\",\n 
\"mgh\",\n \"mgz\",\n \"minc\",\n \"analyze\",\n \"analyze4d\",\n \"spm\",\n \"afni\",\n \"brik\",\n \"bshort\",\n \"bfloat\",\n \"sdt\",\n \"outline\",\n \"otl\",\n \"gdf\",\n \"nifti1\",\n \"nii\",\n \"niigz\",\n ]\n _infiletypes = [\"ge\", \"gelx\", \"lx\", \"ximg\", \"siemens\", \"dicom\", \"siemens_dicom\"]\n in_type = traits.Enum(\n _filetypes + _infiletypes, argstr=\"--in_type %s\", desc=\"input file type\"\n )\n out_type = traits.Enum(_filetypes, argstr=\"--out_type %s\", desc=\"output file type\")\n ascii = traits.Bool(\n argstr=\"--ascii\", desc=\"save output as ascii col>row>slice>frame\"\n )\n reorder = traits.Tuple(\n traits.Int,\n traits.Int,\n traits.Int,\n argstr=\"--reorder %d %d %d\",\n desc=\"olddim1 olddim2 olddim3\",\n )\n invert_contrast = traits.Float(\n argstr=\"--invert_contrast %f\", desc=\"threshold for inversting contrast\"\n )\n in_file = File(\n exists=True,\n mandatory=True,\n position=-2,\n argstr=\"--input_volume %s\",\n desc=\"File to read/convert\",\n )\n out_file = File(\n argstr=\"--output_volume %s\",\n position=-1,\n genfile=True,\n desc=\"output filename or True to generate one\",\n )\n conform = traits.Bool(\n argstr=\"--conform\",\n desc=\"conform to 1mm voxel size in coronal slice direction with 256^3 or more\",\n )\n conform_min = traits.Bool(argstr=\"--conform_min\", desc=\"conform to smallest size\")\n conform_size = traits.Float(\n argstr=\"--conform_size %s\", desc=\"conform to size_in_mm\"\n )\n cw256 = traits.Bool(argstr=\"--cw256\", desc=\"confrom to dimensions of 256^3\")\n parse_only = traits.Bool(argstr=\"--parse_only\", desc=\"parse input only\")\n subject_name = traits.Str(argstr=\"--subject_name %s\", desc=\"subject name ???\")\n reslice_like = File(\n exists=True, argstr=\"--reslice_like %s\", desc=\"reslice output to match file\"\n )\n template_type = traits.Enum(\n _filetypes + _infiletypes,\n argstr=\"--template_type %s\",\n desc=\"template file type\",\n )\n split = traits.Bool(\n argstr=\"--split\", desc=\"split output frames into separate output files.\"\n )\n frame = traits.Int(argstr=\"--frame %d\", desc=\"keep only 0-based frame number\")\n midframe = traits.Bool(argstr=\"--mid-frame\", desc=\"keep only the middle frame\")\n skip_n = traits.Int(argstr=\"--nskip %d\", desc=\"skip the first n frames\")\n drop_n = traits.Int(argstr=\"--ndrop %d\", desc=\"drop the last n frames\")\n frame_subsample = traits.Tuple(\n traits.Int,\n traits.Int,\n traits.Int,\n argstr=\"--fsubsample %d %d %d\",\n desc=\"start delta end : frame subsampling (end = -1 for end)\",\n )\n in_scale = traits.Float(argstr=\"--scale %f\", desc=\"input intensity scale factor\")\n out_scale = traits.Float(\n argstr=\"--out-scale %d\", desc=\"output intensity scale factor\"\n )\n in_like = File(exists=True, argstr=\"--in_like %s\", desc=\"input looks like\")\n fill_parcellation = traits.Bool(\n argstr=\"--fill_parcellation\", desc=\"fill parcellation\"\n )\n smooth_parcellation = traits.Bool(\n argstr=\"--smooth_parcellation\", desc=\"smooth parcellation\"\n )\n zero_outlines = traits.Bool(argstr=\"--zero_outlines\", desc=\"zero outlines\")\n color_file = File(exists=True, argstr=\"--color_file %s\", desc=\"color file\")\n no_translate = traits.Bool(argstr=\"--no_translate\", desc=\"???\")\n status_file = File(argstr=\"--status %s\", desc=\"status file for DICOM conversion\")\n sdcm_list = File(\n exists=True, argstr=\"--sdcmlist %s\", desc=\"list of DICOM files for conversion\"\n )\n template_info = traits.Bool(\n argstr=\"--template_info\", desc=\"dump info 
about template\"\n )\n crop_gdf = traits.Bool(argstr=\"--crop_gdf\", desc=\"apply GDF cropping\")\n zero_ge_z_offset = traits.Bool(\n argstr=\"--zero_ge_z_offset\", desc=\"zero ge z offset ???\"\n )\n\n\nclass MRIConvertOutputSpec(TraitedSpec):\n out_file = OutputMultiPath(File(exists=True), desc=\"converted output file\")\n\n\nclass MRIConvert(FSCommand):\n \"\"\"use fs mri_convert to manipulate files\n\n .. note::\n Adds niigz as an output type option\n\n Examples\n --------\n\n >>> mc = MRIConvert()\n >>> mc.inputs.in_file = 'structural.nii'\n >>> mc.inputs.out_file = 'outfile.mgz'\n >>> mc.inputs.out_type = 'mgz'\n >>> mc.cmdline\n 'mri_convert --out_type mgz --input_volume structural.nii --output_volume outfile.mgz'\n\n \"\"\"\n\n _cmd = \"mri_convert\"\n input_spec = MRIConvertInputSpec\n output_spec = MRIConvertOutputSpec\n\n filemap = dict(\n cor=\"cor\",\n mgh=\"mgh\",\n mgz=\"mgz\",\n minc=\"mnc\",\n afni=\"brik\",\n brik=\"brik\",\n bshort=\"bshort\",\n spm=\"img\",\n analyze=\"img\",\n analyze4d=\"img\",\n bfloat=\"bfloat\",\n nifti1=\"img\",\n nii=\"nii\",\n niigz=\"nii.gz\",\n )\n\n def _format_arg(self, name, spec, value):\n if name in [\"in_type\", \"out_type\", \"template_type\"]:\n if value == \"niigz\":\n return spec.argstr % \"nii\"\n return super(MRIConvert, self)._format_arg(name, spec, value)\n\n def _get_outfilename(self):\n outfile = self.inputs.out_file\n if not isdefined(outfile):\n if isdefined(self.inputs.out_type):\n suffix = \"_out.\" + self.filemap[self.inputs.out_type]\n else:\n suffix = \"_out.nii.gz\"\n outfile = fname_presuffix(\n self.inputs.in_file, newpath=os.getcwd(), suffix=suffix, use_ext=False\n )\n return os.path.abspath(outfile)\n\n def _list_outputs(self):\n outputs = self.output_spec().get()\n outfile = self._get_outfilename()\n if isdefined(self.inputs.split) and self.inputs.split:\n size = load(self.inputs.in_file).shape\n if len(size) == 3:\n tp = 1\n else:\n tp = size[-1]\n if outfile.endswith(\".mgz\"):\n stem = outfile.split(\".mgz\")[0]\n ext = \".mgz\"\n elif outfile.endswith(\".nii.gz\"):\n stem = outfile.split(\".nii.gz\")[0]\n ext = \".nii.gz\"\n else:\n stem = \".\".join(outfile.split(\".\")[:-1])\n ext = \".\" + outfile.split(\".\")[-1]\n outfile = []\n for idx in range(0, tp):\n outfile.append(stem + \"%04d\" % idx + ext)\n if isdefined(self.inputs.out_type):\n if self.inputs.out_type in [\"spm\", \"analyze\"]:\n # generate all outputs\n size = load(self.inputs.in_file).shape\n if len(size) == 3:\n tp = 1\n else:\n tp = size[-1]\n # have to take care of all the frame manipulations\n raise Exception(\n \"Not taking frame manipulations into account- please warn the developers\"\n )\n outfiles = []\n outfile = self._get_outfilename()\n for i in range(tp):\n outfiles.append(fname_presuffix(outfile, suffix=\"%03d\" % (i + 1)))\n outfile = outfiles\n outputs[\"out_file\"] = outfile\n return outputs\n\n def _gen_filename(self, name):\n if name == \"out_file\":\n return self._get_outfilename()\n return None\n\n\nclass DICOMConvertInputSpec(FSTraitedSpec):\n dicom_dir = Directory(\n exists=True,\n mandatory=True,\n desc=\"dicom directory from which to convert dicom files\",\n )\n base_output_dir = Directory(\n mandatory=True, desc=\"directory in which subject directories are created\"\n )\n subject_dir_template = traits.Str(\n \"S.%04d\", usedefault=True, desc=\"template for subject directory name\"\n )\n subject_id = traits.Any(desc=\"subject identifier to insert into template\")\n file_mapping = traits.List(\n traits.Tuple(traits.Str, 
traits.Str),\n desc=\"defines the output fields of interface\",\n )\n out_type = traits.Enum(\n \"niigz\",\n MRIConvertInputSpec._filetypes,\n usedefault=True,\n desc=\"defines the type of output file produced\",\n )\n dicom_info = File(\n exists=True, desc=\"File containing summary information from mri_parse_sdcmdir\"\n )\n seq_list = traits.List(\n traits.Str,\n requires=[\"dicom_info\"],\n desc=\"list of pulse sequence names to be converted.\",\n )\n ignore_single_slice = traits.Bool(\n requires=[\"dicom_info\"], desc=\"ignore volumes containing a single slice\"\n )\n\n\nclass DICOMConvert(FSCommand):\n \"\"\"use fs mri_convert to convert dicom files\n\n Examples\n --------\n\n >>> from nipype.interfaces.freesurfer import DICOMConvert\n >>> cvt = DICOMConvert()\n >>> cvt.inputs.dicom_dir = 'dicomdir'\n >>> cvt.inputs.file_mapping = [('nifti', '*.nii'), ('info', 'dicom*.txt'), ('dti', '*dti.bv*')]\n\n \"\"\"\n\n _cmd = \"mri_convert\"\n input_spec = DICOMConvertInputSpec\n\n def _get_dicomfiles(self):\n \"\"\"validate fsl bet options\n if set to None ignore\n \"\"\"\n return glob(os.path.abspath(os.path.join(self.inputs.dicom_dir, \"*-1.dcm\")))\n\n def _get_outdir(self):\n \"\"\"returns output directory\"\"\"\n subjid = self.inputs.subject_id\n if not isdefined(subjid):\n path, fname = os.path.split(self._get_dicomfiles()[0])\n subjid = int(fname.split(\"-\")[0])\n if isdefined(self.inputs.subject_dir_template):\n subjid = self.inputs.subject_dir_template % subjid\n basedir = self.inputs.base_output_dir\n if not isdefined(basedir):\n basedir = os.path.abspath(\".\")\n outdir = os.path.abspath(os.path.join(basedir, subjid))\n return outdir\n\n def _get_runs(self):\n \"\"\"Returns list of dicom series that should be converted.\n\n Requires a dicom info summary file generated by ``DicomDirInfo``\n\n \"\"\"\n seq = np.genfromtxt(self.inputs.dicom_info, dtype=object)\n runs = []\n for s in seq:\n if self.inputs.seq_list:\n if self.inputs.ignore_single_slice:\n if (int(s[8]) > 1) and any(\n [s[12].startswith(sn) for sn in self.inputs.seq_list]\n ):\n runs.append(int(s[2]))\n else:\n if any([s[12].startswith(sn) for sn in self.inputs.seq_list]):\n runs.append(int(s[2]))\n else:\n runs.append(int(s[2]))\n return runs\n\n def _get_filelist(self, outdir):\n \"\"\"Returns list of files to be converted\"\"\"\n filemap = {}\n for f in self._get_dicomfiles():\n head, fname = os.path.split(f)\n fname, ext = os.path.splitext(fname)\n fileparts = fname.split(\"-\")\n runno = int(fileparts[1])\n out_type = MRIConvert.filemap[self.inputs.out_type]\n outfile = os.path.join(\n outdir, \".\".join((\"%s-%02d\" % (fileparts[0], runno), out_type))\n )\n filemap[runno] = (f, outfile)\n if self.inputs.dicom_info:\n files = [filemap[r] for r in self._get_runs()]\n else:\n files = [filemap[r] for r in list(filemap.keys())]\n return files\n\n @property\n def cmdline(self):\n \"\"\" `command` plus any arguments (args)\n validates arguments and generates command line\"\"\"\n self._check_mandatory_inputs()\n outdir = self._get_outdir()\n cmd = []\n if not os.path.exists(outdir):\n cmdstr = \"python -c \\\"import os; os.makedirs('%s')\\\"\" % outdir\n cmd.extend([cmdstr])\n infofile = os.path.join(outdir, \"shortinfo.txt\")\n if not os.path.exists(infofile):\n cmdstr = \"dcmdir-info-mgh %s > %s\" % (self.inputs.dicom_dir, infofile)\n cmd.extend([cmdstr])\n files = self._get_filelist(outdir)\n for infile, outfile in files:\n if not os.path.exists(outfile):\n single_cmd = \"%s%s %s %s\" % (\n self._cmd_prefix,\n 
self.cmd,\n infile,\n os.path.join(outdir, outfile),\n )\n cmd.extend([single_cmd])\n return \"; \".join(cmd)\n\n\nclass ResampleInputSpec(FSTraitedSpec):\n in_file = File(\n exists=True,\n argstr=\"-i %s\",\n mandatory=True,\n desc=\"file to resample\",\n position=-2,\n )\n resampled_file = File(\n argstr=\"-o %s\", desc=\"output filename\", genfile=True, position=-1\n )\n voxel_size = traits.Tuple(\n traits.Float,\n traits.Float,\n traits.Float,\n argstr=\"-vs %.2f %.2f %.2f\",\n desc=\"triplet of output voxel sizes\",\n mandatory=True,\n )\n\n\nclass ResampleOutputSpec(TraitedSpec):\n resampled_file = File(exists=True, desc=\"output filename\")\n\n\nclass Resample(FSCommand):\n \"\"\"Use FreeSurfer mri_convert to up or down-sample image files\n\n Examples\n --------\n\n >>> from nipype.interfaces import freesurfer\n >>> resampler = freesurfer.Resample()\n >>> resampler.inputs.in_file = 'structural.nii'\n >>> resampler.inputs.resampled_file = 'resampled.nii'\n >>> resampler.inputs.voxel_size = (2.1, 2.1, 2.1)\n >>> resampler.cmdline\n 'mri_convert -vs 2.10 2.10 2.10 -i structural.nii -o resampled.nii'\n\n \"\"\"\n\n _cmd = \"mri_convert\"\n input_spec = ResampleInputSpec\n output_spec = ResampleOutputSpec\n\n def _get_outfilename(self):\n if isdefined(self.inputs.resampled_file):\n outfile = self.inputs.resampled_file\n else:\n outfile = fname_presuffix(\n self.inputs.in_file, newpath=os.getcwd(), suffix=\"_resample\"\n )\n return outfile\n\n def _list_outputs(self):\n outputs = self.output_spec().get()\n outputs[\"resampled_file\"] = self._get_outfilename()\n return outputs\n\n def _gen_filename(self, name):\n if name == \"resampled_file\":\n return self._get_outfilename()\n return None\n\n\nclass ReconAllInputSpec(CommandLineInputSpec):\n subject_id = traits.Str(\n \"recon_all\", argstr=\"-subjid %s\", desc=\"subject name\", usedefault=True\n )\n directive = traits.Enum(\n \"all\",\n \"autorecon1\",\n # autorecon2 variants\n \"autorecon2\",\n \"autorecon2-volonly\",\n \"autorecon2-perhemi\",\n \"autorecon2-inflate1\",\n \"autorecon2-cp\",\n \"autorecon2-wm\",\n # autorecon3 variants\n \"autorecon3\",\n \"autorecon3-T2pial\",\n # Mix of autorecon2 and autorecon3 steps\n \"autorecon-pial\",\n \"autorecon-hemi\",\n # Not \"multi-stage flags\"\n \"localGI\",\n \"qcache\",\n argstr=\"-%s\",\n desc=\"process directive\",\n usedefault=True,\n position=0,\n )\n hemi = traits.Enum(\"lh\", \"rh\", desc=\"hemisphere to process\", argstr=\"-hemi %s\")\n T1_files = InputMultiPath(\n File(exists=True), argstr=\"-i %s...\", desc=\"name of T1 file to process\"\n )\n T2_file = File(\n exists=True,\n argstr=\"-T2 %s\",\n min_ver=\"5.3.0\",\n desc=\"Convert T2 image to orig directory\",\n )\n FLAIR_file = File(\n exists=True,\n argstr=\"-FLAIR %s\",\n min_ver=\"5.3.0\",\n desc=\"Convert FLAIR image to orig directory\",\n )\n use_T2 = traits.Bool(\n argstr=\"-T2pial\",\n min_ver=\"5.3.0\",\n xor=[\"use_FLAIR\"],\n desc=\"Use T2 image to refine the pial surface\",\n )\n use_FLAIR = traits.Bool(\n argstr=\"-FLAIRpial\",\n min_ver=\"5.3.0\",\n xor=[\"use_T2\"],\n desc=\"Use FLAIR image to refine the pial surface\",\n )\n openmp = traits.Int(\n argstr=\"-openmp %d\", desc=\"Number of processors to use in parallel\"\n )\n parallel = traits.Bool(argstr=\"-parallel\", desc=\"Enable parallel execution\")\n hires = traits.Bool(\n argstr=\"-hires\",\n min_ver=\"6.0.0\",\n desc=\"Conform to minimum voxel size (for voxels < 1mm)\",\n )\n mprage = traits.Bool(\n argstr=\"-mprage\",\n desc=(\n \"Assume scan parameters 
are MGH MP-RAGE \"\n \"protocol, which produces darker gray matter\"\n ),\n )\n big_ventricles = traits.Bool(\n argstr=\"-bigventricles\",\n desc=(\"For use in subjects with enlarged \" \"ventricles\"),\n )\n brainstem = traits.Bool(\n argstr=\"-brainstem-structures\", desc=\"Segment brainstem structures\"\n )\n hippocampal_subfields_T1 = traits.Bool(\n argstr=\"-hippocampal-subfields-T1\",\n min_ver=\"6.0.0\",\n desc=\"segment hippocampal subfields using input T1 scan\",\n )\n hippocampal_subfields_T2 = traits.Tuple(\n File(exists=True),\n traits.Str(),\n argstr=\"-hippocampal-subfields-T2 %s %s\",\n min_ver=\"6.0.0\",\n desc=(\n \"segment hippocampal subfields using T2 scan, identified by \"\n \"ID (may be combined with hippocampal_subfields_T1)\"\n ),\n )\n expert = File(\n exists=True, argstr=\"-expert %s\", desc=\"Set parameters using expert file\"\n )\n xopts = traits.Enum(\n \"use\",\n \"clean\",\n \"overwrite\",\n argstr=\"-xopts-%s\",\n desc=\"Use, delete or overwrite existing expert options file\",\n )\n subjects_dir = Directory(\n exists=True,\n argstr=\"-sd %s\",\n hash_files=False,\n desc=\"path to subjects directory\",\n genfile=True,\n )\n flags = InputMultiPath(traits.Str, argstr=\"%s\", desc=\"additional parameters\")\n\n # Expert options\n talairach = traits.Str(desc=\"Flags to pass to talairach commands\", xor=[\"expert\"])\n mri_normalize = traits.Str(\n desc=\"Flags to pass to mri_normalize commands\", xor=[\"expert\"]\n )\n mri_watershed = traits.Str(\n desc=\"Flags to pass to mri_watershed commands\", xor=[\"expert\"]\n )\n mri_em_register = traits.Str(\n desc=\"Flags to pass to mri_em_register commands\", xor=[\"expert\"]\n )\n mri_ca_normalize = traits.Str(\n desc=\"Flags to pass to mri_ca_normalize commands\", xor=[\"expert\"]\n )\n mri_ca_register = traits.Str(\n desc=\"Flags to pass to mri_ca_register commands\", xor=[\"expert\"]\n )\n mri_remove_neck = traits.Str(\n desc=\"Flags to pass to mri_remove_neck commands\", xor=[\"expert\"]\n )\n mri_ca_label = traits.Str(\n desc=\"Flags to pass to mri_ca_label commands\", xor=[\"expert\"]\n )\n mri_segstats = traits.Str(\n desc=\"Flags to pass to mri_segstats commands\", xor=[\"expert\"]\n )\n mri_mask = traits.Str(desc=\"Flags to pass to mri_mask commands\", xor=[\"expert\"])\n mri_segment = traits.Str(\n desc=\"Flags to pass to mri_segment commands\", xor=[\"expert\"]\n )\n mri_edit_wm_with_aseg = traits.Str(\n desc=\"Flags to pass to mri_edit_wm_with_aseg commands\", xor=[\"expert\"]\n )\n mri_pretess = traits.Str(\n desc=\"Flags to pass to mri_pretess commands\", xor=[\"expert\"]\n )\n mri_fill = traits.Str(desc=\"Flags to pass to mri_fill commands\", xor=[\"expert\"])\n mri_tessellate = traits.Str(\n desc=\"Flags to pass to mri_tessellate commands\", xor=[\"expert\"]\n )\n mris_smooth = traits.Str(\n desc=\"Flags to pass to mri_smooth commands\", xor=[\"expert\"]\n )\n mris_inflate = traits.Str(\n desc=\"Flags to pass to mri_inflate commands\", xor=[\"expert\"]\n )\n mris_sphere = traits.Str(\n desc=\"Flags to pass to mris_sphere commands\", xor=[\"expert\"]\n )\n mris_fix_topology = traits.Str(\n desc=\"Flags to pass to mris_fix_topology commands\", xor=[\"expert\"]\n )\n mris_make_surfaces = traits.Str(\n desc=\"Flags to pass to mris_make_surfaces commands\", xor=[\"expert\"]\n )\n mris_surf2vol = traits.Str(\n desc=\"Flags to pass to mris_surf2vol commands\", xor=[\"expert\"]\n )\n mris_register = traits.Str(\n desc=\"Flags to pass to mris_register commands\", xor=[\"expert\"]\n )\n mrisp_paint = 
traits.Str(\n desc=\"Flags to pass to mrisp_paint commands\", xor=[\"expert\"]\n )\n mris_ca_label = traits.Str(\n desc=\"Flags to pass to mris_ca_label commands\", xor=[\"expert\"]\n )\n mris_anatomical_stats = traits.Str(\n desc=\"Flags to pass to mris_anatomical_stats commands\", xor=[\"expert\"]\n )\n mri_aparc2aseg = traits.Str(\n desc=\"Flags to pass to mri_aparc2aseg commands\", xor=[\"expert\"]\n )\n\n\nclass ReconAllOutputSpec(FreeSurferSource.output_spec):\n subjects_dir = Directory(exists=True, desc=\"Freesurfer subjects directory.\")\n subject_id = traits.Str(desc=\"Subject name for whom to retrieve data\")\n\n\nclass ReconAll(CommandLine):\n \"\"\"Uses recon-all to generate surfaces and parcellations of structural data\n from anatomical images of a subject.\n\n Examples\n --------\n\n >>> from nipype.interfaces.freesurfer import ReconAll\n >>> reconall = ReconAll()\n >>> reconall.inputs.subject_id = 'foo'\n >>> reconall.inputs.directive = 'all'\n >>> reconall.inputs.subjects_dir = '.'\n >>> reconall.inputs.T1_files = 'structural.nii'\n >>> reconall.cmdline\n 'recon-all -all -i structural.nii -subjid foo -sd .'\n >>> reconall.inputs.flags = \"-qcache\"\n >>> reconall.cmdline\n 'recon-all -all -i structural.nii -qcache -subjid foo -sd .'\n >>> reconall.inputs.flags = [\"-cw256\", \"-qcache\"]\n >>> reconall.cmdline\n 'recon-all -all -i structural.nii -cw256 -qcache -subjid foo -sd .'\n\n Hemisphere may be specified regardless of directive:\n\n >>> reconall.inputs.flags = []\n >>> reconall.inputs.hemi = 'lh'\n >>> reconall.cmdline\n 'recon-all -all -i structural.nii -hemi lh -subjid foo -sd .'\n\n ``-autorecon-hemi`` uses the ``-hemi`` input to specify the hemisphere\n to operate upon:\n\n >>> reconall.inputs.directive = 'autorecon-hemi'\n >>> reconall.cmdline\n 'recon-all -autorecon-hemi lh -i structural.nii -subjid foo -sd .'\n\n Hippocampal subfields can accept T1 and T2 images:\n\n >>> reconall_subfields = ReconAll()\n >>> reconall_subfields.inputs.subject_id = 'foo'\n >>> reconall_subfields.inputs.directive = 'all'\n >>> reconall_subfields.inputs.subjects_dir = '.'\n >>> reconall_subfields.inputs.T1_files = 'structural.nii'\n >>> reconall_subfields.inputs.hippocampal_subfields_T1 = True\n >>> reconall_subfields.cmdline\n 'recon-all -all -i structural.nii -hippocampal-subfields-T1 -subjid foo -sd .'\n >>> reconall_subfields.inputs.hippocampal_subfields_T2 = (\n ... 'structural.nii', 'test')\n >>> reconall_subfields.cmdline\n 'recon-all -all -i structural.nii -hippocampal-subfields-T1T2 structural.nii test -subjid foo -sd .'\n >>> reconall_subfields.inputs.hippocampal_subfields_T1 = False\n >>> reconall_subfields.cmdline\n 'recon-all -all -i structural.nii -hippocampal-subfields-T2 structural.nii test -subjid foo -sd .'\n \"\"\"\n\n _cmd = \"recon-all\"\n _additional_metadata = [\"loc\", \"altkey\"]\n input_spec = ReconAllInputSpec\n output_spec = ReconAllOutputSpec\n _can_resume = True\n force_run = False\n\n # Steps are based off of the recon-all tables [0,1] describing, inputs,\n # commands, and outputs of each step of the recon-all process,\n # controlled by flags.\n #\n # Each step is a 3-tuple containing (flag, [outputs], [inputs])\n # A step is considered complete if all of its outputs exist and are newer\n # than the inputs. An empty input list indicates input mtimes will not\n # be checked. 
This may need updating, if users are working with manually\n # edited files.\n #\n # [0] https://surfer.nmr.mgh.harvard.edu/fswiki/ReconAllTableStableV5.3\n # [1] https://surfer.nmr.mgh.harvard.edu/fswiki/ReconAllTableStableV6.0\n _autorecon1_steps = [\n (\"motioncor\", [\"mri/rawavg.mgz\", \"mri/orig.mgz\"], []),\n (\n \"talairach\",\n [\n \"mri/orig_nu.mgz\",\n \"mri/transforms/talairach.auto.xfm\",\n \"mri/transforms/talairach.xfm\",\n # 'mri/transforms/talairach_avi.log',\n ],\n [],\n ),\n (\"nuintensitycor\", [\"mri/nu.mgz\"], []),\n (\"normalization\", [\"mri/T1.mgz\"], []),\n (\n \"skullstrip\",\n [\n \"mri/transforms/talairach_with_skull.lta\",\n \"mri/brainmask.auto.mgz\",\n \"mri/brainmask.mgz\",\n ],\n [],\n ),\n ]\n if Info.looseversion() < LooseVersion(\"6.0.0\"):\n _autorecon2_volonly_steps = [\n (\"gcareg\", [\"mri/transforms/talairach.lta\"], []),\n (\"canorm\", [\"mri/norm.mgz\"], []),\n (\"careg\", [\"mri/transforms/talairach.m3z\"], []),\n (\n \"careginv\",\n [\n \"mri/transforms/talairach.m3z.inv.x.mgz\",\n \"mri/transforms/talairach.m3z.inv.y.mgz\",\n \"mri/transforms/talairach.m3z.inv.z.mgz\",\n ],\n [],\n ),\n (\"rmneck\", [\"mri/nu_noneck.mgz\"], []),\n (\"skull-lta\", [\"mri/transforms/talairach_with_skull_2.lta\"], []),\n (\n \"calabel\",\n [\"mri/aseg.auto_noCCseg.mgz\", \"mri/aseg.auto.mgz\", \"mri/aseg.mgz\"],\n [],\n ),\n (\"normalization2\", [\"mri/brain.mgz\"], []),\n (\"maskbfs\", [\"mri/brain.finalsurfs.mgz\"], []),\n (\n \"segmentation\",\n [\"mri/wm.seg.mgz\", \"mri/wm.asegedit.mgz\", \"mri/wm.mgz\"],\n [],\n ),\n (\n \"fill\",\n [\n \"mri/filled.mgz\",\n # 'scripts/ponscc.cut.log',\n ],\n [],\n ),\n ]\n _autorecon2_lh_steps = [\n (\"tessellate\", [\"surf/lh.orig.nofix\"], []),\n (\"smooth1\", [\"surf/lh.smoothwm.nofix\"], []),\n (\"inflate1\", [\"surf/lh.inflated.nofix\"], []),\n (\"qsphere\", [\"surf/lh.qsphere.nofix\"], []),\n (\"fix\", [\"surf/lh.orig\"], []),\n (\n \"white\",\n [\n \"surf/lh.white\",\n \"surf/lh.curv\",\n \"surf/lh.area\",\n \"label/lh.cortex.label\",\n ],\n [],\n ),\n (\"smooth2\", [\"surf/lh.smoothwm\"], []),\n (\n \"inflate2\",\n [\n \"surf/lh.inflated\",\n \"surf/lh.sulc\",\n \"surf/lh.inflated.H\",\n \"surf/lh.inflated.K\",\n ],\n [],\n ),\n # Undocumented in ReconAllTableStableV5.3\n (\"curvstats\", [\"stats/lh.curv.stats\"], []),\n ]\n _autorecon3_lh_steps = [\n (\"sphere\", [\"surf/lh.sphere\"], []),\n (\"surfreg\", [\"surf/lh.sphere.reg\"], []),\n (\"jacobian_white\", [\"surf/lh.jacobian_white\"], []),\n (\"avgcurv\", [\"surf/lh.avg_curv\"], []),\n (\"cortparc\", [\"label/lh.aparc.annot\"], []),\n (\n \"pial\",\n [\n \"surf/lh.pial\",\n \"surf/lh.curv.pial\",\n \"surf/lh.area.pial\",\n \"surf/lh.thickness\",\n ],\n [],\n ),\n # Misnamed outputs in ReconAllTableStableV5.3: ?h.w-c.pct.mgz\n (\"pctsurfcon\", [\"surf/lh.w-g.pct.mgh\"], []),\n (\"parcstats\", [\"stats/lh.aparc.stats\"], []),\n (\"cortparc2\", [\"label/lh.aparc.a2009s.annot\"], []),\n (\"parcstats2\", [\"stats/lh.aparc.a2009s.stats\"], []),\n # Undocumented in ReconAllTableStableV5.3\n (\"cortparc3\", [\"label/lh.aparc.DKTatlas40.annot\"], []),\n # Undocumented in ReconAllTableStableV5.3\n (\"parcstats3\", [\"stats/lh.aparc.a2009s.stats\"], []),\n (\"label-exvivo-ec\", [\"label/lh.entorhinal_exvivo.label\"], []),\n ]\n _autorecon3_added_steps = [\n (\n \"cortribbon\",\n [\"mri/lh.ribbon.mgz\", \"mri/rh.ribbon.mgz\", \"mri/ribbon.mgz\"],\n [],\n ),\n (\"segstats\", [\"stats/aseg.stats\"], []),\n (\"aparc2aseg\", [\"mri/aparc+aseg.mgz\", 
\"mri/aparc.a2009s+aseg.mgz\"], []),\n (\"wmparc\", [\"mri/wmparc.mgz\", \"stats/wmparc.stats\"], []),\n (\"balabels\", [\"label/BA.ctab\", \"label/BA.thresh.ctab\"], []),\n ]\n else:\n _autorecon2_volonly_steps = [\n (\"gcareg\", [\"mri/transforms/talairach.lta\"], []),\n (\"canorm\", [\"mri/norm.mgz\"], []),\n (\"careg\", [\"mri/transforms/talairach.m3z\"], []),\n (\n \"calabel\",\n [\"mri/aseg.auto_noCCseg.mgz\", \"mri/aseg.auto.mgz\", \"mri/aseg.mgz\"],\n [],\n ),\n (\"normalization2\", [\"mri/brain.mgz\"], []),\n (\"maskbfs\", [\"mri/brain.finalsurfs.mgz\"], []),\n (\n \"segmentation\",\n [\"mri/wm.seg.mgz\", \"mri/wm.asegedit.mgz\", \"mri/wm.mgz\"],\n [],\n ),\n (\n \"fill\",\n [\n \"mri/filled.mgz\",\n # 'scripts/ponscc.cut.log',\n ],\n [],\n ),\n ]\n _autorecon2_lh_steps = [\n (\"tessellate\", [\"surf/lh.orig.nofix\"], []),\n (\"smooth1\", [\"surf/lh.smoothwm.nofix\"], []),\n (\"inflate1\", [\"surf/lh.inflated.nofix\"], []),\n (\"qsphere\", [\"surf/lh.qsphere.nofix\"], []),\n (\"fix\", [\"surf/lh.orig\"], []),\n (\n \"white\",\n [\n \"surf/lh.white.preaparc\",\n \"surf/lh.curv\",\n \"surf/lh.area\",\n \"label/lh.cortex.label\",\n ],\n [],\n ),\n (\"smooth2\", [\"surf/lh.smoothwm\"], []),\n (\"inflate2\", [\"surf/lh.inflated\", \"surf/lh.sulc\"], []),\n (\n \"curvHK\",\n [\n \"surf/lh.white.H\",\n \"surf/lh.white.K\",\n \"surf/lh.inflated.H\",\n \"surf/lh.inflated.K\",\n ],\n [],\n ),\n (\"curvstats\", [\"stats/lh.curv.stats\"], []),\n ]\n _autorecon3_lh_steps = [\n (\"sphere\", [\"surf/lh.sphere\"], []),\n (\"surfreg\", [\"surf/lh.sphere.reg\"], []),\n (\"jacobian_white\", [\"surf/lh.jacobian_white\"], []),\n (\"avgcurv\", [\"surf/lh.avg_curv\"], []),\n (\"cortparc\", [\"label/lh.aparc.annot\"], []),\n (\n \"pial\",\n [\n \"surf/lh.pial\",\n \"surf/lh.curv.pial\",\n \"surf/lh.area.pial\",\n \"surf/lh.thickness\",\n \"surf/lh.white\",\n ],\n [],\n ),\n (\"parcstats\", [\"stats/lh.aparc.stats\"], []),\n (\"cortparc2\", [\"label/lh.aparc.a2009s.annot\"], []),\n (\"parcstats2\", [\"stats/lh.aparc.a2009s.stats\"], []),\n (\"cortparc3\", [\"label/lh.aparc.DKTatlas.annot\"], []),\n (\"parcstats3\", [\"stats/lh.aparc.DKTatlas.stats\"], []),\n (\"pctsurfcon\", [\"surf/lh.w-g.pct.mgh\"], []),\n ]\n _autorecon3_added_steps = [\n (\n \"cortribbon\",\n [\"mri/lh.ribbon.mgz\", \"mri/rh.ribbon.mgz\", \"mri/ribbon.mgz\"],\n [],\n ),\n (\"hyporelabel\", [\"mri/aseg.presurf.hypos.mgz\"], []),\n (\n \"aparc2aseg\",\n [\n \"mri/aparc+aseg.mgz\",\n \"mri/aparc.a2009s+aseg.mgz\",\n \"mri/aparc.DKTatlas+aseg.mgz\",\n ],\n [],\n ),\n (\"apas2aseg\", [\"mri/aseg.mgz\"], [\"mri/aparc+aseg.mgz\"]),\n (\"segstats\", [\"stats/aseg.stats\"], []),\n (\"wmparc\", [\"mri/wmparc.mgz\", \"stats/wmparc.stats\"], []),\n # Note that this is a very incomplete list; however the ctab\n # files are last to be touched, so this should be reasonable\n (\n \"balabels\",\n [\n \"label/BA_exvivo.ctab\",\n \"label/BA_exvivo.thresh.ctab\",\n \"label/lh.entorhinal_exvivo.label\",\n \"label/rh.entorhinal_exvivo.label\",\n ],\n [],\n ),\n ]\n\n # Fill out autorecon2 steps\n _autorecon2_rh_steps = [\n (step, [out.replace(\"lh\", \"rh\") for out in outs], ins)\n for step, outs, ins in _autorecon2_lh_steps\n ]\n _autorecon2_perhemi_steps = [\n (step, [of for out in outs for of in (out, out.replace(\"lh\", \"rh\"))], ins)\n for step, outs, ins in _autorecon2_lh_steps\n ]\n _autorecon2_steps = _autorecon2_volonly_steps + _autorecon2_perhemi_steps\n\n # Fill out autorecon3 steps\n _autorecon3_rh_steps = [\n (step, [out.replace(\"lh\", 
\"rh\") for out in outs], ins)\n for step, outs, ins in _autorecon3_lh_steps\n ]\n _autorecon3_perhemi_steps = [\n (step, [of for out in outs for of in (out, out.replace(\"lh\", \"rh\"))], ins)\n for step, outs, ins in _autorecon3_lh_steps\n ]\n _autorecon3_steps = _autorecon3_perhemi_steps + _autorecon3_added_steps\n\n # Fill out autorecon-hemi lh/rh steps\n _autorecon_lh_steps = _autorecon2_lh_steps + _autorecon3_lh_steps\n _autorecon_rh_steps = _autorecon2_rh_steps + _autorecon3_rh_steps\n\n _steps = _autorecon1_steps + _autorecon2_steps + _autorecon3_steps\n\n _binaries = [\n \"talairach\",\n \"mri_normalize\",\n \"mri_watershed\",\n \"mri_em_register\",\n \"mri_ca_normalize\",\n \"mri_ca_register\",\n \"mri_remove_neck\",\n \"mri_ca_label\",\n \"mri_segstats\",\n \"mri_mask\",\n \"mri_segment\",\n \"mri_edit_wm_with_aseg\",\n \"mri_pretess\",\n \"mri_fill\",\n \"mri_tessellate\",\n \"mris_smooth\",\n \"mris_inflate\",\n \"mris_sphere\",\n \"mris_fix_topology\",\n \"mris_make_surfaces\",\n \"mris_surf2vol\",\n \"mris_register\",\n \"mrisp_paint\",\n \"mris_ca_label\",\n \"mris_anatomical_stats\",\n \"mri_aparc2aseg\",\n ]\n\n def _gen_subjects_dir(self):\n return os.getcwd()\n\n def _gen_filename(self, name):\n if name == \"subjects_dir\":\n return self._gen_subjects_dir()\n return None\n\n def _list_outputs(self):\n \"\"\"\n See io.FreeSurferSource.outputs for the list of outputs returned\n \"\"\"\n if isdefined(self.inputs.subjects_dir):\n subjects_dir = self.inputs.subjects_dir\n else:\n subjects_dir = self._gen_subjects_dir()\n\n if isdefined(self.inputs.hemi):\n hemi = self.inputs.hemi\n else:\n hemi = \"both\"\n\n outputs = self._outputs().get()\n\n outputs.update(\n FreeSurferSource(\n subject_id=self.inputs.subject_id, subjects_dir=subjects_dir, hemi=hemi\n )._list_outputs()\n )\n outputs[\"subject_id\"] = self.inputs.subject_id\n outputs[\"subjects_dir\"] = subjects_dir\n return outputs\n\n def _is_resuming(self):\n subjects_dir = self.inputs.subjects_dir\n if not isdefined(subjects_dir):\n subjects_dir = self._gen_subjects_dir()\n if os.path.isdir(os.path.join(subjects_dir, self.inputs.subject_id, \"mri\")):\n return True\n return False\n\n def _format_arg(self, name, trait_spec, value):\n if name == \"T1_files\":\n if self._is_resuming():\n return None\n if name == \"hippocampal_subfields_T1\" and isdefined(\n self.inputs.hippocampal_subfields_T2\n ):\n return None\n if all(\n (\n name == \"hippocampal_subfields_T2\",\n isdefined(self.inputs.hippocampal_subfields_T1)\n and self.inputs.hippocampal_subfields_T1,\n )\n ):\n argstr = trait_spec.argstr.replace(\"T2\", \"T1T2\")\n return argstr % value\n if name == \"directive\" and value == \"autorecon-hemi\":\n if not isdefined(self.inputs.hemi):\n raise ValueError(\n \"Directive 'autorecon-hemi' requires hemi \" \"input to be set\"\n )\n value += \" \" + self.inputs.hemi\n if all(\n (\n name == \"hemi\",\n isdefined(self.inputs.directive)\n and self.inputs.directive == \"autorecon-hemi\",\n )\n ):\n return None\n return super(ReconAll, self)._format_arg(name, trait_spec, value)\n\n @property\n def cmdline(self):\n cmd = super(ReconAll, self).cmdline\n\n # Adds '-expert' flag if expert flags are passed\n # Mutually exclusive with 'expert' input parameter\n cmd += self._prep_expert_file()\n\n if not self._is_resuming():\n return cmd\n subjects_dir = self.inputs.subjects_dir\n if not isdefined(subjects_dir):\n subjects_dir = self._gen_subjects_dir()\n\n # Check only relevant steps\n directive = self.inputs.directive\n if not 
isdefined(directive):\n steps = []\n elif directive == \"autorecon1\":\n steps = self._autorecon1_steps\n elif directive == \"autorecon2-volonly\":\n steps = self._autorecon2_volonly_steps\n elif directive == \"autorecon2-perhemi\":\n steps = self._autorecon2_perhemi_steps\n elif directive.startswith(\"autorecon2\"):\n if isdefined(self.inputs.hemi):\n if self.inputs.hemi == \"lh\":\n steps = self._autorecon2_volonly_steps + self._autorecon2_lh_steps\n else:\n steps = self._autorecon2_volonly_steps + self._autorecon2_rh_steps\n else:\n steps = self._autorecon2_steps\n elif directive == \"autorecon-hemi\":\n if self.inputs.hemi == \"lh\":\n steps = self._autorecon_lh_steps\n else:\n steps = self._autorecon_rh_steps\n elif directive == \"autorecon3\":\n steps = self._autorecon3_steps\n else:\n steps = self._steps\n\n no_run = True\n flags = []\n for step, outfiles, infiles in steps:\n flag = \"-{}\".format(step)\n noflag = \"-no{}\".format(step)\n if noflag in cmd:\n continue\n elif flag in cmd:\n no_run = False\n continue\n\n subj_dir = os.path.join(subjects_dir, self.inputs.subject_id)\n if check_depends(\n [os.path.join(subj_dir, f) for f in outfiles],\n [os.path.join(subj_dir, f) for f in infiles],\n ):\n flags.append(noflag)\n else:\n no_run = False\n\n if no_run and not self.force_run:\n iflogger.info(\"recon-all complete : Not running\")\n return \"echo recon-all: nothing to do\"\n\n cmd += \" \" + \" \".join(flags)\n iflogger.info(\"resume recon-all : %s\", cmd)\n return cmd\n\n def _prep_expert_file(self):\n if isdefined(self.inputs.expert):\n return \"\"\n\n lines = []\n for binary in self._binaries:\n args = getattr(self.inputs, binary)\n if isdefined(args):\n lines.append(\"{} {}\\n\".format(binary, args))\n\n if lines == []:\n return \"\"\n\n contents = \"\".join(lines)\n if not isdefined(self.inputs.xopts) and self._get_expert_file() == contents:\n return \" -xopts-use\"\n\n expert_fname = os.path.abspath(\"expert.opts\")\n with open(expert_fname, \"w\") as fobj:\n fobj.write(contents)\n return \" -expert {}\".format(expert_fname)\n\n def _get_expert_file(self):\n # Read pre-existing options file, if it exists\n if isdefined(self.inputs.subjects_dir):\n subjects_dir = self.inputs.subjects_dir\n else:\n subjects_dir = self._gen_subjects_dir()\n\n xopts_file = os.path.join(\n subjects_dir, self.inputs.subject_id, \"scripts\", \"expert-options\"\n )\n if not os.path.exists(xopts_file):\n return \"\"\n with open(xopts_file, \"r\") as fobj:\n return fobj.read()\n\n @property\n def version(self):\n ver = Info.looseversion()\n if ver > LooseVersion(\"0.0.0\"):\n return ver.vstring\n\n\nclass BBRegisterInputSpec(FSTraitedSpec):\n subject_id = traits.Str(\n argstr=\"--s %s\", desc=\"freesurfer subject id\", mandatory=True\n )\n source_file = File(\n argstr=\"--mov %s\",\n desc=\"source file to be registered\",\n mandatory=True,\n copyfile=False,\n )\n init = traits.Enum(\n \"spm\",\n \"fsl\",\n \"header\",\n argstr=\"--init-%s\",\n mandatory=True,\n xor=[\"init_reg_file\"],\n desc=\"initialize registration spm, fsl, header\",\n )\n init_reg_file = File(\n exists=True,\n argstr=\"--init-reg %s\",\n desc=\"existing registration file\",\n xor=[\"init\"],\n mandatory=True,\n )\n contrast_type = traits.Enum(\n \"t1\",\n \"t2\",\n \"bold\",\n \"dti\",\n argstr=\"--%s\",\n desc=\"contrast type of image\",\n mandatory=True,\n )\n intermediate_file = File(\n exists=True,\n argstr=\"--int %s\",\n desc=\"Intermediate image, e.g. 
in case of partial FOV\",\n )\n reg_frame = traits.Int(\n argstr=\"--frame %d\",\n xor=[\"reg_middle_frame\"],\n desc=\"0-based frame index for 4D source file\",\n )\n reg_middle_frame = traits.Bool(\n argstr=\"--mid-frame\",\n xor=[\"reg_frame\"],\n desc=\"Register middle frame of 4D source file\",\n )\n out_reg_file = File(\n argstr=\"--reg %s\", desc=\"output registration file\", genfile=True\n )\n spm_nifti = traits.Bool(\n argstr=\"--spm-nii\", desc=\"force use of nifti rather than analyze with SPM\"\n )\n epi_mask = traits.Bool(\n argstr=\"--epi-mask\", desc=\"mask out B0 regions in stages 1 and 2\"\n )\n dof = traits.Enum(\n 6, 9, 12, argstr=\"--%d\", desc=\"number of transform degrees of freedom\"\n )\n fsldof = traits.Int(\n argstr=\"--fsl-dof %d\", desc=\"degrees of freedom for initial registration (FSL)\"\n )\n out_fsl_file = traits.Either(\n traits.Bool,\n File,\n argstr=\"--fslmat %s\",\n desc=\"write the transformation matrix in FSL FLIRT format\",\n )\n out_lta_file = traits.Either(\n traits.Bool,\n File,\n argstr=\"--lta %s\",\n min_ver=\"5.2.0\",\n desc=\"write the transformation matrix in LTA format\",\n )\n registered_file = traits.Either(\n traits.Bool,\n File,\n argstr=\"--o %s\",\n desc=\"output warped sourcefile either True or filename\",\n )\n init_cost_file = traits.Either(\n traits.Bool,\n File,\n argstr=\"--initcost %s\",\n desc=\"output initial registration cost file\",\n )\n\n\nclass BBRegisterInputSpec6(BBRegisterInputSpec):\n init = traits.Enum(\n \"coreg\",\n \"rr\",\n \"spm\",\n \"fsl\",\n \"header\",\n \"best\",\n argstr=\"--init-%s\",\n xor=[\"init_reg_file\"],\n desc=\"initialize registration with mri_coreg, spm, fsl, or header\",\n )\n init_reg_file = File(\n exists=True,\n argstr=\"--init-reg %s\",\n desc=\"existing registration file\",\n xor=[\"init\"],\n )\n\n\nclass BBRegisterOutputSpec(TraitedSpec):\n out_reg_file = File(exists=True, desc=\"Output registration file\")\n out_fsl_file = File(exists=True, desc=\"Output FLIRT-style registration file\")\n out_lta_file = File(exists=True, desc=\"Output LTA-style registration file\")\n min_cost_file = File(exists=True, desc=\"Output registration minimum cost file\")\n init_cost_file = File(exists=True, desc=\"Output initial registration cost file\")\n registered_file = File(exists=True, desc=\"Registered and resampled source file\")\n\n\nclass BBRegister(FSCommand):\n \"\"\"Use FreeSurfer bbregister to register a volume to the Freesurfer anatomical.\n\n This program performs within-subject, cross-modal registration using a\n boundary-based cost function. 
It is required that you have an anatomical\n scan of the subject that has already been recon-all-ed using freesurfer.\n\n Examples\n --------\n\n >>> from nipype.interfaces.freesurfer import BBRegister\n >>> bbreg = BBRegister(subject_id='me', source_file='structural.nii', init='header', contrast_type='t2')\n >>> bbreg.cmdline\n 'bbregister --t2 --init-header --reg structural_bbreg_me.dat --mov structural.nii --s me'\n\n \"\"\"\n\n _cmd = \"bbregister\"\n if LooseVersion(\"0.0.0\") < Info.looseversion() < LooseVersion(\"6.0.0\"):\n input_spec = BBRegisterInputSpec\n else:\n input_spec = BBRegisterInputSpec6\n output_spec = BBRegisterOutputSpec\n\n def _list_outputs(self):\n\n outputs = self.output_spec().get()\n _in = self.inputs\n\n if isdefined(_in.out_reg_file):\n outputs[\"out_reg_file\"] = op.abspath(_in.out_reg_file)\n elif _in.source_file:\n suffix = \"_bbreg_%s.dat\" % _in.subject_id\n outputs[\"out_reg_file\"] = fname_presuffix(\n _in.source_file, suffix=suffix, use_ext=False\n )\n\n if isdefined(_in.registered_file):\n if isinstance(_in.registered_file, bool):\n outputs[\"registered_file\"] = fname_presuffix(\n _in.source_file, suffix=\"_bbreg\"\n )\n else:\n outputs[\"registered_file\"] = op.abspath(_in.registered_file)\n\n if isdefined(_in.out_lta_file):\n if isinstance(_in.out_lta_file, bool):\n suffix = \"_bbreg_%s.lta\" % _in.subject_id\n out_lta_file = fname_presuffix(\n _in.source_file, suffix=suffix, use_ext=False\n )\n outputs[\"out_lta_file\"] = out_lta_file\n else:\n outputs[\"out_lta_file\"] = op.abspath(_in.out_lta_file)\n\n if isdefined(_in.out_fsl_file):\n if isinstance(_in.out_fsl_file, bool):\n suffix = \"_bbreg_%s.mat\" % _in.subject_id\n out_fsl_file = fname_presuffix(\n _in.source_file, suffix=suffix, use_ext=False\n )\n outputs[\"out_fsl_file\"] = out_fsl_file\n else:\n outputs[\"out_fsl_file\"] = op.abspath(_in.out_fsl_file)\n\n if isdefined(_in.init_cost_file):\n if isinstance(_in.out_fsl_file, bool):\n outputs[\"init_cost_file\"] = outputs[\"out_reg_file\"] + \".initcost\"\n else:\n outputs[\"init_cost_file\"] = op.abspath(_in.init_cost_file)\n\n outputs[\"min_cost_file\"] = outputs[\"out_reg_file\"] + \".mincost\"\n return outputs\n\n def _format_arg(self, name, spec, value):\n if name in (\n \"registered_file\",\n \"out_fsl_file\",\n \"out_lta_file\",\n \"init_cost_file\",\n ) and isinstance(value, bool):\n value = self._list_outputs()[name]\n return super(BBRegister, self)._format_arg(name, spec, value)\n\n def _gen_filename(self, name):\n\n if name == \"out_reg_file\":\n return self._list_outputs()[name]\n return None\n\n\nclass ApplyVolTransformInputSpec(FSTraitedSpec):\n source_file = File(\n exists=True,\n argstr=\"--mov %s\",\n copyfile=False,\n mandatory=True,\n desc=\"Input volume you wish to transform\",\n )\n transformed_file = File(desc=\"Output volume\", argstr=\"--o %s\", genfile=True)\n _targ_xor = (\"target_file\", \"tal\", \"fs_target\")\n target_file = File(\n exists=True,\n argstr=\"--targ %s\",\n xor=_targ_xor,\n desc=\"Output template volume\",\n mandatory=True,\n )\n tal = traits.Bool(\n argstr=\"--tal\",\n xor=_targ_xor,\n mandatory=True,\n desc=\"map to a sub FOV of MNI305 (with --reg only)\",\n )\n tal_resolution = traits.Float(\n argstr=\"--talres %.10f\", desc=\"Resolution to sample when using tal\"\n )\n fs_target = traits.Bool(\n argstr=\"--fstarg\",\n xor=_targ_xor,\n mandatory=True,\n requires=[\"reg_file\"],\n desc=\"use orig.mgz from subject in regfile as target\",\n )\n _reg_xor = (\n \"reg_file\",\n \"lta_file\",\n 
\"lta_inv_file\",\n \"fsl_reg_file\",\n \"xfm_reg_file\",\n \"reg_header\",\n \"mni_152_reg\",\n \"subject\",\n )\n reg_file = File(\n exists=True,\n xor=_reg_xor,\n argstr=\"--reg %s\",\n mandatory=True,\n desc=\"tkRAS-to-tkRAS matrix (tkregister2 format)\",\n )\n lta_file = File(\n exists=True,\n xor=_reg_xor,\n argstr=\"--lta %s\",\n mandatory=True,\n desc=\"Linear Transform Array file\",\n )\n lta_inv_file = File(\n exists=True,\n xor=_reg_xor,\n argstr=\"--lta-inv %s\",\n mandatory=True,\n desc=\"LTA, invert\",\n )\n reg_file = File(\n exists=True,\n xor=_reg_xor,\n argstr=\"--reg %s\",\n mandatory=True,\n desc=\"tkRAS-to-tkRAS matrix (tkregister2 format)\",\n )\n fsl_reg_file = File(\n exists=True,\n xor=_reg_xor,\n argstr=\"--fsl %s\",\n mandatory=True,\n desc=\"fslRAS-to-fslRAS matrix (FSL format)\",\n )\n xfm_reg_file = File(\n exists=True,\n xor=_reg_xor,\n argstr=\"--xfm %s\",\n mandatory=True,\n desc=\"ScannerRAS-to-ScannerRAS matrix (MNI format)\",\n )\n reg_header = traits.Bool(\n xor=_reg_xor,\n argstr=\"--regheader\",\n mandatory=True,\n desc=\"ScannerRAS-to-ScannerRAS matrix = identity\",\n )\n mni_152_reg = traits.Bool(\n xor=_reg_xor, argstr=\"--regheader\", mandatory=True, desc=\"target MNI152 space\"\n )\n subject = traits.Str(\n xor=_reg_xor,\n argstr=\"--s %s\",\n mandatory=True,\n desc=\"set matrix = identity and use subject for any templates\",\n )\n inverse = traits.Bool(desc=\"sample from target to source\", argstr=\"--inv\")\n interp = traits.Enum(\n \"trilin\",\n \"nearest\",\n \"cubic\",\n argstr=\"--interp %s\",\n desc=\"Interpolation method (<trilin> or nearest)\",\n )\n no_resample = traits.Bool(\n desc=\"Do not resample; just change vox2ras matrix\", argstr=\"--no-resample\"\n )\n m3z_file = File(\n argstr=\"--m3z %s\",\n desc=(\n \"This is the morph to be applied to the volume. \"\n \"Unless the morph is in mri/transforms (eg.: for \"\n \"talairach.m3z computed by reconall), you will need \"\n \"to specify the full path to this morph and use the \"\n \"--noDefM3zPath flag.\"\n ),\n )\n no_ded_m3z_path = traits.Bool(\n argstr=\"--noDefM3zPath\",\n requires=[\"m3z_file\"],\n desc=(\n \"To be used with the m3z flag. \"\n \"Instructs the code not to look for the\"\n \"m3z morph in the default location \"\n \"(SUBJECTS_DIR/subj/mri/transforms), \"\n \"but instead just use the path \"\n \"indicated in --m3z.\"\n ),\n )\n\n invert_morph = traits.Bool(\n argstr=\"--inv-morph\",\n requires=[\"m3z_file\"],\n desc=(\n \"Compute and use the inverse of the \"\n \"non-linear morph to resample the input \"\n \"volume. 
To be used by --m3z.\"\n ),\n )\n\n\nclass ApplyVolTransformOutputSpec(TraitedSpec):\n transformed_file = File(exists=True, desc=\"Path to output file if used normally\")\n\n\nclass ApplyVolTransform(FSCommand):\n \"\"\"Use FreeSurfer mri_vol2vol to apply a transform.\n\n Examples\n --------\n\n >>> from nipype.interfaces.freesurfer import ApplyVolTransform\n >>> applyreg = ApplyVolTransform()\n >>> applyreg.inputs.source_file = 'structural.nii'\n >>> applyreg.inputs.reg_file = 'register.dat'\n >>> applyreg.inputs.transformed_file = 'struct_warped.nii'\n >>> applyreg.inputs.fs_target = True\n >>> applyreg.cmdline\n 'mri_vol2vol --fstarg --reg register.dat --mov structural.nii --o struct_warped.nii'\n\n \"\"\"\n\n _cmd = \"mri_vol2vol\"\n input_spec = ApplyVolTransformInputSpec\n output_spec = ApplyVolTransformOutputSpec\n\n def _get_outfile(self):\n outfile = self.inputs.transformed_file\n if not isdefined(outfile):\n if self.inputs.inverse is True:\n if self.inputs.fs_target is True:\n src = \"orig.mgz\"\n else:\n src = self.inputs.target_file\n else:\n src = self.inputs.source_file\n outfile = fname_presuffix(src, newpath=os.getcwd(), suffix=\"_warped\")\n return outfile\n\n def _list_outputs(self):\n outputs = self.output_spec().get()\n outputs[\"transformed_file\"] = os.path.abspath(self._get_outfile())\n return outputs\n\n def _gen_filename(self, name):\n if name == \"transformed_file\":\n return self._get_outfile()\n return None\n\n\nclass SmoothInputSpec(FSTraitedSpec):\n in_file = File(exists=True, desc=\"source volume\", argstr=\"--i %s\", mandatory=True)\n reg_file = File(\n desc=\"registers volume to surface anatomical \",\n argstr=\"--reg %s\",\n mandatory=True,\n exists=True,\n )\n smoothed_file = File(desc=\"output volume\", argstr=\"--o %s\", genfile=True)\n proj_frac_avg = traits.Tuple(\n traits.Float,\n traits.Float,\n traits.Float,\n xor=[\"proj_frac\"],\n desc=\"average a long normal min max delta\",\n argstr=\"--projfrac-avg %.2f %.2f %.2f\",\n )\n proj_frac = traits.Float(\n desc=\"project frac of thickness a long surface normal\",\n xor=[\"proj_frac_avg\"],\n argstr=\"--projfrac %s\",\n )\n surface_fwhm = traits.Range(\n low=0.0,\n requires=[\"reg_file\"],\n mandatory=True,\n xor=[\"num_iters\"],\n desc=\"surface FWHM in mm\",\n argstr=\"--fwhm %f\",\n )\n num_iters = traits.Range(\n low=1,\n xor=[\"surface_fwhm\"],\n mandatory=True,\n argstr=\"--niters %d\",\n desc=\"number of iterations instead of fwhm\",\n )\n vol_fwhm = traits.Range(\n low=0.0, argstr=\"--vol-fwhm %f\", desc=\"volume smoothing outside of surface\"\n )\n\n\nclass SmoothOutputSpec(TraitedSpec):\n smoothed_file = File(exists=True, desc=\"smoothed input volume\")\n\n\nclass Smooth(FSCommand):\n \"\"\"Use FreeSurfer mris_volsmooth to smooth a volume\n\n This function smoothes cortical regions on a surface and non-cortical\n regions in volume.\n\n .. note::\n Cortical voxels are mapped to the surface (3D->2D) and then the\n smoothed values from the surface are put back into the volume to fill\n the cortical ribbon. 
If data is smoothed with this algorithm, one has to\n be careful about how further processing is interpreted.\n\n Examples\n --------\n\n >>> from nipype.interfaces.freesurfer import Smooth\n >>> smoothvol = Smooth(in_file='functional.nii', smoothed_file = 'foo_out.nii', reg_file='register.dat', surface_fwhm=10, vol_fwhm=6)\n >>> smoothvol.cmdline\n 'mris_volsmooth --i functional.nii --reg register.dat --o foo_out.nii --fwhm 10.000000 --vol-fwhm 6.000000'\n\n \"\"\"\n\n _cmd = \"mris_volsmooth\"\n input_spec = SmoothInputSpec\n output_spec = SmoothOutputSpec\n\n def _list_outputs(self):\n outputs = self.output_spec().get()\n outfile = self.inputs.smoothed_file\n if not isdefined(outfile):\n outfile = self._gen_fname(self.inputs.in_file, suffix=\"_smooth\")\n outputs[\"smoothed_file\"] = outfile\n return outputs\n\n def _gen_filename(self, name):\n if name == \"smoothed_file\":\n return self._list_outputs()[name]\n return None\n\n\nclass RobustRegisterInputSpec(FSTraitedSpec):\n\n source_file = File(\n exists=True, mandatory=True, argstr=\"--mov %s\", desc=\"volume to be registered\"\n )\n target_file = File(\n exists=True,\n mandatory=True,\n argstr=\"--dst %s\",\n desc=\"target volume for the registration\",\n )\n out_reg_file = traits.Either(\n True,\n File,\n default=True,\n usedefault=True,\n argstr=\"--lta %s\",\n desc=\"registration file; either True or filename\",\n )\n registered_file = traits.Either(\n traits.Bool,\n File,\n argstr=\"--warp %s\",\n desc=\"registered image; either True or filename\",\n )\n weights_file = traits.Either(\n traits.Bool,\n File,\n argstr=\"--weights %s\",\n desc=\"weights image to write; either True or filename\",\n )\n est_int_scale = traits.Bool(\n argstr=\"--iscale\",\n desc=\"estimate intensity scale (recommended for unnormalized images)\",\n )\n trans_only = traits.Bool(\n argstr=\"--transonly\", desc=\"find 3 parameter translation only\"\n )\n in_xfm_file = File(\n exists=True, argstr=\"--transform\", desc=\"use initial transform on source\"\n )\n half_source = traits.Either(\n traits.Bool,\n File,\n argstr=\"--halfmov %s\",\n desc=\"write source volume mapped to halfway space\",\n )\n half_targ = traits.Either(\n traits.Bool,\n File,\n argstr=\"--halfdst %s\",\n desc=\"write target volume mapped to halfway space\",\n )\n half_weights = traits.Either(\n traits.Bool,\n File,\n argstr=\"--halfweights %s\",\n desc=\"write weights volume mapped to halfway space\",\n )\n half_source_xfm = traits.Either(\n traits.Bool,\n File,\n argstr=\"--halfmovlta %s\",\n desc=\"write transform from source to halfway space\",\n )\n half_targ_xfm = traits.Either(\n traits.Bool,\n File,\n argstr=\"--halfdstlta %s\",\n desc=\"write transform from target to halfway space\",\n )\n auto_sens = traits.Bool(\n argstr=\"--satit\",\n xor=[\"outlier_sens\"],\n mandatory=True,\n desc=\"auto-detect good sensitivity\",\n )\n outlier_sens = traits.Float(\n argstr=\"--sat %.4f\",\n xor=[\"auto_sens\"],\n mandatory=True,\n desc=\"set outlier sensitivity explicitly\",\n )\n least_squares = traits.Bool(\n argstr=\"--leastsquares\", desc=\"use least squares instead of robust estimator\"\n )\n no_init = traits.Bool(argstr=\"--noinit\", desc=\"skip transform init\")\n init_orient = traits.Bool(\n argstr=\"--initorient\",\n desc=\"use moments for initial orient (recommended for stripped brains)\",\n )\n max_iterations = traits.Int(\n argstr=\"--maxit %d\", desc=\"maximum # of times on each resolution\"\n )\n high_iterations = traits.Int(\n argstr=\"--highit %d\", desc=\"max # of times on 
highest resolution\"\n )\n iteration_thresh = traits.Float(\n argstr=\"--epsit %.3f\", desc=\"stop iterations when below threshold\"\n )\n subsample_thresh = traits.Int(\n argstr=\"--subsample %d\", desc=\"subsample if dimension is above threshold size\"\n )\n outlier_limit = traits.Float(\n argstr=\"--wlimit %.3f\", desc=\"set maximal outlier limit in satit\"\n )\n write_vo2vox = traits.Bool(\n argstr=\"--vox2vox\", desc=\"output vox2vox matrix (default is RAS2RAS)\"\n )\n no_multi = traits.Bool(argstr=\"--nomulti\", desc=\"work on highest resolution\")\n mask_source = File(\n exists=True, argstr=\"--maskmov %s\", desc=\"image to mask source volume with\"\n )\n mask_target = File(\n exists=True, argstr=\"--maskdst %s\", desc=\"image to mask target volume with\"\n )\n force_double = traits.Bool(\n argstr=\"--doubleprec\", desc=\"use double-precision intensities\"\n )\n force_float = traits.Bool(argstr=\"--floattype\", desc=\"use float intensities\")\n\n\nclass RobustRegisterOutputSpec(TraitedSpec):\n\n out_reg_file = File(exists=True, desc=\"output registration file\")\n registered_file = File(exists=True, desc=\"output image with registration applied\")\n weights_file = File(exists=True, desc=\"image of weights used\")\n half_source = File(exists=True, desc=\"source image mapped to halfway space\")\n half_targ = File(exists=True, desc=\"target image mapped to halfway space\")\n half_weights = File(exists=True, desc=\"weights image mapped to halfway space\")\n half_source_xfm = File(\n exists=True, desc=\"transform file to map source image to halfway space\"\n )\n half_targ_xfm = File(\n exists=True, desc=\"transform file to map target image to halfway space\"\n )\n\n\nclass RobustRegister(FSCommand):\n \"\"\"Perform intramodal linear registration (translation and rotation) using\n robust statistics.\n\n Examples\n --------\n >>> from nipype.interfaces.freesurfer import RobustRegister\n >>> reg = RobustRegister()\n >>> reg.inputs.source_file = 'structural.nii'\n >>> reg.inputs.target_file = 'T1.nii'\n >>> reg.inputs.auto_sens = True\n >>> reg.inputs.init_orient = True\n >>> reg.cmdline # doctest: +ELLIPSIS\n 'mri_robust_register --satit --initorient --lta .../structural_robustreg.lta --mov structural.nii --dst T1.nii'\n\n References\n ----------\n Reuter, M, Rosas, HD, and Fischl, B, (2010). Highly Accurate Inverse\n Consistent Registration: A Robust Approach. 
Neuroimage 53(4) 1181-96.\n\n \"\"\"\n\n _cmd = \"mri_robust_register\"\n input_spec = RobustRegisterInputSpec\n output_spec = RobustRegisterOutputSpec\n\n def _format_arg(self, name, spec, value):\n options = (\n \"out_reg_file\",\n \"registered_file\",\n \"weights_file\",\n \"half_source\",\n \"half_targ\",\n \"half_weights\",\n \"half_source_xfm\",\n \"half_targ_xfm\",\n )\n if name in options and isinstance(value, bool):\n value = self._list_outputs()[name]\n return super(RobustRegister, self)._format_arg(name, spec, value)\n\n def _list_outputs(self):\n outputs = self.output_spec().get()\n cwd = os.getcwd()\n prefices = dict(src=self.inputs.source_file, trg=self.inputs.target_file)\n suffices = dict(\n out_reg_file=(\"src\", \"_robustreg.lta\", False),\n registered_file=(\"src\", \"_robustreg\", True),\n weights_file=(\"src\", \"_robustweights\", True),\n half_source=(\"src\", \"_halfway\", True),\n half_targ=(\"trg\", \"_halfway\", True),\n half_weights=(\"src\", \"_halfweights\", True),\n half_source_xfm=(\"src\", \"_robustxfm.lta\", False),\n half_targ_xfm=(\"trg\", \"_robustxfm.lta\", False),\n )\n for name, sufftup in list(suffices.items()):\n value = getattr(self.inputs, name)\n if value:\n if value is True:\n outputs[name] = fname_presuffix(\n prefices[sufftup[0]],\n suffix=sufftup[1],\n newpath=cwd,\n use_ext=sufftup[2],\n )\n else:\n outputs[name] = os.path.abspath(value)\n return outputs\n\n\nclass FitMSParamsInputSpec(FSTraitedSpec):\n\n in_files = traits.List(\n File(exists=True),\n argstr=\"%s\",\n position=-2,\n mandatory=True,\n desc=\"list of FLASH images (must be in mgh format)\",\n )\n tr_list = traits.List(traits.Int, desc=\"list of TRs of the input files (in msec)\")\n te_list = traits.List(traits.Float, desc=\"list of TEs of the input files (in msec)\")\n flip_list = traits.List(traits.Int, desc=\"list of flip angles of the input files\")\n xfm_list = traits.List(\n File(exists=True), desc=\"list of transform files to apply to each FLASH image\"\n )\n out_dir = Directory(\n argstr=\"%s\", position=-1, genfile=True, desc=\"directory to store output in\"\n )\n\n\nclass FitMSParamsOutputSpec(TraitedSpec):\n\n t1_image = File(exists=True, desc=\"image of estimated T1 relaxation values\")\n pd_image = File(exists=True, desc=\"image of estimated proton density values\")\n t2star_image = File(exists=True, desc=\"image of estimated T2* values\")\n\n\nclass FitMSParams(FSCommand):\n \"\"\"Estimate tissue paramaters from a set of FLASH images.\n\n Examples\n --------\n >>> from nipype.interfaces.freesurfer import FitMSParams\n >>> msfit = FitMSParams()\n >>> msfit.inputs.in_files = ['flash_05.mgz', 'flash_30.mgz']\n >>> msfit.inputs.out_dir = 'flash_parameters'\n >>> msfit.cmdline\n 'mri_ms_fitparms flash_05.mgz flash_30.mgz flash_parameters'\n\n \"\"\"\n\n _cmd = \"mri_ms_fitparms\"\n input_spec = FitMSParamsInputSpec\n output_spec = FitMSParamsOutputSpec\n\n def _format_arg(self, name, spec, value):\n if name == \"in_files\":\n cmd = \"\"\n for i, file in enumerate(value):\n if isdefined(self.inputs.tr_list):\n cmd = \" \".join((cmd, \"-tr %.1f\" % self.inputs.tr_list[i]))\n if isdefined(self.inputs.te_list):\n cmd = \" \".join((cmd, \"-te %.3f\" % self.inputs.te_list[i]))\n if isdefined(self.inputs.flip_list):\n cmd = \" \".join((cmd, \"-fa %.1f\" % self.inputs.flip_list[i]))\n if isdefined(self.inputs.xfm_list):\n cmd = \" \".join((cmd, \"-at %s\" % self.inputs.xfm_list[i]))\n cmd = \" \".join((cmd, file))\n return cmd\n return super(FitMSParams, self)._format_arg(name, 
spec, value)\n\n def _list_outputs(self):\n outputs = self.output_spec().get()\n if not isdefined(self.inputs.out_dir):\n out_dir = self._gen_filename(\"out_dir\")\n else:\n out_dir = self.inputs.out_dir\n outputs[\"t1_image\"] = os.path.join(out_dir, \"T1.mgz\")\n outputs[\"pd_image\"] = os.path.join(out_dir, \"PD.mgz\")\n outputs[\"t2star_image\"] = os.path.join(out_dir, \"T2star.mgz\")\n return outputs\n\n def _gen_filename(self, name):\n if name == \"out_dir\":\n return os.getcwd()\n return None\n\n\nclass SynthesizeFLASHInputSpec(FSTraitedSpec):\n\n fixed_weighting = traits.Bool(\n position=1,\n argstr=\"-w\",\n desc=\"use a fixed weighting to generate optimal gray/white contrast\",\n )\n tr = traits.Float(\n mandatory=True, position=2, argstr=\"%.2f\", desc=\"repetition time (in msec)\"\n )\n flip_angle = traits.Float(\n mandatory=True, position=3, argstr=\"%.2f\", desc=\"flip angle (in degrees)\"\n )\n te = traits.Float(\n mandatory=True, position=4, argstr=\"%.3f\", desc=\"echo time (in msec)\"\n )\n t1_image = File(\n exists=True, mandatory=True, position=5, argstr=\"%s\", desc=\"image of T1 values\"\n )\n pd_image = File(\n exists=True,\n mandatory=True,\n position=6,\n argstr=\"%s\",\n desc=\"image of proton density values\",\n )\n out_file = File(genfile=True, argstr=\"%s\", desc=\"image to write\")\n\n\nclass SynthesizeFLASHOutputSpec(TraitedSpec):\n\n out_file = File(exists=True, desc=\"synthesized FLASH acquisition\")\n\n\nclass SynthesizeFLASH(FSCommand):\n \"\"\"Synthesize a FLASH acquisition from T1 and proton density maps.\n\n Examples\n --------\n >>> from nipype.interfaces.freesurfer import SynthesizeFLASH\n >>> syn = SynthesizeFLASH(tr=20, te=3, flip_angle=30)\n >>> syn.inputs.t1_image = 'T1.mgz'\n >>> syn.inputs.pd_image = 'PD.mgz'\n >>> syn.inputs.out_file = 'flash_30syn.mgz'\n >>> syn.cmdline\n 'mri_synthesize 20.00 30.00 3.000 T1.mgz PD.mgz flash_30syn.mgz'\n\n \"\"\"\n\n _cmd = \"mri_synthesize\"\n input_spec = SynthesizeFLASHInputSpec\n output_spec = SynthesizeFLASHOutputSpec\n\n def _list_outputs(self):\n outputs = self.output_spec().get()\n if isdefined(self.inputs.out_file):\n outputs[\"out_file\"] = self.inputs.out_file\n else:\n outputs[\"out_file\"] = self._gen_fname(\n \"synth-flash_%02d.mgz\" % self.inputs.flip_angle, suffix=\"\"\n )\n return outputs\n\n def _gen_filename(self, name):\n if name == \"out_file\":\n return self._list_outputs()[\"out_file\"]\n return None\n\n\nclass MNIBiasCorrectionInputSpec(FSTraitedSpec):\n # mandatory\n in_file = File(\n exists=True,\n mandatory=True,\n argstr=\"--i %s\",\n desc=\"input volume. Input can be any format accepted by mri_convert.\",\n )\n # optional\n out_file = File(\n argstr=\"--o %s\",\n name_source=[\"in_file\"],\n name_template=\"%s_output\",\n hash_files=False,\n keep_extension=True,\n desc=\"output volume. Output can be any format accepted by mri_convert. \"\n + \"If the output format is COR, then the directory must exist.\",\n )\n iterations = traits.Int(\n 4,\n usedefault=True,\n argstr=\"--n %d\",\n desc=\"Number of iterations to run nu_correct. Default is 4. This is the number of times \"\n + \"that nu_correct is repeated (ie, using the output from the previous run as the input for \"\n + \"the next). This is different than the -iterations option to nu_correct.\",\n )\n protocol_iterations = traits.Int(\n argstr=\"--proto-iters %d\",\n desc=\"Passes Np as argument of the -iterations flag of nu_correct. This is different \"\n + \"than the --n flag above. 
Default is not to pass nu_correct the -iterations flag.\",\n )\n distance = traits.Int(argstr=\"--distance %d\", desc=\"N3 -distance option\")\n no_rescale = traits.Bool(\n argstr=\"--no-rescale\",\n desc=\"do not rescale so that global mean of output == input global mean\",\n )\n mask = File(\n exists=True,\n argstr=\"--mask %s\",\n desc=\"brainmask volume. Input can be any format accepted by mri_convert.\",\n )\n transform = File(\n exists=True,\n argstr=\"--uchar %s\",\n desc=\"tal.xfm. Use mri_make_uchar instead of conforming\",\n )\n stop = traits.Float(\n argstr=\"--stop %f\",\n desc=\"Convergence threshold below which iteration stops (suggest 0.01 to 0.0001)\",\n )\n shrink = traits.Int(\n argstr=\"--shrink %d\", desc=\"Shrink parameter for finer sampling (default is 4)\"\n )\n\n\nclass MNIBiasCorrectionOutputSpec(TraitedSpec):\n out_file = File(exists=True, desc=\"output volume\")\n\n\nclass MNIBiasCorrection(FSCommand):\n \"\"\" Wrapper for nu_correct, a program from the Montreal Neurological Insitute (MNI)\n used for correcting intensity non-uniformity (ie, bias fields). You must have the\n MNI software installed on your system to run this. See [www.bic.mni.mcgill.ca/software/N3]\n for more info.\n\n mri_nu_correct.mni uses float internally instead of uchar. It also rescales the output so\n that the global mean is the same as that of the input. These two changes are linked and\n can be turned off with --no-float\n\n Examples\n --------\n >>> from nipype.interfaces.freesurfer import MNIBiasCorrection\n >>> correct = MNIBiasCorrection()\n >>> correct.inputs.in_file = \"norm.mgz\"\n >>> correct.inputs.iterations = 6\n >>> correct.inputs.protocol_iterations = 1000\n >>> correct.inputs.distance = 50\n >>> correct.cmdline\n 'mri_nu_correct.mni --distance 50 --i norm.mgz --n 6 --o norm_output.mgz --proto-iters 1000'\n\n References:\n ----------\n [http://freesurfer.net/fswiki/mri_nu_correct.mni]\n [http://www.bic.mni.mcgill.ca/software/N3]\n [https://github.com/BIC-MNI/N3]\n\n \"\"\"\n\n _cmd = \"mri_nu_correct.mni\"\n input_spec = MNIBiasCorrectionInputSpec\n output_spec = MNIBiasCorrectionOutputSpec\n\n\nclass WatershedSkullStripInputSpec(FSTraitedSpec):\n # required\n in_file = File(\n argstr=\"%s\", exists=True, mandatory=True, position=-2, desc=\"input volume\"\n )\n out_file = File(\n \"brainmask.auto.mgz\",\n argstr=\"%s\",\n exists=False,\n mandatory=True,\n position=-1,\n usedefault=True,\n desc=\"output volume\",\n )\n # optional\n t1 = traits.Bool(argstr=\"-T1\", desc=\"specify T1 input volume (T1 grey value = 110)\")\n brain_atlas = File(argstr=\"-brain_atlas %s\", exists=True, position=-4, desc=\"\")\n transform = File(argstr=\"%s\", exists=False, position=-3, desc=\"undocumented\")\n\n\nclass WatershedSkullStripOutputSpec(TraitedSpec):\n out_file = File(exists=False, desc=\"skull stripped brain volume\")\n\n\nclass WatershedSkullStrip(FSCommand):\n \"\"\" This program strips skull and other outer non-brain tissue and\n produces the brain volume from T1 volume or the scanned volume.\n\n The \"watershed\" segmentation algorithm was used to dertermine the\n intensity values for white matter, grey matter, and CSF.\n A force field was then used to fit a spherical surface to the brain.\n The shape of the surface fit was then evaluated against a previously\n derived template.\n\n The default parameters are: -w 0.82 -b 0.32 -h 10 -seedpt -ta -wta\n\n (Segonne 2004)\n\n Examples\n ========\n >>> from nipype.interfaces.freesurfer import WatershedSkullStrip\n >>> skullstrip = 
WatershedSkullStrip()\n >>> skullstrip.inputs.in_file = \"T1.mgz\"\n >>> skullstrip.inputs.t1 = True\n >>> skullstrip.inputs.transform = \"transforms/talairach_with_skull.lta\"\n >>> skullstrip.inputs.out_file = \"brainmask.auto.mgz\"\n >>> skullstrip.cmdline\n 'mri_watershed -T1 transforms/talairach_with_skull.lta T1.mgz brainmask.auto.mgz'\n \"\"\"\n\n _cmd = \"mri_watershed\"\n input_spec = WatershedSkullStripInputSpec\n output_spec = WatershedSkullStripOutputSpec\n\n def _list_outputs(self):\n outputs = self.output_spec().get()\n outputs[\"out_file\"] = os.path.abspath(self.inputs.out_file)\n return outputs\n\n\nclass NormalizeInputSpec(FSTraitedSpec):\n # required\n in_file = File(\n argstr=\"%s\",\n exists=True,\n mandatory=True,\n position=-2,\n desc=\"The input file for Normalize\",\n )\n out_file = File(\n argstr=\"%s\",\n position=-1,\n name_source=[\"in_file\"],\n name_template=\"%s_norm\",\n hash_files=False,\n keep_extension=True,\n desc=\"The output file for Normalize\",\n )\n # optional\n gradient = traits.Int(\n argstr=\"-g %d\", desc=\"use max intensity/mm gradient g (default=1)\"\n )\n mask = File(\n argstr=\"-mask %s\", exists=True, desc=\"The input mask file for Normalize\"\n )\n segmentation = File(\n argstr=\"-aseg %s\", exists=True, desc=\"The input segmentation for Normalize\"\n )\n transform = File(\n exists=True, desc=\"Tranform file from the header of the input file\"\n )\n\n\nclass NormalizeOutputSpec(TraitedSpec):\n out_file = File(exists=False, desc=\"The output file for Normalize\")\n\n\nclass Normalize(FSCommand):\n \"\"\"\n Normalize the white-matter, optionally based on control points. The\n input volume is converted into a new volume where white matter image\n values all range around 110.\n\n Examples\n ========\n >>> from nipype.interfaces import freesurfer\n >>> normalize = freesurfer.Normalize()\n >>> normalize.inputs.in_file = \"T1.mgz\"\n >>> normalize.inputs.gradient = 1\n >>> normalize.cmdline\n 'mri_normalize -g 1 T1.mgz T1_norm.mgz'\n \"\"\"\n\n _cmd = \"mri_normalize\"\n input_spec = NormalizeInputSpec\n output_spec = NormalizeOutputSpec\n\n def _list_outputs(self):\n outputs = self.output_spec().get()\n outputs[\"out_file\"] = os.path.abspath(self.inputs.out_file)\n return outputs\n\n\nclass CANormalizeInputSpec(FSTraitedSpec):\n in_file = File(\n argstr=\"%s\",\n exists=True,\n mandatory=True,\n position=-4,\n desc=\"The input file for CANormalize\",\n )\n out_file = File(\n argstr=\"%s\",\n position=-1,\n name_source=[\"in_file\"],\n name_template=\"%s_norm\",\n hash_files=False,\n keep_extension=True,\n desc=\"The output file for CANormalize\",\n )\n atlas = File(\n argstr=\"%s\",\n exists=True,\n mandatory=True,\n position=-3,\n desc=\"The atlas file in gca format\",\n )\n transform = File(\n argstr=\"%s\",\n exists=True,\n mandatory=True,\n position=-2,\n desc=\"The tranform file in lta format\",\n )\n # optional\n mask = File(argstr=\"-mask %s\", exists=True, desc=\"Specifies volume to use as mask\")\n control_points = File(\n argstr=\"-c %s\", desc=\"File name for the output control points\"\n )\n long_file = File(\n argstr=\"-long %s\", desc=\"undocumented flag used in longitudinal processing\"\n )\n\n\nclass CANormalizeOutputSpec(TraitedSpec):\n out_file = File(exists=False, desc=\"The output file for Normalize\")\n control_points = File(exists=False, desc=\"The output control points for Normalize\")\n\n\nclass CANormalize(FSCommand):\n \"\"\"This program creates a normalized volume using the brain volume and an\n input gca file.\n\n 
For complete details, see the `FS Documentation <http://surfer.nmr.mgh.harvard.edu/fswiki/mri_ca_normalize>`_\n\n Examples\n ========\n\n >>> from nipype.interfaces import freesurfer\n >>> ca_normalize = freesurfer.CANormalize()\n >>> ca_normalize.inputs.in_file = \"T1.mgz\"\n >>> ca_normalize.inputs.atlas = \"atlas.nii.gz\" # in practice use .gca atlases\n >>> ca_normalize.inputs.transform = \"trans.mat\" # in practice use .lta transforms\n >>> ca_normalize.cmdline\n 'mri_ca_normalize T1.mgz atlas.nii.gz trans.mat T1_norm.mgz'\n \"\"\"\n\n _cmd = \"mri_ca_normalize\"\n input_spec = CANormalizeInputSpec\n output_spec = CANormalizeOutputSpec\n\n def _list_outputs(self):\n outputs = self.output_spec().get()\n outputs[\"out_file\"] = os.path.abspath(self.inputs.out_file)\n outputs[\"control_points\"] = os.path.abspath(self.inputs.control_points)\n return outputs\n\n\nclass CARegisterInputSpec(FSTraitedSpecOpenMP):\n # required\n in_file = File(\n argstr=\"%s\",\n exists=True,\n mandatory=True,\n position=-3,\n desc=\"The input volume for CARegister\",\n )\n out_file = File(\n argstr=\"%s\", position=-1, genfile=True, desc=\"The output volume for CARegister\"\n )\n template = File(\n argstr=\"%s\", exists=True, position=-2, desc=\"The template file in gca format\"\n )\n # optional\n mask = File(argstr=\"-mask %s\", exists=True, desc=\"Specifies volume to use as mask\")\n invert_and_save = traits.Bool(\n argstr=\"-invert-and-save\",\n position=-4,\n desc=\"Invert and save the .m3z multi-dimensional talaraich transform to x, y, and z .mgz files\",\n )\n no_big_ventricles = traits.Bool(argstr=\"-nobigventricles\", desc=\"No big ventricles\")\n transform = File(\n argstr=\"-T %s\", exists=True, desc=\"Specifies transform in lta format\"\n )\n align = traits.String(\n argstr=\"-align-%s\", desc=\"Specifies when to perform alignment\"\n )\n levels = traits.Int(\n argstr=\"-levels %d\",\n desc=\"defines how many surrounding voxels will be used in interpolations, default is 6\",\n )\n A = traits.Int(\n argstr=\"-A %d\", desc=\"undocumented flag used in longitudinal processing\"\n )\n l_files = InputMultiPath(\n File(exists=False),\n argstr=\"-l %s\",\n desc=\"undocumented flag used in longitudinal processing\",\n )\n\n\nclass CARegisterOutputSpec(TraitedSpec):\n out_file = File(exists=False, desc=\"The output file for CARegister\")\n\n\nclass CARegister(FSCommandOpenMP):\n \"\"\"Generates a multi-dimensional talairach transform from a gca file and talairach.lta file\n\n For complete details, see the `FS Documentation <http://surfer.nmr.mgh.harvard.edu/fswiki/mri_ca_register>`_\n\n Examples\n ========\n >>> from nipype.interfaces import freesurfer\n >>> ca_register = freesurfer.CARegister()\n >>> ca_register.inputs.in_file = \"norm.mgz\"\n >>> ca_register.inputs.out_file = \"talairach.m3z\"\n >>> ca_register.cmdline\n 'mri_ca_register norm.mgz talairach.m3z'\n \"\"\"\n\n _cmd = \"mri_ca_register\"\n input_spec = CARegisterInputSpec\n output_spec = CARegisterOutputSpec\n\n def _format_arg(self, name, spec, value):\n if name == \"l_files\" and len(value) == 1:\n value.append(\"identity.nofile\")\n return super(CARegister, self)._format_arg(name, spec, value)\n\n def _gen_fname(self, name):\n if name == \"out_file\":\n return os.path.abspath(\"talairach.m3z\")\n return None\n\n def _list_outputs(self):\n outputs = self.output_spec().get()\n outputs[\"out_file\"] = os.path.abspath(self.inputs.out_file)\n return outputs\n\n\nclass CALabelInputSpec(FSTraitedSpecOpenMP):\n # required\n in_file = File(\n 
argstr=\"%s\",\n position=-4,\n mandatory=True,\n exists=True,\n desc=\"Input volume for CALabel\",\n )\n out_file = File(\n argstr=\"%s\",\n position=-1,\n mandatory=True,\n exists=False,\n desc=\"Output file for CALabel\",\n )\n transform = File(\n argstr=\"%s\",\n position=-3,\n mandatory=True,\n exists=True,\n desc=\"Input transform for CALabel\",\n )\n template = File(\n argstr=\"%s\",\n position=-2,\n mandatory=True,\n exists=True,\n desc=\"Input template for CALabel\",\n )\n # optional\n in_vol = File(argstr=\"-r %s\", exists=True, desc=\"set input volume\")\n intensities = File(\n argstr=\"-r %s\",\n exists=True,\n desc=\"input label intensities file(used in longitudinal processing)\",\n )\n no_big_ventricles = traits.Bool(argstr=\"-nobigventricles\", desc=\"No big ventricles\")\n align = traits.Bool(argstr=\"-align\", desc=\"Align CALabel\")\n prior = traits.Float(argstr=\"-prior %.1f\", desc=\"Prior for CALabel\")\n relabel_unlikely = traits.Tuple(\n traits.Int,\n traits.Float,\n argstr=\"-relabel_unlikely %d %.1f\",\n desc=(\n \"Reclassify voxels at least some std\"\n \" devs from the mean using some size\"\n \" Gaussian window\"\n ),\n )\n label = File(\n argstr=\"-l %s\",\n exists=True,\n desc=\"Undocumented flag. Autorecon3 uses ../label/{hemisphere}.cortex.label as input file\",\n )\n aseg = File(\n argstr=\"-aseg %s\",\n exists=True,\n desc=\"Undocumented flag. Autorecon3 uses ../mri/aseg.presurf.mgz as input file\",\n )\n\n\nclass CALabelOutputSpec(TraitedSpec):\n out_file = File(exists=False, desc=\"Output volume from CALabel\")\n\n\nclass CALabel(FSCommandOpenMP):\n \"\"\"\n For complete details, see the `FS Documentation <http://surfer.nmr.mgh.harvard.edu/fswiki/mri_ca_register>`_\n\n Examples\n ========\n\n >>> from nipype.interfaces import freesurfer\n >>> ca_label = freesurfer.CALabel()\n >>> ca_label.inputs.in_file = \"norm.mgz\"\n >>> ca_label.inputs.out_file = \"out.mgz\"\n >>> ca_label.inputs.transform = \"trans.mat\"\n >>> ca_label.inputs.template = \"Template_6.nii\" # in practice use .gcs extension\n >>> ca_label.cmdline\n 'mri_ca_label norm.mgz trans.mat Template_6.nii out.mgz'\n \"\"\"\n\n _cmd = \"mri_ca_label\"\n input_spec = CALabelInputSpec\n output_spec = CALabelOutputSpec\n\n def _list_outputs(self):\n outputs = self.output_spec().get()\n outputs[\"out_file\"] = os.path.abspath(self.inputs.out_file)\n return outputs\n\n\nclass MRIsCALabelInputSpec(FSTraitedSpecOpenMP):\n # required\n subject_id = traits.String(\n \"subject_id\",\n argstr=\"%s\",\n position=-5,\n usedefault=True,\n mandatory=True,\n desc=\"Subject name or ID\",\n )\n hemisphere = traits.Enum(\n \"lh\",\n \"rh\",\n argstr=\"%s\",\n position=-4,\n mandatory=True,\n desc=\"Hemisphere ('lh' or 'rh')\",\n )\n canonsurf = File(\n argstr=\"%s\",\n position=-3,\n mandatory=True,\n exists=True,\n desc=\"Input canonical surface file\",\n )\n classifier = File(\n argstr=\"%s\",\n position=-2,\n mandatory=True,\n exists=True,\n desc=\"Classifier array input file\",\n )\n smoothwm = File(\n mandatory=True, exists=True, desc=\"implicit input {hemisphere}.smoothwm\"\n )\n curv = File(mandatory=True, exists=True, desc=\"implicit input {hemisphere}.curv\")\n sulc = File(mandatory=True, exists=True, desc=\"implicit input {hemisphere}.sulc\")\n out_file = File(\n argstr=\"%s\",\n position=-1,\n exists=False,\n name_source=[\"hemisphere\"],\n keep_extension=True,\n hash_files=False,\n name_template=\"%s.aparc.annot\",\n desc=\"Annotated surface output file\",\n )\n # optional\n label = File(\n argstr=\"-l 
%s\",\n exists=True,\n desc=\"Undocumented flag. Autorecon3 uses ../label/{hemisphere}.cortex.label as input file\",\n )\n aseg = File(\n argstr=\"-aseg %s\",\n exists=True,\n desc=\"Undocumented flag. Autorecon3 uses ../mri/aseg.presurf.mgz as input file\",\n )\n seed = traits.Int(argstr=\"-seed %d\", desc=\"\")\n copy_inputs = traits.Bool(\n desc=\"Copies implicit inputs to node directory \"\n + \"and creates a temp subjects_directory. \"\n + \"Use this when running as a node\"\n )\n\n\nclass MRIsCALabelOutputSpec(TraitedSpec):\n out_file = File(exists=False, desc=\"Output volume from MRIsCALabel\")\n\n\nclass MRIsCALabel(FSCommandOpenMP):\n \"\"\"\n For a single subject, produces an annotation file, in which each\n cortical surface vertex is assigned a neuroanatomical label.This\n automatic procedure employs data from a previously-prepared atlas\n file. An atlas file is created from a training set, capturing region\n data manually drawn by neuroanatomists combined with statistics on\n variability correlated to geometric information derived from the\n cortical model (sulcus and curvature). Besides the atlases provided\n with FreeSurfer, new ones can be prepared using mris_ca_train).\n\n Examples\n ========\n\n >>> from nipype.interfaces import freesurfer\n >>> ca_label = freesurfer.MRIsCALabel()\n >>> ca_label.inputs.subject_id = \"test\"\n >>> ca_label.inputs.hemisphere = \"lh\"\n >>> ca_label.inputs.canonsurf = \"lh.pial\"\n >>> ca_label.inputs.curv = \"lh.pial\"\n >>> ca_label.inputs.sulc = \"lh.pial\"\n >>> ca_label.inputs.classifier = \"im1.nii\" # in pracice, use .gcs extension\n >>> ca_label.inputs.smoothwm = \"lh.pial\"\n >>> ca_label.cmdline\n 'mris_ca_label test lh lh.pial im1.nii lh.aparc.annot'\n \"\"\"\n\n _cmd = \"mris_ca_label\"\n input_spec = MRIsCALabelInputSpec\n output_spec = MRIsCALabelOutputSpec\n\n def run(self, **inputs):\n if self.inputs.copy_inputs:\n self.inputs.subjects_dir = os.getcwd()\n if \"subjects_dir\" in inputs:\n inputs[\"subjects_dir\"] = self.inputs.subjects_dir\n copy2subjdir(self, self.inputs.canonsurf, folder=\"surf\")\n copy2subjdir(\n self,\n self.inputs.smoothwm,\n folder=\"surf\",\n basename=\"{0}.smoothwm\".format(self.inputs.hemisphere),\n )\n copy2subjdir(\n self,\n self.inputs.curv,\n folder=\"surf\",\n basename=\"{0}.curv\".format(self.inputs.hemisphere),\n )\n copy2subjdir(\n self,\n self.inputs.sulc,\n folder=\"surf\",\n basename=\"{0}.sulc\".format(self.inputs.hemisphere),\n )\n\n # The label directory must exist in order for an output to be written\n label_dir = os.path.join(\n self.inputs.subjects_dir, self.inputs.subject_id, \"label\"\n )\n if not os.path.isdir(label_dir):\n os.makedirs(label_dir)\n\n return super(MRIsCALabel, self).run(**inputs)\n\n def _list_outputs(self):\n outputs = self.output_spec().get()\n out_basename = os.path.basename(self.inputs.out_file)\n outputs[\"out_file\"] = os.path.join(\n self.inputs.subjects_dir, self.inputs.subject_id, \"label\", out_basename\n )\n return outputs\n\n\nclass SegmentCCInputSpec(FSTraitedSpec):\n in_file = File(\n argstr=\"-aseg %s\",\n mandatory=True,\n exists=True,\n desc=\"Input aseg file to read from subjects directory\",\n )\n in_norm = File(\n mandatory=True,\n exists=True,\n desc=\"Required undocumented input {subject}/mri/norm.mgz\",\n )\n out_file = File(\n argstr=\"-o %s\",\n exists=False,\n name_source=[\"in_file\"],\n name_template=\"%s.auto.mgz\",\n hash_files=False,\n keep_extension=False,\n desc=\"Filename to write aseg including CC\",\n )\n out_rotation = File(\n 
argstr=\"-lta %s\",\n mandatory=True,\n exists=False,\n desc=\"Global filepath for writing rotation lta\",\n )\n subject_id = traits.String(\n \"subject_id\",\n argstr=\"%s\",\n mandatory=True,\n position=-1,\n usedefault=True,\n desc=\"Subject name\",\n )\n copy_inputs = traits.Bool(\n desc=\"If running as a node, set this to True.\"\n + \"This will copy the input files to the node \"\n + \"directory.\"\n )\n\n\nclass SegmentCCOutputSpec(TraitedSpec):\n out_file = File(exists=False, desc=\"Output segmentation uncluding corpus collosum\")\n out_rotation = File(exists=False, desc=\"Output lta rotation file\")\n\n\nclass SegmentCC(FSCommand):\n \"\"\"\n This program segments the corpus callosum into five separate labels in\n the subcortical segmentation volume 'aseg.mgz'. The divisions of the\n cc are equally spaced in terms of distance along the primary\n eigendirection (pretty much the long axis) of the cc. The lateral\n extent can be changed with the -T <thickness> parameter, where\n <thickness> is the distance off the midline (so -T 1 would result in\n the who CC being 3mm thick). The default is 2 so it's 5mm thick. The\n aseg.stats values should be volume.\n\n Examples\n ========\n >>> from nipype.interfaces import freesurfer\n >>> SegmentCC_node = freesurfer.SegmentCC()\n >>> SegmentCC_node.inputs.in_file = \"aseg.mgz\"\n >>> SegmentCC_node.inputs.in_norm = \"norm.mgz\"\n >>> SegmentCC_node.inputs.out_rotation = \"cc.lta\"\n >>> SegmentCC_node.inputs.subject_id = \"test\"\n >>> SegmentCC_node.cmdline\n 'mri_cc -aseg aseg.mgz -o aseg.auto.mgz -lta cc.lta test'\n \"\"\"\n\n _cmd = \"mri_cc\"\n input_spec = SegmentCCInputSpec\n output_spec = SegmentCCOutputSpec\n\n # mri_cc does not take absolute paths and will look for the\n # input files in <SUBJECTS_DIR>/<subject_id>/mri/<basename>\n # So, if the files are not there, they will be copied to that\n # location\n def _format_arg(self, name, spec, value):\n if name in [\"in_file\", \"in_norm\", \"out_file\"]:\n # mri_cc can't use abspaths just the basename\n basename = os.path.basename(value)\n return spec.argstr % basename\n return super(SegmentCC, self)._format_arg(name, spec, value)\n\n def _list_outputs(self):\n outputs = self.output_spec().get()\n outputs[\"out_file\"] = os.path.abspath(self.inputs.out_file)\n outputs[\"out_rotation\"] = os.path.abspath(self.inputs.out_rotation)\n return outputs\n\n def run(self, **inputs):\n if self.inputs.copy_inputs:\n self.inputs.subjects_dir = os.getcwd()\n if \"subjects_dir\" in inputs:\n inputs[\"subjects_dir\"] = self.inputs.subjects_dir\n for originalfile in [self.inputs.in_file, self.inputs.in_norm]:\n copy2subjdir(self, originalfile, folder=\"mri\")\n return super(SegmentCC, self).run(**inputs)\n\n def aggregate_outputs(self, runtime=None, needed_outputs=None):\n # it is necessary to find the output files and move\n # them to the correct loacation\n predicted_outputs = self._list_outputs()\n for name in [\"out_file\", \"out_rotation\"]:\n out_file = predicted_outputs[name]\n if not os.path.isfile(out_file):\n out_base = os.path.basename(out_file)\n if isdefined(self.inputs.subjects_dir):\n subj_dir = os.path.join(\n self.inputs.subjects_dir, self.inputs.subject_id\n )\n else:\n subj_dir = os.path.join(os.getcwd(), self.inputs.subject_id)\n if name == \"out_file\":\n out_tmp = os.path.join(subj_dir, \"mri\", out_base)\n elif name == \"out_rotation\":\n out_tmp = os.path.join(subj_dir, \"mri\", \"transforms\", out_base)\n else:\n out_tmp = None\n # move the file to correct location\n if 
out_tmp and os.path.isfile(out_tmp):\n if not os.path.isdir(os.path.dirname(out_tmp)):\n os.makedirs(os.path.dirname(out_tmp))\n shutil.move(out_tmp, out_file)\n return super(SegmentCC, self).aggregate_outputs(runtime, needed_outputs)\n\n\nclass SegmentWMInputSpec(FSTraitedSpec):\n in_file = File(\n argstr=\"%s\",\n exists=True,\n mandatory=True,\n position=-2,\n desc=\"Input file for SegmentWM\",\n )\n out_file = File(\n argstr=\"%s\",\n exists=False,\n mandatory=True,\n position=-1,\n desc=\"File to be written as output for SegmentWM\",\n )\n\n\nclass SegmentWMOutputSpec(TraitedSpec):\n out_file = File(exists=False, desc=\"Output white matter segmentation\")\n\n\nclass SegmentWM(FSCommand):\n \"\"\"\n This program segments white matter from the input volume. The input\n volume should be normalized such that white matter voxels are\n ~110-valued, and the volume is conformed to 256^3.\n\n\n Examples\n ========\n >>> from nipype.interfaces import freesurfer\n >>> SegmentWM_node = freesurfer.SegmentWM()\n >>> SegmentWM_node.inputs.in_file = \"norm.mgz\"\n >>> SegmentWM_node.inputs.out_file = \"wm.seg.mgz\"\n >>> SegmentWM_node.cmdline\n 'mri_segment norm.mgz wm.seg.mgz'\n \"\"\"\n\n _cmd = \"mri_segment\"\n input_spec = SegmentWMInputSpec\n output_spec = SegmentWMOutputSpec\n\n def _list_outputs(self):\n outputs = self.output_spec().get()\n outputs[\"out_file\"] = os.path.abspath(self.inputs.out_file)\n return outputs\n\n\nclass EditWMwithAsegInputSpec(FSTraitedSpec):\n in_file = File(\n argstr=\"%s\",\n position=-4,\n mandatory=True,\n exists=True,\n desc=\"Input white matter segmentation file\",\n )\n brain_file = File(\n argstr=\"%s\",\n position=-3,\n mandatory=True,\n exists=True,\n desc=\"Input brain/T1 file\",\n )\n seg_file = File(\n argstr=\"%s\",\n position=-2,\n mandatory=True,\n exists=True,\n desc=\"Input presurf segmentation file\",\n )\n out_file = File(\n argstr=\"%s\",\n position=-1,\n mandatory=True,\n exists=False,\n desc=\"File to be written as output\",\n )\n # optional\n keep_in = traits.Bool(argstr=\"-keep-in\", desc=\"Keep edits as found in input volume\")\n\n\nclass EditWMwithAsegOutputSpec(TraitedSpec):\n out_file = File(exists=False, desc=\"Output edited WM file\")\n\n\nclass EditWMwithAseg(FSCommand):\n \"\"\"\n Edits a wm file using a segmentation\n\n Examples\n ========\n >>> from nipype.interfaces.freesurfer import EditWMwithAseg\n >>> editwm = EditWMwithAseg()\n >>> editwm.inputs.in_file = \"T1.mgz\"\n >>> editwm.inputs.brain_file = \"norm.mgz\"\n >>> editwm.inputs.seg_file = \"aseg.mgz\"\n >>> editwm.inputs.out_file = \"wm.asegedit.mgz\"\n >>> editwm.inputs.keep_in = True\n >>> editwm.cmdline\n 'mri_edit_wm_with_aseg -keep-in T1.mgz norm.mgz aseg.mgz wm.asegedit.mgz'\n \"\"\"\n\n _cmd = \"mri_edit_wm_with_aseg\"\n input_spec = EditWMwithAsegInputSpec\n output_spec = EditWMwithAsegOutputSpec\n\n def _list_outputs(self):\n outputs = self.output_spec().get()\n outputs[\"out_file\"] = os.path.abspath(self.inputs.out_file)\n return outputs\n\n\nclass ConcatenateLTAInputSpec(FSTraitedSpec):\n # required\n in_lta1 = File(\n exists=True,\n mandatory=True,\n argstr=\"%s\",\n position=-3,\n desc=\"maps some src1 to dst1\",\n )\n in_lta2 = traits.Either(\n File(exists=True),\n \"identity.nofile\",\n argstr=\"%s\",\n position=-2,\n mandatory=True,\n desc=\"maps dst1(src2) to dst2\",\n )\n out_file = File(\n position=-1,\n argstr=\"%s\",\n hash_files=False,\n name_source=[\"in_lta1\"],\n name_template=\"%s_concat\",\n keep_extension=True,\n desc=\"the combined LTA maps: 
src1 to dst2 = LTA2*LTA1\",\n )\n\n # Inversion and transform type\n invert_1 = traits.Bool(argstr=\"-invert1\", desc=\"invert in_lta1 before applying it\")\n invert_2 = traits.Bool(argstr=\"-invert2\", desc=\"invert in_lta2 before applying it\")\n invert_out = traits.Bool(argstr=\"-invertout\", desc=\"invert output LTA\")\n out_type = traits.Enum(\n \"VOX2VOX\", \"RAS2RAS\", argstr=\"-out_type %d\", desc=\"set final LTA type\"\n )\n\n # Talairach options\n tal_source_file = File(\n exists=True,\n argstr=\"-tal %s\",\n position=-5,\n requires=[\"tal_template_file\"],\n desc=\"if in_lta2 is talairach.xfm, specify source for talairach\",\n )\n tal_template_file = File(\n exists=True,\n argstr=\"%s\",\n position=-4,\n requires=[\"tal_source_file\"],\n desc=\"if in_lta2 is talairach.xfm, specify template for talairach\",\n )\n\n subject = traits.Str(argstr=\"-subject %s\", desc=\"set subject in output LTA\")\n # Note rmsdiff would be xor out_file, and would be most easily dealt with\n # in a new interface. -CJM 2017.10.05\n\n\nclass ConcatenateLTAOutputSpec(TraitedSpec):\n out_file = File(\n exists=False, desc=\"the combined LTA maps: src1 to dst2 = LTA2*LTA1\"\n )\n\n\nclass ConcatenateLTA(FSCommand):\n \"\"\" Concatenates two consecutive LTA transformations into one overall\n transformation\n\n Out = LTA2*LTA1\n\n Examples\n --------\n >>> from nipype.interfaces.freesurfer import ConcatenateLTA\n >>> conc_lta = ConcatenateLTA()\n >>> conc_lta.inputs.in_lta1 = 'lta1.lta'\n >>> conc_lta.inputs.in_lta2 = 'lta2.lta'\n >>> conc_lta.cmdline\n 'mri_concatenate_lta lta1.lta lta2.lta lta1_concat.lta'\n\n You can use 'identity.nofile' as the filename for in_lta2, e.g.:\n\n >>> conc_lta.inputs.in_lta2 = 'identity.nofile'\n >>> conc_lta.inputs.invert_1 = True\n >>> conc_lta.inputs.out_file = 'inv1.lta'\n >>> conc_lta.cmdline\n 'mri_concatenate_lta -invert1 lta1.lta identity.nofile inv1.lta'\n\n To create a RAS2RAS transform:\n\n >>> conc_lta.inputs.out_type = 'RAS2RAS'\n >>> conc_lta.cmdline\n 'mri_concatenate_lta -invert1 -out_type 1 lta1.lta identity.nofile inv1.lta'\n \"\"\"\n\n _cmd = \"mri_concatenate_lta\"\n input_spec = ConcatenateLTAInputSpec\n output_spec = ConcatenateLTAOutputSpec\n\n def _format_arg(self, name, spec, value):\n if name == \"out_type\":\n value = {\"VOX2VOX\": 0, \"RAS2RAS\": 1}[value]\n return super(ConcatenateLTA, self)._format_arg(name, spec, value)\n",
"# -*- coding: utf-8 -*-\n# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-\n# vi: set ft=python sts=4 ts=4 sw=4 et:\n\"\"\"Common graph operations for execution\n\"\"\"\nimport sys\nfrom copy import deepcopy\nfrom glob import glob\nimport os\nimport shutil\nfrom time import sleep, time\nfrom traceback import format_exception\n\nimport numpy as np\n\nfrom ... import logging\nfrom ...utils.misc import str2bool\nfrom ..engine.utils import topological_sort, load_resultfile\nfrom ..engine import MapNode\nfrom .tools import report_crash, report_nodes_not_run, create_pyscript\n\nlogger = logging.getLogger(\"nipype.workflow\")\n\n\nclass PluginBase(object):\n \"\"\"\n Base class for plugins\n\n \"\"\"\n\n def __init__(self, plugin_args=None):\n if plugin_args is None:\n plugin_args = {}\n self.plugin_args = plugin_args\n self._config = None\n self._status_callback = plugin_args.get(\"status_callback\")\n\n def run(self, graph, config, updatehash=False):\n \"\"\"\n The core plugin member that should be implemented by\n all plugins.\n\n graph: a networkx, flattened :abbr:`DAG (Directed Acyclic Graph)`\n to be executed\n\n config: a nipype.config object\n\n updatehash:\n\n \"\"\"\n raise NotImplementedError\n\n\nclass DistributedPluginBase(PluginBase):\n \"\"\"\n Execute workflow with a distribution engine\n\n Relevant class attributes\n -------------------------\n\n procs: list (N) of underlying interface elements to be processed\n proc_done: a boolean numpy array (N,) signifying whether a process has been\n submitted for execution\n proc_pending: a boolean numpy array (N,) signifying whether a\n process is currently running.\n depidx: a boolean matrix (NxN) storing the dependency structure accross\n processes. Process dependencies are derived from each column.\n\n Combinations of ``proc_done`` and ``proc_pending``\n --------------------------------------------------\n\n +------------+---------------+--------------------------------+\n | proc_done | proc_pending | outcome |\n +============+===============+================================+\n | True | False | Process is finished |\n +------------+---------------+--------------------------------+\n | True | True | Process is currently being run |\n +------------+---------------+--------------------------------+\n | False | False | Process is queued |\n +------------+---------------+--------------------------------+\n | False | True | INVALID COMBINATION |\n +------------+---------------+--------------------------------+\n \"\"\"\n\n def __init__(self, plugin_args=None):\n \"\"\"\n Initialize runtime attributes to none\n\n \"\"\"\n super(DistributedPluginBase, self).__init__(plugin_args=plugin_args)\n self.procs = None\n self.depidx = None\n self.refidx = None\n self.mapnodes = None\n self.mapnodesubids = None\n self.proc_done = None\n self.proc_pending = None\n self.pending_tasks = []\n self.max_jobs = self.plugin_args.get(\"max_jobs\", np.inf)\n\n def _prerun_check(self, graph):\n \"\"\"Stub method to validate/massage graph and nodes before running\"\"\"\n\n def _postrun_check(self):\n \"\"\"Stub method to close any open resources\"\"\"\n\n def run(self, graph, config, updatehash=False):\n \"\"\"\n Executes a pre-defined pipeline using distributed approaches\n \"\"\"\n logger.info(\"Running in parallel.\")\n self._config = config\n poll_sleep_secs = float(config[\"execution\"][\"poll_sleep_duration\"])\n\n self._prerun_check(graph)\n # Generate appropriate structures for worker-manager model\n 
self._generate_dependency_list(graph)\n self.mapnodes = []\n self.mapnodesubids = {}\n # setup polling - TODO: change to threaded model\n notrun = []\n\n old_progress_stats = None\n old_presub_stats = None\n while not np.all(self.proc_done) or np.any(self.proc_pending):\n loop_start = time()\n # Check if a job is available (jobs with all dependencies run)\n # https://github.com/nipy/nipype/pull/2200#discussion_r141605722\n jobs_ready = np.nonzero(~self.proc_done & (self.depidx.sum(0) == 0))[1]\n\n progress_stats = (\n len(self.proc_done),\n np.sum(self.proc_done ^ self.proc_pending),\n np.sum(self.proc_done & self.proc_pending),\n len(jobs_ready),\n len(self.pending_tasks),\n np.sum(~self.proc_done & ~self.proc_pending),\n )\n display_stats = progress_stats != old_progress_stats\n if display_stats:\n logger.debug(\n \"Progress: %d jobs, %d/%d/%d \"\n \"(done/running/ready), %d/%d \"\n \"(pending_tasks/waiting).\",\n *progress_stats\n )\n old_progress_stats = progress_stats\n toappend = []\n # trigger callbacks for any pending results\n while self.pending_tasks:\n taskid, jobid = self.pending_tasks.pop()\n try:\n result = self._get_result(taskid)\n except Exception:\n notrun.append(self._clean_queue(jobid, graph))\n else:\n if result:\n if result[\"traceback\"]:\n notrun.append(\n self._clean_queue(jobid, graph, result=result)\n )\n else:\n self._task_finished_cb(jobid)\n self._remove_node_dirs()\n self._clear_task(taskid)\n else:\n assert self.proc_done[jobid] and self.proc_pending[jobid]\n toappend.insert(0, (taskid, jobid))\n\n if toappend:\n self.pending_tasks.extend(toappend)\n\n num_jobs = len(self.pending_tasks)\n presub_stats = (num_jobs, np.sum(self.proc_done & self.proc_pending))\n display_stats = display_stats or presub_stats != old_presub_stats\n if display_stats:\n logger.debug(\"Tasks currently running: %d. 
Pending: %d.\", *presub_stats)\n old_presub_stats = presub_stats\n if num_jobs < self.max_jobs:\n self._send_procs_to_workers(updatehash=updatehash, graph=graph)\n elif display_stats:\n logger.debug(\"Not submitting (max jobs reached)\")\n\n sleep_til = loop_start + poll_sleep_secs\n sleep(max(0, sleep_til - time()))\n\n self._remove_node_dirs()\n report_nodes_not_run(notrun)\n\n # close any open resources\n self._postrun_check()\n\n def _get_result(self, taskid):\n raise NotImplementedError\n\n def _submit_job(self, node, updatehash=False):\n raise NotImplementedError\n\n def _report_crash(self, node, result=None):\n tb = None\n if result is not None:\n node._result = result[\"result\"]\n tb = result[\"traceback\"]\n node._traceback = tb\n return report_crash(node, traceback=tb)\n\n def _clear_task(self, taskid):\n raise NotImplementedError\n\n def _clean_queue(self, jobid, graph, result=None):\n logger.debug(\"Clearing %d from queue\", jobid)\n\n if self._status_callback:\n self._status_callback(self.procs[jobid], \"exception\")\n if result is None:\n result = {\n \"result\": None,\n \"traceback\": \"\\n\".join(format_exception(*sys.exc_info())),\n }\n\n crashfile = self._report_crash(self.procs[jobid], result=result)\n if str2bool(self._config[\"execution\"][\"stop_on_first_crash\"]):\n raise RuntimeError(\"\".join(result[\"traceback\"]))\n if jobid in self.mapnodesubids:\n # remove current jobid\n self.proc_pending[jobid] = False\n self.proc_done[jobid] = True\n # remove parent mapnode\n jobid = self.mapnodesubids[jobid]\n self.proc_pending[jobid] = False\n self.proc_done[jobid] = True\n # remove dependencies from queue\n return self._remove_node_deps(jobid, crashfile, graph)\n\n def _submit_mapnode(self, jobid):\n import scipy.sparse as ssp\n\n if jobid in self.mapnodes:\n return True\n self.mapnodes.append(jobid)\n mapnodesubids = self.procs[jobid].get_subnodes()\n numnodes = len(mapnodesubids)\n logger.debug(\"Adding %d jobs for mapnode %s\", numnodes, self.procs[jobid])\n for i in range(numnodes):\n self.mapnodesubids[self.depidx.shape[0] + i] = jobid\n self.procs.extend(mapnodesubids)\n self.depidx = ssp.vstack(\n (self.depidx, ssp.lil_matrix(np.zeros((numnodes, self.depidx.shape[1])))),\n \"lil\",\n )\n self.depidx = ssp.hstack(\n (self.depidx, ssp.lil_matrix(np.zeros((self.depidx.shape[0], numnodes)))),\n \"lil\",\n )\n self.depidx[-numnodes:, jobid] = 1\n self.proc_done = np.concatenate(\n (self.proc_done, np.zeros(numnodes, dtype=bool))\n )\n self.proc_pending = np.concatenate(\n (self.proc_pending, np.zeros(numnodes, dtype=bool))\n )\n return False\n\n def _send_procs_to_workers(self, updatehash=False, graph=None):\n \"\"\"\n Sends jobs to workers\n \"\"\"\n\n while not np.all(self.proc_done):\n num_jobs = len(self.pending_tasks)\n if np.isinf(self.max_jobs):\n slots = None\n else:\n slots = max(0, self.max_jobs - num_jobs)\n logger.debug(\"Slots available: %s\", slots)\n if (num_jobs >= self.max_jobs) or (slots == 0):\n break\n\n # Check if a job is available (jobs with all dependencies run)\n # https://github.com/nipy/nipype/pull/2200#discussion_r141605722\n jobids = np.nonzero(~self.proc_done & (self.depidx.sum(0) == 0))[1]\n\n if len(jobids) > 0:\n # send all available jobs\n logger.info(\n \"Pending[%d] Submitting[%d] jobs Slots[%s]\",\n num_jobs,\n len(jobids[:slots]),\n slots or \"inf\",\n )\n\n for jobid in jobids[:slots]:\n if isinstance(self.procs[jobid], MapNode):\n try:\n num_subnodes = self.procs[jobid].num_subnodes()\n except Exception:\n 
self._clean_queue(jobid, graph)\n self.proc_pending[jobid] = False\n continue\n if num_subnodes > 1:\n submit = self._submit_mapnode(jobid)\n if not submit:\n continue\n # change job status in appropriate queues\n self.proc_done[jobid] = True\n self.proc_pending[jobid] = True\n # Send job to task manager and add to pending tasks\n logger.info(\"Submitting: %s ID: %d\", self.procs[jobid], jobid)\n if self._status_callback:\n self._status_callback(self.procs[jobid], \"start\")\n\n if not self._local_hash_check(jobid, graph):\n if self.procs[jobid].run_without_submitting:\n logger.debug(\n \"Running node %s on master thread\", self.procs[jobid]\n )\n try:\n self.procs[jobid].run()\n except Exception:\n self._clean_queue(jobid, graph)\n self._task_finished_cb(jobid)\n self._remove_node_dirs()\n else:\n tid = self._submit_job(\n deepcopy(self.procs[jobid]), updatehash=updatehash\n )\n if tid is None:\n self.proc_done[jobid] = False\n self.proc_pending[jobid] = False\n else:\n self.pending_tasks.insert(0, (tid, jobid))\n logger.info(\n \"Finished submitting: %s ID: %d\", self.procs[jobid], jobid\n )\n else:\n break\n\n def _local_hash_check(self, jobid, graph):\n if not str2bool(self.procs[jobid].config[\"execution\"][\"local_hash_check\"]):\n return False\n\n try:\n cached, updated = self.procs[jobid].is_cached()\n except Exception:\n logger.warning(\n \"Error while checking node hash, forcing re-run. \"\n \"Although this error may not prevent the workflow from running, \"\n \"it could indicate a major problem. Please report a new issue \"\n \"at https://github.com/nipy/nipype/issues adding the following \"\n \"information:\\n\\n\\tNode: %s\\n\\tInterface: %s.%s\\n\\tTraceback:\\n%s\",\n self.procs[jobid],\n self.procs[jobid].interface.__module__,\n self.procs[jobid].interface.__class__.__name__,\n \"\\n\".join(format_exception(*sys.exc_info())),\n )\n return False\n\n logger.debug(\n 'Checking hash \"%s\" locally: cached=%s, updated=%s.',\n self.procs[jobid],\n cached,\n updated,\n )\n overwrite = self.procs[jobid].overwrite\n always_run = self.procs[jobid].interface.always_run\n\n if (\n cached\n and updated\n and (overwrite is False or overwrite is None and not always_run)\n ):\n logger.debug(\n \"Skipping cached node %s with ID %s.\", self.procs[jobid], jobid\n )\n try:\n self._task_finished_cb(jobid, cached=True)\n self._remove_node_dirs()\n except Exception:\n logger.debug(\n \"Error skipping cached node %s (%s).\\n\\n%s\",\n self.procs[jobid],\n jobid,\n \"\\n\".join(format_exception(*sys.exc_info())),\n )\n self._clean_queue(jobid, graph)\n self.proc_pending[jobid] = False\n return True\n return False\n\n def _task_finished_cb(self, jobid, cached=False):\n \"\"\" Extract outputs and assign to inputs of dependent tasks\n\n This is called when a job is completed.\n \"\"\"\n logger.info(\n \"[Job %d] %s (%s).\",\n jobid,\n \"Cached\" if cached else \"Completed\",\n self.procs[jobid],\n )\n if self._status_callback:\n self._status_callback(self.procs[jobid], \"end\")\n # Update job and worker queues\n self.proc_pending[jobid] = False\n # update the job dependency structure\n rowview = self.depidx.getrowview(jobid)\n rowview[rowview.nonzero()] = 0\n if jobid not in self.mapnodesubids:\n self.refidx[self.refidx[:, jobid].nonzero()[0], jobid] = 0\n\n def _generate_dependency_list(self, graph):\n \"\"\" Generates a dependency list for a list of graphs.\n \"\"\"\n import networkx as nx\n\n self.procs, _ = topological_sort(graph)\n try:\n self.depidx = nx.to_scipy_sparse_matrix(\n graph, 
nodelist=self.procs, format=\"lil\"\n )\n except:\n self.depidx = nx.to_scipy_sparse_matrix(graph, nodelist=self.procs)\n self.refidx = deepcopy(self.depidx)\n self.refidx.astype = np.int\n self.proc_done = np.zeros(len(self.procs), dtype=bool)\n self.proc_pending = np.zeros(len(self.procs), dtype=bool)\n\n def _remove_node_deps(self, jobid, crashfile, graph):\n import networkx as nx\n\n try:\n dfs_preorder = nx.dfs_preorder\n except AttributeError:\n dfs_preorder = nx.dfs_preorder_nodes\n subnodes = [s for s in dfs_preorder(graph, self.procs[jobid])]\n for node in subnodes:\n idx = self.procs.index(node)\n self.proc_done[idx] = True\n self.proc_pending[idx] = False\n return dict(node=self.procs[jobid], dependents=subnodes, crashfile=crashfile)\n\n def _remove_node_dirs(self):\n \"\"\"Removes directories whose outputs have already been used up\n \"\"\"\n if str2bool(self._config[\"execution\"][\"remove_node_directories\"]):\n indices = np.nonzero((self.refidx.sum(axis=1) == 0).__array__())[0]\n for idx in indices:\n if idx in self.mapnodesubids:\n continue\n if self.proc_done[idx] and (not self.proc_pending[idx]):\n self.refidx[idx, idx] = -1\n outdir = self.procs[idx].output_dir()\n logger.info(\n (\n \"[node dependencies finished] \"\n \"removing node: %s from directory %s\"\n )\n % (self.procs[idx]._id, outdir)\n )\n shutil.rmtree(outdir)\n\n\nclass SGELikeBatchManagerBase(DistributedPluginBase):\n \"\"\"Execute workflow with SGE/OGE/PBS like batch system\n \"\"\"\n\n def __init__(self, template, plugin_args=None):\n super(SGELikeBatchManagerBase, self).__init__(plugin_args=plugin_args)\n self._template = template\n self._qsub_args = None\n if plugin_args:\n if \"template\" in plugin_args:\n self._template = plugin_args[\"template\"]\n if os.path.isfile(self._template):\n with open(self._template) as tpl_file:\n self._template = tpl_file.read()\n if \"qsub_args\" in plugin_args:\n self._qsub_args = plugin_args[\"qsub_args\"]\n self._pending = {}\n\n def _is_pending(self, taskid):\n \"\"\"Check if a task is pending in the batch system\n \"\"\"\n raise NotImplementedError\n\n def _submit_batchtask(self, scriptfile, node):\n \"\"\"Submit a task to the batch system\n \"\"\"\n raise NotImplementedError\n\n def _get_result(self, taskid):\n if taskid not in self._pending:\n raise Exception(\"Task %d not found\" % taskid)\n if self._is_pending(taskid):\n return None\n node_dir = self._pending[taskid]\n # MIT HACK\n # on the pbs system at mit the parent node directory needs to be\n # accessed before internal directories become available. there\n # is a disconnect when the queueing engine knows a job is\n # finished to when the directories become statable.\n t = time()\n timeout = float(self._config[\"execution\"][\"job_finished_timeout\"])\n timed_out = True\n while (time() - t) < timeout:\n try:\n glob(os.path.join(node_dir, \"result_*.pklz\")).pop()\n timed_out = False\n break\n except Exception as e:\n logger.debug(e)\n sleep(2)\n if timed_out:\n result_data = {\"hostname\": \"unknown\", \"result\": None, \"traceback\": None}\n results_file = None\n try:\n error_message = (\n \"Job id ({0}) finished or terminated, but \"\n \"results file does not exist after ({1}) \"\n \"seconds. 
Batch dir contains crashdump file \"\n \"if node raised an exception.\\n\"\n \"Node working directory: ({2}) \".format(taskid, timeout, node_dir)\n )\n raise IOError(error_message)\n except IOError as e:\n result_data[\"traceback\"] = \"\\n\".join(format_exception(*sys.exc_info()))\n else:\n results_file = glob(os.path.join(node_dir, \"result_*.pklz\"))[0]\n result_data = load_resultfile(results_file)\n result_out = dict(result=None, traceback=None)\n if isinstance(result_data, dict):\n result_out[\"result\"] = result_data[\"result\"]\n result_out[\"traceback\"] = result_data[\"traceback\"]\n result_out[\"hostname\"] = result_data[\"hostname\"]\n if results_file:\n crash_file = os.path.join(node_dir, \"crashstore.pklz\")\n os.rename(results_file, crash_file)\n else:\n result_out[\"result\"] = result_data\n return result_out\n\n def _submit_job(self, node, updatehash=False):\n \"\"\"submit job and return taskid\n \"\"\"\n pyscript = create_pyscript(node, updatehash=updatehash)\n batch_dir, name = os.path.split(pyscript)\n name = \".\".join(name.split(\".\")[:-1])\n batchscript = \"\\n\".join((self._template, \"%s %s\" % (sys.executable, pyscript)))\n batchscriptfile = os.path.join(batch_dir, \"batchscript_%s.sh\" % name)\n with open(batchscriptfile, \"wt\") as fp:\n fp.writelines(batchscript)\n return self._submit_batchtask(batchscriptfile, node)\n\n def _clear_task(self, taskid):\n del self._pending[taskid]\n\n\nclass GraphPluginBase(PluginBase):\n \"\"\"Base class for plugins that distribute graphs to workflows\n \"\"\"\n\n def __init__(self, plugin_args=None):\n if plugin_args and plugin_args.get(\"status_callback\"):\n logger.warning(\n \"status_callback not supported for Graph submission\" \" plugins\"\n )\n super(GraphPluginBase, self).__init__(plugin_args=plugin_args)\n\n def run(self, graph, config, updatehash=False):\n import networkx as nx\n\n pyfiles = []\n dependencies = {}\n self._config = config\n nodes = list(nx.topological_sort(graph))\n logger.debug(\"Creating executable python files for each node\")\n for idx, node in enumerate(nodes):\n pyfiles.append(\n create_pyscript(node, updatehash=updatehash, store_exception=False)\n )\n dependencies[idx] = [\n nodes.index(prevnode) for prevnode in list(graph.predecessors(node))\n ]\n self._submit_graph(pyfiles, dependencies, nodes)\n\n def _get_args(self, node, keywords):\n values = ()\n for keyword in keywords:\n value = getattr(self, \"_\" + keyword)\n if keyword == \"template\" and os.path.isfile(value):\n with open(value) as f:\n value = f.read()\n if (\n hasattr(node, \"plugin_args\")\n and isinstance(node.plugin_args, dict)\n and keyword in node.plugin_args\n ):\n if keyword == \"template\" and os.path.isfile(node.plugin_args[keyword]):\n with open(node.plugin_args[keyword]) as f:\n tmp_value = f.read()\n else:\n tmp_value = node.plugin_args[keyword]\n\n if \"overwrite\" in node.plugin_args and node.plugin_args[\"overwrite\"]:\n value = tmp_value\n else:\n value += tmp_value\n values += (value,)\n return values\n\n def _submit_graph(self, pyfiles, dependencies, nodes):\n \"\"\"\n pyfiles: list of files corresponding to a topological sort\n dependencies: dictionary of dependencies based on the toplogical sort\n \"\"\"\n raise NotImplementedError\n\n def _get_result(self, taskid):\n if taskid not in self._pending:\n raise Exception(\"Task %d not found\" % taskid)\n if self._is_pending(taskid):\n return None\n node_dir = self._pending[taskid]\n\n glob(os.path.join(node_dir, \"result_*.pklz\")).pop()\n\n results_file = 
glob(os.path.join(node_dir, \"result_*.pklz\"))[0]\n result_data = load_resultfile(results_file)\n result_out = dict(result=None, traceback=None)\n\n if isinstance(result_data, dict):\n result_out[\"result\"] = result_data[\"result\"]\n result_out[\"traceback\"] = result_data[\"traceback\"]\n result_out[\"hostname\"] = result_data[\"hostname\"]\n if results_file:\n crash_file = os.path.join(node_dir, \"crashstore.pklz\")\n os.rename(results_file, crash_file)\n else:\n result_out[\"result\"] = result_data\n\n return result_out\n"
] |
[
[
"numpy.genfromtxt"
],
[
"numpy.sum",
"numpy.all",
"numpy.any",
"numpy.zeros",
"numpy.isinf"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
pavanell/pyscf
|
[
"c0d19e499685e95dbf4c879539ad3a3ceb6934e2",
"c0d19e499685e95dbf4c879539ad3a3ceb6934e2",
"c0d19e499685e95dbf4c879539ad3a3ceb6934e2",
"c0d19e499685e95dbf4c879539ad3a3ceb6934e2"
] |
[
"pyscf/hessian/rhf.py",
"pyscf/prop/magnetizability/rks.py",
"pyscf/fci/direct_spin1.py",
"pyscf/lib/misc.py"
] |
[
"#!/usr/bin/env python\n# Copyright 2014-2019 The PySCF Developers. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n# Author: Qiming Sun <[email protected]>\n#\n\n'''\nNon-relativistic RHF analytical Hessian\n'''\n\nfrom functools import reduce\nimport time\nimport numpy\nfrom pyscf import lib\nfrom pyscf import gto\nfrom pyscf.lib import logger\nfrom pyscf.scf import _vhf\nfrom pyscf.scf import cphf\nfrom pyscf.soscf.newton_ah import _gen_rhf_response\n\n\n# import pyscf.grad.rhf to activate nuc_grad_method method\nfrom pyscf.grad import rhf\n\n\ndef hess_elec(hessobj, mo_energy=None, mo_coeff=None, mo_occ=None,\n mo1=None, mo_e1=None, h1ao=None,\n atmlst=None, max_memory=4000, verbose=None):\n log = logger.new_logger(hessobj, verbose)\n time0 = t1 = (time.clock(), time.time())\n\n mol = hessobj.mol\n mf = hessobj.base\n if mo_energy is None: mo_energy = mf.mo_energy\n if mo_occ is None: mo_occ = mf.mo_occ\n if mo_coeff is None: mo_coeff = mf.mo_coeff\n if atmlst is None: atmlst = range(mol.natm)\n\n de2 = hessobj.partial_hess_elec(mo_energy, mo_coeff, mo_occ, atmlst,\n max_memory, log)\n\n if h1ao is None:\n h1ao = hessobj.make_h1(mo_coeff, mo_occ, hessobj.chkfile, atmlst, log)\n t1 = log.timer_debug1('making H1', *time0)\n if mo1 is None or mo_e1 is None:\n mo1, mo_e1 = hessobj.solve_mo1(mo_energy, mo_coeff, mo_occ, h1ao,\n None, atmlst, max_memory, log)\n t1 = log.timer_debug1('solving MO1', *t1)\n\n if isinstance(h1ao, str):\n h1ao = lib.chkfile.load(h1ao, 'scf_f1ao')\n h1ao = dict([(int(k), h1ao[k]) for k in h1ao])\n if isinstance(mo1, str):\n mo1 = lib.chkfile.load(mo1, 'scf_mo1')\n mo1 = dict([(int(k), mo1[k]) for k in mo1])\n\n nao, nmo = mo_coeff.shape\n mocc = mo_coeff[:,mo_occ>0]\n s1a = -mol.intor('int1e_ipovlp', comp=3)\n\n aoslices = mol.aoslice_by_atom()\n for i0, ia in enumerate(atmlst):\n shl0, shl1, p0, p1 = aoslices[ia]\n s1ao = numpy.zeros((3,nao,nao))\n s1ao[:,p0:p1] += s1a[:,p0:p1]\n s1ao[:,:,p0:p1] += s1a[:,p0:p1].transpose(0,2,1)\n s1oo = numpy.einsum('xpq,pi,qj->xij', s1ao, mocc, mocc)\n\n for j0 in range(i0+1):\n ja = atmlst[j0]\n q0, q1 = aoslices[ja][2:]\n# *2 for double occupancy, *2 for +c.c.\n dm1 = numpy.einsum('ypi,qi->ypq', mo1[ja], mocc)\n de2[i0,j0] += numpy.einsum('xpq,ypq->xy', h1ao[ia], dm1) * 4\n dm1 = numpy.einsum('ypi,qi,i->ypq', mo1[ja], mocc, mo_energy[mo_occ>0])\n de2[i0,j0] -= numpy.einsum('xpq,ypq->xy', s1ao, dm1) * 4\n de2[i0,j0] -= numpy.einsum('xpq,ypq->xy', s1oo, mo_e1[ja]) * 2\n\n for j0 in range(i0):\n de2[j0,i0] = de2[i0,j0].T\n\n log.timer('RHF hessian', *time0)\n return de2\n\ndef partial_hess_elec(hessobj, mo_energy=None, mo_coeff=None, mo_occ=None,\n atmlst=None, max_memory=4000, verbose=None):\n '''Partial derivative\n '''\n log = logger.new_logger(hessobj, verbose)\n time0 = t1 = (time.clock(), time.time())\n\n mol = hessobj.mol\n mf = hessobj.base\n if mo_energy is None: mo_energy = mf.mo_energy\n if mo_occ is None: mo_occ = mf.mo_occ\n if mo_coeff is None: mo_coeff = mf.mo_coeff\n if atmlst is None: 
atmlst = range(mol.natm)\n\n nao, nmo = mo_coeff.shape\n mocc = mo_coeff[:,mo_occ>0]\n nocc = mocc.shape[1]\n dm0 = numpy.dot(mocc, mocc.T) * 2\n # Energy weighted density matrix\n dme0 = numpy.einsum('pi,qi,i->pq', mocc, mocc, mo_energy[mo_occ>0]) * 2\n\n hcore_deriv = hessobj.hcore_generator(mol)\n s1aa, s1ab, s1a = get_ovlp(mol)\n\n vj1, vk1 = _get_jk(mol, 'int2e_ipip1', 9, 's2kl',\n ['lk->s1ij', dm0, # vj1\n 'jk->s1il', dm0]) # vk1\n vhf_diag = vj1 - vk1*.5\n vhf_diag = vhf_diag.reshape(3,3,nao,nao)\n vj1 = vk1 = None\n t1 = log.timer_debug1('contracting int2e_ipip1', *t1)\n\n aoslices = mol.aoslice_by_atom()\n de2 = numpy.zeros((mol.natm,mol.natm,3,3)) # (A,B,dR_A,dR_B)\n for i0, ia in enumerate(atmlst):\n shl0, shl1, p0, p1 = aoslices[ia]\n shls_slice = (shl0, shl1) + (0, mol.nbas)*3\n vj1, vk1, vk2 = _get_jk(mol, 'int2e_ip1ip2', 9, 's1',\n ['ji->s1kl', dm0[:,p0:p1], # vj1\n 'li->s1kj', dm0[:,p0:p1], # vk1\n 'lj->s1ki', dm0 ], # vk2\n shls_slice=shls_slice)\n vhf = vj1 * 2 - vk1 * .5\n vhf[:,:,p0:p1] -= vk2 * .5\n t1 = log.timer_debug1('contracting int2e_ip1ip2 for atom %d'%ia, *t1)\n vj1, vk1 = _get_jk(mol, 'int2e_ipvip1', 9, 's2kl',\n ['lk->s1ij', dm0 , # vj1\n 'li->s1kj', dm0[:,p0:p1]], # vk1\n shls_slice=shls_slice)\n vhf[:,:,p0:p1] += vj1.transpose(0,2,1)\n vhf -= vk1.transpose(0,2,1) * .5\n vhf = vhf.reshape(3,3,nao,nao)\n t1 = log.timer_debug1('contracting int2e_ipvip1 for atom %d'%ia, *t1)\n\n s1ao = numpy.zeros((3,nao,nao))\n s1ao[:,p0:p1] += s1a[:,p0:p1]\n s1ao[:,:,p0:p1] += s1a[:,p0:p1].transpose(0,2,1)\n s1oo = numpy.einsum('xpq,pi,qj->xij', s1ao, mocc, mocc)\n\n de2[i0,i0] += numpy.einsum('xypq,pq->xy', vhf_diag[:,:,p0:p1], dm0[p0:p1])*2\n de2[i0,i0] -= numpy.einsum('xypq,pq->xy', s1aa[:,:,p0:p1], dme0[p0:p1])*2\n\n for j0, ja in enumerate(atmlst[:i0+1]):\n q0, q1 = aoslices[ja][2:]\n # *2 for +c.c.\n de2[i0,j0] += numpy.einsum('xypq,pq->xy', vhf[:,:,q0:q1], dm0[q0:q1])*2\n de2[i0,j0] -= numpy.einsum('xypq,pq->xy', s1ab[:,:,p0:p1,q0:q1], dme0[p0:p1,q0:q1])*2\n\n h1ao = hcore_deriv(ia, ja)\n de2[i0,j0] += numpy.einsum('xypq,pq->xy', h1ao, dm0)\n\n for j0 in range(i0):\n de2[j0,i0] = de2[i0,j0].T\n\n log.timer('RHF partial hessian', *time0)\n return de2\n\ndef make_h1(hessobj, mo_coeff, mo_occ, chkfile=None, atmlst=None, verbose=None):\n time0 = t1 = (time.clock(), time.time())\n mol = hessobj.mol\n if atmlst is None:\n atmlst = range(mol.natm)\n\n nao, nmo = mo_coeff.shape\n mocc = mo_coeff[:,mo_occ>0]\n dm0 = numpy.dot(mocc, mocc.T) * 2\n hcore_deriv = hessobj.base.nuc_grad_method().hcore_generator(mol)\n\n aoslices = mol.aoslice_by_atom()\n h1ao = [None] * mol.natm\n for i0, ia in enumerate(atmlst):\n shl0, shl1, p0, p1 = aoslices[ia]\n shls_slice = (shl0, shl1) + (0, mol.nbas)*3\n vj1, vj2, vk1, vk2 = _get_jk(mol, 'int2e_ip1', 3, 's2kl',\n ['ji->s2kl', -dm0[:,p0:p1], # vj1\n 'lk->s1ij', -dm0 , # vj2\n 'li->s1kj', -dm0[:,p0:p1], # vk1\n 'jk->s1il', -dm0 ], # vk2\n shls_slice=shls_slice)\n vhf = vj1 - vk1*.5\n vhf[:,p0:p1] += vj2 - vk2*.5\n h1 = vhf + vhf.transpose(0,2,1)\n h1 += hcore_deriv(ia)\n\n if chkfile is None:\n h1ao[ia] = h1\n else:\n key = 'scf_f1ao/%d' % ia\n lib.chkfile.save(chkfile, key, h1)\n if chkfile is None:\n return h1ao\n else:\n return chkfile\n\ndef get_hcore(mol):\n '''Part of the second derivatives of core Hamiltonian'''\n h1aa = mol.intor('int1e_ipipkin', comp=9)\n h1ab = mol.intor('int1e_ipkinip', comp=9)\n if mol._pseudo:\n NotImplementedError('Nuclear hessian for GTH PP')\n else:\n h1aa+= mol.intor('int1e_ipipnuc', comp=9)\n h1ab+= 
mol.intor('int1e_ipnucip', comp=9)\n if mol.has_ecp():\n h1aa += mol.intor('ECPscalar_ipipnuc', comp=9)\n h1ab += mol.intor('ECPscalar_ipnucip', comp=9)\n nao = h1aa.shape[-1]\n return h1aa.reshape(3,3,nao,nao), h1ab.reshape(3,3,nao,nao)\n\ndef get_ovlp(mol):\n s1a =-mol.intor('int1e_ipovlp', comp=3)\n nao = s1a.shape[-1]\n s1aa = mol.intor('int1e_ipipovlp', comp=9).reshape(3,3,nao,nao)\n s1ab = mol.intor('int1e_ipovlpip', comp=9).reshape(3,3,nao,nao)\n return s1aa, s1ab, s1a\n\ndef _get_jk(mol, intor, comp, aosym, script_dms,\n shls_slice=None, cintopt=None):\n intor = mol._add_suffix(intor)\n scripts = script_dms[::2]\n dms = script_dms[1::2]\n vs = _vhf.direct_bindm(intor, aosym, scripts, dms, comp,\n mol._atm, mol._bas, mol._env,\n cintopt=cintopt, shls_slice=shls_slice)\n for k, script in enumerate(scripts):\n if 's2' in script:\n hermi = 1\n elif 'a2' in script:\n hermi = 2\n else:\n continue\n\n shape = vs[k].shape\n if shape[-2] == shape[-1]:\n if comp > 1:\n for i in range(comp):\n lib.hermi_triu(vs[k][i], hermi=hermi, inplace=True)\n else:\n lib.hermi_triu(vs[k], hermi=hermi, inplace=True)\n return vs\n\ndef solve_mo1(mf, mo_energy, mo_coeff, mo_occ, h1ao_or_chkfile,\n fx=None, atmlst=None, max_memory=4000, verbose=None):\n '''Solve the first order equation\n\n Kwargs:\n fx : function(dm_mo) => v1_mo\n A function to generate the induced potential.\n See also the function gen_vind.\n '''\n mol = mf.mol\n if atmlst is None: atmlst = range(mol.natm)\n\n nao, nmo = mo_coeff.shape\n mocc = mo_coeff[:,mo_occ>0]\n nocc = mocc.shape[1]\n\n if fx is None:\n fx = gen_vind(mf, mo_coeff, mo_occ)\n s1a = -mol.intor('int1e_ipovlp', comp=3)\n\n def _ao2mo(mat):\n return numpy.asarray([reduce(numpy.dot, (mo_coeff.T, x, mocc)) for x in mat])\n\n mem_now = lib.current_memory()[0]\n max_memory = max(2000, max_memory*.9-mem_now)\n blksize = max(2, int(max_memory*1e6/8 / (nmo*nocc*3*6)))\n mo1s = [None] * mol.natm\n e1s = [None] * mol.natm\n aoslices = mol.aoslice_by_atom()\n for ia0, ia1 in lib.prange(0, len(atmlst), blksize):\n s1vo = []\n h1vo = []\n for i0 in range(ia0, ia1):\n ia = atmlst[i0]\n shl0, shl1, p0, p1 = aoslices[ia]\n s1ao = numpy.zeros((3,nao,nao))\n s1ao[:,p0:p1] += s1a[:,p0:p1]\n s1ao[:,:,p0:p1] += s1a[:,p0:p1].transpose(0,2,1)\n s1vo.append(_ao2mo(s1ao))\n if isinstance(h1ao_or_chkfile, str):\n key = 'scf_f1ao/%d' % ia\n h1ao = lib.chkfile.load(h1ao_or_chkfile, key)\n else:\n h1ao = h1ao_or_chkfile[ia]\n h1vo.append(_ao2mo(h1ao))\n\n h1vo = numpy.vstack(h1vo)\n s1vo = numpy.vstack(s1vo)\n mo1, e1 = cphf.solve(fx, mo_energy, mo_occ, h1vo, s1vo)\n mo1 = numpy.einsum('pq,xqi->xpi', mo_coeff, mo1).reshape(-1,3,nao,nocc)\n e1 = e1.reshape(-1,3,nocc,nocc)\n\n for k in range(ia1-ia0):\n ia = atmlst[k+ia0]\n if isinstance(h1ao_or_chkfile, str):\n key = 'scf_mo1/%d' % ia\n lib.chkfile.save(h1ao_or_chkfile, key, mo1[k])\n else:\n mo1s[ia] = mo1[k]\n e1s[ia] = e1[k].reshape(3,nocc,nocc)\n mo1 = e1 = None\n\n if isinstance(h1ao_or_chkfile, str):\n return h1ao_or_chkfile, e1s\n else:\n return mo1s, e1s\n\ndef gen_vind(mf, mo_coeff, mo_occ):\n nao, nmo = mo_coeff.shape\n mocc = mo_coeff[:,mo_occ>0]\n nocc = mocc.shape[1]\n vresp = _gen_rhf_response(mf, mo_coeff, mo_occ, hermi=1)\n def fx(mo1):\n mo1 = mo1.reshape(-1,nmo,nocc)\n nset = len(mo1)\n dm1 = numpy.empty((nset,nao,nao))\n for i, x in enumerate(mo1):\n dm = reduce(numpy.dot, (mo_coeff, x*2, mocc.T)) # *2 for double occupancy\n dm1[i] = dm + dm.T\n v1 = vresp(dm1)\n v1vo = numpy.empty_like(mo1)\n for i, x in enumerate(v1):\n v1vo[i] = 
reduce(numpy.dot, (mo_coeff.T, x, mocc))\n return v1vo\n return fx\n\ndef hess_nuc(mol, atmlst=None):\n h = numpy.zeros((mol.natm,mol.natm,3,3))\n qs = numpy.asarray([mol.atom_charge(i) for i in range(mol.natm)])\n rs = numpy.asarray([mol.atom_coord(i) for i in range(mol.natm)])\n for i in range(mol.natm):\n r12 = rs[i] - rs\n s12 = numpy.sqrt(numpy.einsum('ki,ki->k', r12, r12))\n s12[i] = 1e60\n tmp1 = qs[i] * qs / s12**3\n tmp2 = numpy.einsum('k, ki,kj->kij',-3*qs[i]*qs/s12**5, r12, r12)\n\n h[i,i,0,0] = \\\n h[i,i,1,1] = \\\n h[i,i,2,2] = -tmp1.sum()\n h[i,i] -= numpy.einsum('kij->ij', tmp2)\n\n h[i,:,0,0] += tmp1\n h[i,:,1,1] += tmp1\n h[i,:,2,2] += tmp1\n h[i,:] += tmp2\n\n if atmlst is not None:\n h = h[atmlst][:,atmlst]\n return h\n\n\ndef gen_hop(hobj, mo_energy=None, mo_coeff=None, mo_occ=None, verbose=None):\n log = logger.new_logger(hobj, verbose)\n mol = hobj.mol\n mf = hobj.base\n\n if mo_energy is None: mo_energy = mf.mo_energy\n if mo_occ is None: mo_occ = mf.mo_occ\n if mo_coeff is None: mo_coeff = mf.mo_coeff\n\n natm = mol.natm\n nao, nmo = mo_coeff.shape\n mocc = mo_coeff[:,mo_occ>0]\n nocc = mocc.shape[1]\n\n atmlst = range(natm)\n max_memory = max(2000, hobj.max_memory - lib.current_memory()[0])\n de2 = hobj.partial_hess_elec(mo_energy, mo_coeff, mo_occ, atmlst,\n max_memory, log)\n de2 += hobj.hess_nuc()\n\n # Compute H1 integrals and store in hobj.chkfile\n hobj.make_h1(mo_coeff, mo_occ, hobj.chkfile, atmlst, log)\n\n aoslices = mol.aoslice_by_atom()\n s1a = -mol.intor('int1e_ipovlp', comp=3)\n\n fvind = gen_vind(mf, mo_coeff, mo_occ)\n def h_op(x):\n x = x.reshape(natm,3)\n hx = numpy.einsum('abxy,ax->by', de2, x)\n h1ao = 0\n s1ao = 0\n for ia in range(natm):\n shl0, shl1, p0, p1 = aoslices[ia]\n h1ao_i = lib.chkfile.load(hobj.chkfile, 'scf_f1ao/%d' % ia)\n h1ao += numpy.einsum('x,xij->ij', x[ia], h1ao_i)\n s1ao_i = numpy.zeros((3,nao,nao))\n s1ao_i[:,p0:p1] += s1a[:,p0:p1]\n s1ao_i[:,:,p0:p1] += s1a[:,p0:p1].transpose(0,2,1)\n s1ao += numpy.einsum('x,xij->ij', x[ia], s1ao_i)\n\n s1vo = reduce(numpy.dot, (mo_coeff.T, s1ao, mocc))\n h1vo = reduce(numpy.dot, (mo_coeff.T, h1ao, mocc))\n mo1, mo_e1 = cphf.solve(fvind, mo_energy, mo_occ, h1vo, s1vo)\n mo1 = numpy.dot(mo_coeff, mo1)\n mo_e1 = mo_e1.reshape(nocc,nocc)\n dm1 = numpy.einsum('pi,qi->pq', mo1, mocc)\n dme1 = numpy.einsum('pi,qi,i->pq', mo1, mocc, mo_energy[mo_occ>0])\n dme1 = dme1 + dme1.T + reduce(numpy.dot, (mocc, mo_e1.T, mocc.T))\n\n for ja in range(natm):\n q0, q1 = aoslices[ja][2:]\n h1ao = lib.chkfile.load(hobj.chkfile, 'scf_f1ao/%s'%ja)\n hx[ja] += numpy.einsum('xpq,pq->x', h1ao, dm1) * 4\n hx[ja] -= numpy.einsum('xpq,pq->x', s1a[:,q0:q1], dme1[q0:q1]) * 2\n hx[ja] -= numpy.einsum('xpq,qp->x', s1a[:,q0:q1], dme1[:,q0:q1]) * 2\n return hx.ravel()\n\n hdiag = numpy.einsum('aaxx->ax', de2).ravel()\n return h_op, hdiag\n\n\nclass Hessian(lib.StreamObject):\n '''Non-relativistic restricted Hartree-Fock hessian'''\n def __init__(self, scf_method):\n self.verbose = scf_method.verbose\n self.stdout = scf_method.stdout\n self.mol = scf_method.mol\n self.base = scf_method\n self.chkfile = scf_method.chkfile\n self.max_memory = self.mol.max_memory\n\n self.atmlst = range(self.mol.natm)\n self.de = numpy.zeros((0,0,3,3)) # (A,B,dR_A,dR_B)\n self._keys = set(self.__dict__.keys())\n\n partial_hess_elec = partial_hess_elec\n hess_elec = hess_elec\n make_h1 = make_h1\n\n def get_hcore(self, mol=None):\n if mol is None: mol = self.mol\n return get_hcore(mol)\n\n def hcore_generator(self, mol=None):\n if mol is None: 
mol = self.mol\n with_x2c = getattr(self.base, 'with_x2c', None)\n if with_x2c:\n return with_x2c.hcore_deriv_generator(deriv=2)\n\n with_ecp = mol.has_ecp()\n if with_ecp:\n ecp_atoms = set(mol._ecpbas[:,gto.ATOM_OF])\n else:\n ecp_atoms = ()\n aoslices = mol.aoslice_by_atom()\n nbas = mol.nbas\n nao = mol.nao_nr()\n h1aa, h1ab = self.get_hcore(mol)\n def get_hcore(iatm, jatm):\n ish0, ish1, i0, i1 = aoslices[iatm]\n jsh0, jsh1, j0, j1 = aoslices[jatm]\n zi = mol.atom_charge(iatm)\n zj = mol.atom_charge(jatm)\n if iatm == jatm:\n with mol.with_rinv_as_nucleus(iatm):\n rinv2aa = mol.intor('int1e_ipiprinv', comp=9)\n rinv2ab = mol.intor('int1e_iprinvip', comp=9)\n rinv2aa *= zi\n rinv2ab *= zi\n if with_ecp and iatm in ecp_atoms:\n rinv2aa -= mol.intor('ECPscalar_ipiprinv', comp=9)\n rinv2ab -= mol.intor('ECPscalar_iprinvip', comp=9)\n rinv2aa = rinv2aa.reshape(3,3,nao,nao)\n rinv2ab = rinv2ab.reshape(3,3,nao,nao)\n hcore = -rinv2aa - rinv2ab\n hcore[:,:,i0:i1] += h1aa[:,:,i0:i1]\n hcore[:,:,i0:i1] += rinv2aa[:,:,i0:i1]\n hcore[:,:,i0:i1] += rinv2ab[:,:,i0:i1]\n hcore[:,:,:,i0:i1] += rinv2aa[:,:,i0:i1].transpose(0,1,3,2)\n hcore[:,:,:,i0:i1] += rinv2ab[:,:,:,i0:i1]\n hcore[:,:,i0:i1,i0:i1] += h1ab[:,:,i0:i1,i0:i1]\n\n else:\n hcore = numpy.zeros((3,3,nao,nao))\n hcore[:,:,i0:i1,j0:j1] += h1ab[:,:,i0:i1,j0:j1]\n with mol.with_rinv_as_nucleus(iatm):\n shls_slice = (jsh0, jsh1, 0, nbas)\n rinv2aa = mol.intor('int1e_ipiprinv', comp=9, shls_slice=shls_slice)\n rinv2ab = mol.intor('int1e_iprinvip', comp=9, shls_slice=shls_slice)\n rinv2aa *= zi\n rinv2ab *= zi\n if with_ecp and iatm in ecp_atoms:\n rinv2aa -= mol.intor('ECPscalar_ipiprinv', comp=9, shls_slice=shls_slice)\n rinv2ab -= mol.intor('ECPscalar_iprinvip', comp=9, shls_slice=shls_slice)\n hcore[:,:,j0:j1] += rinv2aa.reshape(3,3,j1-j0,nao)\n hcore[:,:,j0:j1] += rinv2ab.reshape(3,3,j1-j0,nao).transpose(1,0,2,3)\n\n with mol.with_rinv_as_nucleus(jatm):\n shls_slice = (ish0, ish1, 0, nbas)\n rinv2aa = mol.intor('int1e_ipiprinv', comp=9, shls_slice=shls_slice)\n rinv2ab = mol.intor('int1e_iprinvip', comp=9, shls_slice=shls_slice)\n rinv2aa *= zj\n rinv2ab *= zj\n if with_ecp and jatm in ecp_atoms:\n rinv2aa -= mol.intor('ECPscalar_ipiprinv', comp=9, shls_slice=shls_slice)\n rinv2ab -= mol.intor('ECPscalar_iprinvip', comp=9, shls_slice=shls_slice)\n hcore[:,:,i0:i1] += rinv2aa.reshape(3,3,i1-i0,nao)\n hcore[:,:,i0:i1] += rinv2ab.reshape(3,3,i1-i0,nao)\n return hcore + hcore.conj().transpose(0,1,3,2)\n return get_hcore\n\n def solve_mo1(self, mo_energy, mo_coeff, mo_occ, h1ao_or_chkfile,\n fx=None, atmlst=None, max_memory=4000, verbose=None):\n return solve_mo1(self.base, mo_energy, mo_coeff, mo_occ, h1ao_or_chkfile,\n fx, atmlst, max_memory, verbose)\n\n def hess_nuc(self, mol=None, atmlst=None):\n if mol is None: mol = self.mol\n return hess_nuc(mol, atmlst)\n\n def kernel(self, mo_energy=None, mo_coeff=None, mo_occ=None, atmlst=None):\n cput0 = (time.clock(), time.time())\n if mo_energy is None: mo_energy = self.base.mo_energy\n if mo_coeff is None: mo_coeff = self.base.mo_coeff\n if mo_occ is None: mo_occ = self.base.mo_occ\n if atmlst is None:\n atmlst = self.atmlst\n else:\n self.atmlst = atmlst\n\n de = self.hess_elec(mo_energy, mo_coeff, mo_occ, atmlst=atmlst)\n self.de = de + self.hess_nuc(self.mol, atmlst=atmlst)\n return self.de\n hess = kernel\n\n gen_hop = gen_hop\n\n# Inject to RHF class\nfrom pyscf import scf\nscf.hf.RHF.Hessian = lib.class_as_method(Hessian)\n\n\nif __name__ == '__main__':\n from pyscf import gto\n from pyscf 
import scf\n\n mol = gto.Mole()\n mol.verbose = 0\n mol.output = None\n mol.atom = [\n [1 , (1. , 0. , 0.000)],\n [1 , (0. , 1. , 0.000)],\n [1 , (0. , -1.517 , 1.177)],\n [1 , (0. , 1.517 , 1.177)] ]\n mol.basis = '631g'\n mol.unit = 'B'\n mol.build()\n mf = scf.RHF(mol)\n mf.conv_tol = 1e-14\n mf.scf()\n n3 = mol.natm * 3\n hobj = mf.Hessian()\n e2 = hobj.kernel().transpose(0,2,1,3).reshape(n3,n3)\n print(lib.finger(e2) - -0.50693144355876429)\n #from hessian import rhf_o0\n #e2ref = rhf_o0.Hessian(mf).kernel().transpose(0,2,1,3).reshape(n3,n3)\n #print numpy.linalg.norm(e2-e2ref)\n #print numpy.allclose(e2,e2ref)\n\n def grad_full(ia, inc):\n coord = mol.atom_coord(ia).copy()\n ptr = mol._atm[ia,gto.PTR_COORD]\n de = []\n for i in range(3):\n mol._env[ptr+i] = coord[i] + inc\n mf = scf.RHF(mol).run(conv_tol=1e-14)\n e1a = mf.nuc_grad_method().kernel()\n mol._env[ptr+i] = coord[i] - inc\n mf = scf.RHF(mol).run(conv_tol=1e-14)\n e1b = mf.nuc_grad_method().kernel()\n mol._env[ptr+i] = coord[i]\n de.append((e1a-e1b)/(2*inc))\n return de\n e2ref = [grad_full(ia, .5e-4) for ia in range(mol.natm)]\n e2ref = numpy.asarray(e2ref).reshape(n3,n3)\n print(numpy.linalg.norm(e2-e2ref))\n print(abs(e2-e2ref).max())\n print(numpy.allclose(e2,e2ref,atol=1e-6))\n\n# \\partial^2 E / \\partial R \\partial R'\n e2 = hobj.partial_hess_elec(mf.mo_energy, mf.mo_coeff, mf.mo_occ)\n e2 += hobj.hess_nuc(mol)\n e2 = e2.transpose(0,2,1,3).reshape(n3,n3)\n def grad_partial_R(ia, inc):\n coord = mol.atom_coord(ia).copy()\n ptr = mol._atm[ia,gto.PTR_COORD]\n de = []\n for i in range(3):\n mol._env[ptr+i] = coord[i] + inc\n e1a = mf.nuc_grad_method().kernel()\n mol._env[ptr+i] = coord[i] - inc\n e1b = mf.nuc_grad_method().kernel()\n mol._env[ptr+i] = coord[i]\n de.append((e1a-e1b)/(2*inc))\n return de\n e2ref = [grad_partial_R(ia, .5e-4) for ia in range(mol.natm)]\n e2ref = numpy.asarray(e2ref).reshape(n3,n3)\n print(numpy.linalg.norm(e2-e2ref))\n print(abs(e2-e2ref).max())\n print(numpy.allclose(e2,e2ref,atol=1e-8))\n",
"#!/usr/bin/env python\n# Copyright 2014-2019 The PySCF Developers. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n# Author: Qiming Sun <[email protected]>\n#\n\n'''\nNon-relativistic magnetizability tensor for DFT\n\nRefs:\n[1] R. Cammi, J. Chem. Phys., 109, 3185 (1998)\n[2] Todd A. Keith, Chem. Phys., 213, 123 (1996)\n'''\n\n\nimport numpy\nfrom pyscf import lib\nfrom pyscf.lib import logger\nfrom pyscf.scf import jk\nfrom pyscf.dft import numint\nfrom pyscf.prop.nmr import rhf as rhf_nmr\nfrom pyscf.prop.nmr import rks as rks_nmr\nfrom pyscf.prop.magnetizability import rhf as rhf_mag\n\n\ndef dia(magobj, gauge_orig=None):\n mol = magobj.mol\n mf = magobj._scf\n mo_energy = mf.mo_energy\n mo_coeff = mf.mo_coeff\n mo_occ = mf.mo_occ\n orbo = mo_coeff[:,mo_occ > 0]\n dm0 = numpy.dot(orbo, orbo.T) * 2\n dm0 = lib.tag_array(dm0, mo_coeff=mo_coeff, mo_occ=mo_occ)\n dme0 = numpy.dot(orbo * mo_energy[mo_occ > 0], orbo.T) * 2\n\n e2 = rhf_mag._get_dia_1e(magobj, gauge_orig, dm0, dme0)\n\n if gauge_orig is not None:\n return -e2\n\n # Computing the 2nd order Vxc integrals from GIAO\n grids = mf.grids\n ni = mf._numint\n xc_code = mf.xc\n xctype = ni._xc_type(xc_code)\n omega, alpha, hyb = ni.rsh_and_hybrid_coeff(xc_code, mol.spin)\n\n make_rho, nset, nao = ni._gen_rho_evaluator(mol, dm0, hermi=1)\n ngrids = len(grids.weights)\n mem_now = lib.current_memory()[0]\n max_memory = max(2000, mf.max_memory*.9-mem_now)\n BLKSIZE = numint.BLKSIZE\n blksize = min(int(max_memory/12*1e6/8/nao/BLKSIZE)*BLKSIZE, ngrids)\n\n vmat = numpy.zeros((3,3,nao,nao))\n if xctype == 'LDA':\n ao_deriv = 0\n for ao, mask, weight, coords \\\n in ni.block_loop(mol, grids, nao, ao_deriv, max_memory,\n blksize=blksize):\n rho = make_rho(0, ao, mask, 'LDA')\n vxc = ni.eval_xc(xc_code, rho, 0, deriv=1)[1]\n vrho = vxc[0]\n r_ao = numpy.einsum('pi,px->pxi', ao, coords)\n aow = numpy.einsum('pxi,p,p->pxi', r_ao, weight, vrho)\n vmat += lib.einsum('pxi,pyj->xyij', r_ao, aow)\n rho = vxc = vrho = aow = None\n\n elif xctype == 'GGA':\n ao_deriv = 1\n for ao, mask, weight, coords \\\n in ni.block_loop(mol, grids, nao, ao_deriv, max_memory,\n blksize=blksize):\n rho = make_rho(0, ao, mask, 'GGA')\n vxc = ni.eval_xc(xc_code, rho, 0, deriv=1)[1]\n wv = numint._rks_gga_wv0(rho, vxc, weight)\n\n # Computing \\nabla (r * AO) = r * \\nabla AO + [\\nabla,r]_- * AO\n r_ao = numpy.einsum('npi,px->npxi', ao, coords)\n r_ao[1,:,0] += ao[0]\n r_ao[2,:,1] += ao[0]\n r_ao[3,:,2] += ao[0]\n\n aow = numpy.einsum('npxi,np->pxi', r_ao, wv)\n vmat += lib.einsum('pxi,pyj->xyij', r_ao[0], aow)\n rho = vxc = vrho = vsigma = wv = aow = None\n\n vmat = vmat + vmat.transpose(0,1,3,2)\n\n elif xctype == 'MGGA':\n raise NotImplementedError('meta-GGA')\n\n vmat = _add_giao_phase(mol, vmat)\n e2 += numpy.einsum('qp,xypq->xy', dm0, vmat)\n vmat = None\n\n e2 = e2.ravel()\n # Handle the hybrid functional and the range-separated functional\n if abs(hyb) > 1e-10:\n vs = jk.get_jk(mol, [dm0]*3, ['ijkl,ji->s2kl',\n 'ijkl,jk->s1il',\n 
'ijkl,li->s1kj'],\n 'int2e_gg1', 's4', 9, hermi=1)\n e2 += numpy.einsum('xpq,qp->x', vs[0], dm0)\n e2 -= numpy.einsum('xpq,qp->x', vs[1], dm0) * .25 * hyb\n e2 -= numpy.einsum('xpq,qp->x', vs[2], dm0) * .25 * hyb\n vk = jk.get_jk(mol, dm0, 'ijkl,jk->s1il',\n 'int2e_g1g2', 'aa4', 9, hermi=0)\n e2 -= numpy.einsum('xpq,qp->x', vk, dm0) * .5 * hyb\n\n if abs(omega) > 1e-10:\n with mol.with_range_coulomb(omega):\n vs = jk.get_jk(mol, [dm0]*2, ['ijkl,jk->s1il',\n 'ijkl,li->s1kj'],\n 'int2e_gg1', 's4', 9, hermi=1)\n e2 -= numpy.einsum('xpq,qp->x', vs[0], dm0) * .25 * (alpha-hyb)\n e2 -= numpy.einsum('xpq,qp->x', vs[1], dm0) * .25 * (alpha-hyb)\n vk = jk.get_jk(mol, dm0, 'ijkl,jk->s1il',\n 'int2e_g1g2', 'aa4', 9, hermi=0)\n e2 -= numpy.einsum('xpq,qp->x', vk, dm0) * .5 * (alpha-hyb)\n\n else:\n vj = jk.get_jk(mol, dm0, 'ijkl,ji->s2kl',\n 'int2e_gg1', 's4', 9, hermi=1)\n e2 += numpy.einsum('xpq,qp->x', vj, dm0)\n\n return -e2.reshape(3, 3)\n\ndef _add_giao_phase(mol, vmat):\n '''Add the factor i/2*(Ri-Rj) of the GIAO phase e^{i/2 (Ri-Rj) times r}'''\n ao_coords = rhf_mag._get_ao_coords(mol)\n Rx = .5 * (ao_coords[:,0:1] - ao_coords[:,0])\n Ry = .5 * (ao_coords[:,1:2] - ao_coords[:,1])\n Rz = .5 * (ao_coords[:,2:3] - ao_coords[:,2])\n vxc20 = numpy.empty_like(vmat)\n vxc20[0] = Ry * vmat[2] - Rz * vmat[1]\n vxc20[1] = Rz * vmat[0] - Rx * vmat[2]\n vxc20[2] = Rx * vmat[1] - Ry * vmat[0]\n vxc20, vmat = vmat, vxc20\n vxc20[:,0] = Ry * vmat[:,2] - Rz * vmat[:,1]\n vxc20[:,1] = Rz * vmat[:,0] - Rx * vmat[:,2]\n vxc20[:,2] = Rx * vmat[:,1] - Ry * vmat[:,0]\n vxc20 *= -1\n return vxc20\n\n\nclass Magnetizability(rhf_mag.Magnetizability):\n dia = dia\n get_fock = rks_nmr.get_fock\n solve_mo1 = rks_nmr.solve_mo1\n\n\nif __name__ == '__main__':\n from pyscf import gto\n from pyscf import dft\n mol = gto.Mole()\n mol.verbose = 0\n mol.output = None\n\n mol.atom = [\n ['Ne' , (0. , 0. , 0.)], ]\n mol.basis='631g'\n mol.build()\n\n mf = dft.RKS(mol).run()\n mag = Magnetizability(mf).kernel()\n print(lib.finger(mag) - -0.30375149255154221)\n\n mf.set(xc = 'b3lyp').run()\n mag = Magnetizability(mf).kernel()\n print(lib.finger(mag) - -0.3022331813238171)\n\n mol.atom = [\n [1 , (0. , 0. , .917)],\n ['F' , (0. , 0. , 0. )], ]\n mol.basis = '6-31g'\n mol.build()\n\n mf = dft.RKS(mol).set(xc='lda,vwn').run()\n mag = Magnetizability(mf).kernel()\n print(lib.finger(mag) - -0.4313210213418015)\n\n mf = dft.RKS(mol).set(xc='b3lyp').run()\n mag = Magnetizability(mf).kernel()\n print(lib.finger(mag) - -0.42828345739100998)\n\n mol = gto.M(atom='''O 0. 0. 0.\n H 0. -0.757 0.587\n H 0. 0.757 0.587''',\n basis='ccpvdz')\n mf = dft.RKS(mol)\n mf.xc = 'b3lyp'\n mf.run()\n mag = Magnetizability(mf).kernel()\n print(lib.finger(mag) - -0.61042958313712403)\n",
"#!/usr/bin/env python\n# Copyright 2014-2018 The PySCF Developers. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n# Author: Qiming Sun <[email protected]>\n#\n\n'''\nFull CI solver for spin-free Hamiltonian. This solver can be used to compute\ndoublet, triplet,...\n\nThe CI wfn are stored as a 2D array [alpha,beta], where each row corresponds\nto an alpha string. For each row (alpha string), there are\ntotal-num-beta-strings of columns. Each column corresponds to a beta string.\n\nDifferent FCI solvers are implemented to support different type of symmetry.\n Symmetry\nFile Point group Spin singlet Real hermitian* Alpha/beta degeneracy\ndirect_spin0_symm Yes Yes Yes Yes\ndirect_spin1_symm Yes No Yes Yes\ndirect_spin0 No Yes Yes Yes\ndirect_spin1 No No Yes Yes\ndirect_uhf No No Yes No\ndirect_nosym No No No** Yes\n\n* Real hermitian Hamiltonian implies (ij|kl) = (ji|kl) = (ij|lk) = (ji|lk)\n** Hamiltonian is real but not hermitian, (ij|kl) != (ji|kl) ...\n'''\n\nimport sys\nimport ctypes\nimport numpy\nimport scipy.linalg\nfrom pyscf import lib\nfrom pyscf import ao2mo\nfrom pyscf.lib import logger\nfrom pyscf.fci import cistring\nfrom pyscf.fci import rdm\nfrom pyscf.fci import spin_op\nfrom pyscf.fci.spin_op import contract_ss\nfrom pyscf import __config__\n\nlibfci = lib.load_library('libfci')\n\ndef contract_1e(f1e, fcivec, norb, nelec, link_index=None):\n '''Contract the 1-electron Hamiltonian with a FCI vector to get a new FCI\n vector.\n '''\n fcivec = numpy.asarray(fcivec, order='C')\n link_indexa, link_indexb = _unpack(norb, nelec, link_index)\n na, nlinka = link_indexa.shape[:2]\n nb, nlinkb = link_indexb.shape[:2]\n assert(fcivec.size == na*nb)\n f1e_tril = lib.pack_tril(f1e)\n ci1 = numpy.zeros_like(fcivec)\n libfci.FCIcontract_a_1e(f1e_tril.ctypes.data_as(ctypes.c_void_p),\n fcivec.ctypes.data_as(ctypes.c_void_p),\n ci1.ctypes.data_as(ctypes.c_void_p),\n ctypes.c_int(norb),\n ctypes.c_int(na), ctypes.c_int(nb),\n ctypes.c_int(nlinka), ctypes.c_int(nlinkb),\n link_indexa.ctypes.data_as(ctypes.c_void_p),\n link_indexb.ctypes.data_as(ctypes.c_void_p))\n libfci.FCIcontract_b_1e(f1e_tril.ctypes.data_as(ctypes.c_void_p),\n fcivec.ctypes.data_as(ctypes.c_void_p),\n ci1.ctypes.data_as(ctypes.c_void_p),\n ctypes.c_int(norb),\n ctypes.c_int(na), ctypes.c_int(nb),\n ctypes.c_int(nlinka), ctypes.c_int(nlinkb),\n link_indexa.ctypes.data_as(ctypes.c_void_p),\n link_indexb.ctypes.data_as(ctypes.c_void_p))\n return ci1\n\ndef contract_2e(eri, fcivec, norb, nelec, link_index=None):\n r'''Contract the 2-electron Hamiltonian with a FCI vector to get a new FCI\n vector.\n\n Note the input arg eri is NOT the 2e hamiltonian matrix, the 2e hamiltonian is\n\n .. math::\n\n h2e &= eri_{pq,rs} p^+ q r^+ s \\\\\n &= (pq|rs) p^+ r^+ s q - (pq|rs) \\delta_{qr} p^+ s\n\n So eri is defined as\n\n .. math::\n\n eri_{pq,rs} = (pq|rs) - (1/Nelec) \\sum_q (pq|qs)\n\n to restore the symmetry between pq and rs,\n\n .. 
math::\n\n eri_{pq,rs} = (pq|rs) - (.5/Nelec) [\\sum_q (pq|qs) + \\sum_p (pq|rp)]\n\n See also :func:`direct_spin1.absorb_h1e`\n '''\n fcivec = numpy.asarray(fcivec, order='C')\n eri = ao2mo.restore(4, eri, norb)\n link_indexa, link_indexb = _unpack(norb, nelec, link_index)\n na, nlinka = link_indexa.shape[:2]\n nb, nlinkb = link_indexb.shape[:2]\n assert(fcivec.size == na*nb)\n ci1 = numpy.empty_like(fcivec)\n\n libfci.FCIcontract_2e_spin1(eri.ctypes.data_as(ctypes.c_void_p),\n fcivec.ctypes.data_as(ctypes.c_void_p),\n ci1.ctypes.data_as(ctypes.c_void_p),\n ctypes.c_int(norb),\n ctypes.c_int(na), ctypes.c_int(nb),\n ctypes.c_int(nlinka), ctypes.c_int(nlinkb),\n link_indexa.ctypes.data_as(ctypes.c_void_p),\n link_indexb.ctypes.data_as(ctypes.c_void_p))\n return ci1\n\ndef make_hdiag(h1e, eri, norb, nelec):\n '''Diagonal Hamiltonian for Davidson preconditioner\n '''\n neleca, nelecb = _unpack_nelec(nelec)\n h1e = numpy.asarray(h1e, order='C')\n eri = ao2mo.restore(1, eri, norb)\n occslsta = occslstb = cistring._gen_occslst(range(norb), neleca)\n if neleca != nelecb:\n occslstb = cistring._gen_occslst(range(norb), nelecb)\n na = len(occslsta)\n nb = len(occslstb)\n\n hdiag = numpy.empty(na*nb)\n jdiag = numpy.asarray(numpy.einsum('iijj->ij',eri), order='C')\n kdiag = numpy.asarray(numpy.einsum('ijji->ij',eri), order='C')\n c_h1e = h1e.ctypes.data_as(ctypes.c_void_p)\n c_jdiag = jdiag.ctypes.data_as(ctypes.c_void_p)\n c_kdiag = kdiag.ctypes.data_as(ctypes.c_void_p)\n libfci.FCImake_hdiag_uhf(hdiag.ctypes.data_as(ctypes.c_void_p),\n c_h1e, c_h1e, c_jdiag, c_jdiag, c_jdiag, c_kdiag, c_kdiag,\n ctypes.c_int(norb),\n ctypes.c_int(na), ctypes.c_int(nb),\n ctypes.c_int(neleca), ctypes.c_int(nelecb),\n occslsta.ctypes.data_as(ctypes.c_void_p),\n occslstb.ctypes.data_as(ctypes.c_void_p))\n return hdiag\n\ndef absorb_h1e(h1e, eri, norb, nelec, fac=1):\n '''Modify 2e Hamiltonian to include 1e Hamiltonian contribution.\n '''\n if not isinstance(nelec, (int, numpy.number)):\n nelec = sum(nelec)\n h2e = ao2mo.restore(1, eri.copy(), norb)\n f1e = h1e - numpy.einsum('jiik->jk', h2e) * .5\n f1e = f1e * (1./(nelec+1e-100))\n for k in range(norb):\n h2e[k,k,:,:] += f1e\n h2e[:,:,k,k] += f1e\n return ao2mo.restore(4, h2e, norb) * fac\n\ndef pspace(h1e, eri, norb, nelec, hdiag=None, np=400):\n '''pspace Hamiltonian to improve Davidson preconditioner. 
See, CPL, 169, 463\n '''\n if norb > 63:\n raise NotImplementedError('norb > 63')\n\n neleca, nelecb = _unpack_nelec(nelec)\n h1e = numpy.ascontiguousarray(h1e)\n eri = ao2mo.restore(1, eri, norb)\n nb = cistring.num_strings(norb, nelecb)\n if hdiag is None:\n hdiag = make_hdiag(h1e, eri, norb, nelec)\n if hdiag.size < np:\n addr = numpy.arange(hdiag.size)\n else:\n try:\n addr = numpy.argpartition(hdiag, np-1)[:np]\n except AttributeError:\n addr = numpy.argsort(hdiag)[:np]\n addra, addrb = divmod(addr, nb)\n stra = cistring.addrs2str(norb, neleca, addra)\n strb = cistring.addrs2str(norb, nelecb, addrb)\n np = len(addr)\n h0 = numpy.zeros((np,np))\n libfci.FCIpspace_h0tril(h0.ctypes.data_as(ctypes.c_void_p),\n h1e.ctypes.data_as(ctypes.c_void_p),\n eri.ctypes.data_as(ctypes.c_void_p),\n stra.ctypes.data_as(ctypes.c_void_p),\n strb.ctypes.data_as(ctypes.c_void_p),\n ctypes.c_int(norb), ctypes.c_int(np))\n\n HERMITIAN_THRESHOLD = 1e-10\n if (abs(h1e - h1e.T).max() < HERMITIAN_THRESHOLD and\n abs(eri - eri.transpose(1,0,3,2)).max() < HERMITIAN_THRESHOLD):\n # symmetric Hamiltonian\n h0 = lib.hermi_triu(h0)\n else:\n # Fill the upper triangular part\n h0 = numpy.asarray(h0, order='F')\n h1e = numpy.asarray(h1e.T, order='C')\n eri = numpy.asarray(eri.transpose(1,0,3,2), order='C')\n libfci.FCIpspace_h0tril(h0.ctypes.data_as(ctypes.c_void_p),\n h1e.ctypes.data_as(ctypes.c_void_p),\n eri.ctypes.data_as(ctypes.c_void_p),\n stra.ctypes.data_as(ctypes.c_void_p),\n strb.ctypes.data_as(ctypes.c_void_p),\n ctypes.c_int(norb), ctypes.c_int(np))\n\n idx = numpy.arange(np)\n h0[idx,idx] = hdiag[addr]\n return addr, h0\n\n# be careful with single determinant initial guess. It may diverge the\n# preconditioner when the eigvalue of first davidson iter equals to hdiag\ndef kernel(h1e, eri, norb, nelec, ci0=None, level_shift=1e-3, tol=1e-10,\n lindep=1e-14, max_cycle=50, max_space=12, nroots=1,\n davidson_only=False, pspace_size=400, orbsym=None, wfnsym=None,\n ecore=0, **kwargs):\n return _kfactory(FCISolver, h1e, eri, norb, nelec, ci0, level_shift,\n tol, lindep, max_cycle, max_space, nroots,\n davidson_only, pspace_size, ecore=ecore, **kwargs)\ndef _kfactory(Solver, h1e, eri, norb, nelec, ci0=None, level_shift=1e-3,\n tol=1e-10, lindep=1e-14, max_cycle=50, max_space=12, nroots=1,\n davidson_only=False, pspace_size=400, ecore=0, **kwargs):\n cis = Solver(None)\n cis.level_shift = level_shift\n cis.conv_tol = tol\n cis.lindep = lindep\n cis.max_cycle = max_cycle\n cis.max_space = max_space\n cis.nroots = nroots\n cis.davidson_only = davidson_only\n cis.pspace_size = pspace_size\n\n unknown = {}\n for k in kwargs:\n if not hasattr(cis, k):\n unknown[k] = kwargs[k]\n setattr(cis, k, kwargs[k])\n if unknown:\n sys.stderr.write('Unknown keys %s for FCI kernel %s\\n' %\n (str(unknown.keys()), __name__))\n e, c = cis.kernel(h1e, eri, norb, nelec, ci0, ecore=ecore, **unknown)\n return e, c\n\ndef energy(h1e, eri, fcivec, norb, nelec, link_index=None):\n '''Compute the FCI electronic energy for given Hamiltonian and FCI vector.\n '''\n h2e = absorb_h1e(h1e, eri, norb, nelec, .5)\n ci1 = contract_2e(h2e, fcivec, norb, nelec, link_index)\n return numpy.dot(fcivec.reshape(-1), ci1.reshape(-1))\n\n\ndef make_rdm1s(fcivec, norb, nelec, link_index=None):\n '''Spin separated 1-particle density matrices.\n The return values include two density matrices: (alpha,alpha), (beta,beta)\n\n dm1[p,q] = <q^\\dagger p>\n\n The convention is based on McWeeney's book, Eq (5.4.20).\n The contraction between 1-particle Hamiltonian and 
rdm1 is\n E = einsum('pq,qp', h1, rdm1)\n '''\n if link_index is None:\n neleca, nelecb = _unpack_nelec(nelec)\n link_indexa = cistring.gen_linkstr_index(range(norb), neleca)\n link_indexb = cistring.gen_linkstr_index(range(norb), nelecb)\n link_index = (link_indexa, link_indexb)\n rdm1a = rdm.make_rdm1_spin1('FCImake_rdm1a', fcivec, fcivec,\n norb, nelec, link_index)\n rdm1b = rdm.make_rdm1_spin1('FCImake_rdm1b', fcivec, fcivec,\n norb, nelec, link_index)\n return rdm1a, rdm1b\n\ndef make_rdm1(fcivec, norb, nelec, link_index=None):\n '''Spin-traced one-particle density matrix\n\n dm1[p,q] = <q_alpha^\\dagger p_alpha> + <q_beta^\\dagger p_beta>\n\n The convention is based on McWeeney's book, Eq (5.4.20)\n The contraction between 1-particle Hamiltonian and rdm1 is\n E = einsum('pq,qp', h1, rdm1)\n '''\n rdm1a, rdm1b = make_rdm1s(fcivec, norb, nelec, link_index)\n return rdm1a + rdm1b\n\ndef make_rdm12s(fcivec, norb, nelec, link_index=None, reorder=True):\n r'''Spin separated 1- and 2-particle density matrices.\n The return values include two lists, a list of 1-particle density matrices\n and a list of 2-particle density matrices. The density matrices are:\n (alpha,alpha), (beta,beta) for 1-particle density matrices;\n (alpha,alpha,alpha,alpha), (alpha,alpha,beta,beta),\n (beta,beta,beta,beta) for 2-particle density matrices.\n\n 1pdm[p,q] = :math:`\\langle q^\\dagger p\\rangle`;\n 2pdm[p,q,r,s] = :math:`\\langle p^\\dagger r^\\dagger s q\\rangle`.\n\n Energy should be computed as\n E = einsum('pq,qp', h1, 1pdm) + 1/2 * einsum('pqrs,pqrs', eri, 2pdm)\n where h1[p,q] = <p|h|q> and eri[p,q,r,s] = (pq|rs)\n '''\n dm1a, dm2aa = rdm.make_rdm12_spin1('FCIrdm12kern_a', fcivec, fcivec,\n norb, nelec, link_index, 1)\n dm1b, dm2bb = rdm.make_rdm12_spin1('FCIrdm12kern_b', fcivec, fcivec,\n norb, nelec, link_index, 1)\n _, dm2ab = rdm.make_rdm12_spin1('FCItdm12kern_ab', fcivec, fcivec,\n norb, nelec, link_index, 0)\n if reorder:\n dm1a, dm2aa = rdm.reorder_rdm(dm1a, dm2aa, inplace=True)\n dm1b, dm2bb = rdm.reorder_rdm(dm1b, dm2bb, inplace=True)\n return (dm1a, dm1b), (dm2aa, dm2ab, dm2bb)\n\ndef make_rdm12(fcivec, norb, nelec, link_index=None, reorder=True):\n r'''Spin traced 1- and 2-particle density matrices.\n\n 1pdm[p,q] = :math:`\\langle q_\\alpha^\\dagger p_\\alpha \\rangle +\n \\langle q_\\beta^\\dagger p_\\beta \\rangle`;\n 2pdm[p,q,r,s] = :math:`\\langle p_\\alpha^\\dagger r_\\alpha^\\dagger s_\\alpha q_\\alpha\\rangle +\n \\langle p_\\beta^\\dagger r_\\alpha^\\dagger s_\\alpha q_\\beta\\rangle +\n \\langle p_\\alpha^\\dagger r_\\beta^\\dagger s_\\beta q_\\alpha\\rangle +\n \\langle p_\\beta^\\dagger r_\\beta^\\dagger s_\\beta q_\\beta\\rangle`.\n\n Energy should be computed as\n E = einsum('pq,qp', h1, 1pdm) + 1/2 * einsum('pqrs,pqrs', eri, 2pdm)\n where h1[p,q] = <p|h|q> and eri[p,q,r,s] = (pq|rs)\n '''\n #(dm1a, dm1b), (dm2aa, dm2ab, dm2bb) = \\\n # make_rdm12s(fcivec, norb, nelec, link_index, reorder)\n #return dm1a+dm1b, dm2aa+dm2ab+dm2ab.transpose(2,3,0,1)+dm2bb\n dm1, dm2 = rdm.make_rdm12_spin1('FCIrdm12kern_sf', fcivec, fcivec,\n norb, nelec, link_index, 1)\n if reorder:\n dm1, dm2 = rdm.reorder_rdm(dm1, dm2, inplace=True)\n return dm1, dm2\n\ndef trans_rdm1s(cibra, ciket, norb, nelec, link_index=None):\n r'''Spin separated transition 1-particle density matrices.\n The return values include two density matrices: (alpha,alpha), (beta,beta).\n See also function :func:`make_rdm1s`\n\n 1pdm[p,q] = :math:`\\langle q^\\dagger p \\rangle`\n '''\n rdm1a = rdm.make_rdm1_spin1('FCItrans_rdm1a', 
cibra, ciket,\n norb, nelec, link_index)\n rdm1b = rdm.make_rdm1_spin1('FCItrans_rdm1b', cibra, ciket,\n norb, nelec, link_index)\n return rdm1a, rdm1b\n\ndef trans_rdm1(cibra, ciket, norb, nelec, link_index=None):\n r'''Spin traced transition 1-particle transition density matrices.\n\n 1pdm[p,q] = :math:`\\langle q_\\alpha^\\dagger p_\\alpha \\rangle\n + \\langle q_\\beta^\\dagger p_\\beta \\rangle`\n '''\n rdm1a, rdm1b = trans_rdm1s(cibra, ciket, norb, nelec, link_index)\n return rdm1a + rdm1b\n\ndef trans_rdm12s(cibra, ciket, norb, nelec, link_index=None, reorder=True):\n r'''Spin separated 1- and 2-particle transition density matrices.\n The return values include two lists, a list of 1-particle transition\n density matrices and a list of 2-particle transition density matrices.\n The density matrices are:\n (alpha,alpha), (beta,beta) for 1-particle transition density matrices;\n (alpha,alpha,alpha,alpha), (alpha,alpha,beta,beta),\n (beta,beta,alpha,alpha), (beta,beta,beta,beta) for 2-particle transition\n density matrices.\n\n 1pdm[p,q] = :math:`\\langle q^\\dagger p\\rangle`;\n 2pdm[p,q,r,s] = :math:`\\langle p^\\dagger r^\\dagger s q\\rangle`.\n '''\n dm1a, dm2aa = rdm.make_rdm12_spin1('FCItdm12kern_a', cibra, ciket,\n norb, nelec, link_index, 2)\n dm1b, dm2bb = rdm.make_rdm12_spin1('FCItdm12kern_b', cibra, ciket,\n norb, nelec, link_index, 2)\n _, dm2ab = rdm.make_rdm12_spin1('FCItdm12kern_ab', cibra, ciket,\n norb, nelec, link_index, 0)\n _, dm2ba = rdm.make_rdm12_spin1('FCItdm12kern_ab', ciket, cibra,\n norb, nelec, link_index, 0)\n dm2ba = dm2ba.transpose(3,2,1,0)\n if reorder:\n dm1a, dm2aa = rdm.reorder_rdm(dm1a, dm2aa, inplace=True)\n dm1b, dm2bb = rdm.reorder_rdm(dm1b, dm2bb, inplace=True)\n return (dm1a, dm1b), (dm2aa, dm2ab, dm2ba, dm2bb)\n\ndef trans_rdm12(cibra, ciket, norb, nelec, link_index=None, reorder=True):\n r'''Spin traced transition 1- and 2-particle transition density matrices.\n\n 1pdm[p,q] = :math:`\\langle q^\\dagger p\\rangle`;\n 2pdm[p,q,r,s] = :math:`\\langle p^\\dagger r^\\dagger s q\\rangle`.\n '''\n #(dm1a, dm1b), (dm2aa, dm2ab, dm2ba, dm2bb) = \\\n # trans_rdm12s(cibra, ciket, norb, nelec, link_index, reorder)\n #return dm1a+dm1b, dm2aa+dm2ab+dm2ba+dm2bb\n dm1, dm2 = rdm.make_rdm12_spin1('FCItdm12kern_sf', cibra, ciket,\n norb, nelec, link_index, 2)\n if reorder:\n dm1, dm2 = rdm.reorder_rdm(dm1, dm2, inplace=True)\n return dm1, dm2\n\ndef _get_init_guess(na, nb, nroots, hdiag):\n '''Initial guess is the single Slater determinant\n '''\n # The \"nroots\" lowest determinats based on energy expectation value.\n ci0 = []\n try:\n addrs = numpy.argpartition(hdiag, nroots-1)[:nroots]\n except AttributeError:\n addrs = numpy.argsort(hdiag)[:nroots]\n for addr in addrs:\n x = numpy.zeros((na*nb))\n x[addr] = 1\n ci0.append(x.ravel())\n\n # Add noise\n ci0[0][0 ] += 1e-5\n ci0[0][-1] -= 1e-5\n return ci0\n\ndef get_init_guess(norb, nelec, nroots, hdiag):\n '''Initial guess is the single Slater determinant\n '''\n neleca, nelecb = _unpack_nelec(nelec)\n na = cistring.num_strings(norb, neleca)\n nb = cistring.num_strings(norb, nelecb)\n return _get_init_guess(na, nb, nroots, hdiag)\n\n\n###############################################################\n# direct-CI driver\n###############################################################\n\ndef kernel_ms1(fci, h1e, eri, norb, nelec, ci0=None, link_index=None,\n tol=None, lindep=None, max_cycle=None, max_space=None,\n nroots=None, davidson_only=None, pspace_size=None,\n max_memory=None, verbose=None, ecore=0, 
**kwargs):\n if nroots is None: nroots = fci.nroots\n if davidson_only is None: davidson_only = fci.davidson_only\n if pspace_size is None: pspace_size = fci.pspace_size\n\n nelec = _unpack_nelec(nelec, fci.spin)\n assert(0 <= nelec[0] <= norb and 0 <= nelec[1] <= norb)\n link_indexa, link_indexb = _unpack(norb, nelec, link_index)\n na = link_indexa.shape[0]\n nb = link_indexb.shape[0]\n hdiag = fci.make_hdiag(h1e, eri, norb, nelec)\n nroots = min(hdiag.size, nroots)\n\n try:\n addr, h0 = fci.pspace(h1e, eri, norb, nelec, hdiag, max(pspace_size,nroots))\n if pspace_size > 0:\n pw, pv = fci.eig(h0)\n else:\n pw = pv = None\n\n if pspace_size >= na*nb and ci0 is None and not davidson_only:\n# The degenerated wfn can break symmetry. The davidson iteration with proper\n# initial guess doesn't have this issue\n if na*nb == 1:\n return pw[0]+ecore, pv[:,0].reshape(1,1)\n elif nroots > 1:\n civec = numpy.empty((nroots,na*nb))\n civec[:,addr] = pv[:,:nroots].T\n return pw[:nroots]+ecore, [c.reshape(na,nb) for c in civec]\n elif abs(pw[0]-pw[1]) > 1e-12:\n civec = numpy.empty((na*nb))\n civec[addr] = pv[:,0]\n return pw[0]+ecore, civec.reshape(na,nb)\n except NotImplementedError:\n addr = [0]\n pw = pv = None\n\n precond = fci.make_precond(hdiag, pw, pv, addr)\n\n h2e = fci.absorb_h1e(h1e, eri, norb, nelec, .5)\n def hop(c):\n hc = fci.contract_2e(h2e, c, norb, nelec, (link_indexa,link_indexb))\n return hc.ravel()\n\n if ci0 is None:\n if callable(getattr(fci, 'get_init_guess', None)):\n ci0 = lambda: fci.get_init_guess(norb, nelec, nroots, hdiag)\n else:\n def ci0(): # lazy initialization to reduce memory footprint\n x0 = []\n for i in range(nroots):\n x = numpy.zeros(na*nb)\n x[addr[i]] = 1\n x0.append(x)\n return x0\n elif not callable(ci0):\n if isinstance(ci0, numpy.ndarray) and ci0.size == na*nb:\n ci0 = [ci0.ravel()]\n else:\n ci0 = [x.ravel() for x in ci0]\n\n if tol is None: tol = fci.conv_tol\n if lindep is None: lindep = fci.lindep\n if max_cycle is None: max_cycle = fci.max_cycle\n if max_space is None: max_space = fci.max_space\n if max_memory is None: max_memory = fci.max_memory\n if verbose is None: verbose = logger.Logger(fci.stdout, fci.verbose)\n tol_residual = getattr(fci, 'conv_tol_residual', None)\n\n with lib.with_omp_threads(fci.threads):\n #e, c = lib.davidson(hop, ci0, precond, tol=fci.conv_tol, lindep=fci.lindep)\n e, c = fci.eig(hop, ci0, precond, tol=tol, lindep=lindep,\n max_cycle=max_cycle, max_space=max_space, nroots=nroots,\n max_memory=max_memory, verbose=verbose, follow_state=True,\n tol_residual=tol_residual, **kwargs)\n if nroots > 1:\n return e+ecore, [ci.reshape(na,nb) for ci in c]\n else:\n return e+ecore, c.reshape(na,nb)\n\ndef make_pspace_precond(hdiag, pspaceig, pspaceci, addr, level_shift=0):\n # precondition with pspace Hamiltonian, CPL, 169, 463\n def precond(r, e0, x0, *args):\n #h0e0 = h0 - numpy.eye(len(addr))*(e0-level_shift)\n h0e0inv = numpy.dot(pspaceci/(pspaceig-(e0-level_shift)), pspaceci.T)\n hdiaginv = 1/(hdiag - (e0-level_shift))\n hdiaginv[abs(hdiaginv)>1e8] = 1e8\n h0x0 = x0 * hdiaginv\n #h0x0[addr] = numpy.linalg.solve(h0e0, x0[addr])\n h0x0[addr] = numpy.dot(h0e0inv, x0[addr])\n h0r = r * hdiaginv\n #h0r[addr] = numpy.linalg.solve(h0e0, r[addr])\n h0r[addr] = numpy.dot(h0e0inv, r[addr])\n e1 = numpy.dot(x0, h0r) / numpy.dot(x0, h0x0)\n x1 = r - e1*x0\n #pspace_x1 = x1[addr].copy()\n x1 *= hdiaginv\n# pspace (h0-e0)^{-1} cause diverging?\n #x1[addr] = numpy.linalg.solve(h0e0, pspace_x1)\n return x1\n return precond\n\ndef 
make_diag_precond(hdiag, pspaceig, pspaceci, addr, level_shift=0):\n return lib.make_diag_precond(hdiag, level_shift)\n\n\nclass FCISolver(lib.StreamObject):\n '''Full CI solver\n\n Attributes:\n verbose : int\n Print level. Default value equals to :class:`Mole.verbose`.\n max_cycle : int\n Total number of iterations. Default is 100\n max_space : tuple of int\n Davidson iteration space size. Default is 14.\n conv_tol : float\n Energy convergence tolerance. Default is 1e-10.\n level_shift : float\n Level shift applied in the preconditioner to avoid singularity.\n Default is 1e-3\n davidson_only : bool\n By default, the entire Hamiltonian matrix will be constructed and\n diagonalized if the system is small (see attribute pspace_size).\n Setting this parameter to True will enforce the eigenvalue\n problems being solved by Davidson subspace algorithm. This flag\n should be enabled when initial guess is given or particular spin\n symmetry or point-group symmetry is required because the initial\n guess or symmetry are completely ignored in the direct diagonlization.\n pspace_size : int\n The dimension of Hamiltonian matrix over which Davidson iteration\n algorithm will be used for the eigenvalue problem. Default is 400.\n This is roughly corresponding to a (6e,6o) system.\n nroots : int\n Number of states to be solved. Default is 1, the ground state.\n spin : int or None\n Spin (2S = nalpha-nbeta) of the system. If this attribute is None,\n spin will be determined by the argument nelec (number of electrons)\n of the kernel function.\n wfnsym : str or int\n Symmetry of wavefunction. It is used only in direct_spin1_symm\n and direct_spin0_symm solver.\n\n Saved results\n\n eci : float or a list of float\n FCI energy(ies)\n ci : nparray\n FCI wfn vector(s)\n converged : bool (or a list of bool for multiple roots)\n Whether davidson iteration is converged\n\n Examples:\n\n >>> from pyscf import gto, scf, ao2mo, fci\n >>> mol = gto.M(atom='Li 0 0 0; Li 0 0 1', basis='sto-3g')\n >>> mf = scf.RHF(mol).run()\n >>> h1 = mf.mo_coeff.T.dot(mf.get_hcore()).dot(mf.mo_coeff)\n >>> eri = ao2mo.kernel(mol, mf.mo_coeff)\n >>> cisolver = fci.direct_spin1.FCI(mol)\n >>> e, ci = cisolver.kernel(h1, eri, h1.shape[1], mol.nelec, ecore=mol.energy_nuc())\n >>> print(e)\n -14.4197890826\n '''\n\n max_cycle = getattr(__config__, 'fci_direct_spin1_FCI_max_cycle', 100)\n max_space = getattr(__config__, 'fci_direct_spin1_FCI_max_space', 12)\n conv_tol = getattr(__config__, 'fci_direct_spin1_FCI_conv_tol', 1e-10)\n conv_tol_residual = getattr(__config__, 'fci_direct_spin1_FCI_conv_tol_residual', None)\n lindep = getattr(__config__, 'fci_direct_spin1_FCI_lindep', 1e-14)\n\n # level shift in precond\n level_shift = getattr(__config__, 'fci_direct_spin1_FCI_level_shift', 1e-3)\n\n # force the diagonlization use davidson iteration. When the CI space\n # is small, the solver exactly diagonlizes the Hamiltonian. But this\n # solution will ignore the initial guess. 
Setting davidson_only can\n # enforce the solution on the initial guess state\n davidson_only = getattr(__config__, 'fci_direct_spin1_FCI_davidson_only', False)\n\n pspace_size = getattr(__config__, 'fci_direct_spin1_FCI_pspace_size', 400)\n threads = getattr(__config__, 'fci_direct_spin1_FCI_threads', None)\n lessio = getattr(__config__, 'fci_direct_spin1_FCI_lessio', False)\n\n def __init__(self, mol=None):\n if mol is None:\n self.stdout = sys.stdout\n self.verbose = logger.NOTE\n self.max_memory = lib.param.MAX_MEMORY\n else:\n self.stdout = mol.stdout\n self.verbose = mol.verbose\n self.max_memory = mol.max_memory\n self.mol = mol\n self.nroots = 1\n self.spin = None\n# Initialize symmetry attributes for the compatibility with direct_spin1_symm\n# solver. They are not used by direct_spin1 solver.\n self.orbsym = None\n self.wfnsym = None\n\n self.converged = False\n self.norb = None\n self.nelec = None\n self.eci = None\n self.ci = None\n\n keys = set(('max_cycle', 'max_space', 'conv_tol', 'lindep',\n 'level_shift', 'davidson_only', 'pspace_size', 'threads',\n 'lessio'))\n self._keys = set(self.__dict__.keys()).union(keys)\n\n @property\n def e_tot(self):\n return self.eci\n\n @property\n def nstates(self):\n return self.nroots\n @nstates.setter\n def nstates(self, x):\n self.nroots = x\n\n def dump_flags(self, verbose=None):\n if verbose is None: verbose = self.verbose\n log = logger.Logger(self.stdout, verbose)\n log.info('******** %s ********', self.__class__)\n log.info('max. cycles = %d', self.max_cycle)\n log.info('conv_tol = %g', self.conv_tol)\n log.info('davidson only = %s', self.davidson_only)\n log.info('linear dependence = %g', self.lindep)\n log.info('level shift = %g', self.level_shift)\n log.info('max iter space = %d', self.max_space)\n log.info('max_memory %d MB', self.max_memory)\n log.info('nroots = %d', self.nroots)\n log.info('pspace_size = %d', self.pspace_size)\n log.info('spin = %s', self.spin)\n return self\n\n @lib.with_doc(absorb_h1e.__doc__)\n def absorb_h1e(self, h1e, eri, norb, nelec, fac=1):\n return absorb_h1e(h1e, eri, norb, nelec, fac)\n\n @lib.with_doc(make_hdiag.__doc__)\n def make_hdiag(self, h1e, eri, norb, nelec):\n return make_hdiag(h1e, eri, norb, nelec)\n\n @lib.with_doc(pspace.__doc__)\n def pspace(self, h1e, eri, norb, nelec, hdiag=None, np=400):\n return pspace(h1e, eri, norb, nelec, hdiag, np)\n\n @lib.with_doc(contract_1e.__doc__)\n def contract_1e(self, f1e, fcivec, norb, nelec, link_index=None, **kwargs):\n return contract_1e(f1e, fcivec, norb, nelec, link_index, **kwargs)\n\n @lib.with_doc(contract_2e.__doc__)\n def contract_2e(self, eri, fcivec, norb, nelec, link_index=None, **kwargs):\n return contract_2e(eri, fcivec, norb, nelec, link_index, **kwargs)\n\n def eig(self, op, x0=None, precond=None, **kwargs):\n if isinstance(op, numpy.ndarray):\n self.converged = True\n return scipy.linalg.eigh(op)\n\n self.converged, e, ci = \\\n lib.davidson1(lambda xs: [op(x) for x in xs],\n x0, precond, lessio=self.lessio, **kwargs)\n if kwargs['nroots'] == 1:\n self.converged = self.converged[0]\n e = e[0]\n ci = ci[0]\n return e, ci\n\n def make_precond(self, hdiag, pspaceig, pspaceci, addr):\n if pspaceig is None:\n return make_diag_precond(hdiag, pspaceig, pspaceci, addr,\n self.level_shift)\n else:\n return make_pspace_precond(hdiag, pspaceig, pspaceci, addr,\n self.level_shift)\n\n @lib.with_doc(get_init_guess.__doc__)\n def get_init_guess(self, norb, nelec, nroots, hdiag):\n return get_init_guess(norb, nelec, nroots, hdiag)\n\n def 
kernel(self, h1e, eri, norb, nelec, ci0=None,\n tol=None, lindep=None, max_cycle=None, max_space=None,\n nroots=None, davidson_only=None, pspace_size=None,\n orbsym=None, wfnsym=None, ecore=0, **kwargs):\n if self.verbose >= logger.WARN:\n self.check_sanity()\n self.norb = norb\n self.nelec = nelec\n self.eci, self.ci = \\\n kernel_ms1(self, h1e, eri, norb, nelec, ci0, None,\n tol, lindep, max_cycle, max_space, nroots,\n davidson_only, pspace_size, ecore=ecore, **kwargs)\n return self.eci, self.ci\n\n @lib.with_doc(energy.__doc__)\n def energy(self, h1e, eri, fcivec, norb, nelec, link_index=None):\n h2e = self.absorb_h1e(h1e, eri, norb, nelec, .5)\n ci1 = self.contract_2e(h2e, fcivec, norb, nelec, link_index)\n return numpy.dot(fcivec.reshape(-1), ci1.reshape(-1))\n\n def spin_square(self, fcivec, norb, nelec):\n nelec = _unpack_nelec(nelec, self.spin)\n return spin_op.spin_square0(fcivec, norb, nelec)\n spin_square.__doc__ = spin_op.spin_square0.__doc__\n\n @lib.with_doc(make_rdm1s.__doc__)\n def make_rdm1s(self, fcivec, norb, nelec, link_index=None):\n nelec = _unpack_nelec(nelec, self.spin)\n return make_rdm1s(fcivec, norb, nelec, link_index)\n\n @lib.with_doc(make_rdm1.__doc__)\n def make_rdm1(self, fcivec, norb, nelec, link_index=None):\n nelec = _unpack_nelec(nelec, self.spin)\n return make_rdm1(fcivec, norb, nelec, link_index)\n\n @lib.with_doc(make_rdm12s.__doc__)\n def make_rdm12s(self, fcivec, norb, nelec, link_index=None, reorder=True):\n nelec = _unpack_nelec(nelec, self.spin)\n return make_rdm12s(fcivec, norb, nelec, link_index, reorder)\n\n @lib.with_doc(make_rdm12.__doc__)\n def make_rdm12(self, fcivec, norb, nelec, link_index=None, reorder=True):\n nelec = _unpack_nelec(nelec, self.spin)\n return make_rdm12(fcivec, norb, nelec, link_index, reorder)\n\n def make_rdm2(self, fcivec, norb, nelec, link_index=None, reorder=True):\n r'''Spin traced 2-particle density matrice\n\n NOTE the 2pdm is :math:`\\langle p^\\dagger q^\\dagger s r\\rangle` but\n stored as [p,r,q,s]\n '''\n nelec = _unpack_nelec(nelec, self.spin)\n return self.make_rdm12(fcivec, norb, nelec, link_index, reorder)[1]\n\n @lib.with_doc(trans_rdm1s.__doc__)\n def trans_rdm1s(self, cibra, ciket, norb, nelec, link_index=None):\n nelec = _unpack_nelec(nelec, self.spin)\n return trans_rdm1s(cibra, ciket, norb, nelec, link_index)\n\n @lib.with_doc(trans_rdm1.__doc__)\n def trans_rdm1(self, cibra, ciket, norb, nelec, link_index=None):\n nelec = _unpack_nelec(nelec, self.spin)\n return trans_rdm1(cibra, ciket, norb, nelec, link_index)\n\n @lib.with_doc(trans_rdm12s.__doc__)\n def trans_rdm12s(self, cibra, ciket, norb, nelec, link_index=None,\n reorder=True):\n nelec = _unpack_nelec(nelec, self.spin)\n return trans_rdm12s(cibra, ciket, norb, nelec, link_index, reorder)\n\n @lib.with_doc(trans_rdm12.__doc__)\n def trans_rdm12(self, cibra, ciket, norb, nelec, link_index=None,\n reorder=True):\n nelec = _unpack_nelec(nelec, self.spin)\n return trans_rdm12(cibra, ciket, norb, nelec, link_index, reorder)\n\n def large_ci(self, fcivec, norb, nelec,\n tol=getattr(__config__, 'fci_addons_large_ci_tol', .1),\n return_strs=getattr(__config__, 'fci_addons_large_ci_return_strs', True)):\n from pyscf.fci import addons\n nelec = _unpack_nelec(nelec, self.spin)\n return addons.large_ci(fcivec, norb, nelec, tol, return_strs)\n\n def contract_ss(self, fcivec, norb, nelec):\n from pyscf.fci import spin_op\n return spin_op.contract_ss(fcivec, norb, nelec)\n\n def gen_linkstr(self, norb, nelec, tril=True, spin=None):\n if spin is None:\n 
spin = self.spin\n neleca, nelecb = _unpack_nelec(nelec, spin)\n if tril:\n link_indexa = cistring.gen_linkstr_index_trilidx(range(norb), neleca)\n link_indexb = cistring.gen_linkstr_index_trilidx(range(norb), nelecb)\n else:\n link_indexa = cistring.gen_linkstr_index(range(norb), neleca)\n link_indexb = cistring.gen_linkstr_index(range(norb), nelecb)\n return link_indexa, link_indexb\n\nFCI = FCISolver\n\n\ndef _unpack_nelec(nelec, spin=None):\n if spin is None:\n spin = 0\n else:\n nelec = int(numpy.sum(nelec))\n if isinstance(nelec, (int, numpy.number)):\n nelecb = (nelec-spin)//2\n neleca = nelec - nelecb\n nelec = neleca, nelecb\n return nelec\n\ndef _unpack(norb, nelec, link_index, spin=None):\n if link_index is None:\n neleca, nelecb = _unpack_nelec(nelec, spin)\n link_indexa = link_indexb = cistring.gen_linkstr_index_trilidx(range(norb), neleca)\n if neleca != nelecb:\n link_indexb = cistring.gen_linkstr_index_trilidx(range(norb), nelecb)\n return link_indexa, link_indexb\n else:\n return link_index\n\n\nif __name__ == '__main__':\n from functools import reduce\n from pyscf import gto\n from pyscf import scf\n from pyscf import ao2mo\n\n mol = gto.Mole()\n mol.verbose = 0\n mol.output = None#\"out_h2o\"\n mol.atom = [\n ['H', ( 1.,-1. , 0. )],\n ['H', ( 0.,-1. ,-1. )],\n ['H', ( 1.,-0.5 ,-1. )],\n #['H', ( 0.,-0.5 ,-1. )],\n #['H', ( 0.,-0.5 ,-0. )],\n ['H', ( 0.,-0. ,-1. )],\n ['H', ( 1.,-0.5 , 0. )],\n ['H', ( 0., 1. , 1. )],\n ]\n\n mol.basis = {'H': 'sto-3g'}\n mol.build()\n\n m = scf.RHF(mol)\n ehf = m.scf()\n\n cis = FCISolver(mol)\n norb = m.mo_coeff.shape[1]\n nelec = mol.nelectron - 2\n h1e = reduce(numpy.dot, (m.mo_coeff.T, m.get_hcore(), m.mo_coeff))\n eri = ao2mo.incore.general(m._eri, (m.mo_coeff,)*4, compact=False)\n eri = eri.reshape(norb,norb,norb,norb)\n nea = nelec//2 + 1\n neb = nelec//2 - 1\n nelec = (nea, neb)\n\n e1 = cis.kernel(h1e, eri, norb, nelec, davidson_only=True)[0]\n print(e1, e1 - -7.7466756526056004)\n\n",
"#!/usr/bin/env python\n# Copyright 2014-2018 The PySCF Developers. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n# Author: Qiming Sun <[email protected]>\n#\n\n'''\nSome hacky functions\n'''\n\nimport os, sys\nimport warnings\nimport imp\nimport tempfile\nimport shutil\nimport functools\nimport itertools\nimport math\nimport types\nimport ctypes\nimport numpy\nimport h5py\nfrom pyscf.lib import param\nfrom pyscf import __config__\n\nif h5py.version.version[:4] == '2.2.':\n sys.stderr.write('h5py-%s is found in your environment. '\n 'h5py-%s has bug in threading mode.\\n'\n 'Async-IO is disabled.\\n' % ((h5py.version.version,)*2))\n\nc_double_p = ctypes.POINTER(ctypes.c_double)\nc_int_p = ctypes.POINTER(ctypes.c_int)\nc_null_ptr = ctypes.POINTER(ctypes.c_void_p)\n\ndef load_library(libname):\n# numpy 1.6 has bug in ctypeslib.load_library, see numpy/distutils/misc_util.py\n if '1.6' in numpy.__version__:\n if (sys.platform.startswith('linux') or\n sys.platform.startswith('gnukfreebsd')):\n so_ext = '.so'\n elif sys.platform.startswith('darwin'):\n so_ext = '.dylib'\n elif sys.platform.startswith('win'):\n so_ext = '.dll'\n else:\n raise OSError('Unknown platform')\n libname_so = libname + so_ext\n return ctypes.CDLL(os.path.join(os.path.dirname(__file__), libname_so))\n else:\n _loaderpath = os.path.dirname(__file__)\n return numpy.ctypeslib.load_library(libname, _loaderpath)\n\n#Fixme, the standard resouce module gives wrong number when objects are released\n#see http://fa.bianp.net/blog/2013/different-ways-to-get-memory-consumption-or-lessons-learned-from-memory_profiler/#fn:1\n#or use slow functions as memory_profiler._get_memory did\nCLOCK_TICKS = os.sysconf(\"SC_CLK_TCK\")\nPAGESIZE = os.sysconf(\"SC_PAGE_SIZE\")\ndef current_memory():\n '''Return the size of used memory and allocated virtual memory (in MB)'''\n #import resource\n #return resource.getrusage(resource.RUSAGE_SELF).ru_maxrss / 1000\n if sys.platform.startswith('linux'):\n with open(\"/proc/%s/statm\" % os.getpid()) as f:\n vms, rss = [int(x)*PAGESIZE for x in f.readline().split()[:2]]\n return rss/1e6, vms/1e6\n else:\n return 0, 0\n\ndef num_threads(n=None):\n '''Set the number of OMP threads. If argument is not specified, the\n function will return the total number of available OMP threads.\n\n It's recommended to call this function to set OMP threads than\n \"os.environ['OMP_NUM_THREADS'] = int(n)\". This is because environment\n variables like OMP_NUM_THREADS were read when a module was imported. They\n cannot be reset through os.environ after the module was loaded.\n\n Examples:\n\n >>> from pyscf import lib\n >>> print(lib.num_threads())\n 8\n >>> lib.num_threads(4)\n 4\n >>> print(lib.num_threads())\n 4\n '''\n from pyscf.lib.numpy_helper import _np_helper\n if n is not None:\n _np_helper.set_omp_threads.restype = ctypes.c_int\n threads = _np_helper.set_omp_threads(ctypes.c_int(int(n)))\n if threads == 0:\n warnings.warn('OpenMP is not available. 
'\n 'Setting omp_threads to %s has no effects.' % n)\n return threads\n else:\n _np_helper.get_omp_threads.restype = ctypes.c_int\n return _np_helper.get_omp_threads()\n\nclass with_omp_threads(object):\n '''Using this macro to create a temporary context in which the number of\n OpenMP threads are set to the required value. When the program exits the\n context, the number OpenMP threads will be restored.\n\n Args:\n nthreads : int\n\n Examples:\n\n >>> from pyscf import lib\n >>> print(lib.num_threads())\n 8\n >>> with lib.with_omp_threads(2):\n ... print(lib.num_threads())\n 2\n >>> print(lib.num_threads())\n 8\n '''\n def __init__(self, nthreads=None):\n self.nthreads = nthreads\n self.sys_threads = None\n def __enter__(self):\n if self.nthreads is not None and self.nthreads >= 1:\n self.sys_threads = num_threads()\n num_threads(self.nthreads)\n return self\n def __exit__(self, type, value, traceback):\n if self.sys_threads is not None:\n num_threads(self.sys_threads)\n\n\ndef c_int_arr(m):\n npm = numpy.array(m).flatten('C')\n arr = (ctypes.c_int * npm.size)(*npm)\n # cannot return LP_c_double class,\n #Xreturn npm.ctypes.data_as(c_int_p), which destructs npm before return\n return arr\ndef f_int_arr(m):\n npm = numpy.array(m).flatten('F')\n arr = (ctypes.c_int * npm.size)(*npm)\n return arr\ndef c_double_arr(m):\n npm = numpy.array(m).flatten('C')\n arr = (ctypes.c_double * npm.size)(*npm)\n return arr\ndef f_double_arr(m):\n npm = numpy.array(m).flatten('F')\n arr = (ctypes.c_double * npm.size)(*npm)\n return arr\n\n\ndef member(test, x, lst):\n for l in lst:\n if test(x, l):\n return True\n return False\n\ndef remove_dup(test, lst, from_end=False):\n if test is None:\n return set(lst)\n else:\n if from_end:\n lst = list(reversed(lst))\n seen = []\n for l in lst:\n if not member(test, l, seen):\n seen.append(l)\n return seen\n\ndef remove_if(test, lst):\n return [x for x in lst if not test(x)]\n\ndef find_if(test, lst):\n for l in lst:\n if test(l):\n return l\n raise ValueError('No element of the given list matches the test condition.')\n\ndef arg_first_match(test, lst):\n for i,x in enumerate(lst):\n if test(x):\n return i\n raise ValueError('No element of the given list matches the test condition.')\n\ndef _balanced_partition(cum, ntasks):\n segsize = float(cum[-1]) / ntasks\n bounds = numpy.arange(ntasks+1) * segsize\n displs = abs(bounds[:,None] - cum).argmin(axis=1)\n return displs\n\ndef _blocksize_partition(cum, blocksize):\n n = len(cum) - 1\n displs = [0]\n if n == 0:\n return displs\n\n p0 = 0\n for i in range(1, n):\n if cum[i+1]-cum[p0] > blocksize:\n displs.append(i)\n p0 = i\n displs.append(n)\n return displs\n\ndef flatten(lst):\n '''flatten nested lists\n x[0] + x[1] + x[2] + ...\n\n Examples:\n\n >>> flatten([[0, 2], [1], [[9, 8, 7]]])\n [0, 2, 1, [9, 8, 7]]\n '''\n return list(itertools.chain.from_iterable(lst))\n\ndef prange(start, end, step):\n '''This function splits the number sequence between \"start\" and \"end\"\n using uniform \"step\" length. It yields the boundary (start, end) for each\n fragment.\n\n Examples:\n\n >>> for p0, p1 in lib.prange(0, 8, 2):\n ... print(p0, p1)\n (0, 2)\n (2, 4)\n (4, 6)\n (6, 8)\n '''\n if start < end:\n for i in range(start, end, step):\n yield i, min(i+step, end)\n\ndef prange_tril(start, stop, blocksize):\n '''Similar to :func:`prange`, yeilds start (p0) and end (p1) with the\n restriction p1*(p1+1)/2-p0*(p0+1)/2 < blocksize\n\n Examples:\n\n >>> for p0, p1 in lib.prange_tril(0, 10, 25):\n ... 
print(p0, p1)\n (0, 6)\n (6, 9)\n (9, 10)\n '''\n if start >= stop:\n return []\n idx = numpy.arange(start, stop+1)\n cum_costs = idx*(idx+1)//2 - start*(start+1)//2\n displs = [x+start for x in _blocksize_partition(cum_costs, blocksize)]\n return zip(displs[:-1], displs[1:])\n\n\ndef index_tril_to_pair(ij):\n '''Given tril-index ij, compute the pair indices (i,j) which satisfy\n ij = i * (i+1) / 2 + j\n '''\n i = (numpy.sqrt(2*ij+.25) - .5 + 1e-7).astype(int)\n j = ij - i*(i+1)//2\n return i, j\n\n\ndef tril_product(*iterables, **kwds):\n '''Cartesian product in lower-triangular form for multiple indices\n\n For a given list of indices (`iterables`), this function yields all\n indices such that the sub-indices given by the kwarg `tril_idx` satisfy a\n lower-triangular form. The lower-triangular form satisfies:\n\n .. math:: i[tril_idx[0]] >= i[tril_idx[1]] >= ... >= i[tril_idx[len(tril_idx)-1]]\n\n Args:\n *iterables: Variable length argument list of indices for the cartesian product\n **kwds: Arbitrary keyword arguments. Acceptable keywords include:\n repeat (int): Number of times to repeat the iterables\n tril_idx (array_like): Indices to put into lower-triangular form.\n\n Yields:\n product (tuple): Tuple in lower-triangular form.\n\n Examples:\n Specifying no `tril_idx` is equivalent to just a cartesian product.\n\n >>> list(tril_product(range(2), repeat=2))\n [(0, 0), (0, 1), (1, 0), (1, 1)]\n\n We can specify only sub-indices to satisfy a lower-triangular form:\n\n >>> list(tril_product(range(2), repeat=3, tril_idx=[1,2]))\n [(0, 0, 0), (0, 1, 0), (0, 1, 1), (1, 0, 0), (1, 1, 0), (1, 1, 1)]\n\n We specify all indices to satisfy a lower-triangular form, useful for iterating over\n the symmetry unique elements of occupied/virtual orbitals in a 3-particle operator:\n\n >>> list(tril_product(range(3), repeat=3, tril_idx=[0,1,2]))\n [(0, 0, 0), (1, 0, 0), (1, 1, 0), (1, 1, 1), (2, 0, 0), (2, 1, 0), (2, 1, 1), (2, 2, 0), (2, 2, 1), (2, 2, 2)]\n '''\n repeat = kwds.get('repeat', 1)\n tril_idx = kwds.get('tril_idx', [])\n niterables = len(iterables) * repeat\n ntril_idx = len(tril_idx)\n\n assert ntril_idx <= niterables, 'Cant have a greater number of tril indices than iterables!'\n if ntril_idx > 0:\n assert numpy.max(tril_idx) < niterables, 'Tril index out of bounds for %d iterables! idx = %s' % \\\n (niterables, tril_idx)\n for tup in itertools.product(*iterables, repeat=repeat):\n if ntril_idx == 0:\n yield tup\n continue\n\n if all([tup[tril_idx[i]] >= tup[tril_idx[i+1]] for i in range(ntril_idx-1)]):\n yield tup\n else:\n pass\n\ndef square_mat_in_trilu_indices(n):\n '''Return a n x n symmetric index matrix, in which the elements are the\n indices of the unique elements of a tril vector\n [0 1 3 ... ]\n [1 2 4 ... ]\n [3 4 5 ... ]\n [... ]\n '''\n idx = numpy.tril_indices(n)\n tril2sq = numpy.zeros((n,n), dtype=int)\n tril2sq[idx[0],idx[1]] = tril2sq[idx[1],idx[0]] = numpy.arange(n*(n+1)//2)\n return tril2sq\n\nclass capture_stdout(object):\n '''redirect all stdout (c printf & python print) into a string\n\n Examples:\n\n >>> import os\n >>> from pyscf import lib\n >>> with lib.capture_stdout as out:\n ... 
os.system('ls')\n >>> print(out.read())\n '''\n #TODO: handle stderr\n def __enter__(self):\n sys.stdout.flush()\n self._contents = None\n self.old_stdout_fileno = sys.stdout.fileno()\n self.bak_stdout_fd = os.dup(self.old_stdout_fileno)\n self.ftmp = tempfile.NamedTemporaryFile(dir=param.TMPDIR)\n os.dup2(self.ftmp.file.fileno(), self.old_stdout_fileno)\n return self\n def __exit__(self, type, value, traceback):\n sys.stdout.flush()\n self.ftmp.file.seek(0)\n self._contents = self.ftmp.file.read()\n self.ftmp.close()\n os.dup2(self.bak_stdout_fd, self.old_stdout_fileno)\n os.close(self.bak_stdout_fd)\n def read(self):\n if self._contents:\n return self._contents\n else:\n sys.stdout.flush()\n self.ftmp.file.seek(0)\n return self.ftmp.file.read()\nctypes_stdout = capture_stdout\n\nclass quite_run(object):\n '''capture all stdout (c printf & python print) but output nothing\n\n Examples:\n\n >>> import os\n >>> from pyscf import lib\n >>> with lib.quite_run():\n ... os.system('ls')\n '''\n def __enter__(self):\n sys.stdout.flush()\n #TODO: to handle the redirected stdout e.g. StringIO()\n self.old_stdout_fileno = sys.stdout.fileno()\n self.bak_stdout_fd = os.dup(self.old_stdout_fileno)\n self.fnull = open(os.devnull, 'wb')\n os.dup2(self.fnull.fileno(), self.old_stdout_fileno)\n def __exit__(self, type, value, traceback):\n sys.stdout.flush()\n os.dup2(self.bak_stdout_fd, self.old_stdout_fileno)\n self.fnull.close()\n\n\n# from pygeocoder\n# this decorator lets me use methods as both static and instance methods\n# In contrast to classmethod, when obj.function() is called, the first\n# argument is obj in omnimethod rather than obj.__class__ in classmethod\nclass omnimethod(object):\n def __init__(self, func):\n self.func = func\n\n def __get__(self, instance, owner):\n return functools.partial(self.func, instance)\n\n\nclass StreamObject(object):\n '''For most methods, there are three stream functions to pipe computing stream:\n\n 1 ``.set_`` function to update object attributes, eg\n ``mf = scf.RHF(mol).set(conv_tol=1e-5)`` is identical to proceed in two steps\n ``mf = scf.RHF(mol); mf.conv_tol=1e-5``\n\n 2 ``.run`` function to execute the kenerl function (the function arguments\n are passed to kernel function). If keyword arguments is given, it will first\n call ``.set`` function to update object attributes then execute the kernel\n function. Eg\n ``mf = scf.RHF(mol).run(dm_init, conv_tol=1e-5)`` is identical to three steps\n ``mf = scf.RHF(mol); mf.conv_tol=1e-5; mf.kernel(dm_init)``\n\n 3 ``.apply`` function to apply the given function/class to the current object\n (function arguments and keyword arguments are passed to the given function).\n Eg\n ``mol.apply(scf.RHF).run().apply(mcscf.CASSCF, 6, 4, frozen=4)`` is identical to\n ``mf = scf.RHF(mol); mf.kernel(); mcscf.CASSCF(mf, 6, 4, frozen=4)``\n '''\n\n verbose = 0\n stdout = sys.stdout\n _keys = set(['verbose', 'stdout'])\n\n def kernel(self, *args, **kwargs):\n '''\n Kernel function is the main driver of a method. Every method should\n define the kernel function as the entry of the calculation. Note the\n return value of kernel function is not strictly defined. It can be\n anything related to the method (such as the energy, the wave-function,\n the DFT mesh grids etc.).\n '''\n pass\n\n def pre_kernel(self, envs):\n '''\n A hook to be run before the main body of kernel function is executed.\n Internal variables are exposed to pre_kernel through the \"envs\"\n dictionary. 
Return value of pre_kernel function is not required.\n '''\n pass\n\n def post_kernel(self, envs):\n '''\n A hook to be run after the main body of the kernel function. Internal\n variables are exposed to post_kernel through the \"envs\" dictionary.\n Return value of post_kernel function is not required.\n '''\n pass\n\n def run(self, *args, **kwargs):\n '''\n Call the kernel function of current object. `args` will be passed\n to kernel function. `kwargs` will be used to update the attributes of\n current object. The return value of method run is the object itself.\n This allows a series of functions/methods to be executed in pipe.\n '''\n self.set(**kwargs)\n self.kernel(*args)\n return self\n\n def set(self, **kwargs):\n '''\n Update the attributes of the current object. The return value of\n method set is the object itself. This allows a series of\n functions/methods to be executed in pipe.\n '''\n #if getattr(self, '_keys', None):\n # for k,v in kwargs.items():\n # setattr(self, k, v)\n # if k not in self._keys:\n # sys.stderr.write('Warning: %s does not have attribute %s\\n'\n # % (self.__class__, k))\n #else:\n for k,v in kwargs.items():\n setattr(self, k, v)\n return self\n\n def apply(self, fn, *args, **kwargs):\n '''\n Apply the fn to rest arguments: return fn(*args, **kwargs). The\n return value of method set is the object itself. This allows a series\n of functions/methods to be executed in pipe.\n '''\n return fn(self, *args, **kwargs)\n\n# def _format_args(self, args, kwargs, kernel_kw_lst):\n# args1 = [kwargs.pop(k, v) for k, v in kernel_kw_lst]\n# return args + args1[len(args):], kwargs\n\n def check_sanity(self):\n '''\n Check input of class/object attributes, check whether a class method is\n overwritten. It does not check the attributes which are prefixed with\n \"_\". The\n return value of method set is the object itself. This allows a series\n of functions/methods to be executed in pipe.\n '''\n if (self.verbose > 0 and # logger.QUIET\n getattr(self, '_keys', None)):\n check_sanity(self, self._keys, self.stdout)\n return self\n\n def view(self, cls):\n '''New view of object with the same attributes.'''\n obj = cls.__new__(cls)\n obj.__dict__.update(self.__dict__)\n return obj\n\n_warn_once_registry = {}\ndef check_sanity(obj, keysref, stdout=sys.stdout):\n '''Check misinput of class attributes, check whether a class method is\n overwritten. 
It does not check the attributes which are prefixed with\n \"_\".\n '''\n objkeys = [x for x in obj.__dict__ if not x.startswith('_')]\n keysub = set(objkeys) - set(keysref)\n if keysub:\n class_attr = set(dir(obj.__class__))\n keyin = keysub.intersection(class_attr)\n if keyin:\n msg = ('Overwritten attributes %s of %s\\n' %\n (' '.join(keyin), obj.__class__))\n if msg not in _warn_once_registry:\n _warn_once_registry[msg] = 1\n sys.stderr.write(msg)\n if stdout is not sys.stdout:\n stdout.write(msg)\n keydiff = keysub - class_attr\n if keydiff:\n msg = ('%s does not have attributes %s\\n' %\n (obj.__class__, ' '.join(keydiff)))\n if msg not in _warn_once_registry:\n _warn_once_registry[msg] = 1\n sys.stderr.write(msg)\n if stdout is not sys.stdout:\n stdout.write(msg)\n return obj\n\ndef with_doc(doc):\n '''Use this decorator to add doc string for function\n\n @with_doc(doc)\n def fn:\n ...\n\n is equivalent to\n\n fn.__doc__ = doc\n '''\n def fn_with_doc(fn):\n fn.__doc__ = doc\n return fn\n return fn_with_doc\n\ndef alias(fn, alias_name=None):\n '''\n The statement \"fn1 = alias(fn)\" in a class is equivalent to define the\n following method in the class:\n\n .. code-block:: python\n def fn1(self, *args, **kwargs):\n return self.fn(*args, **kwargs)\n\n Using alias function instead of fn1 = fn because some methods may be\n overloaded in the child class. Using \"alias\" can make sure that the\n overloaded mehods were called when calling the aliased method.\n '''\n fname = fn.__name__\n def aliased_fn(self, *args, **kwargs):\n return getattr(self, fname)(*args, **kwargs)\n\n if alias_name is not None:\n aliased_fn.__name__ = alias_name\n\n doc_str = 'An alias to method %s\\n' % fname\n if sys.version_info >= (3,):\n from inspect import signature\n sig = str(signature(fn))\n if alias_name is None:\n doc_str += 'Function Signature: %s\\n' % sig\n else:\n doc_str += 'Function Signature: %s%s\\n' % (alias_name, sig)\n doc_str += '----------------------------------------\\n\\n'\n\n if fn.__doc__ is not None:\n doc_str += fn.__doc__\n\n aliased_fn.__doc__ = doc_str\n return aliased_fn\n\ndef class_as_method(cls):\n '''\n The statement \"fn1 = alias(Class)\" is equivalent to:\n\n .. code-block:: python\n def fn1(self, *args, **kwargs):\n return Class(self, *args, **kwargs)\n '''\n def fn(obj, *args, **kwargs):\n return cls(obj, *args, **kwargs)\n fn.__doc__ = cls.__doc__\n fn.__name__ = cls.__name__\n return fn\n\ndef import_as_method(fn, default_keys=None):\n '''\n The statement \"fn1 = import_as_method(fn, default_keys=['a','b'])\"\n in a class is equivalent to define the following method in the class:\n\n .. code-block:: python\n def fn1(self, ..., a=None, b=None, ...):\n if a is None: a = self.a\n if b is None: b = self.b\n return fn(..., a, b, ...)\n '''\n code_obj = fn.__code__\n# Add the default_keys as kwargs in CodeType is very complicated\n# new_code_obj = types.CodeType(code_obj.co_argcount+1,\n# code_obj.co_nlocals,\n# code_obj.co_stacksize,\n# code_obj.co_flags,\n# code_obj.co_code,\n# code_obj.co_consts,\n# code_obj.co_names,\n## As a class method, the first argument should be self\n# ('self',) + code_obj.co_varnames,\n# code_obj.co_filename,\n# code_obj.co_name,\n# code_obj.co_firstlineno,\n# code_obj.co_lnotab,\n# code_obj.co_freevars,\n# code_obj.co_cellvars)\n# clsmethod = types.FunctionType(new_code_obj, fn.__globals__)\n# clsmethod.__defaults__ = fn.__defaults__\n\n # exec is a bad solution here. 
But I didn't find a better way to\n # implement this for now.\n nargs = code_obj.co_argcount\n argnames = code_obj.co_varnames[:nargs]\n defaults = fn.__defaults__\n new_code_str = 'def clsmethod(self, %s):\\n' % (', '.join(argnames))\n if default_keys is not None:\n for k in default_keys:\n new_code_str += ' if %s is None: %s = self.%s\\n' % (k, k, k)\n if defaults is None:\n defaults = (None,) * nargs\n else:\n defaults = (None,) * (nargs-len(defaults)) + defaults\n new_code_str += ' return %s(%s)\\n' % (fn.__name__, ', '.join(argnames))\n exec(new_code_str, fn.__globals__, locals())\n\n clsmethod.__name__ = fn.__name__\n clsmethod.__defaults__ = defaults\n return clsmethod\n\ndef overwrite_mro(obj, mro):\n '''A hacky function to overwrite the __mro__ attribute'''\n class HackMRO(type):\n pass\n# Overwrite type.mro function so that Temp class can use the given mro\n HackMRO.mro = lambda self: mro\n #if sys.version_info < (3,):\n # class Temp(obj.__class__):\n # __metaclass__ = HackMRO\n #else:\n # class Temp(obj.__class__, metaclass=HackMRO):\n # pass\n Temp = HackMRO(obj.__class__.__name__, obj.__class__.__bases__, obj.__dict__)\n obj = Temp()\n# Delete mro function otherwise all subclass of Temp are not able to\n# resolve the right mro\n del(HackMRO.mro)\n return obj\n\ndef izip(*args):\n '''python2 izip == python3 zip'''\n if sys.version_info < (3,):\n return itertools.izip(*args)\n else:\n return zip(*args)\n\nfrom threading import Thread\nfrom multiprocessing import Queue, Process\nclass ProcessWithReturnValue(Process):\n def __init__(self, group=None, target=None, name=None, args=(),\n kwargs=None):\n self._q = Queue()\n self._e = None\n def qwrap(*args, **kwargs):\n try:\n self._q.put(target(*args, **kwargs))\n except BaseException as e:\n self._e = e\n raise e\n Process.__init__(self, group, qwrap, name, args, kwargs)\n def join(self):\n if self._e is not None:\n raise ProcessRuntimeError('Error on process %s' % self)\n else:\n Process.join(self)\n return self._q.get()\n get = join\n\nclass ProcessRuntimeError(RuntimeError):\n pass\n\nclass ThreadWithReturnValue(Thread):\n def __init__(self, group=None, target=None, name=None, args=(),\n kwargs=None):\n self._q = Queue()\n self._e = None\n def qwrap(*args, **kwargs):\n try:\n self._q.put(target(*args, **kwargs))\n except BaseException as e:\n self._e = e\n raise e\n Thread.__init__(self, group, qwrap, name, args, kwargs)\n def join(self):\n if self._e is not None:\n raise ThreadRuntimeError('Error on thread %s' % self)\n else:\n Thread.join(self)\n# Note: If the return value of target is huge, Queue.get may raise\n# SystemError: NULL result without error in PyObject_Call\n# It is because return value is cached somewhere by pickle but pickle is\n# unable to handle huge amount of data.\n return self._q.get()\n get = join\n\nclass ThreadWithTraceBack(Thread):\n def __init__(self, group=None, target=None, name=None, args=(),\n kwargs=None):\n self._e = None\n def qwrap(*args, **kwargs):\n try:\n target(*args, **kwargs)\n except BaseException as e:\n self._e = e\n raise e\n Thread.__init__(self, group, qwrap, name, args, kwargs)\n def join(self):\n if self._e is not None:\n raise ThreadRuntimeError('Error on thread %s' % self)\n else:\n Thread.join(self)\n\nclass ThreadRuntimeError(RuntimeError):\n pass\n\ndef background_thread(func, *args, **kwargs):\n '''applying function in background'''\n thread = ThreadWithReturnValue(target=func, args=args, kwargs=kwargs)\n thread.start()\n return thread\n\ndef background_process(func, *args, 
**kwargs):\n '''applying function in background'''\n thread = ProcessWithReturnValue(target=func, args=args, kwargs=kwargs)\n thread.start()\n return thread\n\nbg = background = bg_thread = background_thread\nbp = bg_process = background_process\n\nASYNC_IO = getattr(__config__, 'ASYNC_IO', True)\nclass call_in_background(object):\n '''Within this macro, function(s) can be executed asynchronously (the\n given functions are executed in background).\n\n Attributes:\n sync (bool): Whether to run in synchronized mode. The default value\n is False (asynchoronized mode).\n\n Examples:\n\n >>> with call_in_background(fun) as async_fun:\n ... async_fun(a, b) # == fun(a, b)\n ... do_something_else()\n\n >>> with call_in_background(fun1, fun2) as (afun1, afun2):\n ... afun2(a, b)\n ... do_something_else()\n ... afun2(a, b)\n ... do_something_else()\n ... afun1(a, b)\n ... do_something_else()\n '''\n\n def __init__(self, *fns, **kwargs):\n self.fns = fns\n self.handler = None\n self.sync = kwargs.get('sync', not ASYNC_IO)\n\n if h5py.version.version[:4] == '2.2.': # h5py-2.2.* has bug in threading mode\n # Disable back-ground mode\n def __enter__(self):\n if len(self.fns) == 1:\n return self.fns[0]\n else:\n return self.fns\n\n else:\n def __enter__(self):\n if self.sync or imp.lock_held():\n# Some modules like nosetests, coverage etc\n# python -m unittest test_xxx.py or nosetests test_xxx.py\n# hang when Python multi-threading was used in the import stage due to (Python\n# import lock) bug in the threading module. See also\n# https://github.com/paramiko/paramiko/issues/104\n# https://docs.python.org/2/library/threading.html#importing-in-threaded-code\n# Disable the asynchoronous mode for safe importing\n def def_async_fn(fn):\n return fn\n\n else:\n # Enable back-ground mode\n def def_async_fn(fn):\n def async_fn(*args, **kwargs):\n if self.handler is not None:\n self.handler.join()\n self.handler = ThreadWithTraceBack(target=fn, args=args,\n kwargs=kwargs)\n self.handler.start()\n return self.handler\n return async_fn\n\n if len(self.fns) == 1:\n return def_async_fn(self.fns[0])\n else:\n return [def_async_fn(fn) for fn in self.fns]\n\n def __exit__(self, type, value, traceback):\n if self.handler is not None:\n self.handler.join()\n\n\nclass H5TmpFile(h5py.File):\n '''Create and return an HDF5 temporary file.\n\n Kwargs:\n filename : str or None\n If a string is given, an HDF5 file of the given filename will be\n created. The temporary file will exist even if the H5TmpFile\n object is released. If nothing is specified, the HDF5 temporary\n file will be deleted when the H5TmpFile object is released.\n\n The return object is an h5py.File object. 
The file will be automatically\n deleted when it is closed or the object is released (unless filename is\n specified).\n\n Examples:\n\n >>> from pyscf import lib\n >>> ftmp = lib.H5TmpFile()\n '''\n def __init__(self, filename=None, *args, **kwargs):\n if filename is None:\n tmpfile = tempfile.NamedTemporaryFile(dir=param.TMPDIR)\n filename = tmpfile.name\n h5py.File.__init__(self, filename, *args, **kwargs)\n#FIXME: Does GC flush/close the HDF5 file when releasing the resource?\n# To make HDF5 file reusable, file has to be closed or flushed\n def __del__(self):\n try:\n self.close()\n except ValueError: # if close() is called twice\n pass\n\ndef fingerprint(a):\n '''Fingerprint of numpy array'''\n a = numpy.asarray(a)\n return numpy.dot(numpy.cos(numpy.arange(a.size)), a.ravel())\nfinger = fingerprint\n\n\ndef ndpointer(*args, **kwargs):\n base = numpy.ctypeslib.ndpointer(*args, **kwargs)\n\n @classmethod\n def from_param(cls, obj):\n if obj is None:\n return obj\n return base.from_param(obj)\n return type(base.__name__, (base,), {'from_param': from_param})\n\n\n# A tag to label the derived Scanner class\nclass SinglePointScanner: pass\nclass GradScanner:\n def __init__(self, g):\n self.__dict__.update(g.__dict__)\n self.base = g.base.as_scanner()\n @property\n def e_tot(self):\n return self.base.e_tot\n @property\n def converged(self):\n# Some base methods like MP2 does not have the attribute converged\n conv = getattr(self.base, 'converged', True)\n return conv\n\nclass temporary_env(object):\n '''Within the context of this macro, the attributes of the object are\n temporarily updated. When the program goes out of the scope of the\n context, the original value of each attribute will be restored.\n\n Examples:\n\n >>> with temporary_env(lib.param, LIGHT_SPEED=15., BOHR=2.5):\n ... print(lib.param.LIGHT_SPEED, lib.param.BOHR)\n 15. 2.5\n >>> print(lib.param.LIGHT_SPEED, lib.param.BOHR)\n 137.03599967994 0.52917721092\n '''\n def __init__(self, obj, **kwargs):\n self.obj = obj\n\n # Should I skip the keys which are not presented in obj?\n #keys = [key for key in kwargs.keys() if hasattr(obj, key)]\n #self.env_bak = [(key, getattr(obj, key, 'TO_DEL')) for key in keys]\n #self.env_new = [(key, kwargs[key]) for key in keys]\n\n self.env_bak = [(key, getattr(obj, key, 'TO_DEL')) for key in kwargs]\n self.env_new = [(key, kwargs[key]) for key in kwargs]\n\n def __enter__(self):\n for k, v in self.env_new:\n setattr(self.obj, k, v)\n return self\n\n def __exit__(self, type, value, traceback):\n for k, v in self.env_bak:\n if isinstance(v, str) and v == 'TO_DEL':\n delattr(self.obj, k)\n else:\n setattr(self.obj, k, v)\n\nclass light_speed(temporary_env):\n '''Within the context of this macro, the environment varialbe LIGHT_SPEED\n can be customized.\n\n Examples:\n\n >>> with light_speed(15.):\n ... print(lib.param.LIGHT_SPEED)\n 15.\n >>> print(lib.param.LIGHT_SPEED)\n 137.03599967994\n '''\n def __init__(self, c):\n temporary_env.__init__(self, param, LIGHT_SPEED=c)\n self.c = c\n def __enter__(self):\n temporary_env.__enter__(self)\n return self.c\n\n\nif __name__ == '__main__':\n for i,j in prange_tril(0, 90, 300):\n print(i, j, j*(j+1)//2-i*(i+1)//2)\n"
] |
[
[
"numpy.dot",
"numpy.allclose",
"numpy.einsum",
"numpy.asarray",
"numpy.empty_like",
"numpy.linalg.norm",
"numpy.empty",
"numpy.zeros",
"numpy.vstack"
],
[
"numpy.empty_like",
"numpy.dot",
"numpy.zeros",
"numpy.einsum"
],
[
"numpy.dot",
"numpy.einsum",
"numpy.asarray",
"numpy.empty_like",
"numpy.ascontiguousarray",
"numpy.arange",
"numpy.zeros_like",
"numpy.argpartition",
"numpy.argsort",
"numpy.zeros",
"numpy.sum",
"numpy.empty"
],
[
"numpy.sqrt",
"numpy.tril_indices",
"numpy.arange",
"numpy.asarray",
"numpy.max",
"numpy.ctypeslib.ndpointer",
"numpy.array",
"numpy.zeros",
"numpy.ctypeslib.load_library"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [
"1.10",
"1.12",
"1.11",
"1.19",
"1.24",
"1.13",
"1.16",
"1.9",
"1.18",
"1.23",
"1.21",
"1.22",
"1.20",
"1.7",
"1.15",
"1.14",
"1.17",
"1.8"
],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
zgyangleo/GeneralImageProcessing
|
[
"4fe3a7986095040fa1a741fd5e4728aaf9056b6e",
"4fe3a7986095040fa1a741fd5e4728aaf9056b6e"
] |
[
"Category_11_20/code_py/emboss_filter.py",
"Category_11_20/code_py/different_filter.py"
] |
[
"import cv2\nimport numpy as np\n\n# Gray scale\ndef BGR2GRAY(img):\n\tb = img[:, :, 0].copy()\n\tg = img[:, :, 1].copy()\n\tr = img[:, :, 2].copy()\n\n\t# Gray scale\n\tout = 0.2126 * r + 0.7152 * g + 0.0722 * b\n\tout = out.astype(np.uint8)\n\n\treturn out\n\n# emboss filter\ndef emboss_filter(img, K_size=3):\n\tif len(img.shape) == 3:\n\t\tH, W, C = img.shape\n\telse:\n\t\timg = np.expand_dims(img, axis=-1)\n\t\tH, W, C = img.shape\n\n\t# zero padding\n\tpad = K_size // 2\n\tout = np.zeros((H + pad * 2, W + pad * 2), dtype=np.float)\n\tout[pad: pad + H, pad: pad + W] = gray.copy().astype(np.float)\n\ttmp = out.copy()\n\n\t# emboss kernel\n\tK = [[-2., -1., 0.],[-1., 1., 1.], [0., 1., 2.]]\n\n\t# filtering\n\tfor y in range(H):\n\t\tfor x in range(W):\n\t\t\tout[pad + y, pad + x] = np.sum(K * (tmp[y: y + K_size, x: x + K_size]))\n\n\tout = np.clip(out, 0, 255)\n\tout = out[pad: pad + H, pad: pad + W].astype(np.uint8)\n\n\treturn out\n\n\n# Read image\nimg = cv2.imread(\"../hfut.jpg\").astype(np.float)\n\n# grayscale\ngray = BGR2GRAY(img)\n\n# emboss filtering\nout = emboss_filter(gray, K_size=3)\n\n\n# Save result\ncv2.imwrite(\"../result_image/emboss_filter.jpg\", out)\ncv2.imshow(\"result\", out)\ncv2.waitKey(0)\ncv2.destroyAllWindows()\n",
"import cv2\nimport numpy as np\n\n# Gray scale\ndef BGR2GRAY(img):\n\tb = img[:, :, 0].copy()\n\tg = img[:, :, 1].copy()\n\tr = img[:, :, 2].copy()\n\n\t# Gray scale\n\tout = 0.2126 * r + 0.7152 * g + 0.0722 * b\n\tout = out.astype(np.uint8)\n\n\treturn out\n\n# different filter\ndef different_filter(img, K_size=3):\n\tif len(img.shape) == 3:\n\t\tH, W, C=img.shape\n\telse:\n\t\timg = np.expand_dims(img, axis=-1)\n\t\tH, W, C = img.shape\n\n\t# Zero padding\n\tpad = K_size // 2\n\tout = np.zeros((H + pad * 2, W + pad * 2), dtype=np.float)\n\tout[pad: pad + H, pad: pad + W] = gray.copy().astype(np.float)\n\ttmp = out.copy()\n\n\tout_v = out.copy()\n\tout_h = out.copy()\n\n\t# vertical kernel\n\tKv = [[0., -1., 0.],[0., 1., 0.],[0., 0., 0.]]\n\t# horizontal kernel\n\tKh = [[0., 0., 0.],[-1., 1., 0.], [0., 0., 0.]]\n\n\t# filtering\n\tfor y in range(H):\n\t\tfor x in range(W):\n\t\t\tout_v[pad + y, pad + x] = np.sum(Kv * (tmp[y: y + K_size, x: x + K_size]))\n\t\t\tout_h[pad + y, pad + x] = np.sum(Kh * (tmp[y: y + K_size, x: x + K_size]))\n\n\tout_v = np.clip(out_v, 0, 255)\n\tout_h = np.clip(out_h, 0, 255)\n\n\tout_v = out_v[pad: pad + H, pad: pad + W].astype(np.uint8)\n\tout_h = out_h[pad: pad + H, pad: pad + W].astype(np.uint8)\n\n\treturn out_v, out_h\n\n# Read image\nimg = cv2.imread(\"../hfut.jpg\").astype(np.float)\n\n# grayscale\ngray = BGR2GRAY(img)\n\n# different filtering\nout_v, out_h = different_filter(gray, K_size=3)\n\n\n\n# Save result\ncv2.imwrite(\"../result_image/different_filter_v.jpg\", out_v)\ncv2.imshow(\"result_v\", out_v)\nwhile cv2.waitKey(100) != 27:# loop if not get ESC\n if cv2.getWindowProperty('result_v',cv2.WND_PROP_VISIBLE) <= 0:\n break\ncv2.destroyWindow('result_v')\n\ncv2.imwrite(\"../result_image/different_filter_h.jpg\", out_h)\ncv2.imshow(\"result_h\", out_h)\n# loop if not get ESC or click x\nwhile cv2.waitKey(100) != 27:\n if cv2.getWindowProperty('result_h',cv2.WND_PROP_VISIBLE) <= 0:\n break\ncv2.destroyWindow('result_h')\ncv2.destroyAllWindows()\n\n\n\n"
] |
[
[
"numpy.expand_dims",
"numpy.zeros",
"numpy.sum",
"numpy.clip"
],
[
"numpy.expand_dims",
"numpy.zeros",
"numpy.sum",
"numpy.clip"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
xxxsssyyy/DeepNER
|
[
"b8686d0605f1df88a3ac1885052bd2dbdc583204"
] |
[
"layers/transformer.py"
] |
[
"# encoding = utf8\n# this file is copy from: https://github.com/DongjunLee/transformer-tensorflow\nimport tensorflow as tf\n\nimport sys\nsys.path.append('../')\n\nfrom layers.attention import Attention\n\n\nclass FFN:\n \"\"\"FFN class (Position-wise Feed-Forward Networks)\"\"\"\n def __init__(self,\n w1_dim=200,\n w2_dim=100,\n dropout=0.1):\n\n self.w1_dim = w1_dim\n self.w2_dim = w2_dim\n self.dropout = dropout\n\n def dense_relu_dense(self, inputs):\n output = tf.layers.dense(inputs, self.w1_dim, activation=tf.nn.relu)\n output =tf.layers.dense(output, self.w2_dim)\n\n return tf.nn.dropout(output, 1.0 - self.dropout)\n\n def conv_relu_conv(self):\n raise NotImplementedError(\"i will implement it!\")\n\n\nclass Encoder:\n \"\"\"Encoder class\"\"\"\n def __init__(self,\n num_layers=8,\n num_heads=8,\n linear_key_dim=50,\n linear_value_dim=50,\n model_dim=50,\n ffn_dim=50,\n max_seq_len=100,\n dropout=0.2,\n ):\n\n self.num_layers = num_layers\n self.num_heads = num_heads\n self.linear_key_dim = linear_key_dim\n self.linear_value_dim = linear_value_dim\n self.model_dim = model_dim\n self.ffn_dim = ffn_dim\n self.max_seq_len = max_seq_len\n self.dropout = dropout\n\n def build(self, encoder_inputs, key_masks):\n o1 = tf.identity(encoder_inputs) # reference passing\n\n for i in range(1, self.num_layers+1):\n with tf.variable_scope(f\"layer-{i}\"):\n o2 = self._add_and_norm(o1, self._self_attention(q=o1,\n k=o1,\n v=o1,\n key_masks=key_masks), num=1)\n o3 = self._add_and_norm(o2, self._positional_feed_forward(o2), num=2)\n o1 = tf.identity(o3)\n\n return o3\n\n def _self_attention(self, q, k, v, key_masks):\n with tf.variable_scope(\"self-attention\"):\n attention = Attention(num_heads=self.num_heads,\n masked=False,\n linear_key_dim=self.linear_key_dim,\n linear_value_dim=self.linear_value_dim,\n model_dim=self.model_dim,\n max_seq_len=self.max_seq_len,\n dropout=self.dropout,\n )\n #self.att = attention\n return attention.multi_head(q, k, v, key_masks)\n\n def _add_and_norm(self, x, sub_layer_x, num=0):\n with tf.variable_scope(f\"encoder-add-and-norm-{num}\"):\n # Layer Normalization with Residual connection\n return tf.contrib.layers.layer_norm(tf.add(x, sub_layer_x))\n\n def _positional_feed_forward(self, output):\n with tf.variable_scope(\"feed-forward\"):\n ffn = FFN(w1_dim=self.ffn_dim,\n w2_dim=self.model_dim,\n dropout=self.dropout)\n return ffn.dense_relu_dense(output)\n\n\nclass Decoder:\n \"\"\"Decoder class\"\"\"\n def __init__(self,\n num_layers=8,\n num_heads=8,\n linear_key_dim=50,\n linear_value_dim=50,\n model_dim=50,\n ffn_dim=50,\n dropout=0.2):\n\n self.num_layers = num_layers\n self.num_heads = num_heads\n self.linear_key_dim = linear_key_dim\n self.linear_value_dim = linear_value_dim\n self.model_dim = model_dim\n self.ffn_dim = ffn_dim\n self.dropout = dropout\n\n def build(self, decoder_inputs, encoder_outputs):\n o1 = tf.identity(decoder_inputs)\n\n for i in range(1, self.num_layers+1):\n with tf.variable_scope(f\"layer-{i}\"):\n o2 = self._add_and_norm(o1, self._masked_self_attention(q=o1,\n k=o1,\n v=o1), num=1)\n o3 = self._add_and_norm(o2, self._encoder_decoder_attention(q=o2,\n k=encoder_outputs,\n v=encoder_outputs), num=2)\n o4 = self._add_and_norm(o3, self._positional_feed_forward(o3), num=3)\n o1 = tf.identity(o4)\n\n return o4\n\n def _masked_self_attention(self, q, k, v):\n with tf.variable_scope(\"masked-self-attention\"):\n attention = Attention(num_heads=self.num_heads,\n masked=True, # Not implemented yet\n linear_key_dim=self.linear_key_dim,\n 
linear_value_dim=self.linear_value_dim,\n model_dim=self.model_dim,\n dropout=self.dropout)\n return attention.multi_head(q, k, v)\n\n def _add_and_norm(self, x, sub_layer_x, num=0):\n with tf.variable_scope(f\"decoder-add-and-norm-{num}\"):\n return tf.contrib.layers.layer_norm(tf.add(x, sub_layer_x)) # with Residual connection\n\n def _encoder_decoder_attention(self, q, k, v):\n with tf.variable_scope(\"encoder-decoder-attention\"):\n attention = Attention(num_heads=self.num_heads,\n masked=False,\n linear_key_dim=self.linear_key_dim,\n linear_value_dim=self.linear_value_dim,\n model_dim=self.model_dim,\n dropout=self.dropout)\n return attention.multi_head(q, k, v)\n\n def _positional_feed_forward(self, output):\n with tf.variable_scope(\"feed-forward\"):\n ffn = FFN(w1_dim=self.ffn_dim,\n w2_dim=self.model_dim,\n dropout=self.dropout)\n return ffn.dense_relu_dense(output)"
] |
[
[
"tensorflow.identity",
"tensorflow.layers.dense",
"tensorflow.add",
"tensorflow.variable_scope",
"tensorflow.nn.dropout"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
}
] |
selwyni/ottertune-1
|
[
"2a096eb7572a46f14522c5ff758aa7970fba81db"
] |
[
"server/analysis/constraints.py"
] |
[
"#\n# OtterTune - constraints.py\n#\n# Copyright (c) 2017-18, Carnegie Mellon University Database Group\n#\n'''\nCreated on Sep 8, 2016\n\n@author: dvanaken\n'''\n\nimport copy\nimport numpy as np\n\n\nclass ParamConstraintHelper(object):\n\n @property\n def num_categorical_params(self):\n return self.cat_param_indices_.shape[0]\n\n def __init__(self, params, scaler, encoder, init_flip_prob, flip_prob_decay):\n if 'inverse_transform' not in dir(scaler):\n raise Exception(\"Scaler object must provide function inverse_transform(X)\")\n if 'transform' not in dir(scaler):\n raise Exception(\"Scaler object must provide function transform(X)\")\n self.params_ = params\n self.scaler_ = scaler\n self.encoder_ = encoder\n self.init_flip_prob_ = init_flip_prob\n self.flip_prob_decay_ = flip_prob_decay\n cat_param_indices_ = []\n for i, param in enumerate(self.params_):\n if param.iscategorical:\n cat_param_indices_.append(i)\n self.cat_param_indices_ = np.array(cat_param_indices_)\n\n def apply_constraints(self, sample, scaled=True, rescale=True):\n conv_sample = self._handle_scaling(sample, scaled)\n\n if self.encoder_ is not None:\n n_values = self.encoder_.n_values\n cat_start_indices = self.encoder_.xform_start_indices\n current_idx = 0\n cat_offset = 0\n for (param, param_val) in zip(self.params_, conv_sample):\n if param.iscategorical and not param.isboolean:\n assert current_idx == cat_start_indices[cat_offset]\n nvals = n_values[cat_offset]\n\n cvals = conv_sample[current_idx:current_idx + nvals]\n cvals = np.array(np.arange(nvals) == np.argmax(cvals), dtype=float)\n assert np.sum(cvals) == 1\n conv_sample[current_idx:current_idx + nvals] = cvals\n\n cat_offset += 1\n current_idx += nvals\n else:\n if param.isboolean:\n pmin, pmax = 0, 1\n param_val = round(param_val)\n else:\n if param.true_range is not None:\n pmin, pmax = param.true_range\n else:\n true_vals = param.true_values\n assert true_vals is not None and len(true_vals) > 0, \\\n \"param={}\".format(param.name)\n pmin, pmax = true_vals[0], true_vals[-1]\n\n if param_val < pmin:\n param_val = pmin\n elif param_val > pmax:\n param_val = pmax\n conv_sample[current_idx] = param_val\n current_idx += 1\n conv_sample = self._handle_rescaling(conv_sample, rescale)\n return conv_sample\n\n def _handle_scaling(self, sample, scaled):\n if scaled:\n if sample.ndim == 1:\n sample = sample.reshape(1, -1)\n sample = self.scaler_.inverse_transform(sample).ravel()\n else:\n sample = np.array(sample)\n return sample\n\n def _handle_rescaling(self, sample, rescale):\n if rescale:\n if sample.ndim == 1:\n sample = sample.reshape(1, -1)\n return self.scaler_.transform(sample).ravel()\n return sample\n\n def get_valid_config(self, sample, scaled=True, rescale=True):\n conv_sample = self._handle_scaling(sample, scaled)\n\n for i, (param, param_val) in enumerate(zip(self.params_, conv_sample)):\n if param.isinteger:\n conv_sample[i] = round(param_val)\n\n conv_sample = self.apply_constraints(conv_sample,\n scaled=False,\n rescale=False)\n\n if conv_sample.ndim == 1:\n conv_sample = conv_sample.reshape(1, -1)\n if self.encoder_ is not None:\n conv_sample = self.encoder_.inverse_transform(conv_sample)\n\n conv_sample = self._handle_rescaling(conv_sample.squeeze(), rescale)\n return conv_sample\n\n def randomize_categorical_features(self, sample, scaled=True, rescale=True):\n n_cat_feats = self.cat_param_indices_.size\n if n_cat_feats == 0:\n return sample\n\n conv_sample = self._handle_scaling(sample, scaled)\n flips = np.zeros((n_cat_feats,), 
dtype=bool)\n\n # Always flip at least one categorical feature\n flips[0] = True\n\n # Flip the rest with decreasing probability\n p = self.init_flip_prob_\n for i in range(1, n_cat_feats):\n if np.random.rand() <= p:\n flips[i] = True\n p *= self.flip_prob_decay_\n\n flip_shuffle_indices = np.random.choice(np.arange(n_cat_feats),\n n_cat_feats,\n replace=False)\n flips = flips[flip_shuffle_indices]\n\n current_idx, cat_idx, flip_idx = 0, 0, 0\n for param in self.params_:\n if param.iscategorical:\n if param.isboolean:\n nvals = 1\n else:\n assert current_idx == self.encoder_.xform_start_indices[cat_idx]\n nvals = self.encoder_.n_values[cat_idx]\n cat_idx += 1\n flip = flips[flip_idx]\n if flip:\n current_val = conv_sample[current_idx:current_idx + nvals]\n assert np.all(np.logical_or(current_val == 0, current_val == 1)), \\\n \"{0}: value not 0/1: {1}\".format(param.name, current_val)\n if param.isboolean:\n current_val = current_val.squeeze()\n r = 1 if current_val == 0 else 0\n else:\n choices = np.arange(nvals)[current_val != 1]\n assert choices.size == nvals - 1\n r = np.zeros(nvals)\n r[np.random.choice(choices)] = 1\n assert np.sum(r) == 1\n conv_sample[current_idx:current_idx + nvals] = r\n\n current_idx += nvals\n flip_idx += 1\n else:\n current_idx += 1\n conv_sample = self._handle_rescaling(conv_sample, rescale)\n return conv_sample\n\n def get_numerical_mask(self):\n mask = []\n current_idx, cat_idx = 0, 0\n for param in self.params_:\n if param.iscategorical:\n if param.isboolean:\n mask.append(False)\n current_idx += 1\n else:\n assert current_idx == self.encoder_.xform_start_indices[cat_idx]\n nvals = self.encoder_.n_values[cat_idx]\n mask.extend([False for _ in range(nvals)])\n cat_idx += 1\n current_idx += nvals\n else:\n mask.append(True)\n current_idx += 1\n return np.array(mask)\n\n def get_combinations_size(self):\n if self.num_categorical_params == 0:\n return 0\n cat_count = 0\n current_idx, cat_idx = 0, 0\n for param in self.params_:\n if param.iscategorical:\n if param.isboolean:\n cat_count += 1\n current_idx += 1\n else:\n assert current_idx == self.encoder_.xform_start_indices[cat_idx]\n nvals = self.encoder_.n_values[cat_idx]\n cat_count += nvals\n cat_idx += 1\n current_idx += nvals\n else:\n current_idx += 1\n assert cat_count > 0\n return 2 ** cat_count\n\n def get_grid(self, max_size=2048):\n import itertools\n\n possible_combos = self.get_combinations_size()\n assert possible_combos > 0\n num_columns = int(np.log2(possible_combos))\n if possible_combos > max_size:\n # Grid too large so sample instead\n combo_grid = np.random.binomial(1, 0.5, (max_size, num_columns))\n else:\n # Get entire grid\n combo_grid = list(itertools.product([0, 1], repeat=num_columns))\n assert len(combo_grid) == possible_combos\n combo_grid = np.array(combo_grid)\n # Scale the grid\n cat_mask = ~self.get_numerical_mask()\n\n X_scaler_cat = copy.deepcopy(self.scaler_)\n X_scaler_cat.mean_ = X_scaler_cat.mean_[cat_mask]\n X_scaler_cat.scale_ = X_scaler_cat.scale_[cat_mask]\n X_scaler_cat.var_ = X_scaler_cat.var_[cat_mask]\n combo_grid = X_scaler_cat.transform(combo_grid)\n return combo_grid\n\n def merge_grid(self, combo_grid, numeric_param_conf):\n nrows = combo_grid.shape[0]\n ncols = combo_grid.shape[1] + numeric_param_conf.shape[0]\n data_grid = np.ones((nrows, ncols)) * np.nan\n\n num_mask = self.get_numerical_mask()\n assert num_mask.shape[0] == ncols\n combo_idx, conf_idx = 0, 0\n for i, isnumeric in enumerate(num_mask):\n if isnumeric:\n data_grid[:, i] = 
numeric_param_conf[conf_idx]\n conf_idx += 1\n else:\n data_grid[:, i] = combo_grid[:, combo_idx]\n combo_idx += 1\n assert np.all(np.isfinite(data_grid))\n return data_grid\n"
] |
[
[
"numpy.log2",
"numpy.isfinite",
"numpy.random.choice",
"numpy.arange",
"numpy.ones",
"numpy.logical_or",
"numpy.argmax",
"numpy.random.rand",
"numpy.random.binomial",
"numpy.array",
"numpy.zeros",
"numpy.sum"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
abeja-inc/honk
|
[
"fd9c774e18fb4a8124e9d0dc5415ec2f422041d8"
] |
[
"utils/model.py"
] |
[
"import hashlib\nimport os\nimport random\nimport re\nfrom enum import Enum\n\nimport librosa\nimport numpy as np\nimport torch\nimport torch.nn.functional as F\nfrom chainmap import ChainMap\nfrom torch import nn\nfrom torch.utils import data\n\nfrom .manage_audio import AudioPreprocessor\n\n\nclass SimpleCache(dict):\n def __init__(self, limit):\n super().__init__()\n self.limit = limit\n self.n_keys = 0\n\n def __setitem__(self, key, value):\n if key in self.keys():\n super().__setitem__(key, value)\n elif self.n_keys < self.limit:\n self.n_keys += 1\n super().__setitem__(key, value)\n return value\n\n\nclass ConfigType(Enum):\n CNN_TRAD_POOL2 = \"cnn-trad-pool2\" # default full model (TF variant)\n CNN_ONE_STRIDE1 = \"cnn-one-stride1\" # default compact model (TF variant)\n CNN_ONE_FPOOL3 = \"cnn-one-fpool3\"\n CNN_ONE_FSTRIDE4 = \"cnn-one-fstride4\"\n CNN_ONE_FSTRIDE8 = \"cnn-one-fstride8\"\n CNN_TPOOL2 = \"cnn-tpool2\"\n CNN_TPOOL3 = \"cnn-tpool3\"\n CNN_TSTRIDE2 = \"cnn-tstride2\"\n CNN_TSTRIDE4 = \"cnn-tstride4\"\n CNN_TSTRIDE8 = \"cnn-tstride8\"\n RES15 = \"res15\"\n RES26 = \"res26\"\n RES8 = \"res8\"\n RES15_NARROW = \"res15-narrow\"\n RES8_NARROW = \"res8-narrow\"\n RES26_NARROW = \"res26-narrow\"\n\n\ndef find_model(conf):\n if isinstance(conf, ConfigType):\n conf = conf.value\n if conf.startswith(\"res\"):\n return SpeechResModel\n else:\n return SpeechModel\n\n\ndef find_config(conf):\n if isinstance(conf, ConfigType):\n conf = conf.value\n return _configs[conf]\n\n\ndef truncated_normal(tensor, std_dev=0.01):\n tensor.zero_()\n tensor.normal_(std=std_dev)\n while torch.sum(torch.abs(tensor) > 2 * std_dev) > 0:\n t = tensor[torch.abs(tensor) > 2 * std_dev]\n t.zero_()\n tensor[torch.abs(tensor) > 2 * std_dev] = torch.normal(t, std=std_dev)\n\n\nclass SerializableModule(nn.Module):\n def __init__(self):\n super().__init__()\n\n def save(self, filename):\n torch.save(self.state_dict(), filename)\n\n def load(self, filename):\n self.load_state_dict(\n torch.load(filename, map_location=lambda storage, loc: storage)\n )\n\n\nclass SpeechResModel(SerializableModule):\n def __init__(self, config):\n super().__init__()\n n_labels = config[\"n_labels\"]\n n_maps = config[\"n_feature_maps\"]\n self.conv0 = nn.Conv2d(1, n_maps, (3, 3), padding=(1, 1), bias=False)\n if \"res_pool\" in config:\n self.pool = nn.AvgPool2d(config[\"res_pool\"])\n\n self.n_layers = n_layers = config[\"n_layers\"]\n dilation = config[\"use_dilation\"]\n if dilation:\n self.convs = [\n nn.Conv2d(\n n_maps,\n n_maps,\n (3, 3),\n padding=int(2 ** (i // 3)),\n dilation=int(2 ** (i // 3)),\n bias=False,\n )\n for i in range(n_layers)\n ]\n else:\n self.convs = [\n nn.Conv2d(n_maps, n_maps, (3, 3), padding=1, dilation=1, bias=False)\n for _ in range(n_layers)\n ]\n for i, conv in enumerate(self.convs):\n self.add_module(\"bn{}\".format(i + 1), nn.BatchNorm2d(n_maps, affine=False))\n self.add_module(\"conv{}\".format(i + 1), conv)\n self.output = nn.Linear(n_maps, n_labels)\n\n def forward(self, x):\n x = x.unsqueeze(1)\n for i in range(self.n_layers + 1):\n y = F.relu(getattr(self, \"conv{}\".format(i))(x))\n if i == 0:\n if hasattr(self, \"pool\"):\n y = self.pool(y)\n old_x = y\n if i > 0 and i % 2 == 0:\n x = y + old_x\n old_x = x\n else:\n x = y\n if i > 0:\n x = getattr(self, \"bn{}\".format(i))(x)\n x = x.view(x.size(0), x.size(1), -1) # shape: (batch, feats, o3)\n x = torch.mean(x, 2)\n return self.output(x)\n\n\nclass SpeechModel(SerializableModule):\n def __init__(self, config):\n super().__init__()\n 
n_labels = config[\"n_labels\"]\n n_featmaps1 = config[\"n_feature_maps1\"]\n\n conv1_size = config[\"conv1_size\"] # (time, frequency)\n conv1_pool = config[\"conv1_pool\"]\n conv1_stride = tuple(config[\"conv1_stride\"])\n dropout_prob = config[\"dropout_prob\"]\n width = config[\"width\"]\n height = config[\"height\"]\n self.conv1 = nn.Conv2d(1, n_featmaps1, conv1_size, stride=conv1_stride)\n tf_variant = config.get(\"tf_variant\")\n self.tf_variant = tf_variant\n if tf_variant:\n truncated_normal(self.conv1.weight.data)\n self.conv1.bias.data.zero_()\n self.pool1 = nn.MaxPool2d(conv1_pool)\n\n with torch.no_grad():\n x = torch.zeros(1, 1, height, width)\n x = self.pool1(self.conv1(x))\n conv_net_size = x.view(1, -1).size(1)\n last_size = conv_net_size\n\n if \"conv2_size\" in config:\n conv2_size = config[\"conv2_size\"]\n conv2_pool = config[\"conv2_pool\"]\n conv2_stride = tuple(config[\"conv2_stride\"])\n n_featmaps2 = config[\"n_feature_maps2\"]\n self.conv2 = nn.Conv2d(\n n_featmaps1, n_featmaps2, conv2_size, stride=conv2_stride\n )\n if tf_variant:\n truncated_normal(self.conv2.weight.data)\n self.conv2.bias.data.zero_()\n self.pool2 = nn.MaxPool2d(conv2_pool)\n x = self.pool2(self.conv2(x))\n conv_net_size = x.view(1, -1).size(1)\n last_size = conv_net_size\n if not tf_variant:\n self.lin = nn.Linear(conv_net_size, 32)\n\n if \"dnn1_size\" in config:\n dnn1_size = config[\"dnn1_size\"]\n last_size = dnn1_size\n if tf_variant:\n self.dnn1 = nn.Linear(conv_net_size, dnn1_size)\n truncated_normal(self.dnn1.weight.data)\n self.dnn1.bias.data.zero_()\n else:\n self.dnn1 = nn.Linear(32, dnn1_size)\n if \"dnn2_size\" in config:\n dnn2_size = config[\"dnn2_size\"]\n last_size = dnn2_size\n self.dnn2 = nn.Linear(dnn1_size, dnn2_size)\n if tf_variant:\n truncated_normal(self.dnn2.weight.data)\n self.dnn2.bias.data.zero_()\n self.output = nn.Linear(last_size, n_labels)\n if tf_variant:\n truncated_normal(self.output.weight.data)\n self.output.bias.data.zero_()\n self.dropout = nn.Dropout(dropout_prob)\n\n def forward(self, x):\n x = F.relu(self.conv1(x.unsqueeze(1))) # shape: (batch, channels, i1, o1)\n x = self.dropout(x)\n x = self.pool1(x)\n if hasattr(self, \"conv2\"):\n x = F.relu(self.conv2(x)) # shape: (batch, o1, i2, o2)\n x = self.dropout(x)\n x = self.pool2(x)\n x = x.view(x.size(0), -1) # shape: (batch, o3)\n if hasattr(self, \"lin\"):\n x = self.lin(x)\n if hasattr(self, \"dnn1\"):\n x = self.dnn1(x)\n if not self.tf_variant:\n x = F.relu(x)\n x = self.dropout(x)\n if hasattr(self, \"dnn2\"):\n x = self.dnn2(x)\n x = self.dropout(x)\n return self.output(x)\n\n\nclass DatasetType(Enum):\n TRAIN = 0\n DEV = 1\n TEST = 2\n\n\nclass SpeechDataset(data.Dataset):\n LABEL_SILENCE = \"__silence__\"\n LABEL_UNKNOWN = \"__unknown__\"\n\n def __init__(self, data, set_type, config):\n super().__init__()\n self.audio_files = list(data.keys())\n self.set_type = set_type\n self.audio_labels = list(data.values())\n config[\"bg_noise_files\"] = list(\n filter(lambda x: x.endswith(\"wav\"), config.get(\"bg_noise_files\", []))\n )\n self.bg_noise_audio = [\n librosa.core.load(file, sr=16000)[0] for file in config[\"bg_noise_files\"]\n ]\n self.unknown_prob = config[\"unknown_prob\"]\n self.silence_prob = config[\"silence_prob\"]\n self.noise_prob = config[\"noise_prob\"]\n self.input_length = config[\"input_length\"]\n self.timeshift_ms = config[\"timeshift_ms\"]\n self._audio_cache = SimpleCache(config[\"cache_size\"])\n self._file_cache = SimpleCache(config[\"cache_size\"])\n n_unk = 
len(list(filter(lambda x: x == 1, self.audio_labels)))\n self.n_silence = int(self.silence_prob * (len(self.audio_labels) - n_unk))\n self.audio_processor = AudioPreprocessor(\n n_mels=config[\"n_mels\"], n_dct_filters=config[\"n_dct_filters\"], hop_ms=10\n )\n self.audio_preprocess_type = config[\"audio_preprocess_type\"]\n\n @staticmethod\n def default_config():\n config = {}\n config[\"group_speakers_by_id\"] = True\n config[\"silence_prob\"] = 0.1\n config[\"noise_prob\"] = 0.8\n config[\"n_dct_filters\"] = 40\n config[\"input_length\"] = 16000\n config[\"n_mels\"] = 40\n config[\"timeshift_ms\"] = 100\n config[\"unknown_prob\"] = 0.1\n config[\"train_pct\"] = 80\n config[\"dev_pct\"] = 10\n config[\"test_pct\"] = 10\n config[\"wanted_words\"] = [\"command\", \"random\"]\n config[\"data_folder\"] = \"./data/speech_dataset\"\n config[\"audio_preprocess_type\"] = \"MFCCs\"\n return config\n\n def collate_fn(self, data):\n x = None\n y = []\n for audio_data, label in data:\n if self.audio_preprocess_type == \"MFCCs\":\n audio_tensor = torch.from_numpy(\n self.audio_processor.compute_mfccs(audio_data).reshape(1, -1, 40)\n )\n x = audio_tensor if x is None else torch.cat((x, audio_tensor), 0)\n elif self.audio_preprocess_type == \"PCEN\":\n audio_tensor = torch.from_numpy(np.expand_dims(audio_data, axis=0))\n audio_tensor = self.audio_processor.compute_pcen(audio_tensor)\n x = audio_tensor if x is None else torch.cat((x, audio_tensor), 0)\n y.append(label)\n return x, torch.tensor(y)\n\n def _timeshift_audio(self, data):\n shift = (16000 * self.timeshift_ms) // 1000\n shift = random.randint(-shift, shift)\n a = -min(0, shift)\n b = max(0, shift)\n data = np.pad(data, (a, b), \"constant\")\n return data[: len(data) - a] if a else data[b:]\n\n def load_audio(self, example, silence=False):\n if silence:\n example = \"__silence__\"\n if random.random() < 0.7 or not self.set_type == DatasetType.TRAIN:\n try:\n return self._audio_cache[example]\n except KeyError:\n pass\n in_len = self.input_length\n if self.bg_noise_audio:\n bg_noise = random.choice(self.bg_noise_audio)\n a = random.randint(0, len(bg_noise) - in_len - 1)\n bg_noise = bg_noise[a : a + in_len]\n else:\n bg_noise = np.zeros(in_len)\n\n if silence:\n data = np.zeros(in_len, dtype=np.float32)\n else:\n file_data = self._file_cache.get(example)\n data = (\n librosa.core.load(example, sr=16000)[0]\n if file_data is None\n else file_data\n )\n self._file_cache[example] = data\n data = np.pad(data, (0, max(0, in_len - len(data))), \"constant\")\n if self.set_type == DatasetType.TRAIN:\n data = self._timeshift_audio(data)\n\n if random.random() < self.noise_prob or silence:\n a = random.random() * 0.1\n data = np.clip(a * bg_noise + data, -1, 1)\n\n self._audio_cache[example] = data\n return data\n\n @classmethod\n def splits(cls, config):\n folder = config[\"data_folder\"]\n wanted_words = config[\"wanted_words\"]\n unknown_prob = config[\"unknown_prob\"]\n train_pct = config[\"train_pct\"]\n dev_pct = config[\"dev_pct\"]\n test_pct = config[\"test_pct\"]\n\n words = {word: i + 2 for i, word in enumerate(wanted_words)}\n words.update({cls.LABEL_SILENCE: 0, cls.LABEL_UNKNOWN: 1})\n sets = [{}, {}, {}]\n unknowns = [0] * 3\n bg_noise_files = []\n unknown_files = []\n\n for folder_name in os.listdir(folder):\n path_name = os.path.join(folder, folder_name)\n is_bg_noise = False\n if os.path.isfile(path_name):\n continue\n if folder_name in words:\n label = words[folder_name]\n elif folder_name == \"_background_noise_\":\n is_bg_noise = 
True\n else:\n label = words[cls.LABEL_UNKNOWN]\n\n for filename in os.listdir(path_name):\n wav_name = os.path.join(path_name, filename)\n if is_bg_noise and os.path.isfile(wav_name):\n bg_noise_files.append(wav_name)\n continue\n elif label == words[cls.LABEL_UNKNOWN]:\n unknown_files.append(wav_name)\n continue\n if config[\"group_speakers_by_id\"]:\n hashname = re.sub(r\"_nohash_.*$\", \"\", filename)\n max_no_wavs = 2 ** 27 - 1\n bucket = int(hashlib.sha1(hashname.encode()).hexdigest(), 16)\n bucket = (bucket % (max_no_wavs + 1)) * (100.0 / max_no_wavs)\n if bucket < dev_pct:\n tag = DatasetType.DEV\n elif bucket < test_pct + dev_pct:\n tag = DatasetType.TEST\n else:\n tag = DatasetType.TRAIN\n sets[tag.value][wav_name] = label\n\n for tag in range(len(sets)):\n unknowns[tag] = int(unknown_prob * len(sets[tag]))\n random.shuffle(unknown_files)\n a = 0\n for i, dataset in enumerate(sets):\n b = a + unknowns[i]\n unk_dict = {u: words[cls.LABEL_UNKNOWN] for u in unknown_files[a:b]}\n dataset.update(unk_dict)\n a = b\n\n train_cfg = ChainMap(dict(bg_noise_files=bg_noise_files), config)\n test_cfg = ChainMap(dict(bg_noise_files=bg_noise_files, noise_prob=0), config)\n datasets = (\n cls(sets[0], DatasetType.TRAIN, train_cfg),\n cls(sets[1], DatasetType.DEV, test_cfg),\n cls(sets[2], DatasetType.TEST, test_cfg),\n )\n return datasets\n\n def __getitem__(self, index):\n if index >= len(self.audio_labels):\n return self.load_audio(None, silence=True), 0\n return self.load_audio(self.audio_files[index]), self.audio_labels[index]\n\n def __len__(self):\n return len(self.audio_labels) + self.n_silence\n\n\n_configs = {\n ConfigType.CNN_TRAD_POOL2.value: dict(\n dropout_prob=0.5,\n height=101,\n width=40,\n n_labels=4,\n n_feature_maps1=64,\n n_feature_maps2=64,\n conv1_size=(20, 8),\n conv2_size=(10, 4),\n conv1_pool=(2, 2),\n conv1_stride=(1, 1),\n conv2_stride=(1, 1),\n conv2_pool=(1, 1),\n tf_variant=True,\n ),\n ConfigType.CNN_ONE_STRIDE1.value: dict(\n dropout_prob=0.5,\n height=101,\n width=40,\n n_labels=4,\n n_feature_maps1=186,\n conv1_size=(101, 8),\n conv1_pool=(1, 1),\n conv1_stride=(1, 1),\n dnn1_size=128,\n dnn2_size=128,\n tf_variant=True,\n ),\n ConfigType.CNN_TSTRIDE2.value: dict(\n dropout_prob=0.5,\n height=101,\n width=40,\n n_labels=4,\n n_feature_maps1=78,\n n_feature_maps2=78,\n conv1_size=(16, 8),\n conv2_size=(9, 4),\n conv1_pool=(1, 3),\n conv1_stride=(2, 1),\n conv2_stride=(1, 1),\n conv2_pool=(1, 1),\n dnn1_size=128,\n dnn2_size=128,\n ),\n ConfigType.CNN_TSTRIDE4.value: dict(\n dropout_prob=0.5,\n height=101,\n width=40,\n n_labels=4,\n n_feature_maps1=100,\n n_feature_maps2=78,\n conv1_size=(16, 8),\n conv2_size=(5, 4),\n conv1_pool=(1, 3),\n conv1_stride=(4, 1),\n conv2_stride=(1, 1),\n conv2_pool=(1, 1),\n dnn1_size=128,\n dnn2_size=128,\n ),\n ConfigType.CNN_TSTRIDE8.value: dict(\n dropout_prob=0.5,\n height=101,\n width=40,\n n_labels=4,\n n_feature_maps1=126,\n n_feature_maps2=78,\n conv1_size=(16, 8),\n conv2_size=(5, 4),\n conv1_pool=(1, 3),\n conv1_stride=(8, 1),\n conv2_stride=(1, 1),\n conv2_pool=(1, 1),\n dnn1_size=128,\n dnn2_size=128,\n ),\n ConfigType.CNN_TPOOL2.value: dict(\n dropout_prob=0.5,\n height=101,\n width=40,\n n_labels=4,\n n_feature_maps1=94,\n n_feature_maps2=94,\n conv1_size=(21, 8),\n conv2_size=(6, 4),\n conv1_pool=(2, 3),\n conv1_stride=(1, 1),\n conv2_stride=(1, 1),\n conv2_pool=(1, 1),\n dnn1_size=128,\n dnn2_size=128,\n ),\n ConfigType.CNN_TPOOL3.value: dict(\n dropout_prob=0.5,\n height=101,\n width=40,\n n_labels=4,\n 
n_feature_maps1=94,\n n_feature_maps2=94,\n conv1_size=(15, 8),\n conv2_size=(6, 4),\n conv1_pool=(3, 3),\n conv1_stride=(1, 1),\n conv2_stride=(1, 1),\n conv2_pool=(1, 1),\n dnn1_size=128,\n dnn2_size=128,\n ),\n ConfigType.CNN_ONE_FPOOL3.value: dict(\n dropout_prob=0.5,\n height=101,\n width=40,\n n_labels=4,\n n_feature_maps1=54,\n conv1_size=(101, 8),\n conv1_pool=(1, 3),\n conv1_stride=(1, 1),\n dnn1_size=128,\n dnn2_size=128,\n ),\n ConfigType.CNN_ONE_FSTRIDE4.value: dict(\n dropout_prob=0.5,\n height=101,\n width=40,\n n_labels=4,\n n_feature_maps1=186,\n conv1_size=(101, 8),\n conv1_pool=(1, 1),\n conv1_stride=(1, 4),\n dnn1_size=128,\n dnn2_size=128,\n ),\n ConfigType.CNN_ONE_FSTRIDE8.value: dict(\n dropout_prob=0.5,\n height=101,\n width=40,\n n_labels=4,\n n_feature_maps1=336,\n conv1_size=(101, 8),\n conv1_pool=(1, 1),\n conv1_stride=(1, 8),\n dnn1_size=128,\n dnn2_size=128,\n ),\n ConfigType.RES15.value: dict(\n n_labels=12, use_dilation=True, n_layers=13, n_feature_maps=45\n ),\n ConfigType.RES8.value: dict(\n n_labels=12, n_layers=6, n_feature_maps=45, res_pool=(4, 3), use_dilation=False\n ),\n ConfigType.RES26.value: dict(\n n_labels=12, n_layers=24, n_feature_maps=45, res_pool=(2, 2), use_dilation=False\n ),\n ConfigType.RES15_NARROW.value: dict(\n n_labels=12, use_dilation=True, n_layers=13, n_feature_maps=19\n ),\n ConfigType.RES8_NARROW.value: dict(\n n_labels=12, n_layers=6, n_feature_maps=19, res_pool=(4, 3), use_dilation=False\n ),\n ConfigType.RES26_NARROW.value: dict(\n n_labels=12, n_layers=24, n_feature_maps=19, res_pool=(2, 2), use_dilation=False\n ),\n}\n"
] |
[
[
"torch.mean",
"torch.abs",
"numpy.expand_dims",
"torch.load",
"torch.zeros",
"torch.cat",
"torch.no_grad",
"torch.nn.Dropout",
"numpy.pad",
"numpy.clip",
"torch.tensor",
"torch.nn.functional.relu",
"numpy.zeros",
"torch.normal",
"torch.nn.Conv2d",
"torch.nn.Linear",
"torch.nn.AvgPool2d",
"torch.nn.BatchNorm2d",
"torch.nn.MaxPool2d"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
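The record above sizes its fully connected layers by pushing a dummy tensor through the convolution and pooling stages under `torch.no_grad()` instead of computing the flattened size by hand. A minimal, self-contained sketch of that pattern; the sizes mirror the `CNN_TRAD_POOL2` entry above, but everything else (variable names, the final print) is illustrative only:

```python
import torch
import torch.nn as nn

# Sizes taken from the CNN_TRAD_POOL2 config above; names are illustrative.
n_featmaps, conv_size, conv_stride, pool = 64, (20, 8), (1, 1), (2, 2)
height, width, n_labels = 101, 40, 4

conv1 = nn.Conv2d(1, n_featmaps, conv_size, stride=conv_stride)
pool1 = nn.MaxPool2d(pool)

# Probe the flattened feature size with a zero tensor.
with torch.no_grad():
    x = torch.zeros(1, 1, height, width)
    x = pool1(conv1(x))
    flat_size = x.view(1, -1).size(1)

output = nn.Linear(flat_size, n_labels)
print(flat_size, output)
```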
kirstenrichardson/rl-baselines-zoo
|
[
"fe7223f8b70fd384f8045690662332f5ae3bc350"
] |
[
"enjoy.py"
] |
[
"import os\nimport sys\nimport argparse\nimport pkg_resources\nimport importlib\nimport warnings\n\n# numpy warnings because of tensorflow\nwarnings.filterwarnings(\"ignore\", category=FutureWarning, module='tensorflow')\nwarnings.filterwarnings(\"ignore\", category=UserWarning, module='gym')\n\nimport gym\ntry:\n import pybullet_envs\nexcept ImportError:\n pybullet_envs = None\nimport numpy as np\ntry:\n import highway_env\nexcept ImportError:\n highway_env = None\nimport stable_baselines\nfrom stable_baselines.common import set_global_seeds\nfrom stable_baselines.common.vec_env import VecNormalize, VecFrameStack, VecEnv\n\nfrom utils import ALGOS, create_test_env, get_latest_run_id, get_saved_hyperparams, find_saved_model\n\n# Fix for breaking change in v2.6.0\nif pkg_resources.get_distribution(\"stable_baselines\").version >= \"2.6.0\":\n sys.modules['stable_baselines.ddpg.memory'] = stable_baselines.deepq.replay_buffer\n stable_baselines.deepq.replay_buffer.Memory = stable_baselines.deepq.replay_buffer.ReplayBuffer\n\ndef main():\n parser = argparse.ArgumentParser()\n parser.add_argument('--env', help='environment ID', type=str, default='CartPole-v1')\n parser.add_argument('-f', '--folder', help='Log folder', type=str, default='trained_agents')\n parser.add_argument('--algo', help='RL Algorithm', default='ppo2',\n type=str, required=False, choices=list(ALGOS.keys()))\n parser.add_argument('-n', '--n-timesteps', help='number of timesteps', default=1000,\n type=int)\n parser.add_argument('--n-envs', help='number of environments', default=1,\n type=int)\n parser.add_argument('--exp-id', help='Experiment ID (default: -1, no exp folder, 0: latest)', default=-1,\n type=int)\n parser.add_argument('--verbose', help='Verbose mode (0: no output, 1: INFO)', default=1,\n type=int)\n parser.add_argument('--no-render', action='store_true', default=False,\n help='Do not render the environment (useful for tests)')\n parser.add_argument('--deterministic', action='store_true', default=False,\n help='Use deterministic actions')\n parser.add_argument('--stochastic', action='store_true', default=False,\n help='Use stochastic actions (for DDPG/DQN/SAC)')\n parser.add_argument('--norm-reward', action='store_true', default=False,\n help='Normalize reward if applicable (trained with VecNormalize)')\n parser.add_argument('--seed', help='Random generator seed', type=int, default=0)\n parser.add_argument('--reward-log', help='Where to log reward', default='', type=str)\n parser.add_argument('--gym-packages', type=str, nargs='+', default=[], help='Additional external Gym environemnt package modules to import (e.g. 
gym_minigrid)')\n args = parser.parse_args()\n\n # Going through custom gym packages to let them register in the global registory\n for env_module in args.gym_packages:\n importlib.import_module(env_module)\n\n env_id = args.env\n algo = args.algo\n folder = args.folder\n\n if args.exp_id == 0:\n args.exp_id = get_latest_run_id(os.path.join(folder, algo), env_id)\n print('Loading latest experiment, id={}'.format(args.exp_id))\n\n # Sanity checks\n if args.exp_id > 0:\n log_path = os.path.join(folder, algo, '{}_{}'.format(env_id, args.exp_id))\n else:\n log_path = os.path.join(folder, algo)\n\n\n assert os.path.isdir(log_path), \"The {} folder was not found\".format(log_path)\n\n model_path = find_saved_model(algo, log_path, env_id)\n\n if algo in ['dqn', 'ddpg', 'sac', 'td3']:\n args.n_envs = 1\n\n set_global_seeds(args.seed)\n\n is_atari = 'NoFrameskip' in env_id\n\n stats_path = os.path.join(log_path, env_id)\n hyperparams, stats_path = get_saved_hyperparams(stats_path, norm_reward=args.norm_reward, test_mode=True)\n\n log_dir = args.reward_log if args.reward_log != '' else None\n\n env = create_test_env(env_id, n_envs=args.n_envs, is_atari=is_atari,\n stats_path=stats_path, seed=args.seed, log_dir=log_dir,\n should_render=not args.no_render,\n hyperparams=hyperparams)\n\n # ACER raises errors because the environment passed must have\n # the same number of environments as the model was trained on.\n load_env = None if algo == 'acer' else env\n model = ALGOS[algo].load(model_path, env=load_env)\n\n obs = env.reset()\n\n # Force deterministic for DQN, DDPG, SAC and HER (that is a wrapper around)\n deterministic = args.deterministic or algo in ['dqn', 'ddpg', 'sac', 'her', 'td3'] and not args.stochastic\n\n episode_reward = 0.0\n episode_rewards = []\n ep_len = 0\n # For HER, monitor success rate\n successes = []\n for _ in range(args.n_timesteps):\n action, _ = model.predict(obs, deterministic=deterministic)\n # Random Agent\n # action = [env.action_space.sample()]\n # Clip Action to avoid out of bound errors\n if isinstance(env.action_space, gym.spaces.Box):\n action = np.clip(action, env.action_space.low, env.action_space.high)\n obs, reward, done, infos = env.step(action)\n if not args.no_render:\n env.render('human')\n\n episode_reward += reward[0]\n ep_len += 1\n\n if args.n_envs == 1:\n # For atari the return reward is not the atari score\n # so we have to get it from the infos dict\n if is_atari and infos is not None and args.verbose >= 1:\n episode_infos = infos[0].get('episode')\n if episode_infos is not None:\n print(\"Atari Episode Score: {:.2f}\".format(episode_infos['r']))\n print(\"Atari Episode Length\", episode_infos['l'])\n\n if done and not is_atari and args.verbose > 0:\n # NOTE: for env using VecNormalize, the mean reward\n # is a normalized reward when `--norm_reward` flag is passed\n print(\"Episode Reward: {:.2f}\".format(episode_reward))\n print(\"Episode Length\", ep_len)\n episode_rewards.append(episode_reward)\n episode_reward = 0.0\n ep_len = 0\n\n # Reset also when the goal is achieved when using HER\n if done or infos[0].get('is_success', False):\n if args.algo == 'her' and args.verbose > 1:\n print(\"Success?\", infos[0].get('is_success', False))\n # Alternatively, you can add a check to wait for the end of the episode\n # if done:\n obs = env.reset()\n if args.algo == 'her':\n successes.append(infos[0].get('is_success', False))\n episode_reward, ep_len = 0.0, 0\n\n if args.verbose > 0 and len(successes) > 0:\n print(\"Success rate: {:.2f}%\".format(100 * 
np.mean(successes)))\n\n if args.verbose > 0 and len(episode_rewards) > 0:\n print(\"Mean reward: {:.2f}\".format(np.mean(episode_rewards)))\n print(\"Number episodes: {:.2f}\".format(len(episode_rewards))) #################### ADDED IN FOR EVAL PURPOSED\n\n # Workaround for https://github.com/openai/gym/issues/893\n if not args.no_render:\n if args.n_envs == 1 and 'Bullet' not in env_id and not is_atari and isinstance(env, VecEnv):\n # DummyVecEnv\n # Unwrap env\n while isinstance(env, VecNormalize) or isinstance(env, VecFrameStack):\n env = env.venv\n env.envs[0].env.close()\n else:\n # SubprocVecEnv\n env.close()\n\n\nif __name__ == '__main__':\n main()\n"
] |
[
[
"numpy.mean",
"numpy.clip"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
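`enjoy.py` above clips predicted actions to the bounds of a gym `Box` action space before stepping the environment, and summarizes episode returns with `np.mean`. A small sketch of just those two calls, with hypothetical `low`/`high` bounds standing in for a real action space:

```python
import numpy as np

# Hypothetical bounds standing in for env.action_space.low / env.action_space.high.
low = np.array([-1.0, -1.0])
high = np.array([1.0, 1.0])

action = np.array([1.7, -0.3])        # raw model output, possibly out of range
action = np.clip(action, low, high)   # same clipping enjoy.py applies before env.step()

episode_rewards = [1.0, 0.5, 2.0]     # stand-in for the collected returns
print(action, np.mean(episode_rewards))
```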
Koichi-Hatake/ImageNetDownloadScript
|
[
"5f50b8b18b1b5ad386e58da04936fb88b8703119"
] |
[
"remove_err_img.py"
] |
[
"#\n# Copyright (C) 2018 Koichi Hatakeyama\n# All rights reserved.\n#\n\n# This script remove following error type image files.\n# 1. Zero file size\n# 2. Flickr error image\n# 3. Invalid image\n# 4. Mono image\n\nimport argparse\nimport datetime\nimport hashlib\nimport numpy as np\nimport os\nimport re\nimport sys\nfrom glob import glob\nfrom pathlib import Path\nfrom PIL import Image\n\n# Pre defined constants\nIMG_DIR = 'master_images2'\nERR_IMG_DIR = 'err_imgs2'\nFLICKER_ERR_IMG_HASH = '880a7a58e05d3e83797f27573bb6d35c'\nFLICKER_ERR_IMG_SIZE = 2051\n\ndef process_err_img(err_img, dest_path):\n print(\" Move: \" + str(err_img) + \" -> \" + str(dest_path))\n err_img.rename(dest_path)\n\ndef parse_args():\n # Parse arguments\n parser = argparse.ArgumentParser(description='Remove error image files r0.01')\n parser.add_argument('--img_dir', '-i', default=IMG_DIR, help='Specify image directory')\n parser.add_argument('--err_img_dir', '-e', default=ERR_IMG_DIR, help='Specify destination directoryfor error file')\n\n return parser.parse_args()\n\ndef main():\n\n # Start time\n start_time = datetime.datetime.now()\n\n # Parse arguments\n args = parse_args()\n img_dir = args.img_dir\n err_img_dir = args.err_img_dir\n\n # Check image directory\n img_path = Path(img_dir)\n if(not img_path.exists()):\n print(\"No such dir: \" + str(img_path))\n sys.exit()\n\n # Check error image directory\n err_img_path = Path(err_img_dir)\n if(not err_img_path.exists()):\n err_img_path.mkdir(parents=True, exist_ok=True)\n \n # Check all image files\n img_files = img_path.glob(\"*.jpg\")\n #total = len(list(img_files))\n cur = 0\n for file in img_files:\n\n # Get size and set error image path to move\n img_size = os.path.getsize(file)\n err_img_path = Path(err_img_dir, file.name)\n\n # Progress\n cur += 1\n print(\"Processing: \" + str(cur))\n \n # 1. Zero file size\n if(img_size == 0):\n print(\"Found zero size image: \" + str(file))\n process_err_img(file, err_img_path)\n continue\n \n # 2. Flickr error image\n if(img_size == FLICKER_ERR_IMG_SIZE):\n with open(file, 'rb') as image:\n img_md5 = hashlib.md5(image.read()).hexdigest()\n image.close()\n if(img_md5 == FLICKER_ERR_IMG_HASH):\n print(\"Found Flickr error img: \" + str(file))\n process_err_img(file, err_img_path)\n continue\n\n # 3. Invalid image\n try:\n img = Image.open(file)\n im = np.array(img)\n img.close()\n except:\n print(\"Image file open error: \" + str(file))\n process_err_img(file, err_img_path)\n continue\n\n # 4. Mono image\n try:\n width, height, col = im.shape\n except:\n print(\"Image file shape error: \" + str(file) + \" : \" + str(im.shape))\n # This type of image file will be used after resizing.\n #process_err_img(file, err_img_path)\n continue\n\n if(col != 3):\n print(\"Image error(mono): \" + str(file) + \":\" + str(col))\n process_err_img(file, err_img_path)\n continue\n\n # Process time\n elapsed_time = datetime.datetime.now() - start_time\n print (\"############\\nElapsed time: \" + str(elapsed_time) + \"\\n############\\n\")\n\n \nif __name__ == '__main__':\n main()\n\n# EOF\n"
] |
[
[
"numpy.array"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
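The script above detects mono (single-channel) images by converting each PIL image to a NumPy array and unpacking its shape into width, height and channel count. A minimal sketch of that check on synthetic arrays, so it runs without any image files; the arrays stand in for `np.array(Image.open(file))`:

```python
import numpy as np

# Stand-ins for np.array(Image.open(file)): one RGB image, one grayscale image.
rgb = np.zeros((32, 32, 3), dtype=np.uint8)
gray = np.zeros((32, 32), dtype=np.uint8)

for name, im in [("rgb", rgb), ("gray", gray)]:
    try:
        height, width, channels = im.shape   # fails for 2-D (mono) arrays
    except ValueError:
        print(name, "has no channel axis:", im.shape)
        continue
    if channels != 3:
        print(name, "is not 3-channel:", channels)
    else:
        print(name, "looks fine:", im.shape)
```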
zhizhangxian/AutoML
|
[
"839790e9e131f2788202326d87a9264020833122"
] |
[
"modeling/deeplab.py"
] |
[
"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom modeling.sync_batchnorm.batchnorm import SynchronizedBatchNorm2d\nfrom modeling.aspp import build_aspp\nfrom modeling.decoder import build_decoder\nfrom modeling.backbone import build_backbone\n\n\nclass DeepLab(nn.Module):\n def __init__(self, backbone='resnet', output_stride=16, num_classes=19,\n sync_bn=True, freeze_bn=False, args=None, separate=False):\n super(DeepLab, self).__init__()\n if backbone == 'drn':\n output_stride = 8\n\n if sync_bn == True:\n BatchNorm = SynchronizedBatchNorm2d\n else:\n BatchNorm = nn.BatchNorm2d\n\n self.backbone = build_backbone(\n backbone, output_stride, BatchNorm, args)\n self.aspp = build_aspp(backbone, output_stride, BatchNorm, args, separate)\n self.decoder = build_decoder(\n num_classes, backbone, BatchNorm, args, separate)\n\n if freeze_bn:\n self.freeze_bn()\n\n def forward(self, input):\n x, low_level_feat = self.backbone(input)\n x = self.aspp(x)\n x = self.decoder(x, low_level_feat)\n x = F.interpolate(x, size=input.size()[\n 2:], mode='bilinear', align_corners=True)\n\n return x\n\n def freeze_bn(self):\n for m in self.modules():\n if isinstance(m, SynchronizedBatchNorm2d):\n m.eval()\n elif isinstance(m, nn.BatchNorm2d):\n m.eval()\n\n def get_1x_lr_params(self):\n modules = [self.backbone]\n for i in range(len(modules)):\n for m in modules[i].named_modules():\n if isinstance(m[1], nn.Conv2d) or isinstance(m[1], SynchronizedBatchNorm2d) \\\n or isinstance(m[1], nn.BatchNorm2d):\n for p in m[1].parameters():\n if p.requires_grad:\n yield p\n\n def get_10x_lr_params(self):\n modules = [self.aspp, self.decoder]\n for i in range(len(modules)):\n for m in modules[i].named_modules():\n if isinstance(m[1], nn.Conv2d) or isinstance(m[1], SynchronizedBatchNorm2d) \\\n or isinstance(m[1], nn.BatchNorm2d):\n for p in m[1].parameters():\n if p.requires_grad:\n yield p\n\n\nif __name__ == \"__main__\":\n model = DeepLab(backbone='mobilenet', output_stride=16)\n model.eval()\n input = torch.rand(1, 3, 513, 513)\n output = model(input)\n print(output.size())\n"
] |
[
[
"torch.rand"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
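`deeplab.py` above upsamples the decoder output back to the input resolution with `F.interpolate` and smoke-tests the network with a `torch.rand` input. A stripped-down sketch of that resize step, using a plain 1x1 convolution and an average pool in place of the real backbone/ASPP/decoder stack (those stand-ins are assumptions, not the repo's modules):

```python
import torch
import torch.nn as nn
import torch.nn.functional as F

num_classes = 19
head = nn.Conv2d(3, num_classes, kernel_size=1)   # stand-in for backbone + aspp + decoder

input = torch.rand(1, 3, 513, 513)                # same smoke test as the __main__ block
coarse = F.avg_pool2d(head(input), 16)            # pretend output_stride=16 feature map
logits = F.interpolate(coarse, size=input.size()[2:],
                       mode='bilinear', align_corners=True)
print(logits.size())                              # torch.Size([1, 19, 513, 513])
```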
yaojh01/Mastering-OpenCV-4-with-Python
|
[
"e8f83e314b8ed638edb6515967cfb24361b787af",
"e8f83e314b8ed638edb6515967cfb24361b787af",
"e8f83e314b8ed638edb6515967cfb24361b787af",
"e8f83e314b8ed638edb6515967cfb24361b787af",
"e8f83e314b8ed638edb6515967cfb24361b787af"
] |
[
"Chapter10/01-chapter-content/svm_handwritten_digits_recognition_preprocessing_hog_c_gamma.py",
"Chapter07/01-chapter-content/thresholding_otsu.py",
"Chapter08/01-chapter-content/contours_hu_moments.py",
"Chapter10/01-chapter-content/k_means_color_quantization.py",
"Chapter07/01-chapter-content/thresholding_introduction.py"
] |
[
"\"\"\"\nHandwritten digits recognition using SVM and HoG features and varying the number of\ntraining/testing images with pre-processing of the images. A grid-search on C and gamma is also carried out.\n\"\"\"\n\n# Import required packages:\nimport cv2\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom collections import defaultdict\n\n# Constants:\nSIZE_IMAGE = 20\nNUMBER_CLASSES = 10\n\n\ndef load_digits_and_labels(big_image):\n \"\"\" Returns all the digits from the 'big' image and creates the corresponding labels for each image\"\"\"\n\n # Load the 'big' image containing all the digits:\n digits_img = cv2.imread(big_image, 0)\n\n # Get all the digit images from the 'big' image:\n number_rows = digits_img.shape[1] / SIZE_IMAGE\n rows = np.vsplit(digits_img, digits_img.shape[0] / SIZE_IMAGE)\n\n digits = []\n for row in rows:\n row_cells = np.hsplit(row, number_rows)\n for digit in row_cells:\n digits.append(digit)\n digits = np.array(digits)\n\n # Create the labels for each image:\n labels = np.repeat(np.arange(NUMBER_CLASSES), len(digits) / NUMBER_CLASSES)\n return digits, labels\n\n\ndef deskew(img):\n \"\"\"Pre-processing of the images\"\"\"\n\n m = cv2.moments(img)\n if abs(m['mu02']) < 1e-2:\n return img.copy()\n skew = m['mu11'] / m['mu02']\n M = np.float32([[1, skew, -0.5 * SIZE_IMAGE * skew], [0, 1, 0]])\n img = cv2.warpAffine(img, M, (SIZE_IMAGE, SIZE_IMAGE), flags=cv2.WARP_INVERSE_MAP | cv2.INTER_LINEAR)\n return img\n\n\ndef svm_init(C=12.5, gamma=0.50625):\n \"\"\"Creates empty model and assigns main parameters\"\"\"\n\n model = cv2.ml.SVM_create()\n model.setGamma(gamma)\n model.setC(C)\n model.setKernel(cv2.ml.SVM_RBF)\n model.setType(cv2.ml.SVM_C_SVC)\n model.setTermCriteria((cv2.TERM_CRITERIA_MAX_ITER, 100, 1e-6))\n\n return model\n\n\ndef svm_train(model, samples, responses):\n \"\"\"Returns the trained SVM model based on the samples and responses\"\"\"\n\n model.train(samples, cv2.ml.ROW_SAMPLE, responses)\n return model\n\n\ndef svm_predict(model, samples):\n \"\"\"Returns the predictions\"\"\"\n\n return model.predict(samples)[1].ravel()\n\n\ndef svm_evaluate(model, samples, labels):\n \"\"\"Returns SVM evaluation (accuracy)\"\"\"\n\n predictions = svm_predict(model, samples)\n accuracy = (labels == predictions).mean()\n # print('Percentage Accuracy: %.2f %%' % (accuracy * 100))\n return accuracy * 100\n\n\ndef get_hog():\n \"\"\" Get hog descriptor \"\"\"\n\n # cv2.HOGDescriptor(winSize, blockSize, blockStride, cellSize, nbins, derivAperture, winSigma, histogramNormType,\n # L2HysThreshold, gammaCorrection, nlevels, signedGradient)\n hog = cv2.HOGDescriptor((SIZE_IMAGE, SIZE_IMAGE), (8, 8), (4, 4), (8, 8), 9, 1, -1, 0, 0.2, 1, 64, True)\n\n print(\"get descriptor size: {}\".format(hog.getDescriptorSize()))\n\n return hog\n\n\ndef raw_pixels(img):\n \"\"\"Return raw pixels as feature from the image\"\"\"\n\n return img.flatten()\n\n\n# Load all the digits and the corresponding labels:\ndigits, labels = load_digits_and_labels('digits.png')\n\n# Shuffle data\n# Constructs a random number generator:\nrand = np.random.RandomState(1234)\n# Randomly permute the sequence:\nshuffle = rand.permutation(len(digits))\ndigits, labels = digits[shuffle], labels[shuffle]\n\n# HoG feature descriptor:\nhog = get_hog()\n\n# Compute the descriptors for all the images.\n# In this case, the HoG descriptor is calculated\nhog_descriptors = []\nfor img in digits:\n hog_descriptors.append(hog.compute(deskew(img)))\nhog_descriptors = np.squeeze(hog_descriptors)\n\n# At this point we 
split the data into training and testing (50% for each one):\npartition = int(0.9 * len(hog_descriptors))\nhog_descriptors_train, hog_descriptors_test = np.split(hog_descriptors, [partition])\nlabels_train, labels_test = np.split(labels, [partition])\n\nprint('Training SVM model ...')\n# Create a dictionary to store the accuracy when testing:\nresults = defaultdict(list)\n\nfor C in [1, 10, 100, 1000]:\n for gamma in [0.1, 0.3, 0.5, 0.7, 0.9, 1.1, 1.3, 1.5]:\n model = svm_init(C, gamma)\n svm_train(model, hog_descriptors_train, labels_train)\n acc = svm_evaluate(model, hog_descriptors_test, labels_test)\n print(\" {}\".format(\"%.2f\" % acc))\n results[C].append(acc)\n\n# Create the dimensions of the figure and set title:\nfig = plt.figure(figsize=(10, 6))\nplt.suptitle(\"SVM handwritten digits recognition\", fontsize=14, fontweight='bold')\nfig.patch.set_facecolor('silver')\n\n# Show all results using matplotlib capabilities:\nax = plt.subplot(1, 1, 1)\nax.set_xlim(0, 1.5)\ndim = [0.1, 0.3, 0.5, 0.7, 0.9, 1.1, 1.3, 1.5]\n\nfor key in results:\n ax.plot(dim, results[key], linestyle='--', marker='o', label=str(key))\n\nplt.legend(loc='upper left', title=\"C\")\nplt.title('Accuracy of the SVM model varying both C and gamma')\nplt.xlabel(\"gamma\")\nplt.ylabel(\"accuracy\")\nplt.show()\n",
"\"\"\"\nOtsu's binarization algorithm\n\"\"\"\n\n# Import required packages:\nimport cv2\nfrom matplotlib import pyplot as plt\n\n\ndef show_img_with_matplotlib(color_img, title, pos):\n \"\"\"Shows an image using matplotlib capabilities\"\"\"\n\n # Convert BGR image to RGB\n img_RGB = color_img[:, :, ::-1]\n\n ax = plt.subplot(2, 2, pos)\n plt.imshow(img_RGB)\n plt.title(title)\n plt.axis('off')\n\n\ndef show_hist_with_matplotlib_gray(hist, title, pos, color, t=-1):\n \"\"\"Shows the histogram using matplotlib capabilities\"\"\"\n\n ax = plt.subplot(2, 2, pos)\n # plt.title(title)\n plt.xlabel(\"bins\")\n plt.ylabel(\"number of pixels\")\n plt.xlim([0, 256])\n plt.axvline(x=t, color='m', linestyle='--')\n plt.plot(hist, color=color)\n\n\n# Create the dimensions of the figure and set title and color:\nfig = plt.figure(figsize=(10, 10))\nplt.suptitle(\"Otsu's binarization algorithm\", fontsize=14, fontweight='bold')\nfig.patch.set_facecolor('silver')\n\n# Load the image and convert it to grayscale:\nimage = cv2.imread('leaf.png')\ngray_image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n\n# Calculate histogram (only for visualization):\nhist = cv2.calcHist([gray_image], [0], None, [256], [0, 256])\n\n# Threshold the image aplying Otsu's algorithm:\nret1, th1 = cv2.threshold(gray_image, 0, 255, cv2.THRESH_TRUNC + cv2.THRESH_OTSU)\n\n# Plot all the images:\nshow_img_with_matplotlib(image, \"image\", 1)\nshow_img_with_matplotlib(cv2.cvtColor(gray_image, cv2.COLOR_GRAY2BGR), \"gray img\", 2)\nshow_hist_with_matplotlib_gray(hist, \"grayscale histogram\", 3, 'm', ret1)\nshow_img_with_matplotlib(cv2.cvtColor(th1, cv2.COLOR_GRAY2BGR), \"Otsu's binarization\", 4)\n\n# Show the Figure:\nplt.show()\n",
"\"\"\"\nHu moments calculation\n\"\"\"\n\n# Import required packages:\nimport cv2\nfrom matplotlib import pyplot as plt\n\n\ndef centroid(moments):\n \"\"\"Returns centroid based on moments\"\"\"\n\n x_centroid = round(moments['m10'] / moments['m00'])\n y_centroid = round(moments['m01'] / moments['m00'])\n return x_centroid, y_centroid\n\n\ndef draw_contour_outline(img, cnts, color, thickness=1):\n \"\"\"Draws contours outlines of each contour\"\"\"\n\n for cnt in cnts:\n cv2.drawContours(img, [cnt], 0, color, thickness)\n\n\ndef show_img_with_matplotlib(color_img, title, pos):\n \"\"\"Shows an image using matplotlib capabilities\"\"\"\n\n # Convert BGR image to RGB\n img_RGB = color_img[:, :, ::-1]\n\n ax = plt.subplot(1, 1, pos)\n plt.imshow(img_RGB)\n plt.title(title)\n plt.axis('off')\n\n\n# Create the dimensions of the figure and set title:\nfig = plt.figure(figsize=(12, 5))\nplt.suptitle(\"Hu moments\", fontsize=14, fontweight='bold')\nfig.patch.set_facecolor('silver')\n\n# Load the image and convert it to grayscale:\nimage = cv2.imread(\"shape_features.png\")\ngray_image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n\n# Apply cv2.threshold() to get a binary image:\nret, thresh = cv2.threshold(gray_image, 70, 255, cv2.THRESH_BINARY)\n\n# Compute moments:\nM = cv2.moments(thresh, True)\nprint(\"moments: '{}'\".format(M))\n\n# Calculate the centroid of the contour based on moments:\nx, y = centroid(M)\n\n# Compute Hu moments:\nHuM = cv2.HuMoments(M)\nprint(\"Hu moments: '{}'\".format(HuM))\n\n# Find contours in the thresholded image:\n# Note: cv2.findContours() has been changed to return only the contours and the hierarchy\ncontours, hierarchy = cv2.findContours(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)\n\n# Compute moments:\nM2 = cv2.moments(contours[0])\nprint(\"moments: '{}'\".format(M2))\n\n# Calculate the centroid of the contour based on moments:\nx2, y2 = centroid(M2)\n\n# Compute Hu moments:\nHuM2 = cv2.HuMoments(M2)\nprint(\"Hu moments: '{}'\".format(HuM2))\n\n# Draw the outline of the detected contour:\ndraw_contour_outline(image, contours, (255, 0, 0), 10)\n\n# Draw the centroids (it should be the same point):\n# (make it big to see the difference)\ncv2.circle(image, (x, y), 25, (255, 0, 0), -1)\ncv2.circle(image, (x2, y2), 25, (0, 255, 0), -1)\nprint(\"('x','y'): ('{}','{}')\".format(x, y))\nprint(\"('x2','y2'): ('{}','{}')\".format(x2, y2))\n\n# Plot the images:\nshow_img_with_matplotlib(image, \"detected contour and centroid\", 1)\n\n# Show the Figure:\nplt.show()\n",
"\"\"\"\nK-means clustering algorithm applied to color quantization\n\"\"\"\n\n# Import required packages:\nimport numpy as np\nimport cv2\nfrom matplotlib import pyplot as plt\n\n\ndef show_img_with_matplotlib(color_img, title, pos):\n \"\"\"Shows an image using matplotlib capabilities\"\"\"\n\n # Convert BGR image to RGB\n img_RGB = color_img[:, :, ::-1]\n\n ax = plt.subplot(2, 3, pos)\n plt.imshow(img_RGB)\n plt.title(title)\n plt.axis('off')\n\n\ndef color_quantization(image, k):\n \"\"\"Performs color quantization using K-means clustering algorithm\"\"\"\n\n # Transform image into 'data':\n data = np.float32(image).reshape((-1, 3))\n # print(data.shape)\n\n # Define the algorithm termination criteria (the maximum number of iterations and/or the desired accuracy):\n # In this case the maximum number of iterations is set to 20 and epsilon = 1.0\n criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 20, 1.0)\n\n # Apply K-means clustering algorithm:\n ret, label, center = cv2.kmeans(data, k, None, criteria, 10, cv2.KMEANS_RANDOM_CENTERS)\n\n # At this point we can make the image with k colors\n # Convert center to uint8:\n center = np.uint8(center)\n # Replace pixel values with their center value:\n result = center[label.flatten()]\n result = result.reshape(img.shape)\n return result\n\n\n# Create the dimensions of the figure and set title:\nfig = plt.figure(figsize=(16, 8))\nplt.suptitle(\"Color quantization using K-means clustering algorithm\", fontsize=14, fontweight='bold')\nfig.patch.set_facecolor('silver')\n\n# Load BGR image:\nimg = cv2.imread('landscape_1.jpg')\n\n# Apply color quantization:\ncolor_3 = color_quantization(img, 3)\ncolor_5 = color_quantization(img, 5)\ncolor_10 = color_quantization(img, 10)\ncolor_20 = color_quantization(img, 20)\ncolor_40 = color_quantization(img, 40)\n\n# Plot the images:\nshow_img_with_matplotlib(img, \"original image\", 1)\nshow_img_with_matplotlib(color_3, \"color quantization (k = 3)\", 2)\nshow_img_with_matplotlib(color_5, \"color quantization (k = 5)\", 3)\nshow_img_with_matplotlib(color_10, \"color quantization (k = 10)\", 4)\nshow_img_with_matplotlib(color_20, \"color quantization (k = 20)\", 5)\nshow_img_with_matplotlib(color_40, \"color quantization (k = 40)\", 6)\n\n# Show the Figure:\nplt.show()\n",
"\"\"\"\nIntroduction to thresholding techniques\n\"\"\"\n\n# Import required packages:\nimport numpy as np\nimport cv2\nfrom matplotlib import pyplot as plt\n\n\ndef build_sample_image():\n \"\"\"Builds a sample image with 50x50 regions of different tones of gray\"\"\"\n\n # Define the different tones.\n # The end of interval is not included\n tones = np.arange(start=50, stop=300, step=50)\n # print(tones)\n\n # Initialize result with the first 50x50 region with 0-intensity level\n result = np.zeros((50, 50, 3), dtype=\"uint8\")\n\n # Build the image concatenating horizontally the regions:\n for tone in tones:\n img = np.ones((50, 50, 3), dtype=\"uint8\") * tone\n result = np.concatenate((result, img), axis=1)\n\n return result\n\n\ndef show_img_with_matplotlib(color_img, title, pos):\n \"\"\"Shows an image using matplotlib capabilities\"\"\"\n\n # Convert BGR image to RGB\n img_RGB = color_img[:, :, ::-1]\n\n ax = plt.subplot(7, 1, pos)\n plt.imshow(img_RGB)\n plt.title(title)\n plt.axis('off')\n\n\n# Create the dimensions of the figure and set title and color:\nfig = plt.figure(figsize=(6, 9))\nplt.suptitle(\"Thresholding introduction\", fontsize=14, fontweight='bold')\nfig.patch.set_facecolor('silver')\n\n# Load the image and convert it to grayscale:\nimage = build_sample_image()\ngray_image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n\n# Plot the grayscale images and the histograms:\nshow_img_with_matplotlib(cv2.cvtColor(gray_image, cv2.COLOR_GRAY2BGR),\n \"img with tones of gray - left to right: (0,50,100,150,200,250)\", 1)\n\n# Apply cv2.threshold() with different thresholding values:\nret1, thresh1 = cv2.threshold(gray_image, 0, 255, cv2.THRESH_BINARY)\nret2, thresh2 = cv2.threshold(gray_image, 50, 255, cv2.THRESH_BINARY)\nret3, thresh3 = cv2.threshold(gray_image, 100, 255, cv2.THRESH_BINARY)\nret4, thresh4 = cv2.threshold(gray_image, 150, 255, cv2.THRESH_BINARY)\nret5, thresh5 = cv2.threshold(gray_image, 200, 255, cv2.THRESH_BINARY)\nret6, thresh6 = cv2.threshold(gray_image, 250, 255, cv2.THRESH_BINARY)\n\n# Plot the images:\nshow_img_with_matplotlib(cv2.cvtColor(thresh1, cv2.COLOR_GRAY2BGR), \"threshold = 0\", 2)\nshow_img_with_matplotlib(cv2.cvtColor(thresh2, cv2.COLOR_GRAY2BGR), \"threshold = 50\", 3)\nshow_img_with_matplotlib(cv2.cvtColor(thresh3, cv2.COLOR_GRAY2BGR), \"threshold = 100\", 4)\nshow_img_with_matplotlib(cv2.cvtColor(thresh4, cv2.COLOR_GRAY2BGR), \"threshold = 150\", 5)\nshow_img_with_matplotlib(cv2.cvtColor(thresh5, cv2.COLOR_GRAY2BGR), \"threshold = 200\", 6)\nshow_img_with_matplotlib(cv2.cvtColor(thresh6, cv2.COLOR_GRAY2BGR), \"threshold = 250\", 7)\n\n# Show the Figure:\nplt.show()\n"
] |
[
[
"numpy.array",
"matplotlib.pyplot.legend",
"numpy.split",
"matplotlib.pyplot.title",
"numpy.arange",
"numpy.squeeze",
"numpy.vsplit",
"numpy.hsplit",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.subplot",
"numpy.float32",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.suptitle",
"numpy.random.RandomState",
"matplotlib.pyplot.show",
"matplotlib.pyplot.figure"
],
[
"matplotlib.pyplot.imshow",
"matplotlib.pyplot.axvline",
"matplotlib.pyplot.title",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.xlim",
"matplotlib.pyplot.axis",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.suptitle",
"matplotlib.pyplot.show",
"matplotlib.pyplot.figure"
],
[
"matplotlib.pyplot.imshow",
"matplotlib.pyplot.title",
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.axis",
"matplotlib.pyplot.suptitle",
"matplotlib.pyplot.show",
"matplotlib.pyplot.figure"
],
[
"matplotlib.pyplot.imshow",
"matplotlib.pyplot.title",
"numpy.uint8",
"matplotlib.pyplot.subplot",
"numpy.float32",
"matplotlib.pyplot.axis",
"matplotlib.pyplot.suptitle",
"matplotlib.pyplot.show",
"matplotlib.pyplot.figure"
],
[
"matplotlib.pyplot.imshow",
"matplotlib.pyplot.title",
"numpy.arange",
"numpy.ones",
"numpy.concatenate",
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.axis",
"matplotlib.pyplot.suptitle",
"numpy.zeros",
"matplotlib.pyplot.show",
"matplotlib.pyplot.figure"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
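Several of the scripts above share the same OpenCV k-means color quantization core: flatten the image to float32 pixel rows, run `cv2.kmeans`, then map each pixel to its uint8 cluster center. A compact sketch of that core on a random image, so it runs without the book's sample files (the random input is an assumption standing in for `cv2.imread(...)`):

```python
import numpy as np
import cv2

img = np.random.randint(0, 256, (64, 64, 3), dtype=np.uint8)  # stand-in for cv2.imread(...)

k = 5
data = np.float32(img).reshape((-1, 3))
criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 20, 1.0)
_, labels, centers = cv2.kmeans(data, k, None, criteria, 10, cv2.KMEANS_RANDOM_CENTERS)

centers = np.uint8(centers)                       # cluster centers as valid pixel values
quantized = centers[labels.flatten()].reshape(img.shape)
print(quantized.shape)
```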
JunHyungKang/SAROD_ICIP
|
[
"71585951f64dc1cc22ed72900eff81f747edec77"
] |
[
"Baseline_yolov3/utils/rl_datasets.py"
] |
[
"import glob\nimport random\nimport os\nimport sys\nimport numpy as np\nfrom PIL import Image\nimport torch\nimport torch.nn.functional as F\n\nfrom utils.augmentations import horisontal_flip\nfrom torch.utils.data import Dataset\nimport torchvision.transforms as transforms\n\n\ndef pad_to_square(img, pad_value):\n c, h, w = img.shape\n dim_diff = np.abs(h - w)\n # (upper / left) padding and (lower / right) padding\n pad1, pad2 = dim_diff // 2, dim_diff - dim_diff // 2\n # Determine padding\n pad = (0, 0, pad1, pad2) if h <= w else (pad1, pad2, 0, 0)\n # Add padding\n img = F.pad(img, pad, \"constant\", value=pad_value)\n\n return img, pad\n\n\ndef resize(image, size):\n image = F.interpolate(image.unsqueeze(0), size=size, mode=\"nearest\").squeeze(0)\n return image\n\n\ndef random_resize(images, min_size=288, max_size=448):\n new_size = random.sample(list(range(min_size, max_size + 1, 32)), 1)[0]\n images = F.interpolate(images, size=new_size, mode=\"nearest\")\n return images\n\n\nclass ImageFolder(Dataset):\n def __init__(self, folder_path, img_size=416):\n self.files = sorted(glob.glob(\"%s/*.*\" % folder_path))\n self.img_size = img_size\n\n def __getitem__(self, index):\n img_path = self.files[index % len(self.files)]\n # Extract image as PyTorch tensor\n img = transforms.ToTensor()(Image.open(img_path))\n # Pad to square resolution\n img, _ = pad_to_square(img, 0)\n # Resize\n img = resize(img, self.img_size)\n\n return img_path, img\n\n def __len__(self):\n return len(self.files)\n\n\nclass ListDataset(Dataset):\n def __init__(self, list_path, img_size=416, augment=True, multiscale=True, normalized_labels=True):\n with open(list_path, \"r\") as file:\n self.img_files = file.readlines()\n\n self.label_files = [\n path.replace(\"img\", \"labels\").replace(\".png\", \".txt\").replace(\".jpg\", \".txt\")\n for path in self.img_files\n ]\n self.img_size = img_size\n self.max_objects = 100\n self.augment = augment\n self.multiscale = multiscale\n self.normalized_labels = normalized_labels\n self.min_size = self.img_size - 3 * 32\n self.max_size = self.img_size + 3 * 32\n self.batch_count = 0\n\n def __getitem__(self, index):\n\n # ---------\n # Image\n # ---------\n\n img_path = self.img_files[index % len(self.img_files)].rstrip()\n\n # Extract image as PyTorch tensor\n img = transforms.ToTensor()(Image.open(img_path).convert('RGB'))\n\n # Handle images with less than three channels\n if len(img.shape) != 3:\n img = img.unsqueeze(0)\n img = img.expand((3, img.shape[1:]))\n\n _, h, w = img.shape\n h_factor, w_factor = (h, w) if self.normalized_labels else (1, 1)\n # Pad to square resolution\n img, pad = pad_to_square(img, 0)\n _, padded_h, padded_w = img.shape\n\n # ---------\n # Label\n # ---------\n\n label_path = self.label_files[index % len(self.img_files)].rstrip()\n\n targets = None\n if os.path.exists(label_path):\n boxes = torch.from_numpy(np.loadtxt(label_path).reshape(-1, 5))\n # Extract coordinates for unpadded + unscaled image\n x1 = w_factor * (boxes[:, 1] - boxes[:, 3] / 2)\n y1 = h_factor * (boxes[:, 2] - boxes[:, 4] / 2)\n x2 = w_factor * (boxes[:, 1] + boxes[:, 3] / 2)\n y2 = h_factor * (boxes[:, 2] + boxes[:, 4] / 2)\n # Adjust for added padding\n x1 += pad[0]\n y1 += pad[2]\n x2 += pad[1]\n y2 += pad[3]\n # Returns (x, y, w, h)\n boxes[:, 1] = ((x1 + x2) / 2) / padded_w\n boxes[:, 2] = ((y1 + y2) / 2) / padded_h\n boxes[:, 3] *= w_factor / padded_w\n boxes[:, 4] *= h_factor / padded_h\n\n targets = torch.zeros((len(boxes), 6))\n targets[:, 1:] = boxes\n\n # Apply 
augmentations\n if self.augment:\n if np.random.random() < 0.5:\n img, targets = horisontal_flip(img, targets)\n\n return img_path, img, targets\n\n def collate_fn(self, batch):\n paths, imgs, targets = list(zip(*batch))\n # Remove empty placeholder targets\n targets = [boxes for boxes in targets if boxes is not None]\n # Add sample index to targets\n for i, boxes in enumerate(targets):\n boxes[:, 0] = i\n targets = torch.cat(targets, 0)\n # Selects new image size every tenth batch\n if self.multiscale and self.batch_count % 10 == 0:\n self.img_size = random.choice(range(self.min_size, self.max_size + 1, 32))\n # Resize images to input shape\n imgs = torch.stack([resize(img, self.img_size) for img in imgs])\n self.batch_count += 1\n return paths, imgs, targets\n\n def __len__(self):\n return len(self.img_files)\n"
] |
[
[
"numpy.random.random",
"numpy.abs",
"torch.cat",
"numpy.loadtxt",
"torch.nn.functional.interpolate",
"torch.nn.functional.pad"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
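`rl_datasets.py` above squares every image with `F.pad` before resizing it with `F.interpolate`. A small standalone sketch of those two helpers on a random CHW tensor; the 416 target size matches the dataset default, the rest is illustrative:

```python
import numpy as np
import torch
import torch.nn.functional as F

img = torch.rand(3, 100, 60)                      # C, H, W (not square)

c, h, w = img.shape
dim_diff = np.abs(h - w)
pad1, pad2 = dim_diff // 2, dim_diff - dim_diff // 2
# F.pad takes (left, right, top, bottom) for the last two dims.
pad = (0, 0, pad1, pad2) if h <= w else (pad1, pad2, 0, 0)
square = F.pad(img, pad, "constant", value=0)

resized = F.interpolate(square.unsqueeze(0), size=416, mode="nearest").squeeze(0)
print(square.shape, resized.shape)                # (3, 100, 100), (3, 416, 416)
```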
HaroldMurcia/FaceMaskDetector
|
[
"0e95ab127424f08caae22a5e55634755988cbd30"
] |
[
"facemask_ori.py"
] |
[
"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Jul 22 15:56:47 2020\n\n@author: Karan\n\"\"\"\n\n\nimport numpy as np\nimport keras\nimport keras.backend as k\nfrom keras.layers import Conv2D,MaxPooling2D,SpatialDropout2D,Flatten,Dropout,Dense\nfrom keras.models import Sequential,load_model\nfrom keras.optimizers import Adam\nfrom keras.preprocessing import image\nimport cv2\nimport datetime\n\nmodel=Sequential()\nmodel.add(Conv2D(32,(3,3),activation='relu',input_shape=(150,150,3)))\nmodel.add(MaxPooling2D() )\nmodel.add(Conv2D(32,(3,3),activation='relu'))\nmodel.add(MaxPooling2D() )\nmodel.add(Conv2D(32,(3,3),activation='relu'))\nmodel.add(MaxPooling2D() )\nmodel.add(Flatten())\nmodel.add(Dense(100,activation='relu'))\nmodel.add(Dense(1,activation='sigmoid'))\n\nmodel.compile(optimizer='adam',loss='binary_crossentropy',metrics=['accuracy'])\nfrom keras.preprocessing.image import ImageDataGenerator\ntrain_datagen = ImageDataGenerator(\n rescale=1./255,\n shear_range=0.2,\n zoom_range=0.2,\n horizontal_flip=True)\ntest_datagen = ImageDataGenerator(rescale=1./255)\ntraining_set = train_datagen.flow_from_directory(\n 'train',\n target_size=(150,150),\n batch_size=16 ,\n class_mode='binary')\ntest_set = test_datagen.flow_from_directory(\n 'test',\n target_size=(150,150),\n batch_size=16,\n class_mode='binary')\nmodel_saved=model.fit_generator(\n training_set,\n epochs=10,\n validation_data=test_set,\n )\nmodel.save('mymodel.h5',model_saved)\n#To test for individual images\n#test_image=image.load_img('C:/Users/Karan/Desktop/ML Datasets/Face Mask Detection/Dataset/test/without_mask/30.jpg',target_size=(150,150,3))\n#test_image=image.load_img(r'C:\\Users\\Karan\\Pictures\\Camera Roll/21.jpg',\n#self.fail('message') target_size=(150,150,3))\n#test_image\n#test_image=image.img_to_array(test_image)\n#test_image=np.expand_dims(test_image,axis=0)\n#type(mymodel.predict_classes(test_image)[0][0])\n\n\n# IMPLEMENTING LIVE DETECTION OF FACE MASK\n\ncap=cv2.VideoCapture(2)\nface_cascade=cv2.CascadeClassifier('haarcascade_frontalface_default.xml')\nmymodel = load_model('mymodel.h5')\nwhile cap.isOpened():\n _,img=cap.read()\n face=face_cascade.detectMultiScale(img,scaleFactor=1.1,minNeighbors=4)\n for(x,y,w,h) in face:\n face_img = img[y:y+h, x:x+w]\n cv2.imwrite('temp.jpg',face_img)\n test_image=image.load_img('temp.jpg',target_size=(150,150,3))\n test_image=image.img_to_array(test_image)\n test_image=np.expand_dims(test_image,axis=0)\n pred=mymodel.predict_classes(test_image)[0][0]\n if pred==1:\n cv2.rectangle(img,(x,y),(x+w,y+h),(0,0,255),3)\n cv2.putText(img,'SIN MASCARA',((x+w)//2,y+h+20),cv2.FONT_HERSHEY_SIMPLEX,1,(0,0,255),3)\n else:\n cv2.rectangle(img,(x,y),(x+w,y+h),(0,255,0),3)\n cv2.putText(img,'CON MASCARA',((x+w)//2,y+h+20),cv2.FONT_HERSHEY_SIMPLEX,1,(0,255,0),3)\n datet=str(datetime.datetime.now())\n cv2.putText(img,datet,(400,450),cv2.FONT_HERSHEY_SIMPLEX,0.5,(255,255,255),1)\n\n cv2.imshow('img',img)\n\n if cv2.waitKey(1)==ord('q'):\n break\n\ncap.release()\ncv2.destroyAllWindows()\n"
] |
[
[
"numpy.expand_dims"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
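`facemask_ori.py` above turns a single 150x150x3 face crop into a 1-image batch with `np.expand_dims` before calling the model's predict method. A tiny sketch of that reshaping, using a zero array instead of a real frame:

```python
import numpy as np

# Stand-in for image.img_to_array(image.load_img('temp.jpg', target_size=(150, 150, 3)))
test_image = np.zeros((150, 150, 3), dtype=np.float32)

batch = np.expand_dims(test_image, axis=0)   # Keras models expect (batch, H, W, C)
print(test_image.shape, "->", batch.shape)   # (150, 150, 3) -> (1, 150, 150, 3)
```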
Arka161/cnngeometric_pytorch
|
[
"3378914478aad391281b0feec1d7a2c945d0870e"
] |
[
"train.py"
] |
[
"from __future__ import print_function, division\nimport argparse\nimport os\nfrom glob import glob\n\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\nfrom torch.utils.data import DataLoader\nfrom torch.utils.tensorboard import SummaryWriter\n\nfrom model.cnn_geometric_model import CNNGeometric\nfrom model.loss import TransformedGridLoss\n\nfrom data.synth_dataset import SynthDataset\nfrom data.download_datasets import download_pascal\n\nfrom geotnf.transformation import SynthPairTnf\n\nfrom image.normalization import NormalizeImageDict\n\nfrom util.train_test_fn import train, validate_model\nfrom util.torch_util import save_checkpoint, str_to_bool\n\nfrom options.options import ArgumentParser\n\n\n\"\"\"\n\nScript to evaluate a trained model as presented in the CNNGeometric TPAMI paper\non the PF/PF-pascal/Caltech-101 and TSS datasets\n\n\"\"\"\n\ndef main():\n\n args,arg_groups = ArgumentParser(mode='train').parse()\n print(args)\n\n use_cuda = torch.cuda.is_available()\n device = torch.device('cuda') if use_cuda else torch.device('cpu')\n # Seed\n torch.manual_seed(args.seed)\n if use_cuda:\n torch.cuda.manual_seed(args.seed)\n\n # Download dataset if needed and set paths\n if args.training_dataset == 'pascal':\n\n if args.dataset_image_path == '' and not os.path.exists('datasets/pascal-voc11/TrainVal'):\n download_pascal('datasets/pascal-voc11/')\n\n if args.dataset_image_path == '':\n args.dataset_image_path = 'datasets/pascal-voc11/'\n\n args.dataset_csv_path = 'training_data/pascal-random' \n\n\n # CNN model and loss\n print('Creating CNN model...')\n if args.geometric_model=='affine':\n cnn_output_dim = 6\n elif args.geometric_model=='hom' and args.four_point_hom:\n cnn_output_dim = 8\n elif args.geometric_model=='hom' and not args.four_point_hom:\n cnn_output_dim = 9\n elif args.geometric_model=='tps':\n cnn_output_dim = 18\n\n model = CNNGeometric(use_cuda=use_cuda,\n output_dim=cnn_output_dim,\n **arg_groups['model'])\n\n if args.geometric_model=='hom' and not args.four_point_hom:\n init_theta = torch.tensor([1,0,0,0,1,0,0,0,1], device = device)\n model.FeatureRegression.linear.bias.data+=init_theta\n\n if args.geometric_model=='hom' and args.four_point_hom:\n init_theta = torch.tensor([-1, -1, 1, 1, -1, 1, -1, 1], device = device)\n model.FeatureRegression.linear.bias.data+=init_theta\n\n if args.use_mse_loss:\n print('Using MSE loss...')\n loss = nn.MSELoss()\n else:\n print('Using grid loss...')\n loss = TransformedGridLoss(use_cuda=use_cuda,\n geometric_model=args.geometric_model)\n\n # Initialize Dataset objects\n dataset = SynthDataset(geometric_model=args.geometric_model,\n dataset_csv_path=args.dataset_csv_path,\n dataset_csv_file='train.csv',\n\t\t\t dataset_image_path=args.dataset_image_path,\n\t\t\t transform=NormalizeImageDict(['image']),\n\t\t\t random_sample=args.random_sample)\n\n dataset_val = SynthDataset(geometric_model=args.geometric_model,\n dataset_csv_path=args.dataset_csv_path,\n dataset_csv_file='val.csv',\n\t\t\t dataset_image_path=args.dataset_image_path,\n\t\t\t transform=NormalizeImageDict(['image']),\n\t\t\t random_sample=args.random_sample)\n\n # Set Tnf pair generation func\n pair_generation_tnf = SynthPairTnf(geometric_model=args.geometric_model,\n\t\t\t\t use_cuda=use_cuda)\n\n # Initialize DataLoaders\n dataloader = DataLoader(dataset, batch_size=args.batch_size,\n shuffle=True, num_workers=4)\n\n dataloader_val = DataLoader(dataset_val, batch_size=args.batch_size,\n shuffle=True, num_workers=4)\n\n # Optimizer and eventual 
scheduler\n optimizer = optim.Adam(model.FeatureRegression.parameters(), lr=args.lr)\n\n if args.lr_scheduler:\n scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer,\n T_max=args.lr_max_iter,\n eta_min=1e-6)\n # scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, 'min')\n else:\n scheduler = False\n\n # Train\n\n # Set up names for checkpoints\n if args.use_mse_loss:\n ckpt = args.trained_model_fn + '_' + args.geometric_model + '_mse_loss' + args.feature_extraction_cnn\n checkpoint_path = os.path.join(args.trained_model_dir,\n args.trained_model_fn,\n ckpt + '.pth.tar')\n else:\n ckpt = args.trained_model_fn + '_' + args.geometric_model + '_grid_loss' + args.feature_extraction_cnn\n checkpoint_path = os.path.join(args.trained_model_dir,\n args.trained_model_fn,\n ckpt + '.pth.tar')\n if not os.path.exists(args.trained_model_dir):\n os.mkdir(args.trained_model_dir)\n\n # Set up TensorBoard writer\n if not args.log_dir:\n tb_dir = os.path.join(args.trained_model_dir, args.trained_model_fn + '_tb_logs')\n else:\n tb_dir = os.path.join(args.log_dir, args.trained_model_fn + '_tb_logs')\n\n logs_writer = SummaryWriter(tb_dir)\n # add graph, to do so we have to generate a dummy input to pass along with the graph\n dummy_input = {'source_image': torch.rand([args.batch_size, 3, 240, 240], device = device),\n 'target_image': torch.rand([args.batch_size, 3, 240, 240], device = device),\n 'theta_GT': torch.rand([16, 2, 3], device = device)}\n\n logs_writer.add_graph(model, dummy_input)\n\n # Start of training\n print('Starting training...')\n\n best_val_loss = float(\"inf\")\n\n for epoch in range(1, args.num_epochs+1):\n\n # we don't need the average epoch loss so we assign it to _\n _ = train(epoch, model, loss, optimizer,\n dataloader, pair_generation_tnf,\n log_interval=args.log_interval,\n scheduler=scheduler,\n tb_writer=logs_writer)\n\n val_loss = validate_model(model, loss,\n dataloader_val, pair_generation_tnf,\n epoch, logs_writer)\n\n # remember best loss\n is_best = val_loss < best_val_loss\n best_val_loss = min(val_loss, best_val_loss)\n save_checkpoint({\n 'epoch': epoch + 1,\n 'args': args,\n 'state_dict': model.state_dict(),\n 'best_val_loss': best_val_loss,\n 'optimizer': optimizer.state_dict(),\n },\n is_best, checkpoint_path)\n\n logs_writer.close()\n print('Done!')\n\n\nif __name__ == '__main__':\n main()\n"
] |
[
[
"torch.cuda.manual_seed",
"torch.optim.lr_scheduler.CosineAnnealingLR",
"torch.manual_seed",
"torch.utils.data.DataLoader",
"torch.tensor",
"torch.utils.tensorboard.SummaryWriter",
"torch.cuda.is_available",
"torch.rand",
"torch.device",
"torch.nn.MSELoss"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
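`train.py` above seeds torch (and CUDA when available), builds an Adam optimizer over the regression head and, optionally, a `CosineAnnealingLR` scheduler. A minimal sketch of that optimizer/scheduler wiring on a throwaway linear model; the model, learning rate and `T_max` here are illustrative, not the script's defaults:

```python
import torch
import torch.nn as nn
import torch.optim as optim

torch.manual_seed(42)
if torch.cuda.is_available():
    torch.cuda.manual_seed(42)

model = nn.Linear(10, 2)                      # stand-in for CNNGeometric
optimizer = optim.Adam(model.parameters(), lr=1e-3)
scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=100, eta_min=1e-6)

for step in range(3):
    optimizer.zero_grad()
    loss = model(torch.rand(16, 10)).pow(2).mean()
    loss.backward()
    optimizer.step()
    scheduler.step()
    print(step, scheduler.get_last_lr())
```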
crsanderford/brainrender
|
[
"a92dc3b08f743721521ae233f15b1814207bf08c"
] |
[
"benchmark/bm_cells.py"
] |
[
"from benchmark.timer import Timer\nfrom brainrender import Scene, actors\nimport numpy as np\nimport random\n\n# create N random cells coordinates\n\n\ndef get_n_random_points_in_region(region, N):\n \"\"\"\n Gets N random points inside (or on the surface) of a mes\n \"\"\"\n\n region_bounds = region.mesh.bounds()\n X = np.random.randint(region_bounds[0], region_bounds[1], size=10000)\n Y = np.random.randint(region_bounds[2], region_bounds[3], size=10000)\n Z = np.random.randint(region_bounds[4], region_bounds[5], size=10000)\n pts = [[x, y, z] for x, y, z in zip(X, Y, Z)]\n\n ipts = region.mesh.insidePoints(pts).points()\n return np.vstack(random.choices(ipts, k=N))\n\n\nfor N_cells in (10000, 100000, 1000000):\n scene = Scene(inset=False)\n coordinates = get_n_random_points_in_region(scene.root, N_cells)\n\n with Timer(scene, name=f\"Rendering {N_cells} cells\"):\n scene.add(actors.Points(coordinates))\n\n scene = Scene(inset=False)\n with Timer(scene, name=f\"Slicing {N_cells} cells\"):\n scene.add(actors.Points(coordinates))\n scene.slice(\"sagittal\")\n\n scene.close()\n"
] |
[
[
"numpy.random.randint"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
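`bm_cells.py` above draws candidate cell coordinates uniformly inside a mesh's bounding box with `np.random.randint` and then keeps only the points inside the mesh. A sketch of just the bounding-box sampling, with hypothetical bounds standing in for `region.mesh.bounds()` and a plain subsample in place of the inside-mesh filter:

```python
import random
import numpy as np

# Hypothetical (x_min, x_max, y_min, y_max, z_min, z_max) bounds,
# standing in for region.mesh.bounds().
bounds = (0, 13000, 0, 8000, 0, 11000)

X = np.random.randint(bounds[0], bounds[1], size=10000)
Y = np.random.randint(bounds[2], bounds[3], size=10000)
Z = np.random.randint(bounds[4], bounds[5], size=10000)
pts = np.column_stack((X, Y, Z))

# The real script filters pts to the mesh interior; here we just subsample N of them.
N = 100
sample = np.vstack(random.choices(list(pts), k=N))
print(sample.shape)   # (100, 3)
```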
zephyr-fun/human_body_prior
|
[
"35571fe16fddca39553398f6b3eb6d18a23c985b"
] |
[
"src/human_body_prior/models/ik_engine.py"
] |
[
"# -*- coding: utf-8 -*-\n#\n# Copyright (C) 2019 Max-Planck-Gesellschaft zur Förderung der Wissenschaften e.V. (MPG),\n# acting on behalf of its Max Planck Institute for Intelligent Systems and the\n# Max Planck Institute for Biological Cybernetics. All rights reserved.\n#\n# Max-Planck-Gesellschaft zur Förderung der Wissenschaften e.V. (MPG) is holder of all proprietary rights\n# on this computer program. You can only use this computer program if you have closed a license agreement\n# with MPG or you get the right to use the computer program from someone who is authorized to grant you that right.\n# Any use of the computer program without a valid license is prohibited and liable to prosecution.\n# Contact: [email protected]\n#\n#\n# If you use this code in a research publication please consider citing the following:\n#\n# Expressive Body Capture: 3D Hands, Face, and Body from a Single Image <https://arxiv.org/abs/1904.05866>\n#\n#\n# Code Developed by:\n# Nima Ghorbani <https://nghorbani.github.io/>\n#\n# 2021.02.12\n\nfrom typing import List, Dict\n\nfrom psbody.mesh import Mesh\nfrom body_visualizer.tools.mesh_tools import rotateXYZ\nfrom body_visualizer.mesh.psbody_mesh_cube import points_to_cubes\nfrom body_visualizer.mesh.psbody_mesh_sphere import points_to_spheres\n\nfrom torch import nn\nimport torch\n\nfrom human_body_prior.tools.model_loader import load_model\n\nimport numpy as np\n\nfrom body_visualizer.tools.vis_tools import colors\nfrom human_body_prior.tools.omni_tools import copy2cpu as c2c\nfrom psbody.mesh import MeshViewers\n\nfrom human_body_prior.tools.omni_tools import log2file\n\nfrom human_body_prior.models.vposer_model import VPoser\nfrom human_body_prior.tools.omni_tools import flatten_list\n\n\ndef visualize(points, bm_f, mvs, kpts_colors, verbosity=2, logger=None):\n from human_body_prior.tools.omni_tools import log2file\n\n if logger is None: logger = log2file()\n\n def view(opt_objs, body_v, virtual_markers, opt_it):\n if verbosity <= 0: return\n opt_objs_cpu = {k: c2c(v) for k, v in opt_objs.items()}\n\n total_loss = np.sum([np.sum(v) for k, v in opt_objs_cpu.items()])\n message = 'it {} -- [total loss = {:.2e}] - {}'.format(opt_it, total_loss, ' | '.join(['%s = %2.2e' % (k, np.sum(v)) for k, v in opt_objs_cpu.items()]))\n logger(message)\n if verbosity>1:\n bs = body_v.shape[0]\n np.random.seed(100)\n frame_ids = list(range(bs)) if bs <= len(mvs) else np.random.choice(bs , size=len(mvs), replace=False).tolist()\n if bs > len(mvs): message += ' -- [frame_ids: {}]'.format(frame_ids)\n for dispId, fId in enumerate(frame_ids): # check for the number of frames in mvs and show a randomly picked number of frames in body if there is more to show than row*cols available\n new_body_v = rotateXYZ(body_v[fId], [-90,0,0])\n\n orig_mrk_mesh = points_to_spheres(rotateXYZ(c2c(points[fId]), [-90,0,0]), radius=0.01, color=kpts_colors)\n virtual_markers_mesh = points_to_cubes(rotateXYZ(virtual_markers[fId], [-90,0,0]), radius=0.01, color=kpts_colors)\n new_body_mesh = Mesh(new_body_v, bm_f, vc=colors['grey'])\n\n # linev = rotateXYZ(np.hstack((c2c(points[fId]), virtual_markers[fId])).reshape((-1, 3)), [-90,0,0])\n # linee = np.arange(len(linev)).reshape((-1, 2))\n # ll = Lines(v=linev, e=linee)\n # ll.vc = (ll.v * 0. 
+ 1) * np.array([0.00, 0.00, 1.00])\n # mvs[dispId].set_dynamic_lines([ll])\n\n # orig_mrk_mesh = points_to_spheres(data_pc, radius=0.01, vc=colors['blue'])\n mvs[dispId].set_dynamic_meshes([orig_mrk_mesh, virtual_markers_mesh])\n mvs[dispId].set_static_meshes([new_body_mesh])\n\n mvs[0].set_titlebar(message)\n # if out_dir is not None: mv.save_snapshot(os.path.join(out_dir, '%05d_it_%.5d.png' %(frame_id, opt_it)))\n return view\n\n\nclass AdamInClosure():\n def __init__(self, var_list, lr, max_iter=100, tolerance_change=1e-5):\n self.optimizer = torch.optim.Adam(var_list, lr)\n self.max_iter = max_iter\n self.tolerance_change = tolerance_change\n\n\n def step(self, closure):\n prev_loss = None\n for it in range(self.max_iter):\n loss = closure()\n self.optimizer.step()\n if prev_loss is None:\n prev_loss = loss\n continue\n if torch.isnan(loss):\n # breakpoint()\n break\n if abs(loss - prev_loss) < self.tolerance_change:\n print('abs(loss - prev_loss) < self.tolerance_change')\n break\n\n def zero_grad(self):\n self.optimizer.zero_grad()\n\ndef ik_fit(optimizer, source_kpts_model, static_vars, vp_model, extra_params={}, on_step=None, gstep=0):\n\n data_loss = extra_params.get('data_loss', torch.nn.SmoothL1Loss(reduction='mean'))\n # data_loss =\n # data_loss = torch.nn.L1Loss(reduction='mean')#change with SmoothL1\n\n def fit(weights, free_vars):\n\n fit.gstep += 1\n optimizer.zero_grad()\n\n free_vars['pose_body'] = vp_model.decode(free_vars['poZ_body'])['pose_body'].contiguous().view(-1, 63)\n nonan_mask = torch.isnan(free_vars['poZ_body']).sum(-1) == 0\n\n opt_objs = {}\n\n res = source_kpts_model(free_vars)\n\n opt_objs['data'] = data_loss(res['source_kpts'], static_vars['target_kpts'])\n\n opt_objs['betas'] = torch.pow(free_vars['betas'][nonan_mask],2).sum()\n opt_objs['poZ_body'] = torch.pow(free_vars['poZ_body'][nonan_mask],2).sum()\n\n\n opt_objs = {k: opt_objs[k]*v for k, v in weights.items() if k in opt_objs.keys()}\n loss_total = torch.sum(torch.stack(list(opt_objs.values())))\n # breakpoint()\n\n loss_total.backward()\n\n if on_step is not None:\n on_step(opt_objs, c2c(res['body'].v), c2c(res['source_kpts']), fit.gstep)\n\n fit.free_vars = {k:v for k,v in free_vars.items()}# if k in IK_Engine.fields_to_optimize}\n # fit.nonan_mask = nonan_mask\n fit.final_loss = loss_total\n\n return loss_total\n\n fit.gstep = gstep\n fit.final_loss = None\n fit.free_vars = {}\n # fit.nonan_mask = None\n return fit\n\nclass IK_Engine(nn.Module):\n\n\n def __init__(self,\n vposer_expr_dir: str,\n data_loss,\n optimizer_args: dict={'type':'ADAM'},\n stepwise_weights: List[Dict]=[{'data': 10., 'poZ_body': .01, 'betas': .5}],\n display_rc: tuple = (2,1),\n verbosity: int = 1,\n logger=None,\n ):\n '''\n\n :param vposer_expr_dir: The vposer directory that holds the settings and model snapshot\n :param data_loss: should be a pytorch callable (source, target) that returns the accumulated loss\n :param optimizer_args: arguments for optimizers\n :param stepwise_weights: list of dictionaries. each list element defines weights for one full step of optimization\n if a weight value is left out, its respective object item will be removed as well. imagine optimizing without data term!\n :param display_rc: number of row and columns in case verbosity > 1\n :param verbosity: 0: silent, 1: text, 2: text/visual. 
running 2 over ssh would need extra work\n :param logger: an instance of human_body_prior.tools.omni_tools.log2file\n '''\n\n\n super(IK_Engine, self).__init__()\n\n assert isinstance(stepwise_weights, list), ValueError('stepwise_weights should be a list of dictionaries.')\n assert np.all(['data' in l for l in stepwise_weights]), ValueError('The term data should be available in every weight of anealed optimization step: {}'.format(stepwise_weights))\n\n self.data_loss = torch.nn.SmoothL1Loss(reduction='mean') if data_loss is None else data_loss\n\n self.stepwise_weights = stepwise_weights\n self.verbosity = verbosity\n self.optimizer_args = optimizer_args\n\n self.logger = log2file() if logger is None else logger\n\n\n if verbosity>1:\n mvs = MeshViewers(display_rc, keepalive=True)\n self.mvs = flatten_list(mvs)\n self.mvs[0].set_background_color(colors['white'])\n else:\n self.mvs=None\n\n self.vp_model, _ = load_model(vposer_expr_dir,\n model_code=VPoser,\n remove_words_in_model_weights='vp_model.',\n disable_grad=True)\n\n\n def forward(self, source_kpts, target_kpts, initial_body_params={}):\n '''\n source_kpts is a function that given body parameters computes source key points that should match target key points\n Try to reconstruct the bps signature by optimizing the body_poZ\n '''\n # if self.rt_ps.verbosity > 0: self.logger('Processing {} frames'.format(points.shape[0]))\n\n bs = target_kpts.shape[0]\n\n\n on_step = visualize(target_kpts,\n kpts_colors=source_kpts.kpts_colors,\n bm_f=source_kpts.bm_f,\n mvs=self.mvs,\n verbosity=self.verbosity,\n logger=self.logger)\n\n comp_device = target_kpts.device\n # comp_device = self.vp_model.named_parameters().__next__()[1].device\n if 'pose_body' not in initial_body_params:\n initial_body_params['pose_body'] = torch.zeros([bs, 63], device=comp_device, dtype=torch.float, requires_grad=False)\n if 'trans' not in initial_body_params:\n initial_body_params['trans'] = torch.zeros([bs, 3], device=comp_device, dtype=torch.float, requires_grad=False)\n if 'betas' not in initial_body_params:\n initial_body_params['betas'] = torch.zeros([bs, 10], device=comp_device, dtype=torch.float, requires_grad=False)\n if 'root_orient' not in initial_body_params:\n initial_body_params['root_orient'] = torch.zeros([bs, 3], device=comp_device, dtype=torch.float, requires_grad=False)\n\n initial_body_params['poZ_body'] = self.vp_model.encode(initial_body_params['pose_body']).mean\n\n free_vars = {k: torch.nn.Parameter(v.detach(), requires_grad=True) for k,v in initial_body_params.items() if k in ['betas', 'trans', 'poZ_body', 'root_orient']}\n static_vars = {\n 'target_kpts': target_kpts,\n # 'trans': initial_body_params['trans'].detach(),\n # 'betas': initial_body_params['betas'].detach(),\n # 'poZ_body': initial_body_params['poZ_body'].detach()\n }\n\n if self.optimizer_args['type'].upper() == 'LBFGS':\n optimizer = torch.optim.LBFGS(list(free_vars.values()),\n lr=self.optimizer_args.get('lr', 1),\n max_iter=self.optimizer_args.get('max_iter', 100),\n tolerance_change=self.optimizer_args.get('tolerance_change', 1e-5),\n max_eval=self.optimizer_args.get('max_eval', None),\n history_size=self.optimizer_args.get('history_size', 100),\n line_search_fn='strong_wolfe')\n\n elif self.optimizer_args['type'].upper() == 'ADAM':\n optimizer = AdamInClosure(list(free_vars.values()),\n lr=self.optimizer_args.get('lr', 1e-3),\n max_iter=self.optimizer_args.get('max_iter', 100),\n tolerance_change=self.optimizer_args.get('tolerance_change', 1e-5),\n )\n else:\n raise 
ValueError('optimizer_type not recognized.')\n\n gstep = 0\n closure = ik_fit(optimizer,\n source_kpts_model=source_kpts,\n static_vars=static_vars,\n vp_model=self.vp_model,\n extra_params={'data_loss': self.data_loss},\n on_step=on_step,\n gstep=gstep)\n # try:\n\n for wts in self.stepwise_weights:\n optimizer.step(lambda: closure(wts, free_vars))\n free_vars = closure.free_vars\n # except:\n #\n # pass\n\n # if closure.final_loss is None or torch.isnan(closure.final_loss) or torch.any(torch.isnan(free_vars['trans'])):\n # if self.verbosity > 0:\n # self.logger('NaN observed in the optimization results. you might want to restart the refinment procedure.')\n # breakpoint()\n # return None\n\n return closure.free_vars#, closure.nonan_mask\n"
] |
[
[
"torch.optim.Adam",
"torch.nn.SmoothL1Loss",
"torch.isnan",
"numpy.random.seed",
"torch.zeros",
"numpy.all",
"torch.pow",
"numpy.sum"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
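The code entry in the row above defines AdamInClosure, which wraps torch.optim.Adam behind the same closure interface that torch.optim.LBFGS expects, stepping until the change in loss falls below a tolerance. Below is a minimal, self-contained sketch of that closure-driven loop on a toy SmoothL1 fitting problem; the target values, learning rate, iteration cap, and tolerance are illustrative assumptions, not values taken from the repository.

    import torch

    # Toy problem: fit a free parameter vector to a fixed target with SmoothL1 loss.
    target = torch.tensor([1.0, -2.0, 0.5])
    free_var = torch.nn.Parameter(torch.zeros(3))
    optimizer = torch.optim.Adam([free_var], lr=0.1)
    loss_fn = torch.nn.SmoothL1Loss(reduction='mean')

    def closure():
        # Same contract as an LBFGS closure: zero grads, compute loss, backprop, return loss.
        optimizer.zero_grad()
        loss = loss_fn(free_var, target)
        loss.backward()
        return loss

    prev_loss, tolerance_change = None, 1e-6
    for it in range(500):
        loss = closure()
        optimizer.step()
        # Stop early once the loss stops changing appreciably, as AdamInClosure.step does.
        if prev_loss is not None and abs(loss.item() - prev_loss) < tolerance_change:
            break
        prev_loss = loss.item()

    print(it, loss.item(), free_var.data)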
wenh06/OpenAttack
|
[
"56e3a96f6a4eeaf30b90a275685f37cc7e7b3c7c"
] |
[
"OpenAttack/substitutes/dces.py"
] |
[
"from .base import CharSubstitute\nfrom ..data_manager import DataManager\nimport numpy as np\n\n\ndisallowed = ['TAG', 'MALAYALAM', 'BAMUM', 'HIRAGANA', 'RUNIC', 'TAI', 'SUNDANESE', 'BATAK', 'LEPCHA', 'CHAM',\n 'TELUGU', 'DEVANGARAI', 'BUGINESE', 'MYANMAR', 'LINEAR', 'SYLOTI', 'PHAGS-PA', 'CHEROKEE',\n 'CANADIAN', 'YI', 'LYCIAN', 'HANGUL', 'KATAKANA', 'JAVANESE', 'ARABIC', 'KANNADA', 'BUHID',\n 'TAGBANWA', 'DESERET', 'REJANG', 'BOPOMOFO', 'PERMIC', 'OSAGE', 'TAGALOG', 'MEETEI', 'CARIAN',\n 'UGARITIC', 'ORIYA', 'ELBASAN', 'CYPRIOT', 'HANUNOO', 'GUJARATI', 'LYDIAN', 'MONGOLIAN', 'AVESTAN',\n 'MEROITIC', 'KHAROSHTHI', 'HUNGARIAN', 'KHUDAWADI', 'ETHIOPIC', 'PERSIAN', 'OSMANYA', 'ELBASAN',\n 'TIBETAN', 'BENGALI', 'TURKIC', 'THROWING', 'HANIFI', 'BRAHMI', 'KAITHI', 'LIMBU', 'LAO', 'CHAKMA',\n 'DEVANAGARI', 'ITALIC', 'CJK', 'MEDEFAIDRIN', 'DIAMOND', 'SAURASHTRA', 'ADLAM', 'DUPLOYAN']\ndisallowed_codes = ['1F1A4', 'A7AF'] # 不允许编码\n\n\ndef get_hex_string(ch):\n return '{:04x}'.format(ord(ch)).upper() # 获得字符16进制编码\n\n\nclass DCESSubstitute(CharSubstitute):\n \"\"\"\n :Data Requirements: :py:data:`.AttackAssist.DCES`\n :Package Requirements: * **sklearn**\n \n An implementation of :py:class:`.CharSubstitute`.\n\n DCES substitute used in :py:class:`.VIPERAttacker`.\n\n \"\"\"\n\n def __init__(self):\n self.descs, self.neigh = DataManager.load(\"AttackAssist.DCES\")\n # load\n\n def __call__(self, char, threshold):\n \"\"\"\n :param word: the raw char, threshold: return top k words.\n :return: The result is a list of tuples, *(substitute, 1)*.\n :rtype: list of tuple\n \"\"\"\n c = get_hex_string(char)\n\n if c in self.descs:\n description = self.descs[c][\"description\"]\n else:\n return [char, 1]\n\n tokens = description.split(' ')\n case = 'unknown'\n identifiers = []\n\n for token in tokens:\n if len(token) == 1:\n identifiers.append(token)\n elif token == 'SMALL':\n case = 'SMALL'\n elif token == 'CAPITAL':\n case = 'CAPITAL'\n\n matches = []\n match_ids = []\n for i in identifiers:\n for idx, val in self.descs.items():\n desc_toks = val[\"description\"].split(' ')\n if i in desc_toks and not np.any(np.in1d(desc_toks, disallowed)) and \\\n not np.any(np.in1d(idx, disallowed_codes)) and \\\n not int(idx, 16) > 30000:\n\n desc_toks = np.array(desc_toks)\n case_descriptor = desc_toks[(desc_toks == 'SMALL') | (desc_toks == 'CAPITAL')]\n\n if len(case_descriptor) > 1:\n case_descriptor = case_descriptor[0]\n elif len(case_descriptor) == 0:\n case = 'unknown'\n\n if case == 'unknown' or case == case_descriptor:\n match_ids.append(idx)\n matches.append(val[\"vec\"])\n\n if len(matches) == 0:\n return [(char, 1)]\n\n match_vecs = np.stack(matches)\n Y = match_vecs\n\n self.neigh.fit(Y)\n\n X = self.descs[c][\"vec\"].reshape(1, -1)\n\n if Y.shape[0] > threshold:\n dists, idxs = self.neigh.kneighbors(X, threshold, return_distance=True)\n else:\n dists, idxs = self.neigh.kneighbors(X, Y.shape[0], return_distance=True)\n probs = dists.flatten()\n\n charcodes = [match_ids[idx] for idx in idxs.flatten()]\n \n chars = []\n for charcode in charcodes:\n chars.append(chr(int(charcode, 16)))\n ret = list(zip(chars, probs))\n return list(filter(lambda x: x[1] < threshold, ret))\n"
] |
[
[
"numpy.in1d",
"numpy.array",
"numpy.stack"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
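DCESSubstitute in the row above ranks visually confusable characters by running a nearest-neighbour query over Unicode description vectors and returning (character, distance) pairs. The following is a small sketch of that lookup pattern, assuming random vectors as a stand-in for the real AttackAssist.DCES data; the candidate characters and vector width are hypothetical.

    import numpy as np
    from sklearn.neighbors import NearestNeighbors

    def get_hex_string(ch):
        # Upper-case, zero-padded hex code point, as in dces.py above.
        return '{:04x}'.format(ord(ch)).upper()

    # Random stand-ins for the DCES description vectors (illustration only).
    rng = np.random.default_rng(0)
    candidate_codes = [get_hex_string(c) for c in 'abcde']
    candidate_vecs = rng.normal(size=(len(candidate_codes), 8))
    query_vec = rng.normal(size=(1, 8))

    neigh = NearestNeighbors()
    neigh.fit(candidate_vecs)
    dists, idxs = neigh.kneighbors(query_vec, n_neighbors=3, return_distance=True)

    # Pair each neighbouring character with its distance, mirroring the substitute's return value.
    result = [(chr(int(candidate_codes[i], 16)), float(d))
              for i, d in zip(idxs.flatten(), dists.flatten())]
    print(result)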
tsura-crisaldo/qiskit-aqua
|
[
"2b4a70bb24a9170c3735124536dce1d7ffe4ed67"
] |
[
"qiskit/aqua/operators/list_ops/summed_op.py"
] |
[
"# -*- coding: utf-8 -*-\n\n# This code is part of Qiskit.\n#\n# (C) Copyright IBM 2020.\n#\n# This code is licensed under the Apache License, Version 2.0. You may\n# obtain a copy of this license in the LICENSE.txt file in the root directory\n# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.\n#\n# Any modifications or derivative works of this code must retain this\n# copyright notice, and modified files need to carry a notice indicating\n# that they have been altered from the originals.\n\n\"\"\" SummedOp Class \"\"\"\n\nfrom typing import List, Union, cast\nimport warnings\n\nimport numpy as np\n\nfrom qiskit.circuit import ParameterExpression\nfrom .list_op import ListOp\nfrom ..legacy.base_operator import LegacyBaseOperator\nfrom ..legacy.weighted_pauli_operator import WeightedPauliOperator\nfrom ..operator_base import OperatorBase\nfrom ..primitive_ops.primitive_op import PrimitiveOp\n\n\nclass SummedOp(ListOp):\n \"\"\" A class for lazily representing sums of Operators. Often Operators cannot be\n efficiently added to one another, but may be manipulated further so that they can be\n later. This class holds logic to indicate that the Operators in ``oplist`` are meant to\n be added together, and therefore if they reach a point in which they can be, such as after\n evaluation or conversion to matrices, they can be reduced by addition. \"\"\"\n\n def __init__(self,\n oplist: List[OperatorBase],\n coeff: Union[int, float, complex, ParameterExpression] = 1.0,\n abelian: bool = False) -> None:\n \"\"\"\n Args:\n oplist: The Operators being summed.\n coeff: A coefficient multiplying the operator\n abelian: Indicates whether the Operators in ``oplist`` are known to mutually commute.\n \"\"\"\n super().__init__(oplist,\n combo_fn=lambda x: np.sum(x, axis=0),\n coeff=coeff,\n abelian=abelian)\n\n @property\n def num_qubits(self) -> int:\n return self.oplist[0].num_qubits\n\n @property\n def distributive(self) -> bool:\n return True\n\n def add(self, other: OperatorBase) -> OperatorBase:\n \"\"\"Return Operator addition of ``self`` and ``other``, overloaded by ``+``.\n\n Note:\n This appends ``other`` to ``self.oplist`` without checking ``other`` is already\n included or not. If you want to simplify them, please use :meth:`simplify`.\n\n Args:\n other: An ``OperatorBase`` with the same number of qubits as self, and in the same\n 'Operator', 'State function', or 'Measurement' category as self (i.e. 
the same type\n of underlying function).\n\n Returns:\n A ``SummedOp`` equivalent to the sum of self and other.\n \"\"\"\n self_new_ops = self.oplist if self.coeff == 1 \\\n else [op.mul(self.coeff) for op in self.oplist]\n if isinstance(other, SummedOp):\n other_new_ops = other.oplist if other.coeff == 1 \\\n else [op.mul(other.coeff) for op in other.oplist]\n else:\n other_new_ops = [other]\n return SummedOp(self_new_ops + other_new_ops)\n\n def collapse_summands(self) -> 'SummedOp':\n \"\"\"Return Operator by simplifying duplicate operators.\n\n E.g., ``SummedOp([2 * X ^ Y, X ^ Y]).collapse_summands() -> SummedOp([3 * X ^ Y])``.\n\n Returns:\n A simplified ``SummedOp`` equivalent to self.\n \"\"\"\n oplist = [] # type: List[OperatorBase]\n coeffs = [] # type: List[Union[int, float, complex, ParameterExpression]]\n for op in self.oplist:\n if isinstance(op, PrimitiveOp):\n new_op = PrimitiveOp(op.primitive)\n new_coeff = op.coeff * self.coeff\n if new_op in oplist:\n index = oplist.index(new_op)\n coeffs[index] += new_coeff\n else:\n oplist.append(new_op)\n coeffs.append(new_coeff)\n else:\n if op in oplist:\n index = oplist.index(op)\n coeffs[index] += self.coeff\n else:\n oplist.append(op)\n coeffs.append(self.coeff)\n return SummedOp([op * coeff for op, coeff in zip(oplist, coeffs)]) # type: ignore\n\n # TODO be smarter about the fact that any two ops in oplist could be evaluated for sum.\n def reduce(self) -> OperatorBase:\n \"\"\"Try collapsing list or trees of sums.\n\n Tries to sum up duplicate operators and reduces the operators\n in the sum.\n\n Returns:\n A collapsed version of self, if possible.\n \"\"\"\n # reduce constituents\n reduced_ops = sum(op.reduce() for op in self.oplist) * self.coeff\n\n # group duplicate operators\n if isinstance(reduced_ops, SummedOp):\n reduced_ops = reduced_ops.collapse_summands()\n\n if isinstance(reduced_ops, SummedOp) and len(reduced_ops.oplist) == 1:\n return reduced_ops.oplist[0]\n else:\n return cast(OperatorBase, reduced_ops)\n\n def to_matrix_op(self, massive: bool = False) -> OperatorBase:\n \"\"\" Returns an equivalent Operator composed of only NumPy-based primitives, such as\n ``MatrixOp`` and ``VectorStateFn``. \"\"\"\n accum = self.oplist[0].to_matrix_op(massive=massive) # type: ignore\n for i in range(1, len(self.oplist)):\n accum += self.oplist[i].to_matrix_op(massive=massive) # type: ignore\n\n return accum * self.coeff\n\n def to_legacy_op(self, massive: bool = False) -> LegacyBaseOperator:\n # We do this recursively in case there are SummedOps of PauliOps in oplist.\n legacy_ops = [op.to_legacy_op(massive=massive) for op in self.oplist]\n\n if not all(isinstance(op, WeightedPauliOperator) for op in legacy_ops):\n # If any Operators in oplist cannot be represented by Legacy Operators, the error\n # will be raised in the offending matrix-converted result (e.g. StateFn or ListOp)\n return self.to_matrix_op(massive=massive).to_legacy_op(massive=massive)\n\n if isinstance(self.coeff, ParameterExpression):\n try:\n coeff = float(self.coeff)\n except TypeError:\n raise TypeError('Cannot convert Operator with unbound parameter {} to Legacy '\n 'Operator'.format(self.coeff))\n else:\n coeff = cast(float, self.coeff)\n\n return self.combo_fn(legacy_ops) * coeff\n\n def print_details(self):\n \"\"\"\n Print out the operator in details.\n Returns:\n str: a formatted string describes the operator.\n \"\"\"\n warnings.warn(\"print_details() is deprecated and will be removed in \"\n \"a future release. 
Instead you can use .to_legacy_op() \"\n \"and call print_details() on it's output\",\n DeprecationWarning)\n ret = self.to_legacy_op().print_details()\n return ret\n\n def equals(self, other: OperatorBase) -> bool:\n \"\"\"Check if other is equal to self.\n\n Note:\n This is not a mathematical check for equality.\n If ``self`` and ``other`` implement the same operation but differ\n in the representation (e.g. different type of summands)\n ``equals`` will evaluate to ``False``.\n\n Args:\n other: The other operator to check for equality.\n\n Returns:\n True, if other and self are equal, otherwise False.\n\n Examples:\n >>> from qiskit.aqua.operators import X, Z\n >>> 2 * X == X + X\n True\n >>> X + Z == Z + X\n True\n \"\"\"\n self_reduced, other_reduced = self.reduce(), other.reduce()\n if not isinstance(other_reduced, type(self_reduced)):\n return False\n\n # check if reduced op is still a SummedOp\n if not isinstance(self_reduced, SummedOp):\n return self_reduced == other_reduced\n\n self_reduced = cast(SummedOp, self_reduced)\n other_reduced = cast(SummedOp, other_reduced)\n if len(self_reduced.oplist) != len(other_reduced.oplist):\n return False\n\n # absorb coeffs into the operators\n if self_reduced.coeff != 1:\n self_reduced = SummedOp(\n [op * self_reduced.coeff for op in self_reduced.oplist]) # type: ignore\n if other_reduced.coeff != 1:\n other_reduced = SummedOp(\n [op * other_reduced.coeff for op in other_reduced.oplist]) # type: ignore\n\n # compare independent of order\n return set(self_reduced) == set(other_reduced)\n"
] |
[
[
"numpy.sum"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
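SummedOp.collapse_summands() in the row above merges duplicate operators by accumulating their coefficients, which is what makes equalities like 2 * X == X + X hold after reduction. Below is a simplified, library-free sketch of that bookkeeping, assuming operators are represented as plain string labels (a hypothetical stand-in for OperatorBase instances, not the qiskit API itself).

    from collections import OrderedDict
    from typing import List, Tuple

    def collapse_summands(terms: List[Tuple[str, complex]]) -> List[Tuple[str, complex]]:
        # Merge duplicate operator labels by summing their coefficients,
        # analogous to SummedOp.collapse_summands() above.
        merged = OrderedDict()
        for op, coeff in terms:
            merged[op] = merged.get(op, 0) + coeff
        return list(merged.items())

    # 2*(X^Y) + 1*(X^Y) + 0.5*(Z^Z)  ->  3*(X^Y) + 0.5*(Z^Z)
    print(collapse_summands([("X^Y", 2.0), ("X^Y", 1.0), ("Z^Z", 0.5)]))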
Geunwoo-Jeon/InvCompress
|
[
"be8e32d663bfad9adaf497e723c9d65b5a0f21ee",
"be8e32d663bfad9adaf497e723c9d65b5a0f21ee",
"be8e32d663bfad9adaf497e723c9d65b5a0f21ee",
"be8e32d663bfad9adaf497e723c9d65b5a0f21ee"
] |
[
"codes/compressai/utils/bench/codecs.py",
"codes/tests/test_entropy_models.py",
"codes/compressai/models/ours.py",
"codes/quan/quantizer/ste_lsq.py"
] |
[
"# Copyright 2020 InterDigital Communications, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport io\nimport os\nimport platform\nimport subprocess\nimport sys\nimport time\n\nfrom tempfile import mkstemp\nfrom typing import Tuple, Union\n\nimport numpy as np\nimport PIL\nimport PIL.Image as Image\nimport torch\n\nfrom pytorch_msssim import ms_ssim\n\nfrom compressai.transforms.functional import rgb2ycbcr, ycbcr2rgb\n\n# from torchvision.datasets.folder\nIMG_EXTENSIONS = (\n \".jpg\",\n \".jpeg\",\n \".png\",\n \".ppm\",\n \".bmp\",\n \".pgm\",\n \".tif\",\n \".tiff\",\n \".webp\",\n)\n\n\ndef filesize(filepath: str) -> int:\n \"\"\"Return file size in bits of `filepath`.\"\"\"\n if not os.path.isfile(filepath):\n raise ValueError(f'Invalid file \"{filepath}\".')\n return os.stat(filepath).st_size\n\n\ndef read_image(filepath: str, mode: str = \"RGB\") -> np.array:\n \"\"\"Return PIL image in the specified `mode` format. \"\"\"\n if not os.path.isfile(filepath):\n raise ValueError(f'Invalid file \"{filepath}\".')\n return Image.open(filepath).convert(mode)\n\n\ndef compute_metrics(\n a: Union[np.array, Image.Image],\n b: Union[np.array, Image.Image],\n max_val: float = 255.0,\n) -> Tuple[float, float]:\n \"\"\"Returns PSNR and MS-SSIM between images `a` and `b`. 
\"\"\"\n if isinstance(a, Image.Image):\n a = np.asarray(a)\n if isinstance(b, Image.Image):\n b = np.asarray(b)\n\n a = torch.from_numpy(a.copy()).float().unsqueeze(0)\n if a.size(3) == 3:\n a = a.permute(0, 3, 1, 2)\n b = torch.from_numpy(b.copy()).float().unsqueeze(0)\n if b.size(3) == 3:\n b = b.permute(0, 3, 1, 2)\n\n mse = torch.mean((a - b) ** 2).item()\n p = 20 * np.log10(max_val) - 10 * np.log10(mse)\n m = ms_ssim(a, b, data_range=max_val).item()\n return p, m\n\n\ndef run_command(cmd, ignore_returncodes=None):\n cmd = [str(c) for c in cmd]\n try:\n rv = subprocess.check_output(cmd)\n return rv.decode(\"ascii\")\n except subprocess.CalledProcessError as err:\n if ignore_returncodes is not None and err.returncode in ignore_returncodes:\n return err.output\n print(err.output.decode(\"utf-8\"))\n sys.exit(1)\n\n\ndef _get_ffmpeg_version():\n rv = run_command([\"ffmpeg\", \"-version\"])\n return rv.split()[2]\n\n\ndef _get_bpg_version(encoder_path):\n rv = run_command([encoder_path, \"-h\"], ignore_returncodes=[1])\n return rv.split()[4]\n\n\nclass Codec:\n \"\"\"Abstract base class\"\"\"\n\n _description = None\n\n def __init__(self, args):\n self._set_args(args)\n\n def _set_args(self, args):\n return args\n\n @classmethod\n def setup_args(cls, parser):\n pass\n\n @property\n def description(self):\n return self._description\n\n @property\n def name(self):\n raise NotImplementedError()\n\n def _load_img(self, img):\n return os.path.abspath(img)\n\n def _run(self, img, quality, *args, **kwargs):\n raise NotImplementedError()\n\n def run(self, img, quality, *args, **kwargs):\n img = self._load_img(img)\n return self._run(img, quality, *args, **kwargs)\n\n\nclass PillowCodec(Codec):\n \"\"\"Abastract codec based on Pillow bindings.\"\"\"\n\n fmt = None\n\n @property\n def name(self):\n raise NotImplementedError()\n\n def _load_img(self, img):\n return read_image(img)\n\n def _run(self, img, quality, return_rec=False, return_metrics=True):\n start = time.time()\n tmp = io.BytesIO()\n img.save(tmp, format=self.fmt, quality=int(quality))\n enc_time = time.time() - start\n tmp.seek(0)\n size = tmp.getbuffer().nbytes\n\n start = time.time()\n rec = Image.open(tmp)\n rec.load()\n dec_time = time.time() - start\n\n bpp_val = float(size) * 8 / (img.size[0] * img.size[1])\n\n out = {\n \"bpp\": bpp_val,\n \"encoding_time\": enc_time,\n \"decoding_time\": dec_time,\n }\n\n if return_metrics:\n psnr_val, msssim_val = compute_metrics(rec, img)\n out[\"psnr\"] = psnr_val\n out[\"ms-ssim\"] = msssim_val\n\n if return_rec:\n return out, rec\n return out\n\n\nclass JPEG(PillowCodec):\n \"\"\"Use libjpeg linked in Pillow\"\"\"\n\n fmt = \"jpeg\"\n _description = f\"JPEG. Pillow version {PIL.__version__}\"\n\n @property\n def name(self):\n return \"JPEG\"\n\n\nclass WebP(PillowCodec):\n \"\"\"Use libwebp linked in Pillow\"\"\"\n\n fmt = \"webp\"\n _description = f\"WebP. 
Pillow version {PIL.__version__}\"\n\n @property\n def name(self):\n return \"WebP\"\n\n\nclass BinaryCodec(Codec):\n \"\"\"Call a external binary.\"\"\"\n\n fmt = None\n\n def _run(self, img, quality, return_rec=False, return_metrics=True):\n fd0, png_filepath = mkstemp(suffix=\".png\")\n fd1, out_filepath = mkstemp(suffix=self.fmt)\n\n # Encode\n start = time.time()\n run_command(self._get_encode_cmd(img, quality, out_filepath))\n enc_time = time.time() - start\n size = filesize(out_filepath)\n\n # Decode\n start = time.time()\n run_command(self._get_decode_cmd(out_filepath, png_filepath))\n dec_time = time.time() - start\n\n # Read image\n img = read_image(img)\n rec = read_image(png_filepath)\n os.close(fd0)\n os.remove(png_filepath)\n os.close(fd1)\n os.remove(out_filepath)\n\n bpp_val = float(size) * 8 / (img.size[0] * img.size[1])\n\n out = {\n \"bpp\": bpp_val,\n \"encoding_time\": enc_time,\n \"decoding_time\": dec_time,\n }\n\n if return_metrics:\n psnr_val, msssim_val = compute_metrics(rec, img)\n out[\"psnr\"] = psnr_val\n out[\"ms-ssim\"] = msssim_val\n\n if return_rec:\n return out, rec\n return out\n\n def _get_encode_cmd(self, img, quality, out_filepath):\n raise NotImplementedError()\n\n def _get_decode_cmd(self, out_filepath, rec_filepath):\n raise NotImplementedError()\n\n\nclass JPEG2000(BinaryCodec):\n \"\"\"Use ffmpeg version.\n (Not built-in support in default Pillow builds)\n \"\"\"\n\n fmt = \".jp2\"\n\n @property\n def name(self):\n return \"JPEG2000\"\n\n @property\n def description(self):\n return f\"JPEG2000. ffmpeg version {_get_ffmpeg_version()}\"\n\n def _get_encode_cmd(self, img, quality, out_filepath):\n cmd = [\n \"ffmpeg\",\n \"-loglevel\",\n \"panic\",\n \"-y\",\n \"-i\",\n img,\n \"-vcodec\",\n \"jpeg2000\",\n \"-pix_fmt\",\n \"yuv444p\",\n \"-c:v\",\n \"libopenjpeg\",\n \"-compression_level\",\n quality,\n out_filepath,\n ]\n return cmd\n# jpeg2000\n def _get_decode_cmd(self, out_filepath, rec_filepath):\n cmd = [\"ffmpeg\", \"-loglevel\", \"panic\", \"-y\", \"-i\", out_filepath, rec_filepath]\n return cmd\n\n\nclass BPG(BinaryCodec):\n \"\"\"BPG from Fabrice Bellard.\"\"\"\n\n fmt = \".bpg\"\n\n @property\n def name(self):\n return (\n f\"BPG {self.bitdepth}b {self.subsampling_mode} {self.encoder} \"\n f\"{self.color_mode}\"\n )\n\n @property\n def description(self):\n return f\"BPG. 
BPG version {_get_bpg_version(self.encoder_path)}\"\n\n @classmethod\n def setup_args(cls, parser):\n super().setup_args(parser)\n parser.add_argument(\n \"-m\",\n choices=[\"420\", \"444\"],\n default=\"444\",\n help=\"subsampling mode (default: %(default)s)\",\n )\n parser.add_argument(\n \"-b\",\n choices=[\"8\", \"10\"],\n default=\"8\",\n help=\"bitdepth (default: %(default)s)\",\n )\n parser.add_argument(\n \"-c\",\n choices=[\"rgb\", \"ycbcr\"],\n default=\"ycbcr\",\n help=\"colorspace (default: %(default)s)\",\n )\n parser.add_argument(\n \"-e\",\n choices=[\"jctvc\", \"x265\"],\n default=\"x265\",\n help=\"HEVC implementation (default: %(default)s)\",\n )\n parser.add_argument(\"--encoder-path\", default=\"bpgenc\", help=\"BPG encoder path\")\n parser.add_argument(\"--decoder-path\", default=\"bpgdec\", help=\"BPG decoder path\")\n\n def _set_args(self, args):\n args = super()._set_args(args)\n self.color_mode = args.c\n self.encoder = args.e\n self.subsampling_mode = args.m\n self.bitdepth = args.b\n self.encoder_path = \"/home/felix/disk2/libbpg/bpgenc\" #args.encoder_path\n self.decoder_path = \"/home/felix/disk2/libbpg/bpgdec\"\n return args\n\n def _get_encode_cmd(self, img, quality, out_filepath):\n if not 0 <= quality <= 51:\n raise ValueError(f\"Invalid quality value: {quality} (0,51)\")\n cmd = [\n self.encoder_path,\n \"-o\",\n out_filepath,\n \"-q\",\n str(quality),\n \"-f\",\n self.subsampling_mode,\n \"-e\",\n self.encoder,\n \"-c\",\n self.color_mode,\n \"-b\",\n self.bitdepth,\n img,\n ]\n return cmd\n\n def _get_decode_cmd(self, out_filepath, rec_filepath):\n cmd = [self.decoder_path, \"-o\", rec_filepath, out_filepath]\n return cmd\n\n\nclass TFCI(BinaryCodec):\n \"\"\"Tensorflow image compression format from tensorflow/compression\"\"\"\n\n fmt = \".tfci\"\n _models = [\n \"bmshj2018-factorized-mse\",\n \"bmshj2018-hyperprior-mse\",\n \"mbt2018-mean-mse\",\n ]\n\n @property\n def description(self):\n return \"TFCI\"\n\n @property\n def name(self):\n return f\"{self.model}\"\n\n @classmethod\n def setup_args(cls, parser):\n super().setup_args(parser)\n parser.add_argument(\n \"-m\",\n \"--model\",\n choices=cls._models,\n default=cls._models[0],\n help=\"model architecture (default: %(default)s)\",\n )\n parser.add_argument(\n \"-p\",\n \"--path\",\n required=True,\n help=\"tfci python script path (default: %(default)s)\",\n )\n\n def _set_args(self, args):\n args = super()._set_args(args)\n self.model = args.model\n self.tfci_path = args.path\n return args\n\n def _get_encode_cmd(self, img, quality, out_filepath):\n if not 1 <= quality <= 8:\n raise ValueError(f\"Invalid quality value: {quality} (1, 8)\")\n cmd = [\n sys.executable,\n self.tfci_path,\n \"compress\",\n f\"{self.model}-{quality:d}\",\n img,\n out_filepath,\n ]\n return cmd\n\n def _get_decode_cmd(self, out_filepath, rec_filepath):\n cmd = [sys.executable, self.tfci_path, \"decompress\", out_filepath, rec_filepath]\n return cmd\n\n\ndef get_vtm_encoder_path(build_dir):\n system = platform.system()\n try:\n elfnames = {\"Darwin\": \"EncoderApp\", \"Linux\": \"EncoderAppStatic\"}\n return os.path.join(build_dir, elfnames[system])\n except KeyError as err:\n raise RuntimeError(f'Unsupported platform \"{system}\"') from err\n\n\ndef get_vtm_decoder_path(build_dir):\n system = platform.system()\n try:\n elfnames = {\"Darwin\": \"DecoderApp\", \"Linux\": \"DecoderAppStatic\"}\n return os.path.join(build_dir, elfnames[system])\n except KeyError as err:\n raise RuntimeError(f'Unsupported platform 
\"{system}\"') from err\n\n\nclass VTM(Codec):\n \"\"\"VTM: VVC reference software\"\"\"\n\n fmt = \".bin\"\n\n @property\n def description(self):\n return \"VTM\"\n\n @property\n def name(self):\n return \"VTM\"\n\n @classmethod\n def setup_args(cls, parser):\n super().setup_args(parser)\n parser.add_argument(\n \"-b\",\n \"--build-dir\",\n type=str,\n default = \"/home/felix/disk2/VVCSoftware_VTM/bin\",\n help=\"VTM build dir\",\n )\n parser.add_argument(\n \"-c\",\n \"--config\",\n type=str,\n default = \"/home/felix/disk2/VVCSoftware_VTM/cfg/encoder_intra_vtm.cfg\",\n help=\"VTM config file\",\n )\n parser.add_argument(\n \"--rgb\", action=\"store_true\", help=\"Use RGB color space (over YCbCr)\"\n )\n\n def _set_args(self, args):\n args = super()._set_args(args)\n self.encoder_path = get_vtm_encoder_path(args.build_dir)\n self.decoder_path = get_vtm_decoder_path(args.build_dir)\n self.config_path = args.config\n self.rgb = args.rgb\n return args\n\n def _run(self, img, quality, return_rec=False, return_metrics=True):\n if not 0 <= quality <= 63:\n raise ValueError(f\"Invalid quality value: {quality} (0,63)\")\n\n # Taking 8bit input for now\n bitdepth = 8\n\n # Convert input image to yuv 444 file\n arr = np.asarray(read_image(img))\n fd, yuv_path = mkstemp(suffix=\".yuv\")\n out_filepath = os.path.splitext(yuv_path)[0] + \".bin\"\n\n arr = arr.transpose((2, 0, 1)) # color channel first\n\n if not self.rgb:\n # convert rgb content to YCbCr\n rgb = torch.from_numpy(arr.copy()).float() / (2 ** bitdepth - 1)\n arr = np.clip(rgb2ycbcr(rgb).numpy(), 0, 1)\n arr = (arr * (2 ** bitdepth - 1)).astype(np.uint8)\n\n with open(yuv_path, \"wb\") as f:\n f.write(arr.tobytes())\n\n # Encode\n height, width = arr.shape[1:]\n cmd = [\n self.encoder_path,\n \"-i\",\n yuv_path,\n \"-c\",\n self.config_path,\n \"-q\",\n quality,\n \"-o\",\n \"/dev/null\",\n \"-b\",\n out_filepath,\n \"-wdt\",\n width,\n \"-hgt\",\n height,\n \"-fr\",\n \"1\",\n \"-f\",\n \"1\",\n \"--InputChromaFormat=444\",\n \"--InputBitDepth=8\",\n \"--ConformanceMode=1\",\n ]\n\n if self.rgb:\n cmd += [\n \"--InputColourSpaceConvert=RGBtoGBR\",\n \"--SNRInternalColourSpace=1\",\n \"--OutputInternalColourSpace=0\",\n ]\n start = time.time()\n run_command(cmd)\n enc_time = time.time() - start\n\n # cleanup encoder input\n os.close(fd)\n os.unlink(yuv_path)\n\n # Decode\n cmd = [self.decoder_path, \"-b\", out_filepath, \"-o\", yuv_path, \"-d\", 8]\n if self.rgb:\n cmd.append(\"--OutputInternalColourSpace=GBRtoRGB\")\n\n start = time.time()\n run_command(cmd)\n dec_time = time.time() - start\n\n # Compute PSNR\n rec_arr = np.fromfile(yuv_path, dtype=np.uint8)\n rec_arr = rec_arr.reshape(arr.shape)\n\n arr = arr.astype(np.float32) / (2 ** bitdepth - 1)\n rec_arr = rec_arr.astype(np.float32) / (2 ** bitdepth - 1)\n if not self.rgb:\n arr = ycbcr2rgb(torch.from_numpy(arr.copy())).numpy()\n rec_arr = ycbcr2rgb(torch.from_numpy(rec_arr.copy())).numpy()\n\n bpp = filesize(out_filepath) * 8.0 / (height * width)\n\n # Cleanup\n os.unlink(yuv_path)\n os.unlink(out_filepath)\n\n out = {\n \"bpp\": bpp,\n \"encoding_time\": enc_time,\n \"decoding_time\": dec_time,\n }\n\n if return_metrics:\n psnr_val, msssim_val = compute_metrics(arr, rec_arr, max_val=1.0)\n out[\"psnr\"] = psnr_val\n out[\"ms-ssim\"] = msssim_val\n\n if return_rec:\n rec = Image.fromarray(\n (rec_arr.clip(0, 1).transpose(1, 2, 0) * 255.0).astype(np.uint8)\n )\n return out, rec\n return out\n\n\nclass HM(Codec):\n \"\"\"HM: H.265/HEVC reference software\"\"\"\n\n fmt = 
\".bin\"\n\n @property\n def description(self):\n return \"HM\"\n\n @property\n def name(self):\n return \"HM\"\n\n @classmethod\n def setup_args(cls, parser):\n super().setup_args(parser)\n parser.add_argument(\n \"-b\",\n \"--build-dir\",\n type=str,\n required=True,\n help=\"HM build dir\",\n )\n parser.add_argument(\n \"-c\", \"--config\", type=str, required=True, help=\"HM config file\"\n )\n parser.add_argument(\n \"--rgb\", action=\"store_true\", help=\"Use RGB color space (over YCbCr)\"\n )\n\n def _set_args(self, args):\n args = super()._set_args(args)\n self.encoder_path = os.path.join(args.build_dir, \"TAppEncoderStatic\")\n self.decoder_path = os.path.join(args.build_dir, \"TAppDecoderStatic\")\n self.config_path = args.config\n self.rgb = args.rgb\n return args\n\n def _run(self, img, quality, return_rec=False, return_metrics=True):\n if not 0 <= quality <= 51:\n raise ValueError(f\"Invalid quality value: {quality} (0,51)\")\n\n # Convert input image to yuv 444 file\n arr = np.asarray(read_image(img))\n fd, yuv_path = mkstemp(suffix=\".yuv\")\n out_filepath = os.path.splitext(yuv_path)[0] + \".bin\"\n bitdepth = 8\n\n arr = arr.transpose((2, 0, 1)) # color channel first\n\n if not self.rgb:\n # convert rgb content to YCbCr\n rgb = torch.from_numpy(arr.copy()).float() / (2 ** bitdepth - 1)\n arr = np.clip(rgb2ycbcr(rgb).numpy(), 0, 1)\n arr = (arr * (2 ** bitdepth - 1)).astype(np.uint8)\n\n with open(yuv_path, \"wb\") as f:\n f.write(arr.tobytes())\n\n # Encode\n height, width = arr.shape[1:]\n cmd = [\n self.encoder_path,\n \"-i\",\n yuv_path,\n \"-c\",\n self.config_path,\n \"-q\",\n quality,\n \"-o\",\n \"/dev/null\",\n \"-b\",\n out_filepath,\n \"-wdt\",\n width,\n \"-hgt\",\n height,\n \"-fr\",\n \"1\",\n \"-f\",\n \"1\",\n \"--InputChromaFormat=444\",\n \"--InputBitDepth=8\",\n \"--SEIDecodedPictureHash\",\n \"--Level=5.1\",\n \"--CUNoSplitIntraACT=0\",\n \"--ConformanceMode=1\",\n ]\n\n if self.rgb:\n cmd += [\n \"--InputColourSpaceConvert=RGBtoGBR\",\n \"--SNRInternalColourSpace=1\",\n \"--OutputInternalColourSpace=0\",\n ]\n start = time.time()\n\n run_command(cmd)\n enc_time = time.time() - start\n\n # cleanup encoder input\n os.close(fd)\n os.unlink(yuv_path)\n\n # Decode\n cmd = [self.decoder_path, \"-b\", out_filepath, \"-o\", yuv_path, \"-d\", 8]\n\n if self.rgb:\n cmd.append(\"--OutputInternalColourSpace=GBRtoRGB\")\n\n start = time.time()\n run_command(cmd)\n dec_time = time.time() - start\n # Compute PSNR\n rec_arr = np.fromfile(yuv_path, dtype=np.uint8)\n rec_arr = rec_arr.reshape(arr.shape)\n arr = arr.astype(np.float32) / (2 ** bitdepth - 1)\n rec_arr = rec_arr.astype(np.float32) / (2 ** bitdepth - 1)\n if not self.rgb:\n arr = ycbcr2rgb(torch.from_numpy(arr.copy())).numpy()\n rec_arr = ycbcr2rgb(torch.from_numpy(rec_arr.copy())).numpy()\n\n bpp = filesize(out_filepath) * 8.0 / (height * width)\n\n # Cleanup\n os.unlink(yuv_path)\n os.unlink(out_filepath)\n\n out = {\n \"bpp\": bpp,\n \"encoding_time\": enc_time,\n \"decoding_time\": dec_time,\n }\n\n if return_metrics:\n psnr_val, msssim_val = compute_metrics(arr, rec_arr, max_val=1.0)\n out[\"psnr\"] = psnr_val\n out[\"ms-ssim\"] = msssim_val\n\n if return_rec:\n rec = Image.fromarray(\n (rec_arr.clip(0, 1).transpose(1, 2, 0) * 255.0).astype(np.uint8)\n )\n return out, rec\n return out\n\n\nclass AV1(Codec):\n \"\"\"AV1: AOM reference software\"\"\"\n\n fmt = \".webm\"\n\n @property\n def description(self):\n return \"AV1\"\n\n @property\n def name(self):\n return \"AV1\"\n\n @classmethod\n def 
setup_args(cls, parser):\n super().setup_args(parser)\n parser.add_argument(\n \"-b\",\n \"--build-dir\",\n type=str,\n required=True,\n help=\"AOM binaries dir\",\n )\n\n def _set_args(self, args):\n args = super()._set_args(args)\n self.encoder_path = os.path.join(args.build_dir, \"aomenc\")\n self.decoder_path = os.path.join(args.build_dir, \"aomdec\")\n return args\n\n def _run(self, img, quality, return_rec=False, return_metrics=True):\n if not 0 <= quality <= 63:\n raise ValueError(f\"Invalid quality value: {quality} (0,63)\")\n\n # Convert input image to yuv 444 file\n arr = np.asarray(read_image(img))\n fd, yuv_path = mkstemp(suffix=\".yuv\")\n out_filepath = os.path.splitext(yuv_path)[0] + \".webm\"\n bitdepth = 8\n\n arr = arr.transpose((2, 0, 1)) # color channel first\n\n # convert rgb content to YCbCr\n rgb = torch.from_numpy(arr.copy()).float() / (2 ** bitdepth - 1)\n arr = np.clip(rgb2ycbcr(rgb).numpy(), 0, 1)\n arr = (arr * (2 ** bitdepth - 1)).astype(np.uint8)\n\n with open(yuv_path, \"wb\") as f:\n f.write(arr.tobytes())\n\n # Encode\n height, width = arr.shape[1:]\n cmd = [\n self.encoder_path,\n \"-w\",\n width,\n \"-h\",\n height,\n \"--fps=1/1\",\n \"--limit=1\",\n \"--input-bit-depth=8\",\n \"--cpu-used=0\",\n \"--threads=1\",\n \"--passes=2\",\n \"--end-usage=q\",\n \"--cq-level=\" + str(quality),\n \"--i444\",\n \"--skip=0\",\n \"--tune=psnr\",\n \"--psnr\",\n \"--bit-depth=8\",\n \"-o\",\n out_filepath,\n yuv_path,\n ]\n\n start = time.time()\n run_command(cmd)\n enc_time = time.time() - start\n\n # cleanup encoder input\n os.close(fd)\n os.unlink(yuv_path)\n\n # Decode\n cmd = [\n self.decoder_path,\n out_filepath,\n \"-o\",\n yuv_path,\n \"--rawvideo\",\n \"--output-bit-depth=8\",\n ]\n\n start = time.time()\n run_command(cmd)\n dec_time = time.time() - start\n\n # Compute PSNR\n rec_arr = np.fromfile(yuv_path, dtype=np.uint8)\n rec_arr = rec_arr.reshape(arr.shape)\n\n arr = arr.astype(np.float32) / (2 ** bitdepth - 1)\n rec_arr = rec_arr.astype(np.float32) / (2 ** bitdepth - 1)\n\n arr = ycbcr2rgb(torch.from_numpy(arr.copy())).numpy()\n rec_arr = ycbcr2rgb(torch.from_numpy(rec_arr.copy())).numpy()\n\n bpp = filesize(out_filepath) * 8.0 / (height * width)\n\n # Cleanup\n os.unlink(yuv_path)\n os.unlink(out_filepath)\n\n out = {\n \"bpp\": bpp,\n \"encoding_time\": enc_time,\n \"decoding_time\": dec_time,\n }\n\n if return_metrics:\n psnr_val, msssim_val = compute_metrics(arr, rec_arr, max_val=1.0)\n out[\"psnr\"] = psnr_val\n out[\"ms-ssim\"] = msssim_val\n\n if return_rec:\n rec = Image.fromarray(\n (rec_arr.clip(0, 1).transpose(1, 2, 0) * 255.0).astype(np.uint8)\n )\n return out, rec\n return out\n",
"# Copyright 2020 InterDigital Communications, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport pytest\nimport torch\n\nfrom compressai.entropy_models import (\n EntropyBottleneck,\n EntropyModel,\n GaussianConditional,\n)\nfrom compressai.models.priors import FactorizedPrior\nfrom compressai.zoo import bmshj2018_factorized, bmshj2018_hyperprior\n\n\[email protected]\ndef entropy_model():\n return EntropyModel()\n\n\nclass TestEntropyModel:\n def test_quantize_invalid(self, entropy_model):\n x = torch.rand(1, 3, 4, 4)\n with pytest.raises(ValueError):\n entropy_model.quantize(x, mode=\"toto\")\n\n def test_quantize_noise(self, entropy_model):\n x = torch.rand(1, 3, 4, 4)\n y = entropy_model.quantize(x, \"noise\")\n\n assert y.shape == x.shape\n assert ((y - x) <= 0.5).all()\n assert ((y - x) >= -0.5).all()\n assert (y != torch.round(x)).any()\n\n def test__quantize(self, entropy_model):\n x = torch.rand(1, 3, 4, 4)\n s = torch.rand(1).item()\n torch.manual_seed(s)\n y0 = entropy_model.quantize(x, \"noise\")\n torch.manual_seed(s)\n\n with pytest.warns(UserWarning):\n y1 = entropy_model._quantize(x, \"noise\")\n assert (y0 == y1).all()\n\n def test_quantize_symbols(self, entropy_model):\n x = torch.rand(1, 3, 4, 4)\n y = entropy_model.quantize(x, \"symbols\")\n\n assert y.shape == x.shape\n assert (y == torch.round(x).int()).all()\n\n def test_quantize_dequantize(self, entropy_model):\n x = torch.rand(1, 3, 4, 4)\n means = torch.rand(1, 3, 4, 4)\n y = entropy_model.quantize(x, \"dequantize\", means)\n\n assert y.shape == x.shape\n assert (y == torch.round(x - means) + means).all()\n\n def test_dequantize(self, entropy_model):\n x = torch.randint(-32, 32, (1, 3, 4, 4))\n means = torch.rand(1, 3, 4, 4)\n y = entropy_model.dequantize(x, means)\n\n assert y.shape == x.shape\n assert y.type() == means.type()\n\n with pytest.warns(UserWarning):\n yy = entropy_model._dequantize(x, means)\n assert (yy == y).all()\n\n def test_forward(self, entropy_model):\n with pytest.raises(NotImplementedError):\n entropy_model()\n\n def test_invalid_coder(self):\n with pytest.raises(ValueError):\n entropy_model = EntropyModel(entropy_coder=\"huffman\")\n\n with pytest.raises(ValueError):\n entropy_model = EntropyModel(entropy_coder=0xFF)\n\n def test_invalid_inputs(self, entropy_model):\n with pytest.raises(TypeError):\n entropy_model.compress(torch.rand(1, 3))\n with pytest.raises(ValueError):\n entropy_model.compress(torch.rand(1, 3), torch.rand(2, 3))\n with pytest.raises(ValueError):\n entropy_model.compress(torch.rand(1, 3, 1, 1), torch.rand(2, 3))\n\n def test_invalid_cdf(self, entropy_model):\n x = torch.rand(1, 32, 16, 16)\n indexes = torch.rand(1, 32, 16, 16)\n with pytest.raises(ValueError):\n entropy_model.compress(x, indexes)\n\n def test_invalid_cdf_length(self, entropy_model):\n x = torch.rand(1, 32, 16, 16)\n indexes = torch.rand(1, 32, 16, 16)\n entropy_model._quantized_cdf.resize_(32, 1)\n\n with pytest.raises(ValueError):\n entropy_model.compress(x, indexes)\n\n 
entropy_model._cdf_length.resize_(32, 1)\n with pytest.raises(ValueError):\n entropy_model.compress(x, indexes)\n\n def test_invalid_offsets(self, entropy_model):\n x = torch.rand(1, 32, 16, 16)\n indexes = torch.rand(1, 32, 16, 16)\n entropy_model._quantized_cdf.resize_(32, 1)\n entropy_model._cdf_length.resize_(32)\n with pytest.raises(ValueError):\n entropy_model.compress(x, indexes)\n\n def test_invalid_decompress(self, entropy_model):\n with pytest.raises(TypeError):\n entropy_model.decompress([\"ssss\"])\n\n with pytest.raises(ValueError):\n entropy_model.decompress(\"sss\", torch.rand(1, 3, 4, 4))\n\n with pytest.raises(ValueError):\n entropy_model.decompress([\"sss\"], torch.rand(1, 4, 4))\n\n with pytest.raises(ValueError):\n entropy_model.decompress([\"sss\"], torch.rand(2, 4, 4))\n\n with pytest.raises(ValueError):\n entropy_model.decompress([\"sss\"], torch.rand(1, 4, 4), torch.rand(2, 4, 4))\n\n\nclass TestEntropyBottleneck:\n def test_forward_training(self):\n entropy_bottleneck = EntropyBottleneck(128)\n x = torch.rand(1, 128, 32, 32)\n y, y_likelihoods = entropy_bottleneck(x)\n\n assert isinstance(entropy_bottleneck, EntropyModel)\n assert y.shape == x.shape\n assert y_likelihoods.shape == x.shape\n\n assert ((y - x) <= 0.5).all()\n assert ((y - x) >= -0.5).all()\n assert (y != torch.round(x)).any()\n\n def test_forward_inference(self):\n entropy_bottleneck = EntropyBottleneck(128)\n entropy_bottleneck.eval()\n x = torch.rand(1, 128, 32, 32)\n y, y_likelihoods = entropy_bottleneck(x)\n\n assert y.shape == x.shape\n assert y_likelihoods.shape == x.shape\n\n assert (y == torch.round(x)).all()\n\n def test_loss(self):\n entropy_bottleneck = EntropyBottleneck(128)\n loss = entropy_bottleneck.loss()\n\n assert len(loss.size()) == 0\n assert loss.numel() == 1\n\n def test_scripting(self):\n entropy_bottleneck = EntropyBottleneck(128)\n x = torch.rand(1, 128, 32, 32)\n\n torch.manual_seed(32)\n y0 = entropy_bottleneck(x)\n\n m = torch.jit.script(entropy_bottleneck)\n\n torch.manual_seed(32)\n y1 = m(x)\n\n assert torch.allclose(y0[0], y1[0])\n assert torch.all(y1[1] == 0) # not yet supported\n\n def test_update(self):\n # get a pretrained model\n net = bmshj2018_factorized(quality=1, pretrained=True).eval()\n assert not net.update()\n assert not net.update(force=False)\n assert net.update(force=True)\n\n def test_script(self):\n eb = EntropyBottleneck(32)\n eb = torch.jit.script(eb)\n x = torch.rand(1, 32, 4, 4)\n x_q, likelihoods = eb(x)\n assert (likelihoods == torch.zeros_like(x_q)).all()\n\n\nclass TestGaussianConditional:\n def test_invalid_scale_table(self):\n with pytest.raises(ValueError):\n GaussianConditional(1)\n\n with pytest.raises(ValueError):\n GaussianConditional([])\n\n with pytest.raises(ValueError):\n GaussianConditional(())\n\n with pytest.raises(ValueError):\n GaussianConditional(torch.rand(10))\n\n with pytest.raises(ValueError):\n GaussianConditional([2, 1])\n\n with pytest.raises(ValueError):\n GaussianConditional([0, 1, 2])\n\n with pytest.raises(ValueError):\n GaussianConditional([], scale_bound=None)\n\n with pytest.raises(ValueError):\n GaussianConditional([], scale_bound=-0.1)\n\n def test_forward_training(self):\n gaussian_conditional = GaussianConditional(None)\n x = torch.rand(1, 128, 32, 32)\n scales = torch.rand(1, 128, 32, 32)\n y, y_likelihoods = gaussian_conditional(x, scales)\n\n assert isinstance(gaussian_conditional, EntropyModel)\n assert y.shape == x.shape\n assert y_likelihoods.shape == x.shape\n\n assert ((y - x) <= 0.5).all()\n assert 
((y - x) >= -0.5).all()\n assert (y != torch.round(x)).any()\n\n def test_forward_inference(self):\n gaussian_conditional = GaussianConditional(None)\n gaussian_conditional.eval()\n x = torch.rand(1, 128, 32, 32)\n scales = torch.rand(1, 128, 32, 32)\n y, y_likelihoods = gaussian_conditional(x, scales)\n\n assert y.shape == x.shape\n assert y_likelihoods.shape == x.shape\n\n assert (y == torch.round(x)).all()\n\n def test_forward_training_mean(self):\n gaussian_conditional = GaussianConditional(None)\n x = torch.rand(1, 128, 32, 32)\n scales = torch.rand(1, 128, 32, 32)\n means = torch.rand(1, 128, 32, 32)\n y, y_likelihoods = gaussian_conditional(x, scales, means)\n\n assert y.shape == x.shape\n assert y_likelihoods.shape == x.shape\n\n assert ((y - x) <= 0.5).all()\n assert ((y - x) >= -0.5).all()\n assert (y != torch.round(x)).any()\n\n def test_forward_inference_mean(self):\n gaussian_conditional = GaussianConditional(None)\n gaussian_conditional.eval()\n x = torch.rand(1, 128, 32, 32)\n scales = torch.rand(1, 128, 32, 32)\n means = torch.rand(1, 128, 32, 32)\n y, y_likelihoods = gaussian_conditional(x, scales, means)\n\n assert y.shape == x.shape\n assert y_likelihoods.shape == x.shape\n\n assert (y == torch.round(x - means) + means).all()\n\n def test_scripting(self):\n gaussian_conditional = GaussianConditional(None)\n x = torch.rand(1, 128, 32, 32)\n scales = torch.rand(1, 128, 32, 32)\n means = torch.rand(1, 128, 32, 32)\n\n torch.manual_seed(32)\n y0 = gaussian_conditional(x, scales, means)\n\n m = torch.jit.script(gaussian_conditional)\n\n torch.manual_seed(32)\n y1 = m(x, scales, means)\n\n assert torch.allclose(y0[0], y1[0])\n assert torch.allclose(y0[1], y1[1])\n\n def test_update(self):\n # get a pretrained model\n net = bmshj2018_hyperprior(quality=1, pretrained=True).eval()\n assert not net.update()\n assert not net.update(force=False)\n\n quantized_cdf = net.gaussian_conditional._quantized_cdf\n offset = net.gaussian_conditional._offset\n cdf_length = net.gaussian_conditional._cdf_length\n assert net.update(force=True)\n\n def approx(a, b):\n return ((a - b).abs() <= 2).all()\n\n assert approx(net.gaussian_conditional._cdf_length, cdf_length)\n assert approx(net.gaussian_conditional._offset, offset)\n assert approx(net.gaussian_conditional._quantized_cdf, quantized_cdf)\n",
"import numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.nn.init as init\nimport torch.nn.functional as F\nimport warnings\nfrom .priors import JointAutoregressiveHierarchicalPriors\nfrom .our_utils import *\nfrom compressai.layers import *\nfrom .waseda import Cheng2020Anchor\n\nclass InvCompress(Cheng2020Anchor):\n def __init__(self, N=192, **kwargs):\n super().__init__(N=N)\n self.g_a = None\n self.g_s = None\n self.enh = EnhModule(64)\n self.inv = InvComp(M=N)\n self.attention = AttModule(N)\n \n\n def g_a_func(self, x):\n x = self.enh(x)\n x = self.inv(x)\n x = self.attention(x)\n return x\n\n def g_s_func(self, x):\n x = self.attention(x, rev = True)\n x = self.inv(x, rev=True)\n x = self.enh(x, rev=True)\n return x\n\n def forward(self, x):\n y = self.g_a_func(x)\n z = self.h_a(y)\n z_hat, z_likelihoods = self.entropy_bottleneck(z)\n params = self.h_s(z_hat)\n\n y_hat = self.gaussian_conditional.quantize(\n y, \"noise\" if self.training else \"dequantize\"\n )\n ctx_params = self.context_prediction(y_hat)\n gaussian_params = self.entropy_parameters(\n torch.cat((params, ctx_params), dim=1)\n )\n scales_hat, means_hat = gaussian_params.chunk(2, 1)\n _, y_likelihoods = self.gaussian_conditional(y, scales_hat, means=means_hat)\n x_hat = self.g_s_func(y_hat)\n\n return {\n \"x_hat\": x_hat,\n \"likelihoods\": {\"y\": y_likelihoods, \"z\": z_likelihoods}\n }\n\n @classmethod\n def from_state_dict(cls, state_dict):\n \"\"\"Return a new model instance from `state_dict`.\"\"\"\n N = state_dict[\"h_a.0.weight\"].size(0)\n net = cls(N)\n net.load_state_dict(state_dict)\n return net\n\n def compress(self, x):\n if next(self.parameters()).device != torch.device(\"cpu\"):\n warnings.warn(\n \"Inference on GPU is not recommended for the autoregressive \"\n \"models (the entropy coder is run sequentially on CPU).\"\n )\n\n y = self.g_a_func(x)\n z = self.h_a(y)\n\n z_strings = self.entropy_bottleneck.compress(z)\n z_hat = self.entropy_bottleneck.decompress(z_strings, z.size()[-2:])\n\n params = self.h_s(z_hat)\n\n s = 4 # scaling factor between z and y\n kernel_size = 5 # context prediction kernel size\n padding = (kernel_size - 1) // 2\n\n y_height = z_hat.size(2) * s\n y_width = z_hat.size(3) * s\n\n y_hat = F.pad(y, (padding, padding, padding, padding))\n\n y_strings = []\n for i in range(y.size(0)):\n string = self._compress_ar(\n y_hat[i : i + 1],\n params[i : i + 1],\n y_height,\n y_width,\n kernel_size,\n padding,\n )\n y_strings.append(string)\n\n return {\"strings\": [y_strings, z_strings], \"shape\": z.size()[-2:], \"y\": y}\n\n def decompress(self, strings, shape):\n assert isinstance(strings, list) and len(strings) == 2\n\n if next(self.parameters()).device != torch.device(\"cpu\"):\n warnings.warn(\n \"Inference on GPU is not recommended for the autoregressive \"\n \"models (the entropy coder is run sequentially on CPU).\"\n )\n\n z_hat = self.entropy_bottleneck.decompress(strings[1], shape)\n params = self.h_s(z_hat)\n\n s = 4 # scaling factor between z and y\n kernel_size = 5 # context prediction kernel size\n padding = (kernel_size - 1) // 2\n\n y_height = z_hat.size(2) * s\n y_width = z_hat.size(3) * s\n\n y_hat = torch.zeros(\n (z_hat.size(0), self.M, y_height + 2 * padding, y_width + 2 * padding),\n device=z_hat.device,\n )\n\n for i, y_string in enumerate(strings[0]):\n self._decompress_ar(\n y_string,\n y_hat[i : i + 1],\n params[i : i + 1],\n y_height,\n y_width,\n kernel_size,\n padding,\n )\n\n y_hat = F.pad(y_hat, (-padding, -padding, -padding, -padding))\n 
x_hat = self.g_s_func(y_hat).clamp_(0, 1)\n return {\"x_hat\": x_hat}\n\n",
"import torch as t\n\nfrom .lsq import LsqQuan\n\n\ndef grad_scale(x, scale):\n y = x\n y_grad = x * scale\n return (y - y_grad).detach() + y_grad\n\n\ndef round_ste(x):\n y = x.round()\n y_grad = 512 * (x - x.round()).pow(10)\n return (y - y_grad).detach() + y_grad\n\n\nclass STELsq(LsqQuan):\n def forward(self, x):\n if self.per_channel:\n s_grad_scale = 1.0 / ((self.thd_pos * x.numel()) ** 0.5)\n else:\n s_grad_scale = 1.0 / ((self.thd_pos * x.numel()) ** 0.5)\n s_scale = grad_scale(self.s, s_grad_scale) # s와 같은데 grad scale 적용된 버전\n x = x / s_scale\n x = t.clamp(x, self.thd_neg, self.thd_pos)\n x = round_ste(x)\n x = x * s_scale\n return x\n"
] |
[
[
"numpy.asarray",
"torch.mean",
"numpy.log10",
"numpy.fromfile"
],
[
"torch.jit.script",
"torch.all",
"torch.randint",
"torch.manual_seed",
"torch.round",
"torch.zeros_like",
"torch.rand",
"torch.allclose"
],
[
"torch.device",
"torch.nn.functional.pad",
"torch.cat"
],
[
"torch.clamp"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
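The ste_lsq.py entry in this row builds its quantizer around the detach trick (y - y_grad).detach() + y_grad, which decouples the value produced in the forward pass from the path used for gradients. The sketch below shows the plain straight-through rounding variant of that trick; note the repository's round_ste uses a different surrogate gradient, 512 * (x - round(x))**10, which is deliberately not reproduced here.

    import torch

    def round_ste(x: torch.Tensor) -> torch.Tensor:
        # Forward pass returns round(x); backward pass behaves like the identity,
        # because the rounding residual is detached from the autograd graph.
        return (x.round() - x).detach() + x

    x = torch.tensor([0.2, 0.7, 1.4], requires_grad=True)
    y = round_ste(x)
    y.sum().backward()
    print(y)       # rounded values: 0., 1., 1.
    print(x.grad)  # identity gradient: 1., 1., 1.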
kacperkan/ucsgnet
|
[
"c13b204361e59c5b92a7983e929305e17a906b65",
"c13b204361e59c5b92a7983e929305e17a906b65"
] |
[
"ucsgnet/ucsgnet/train_2d.py",
"ucsgnet/ucsgnet/net_2d.py"
] |
[
"import argparse\nimport json\nimport os\n\nimport pytorch_lightning as pl\nimport torch\nfrom pytorch_lightning import Trainer\n\nfrom ucsgnet.callbacks import ModelCheckpoint\nfrom ucsgnet.loggers import TensorBoardLogger\nfrom ucsgnet.ucsgnet.net_2d import Net\n\nMAX_NB_EPOCHS = 251\n\n\ndef get_args() -> argparse.Namespace:\n parser = argparse.ArgumentParser(\n description=\"Training code for a CSG-Net.\", add_help=False\n )\n parser.add_argument(\n \"--train\",\n dest=\"train_split_config\",\n type=str,\n help=\"Path to training split of samples with one of generators\",\n required=True,\n )\n parser.add_argument(\n \"--valid\",\n dest=\"valid_split_config\",\n type=str,\n help=\"Path to training split of samples generated with of generators\",\n required=True,\n )\n parser.add_argument(\n \"--processed\",\n dest=\"processed_data_path\",\n type=str,\n help=\"Base folder of processed data\",\n required=True,\n )\n parser.add_argument(\n \"--pretrained_path\",\n dest=\"checkpoint_path\",\n type=str,\n help=(\n \"If provided, then it assumes pretraining and continuation of \"\n \"training\"\n ),\n default=\"\",\n )\n parser.add_argument(\n \"--experiment_name\",\n type=str,\n help=\"Name of the experiment\",\n default=\"test\",\n )\n parser = Net.add_model_specific_args(parser)\n return parser.parse_args()\n\n\ndef training(\n model: pl.LightningModule, experiment_name: str, args: argparse.Namespace\n):\n model_saving_path = os.path.join(\"models\", experiment_name, \"initial\")\n if not os.path.exists(model_saving_path):\n os.makedirs(model_saving_path, exist_ok=True)\n\n with open(os.path.join(model_saving_path, \"params.json\"), \"w\") as f:\n json.dump(vars(args), f)\n logger = TensorBoardLogger(\n os.path.join(model_saving_path, \"logs\"), log_train_every_n_step=200\n )\n\n checkpointer = ModelCheckpoint(\n filepath=os.path.join(model_saving_path, \"ckpts\", \"model.ckpt\"),\n monitor=\"valid_loss\",\n period=10,\n )\n\n trainer = Trainer(\n gpus=1 if torch.cuda.is_available() else 0,\n distributed_backend=\"dp\",\n default_save_path=model_saving_path,\n logger=logger,\n max_epochs=MAX_NB_EPOCHS,\n early_stop_callback=None,\n checkpoint_callback=checkpointer,\n progress_bar_refresh_rate=1,\n )\n # fitting\n trainer.fit(model)\n\n\ndef train(args: argparse.Namespace):\n model = Net(args)\n model.build(\n args.train_split_config,\n args.valid_split_config,\n args.processed_data_path,\n )\n\n if args.checkpoint_path and len(args.checkpoint_path) > 0:\n print(f\"Loading pretrained model from: {args.checkpoint_path}\")\n model = model.load_from_checkpoint(args.checkpoint_path)\n training(model, args.experiment_name + \"_main\", args)\n\n\nif __name__ == \"__main__\":\n train(get_args())\n",
"import argparse\nimport typing as t\nfrom collections import OrderedDict, defaultdict\n\nimport numpy as np\nimport pytorch_lightning as pl\nimport torch\nimport torch.optim as optim\nimport torchvision\nfrom torch.optim.optimizer import Optimizer\nfrom torch.utils.data import DataLoader\n\nfrom ucsgnet.common import THREADS, TrainingStage\nfrom ucsgnet.dataset import SimpleDataset, get_simple_2d_transforms\nfrom ucsgnet.ucsgnet.csg_layers import RelationLayer\nfrom ucsgnet.ucsgnet.extractors import Decoder, Extractor2D\nfrom ucsgnet.ucsgnet.losses import get_composite_loss\nfrom ucsgnet.ucsgnet.metrics import mse\nfrom ucsgnet.ucsgnet.model import CSGNet\nfrom ucsgnet.ucsgnet.shape_evaluators import create_compound_evaluator\nfrom ucsgnet.utils import get_simple_dataset_paths_from_config\n\n\nclass Net(pl.LightningModule):\n def __init__(self, hparams: argparse.Namespace):\n super().__init__()\n self.hparams = hparams\n self.net = CSGNet(\n Extractor2D(),\n Decoder(),\n create_compound_evaluator(\n self.hparams.use_planes,\n self.hparams.shapes_per_type,\n self.hparams.num_dimensions,\n ),\n self.hparams.shapes_per_type,\n self.hparams.out_shapes_per_layer,\n self.hparams.weight_binarizing_threshold,\n self.hparams.num_csg_layers,\n )\n\n self.train_split_config_: t.Optional[str] = None\n self.valid_split_config_: t.Optional[str] = None\n self.data_path_: t.Optional[str] = None\n\n self.__optimizers: t.Optional[t.Sequence[Optimizer]] = None\n self._base_mode = TrainingStage.INITIAL_TRAINING\n\n (\n trainable_params_count,\n non_trainable_params_count,\n ) = self.num_of_parameters\n\n print(\"Num of trainable params: {}\".format(trainable_params_count))\n print(\n \"Num of not trainable params: {}\".format(\n non_trainable_params_count\n )\n )\n\n def turn_fine_tuning_mode(self):\n self.switch_mode(TrainingStage.FINE_TUNING)\n\n def turn_initial_training_mode(self):\n self.switch_mode(TrainingStage.INITIAL_TRAINING)\n\n def switch_mode(self, new_mode: TrainingStage):\n self._base_mode = new_mode\n self.net.switch_mode(new_mode)\n\n def build(\n self, train_split_config: str, valid_split_config: str, data_path: str\n ):\n self.train_split_config_ = train_split_config\n self.valid_split_config_ = valid_split_config\n self.data_path_ = data_path\n\n @property\n def num_of_parameters(self) -> t.Tuple[int, int]:\n total_trainable_params = 0\n total_nontrainable_params = 0\n\n for param in self.parameters(recurse=True):\n if param.requires_grad:\n total_trainable_params += np.prod(param.shape)\n else:\n total_nontrainable_params += np.prod(param.shape)\n return total_trainable_params, total_nontrainable_params\n\n def forward(\n self,\n images: torch.Tensor,\n points: torch.Tensor,\n *,\n return_distances_to_base_shapes: bool = False,\n return_intermediate_output_csg: bool = False,\n return_scaled_distances_to_shapes: bool = False,\n retain_latent_code: bool = False,\n retain_shape_params: bool = False,\n ) -> t.Union[torch.Tensor, t.Tuple[torch.Tensor, ...]]:\n return self.net(\n images,\n points,\n return_distances_to_base_shapes=return_distances_to_base_shapes,\n return_intermediate_output_csg=return_intermediate_output_csg,\n return_scaled_distances_to_shapes=return_scaled_distances_to_shapes,\n retain_shape_params=retain_shape_params,\n retain_latent_code=retain_latent_code,\n )\n\n def training_step(\n self,\n batch: t.Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor],\n batch_idx: int,\n ) -> t.Dict[str, t.Any]:\n self.logger.train()\n image, points, trues, bounding_volume = 
batch\n predictions, distances_to_base_shapes, intermediate_results = self(\n image,\n points,\n return_distances_to_base_shapes=True,\n return_intermediate_output_csg=True,\n )\n total_loss, partial_losses_dict = get_composite_loss(\n predictions,\n trues,\n bounding_volume,\n points,\n intermediate_results,\n self.net.csg_layers_,\n self.net.evaluator_,\n self._base_mode,\n self.net.use_planes,\n self.global_step,\n self.net.scaler_,\n )\n\n if self.hparams.use_planes:\n self.logger.log_histogram(\n f\"planes_params\",\n self.net.evaluator_.last_predicted_parameters.reshape((-1,)),\n self.global_step,\n )\n else:\n for j, (name, tensor) in enumerate(\n self.net.evaluator_.get_all_last_predicted_parameters_of_shapes()\n ):\n self.logger.log_histogram(\n f\"evaluate_{name}_0_{j}\",\n tensor.reshape((-1,)),\n self.global_step,\n )\n\n translation_vectors = (\n self.net.evaluator_.get_all_translation_vectors()\n )\n self.logger.log_histogram(\n f\"translation_x_0\",\n translation_vectors[..., 0].reshape((-1,)),\n self.global_step,\n )\n self.logger.log_histogram(\n f\"translation_y_0\",\n translation_vectors[..., 1].reshape((-1,)),\n self.global_step,\n )\n if self.hparams.num_dimensions == 3:\n self.logger.log_histogram(\n f\"translation_z_0\",\n translation_vectors[..., 2].reshape((-1,)),\n self.global_step,\n )\n\n for i, layer in enumerate(self.net.csg_layers_): # type: RelationLayer\n self.logger.log_histogram(\n f\"rel_layer_dist_temp_{i}/vals\",\n layer.temperature_.reshape((-1,)),\n self.global_step,\n )\n\n self.logger.log_histogram(\n \"scaler/m\", self.net.scaler_.m.reshape((-1,)), self.global_step\n )\n\n tqdm_dict = {\n \"train_loss\": total_loss,\n \"train_predictions_avg\": predictions.mean(),\n **{\n \"train_\" + key: value\n for key, value in partial_losses_dict.items()\n },\n **{\n f\"lr_{i}\": torch.tensor(\n optimizer.param_groups[0][\"lr\"], dtype=torch.float\n )\n for i, optimizer in enumerate(self.__optimizers)\n },\n }\n\n logger_dict = {\n \"loss\": total_loss,\n \"predictions_avg\": predictions.mean(),\n **partial_losses_dict,\n **{\n f\"lr_{i}\": torch.tensor(\n optimizer.param_groups[0][\"lr\"], dtype=torch.float\n )\n for i, optimizer in enumerate(self.__optimizers)\n },\n }\n\n output = OrderedDict(\n {\"loss\": total_loss, \"progress_bar\": tqdm_dict, \"log\": logger_dict}\n )\n\n return output\n\n def validation_step(\n self,\n batch: t.Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor],\n batch_idx: int,\n ) -> t.Dict[str, t.Any]:\n image, points, trues, bounding_volume = batch\n predictions, distances_to_base_shapes, intermediate_results = self(\n image,\n points,\n return_distances_to_base_shapes=True,\n return_intermediate_output_csg=True,\n )\n total_loss, partial_losses_dict = get_composite_loss(\n predictions,\n trues,\n bounding_volume,\n points,\n intermediate_results,\n self.net.csg_layers_,\n self.net.evaluator_,\n self._base_mode,\n self.net.use_planes,\n self.global_step,\n self.net.scaler_,\n )\n\n logger_dict = {\n \"loss\": total_loss,\n **partial_losses_dict,\n \"mse\": mse(self.binarize(predictions), trues),\n }\n\n output = OrderedDict({\"loss\": total_loss, \"log\": logger_dict})\n\n return output\n\n def validation_end(\n self, outputs: t.List[t.Dict[str, t.Any]]\n ) -> t.Dict[str, t.Any]:\n self.logger.valid()\n means = defaultdict(int)\n for output in outputs:\n for key, value in output[\"log\"].items():\n means[key] += value\n means = {key: value / len(outputs) for key, value in means.items()}\n logger_dict = means\n 
tqdm_dict = {\n \"valid_\" + key: value.item() for key, value in means.items()\n }\n result = {\n \"valid_loss\": means[\"loss\"],\n \"progress_bar\": tqdm_dict,\n \"log\": logger_dict,\n }\n return result\n\n def configure_optimizers(\n self,\n ) -> t.Tuple[t.Sequence[Optimizer], t.Sequence[optim.lr_scheduler.StepLR]]:\n optimizer = optim.Adam(\n self.parameters(),\n lr=self.hparams.lr,\n betas=(self.hparams.beta1, self.hparams.beta2),\n )\n\n self.__optimizers = [optimizer]\n return [optimizer], []\n\n def _dataloader_simple(\n self, training: bool, split_path: str\n ) -> DataLoader:\n batch_size = self.hparams.batch_size\n renders = get_simple_dataset_paths_from_config(\n self.data_path_, split_path\n )\n transforms = get_simple_2d_transforms()\n\n loader = DataLoader(\n dataset=SimpleDataset(\n renders,\n None,\n self.hparams.points_per_sample_in_batch,\n transforms,\n ),\n batch_size=batch_size,\n shuffle=training,\n drop_last=training,\n num_workers=THREADS,\n )\n return loader\n\n def train_dataloader(self) -> DataLoader:\n return self._dataloader_simple(True, self.train_split_config_)\n\n def val_dataloader(self) -> DataLoader:\n return self._dataloader_simple(False, self.valid_split_config_)\n\n def __next_elem_from_loader(\n self, loader: DataLoader\n ) -> t.Tuple[torch.Tensor, ...]:\n images, coords, distances, _ = next(iter(loader))\n if self.on_gpu:\n images = images.cuda()\n coords = coords.cuda()\n distances = distances.cuda()\n return images, coords, distances\n\n def on_epoch_end(self):\n val_loader = self.val_dataloader()\n (images, coords, distances) = self.__next_elem_from_loader(val_loader)\n\n images = images[:16]\n coords = coords[:16]\n distances = distances[:16]\n\n b, c, h, w = images.shape\n final_predictions = self(images, coords).reshape((b, c, h, w))\n\n input_images = torchvision.utils.make_grid(images, normalize=True)\n gt = torchvision.utils.make_grid(\n distances.view_as(images), normalize=True\n )\n pred_grid = torchvision.utils.make_grid(\n final_predictions, normalize=True\n )\n\n binarized_pred_grid = torchvision.utils.make_grid(\n self.binarize(final_predictions), normalize=True\n )\n\n self.logger.experiment.add_image(\n \"input_images\", input_images, self.current_epoch\n )\n self.logger.experiment.add_image(\"gt\", gt, self.current_epoch)\n self.logger.experiment.add_image(\n \"reconstruction\", pred_grid, self.current_epoch\n )\n self.logger.experiment.add_image(\n \"binarized_pred\", binarized_pred_grid, self.current_epoch\n )\n\n @classmethod\n def binarize(cls, predictions: torch.Tensor) -> torch.Tensor:\n return (predictions >= 0.5).float()\n\n @staticmethod\n def add_model_specific_args(\n parent_parser: argparse.ArgumentParser,\n ) -> argparse.ArgumentParser:\n parser = argparse.ArgumentParser(parents=[parent_parser])\n\n parser.add_argument(\n \"--num_dimensions\",\n help=\"Number of dimensions to be evaulated on\",\n type=int,\n default=2,\n )\n parser.add_argument(\n \"--shapes_per_type\",\n help=(\n \"Number of shapes per type, ex. 
64 will create 64 squares and \"\n \"64 circles\"\n ),\n type=int,\n default=8,\n )\n parser.add_argument(\n \"--lr\",\n help=\"Learning rate of the optimizer\",\n type=float,\n default=1e-3,\n )\n\n parser.add_argument(\n \"--beta1\",\n help=\"Beta_1 parameter of the Adam optimizer\",\n type=float,\n default=0.5,\n )\n\n parser.add_argument(\n \"--beta2\",\n help=\"Beta_2 parameter of the Adam optimizer\",\n type=float,\n default=0.99,\n )\n parser.add_argument(\n \"--batch_size\", help=\"Batch size\", type=int, default=16\n )\n parser.add_argument(\n \"--points\",\n type=int,\n dest=\"points_per_sample_in_batch\",\n help=\"Number of SDF samples per sample in a batch.\",\n default=1024,\n )\n parser.add_argument(\n \"--sampling_count\",\n type=int,\n help=\"Num of sampling to perform in relational layers\",\n default=5,\n )\n parser.add_argument(\n \"--out_shapes_per_layer\",\n type=int,\n help=\"Number of output shapes per layer\",\n default=2,\n )\n parser.add_argument(\n \"--weight_binarizing_threshold\",\n type=float,\n help=(\n \"Thresholding value for weights. If weight > `threshold` \"\n \"then it is set to 1. If -`threshold` < weight <= \"\n \"`threshold then set 0 and to -1 otherwise.\"\n ),\n default=0.1,\n )\n parser.add_argument(\n \"--use_planes\",\n action=\"store_true\",\n help=(\n \"Whether use normal shapes (circles, squares etc.) or \"\n \"planes that are combined later. Note, that for planes, \"\n \"it is recommended to set `shapes_per_type` much higher\"\n ),\n )\n parser.add_argument(\n \"--num_csg_layers\",\n type=int,\n help=\"Number of relation prediction layers\",\n default=2,\n )\n\n return parser\n"
] |
[
[
"torch.cuda.is_available"
],
[
"numpy.prod",
"torch.tensor"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
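Aside for the row above (ucsgnet train script): a minimal, self-contained sketch of the parameter-counting idiom used in Net.num_of_parameters, which sums np.prod(param.shape) over model.parameters() split by requires_grad. The tiny nn.Linear model below is an assumed stand-in, not part of the original file.

import numpy as np
import torch.nn as nn

model = nn.Linear(4, 2)              # assumed toy model; weight is 2x4, bias is 2
model.bias.requires_grad = False     # freeze the bias so both counters are exercised

trainable = sum(int(np.prod(p.shape)) for p in model.parameters() if p.requires_grad)
frozen = sum(int(np.prod(p.shape)) for p in model.parameters() if not p.requires_grad)
print("Num of trainable params: {}".format(trainable))    # 8
print("Num of not trainable params: {}".format(frozen))   # 2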
Omekaago101/Intracranial-Hemorrhage-Classification
|
[
"4f53da3a3869be7b451edc558ef06c5c41083b4b",
"4f53da3a3869be7b451edc558ef06c5c41083b4b",
"4f53da3a3869be7b451edc558ef06c5c41083b4b"
] |
[
"src/wavelet-FE/models/fwht.py",
"src/wavelet-FE/datasets/custom_dataset.py",
"src/wavelet-FE/models/levit.py"
] |
[
"# Created by moritz ([email protected])\nimport torch\nimport numpy as np\nfrom scipy.linalg import hadamard\n\n\ndef matmul_wht(x, h_mat=None, inverse=False):\n \"\"\"\n Welsh-Hadamard transform by matrix multiplication.\n @ param x: The sequence to be transformed [batchsize, seq_len].\n @ param inverse: If true computes the inverse transform.\n \"\"\"\n n = x.shape[-1]\n\n if h_mat is None:\n h_mat = torch.from_numpy(hadamard(n).astype(np.float32))\n if x.device.type == 'cuda':\n h_mat = h_mat.cuda()\n y = torch.nn.functional.linear(x, h_mat, bias=None)\n if not inverse:\n y = y/n\n return y\n\n\ndef fwht(x, inverse=False):\n \"\"\"\n Matlab inspired fast welsh-hadamard transform.\n :param inverse: If true the ifwht is computed.\n :param x: The tensor to be transformed\n :return: The welsh hadamard coefficients.\n \"\"\"\n\n x = x.clone()\n\n n = x.shape[-1]\n if n < 2:\n return x\n\n if n % 2 != 0:\n raise AssertionError(\"Input feature dimension must be a power of two.\")\n\n for i in range(0, n, 2):\n x[..., i] = x[..., i] + x[..., i+1]\n x[..., i+1] = x[..., i] - 2 * x[..., i+1]\n\n l = 1\n y = torch.zeros(x.shape, dtype=x.dtype, device=x.device)\n for nStage in range(2, int(np.log2(n) + 1)): # np.log2(n) = number of stages in the flow diagram\n # calculate coefficients for the ith stage specified by nStage\n m = int(np.power(2, l))\n jb = 0\n k = 0\n while k < n:\n # print('jb, jb+m, k, n, m', jb, jb+m, k, n, m)\n for j in range(jb, jb+m, 2):\n y[..., k] = x[..., j] + x[..., j+m]\n y[..., k+1] = x[..., j] - x[..., j+m]\n y[..., k+2] = x[..., j+1] - x[..., j+1+m]\n y[..., k+3] = x[..., j+1] + x[..., j+1+m]\n k = k + 4\n jb = jb + 2*m\n\n # store coefficients in x at the end of each stage\n x = y.clone()\n l = l + 1\n # perform scaling of coefficients\n if not inverse:\n y = x / n\n return y\n\n\ndef walsh_hadamard_transform(seq_in, inverse=False, scale=True):\n \"\"\"Utility function for the Walsh Hadamard Transform,\n produces Hadamard ordered coefficients.\n Based on: https://docs.sympy.org/latest/_modules/sympy/discrete/transforms.html#fwht\"\"\"\n assert seq_in.dtype == torch.float32, 'float tensor input required.'\n\n a = seq_in.clone()\n\n if inverse and scale:\n a *= len(a)\n\n n = a.shape[-1]\n if n < 2:\n return a\n\n if n % 2 != 0:\n raise AssertionError(\"Input feature dimension must be a power of two.\")\n\n # zero padding\n # a += [S.Zero]*(n - len(a))\n h = 2\n while h <= n:\n hf, ut = h // 2, n // h\n for i in range(0, n, h):\n for j in range(hf):\n u, v = a[..., i + j], a[..., i + j + hf]\n a[..., i + j], a[..., i + j + hf] = u + v, u - v\n h *= 2\n\n if inverse:\n a = a/n\n else:\n # scale if desired\n if scale:\n a = a/(len(a)*1.0)\n return a\n\n\nif __name__ == '__main__':\n seq = torch.tensor([1., 1., 1., 1., 0, 0, 0, 1.])\n print('len', len(seq))\n seq_freq = walsh_hadamard_transform(seq)\n print('freq', seq_freq)\n seq_freq_scl = walsh_hadamard_transform(seq, scale=False)\n print('freq scl', seq_freq_scl)\n seq_rec = walsh_hadamard_transform(seq_freq, inverse=True)\n print(seq_rec.numpy(), seq - seq_rec)\n seq_rec_scl = walsh_hadamard_transform(seq_freq_scl, inverse=True, scale=False)\n print(seq_rec_scl.numpy(), seq - seq_rec_scl)\n\n fwht_seq = fwht(seq)\n print(fwht_seq)\n\n # haramard\n res = matmul_wht(seq.unsqueeze(0), inverse=False)\n print('res', res)\n inv = matmul_wht(res, inverse=True)\n print('inv', inv)",
"import cv2\nimport os\nimport torch\nimport numpy as np\nfrom torch.utils.data import Dataset\nfrom albumentations.pytorch.transforms import ToTensorV2\nfrom albumentations import (MedianBlur, Compose, Normalize, OpticalDistortion, HorizontalFlip,\n VerticalFlip, ShiftScaleRotate, Transpose, OneOf, IAAAdditiveGaussianNoise,\n GaussNoise, RandomGamma, Blur, RandomBrightness, HueSaturationValue,\n RandomBrightnessContrast, GridDistortion,Lambda, NoOp, CenterCrop, Resize,RandomResizedCrop\n )\nclass IntracranialDataset(Dataset):\n \n def __init__(self, cfg, df, path, labels,AUTOCROP,HFLIP,TRANSPOSE,mode='train'):\n self.path = path\n self.data = df\n self.labels = labels\n self.crop = AUTOCROP\n self.cfg = cfg\n self.mode = mode\n self.transpose = TRANSPOSE\n self.hflip = HFLIP\n self.lbls = cfg.CONST.LABELS\n if self.mode == \"train\":\n self.transform = Compose([\n RandomResizedCrop(cfg.DATA.IMG_SIZE, cfg.DATA.IMG_SIZE,\n interpolation=cv2.INTER_LINEAR, scale=(0.8, 1)),\n OneOf([\n HorizontalFlip(p=1.),\n VerticalFlip(p=1.),\n ]),\n OneOf([\n ShiftScaleRotate(\n shift_limit=0.0625,\n scale_limit=0.1,\n rotate_limit=30,\n border_mode=cv2.BORDER_CONSTANT,\n value=0,\n p=1.),\n GridDistortion(\n distort_limit=0.2,\n border_mode=cv2.BORDER_CONSTANT,\n value=0,\n p=1.),\n OpticalDistortion(\n distort_limit=0.2,\n shift_limit=0.15,\n border_mode=cv2.BORDER_CONSTANT,\n value=0,\n p=1.),\n NoOp()\n ]),\n OneOf([\n IAAAdditiveGaussianNoise(p=1.),\n GaussNoise(p=1.),\n NoOp()\n ]),\n OneOf([\n MedianBlur(blur_limit=3, p=1.),\n Blur(blur_limit=3, p=1.),\n NoOp()\n ])\n ])\n elif self.mode == 'test' or self.mode == 'valid':\n HFLIPVAL = 1.0 if self.hflip == 'T' else 0.0\n TRANSPOSEVAL = 1.0 if self.transpose == 'P' else 0.0\n self.transform = Compose([\n HorizontalFlip(p=HFLIPVAL),\n Transpose(p=TRANSPOSEVAL),\n Normalize(mean=[0.22363983, 0.18190407, 0.2523437 ], \n std=[0.32451536, 0.2956294, 0.31335256], max_pixel_value=255.0, p=1.0),\n ])\n self.totensor = ToTensorV2()\n \n def __len__(self):\n return len(self.data)\n\n def __getitem__(self, idx):\n img_name = os.path.join(self.path, self.data.loc[idx, 'Image'] + '.jpg')\n #img = cv2.imread(img_name, cv2.IMREAD_GRAYSCALE) \n img = cv2.imread(img_name) \n if self.crop:\n try:\n try:\n img = self.autocrop(img, threshold=0, kernsel_size = img.shape[0]//15)\n except:\n img = self.autocrop(img, threshold=0) \n except:\n 1 \n img = cv2.resize(img,(self.cfg.DATA.IMG_SIZE,self.cfg.DATA.IMG_SIZE))\n if self.mode == \"train\": \n augmented = self.transform(image=img)\n img = augmented['image'] \n if self.labels:\n labels = torch.tensor(\n self.data.loc[idx, self.cfg.CONST.LABELS])\n return {'image': img, 'labels': labels} \n else: \n return {'image': img}\n \n def autocrop(image, threshold=0):\n \"\"\"Crops any edges below or equal to threshold\n Crops blank image to 1x1.\n Returns cropped image.\n https://stackoverflow.com/questions/13538748/crop-black-edges-with-opencv\n \"\"\"\n\n if len(image.shape) == 3:\n flatImage = np.max(image, 2)\n else:\n flatImage = image\n rows = np.where(np.max(flatImage, 0) > threshold)[0]\n cols = np.where(np.max(flatImage, 1) > threshold)[0]\n image = image[cols[0]: cols[-1] + 1, rows[0]: rows[-1] + 1]\n #logger.info(image.shape)\n sqside = max(image.shape)\n imageout = np.zeros((sqside, sqside, 3), dtype = 'uint8')\n imageout[:image.shape[0], :image.shape[1],:] = image.copy()\n return imageout",
"from math import ceil\n\nimport torch\nfrom torch import nn, einsum\nimport torch.nn.functional as F\n\nfrom einops import rearrange, repeat\nfrom einops.layers.torch import Rearrange\n#from fast_food import FastFoodLayer\nfrom .fast_food import FastFoodLayer\n# helpers\n\ndef exists(val):\n return val is not None\n\ndef default(val, d):\n return val if exists(val) else d\n\ndef cast_tuple(val, l = 3):\n val = val if isinstance(val, tuple) else (val,)\n return (*val, *((val[-1],) * max(l - len(val), 0)))\n\ndef always(val):\n return lambda *args, **kwargs: val\n\n# classes\n\nclass FeedForward(nn.Module):\n def __init__(self, dim, mult, dropout = 0.):\n super().__init__()\n self.net = nn.Sequential(\n nn.Conv2d(dim, dim * mult, 1),\n nn.Hardswish(),\n nn.Dropout(dropout),\n nn.Conv2d(dim * mult, dim, 1),\n nn.Dropout(dropout)\n )\n def forward(self, x):\n return self.net(x)\n\nclass Attention(nn.Module):\n def __init__(self, dim, fmap_size, heads = 8, dim_key = 32, dim_value = 64, dropout = 0., dim_out = None, downsample = False):\n super().__init__()\n inner_dim_key = dim_key * heads\n inner_dim_value = dim_value * heads\n dim_out = default(dim_out, dim)\n\n self.heads = heads\n self.scale = dim_key ** -0.5\n\n self.to_q = nn.Sequential(nn.Conv2d(dim, inner_dim_key, 1, stride = (2 if downsample else 1), bias = False), nn.BatchNorm2d(inner_dim_key))\n self.to_k = nn.Sequential(nn.Conv2d(dim, inner_dim_key, 1, bias = False), nn.BatchNorm2d(inner_dim_key))\n self.to_v = nn.Sequential(nn.Conv2d(dim, inner_dim_value, 1, bias = False), nn.BatchNorm2d(inner_dim_value))\n\n self.attend = nn.Softmax(dim = -1)\n\n out_batch_norm = nn.BatchNorm2d(dim_out)\n nn.init.zeros_(out_batch_norm.weight)\n\n self.to_out = nn.Sequential(\n nn.GELU(),\n nn.Conv2d(inner_dim_value, dim_out, 1),\n out_batch_norm,\n nn.Dropout(dropout)\n )\n\n # positional bias\n\n self.pos_bias = nn.Embedding(fmap_size * fmap_size, heads)\n\n q_range = torch.arange(0, fmap_size, step = (2 if downsample else 1))\n k_range = torch.arange(fmap_size)\n\n q_pos = torch.stack(torch.meshgrid(q_range, q_range), dim = -1)\n k_pos = torch.stack(torch.meshgrid(k_range, k_range), dim = -1)\n\n q_pos, k_pos = map(lambda t: rearrange(t, 'i j c -> (i j) c'), (q_pos, k_pos))\n rel_pos = (q_pos[:, None, ...] - k_pos[None, :, ...]).abs()\n\n x_rel, y_rel = rel_pos.unbind(dim = -1)\n pos_indices = (x_rel * fmap_size) + y_rel\n\n self.register_buffer('pos_indices', pos_indices)\n\n def apply_pos_bias(self, fmap):\n bias = self.pos_bias(self.pos_indices)\n bias = rearrange(bias, 'i j h -> () h i j')\n return fmap + (bias / self.scale)\n\n def forward(self, x):\n b, n, *_, h = *x.shape, self.heads\n\n q = self.to_q(x)\n y = q.shape[2]\n\n qkv = (q, self.to_k(x), self.to_v(x))\n q, k, v = map(lambda t: rearrange(t, 'b (h d) ... -> b h (...) 
d', h = h), qkv)\n\n dots = einsum('b h i d, b h j d -> b h i j', q, k) * self.scale\n\n dots = self.apply_pos_bias(dots)\n\n attn = self.attend(dots)\n v = v.type(torch.float32)\n out = einsum('b h i j, b h j d -> b h i d', attn, v)\n out = rearrange(out, 'b h (x y) d -> b (h d) x y', h = h, y = y)\n return self.to_out(out)\n\nclass Transformer(nn.Module):\n def __init__(self, dim, fmap_size, depth, heads, dim_key, dim_value, mlp_mult = 2, dropout = 0., dim_out = None, downsample = False):\n super().__init__()\n dim_out = default(dim_out, dim)\n self.layers = nn.ModuleList([])\n self.attn_residual = (not downsample) and dim == dim_out\n\n for _ in range(depth):\n self.layers.append(nn.ModuleList([\n Attention(dim, fmap_size = fmap_size, heads = heads, dim_key = dim_key, dim_value = dim_value, dropout = dropout, downsample = downsample, dim_out = dim_out),\n FeedForward(dim_out, mlp_mult, dropout = dropout)\n ]))\n def forward(self, x):\n for attn, ff in self.layers:\n attn_res = (x if self.attn_residual else 0)\n x = attn(x) + attn_res\n x = ff(x) + x\n return x\n\nclass LeViT(nn.Module):\n def __init__(\n self,\n *,\n image_size,\n num_classes,\n dim,\n depth,\n heads,\n mlp_mult,\n stages = 3,\n dim_key = 32,\n dim_value = 64,\n dropout = 0.,\n num_distill_classes = None\n ):\n super().__init__()\n\n dims = cast_tuple(dim, stages)\n depths = cast_tuple(depth, stages)\n layer_heads = cast_tuple(heads, stages)\n\n assert all(map(lambda t: len(t) == stages, (dims, depths, layer_heads))), 'dimensions, depths, and heads must be a tuple that is less than the designated number of stages'\n\n '''\n self.conv_embedding = nn.Sequential(\n nn.Conv2d(3, 32, 3, stride = 2, padding = 1),\n nn.Conv2d(32, 64, 3, stride = 2, padding = 1),\n nn.Conv2d(64, 128, 3, stride = 2, padding = 1),\n nn.Conv2d(128, dims[0], 3, stride = 2, padding = 1)\n )'''\n\n #map_size = image_size // (2 ** 4)\n fmap_size = image_size\n layers = []\n\n for ind, dim, depth, heads in zip(range(stages), dims, depths, layer_heads):\n is_last = ind == (stages - 1)\n layers.append(Transformer(dim, fmap_size, depth, heads, dim_key, dim_value, mlp_mult, dropout))\n\n if not is_last:\n next_dim = dims[ind + 1]\n layers.append(Transformer(dim, fmap_size, 1, heads * 2, dim_key, dim_value, dim_out = next_dim, downsample = True))\n fmap_size = ceil(fmap_size / 2)\n\n self.backbone = nn.Sequential(*layers)\n\n self.pool = nn.Sequential(\n nn.AdaptiveAvgPool2d(1),\n Rearrange('... () () -> ...')\n )\n\n self.distill_head = nn.Linear(dim, num_distill_classes) if exists(num_distill_classes) else always(None)\n self.mlp_head = nn.Linear(dim, num_classes)\n self.fast_food = FastFoodLayer(dim)\n\n def forward(self, x):\n #x = self.conv_embedding(img)\n\n x = self.backbone(x) \n\n x = self.pool(x)\n x = self.fast_food(x)\n out = self.mlp_head(x)\n \n distill = self.distill_head(x)\n if exists(distill):\n print(distill.shape)\n return out, distill\n\n return out\n\n\nif __name__ == '__main__':\n model = LeViT(\n image_size=14,\n num_classes = 6,\n dim = (1024,2048,4096),\n depth = 4,\n heads = (4,6,8),\n mlp_mult = 2, \n dropout=0.1\n )\n \n x = torch.rand((8,1024,14,14))\n c = model(x)\n print(c.shape)\n"
] |
[
[
"numpy.log2",
"scipy.linalg.hadamard",
"numpy.power",
"torch.zeros",
"torch.tensor",
"torch.nn.functional.linear"
],
[
"numpy.max",
"numpy.zeros",
"torch.tensor"
],
[
"torch.nn.Softmax",
"torch.nn.Sequential",
"torch.nn.Dropout",
"torch.nn.GELU",
"torch.einsum",
"torch.nn.ModuleList",
"torch.nn.Conv2d",
"torch.nn.Hardswish",
"torch.arange",
"torch.nn.Embedding",
"torch.nn.Linear",
"torch.nn.AdaptiveAvgPool2d",
"torch.rand",
"torch.nn.BatchNorm2d",
"torch.nn.init.zeros_",
"torch.meshgrid"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"0.13",
"0.14",
"0.15",
"0.12",
"0.10"
],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
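Aside for the row above (src/wavelet-FE/models/fwht.py): a NumPy-only sketch of the scaling convention behind matmul_wht, where the forward transform divides by n and the inverse is a plain multiplication by the Hadamard matrix. The sample sequence matches the one in the file's __main__ block; the check itself is an illustration, not repository code.

import numpy as np
from scipy.linalg import hadamard

x = np.array([1., 1., 1., 1., 0., 0., 0., 1.])
n = x.shape[-1]
H = hadamard(n).astype(np.float64)   # Sylvester-ordered Walsh-Hadamard matrix

forward = (H @ x) / n                # mirrors matmul_wht(x, inverse=False)
recovered = H @ forward              # mirrors matmul_wht(forward, inverse=True)
print(np.allclose(recovered, x))     # True, since H @ H == n * I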
hebafer/models
|
[
"5dc6421f562ea447e501fa355a48a6ee89856a1d"
] |
[
"runnables/binning.py"
] |
[
"import argparse\nimport mars.dataframe as md\nimport os\nimport pandas as pd\nfrom bin.binning_calculator import calc_stats, calc_two_dim_binning_stats, get_cols_bin_boundaries\nfrom run_io.db_adapter import convertDSNToRfc1738\nfrom sqlalchemy import create_engine\n\n\ndef build_argument_parser():\n parser = argparse.ArgumentParser(allow_abbrev=False)\n parser.add_argument(\"--dbname\", type=str, required=True)\n parser.add_argument(\"--columns\", type=str, required=True)\n parser.add_argument(\"--bin_method\", type=str, required=False)\n parser.add_argument(\"--bin_num\", type=str, required=False)\n parser.add_argument(\"--bin_input_table\", type=str, required=False)\n parser.add_argument(\"--reverse_cumsum\", type=bool, default=False)\n parser.add_argument(\"--two_dim_bin_cols\", type=str, required=False)\n\n return parser\n\n\nif __name__ == \"__main__\":\n parser = build_argument_parser()\n args, _ = parser.parse_known_args()\n columns = args.columns.split(',')\n bin_method_array = args.bin_method.split(',') if args.bin_method else None\n bin_num_array = [int(item) for item in args.bin_num.split(',')] if args.bin_num else None\n\n select_input = os.getenv(\"SQLFLOW_TO_RUN_SELECT\")\n output = os.getenv(\"SQLFLOW_TO_RUN_INTO\")\n output_tables = output.split(',')\n datasource = os.getenv(\"SQLFLOW_DATASOURCE\")\n\n assert len(output_tables) == 1, \"The output tables shouldn't be null and can contain only one.\"\n\n url = convertDSNToRfc1738(datasource, args.dbname)\n engine = create_engine(url)\n input_md = md.read_sql(\n sql=select_input,\n con=engine)\n input_md.execute()\n\n cols_bin_boundaries = {}\n if args.bin_input_table:\n print(\"Get provided bin boundaries from table {}\".format(args.bin_input_table))\n bin_input_df = pd.read_sql_table(\n table_name=args.bin_input_table,\n con=engine)\n cols_bin_boundaries = get_cols_bin_boundaries(bin_input_df)\n\n if set(columns) > cols_bin_boundaries.keys():\n raise ValueError(\"The provided bin boundaries contains keys: {}. But they cannot cover all the \\\n input columns: {}\".format(cols_bin_boundaries.keys(), columns))\n\n print(\"Ignore the bin_num and bin_method arguments\")\n bin_num_array = [None] * len(columns)\n bin_method_array = [None] * len(columns)\n else:\n if len(bin_num_array) == 1:\n bin_num_array = bin_num_array * len(columns)\n else:\n assert(len(bin_num_array) == len(columns))\n\n if len(bin_method_array) == 1:\n bin_method_array = bin_method_array * len(columns)\n else:\n assert(len(bin_method_array) == len(columns))\n \n print(\"Calculate the statistics result for columns: {}\".format(columns))\n stats_df = calc_stats(\n input_md,\n columns,\n bin_method_array,\n bin_num_array,\n cols_bin_boundaries,\n args.reverse_cumsum)\n\n print(\"Persist the statistics result into the table {}\".format(output_tables[0]))\n stats_df.to_sql(\n name=output_tables[0],\n con=engine,\n index=False\n )\n"
] |
[
[
"pandas.read_sql_table"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
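Aside for the row above (runnables/binning.py): a small sketch of how the --columns / --bin_num arguments are parsed and how a single bin count is broadcast across several columns. The sample argument values are assumptions; the database connection, SQLFlow environment variables, and bin_method handling are left out.

columns = "age,income,score".split(",")
bin_num = "10"                                    # assumed single value shared by all columns

bin_num_array = [int(item) for item in bin_num.split(",")]
if len(bin_num_array) == 1:
    bin_num_array = bin_num_array * len(columns)  # broadcast to one entry per column
else:
    assert len(bin_num_array) == len(columns)

print(list(zip(columns, bin_num_array)))          # [('age', 10), ('income', 10), ('score', 10)]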
jucyai/red-panda
|
[
"a47c81cdf42c9c9e057432106c505ea415edccba"
] |
[
"red_panda/aws/s3.py"
] |
[
"import warnings\nfrom io import StringIO, BytesIO\nimport logging\n\nimport pandas as pd\nimport boto3\n\nfrom red_panda.pandas import PANDAS_TOCSV_KWARGS, PANDAS_READ_TABLE_KWARGS\nfrom red_panda.aws import (\n S3_PUT_KWARGS,\n S3_GET_KWARGS,\n S3_CREATE_BUCKET_KWARGS,\n)\nfrom red_panda.utils import filter_kwargs, make_valid_uri\nfrom red_panda.aws import AWSUtils\n\n\nLOGGER = logging.getLogger(__name__)\n\n\nclass S3Utils(AWSUtils):\n \"\"\"AWS S3 operations.\n\n Args:\n aws_config: AWS configuration.\n\n Attributes:\n aws_config (dict): AWS configuration.\n \"\"\"\n\n def __init__(self, aws_config: dict):\n super().__init__(aws_config=aws_config)\n\n def _connect_s3(self):\n \"\"\"Get S3 session.\n\n If key/secret are not provided, boto3's default behavior is falling back to awscli configs\n and environment variables.\n \"\"\"\n return boto3.resource(\n \"s3\",\n aws_access_key_id=self.aws_config.get(\"aws_access_key_id\"),\n aws_secret_access_key=self.aws_config.get(\"aws_secret_access_key\"),\n aws_session_token=self.aws_config.get(\"aws_session_token\"),\n )\n\n def _check_s3_bucket_existence(self, bucket: str) -> bool:\n s3 = self.get_s3_client()\n try:\n s3.head_bucket(Bucket=bucket)\n except s3.exceptions.ClientError:\n LOGGER.warning(f\"{bucket} does not exist or you do not have access to it.\")\n return False\n else:\n return True\n\n def _check_s3_key_existence(self, bucket: str, key: str) -> bool:\n s3 = self.get_s3_client()\n try:\n s3.head_object(Bucket=bucket, Key=key)\n except s3.exceptions.ClientError:\n LOGGER.warning(\n f\"{bucket}/{key} does not exist or you do not have access to it.\"\n )\n return False\n else:\n return True\n\n def _get_s3_pattern_existence(self, bucket: str, pattern: str) -> list:\n s3 = self.get_s3_resource()\n all_keys = [\n o.key for o in s3.Bucket(bucket).objects.all() if o.key.startswith(pattern)\n ]\n return all_keys\n\n def get_s3_resource(self):\n \"\"\"Return a boto3 S3 resource\"\"\"\n return self._connect_s3()\n\n def get_s3_client(self):\n \"\"\"Return a boto3 S3 client\"\"\"\n return self._connect_s3().meta.client\n\n def list_buckets(self) -> list:\n \"\"\"List all buckets.\n \n Returns:\n All S3 buckets for the account.\n \"\"\"\n s3 = self.get_s3_client()\n response = s3.list_buckets()\n buckets = [bucket[\"Name\"] for bucket in response[\"Buckets\"]]\n return buckets\n\n def list_object_keys(self, bucket: str, prefix: str = \"\") -> list:\n \"\"\"List all object keys.\n\n Args:\n bucket: Bucket name.\n prefix: Any prefix for the object.\n\n Returns:\n A list of all objects in a bucket given certain prefix.\n \"\"\"\n s3 = self.get_s3_client()\n response = s3.list_objects(Bucket=bucket, Prefix=prefix)\n return [o[\"Key\"] for o in response[\"Contents\"]]\n\n def create_bucket(self, bucket: str, error: str = \"warn\", **kwargs):\n \"\"\"Check and create bucket.\n\n Args:\n bucket: S3 bucket name.\n error (optional): Specify `warn` or `raise` or `silent`. How to handle if bucket already\n exists. 
Default is `warn`.\n **kwargs: Additional keyword arguments for creating bucket.\n\n Returns:\n The response from `boto3.create_bucket`.\n \"\"\"\n s3 = self.get_s3_client()\n if self._check_s3_bucket_existence(bucket):\n if error == \"raise\":\n raise ValueError(f\"{bucket} already exists\")\n elif error == \"warn\":\n warnings.warn(f\"{bucket} already exists\")\n extra_kwargs = filter_kwargs(kwargs, S3_CREATE_BUCKET_KWARGS)\n return s3.create_bucket(Bucket=bucket, **extra_kwargs)\n\n def file_to_s3(self, file_name: str, bucket: str, key: str, **kwargs):\n \"\"\"Put a file to S3.\n\n Args:\n file_name: Local file name.\n bucket: S3 bucket name.\n key: S3 key.\n **kwargs: ExtraArgs for `boto3.client.upload_file`.\n \"\"\"\n s3 = self._connect_s3()\n self._check_s3_bucket_existence(bucket)\n s3_put_kwargs = filter_kwargs(kwargs, S3_PUT_KWARGS)\n s3.meta.client.upload_file(\n file_name, Bucket=bucket, Key=key, ExtraArgs=s3_put_kwargs\n )\n\n def df_to_s3(self, df: pd.DataFrame, bucket: str, key: str, **kwargs):\n \"\"\"Put DataFrame to S3.\n\n Args:\n df: Source dataframe.\n bucket: S3 bucket name.\n key: S3 key.\n **kwargs: kwargs for `boto3.Bucket.put_object` and `pandas.DataFrame.to_csv`.\n \"\"\"\n s3 = self._connect_s3()\n buffer = StringIO()\n to_csv_kwargs = filter_kwargs(kwargs, PANDAS_TOCSV_KWARGS)\n df.to_csv(buffer, **to_csv_kwargs)\n self._check_s3_bucket_existence(bucket)\n s3_put_kwargs = filter_kwargs(kwargs, S3_PUT_KWARGS)\n s3.Bucket(bucket).put_object(Key=key, Body=buffer.getvalue(), **s3_put_kwargs)\n\n def delete_from_s3(self, bucket: str, key: str):\n \"\"\"Delete object from S3.\n\n Args:\n bucket: S3 bucket name.\n key: S3 key.\n \"\"\"\n s3 = self._connect_s3()\n if self._check_s3_key_existence(bucket, key):\n s3.meta.client.delete_object(Bucket=bucket, Key=key)\n else:\n LOGGER.warning(f\"{bucket}: {key} does not exist.\")\n\n def delete_bucket(self, bucket: str):\n \"\"\"Empty and delete bucket.\n\n Args:\n bucket: S3 bucket name.\n\n TODO:\n * Handle when there is bucket versioning.\n \"\"\"\n s3_bucket = self.get_s3_resource().Bucket(bucket)\n s3_bucket.objects.all().delete()\n s3_bucket.delete()\n\n def s3_to_obj(self, bucket: str, key: str, **kwargs) -> BytesIO:\n \"\"\"Read S3 object into memory as BytesIO.\n\n Args:\n bucket: S3 bucket name.\n key: S3 key.\n **kwargs: kwargs for `boto3.client.get_object`.\n \"\"\"\n s3_get_kwargs = filter_kwargs(kwargs, S3_GET_KWARGS)\n s3 = self.get_s3_client()\n obj = s3.get_object(Bucket=bucket, Key=key, **s3_get_kwargs)\n return BytesIO(obj[\"Body\"].read())\n\n def s3_to_file(self, bucket: str, key: str, file_name: str, **kwargs):\n \"\"\"Download S3 object as local file.\n\n Args:\n bucket: S3 bucket name.\n key: S3 key.\n file_name: Local file name.\n **kwargs: kwargs for `boto3.client.download_file`.\n \"\"\"\n s3_get_kwargs = filter_kwargs(kwargs, S3_GET_KWARGS)\n s3 = self.get_s3_resource()\n s3.Bucket(bucket).download_file(\n Key=key, Filename=file_name, ExtraArgs=s3_get_kwargs\n )\n\n def s3_to_df(self, bucket: str, key: str, **kwargs):\n \"\"\"Read S3 object into memory as DataFrame\n\n Only supporting delimited files. 
Default is tab delimited files.\n\n Args:\n bucket: S3 bucket name.\n key: S3 key.\n **kwargs: kwargs for `pandas.read_table` and `boto3.client.get_object`.\n\n Returns:\n A DataFrame.\n \"\"\"\n s3_get_kwargs = filter_kwargs(kwargs, S3_GET_KWARGS)\n read_table_kwargs = filter_kwargs(kwargs, PANDAS_READ_TABLE_KWARGS)\n buffer = self.s3_to_obj(bucket, key, **s3_get_kwargs)\n return pd.read_csv(buffer, **read_table_kwargs)\n\n def s3_folder_to_df(self, bucket: str, folder: str, prefix: str = None, **kwargs):\n \"\"\"Read all files in folder with prefix to a df.\n\n Args:\n bucket: S3 bucket name.\n folder: S3 folder.\n prefix: File prefix.\n\n Returns:\n A DataFrame.\n \"\"\"\n s3_get_kwargs = filter_kwargs(kwargs, S3_GET_KWARGS)\n read_table_kwargs = filter_kwargs(kwargs, PANDAS_READ_TABLE_KWARGS)\n if folder[-1] != \"/\":\n folder = folder + \"/\"\n pattern = make_valid_uri(folder, prefix or \"/\")\n allfiles = [f for f in self.list_object_keys(bucket, pattern) if f != folder]\n dfs = []\n for f in allfiles:\n LOGGER.info(f\"Reading file {f}\")\n dfs.append(self.s3_to_df(bucket, f, **s3_get_kwargs, **read_table_kwargs))\n return pd.concat(dfs)\n"
] |
[
[
"pandas.concat",
"pandas.read_csv"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.3",
"1.1",
"1.5",
"1.2"
],
"scipy": [],
"tensorflow": []
}
] |
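Aside for the row above (red_panda/aws/s3.py): a local sketch of the StringIO round-trip that df_to_s3 and s3_to_df are built on, with the boto3 put_object/get_object calls omitted so it runs without AWS credentials. The sample DataFrame is an assumption.

from io import StringIO
import pandas as pd

df = pd.DataFrame({"a": [1, 2], "b": [3.0, 4.0]})

buffer = StringIO()
df.to_csv(buffer, index=False)        # what df_to_s3 writes into the S3 object body
buffer.seek(0)

round_tripped = pd.read_csv(buffer)   # what s3_to_df reads back out of the body
print(round_tripped.equals(df))       # True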
yuweijiang/HGL-pytorch
|
[
"d7a54fce83a5678777a02bc50176e7fa527d7f9f",
"80238500b96edf051d750670de7300168e456424"
] |
[
"data/get_bert_embeddings/optimization.py",
"train.py"
] |
[
"# coding=utf-8\r\n# Copyright 2018 The Google AI Language Team Authors.\r\n#\r\n# Licensed under the Apache License, Version 2.0 (the \"License\");\r\n# you may not use this file except in compliance with the License.\r\n# You may obtain a copy of the License at\r\n#\r\n# http://www.apache.org/licenses/LICENSE-2.0\r\n#\r\n# Unless required by applicable law or agreed to in writing, software\r\n# distributed under the License is distributed on an \"AS IS\" BASIS,\r\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n# See the License for the specific language governing permissions and\r\n# limitations under the License.\r\n\"\"\"Functions and classes related to optimization (weight updates).\"\"\"\r\n\r\nfrom __future__ import absolute_import\r\nfrom __future__ import division\r\nfrom __future__ import print_function\r\n\r\nimport re\r\nimport tensorflow as tf\r\n\r\n\r\ndef create_optimizer(loss, init_lr, num_train_steps, num_warmup_steps, use_tpu):\r\n \"\"\"Creates an optimizer training op.\"\"\"\r\n global_step = tf.train.get_or_create_global_step()\r\n\r\n learning_rate = tf.constant(value=init_lr, shape=[], dtype=tf.float32)\r\n\r\n # Implements linear decay of the learning rate.\r\n learning_rate = tf.train.polynomial_decay(\r\n learning_rate,\r\n global_step,\r\n num_train_steps,\r\n end_learning_rate=0.0,\r\n power=1.0,\r\n cycle=False)\r\n\r\n # Implements linear warmup. I.e., if global_step < num_warmup_steps, the\r\n # learning rate will be `global_step/num_warmup_steps * init_lr`.\r\n if num_warmup_steps:\r\n global_steps_int = tf.cast(global_step, tf.int32)\r\n warmup_steps_int = tf.constant(num_warmup_steps, dtype=tf.int32)\r\n\r\n global_steps_float = tf.cast(global_steps_int, tf.float32)\r\n warmup_steps_float = tf.cast(warmup_steps_int, tf.float32)\r\n\r\n warmup_percent_done = global_steps_float / warmup_steps_float\r\n warmup_learning_rate = init_lr * warmup_percent_done\r\n\r\n is_warmup = tf.cast(global_steps_int < warmup_steps_int, tf.float32)\r\n learning_rate = (\r\n (1.0 - is_warmup) * learning_rate + is_warmup * warmup_learning_rate)\r\n\r\n # It is recommended that you use this optimizer for fine tuning, since this\r\n # is how the model was trained (note that the Adam m/v variables are NOT\r\n # loaded from init_checkpoint.)\r\n optimizer = AdamWeightDecayOptimizer(\r\n learning_rate=learning_rate,\r\n weight_decay_rate=0.01,\r\n beta_1=0.9,\r\n beta_2=0.999,\r\n epsilon=1e-6,\r\n exclude_from_weight_decay=[\"LayerNorm\", \"layer_norm\", \"bias\"])\r\n\r\n if use_tpu:\r\n optimizer = tf.contrib.tpu.CrossShardOptimizer(optimizer)\r\n\r\n tvars = tf.trainable_variables()\r\n grads = tf.gradients(loss, tvars)\r\n\r\n # This is how the model was pre-trained.\r\n (grads, _) = tf.clip_by_global_norm(grads, clip_norm=1.0)\r\n\r\n train_op = optimizer.apply_gradients(\r\n zip(grads, tvars), global_step=global_step)\r\n\r\n new_global_step = global_step + 1\r\n train_op = tf.group(train_op, [global_step.assign(new_global_step)])\r\n return train_op\r\n\r\n\r\nclass AdamWeightDecayOptimizer(tf.train.Optimizer):\r\n \"\"\"A basic Adam optimizer that includes \"correct\" L2 weight decay.\"\"\"\r\n\r\n def __init__(self,\r\n learning_rate,\r\n weight_decay_rate=0.0,\r\n beta_1=0.9,\r\n beta_2=0.999,\r\n epsilon=1e-6,\r\n exclude_from_weight_decay=None,\r\n name=\"AdamWeightDecayOptimizer\"):\r\n \"\"\"Constructs a AdamWeightDecayOptimizer.\"\"\"\r\n super(AdamWeightDecayOptimizer, self).__init__(False, name)\r\n\r\n self.learning_rate = 
learning_rate\r\n self.weight_decay_rate = weight_decay_rate\r\n self.beta_1 = beta_1\r\n self.beta_2 = beta_2\r\n self.epsilon = epsilon\r\n self.exclude_from_weight_decay = exclude_from_weight_decay\r\n\r\n def apply_gradients(self, grads_and_vars, global_step=None, name=None):\r\n \"\"\"See base class.\"\"\"\r\n assignments = []\r\n for (grad, param) in grads_and_vars:\r\n if grad is None or param is None:\r\n continue\r\n\r\n param_name = self._get_variable_name(param.name)\r\n\r\n m = tf.get_variable(\r\n name=param_name + \"/adam_m\",\r\n shape=param.shape.as_list(),\r\n dtype=tf.float32,\r\n trainable=False,\r\n initializer=tf.zeros_initializer())\r\n v = tf.get_variable(\r\n name=param_name + \"/adam_v\",\r\n shape=param.shape.as_list(),\r\n dtype=tf.float32,\r\n trainable=False,\r\n initializer=tf.zeros_initializer())\r\n\r\n # Standard Adam update.\r\n next_m = (\r\n tf.multiply(self.beta_1, m) + tf.multiply(1.0 - self.beta_1, grad))\r\n next_v = (\r\n tf.multiply(self.beta_2, v) + tf.multiply(1.0 - self.beta_2,\r\n tf.square(grad)))\r\n\r\n update = next_m / (tf.sqrt(next_v) + self.epsilon)\r\n\r\n # Just adding the square of the weights to the loss function is *not*\r\n # the correct way of using L2 regularization/weight decay with Adam,\r\n # since that will interact with the m and v parameters in strange ways.\r\n #\r\n # Instead we want ot decay the weights in a manner that doesn't interact\r\n # with the m/v parameters. This is equivalent to adding the square\r\n # of the weights to the loss with plain (non-momentum) SGD.\r\n if self._do_use_weight_decay(param_name):\r\n update += self.weight_decay_rate * param\r\n\r\n update_with_lr = self.learning_rate * update\r\n\r\n next_param = param - update_with_lr\r\n\r\n assignments.extend(\r\n [param.assign(next_param),\r\n m.assign(next_m),\r\n v.assign(next_v)])\r\n return tf.group(*assignments, name=name)\r\n\r\n def _do_use_weight_decay(self, param_name):\r\n \"\"\"Whether to use L2 weight decay for `param_name`.\"\"\"\r\n if not self.weight_decay_rate:\r\n return False\r\n if self.exclude_from_weight_decay:\r\n for r in self.exclude_from_weight_decay:\r\n if re.search(r, param_name) is not None:\r\n return False\r\n return True\r\n\r\n def _get_variable_name(self, param_name):\r\n \"\"\"Get the variable name from the tensor name.\"\"\"\r\n m = re.match(\"^(.*):\\\\d+$\", param_name)\r\n if m is not None:\r\n param_name = m.group(1)\r\n return param_name\r\n",
"\"\"\"\r\nTraining script. Should be pretty adaptable to whatever.\r\n\"\"\"\r\nimport argparse\r\nimport os\r\nimport shutil\r\n\r\nimport multiprocessing\r\nimport numpy as np\r\nimport pandas as pd\r\nimport torch\r\nfrom allennlp.common.params import Params\r\nfrom allennlp.training.learning_rate_schedulers import LearningRateScheduler\r\nfrom allennlp.training.optimizers import Optimizer\r\nfrom torch.nn import DataParallel\r\nfrom torch.nn.modules import BatchNorm2d\r\nfrom tqdm import tqdm\r\n\r\nfrom dataloaders.vcr import VCR, VCRLoader\r\nfrom utils.pytorch_misc import time_batch, save_checkpoint, clip_grad_norm, \\\r\n restore_checkpoint, print_para, restore_best_checkpoint\r\n\r\nimport logging\r\nlogging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', level=logging.DEBUG)\r\n\r\n# This is needed to make the imports work\r\nfrom allennlp.models import Model\r\nimport models\r\n\r\n#################################\r\n#################################\r\n######## Data loading stuff\r\n#################################\r\n#################################\r\n\r\nparser = argparse.ArgumentParser(description='train')\r\nparser.add_argument(\r\n '-params',\r\n dest='params',\r\n help='Params location',\r\n type=str,\r\n)\r\nparser.add_argument(\r\n '-rationale',\r\n action=\"store_true\",\r\n help='use rationale',\r\n)\r\nparser.add_argument(\r\n '-folder',\r\n dest='folder',\r\n help='folder location',\r\n type=str,\r\n)\r\nparser.add_argument(\r\n '-restore',\r\n dest='restore',\r\n action=\"store_true\",\r\n help=\"reload checkpoint\"\r\n)\r\nparser.add_argument(\r\n '-train',\r\n dest='train',\r\n action=\"store_true\",\r\n help=\"train checkpoint\"\r\n)\r\nparser.add_argument(\r\n '-test',\r\n dest='test',\r\n action=\"store_true\",\r\n help=\"test checkpoint\"\r\n)\r\nparser.add_argument(\r\n '-no_tqdm',\r\n dest='no_tqdm',\r\n action='store_true',\r\n)\r\n\r\nargs = parser.parse_args()\r\n\r\nparams = Params.from_file(args.params)\r\ntrain, val, test = VCR.splits(mode='rationale' if args.rationale else 'answer',\r\n embs_to_load=params['dataset_reader'].get('embs', 'bert_da'),\r\n only_use_relevant_dets=params['dataset_reader'].get('only_use_relevant_dets', True))\r\nNUM_GPUS = torch.cuda.device_count()\r\nNUM_CPUS = multiprocessing.cpu_count()\r\nif NUM_GPUS == 0:\r\n raise ValueError(\"you need gpus!\")\r\n\r\ndef _to_gpu(td):\r\n if NUM_GPUS > 1:\r\n return td\r\n for k in td:\r\n if k != 'metadata':\r\n td[k] = {k2: v.cuda(non_blocking=True) for k2, v in td[k].items()} if isinstance(td[k], dict) else td[k].cuda(\r\n non_blocking=True)\r\n return td\r\nnum_workers = (4 * NUM_GPUS if NUM_CPUS == 32 else 2*NUM_GPUS)-1\r\nprint(f\"Using {num_workers} workers out of {NUM_CPUS} possible\", flush=True)\r\nloader_params = {'batch_size': 96 // NUM_GPUS, 'num_gpus':NUM_GPUS, 'num_workers':num_workers}\r\ntrain_loader = VCRLoader.from_dataset(train, **loader_params)\r\nval_loader = VCRLoader.from_dataset(val, **loader_params)\r\ntest_loader = VCRLoader.from_dataset(test, **loader_params)\r\n\r\nARGS_RESET_EVERY = 100\r\nprint(\"Loading {} for {}\".format(params['model'].get('type', 'WTF?'), 'rationales' if args.rationale else 'answer'), flush=True)\r\nmodel = Model.from_params(vocab=train.vocab, params=params['model'])\r\n# for submodule in model.detector.backbone.modules():\r\n# if isinstance(submodule, BatchNorm2d):\r\n# submodule.track_running_stats = False\r\n\r\nmodel = DataParallel(model).cuda() if NUM_GPUS > 1 else model.cuda()\r\noptimizer = 
Optimizer.from_params([x for x in model.named_parameters() if x[1].requires_grad],\r\n params['trainer']['optimizer'])\r\n\r\nlr_scheduler_params = params['trainer'].pop(\"learning_rate_scheduler\", None)\r\nscheduler = LearningRateScheduler.from_params(optimizer, lr_scheduler_params) if lr_scheduler_params else None\r\n\r\nif os.path.exists(args.folder) and args.restore:\r\n print('restore is True')\r\n print(\"Found folder! restoring\", flush=True)\r\n start_epoch, val_metric_per_epoch = restore_checkpoint(model, optimizer, serialization_dir=args.folder,\r\n learning_rate_scheduler=scheduler)\r\nelse:\r\n print(\"Making directories\")\r\n os.makedirs(args.folder, exist_ok=True)\r\n start_epoch, val_metric_per_epoch = 0, []\r\n shutil.copy2(args.params, args.folder)\r\n\r\nparam_shapes = print_para(model)\r\nnum_batches = 0\r\nif args.train:\r\n print('It is training!!!!!')\r\n for epoch_num in range(start_epoch, params['trainer']['num_epochs'] + start_epoch):\r\n train_results = []\r\n norms = []\r\n model.train()\r\n for b, (time_per_batch, batch) in enumerate(time_batch(train_loader if args.no_tqdm else tqdm(train_loader), reset_every=ARGS_RESET_EVERY)):\r\n batch = _to_gpu(batch)\r\n optimizer.zero_grad()\r\n output_dict = model(**batch)\r\n loss = output_dict['loss'].mean() + output_dict['cnn_regularization_loss'].mean()\r\n loss.backward()\r\n\r\n num_batches += 1\r\n if scheduler:\r\n scheduler.step_batch(num_batches)\r\n\r\n norms.append(\r\n clip_grad_norm(model.named_parameters(), max_norm=params['trainer']['grad_norm'], clip=True, verbose=False)\r\n )\r\n optimizer.step()\r\n\r\n train_results.append(pd.Series({'epoch': epoch_num,\r\n 'loss': output_dict['loss'].mean().item(),\r\n 'crl': output_dict['cnn_regularization_loss'].mean().item(),\r\n 'accuracy': (model.module if NUM_GPUS > 1 else model).get_metrics(\r\n reset=(b % ARGS_RESET_EVERY) == 0)[\r\n 'accuracy'],\r\n 'sec_per_batch': time_per_batch,\r\n 'hr_per_epoch': len(train_loader) * time_per_batch / 3600,\r\n }))\r\n if b % ARGS_RESET_EVERY == 0 and b > 0:\r\n norms_df = pd.DataFrame(pd.DataFrame(norms[-ARGS_RESET_EVERY:]).mean(), columns=['norm']).join(\r\n param_shapes[['shape', 'size']]).sort_values('norm', ascending=False)\r\n\r\n print(\"e{:2d}b{:5d}/{:5d}. 
norms: \\nsumm:\\n{}\\n~~~~~~~~~~~~~~~~~~\\n\".format(\r\n epoch_num, b, len(train_loader),\r\n pd.DataFrame(train_results[-ARGS_RESET_EVERY:]).mean(),\r\n ), flush=True)\r\n\r\n print(\"---\\nTRAIN EPOCH {:2d}:\\n{}\\n----\".format(epoch_num, pd.DataFrame(train_results).mean()))\r\n val_probs = []\r\n val_labels = []\r\n val_loss_sum = 0.0\r\n model.eval()\r\n for b, (time_per_batch, batch) in enumerate(time_batch(val_loader)):\r\n with torch.no_grad():\r\n batch = _to_gpu(batch)\r\n output_dict = model(**batch)\r\n val_probs.append(output_dict['label_probs'].detach().cpu().numpy())\r\n val_labels.append(batch['label'].detach().cpu().numpy())\r\n val_loss_sum += output_dict['loss'].mean().item() * batch['label'].shape[0]\r\n val_labels = np.concatenate(val_labels, 0)\r\n val_probs = np.concatenate(val_probs, 0)\r\n val_loss_avg = val_loss_sum / val_labels.shape[0]\r\n\r\n val_metric_per_epoch.append(float(np.mean(val_labels == val_probs.argmax(1))))\r\n if scheduler:\r\n scheduler.step(val_metric_per_epoch[-1], epoch_num)\r\n\r\n print(\"Val epoch {} has acc {:.3f} and loss {:.3f}\".format(epoch_num, val_metric_per_epoch[-1], val_loss_avg),\r\n flush=True)\r\n if int(np.argmax(val_metric_per_epoch)) < (len(val_metric_per_epoch) - 1 - params['trainer']['patience']):\r\n print(\"Stopping at epoch {:2d}\".format(epoch_num))\r\n break\r\n save_checkpoint(model, optimizer, args.folder, epoch_num, val_metric_per_epoch,\r\n is_best=int(np.argmax(val_metric_per_epoch)) == (len(val_metric_per_epoch) - 1))\r\nif args.test:\r\n print('It is validation!!!!')\r\n print(\"STOPPING. now running the best model on the validation set\", flush=True)\r\n # Load best\r\n restore_best_checkpoint(model, args.folder)\r\n model.eval()\r\n val_probs = []\r\n val_labels = []\r\n for b, (time_per_batch, batch) in enumerate(time_batch(val_loader)):\r\n with torch.no_grad():\r\n batch = _to_gpu(batch)\r\n output_dict = model(**batch)\r\n val_probs.append(output_dict['label_probs'].detach().cpu().numpy())\r\n val_labels.append(batch['label'].detach().cpu().numpy())\r\n val_labels = np.concatenate(val_labels, 0)\r\n val_probs = np.concatenate(val_probs, 0)\r\n acc = float(np.mean(val_labels == val_probs.argmax(1)))\r\n print(\"Final val accuracy is {:.3f}\".format(acc))\r\n np.save(os.path.join(args.folder, f'valpreds.npy'), val_probs)\r\n"
] |
[
[
"tensorflow.train.polynomial_decay",
"tensorflow.multiply",
"tensorflow.constant",
"tensorflow.zeros_initializer",
"tensorflow.cast",
"tensorflow.gradients",
"tensorflow.train.get_or_create_global_step",
"tensorflow.contrib.tpu.CrossShardOptimizer",
"tensorflow.clip_by_global_norm",
"tensorflow.square",
"tensorflow.trainable_variables",
"tensorflow.sqrt",
"tensorflow.group"
],
[
"pandas.DataFrame",
"numpy.concatenate",
"torch.nn.DataParallel",
"numpy.argmax",
"torch.no_grad",
"torch.cuda.device_count"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
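Aside for the row above (train.py): the validation-accuracy bookkeeping reduced to its NumPy core, i.e. concatenating per-batch label and probability arrays and comparing argmax predictions. The two fake batches below are assumptions used only to make the snippet runnable.

import numpy as np

# assumed per-batch outputs: probabilities over 4 answer choices and integer labels
val_probs = [np.array([[0.1, 0.7, 0.1, 0.1], [0.3, 0.2, 0.4, 0.1]]),
             np.array([[0.25, 0.25, 0.25, 0.25]])]
val_labels = [np.array([1, 2]), np.array([3])]

val_probs = np.concatenate(val_probs, 0)
val_labels = np.concatenate(val_labels, 0)
acc = float(np.mean(val_labels == val_probs.argmax(1)))
print("Final val accuracy is {:.3f}".format(acc))   # 0.667: the tied third sample argmaxes to index 0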
naveen-chalasani/natural-language-processing-and-anomaly-detection
|
[
"6c6ea44f1966f7abe37c452d84dd24cffd572e1e"
] |
[
"notebooks/sessions.py"
] |
[
"import os \nimport re\nimport numpy as np \nimport pandas as pd\nfrom collections import OrderedDict\n\n# extract session info from log file and save the output in sessions.csv\n\ndef hdfs_sessions(log_file):\n \n session = 0\n sequence_in_session = 0\n block_id_list = list()\n session_info = OrderedDict()\n \n log_data = pd.read_csv(log_file, engine='c', na_filter=False, memory_map=True, header=None, error_bad_lines=False)\n \n for index, row in log_data.iterrows():\n print(index)\n \n block_ids_in_row = re.findall(r'(blk_-?\\d+)', row[0])\n #block_ids = '; '.join(sorted(set(block_ids_in_row)))\n block_id = block_ids_in_row[0]\n \n if block_id not in block_id_list:\n block_id_list.append(block_id)\n session += 1\n sequence_in_session = 1\n else:\n sequence_in_session += 1\n \n temp_list = [None] * 3\n temp_list[0] = block_id\n temp_list[1] = session\n temp_list[2] = sequence_in_session\n session_info[index + 1] = temp_list\n \n df = pd.DataFrame.from_dict(session_info, orient = 'index', columns=['block_id', 'session', 'sequence_in_session'])\n df.to_csv(\"sessions.csv\")\n\n# , error_bad_lines=False\n# hdfs_sessions('HDFS_2k.log')\n# hdfs_sessions('HDFS.log')\nhdfs_sessions('content.txt')\n"
] |
[
[
"pandas.read_csv",
"pandas.DataFrame.from_dict"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.3",
"1.1",
"1.5",
"1.2"
],
"scipy": [],
"tensorflow": []
}
] |
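Aside for the row above (notebooks/sessions.py): the block-id extraction at the heart of hdfs_sessions, shown on two assumed HDFS-style log lines rather than the real HDFS.log input.

import re

lines = [
    "081109 203518 INFO dfs.DataNode$PacketResponder: Received block blk_-1608999687919862906",      # assumed sample line
    "081109 203519 INFO dfs.FSNamesystem: BLOCK* NameSystem.allocateBlock: blk_7503483334202473044",  # assumed sample line
]
for line in lines:
    print(re.findall(r"(blk_-?\d+)", line))   # same pattern the script applies to each log row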
hadim/maskflow
|
[
"6a70725ba26c6e65189936fd5c242c5ab15d6952",
"6a70725ba26c6e65189936fd5c242c5ab15d6952"
] |
[
"maskflow/tests/test_bbox.py",
"maskflow/mask.py"
] |
[
"import numpy as np\nimport numpy.testing as npt\n\nimport maskflow\n\n\ndef test_from_masks():\n masks = []\n\n mask = np.zeros((128, 128), dtype=\"uint8\")\n mask[20:50, 80:90] = 1\n masks.append(mask)\n\n mask = np.zeros((128, 128), dtype=\"uint8\")\n mask[80:950, 50:80] = 1\n masks.append(mask)\n\n bboxes = maskflow.bbox.from_masks(masks)\n\n excepted_bboxes = [[20, 80, 29, 9], [80, 50, 47, 29]]\n npt.assert_equal(bboxes, excepted_bboxes)\n",
"from itertools import groupby\n\nimport numpy as np\nfrom skimage import measure\nfrom PIL import Image\n\n\ndef merge_mask(mask):\n multipliers = np.arange(1, mask.shape[0] + 1)[:, np.newaxis, np.newaxis]\n return np.sum(mask * multipliers, axis=0)\n\n\ndef resize_binary_mask(array, new_size):\n image = Image.fromarray(array.astype(np.uint8) * 255)\n image = image.resize(new_size)\n return np.asarray(image).astype(np.bool_)\n\n\ndef close_contour(contour):\n if not np.array_equal(contour[0], contour[-1]):\n contour = np.vstack((contour, contour[0]))\n return contour\n\n\ndef binary_mask_to_rle(binary_mask):\n rle = {'counts': [], 'size': list(binary_mask.shape)}\n counts = rle.get('counts')\n for i, (value, elements) in enumerate(groupby(binary_mask.ravel(order='F'))):\n if i == 0 and value == 1:\n counts.append(0)\n counts.append(len(list(elements)))\n\n return rle\n\n\ndef binary_mask_to_polygon(binary_mask, tolerance=0):\n \"\"\"Convert a binary mask to COCO polygon representation,\n Args:\n binary_mask: a 2D binary numpy array where '1's represent the object\n tolerance: Maximum distance from original points of polygon to approximated\n polygonal chain. If tolerance is 0, the original coordinate array is returned.\n \"\"\"\n polygons = []\n # pad mask to close contours of shapes which start and end at an edge\n padded_binary_mask = np.pad(\n binary_mask, pad_width=1, mode='constant', constant_values=0)\n contours = measure.find_contours(padded_binary_mask, 0.5)\n contours = np.subtract(contours, 1)\n for contour in contours:\n contour = close_contour(contour)\n contour = measure.approximate_polygon(contour, tolerance)\n if len(contour) < 3:\n continue\n contour = np.flip(contour, axis=1)\n segmentation = contour.ravel().tolist()\n # after padding and subtracting 1 we may get -0.5 points in our segmentation\n segmentation = [0 if i < 0 else i for i in segmentation]\n polygons.append(segmentation)\n\n return polygons\n"
] |
[
[
"numpy.testing.assert_equal",
"numpy.zeros"
],
[
"numpy.pad",
"numpy.array_equal",
"numpy.asarray",
"numpy.arange",
"numpy.subtract",
"numpy.flip",
"numpy.sum",
"numpy.vstack"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
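Aside for the row above (maskflow/mask.py): binary_mask_to_rle copied verbatim and run on a tiny assumed mask, to show the column-major run-length counts it produces.

from itertools import groupby
import numpy as np

def binary_mask_to_rle(binary_mask):
    rle = {'counts': [], 'size': list(binary_mask.shape)}
    counts = rle.get('counts')
    for i, (value, elements) in enumerate(groupby(binary_mask.ravel(order='F'))):
        if i == 0 and value == 1:
            counts.append(0)
        counts.append(len(list(elements)))
    return rle

mask = np.zeros((3, 3), dtype='uint8')
mask[1, :] = 1                          # assumed toy mask: middle row set
print(binary_mask_to_rle(mask))         # {'counts': [1, 1, 2, 1, 2, 1, 1], 'size': [3, 3]}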
galaxy-captain/Deep-Learning
|
[
"06f0f931c750db6267148f27f48ab5c0eb02abaa"
] |
[
"tensorflow_basic/examples/variable_1.py"
] |
[
"#!/usr/bin/env python\n# -*- coding:utf-8 -*\nimport tensorflow as tf\n\nstate = tf.Variable(0,name='counter')\nprint(state.name)\n\none = tf.constant(1)\n\nnew_value = tf.add(state,one)\nupdate = tf.assign(state,new_value)\n\ninit = tf.global_variables_initializer()\n\nwith tf.Session() as session:\n session.run(init)\n for _ in range(3):\n session.run(update)\n print(session.run(state))\n"
] |
[
[
"tensorflow.constant",
"tensorflow.Variable",
"tensorflow.assign",
"tensorflow.global_variables_initializer",
"tensorflow.add",
"tensorflow.Session"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"1.4",
"1.5",
"1.7",
"0.12",
"1.0",
"1.2"
]
}
] |
kumarak93/LinearConv
|
[
"2cd9945c769c16206fd380c6b23b76d44102805f"
] |
[
"models/allconv_xcnn.py"
] |
[
"import torch\nimport torchvision\nimport torchvision.transforms as transforms\n#import matplotlib.pyplot as plt\nimport numpy as np\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\nimport time\nfrom torchsummary import summary\nimport os\nimport math\nimport summ\n#from thop import profile\n\nimport argparse\nap = argparse.ArgumentParser()\nap.add_argument(\"-t\", \"--type\", type=str, required=True, help=\"rank ratio or sparse\")\nap.add_argument(\"-r\", \"--rank\", type=float, default=1, help=\"rank of Matrix\")\nap.add_argument(\"-s\", \"--sparsity\", type=float, default=0, help=\"sparsity of Matrix\")\nargs = vars(ap.parse_args())\nrank = args['rank'] #0.75\nsparsity = args['sparsity'] #0.25\nexp_type = args['type']\nif exp_type == 'rank':\n netType = 'xCNNlow'\nelif exp_type == 'sparse':\n netType = 'xCNN'\nreg_const_2 = 0.01\nreq_percentile = sparsity\nthres_step = 0.00001 #0.001\nprune_step = 500\n#classi_new\n\nepochs = 350\nstart_epoch = 0\nbatch_size = 50 #49000, 1000\ndirectory = './'\ncheckpoint_dir = directory+'ckpt/allconv_xcnn/'\nroot = directory+'dataset/'\nload_ckpt = False\nload_ckpt_num = 0\nload_path = checkpoint_dir+'net_epoch_'+str(load_ckpt_num)+'.ckpt'\ntime_every = 100\nverbose = True\nloss_every = 980\n\n#rank = 1\nreg_const = 0.01\nreg_per_batches = 10\nlrate = 0.0001\n# NO MILESTONES USED ////////////////////\n\ntransform_train = transforms.Compose([\n transforms.RandomCrop(32, padding=4),\n transforms.RandomHorizontalFlip(),\n transforms.ToTensor(),\n transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),\n])\n\ntransform_test = transforms.Compose([\n transforms.ToTensor(),\n transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),\n])\n\ntrainset = torchvision.datasets.CIFAR10(root=root, train=True,download=False, transform=transform_train)\ntrainloader = torch.utils.data.DataLoader(trainset, batch_size=batch_size, shuffle=True, num_workers=2)\ntestset = torchvision.datasets.CIFAR10(root=root, train=False, download=False, transform=transform_test)\ntestloader = torch.utils.data.DataLoader(testset, batch_size=batch_size, shuffle=False, num_workers=2)\nclasses = ('plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck')\ndataiter = iter(trainloader)\nimages, labels = dataiter.next()\n\ndevice = torch.device('cuda' if torch.cuda.is_available() else 'cpu') \n\n########################################################################################################################\n########################################################################################################################\n\nclass xCNN(torch.nn.Module): \n def __init__(self, channels, filters, kernel_size, padding=1, stride=1, groups=1, bias=True): \n super(xCNN, self).__init__() \n self.filters = filters \n self.times = 2 #ratio 1/2 \n self.kernel_size = kernel_size \n self.channels = channels//groups \n self.padding = padding \n self.stride = stride \n self.biasTrue = bias \n self.groups = groups \n \n self.counter = 0 \n self.threshold = 0 \n #self.mask = torch.abs(self.linear_weights) > self.threshold \n \n self.conv_weights = nn.Parameter(torch.Tensor(filters//self.times, channels, kernel_size, kernel_size).to(device)) \n self.linear_weights = nn.Parameter(torch.Tensor(filters-filters//self.times, filters//self.times).to(device)) \n \n torch.nn.init.xavier_uniform(self.conv_weights) \n self.linear_weights.data.uniform_(-0.1, 0.1) \n \n #self.mask = torch.abs(self.linear_weights) > 
self.threshold \n \n if self.biasTrue: \n self.bias = nn.Parameter(torch.Tensor(filters).to(device)) \n self.bias.data.uniform_(-0.1, 0.1) \n \n self.mask = nn.Parameter(torch.abs(self.linear_weights) > self.threshold, requires_grad = False) \n self.mask.requires_grad = False \n \n def forward(self, input): \n \n self.counter += 1 \n if self.counter == prune_step: \n self.counter = 0 \n self.mask = nn.Parameter(torch.abs(self.linear_weights) > self.threshold, requires_grad = False) \n self.percentile = 1. - float(torch.sum(self.mask).item())/(self.mask.shape[0]**2) \n #self.threshold += (req_percentile - self.percentile) * thres_step \n self.threshold += (2./(1.+10**(10*(self.percentile-req_percentile)))-1) * thres_step \n print('pruned... %.2f, %.5f' %(self.percentile, self.threshold)) \n \n self.mask = nn.Parameter(self.mask.type(torch.FloatTensor).to(device), requires_grad = False) \n temp = self.linear_weights * self.mask \n self.correlated_weights = torch.mm(temp, self.conv_weights.reshape(self.filters//self.times,-1))\\\n .reshape(self.filters-self.filters//self.times, self.channels, self.kernel_size, self.kernel_size) \n\n if self.biasTrue: \n return F.conv2d(input, torch.cat((self.conv_weights,self.correlated_weights), dim = 0),\\\n bias=self.bias, padding=self.padding, stride=self.stride) \n else: \n return F.conv2d(input, torch.cat((self.conv_weights,self.correlated_weights), dim = 0),\\\n padding=self.padding, stride=self.stride) \n\nclass xCNNlow(torch.nn.Module):\n def __init__(self, channels, filters, kernel_size, padding=1, stride=1, groups=1, rank=1, bias=True):\n super(xCNNlow, self).__init__()\n self.filters = filters\n self.times = 2\n self.kernel_size = kernel_size\n self.channels = channels//groups\n self.padding = padding\n self.stride = stride\n self.biasTrue = bias\n self.rank = rank\n self.groups = groups\n\n self.conv_weights = nn.Parameter(torch.Tensor(filters//self.times, channels, kernel_size, kernel_size).to(device))\n self.column_weights = nn.Parameter(torch.Tensor(filters-filters//self.times, int((filters//self.times)*self.rank)).to(device))\n self.row_weights = nn.Parameter(torch.Tensor(int((filters//self.times)*self.rank), filters//self.times).to(device))\n \n torch.nn.init.xavier_uniform(self.conv_weights)\n self.column_weights.data.uniform_(-0.1, 0.1)\n self.row_weights.data.uniform_(-0.1, 0.1)\n \n if self.biasTrue:\n self.bias = nn.Parameter(torch.Tensor(filters).to(device))\n self.bias.data.uniform_(-0.1, 0.1)\n\n def forward(self, input): \n self.correlated_weights = torch.mm(self.column_weights, torch.mm(self.row_weights,self.conv_weights.reshape(self.filters//self.times,-1)))\\\n .reshape(self.filters-self.filters//self.times, self.channels, self.kernel_size, self.kernel_size) \n if self.biasTrue:\n return F.conv2d(input, torch.cat((self.conv_weights,self.correlated_weights), dim = 0),\\\n bias=self.bias, padding=self.padding, stride=self.stride)\n else:\n return F.conv2d(input, torch.cat((self.conv_weights,self.correlated_weights), dim = 0),\\\n padding=self.padding, stride=self.stride)\n\ndef l1Loss(feat): \n loss = 0 \n param = {} \n for i in feat.named_parameters(): \n if 'linear_weights' in i[0]: \n dat = i[1] \n #corr = corrcoef(dat.reshape(dat.shape[0], -1)) \n loss += torch.sum(torch.abs(dat)) \n return loss \n\ndef count_op_xCNNlow(m, x, y):\n x = x[0]\n\n multiply_adds = 1\n\n cin = m.channels\n cout = m.filters\n kh, kw = m.kernel_size, m.kernel_size\n batch_size = x.size()[0]\n\n out_h = y.size(2)\n out_w = y.size(3)\n\n # ops per 
output element\n # kernel_mul = kh * kw * cin\n # kernel_add = kh * kw * cin - 1\n kernel_ops = multiply_adds * kh * kw\n bias_ops = 1 if m.biasTrue is True else 0\n ops_per_element = kernel_ops + bias_ops\n\n # total ops\n # num_out_elements = y.numel()\n output_elements = batch_size * out_w * out_h * cout\n conv_ops = output_elements * ops_per_element * cin // m.groups\n\n # per output element\n total_mul_1 = m.filters//m.times\n total_add_1 = total_mul_1 - 1\n num_elements_1 = m.rank * (cin * kh * kw) # (m.filters - m.filters//m.times)\n total_mul_2 = m.rank\n total_add_2 = total_mul_2 - 1\n num_elements_2 = (m.filters - m.filters//m.times) * (cin * kh * kw) # (m.filters - m.filters//m.times)\n lin_ops = (total_mul_1 + total_add_1) * num_elements_1 + (total_mul_2 + total_add_2) * num_elements_2\n total_ops = lin_ops + conv_ops\n print(lin_ops, conv_ops)\n\n m.total_ops = torch.Tensor([int(total_ops)])\n\ndef count_op_xCNN(m, x, y):\n x = x[0]\n\n multiply_adds = 1\n\n cin = m.channels\n cout = m.filters\n kh, kw = m.kernel_size, m.kernel_size\n batch_size = x.size()[0]\n\n out_h = y.size(2)\n out_w = y.size(3)\n\n # ops per output element\n # kernel_mul = kh * kw * cin\n # kernel_add = kh * kw * cin - 1\n kernel_ops = multiply_adds * kh * kw\n bias_ops = 1 if m.biasTrue is True else 0\n ops_per_element = kernel_ops + bias_ops\n\n # total ops\n # num_out_elements = y.numel()\n output_elements = batch_size * out_w * out_h * cout\n conv_ops = output_elements * ops_per_element * cin // 1 #m.groups=1\n\n # per output element\n total_mul = m.filters//m.times\n total_add = total_mul - 1\n num_elements = (m.filters - m.filters//m.times) * (cin * kh * kw)\n lin_ops = (total_mul + total_add) * num_elements\n total_ops = lin_ops + conv_ops\n print(lin_ops, conv_ops)\n\n m.total_ops = torch.Tensor([int(total_ops)])\n\ndef corrcoef(x):\n mean_x = torch.mean(x, dim=1, keepdim=True)\n xm = x.sub(mean_x.expand_as(x))\n c = xm.mm(xm.t())\n c = c / (x.size(1) - 1)\n d = torch.diag(c)\n stddev = torch.pow(d, 0.5)\n c = c/stddev[:,None]\n c = c/stddev[None,:]\n c = torch.clamp(c, -1.0, 1.0)\n return c\n\ndef corrLoss(feat):\n loss = 0\n param = {}\n for i in feat.named_parameters():\n if 'conv_weights' in i[0]:\n dat = i[1]\n corr = corrcoef(dat.reshape(dat.shape[0], -1))\n loss += torch.sum(torch.abs(corr - torch.eye(corr.shape[0]).to(device)))\n return loss\n\ndef summary(model, input):\n with summ.TorchSummarizeDf(model) as tdf:\n x = torch.rand(input).to(device)\n y = model(x)\n df = tdf.make_df()\n print(df)\n\nclass AllConvNet(nn.Module):\n def __init__(self, cfg, input_size=3, n_classes=10, **kwargs):\n super(AllConvNet, self).__init__()\n if cfg == 'CNN':\n self.conv1 = nn.Conv2d(input_size, 96, 3, padding=1)\n self.conv2 = nn.Conv2d(96, 96, 3, padding=1)\n self.conv3 = nn.Conv2d(96, 96, 3, padding=1, stride=2)\n self.conv4 = nn.Conv2d(96, 192, 3, padding=1)\n self.conv5 = nn.Conv2d(192, 192, 3, padding=1)\n self.conv6 = nn.Conv2d(192, 192, 3, padding=1, stride=2)\n self.conv7 = nn.Conv2d(192, 192, 3, padding=1)\n self.conv8 = nn.Conv2d(192, 192, 1)\n self.class_conv = nn.Conv2d(192, n_classes, 1)\n elif cfg == 'xCNN':\n self.conv1 = xCNN(input_size, 96, 3, padding=1)\n self.conv2 = xCNN(96, 96, 3, padding=1)\n self.conv3 = xCNN(96, 96, 3, padding=1, stride=2)\n self.conv4 = xCNN(96, 192, 3, padding=1)\n self.conv5 = xCNN(192, 192, 3, padding=1)\n self.conv6 = xCNN(192, 192, 3, padding=1, stride=2)\n self.conv7 = xCNN(192, 192, 3, padding=1)\n self.conv8 = xCNN(192, 192, 1)\n self.class_conv = 
nn.Conv2d(192, n_classes, 1)\n elif cfg == 'xCNNlow':\n self.conv1 = xCNNlow(input_size, 96, 3, padding=1, rank=rank)\n self.conv2 = xCNNlow(96, 96, 3, padding=1, rank=rank)\n self.conv3 = xCNNlow(96, 96, 3, padding=1, stride=2, rank=rank)\n self.conv4 = xCNNlow(96, 192, 3, padding=1, rank=rank)\n self.conv5 = xCNNlow(192, 192, 3, padding=1, rank=rank)\n self.conv6 = xCNNlow(192, 192, 3, padding=1, stride=2, rank=rank)\n self.conv7 = xCNNlow(192, 192, 3, padding=1, rank=rank)\n self.conv8 = xCNNlow(192, 192, 1, rank=rank)\n self.class_conv = nn.Conv2d(192, n_classes, 1)\n\n def forward(self, x):\n x_drop = F.dropout(x, .2)\n conv1_out = F.relu(self.conv1(x_drop))\n conv2_out = F.relu(self.conv2(conv1_out))\n conv3_out = F.relu(self.conv3(conv2_out))\n conv3_out_drop = F.dropout(conv3_out, .5)\n conv4_out = F.relu(self.conv4(conv3_out_drop))\n conv5_out = F.relu(self.conv5(conv4_out))\n conv6_out = F.relu(self.conv6(conv5_out))\n conv6_out_drop = F.dropout(conv6_out, .5)\n conv7_out = F.relu(self.conv7(conv6_out_drop))\n conv8_out = F.relu(self.conv8(conv7_out))\n\n class_out = F.relu(self.class_conv(conv8_out))\n pool_out = F.adaptive_avg_pool2d(class_out, 1)\n pool_out.squeeze_(-1)\n pool_out.squeeze_(-1)\n return pool_out\n\n########################################################################################################################\n########################################################################################################################\n\n \nnet = AllConvNet(netType).to(device)\nsummary(net, (1,3,32,32))\n#flops, params = profile(net, input_size=(1, 3, 32,32), custom_ops={xCNN: count_op_xCNN, xCNNlow: count_op_xCNNlow})\n#print(flops,params)\n\ncriterion = nn.CrossEntropyLoss()\n\nopt = optim.Adam(net.parameters(), lr=lrate)\n\nif os.path.isfile(load_path) and load_ckpt:\n checkpoint = torch.load(load_path)\n net.load_state_dict(checkpoint['model_state_dict'])\n opt.load_state_dict(checkpoint['opt'])\n start_epoch = checkpoint['epoch'] + 1\n print('model loaded')\n\nfor epoch in range(start_epoch, epochs): # loop over the dataset multiple times\n running_loss = 0.0\n start = time.time()\n for ind, data in enumerate(trainloader, 0):\n inputs, labels = data\n inputs, labels = inputs.to(device), labels.to(device)\n outputs = net(inputs)\n\n opt.zero_grad()\n loss = criterion(outputs, labels)\n total_loss = loss\n\n #***********************************\n reg_loss = 0; l1loss = 0;\n if ind % reg_per_batches == reg_per_batches - 1:\n for layer in list(net.children()):\n reg_loss += corrLoss(layer)\n if exp_type == 'sparse':\n l1loss += l1Loss(layer) \n total_loss = loss + reg_const * reg_loss + reg_const_2 * l1loss \n total_loss.backward()\n opt.step()\n\n running_loss += loss.item()\n\n if ind % time_every == time_every - 1 and verbose:\n end = time.time()\n print('[%d, %5d, time: %d ms loss:%.3f reg:%.3f total:%.3f]' %(epoch + 1, ind+1, (end-start)*1000,\n \tloss, reg_loss, total_loss))\n start = time.time()\n\n if ind % loss_every == loss_every - 1:\n test_loss = 0\n accuracy = 0\n net.eval()\n with torch.no_grad():\n for inputs, labels in testloader:\n inputs, labels = inputs.to(device), labels.to(device)\n logps = net.forward(inputs)\n batch_loss = criterion(logps, labels)\n test_loss += batch_loss.item() \n ps = torch.exp(logps)\n top_p, top_class = ps.topk(1, dim=1)\n equals = top_class == labels.view(*top_class.shape)\n accuracy += torch.mean(equals.type(torch.FloatTensor)).item()\n end = time.time()\n print('time: %d ms, Epoch: %d/%d, Train loss: %.3f, Test 
loss: %.3f, Test accuracy: %.3f'\n %((end-start)*1000, epoch+1, epochs, running_loss/len(trainloader), test_loss/len(testloader), accuracy/len(testloader)))\n running_loss = 0.\n start = time.time()\n net.train()\n\n if epoch % 20 == 0:\n save_dict = {\n 'epoch': epoch,\n 'model_state_dict': net.state_dict(),\n 'opt': opt.state_dict(),\n }\n torch.save(save_dict, checkpoint_dir+'net_epoch_'+str(epoch)+'.ckpt')\n\nprint('Finished Training')\n\n"
] |
[
[
"torch.mean",
"torch.abs",
"torch.load",
"torch.nn.functional.dropout",
"torch.cat",
"torch.utils.data.DataLoader",
"torch.sum",
"torch.no_grad",
"torch.cuda.is_available",
"torch.pow",
"torch.nn.CrossEntropyLoss",
"torch.eye",
"torch.rand",
"torch.nn.Conv2d",
"torch.nn.functional.adaptive_avg_pool2d",
"torch.exp",
"torch.diag",
"torch.Tensor",
"torch.clamp",
"torch.nn.init.xavier_uniform"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
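The allconv_xcnn script stored above derives half of each layer's filters from the other half through a low-rank linear map (the xCNNlow module), then runs a single conv2d over the concatenated bank. A minimal standalone sketch of that weight-sharing step, with toy sizes and the names base, rank, derived assumed purely for illustration, not taken from the dataset:

    import torch
    import torch.nn.functional as F

    filters, channels, k = 8, 3, 3        # toy sizes, assumed for the sketch
    base = filters // 2                   # filters learned directly
    rank = 2                              # rank of the mixing map, as in xCNNlow

    conv_w = torch.randn(base, channels, k, k)   # primary filter bank
    col = torch.randn(filters - base, rank)      # low-rank factors
    row = torch.randn(rank, base)

    # derived filters = col @ row @ (primary filters flattened per filter)
    derived = (col @ row @ conv_w.reshape(base, -1)).reshape(filters - base, channels, k, k)
    weight = torch.cat((conv_w, derived), dim=0) # full bank passed to conv2d

    x = torch.randn(1, channels, 16, 16)
    print(F.conv2d(x, weight, padding=1).shape)  # torch.Size([1, 8, 16, 16])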
MorganeAudrain/Calcium_new
|
[
"1af0ab4f70b91d1ca55c6053112c1744b1da1bd3",
"1af0ab4f70b91d1ca55c6053112c1744b1da1bd3"
] |
[
"steps/source_extraction.py",
"steps/alignment.py"
] |
[
"# -*- coding: utf-8 -*-\n\nimport datetime\nimport data_base_manipulation as db\nimport analysis_files_manipulation as fm\n\nimport caiman as cm \nfrom caiman.source_extraction import cnmf\nfrom caiman.source_extraction.cnmf import params as params\n\nimport caiman.base.rois\nimport logging\n\nimport numpy as np \nimport os\nimport psutil\n\n#step = 'source_extraction'\n\n#%% MAIN\ndef run_source_extraction(row, parameters, dview, session_wise = False):\n '''\n This is the function for source extraction.\n Its goal is to take in a .mmap file,\n perform source extraction on it using cnmf-e and save the cnmf object as a .pkl file. \n \n This function is only runnable on the cn76 server because it requires parralel processing. \n \n Args:\n row: pd.DataFrame object\n The row corresponding to the analysis state to be source extracted. \n \n Returns:\n row: pd.DataFrame object\n The row corresponding to the source extracted analysis state. \n '''\n step_index = 4\n row_local = row.copy()\n row_local.loc['source_extraction_parameters'] = str(parameters)\n row_local = db.set_version_analysis('source_extraction',row_local,session_wise)\n index = row_local.name\n\n # Determine input path\n if parameters['session_wise']:\n input_mmap_file_path = eval(row_local.loc['alignment_output'])['main']\n else: \n input_mmap_file_path = eval(row_local.loc['motion_correction_output'])['main']\n if not os.path.isfile(input_mmap_file_path):\n logging.error('Input file does not exist. Cancelling.')\n return row_local\n \n # Determine output paths\n file_name = db.create_file_name(step_index, index)\n data_dir = 'data/interim/source_extraction/session_wise/' if parameters['session_wise'] else 'data/interim/source_extraction/trial_wise/'\n output_file_path = data_dir + f'main/{file_name}.hdf5'\n \n \n # Create a dictionary with parameters\n output = {\n 'main': output_file_path,\n 'meta':{\n 'analysis' : {\n 'analyst' : os.environ['ANALYST'],\n 'date' : datetime.datetime.today().strftime(\"%m-%d-%Y\"),\n 'time' : datetime.datetime.today().strftime(\"%H:%M:%S\"),\n },\n 'duration': {}\n }\n }\n \n # Load memmory mappable input file\n if os.path.isfile(input_mmap_file_path):\n Yr, dims, T = cm.load_memmap(input_mmap_file_path)\n# logging.debug(f'{index} Loaded movie. dims = {dims}, T = {T}.')\n images = Yr.T.reshape((T,) + dims, order='F')\n else:\n logging.warning(f'{index} .mmap file does not exist. Cancelling')\n return row_local\n \n # SOURCE EXTRACTION\n # Check if the summary images are already there\n corr_npy_file_path, pnr_npy_file_path = fm.get_corr_pnr_path(index, gSig_abs = parameters['gSig'][0])\n \n if corr_npy_file_path != None and os.path.isfile(corr_npy_file_path): \n # Already computed summary images\n logging.info(f'{index} Already computed summary images')\n cn_filter = np.load(corr_npy_file_path)\n pnr = np.load(pnr_npy_file_path)\n else:\n # Compute summary images\n t0 = datetime.datetime.today()\n logging.info(f'{index} Computing summary images')\n cn_filter, pnr = cm.summary_images.correlation_pnr(images[::1], gSig = parameters['gSig'][0], swap_dim=False)\n dt = int((datetime.datetime.today() - t0).seconds/60) # timedelta in minutes\n output['meta']['duration']['summary_images'] = dt \n logging.info(f'{index} Computed summary images. 
dt = {dt} min')\n # Saving summary images as npy files\n gSig = parameters['gSig'][0]\n corr_npy_file_path = data_dir + f'/meta/corr/{db.create_file_name(3, index)}_gSig_{gSig}.npy'\n pnr_npy_file_path = data_dir + f'/meta/pnr/{db.create_file_name(3, index)}_gSig_{gSig}.npy'\n with open(corr_npy_file_path, 'wb') as f:\n np.save(f, cn_filter)\n with open(pnr_npy_file_path, 'wb') as f:\n np.save(f, pnr)\n \n # Store the paths in the meta dictionary \n output['meta']['corr'] = {'main': corr_npy_file_path, 'meta': {}}\n output['meta']['pnr'] = {'main': pnr_npy_file_path, 'meta': {}}\n \n # Calculate min, mean, max value for cn_filter and pnr\n corr_min, corr_mean, corr_max = cn_filter.min(), cn_filter.mean(), cn_filter.max()\n output['meta']['corr']['meta'] = {'min': corr_min, 'mean': corr_mean, 'max': corr_max}\n pnr_min, pnr_mean, pnr_max = pnr.min(), pnr.mean(), pnr.max()\n output['meta']['pnr']['meta'] = {'min': pnr_min, 'mean': pnr_mean, 'max': pnr_max}\n \n # If min_corr and min_pnr are specified via a linear equation, calculate \n # this value \n if type(parameters['min_corr']) == list:\n min_corr = parameters['min_corr'][0]*corr_mean + parameters['min_corr'][1]\n parameters['min_corr'] = min_corr\n logging.info(f'{index} Automatically setting min_corr = {min_corr}')\n if type(parameters['min_pnr']) == list:\n min_pnr = parameters['min_pnr'][0]*pnr_mean + parameters['min_pnr'][1]\n parameters['min_pnr'] = min_pnr\n logging.info(f'{index} Automatically setting min_pnr = {min_pnr}')\n\n # Set the parameters for caiman\n opts = params.CNMFParams(params_dict = parameters) \n \n # SOURCE EXTRACTION \n logging.info(f'{index} Performing source extraction')\n t0 = datetime.datetime.today()\n n_processes = psutil.cpu_count()\n logging.info(f'{index} n_processes: {n_processes}')\n cnm = cnmf.CNMF(n_processes = n_processes, dview = dview, params = opts)\n cnm.fit(images)\n cnm.estimates.dims = dims \n \n # Store the number of neurons\n output['meta']['K'] = len(cnm.estimates.C)\n \n # Calculate the center of masses\n cnm.estimates.center = caiman.base.rois.com(cnm.estimates.A, images.shape[1], images.shape[2])\n \n # Save the cnmf object as a hdf5 file \n logging.info(f'{index} Saving cnmf object')\n cnm.save(output_file_path)\n dt = int((datetime.datetime.today() - t0).seconds/60) # timedelta in minutes\n output['meta']['duration']['source_extraction'] = dt\n logging.info(f'{index} Source extraction finished. dt = {dt} min')\n \n # Write necessary variables in row and return\n row_local.loc['source_extraction_parameters'] = str(parameters)\n row_local.loc['source_extraction_output'] = str(output)\n \n return row_local\n",
"# -*- coding: utf-8 -*-\n\"\"\"\n@author: Sebastian,Casper\n\"\"\"\n\nimport logging\nimport caiman as cm\nimport caiman.motion_correction\nfrom caiman.motion_correction import MotionCorrect, high_pass_filter_space\nfrom caiman.source_extraction.cnmf import params as params\nfrom caiman.mmapping import load_memmap\n\nimport datetime\nimport os\nimport numpy as np \nimport pickle\nimport math \nimport scipy\nimport scipy.stats\n\nimport src.data_base_manipulation as db\nimport src.paths as paths\n\nstep_index = 3\ndef run_alignmnet(states_df, parameters, dview):\n '''\n This is the main function for the alignment step. It applies methods \n from the CaImAn package used originally in motion correction\n to do alignment. \n \n Args:\n df: pd.DataFrame\n A dataframe containing the analysis states you want to have aligned.\n parameters: dict\n The alignment parameters.\n dview: object\n The dview object\n \n Returns:\n df: pd.DataFrame\n A dataframe containing the aligned analysis states. \n '''\n \n\n # Sort the dataframe correctly\n df = states_df.copy()\n df = df.sort_values(by = paths.multi_index_structure)\n\n # Determine the mouse and session of the dataset\n index = df.iloc[0].name\n mouse, session, *r = index\n #alignment_v = index[len(paths.data_structure) + step_index]\n alignment_v = len(df)\n alignment_index = (mouse, session, alignment_v)\n \n # Determine the output .mmap file name\n file_name = f'mouse_{mouse}_session_{session}_v{alignment_v}'\n output_mmap_file_path = f'data/interim/alignment/main/{file_name}.mmap'\n\n try:\n df.reset_index()[['trial','is_rest']].set_index(['trial','is_rest'], verify_integrity = True)\n except ValueError:\n logging.error('You passed multiple of the same trial in the dataframe df')\n return df\n \n output = {\n 'meta' : { \n 'analysis': {\n 'analyst': os.environ['ANALYST'],\n 'date': datetime.datetime.today().strftime(\"%m-%d-%Y\"),\n 'time': datetime.datetime.today().strftime(\"%H:%M:%S\")\n },\n 'duration': {}\n }\n }\n\n # Get necessary parameters\n motion_correction_parameters_list = []\n motion_correction_output_list = [] \n input_mmap_file_list = []\n trial_index_list = []\n for idx, row in df.iterrows():\n motion_correction_parameters_list.append(eval(row.loc['motion_correction_parameters']) )\n motion_correction_output = eval(row.loc['motion_correction_output'])\n motion_correction_output_list.append(motion_correction_output)\n input_mmap_file_list.append(motion_correction_output['main'])\n trial_index_list.append(db.get_trial_name(idx[2],idx[3]))\n \n # MOTION CORRECTING EACH INDIVIDUAL MOVIE WITH RESPECT TO A TEMPLATE MADE OF THE FIRST MOVIE\n logging.info(f'{alignment_index} Performing motion correction on all movies with respect to a template made of \\\n the first movie.')\n t0 = datetime.datetime.today()\n\n # Create a template of the first movie\n template_index = trial_index_list.index(parameters['make_template_from_trial'])\n m0 = cm.load(input_mmap_file_list[template_index])\n m0_filt = cm.movie(\n np.array([high_pass_filter_space(m_, parameters['gSig_filt']) for m_ in m0]))\n template0 = cm.motion_correction.bin_median(m0_filt.motion_correct(5, 5, template=None)[0]) # may be improved in the future\n\n # Setting the parameters\n opts = params.CNMFParams(params_dict = parameters) \t\t\t\t\t\t\t\t\t\t\t\t\t \n \n # Create a motion correction object \n mc = MotionCorrect(input_mmap_file_list, dview = dview, **opts.get_group('motion'))\n \n # Perform non-rigid motion correction\n mc.motion_correct(template = template0, save_movie = 
True)\n \n # Cropping borders\n x_ = math.ceil(abs(np.array(mc.shifts_rig)[:,1].max()) if np.array(mc.shifts_rig)[:,1].max() > 0 else 0)\n _x = math.ceil(abs(np.array(mc.shifts_rig)[:,1].min()) if np.array(mc.shifts_rig)[:,1].min() < 0 else 0)\n y_ = math.ceil(abs(np.array(mc.shifts_rig)[:,0].max()) if np.array(mc.shifts_rig)[:,0].max() > 0 else 0)\n _y = math.ceil(abs(np.array(mc.shifts_rig)[:,0].min()) if np.array(mc.shifts_rig)[:,0].min() < 0 else 0)\n \n dt = int((datetime.datetime.today() - t0).seconds/60) # timedelta in minutes\n output['meta']['duration']['motion_correction'] = dt\n logging.info(f'{alignment_index} Performed motion correction. dt = {dt} min.')\n \n # CONCATENATING ALL MOTION CORRECTED MOVIES\n logging.info(f'{alignment_index} Concatenating all motion corrected movies.')\n \n # Load all movies into memory \n m_list = [cm.load(fname) for fname in mc.fname_tot_rig]\n \n # Crop all movies to those border pixels\n for idx, m in enumerate(m_list):\n m_list[idx] = m.crop(x_,_x,y_,_y,0,0)\n output['meta']['cropping_points'] = [x_,_x,y_,_y]\n \n # Concatenate them using the concat function \n m_concat = cm.concatenate(m_list, axis = 0)\n \n # Create a timeline and store it\n timeline = [[trial_index_list[0],0]]\n for i in range(1,len(m_list)):\n m = m_list[i]\n timeline.append([trial_index_list[i], timeline[i-1][1] + m.shape[0]])\n# timeline_pkl_file_path = f'data/interim/alignment/meta/timeline/{file_name}.pkl'\n# with open(timeline_pkl_file_path,'wb') as f:\n# pickle.dump(timeline,f) \n# output['meta']['timeline'] = timeline_pkl_file_path\n output['meta']['timeline'] = timeline \n \n # Save the concatenated movie\n output_mmap_file_path_tot = m_concat.save(output_mmap_file_path)\n output['main'] = output_mmap_file_path_tot\n \n# # Delete the motion corrected movies\n# for fname in mc.fname_tot_rig:\n# os.remove(fname)\n \n dt = int((datetime.datetime.today() - t0).seconds/60) # timedelta in minutes\n output['meta']['duration']['concatenation'] = dt\n logging.info(f'{alignment_index} Performed concatenation. dt = {dt} min.')\n \n for idx, row in df.iterrows():\n df.loc[idx, 'alignment_output'] = str(output)\n df.loc[idx, 'alignment_parameters'] = str(parameters)\n \n return df\n\n#%% METRICS\n \ndef get_correlations(df):\n '''\n Get the correlation of both the origin movies and the aligned movie w.r.t a template created\n of the movie to which the movies were aligned\n \n Args:\n df: pd.DataFrame\n The dataframe containing the aligned analysis states. 
\n \n Returns:\n df: pd.DataFrame\n The dataframe containing the aligned analysis states with\n the metrics stored in the meta output.\n '''\n \n # Load a dummy index\n index = df.iloc[0].name\n \n # Load the original movies and the aligned, concatenated movie\n original_m_list = []\n trial_name_list = [] \n for index, row in df.iterrows():\n motion_correction_output = eval(row.loc['motion_correction_output'])\n m = cm.load(motion_correction_output['main'])\n original_m_list.append(m)\n trial_name_list.append(db.get_trial_name(index[2],index[3]))\n alignment_output = eval(df.iloc[0].loc['alignment_output'])\n aligned_m = cm.load(alignment_output['main'])\n \n # Load the cropping points and timeline and to which trial the alignment was done \n cropping_points = alignment_output['meta']['cropping_points']\n timeline = alignment_output['meta']['timeline']\n alignment_parameters = eval(df.iloc[0].loc['alignment_parameters'])\n make_template_from_trial = alignment_parameters['make_template_from_trial']\n template_index = trial_name_list.index(make_template_from_trial)\n \n # Crop the original movies\n cropped_original_m_list = [] \n for i, m in enumerate(original_m_list):\n [x_, _x, y_, _y] = cropping_points\n cropped_original_m_list.append(m.crop(x_,_x,y_,_y,0,0))\n \n # Concatenate the original movie\n m_original_concat = cm.concatenate(cropped_original_m_list, axis = 0)\n \n # ORIGINAL MOVIE CORRELATIONS\n # Create a template of the movie to which alignment has taken place \n m0 = cropped_original_m_list[template_index]\n m0_filt = cm.movie(\n np.array([high_pass_filter_space(m_, alignment_parameters['gSig_filt']) for m_ in m0]))\n tmpl = caiman.motion_correction.bin_median(m0_filt.motion_correct(5, 5, template=None)[0]) # may be improved in the future\n # Calculate the correlations of each movie with respect to that template\n logging.debug('Computing original movie correlations')\n t0 = datetime.datetime.today()\n correlations_orig = []\n count = 0\n m_compute = m_original_concat - np.min(m_original_concat)\n for fr in m_compute:\n if count % 100 == 0:\n logging.debug(f'Frame {count}')\n count += 1\n correlations_orig.append(scipy.stats.pearsonr(\n fr.flatten(), tmpl.flatten())[0])\n dt = int((datetime.datetime.today() - t0).seconds/60) # timedelta in minutes\n logging.debug(f'Computed original movie correlations. dt = {dt} min')\n \n # ALIGNED CORRELATIONS \n # Create a template of the movie to which alignment has taken place \n m0 = aligned_m[timeline[template_index][1]:timeline[template_index + 1][1]] if template_index != len(timeline) -1 else aligned_m[timeline[template_index][1]:] \n m0_filt = cm.movie(\n np.array([high_pass_filter_space(m_, alignment_parameters['gSig_filt']) for m_ in m0]))\n tmpl = caiman.motion_correction.bin_median(m0_filt.motion_correct(5, 5, template=None)[0]) # may be improved in the future\n\n # Calculate the correlations of each movie with respect to that template\n logging.debug('Computing aligned movie correlations')\n t0 = datetime.datetime.today()\n correlations_aligned = []\n count = 0\n m_compute = aligned_m - np.min(aligned_m)\n for fr in m_compute:\n if count % 100 == 0:\n logging.debug(f'Frame {count}')\n count += 1\n correlations_aligned.append(scipy.stats.pearsonr(\n fr.flatten(), tmpl.flatten())[0])\n dt = int((datetime.datetime.today() - t0).seconds/60) # timedelta in minutes\n logging.debug(f'Computed aligned movie correlations. 
dt = {dt} min') \n\n # STORE THE CORRELATIONS\n correlations = {'original': correlations_orig, 'aligned': correlations_aligned}\n metrics_pkl_file_path = f'data/interim/alignment/meta/correlations/mouse_{index[0]}_session{index[1]}_v{index[7]}.pkl'\n with open(metrics_pkl_file_path, 'wb') as f:\n pickle.dump(correlations, f)\n alignment_output['meta']['correlations'] = metrics_pkl_file_path\n \n for idx, row in df.iterrows():\n df.loc[idx, 'alignment_output'] = str(alignment_output)\n \n return df "
] |
[
[
"numpy.load",
"numpy.save"
],
[
"numpy.array",
"numpy.min"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
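In the source_extraction.py step stored above, min_corr and min_pnr may be given either as plain numbers or as [slope, intercept] pairs applied to the mean of the corresponding summary image. A minimal sketch of that convention; the helper name resolve_threshold and the numbers are hypothetical, only the rule itself comes from the stored code:

    def resolve_threshold(value, summary_mean):
        # float -> used as-is; [a, b] -> a * summary_mean + b (as in the stored step)
        if isinstance(value, (list, tuple)):
            a, b = value
            return a * summary_mean + b
        return float(value)

    print(resolve_threshold(0.8, 0.55))          # 0.8
    print(resolve_threshold([1.0, 0.1], 0.55))   # 0.65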
raj713335/Data-Science-Hackathon-And-Competition
|
[
"09c41a4856e07f912cdfe3d50cb1c7faec3e708a"
] |
[
"HACKER EARTH/Identify The Dance Form/Model_Predict.py"
] |
[
"# Load the required Libraries\r\n\r\n\r\nimport tensorflow as tf\r\nimport matplotlib.image as img\r\nimport numpy as np\r\nfrom collections import defaultdict\r\nimport collections\r\nfrom shutil import copy\r\nfrom shutil import copytree, rmtree\r\nimport tensorflow.keras.backend as K\r\nfrom tensorflow.keras.models import load_model\r\nfrom tensorflow.keras.preprocessing import image\r\nimport matplotlib.pyplot as plt\r\nimport os\r\nimport random\r\nfrom tensorflow.keras import regularizers\r\nfrom tensorflow.keras.applications.inception_v3 import InceptionV3\r\nfrom tensorflow.keras.models import Sequential, Model\r\nfrom tensorflow.keras.layers import Dense, Dropout, Activation, Flatten\r\nfrom tensorflow.keras.layers import Convolution2D, MaxPooling2D, ZeroPadding2D, GlobalAveragePooling2D, AveragePooling2D\r\nfrom tensorflow.keras.preprocessing.image import ImageDataGenerator\r\nfrom tensorflow.keras.callbacks import ModelCheckpoint, CSVLogger\r\nfrom tensorflow.keras.optimizers import SGD\r\nfrom tensorflow.keras.regularizers import l2\r\nfrom tensorflow import keras\r\nfrom tensorflow.keras import models\r\nimport cv2\r\nimport matplotlib.image as mpimg\r\nimport PIL\r\nimport pandas as pd\r\n\r\n\r\nimport warnings\r\nwarnings.filterwarnings('ignore')\r\n\r\n\r\n# Checking if GPU is enabled\r\n\r\n# Check if GPU is enabled\r\nprint(tf.__version__)\r\nprint(tf.test.gpu_device_name())\r\n\r\n\r\n# Setting up Location For the Directorys\r\n\r\ndir_path=os.getcwd()\r\ndir_path=dir_path.replace(\"\\\\\",\"/\")\r\n\r\n# The path to the dataset Directory\r\nbase_directory=dir_path+'/dataset'\r\n\r\n# The path to the training images directory\r\ntrain_dir=base_directory+'/train'\r\n\r\n# The path to the test images directory\r\ntest_dir=base_directory+'/test'\r\n\r\n# Creating a list variable containing the names of all the images\r\ntest_csv_data=pd.read_csv(base_directory+'/test.csv')\r\n\r\npredict_images_list=test_csv_data.Image.values.tolist()\r\n\r\n\r\n\r\ntrain_csv_data=pd.read_csv(base_directory+'/train.csv')\r\ndance_class_types=train_csv_data.target.unique().tolist()\r\n\r\nprint(predict_images_list)\r\nprint(len(predict_images_list))\r\n\r\n\r\n\r\n\r\n# Loading the created model from running the Model_Creator.py file.\r\n\r\n# Loading the best saved model to make predictions\r\nmodel = load_model('best_model_8class.hdf5',compile = False)\r\n\r\n# Loading The Model Summary\r\nmodel.summary()\r\n\r\n\r\n# Predicting the values of test images\r\n\r\n# for each in predict_images_list:\r\n# predict_class(model_best, test_dir+'/'+each, True)\r\n\r\n\r\n\r\n\r\n# Defining Helper Functions\r\n\r\ndef deprocess_image(x):\r\n # normalize tensor: center on 0., ensure std is 0.1\r\n x -= x.mean()\r\n x /= (x.std() + 1e-5)\r\n x *= 0.1\r\n\r\n # clip to [0, 1]\r\n x += 0.5\r\n x = np.clip(x, 0, 1)\r\n\r\n # convert to RGB array\r\n x *= 255\r\n x = np.clip(x, 0, 255).astype('uint8')\r\n return x\r\n\r\n\r\ndef generate_pattern(layer_name, filter_index, size=150):\r\n # Build a loss function that maximizes the activation\r\n # of the nth filter of the layer considered.\r\n layer_output = model.get_layer(layer_name).output\r\n loss = K.mean(layer_output[:, :, :, filter_index])\r\n\r\n # Compute the gradient of the input picture wrt this loss\r\n grads = K.gradients(loss, model.input)[0]\r\n\r\n # Normalization trick: we normalize the gradient\r\n grads /= (K.sqrt(K.mean(K.square(grads))) + 1e-5)\r\n\r\n # This function returns the loss and grads given the input picture\r\n iterate = 
K.function([model.input], [loss, grads])\r\n\r\n # We start from a gray image with some noise\r\n input_img_data = np.random.random((1, size, size, 3)) * 20 + 128.\r\n\r\n # Run gradient ascent for 40 steps\r\n step = 1.\r\n for i in range(40):\r\n loss_value, grads_value = iterate([input_img_data])\r\n input_img_data += grads_value * step\r\n\r\n img = input_img_data[0]\r\n return deprocess_image(img)\r\n\r\n\r\n\r\n\r\ndef get_activations(img, model_activations):\r\n img = image.load_img(img, target_size=(299, 299))\r\n img = image.img_to_array(img)\r\n img = np.expand_dims(img, axis=0)\r\n img /= 255.\r\n plt.imshow(img[0])\r\n plt.show()\r\n return model_activations.predict(img)\r\n\r\n\r\ndef show_activations(activations, layer_names):\r\n images_per_row = 16\r\n\r\n # Now let's display our feature maps\r\n for layer_name, layer_activation in zip(layer_names, activations):\r\n # This is the number of features in the feature map\r\n n_features = layer_activation.shape[-1]\r\n\r\n # The feature map has shape (1, size, size, n_features)\r\n size = layer_activation.shape[1]\r\n\r\n # We will tile the activation channels in this matrix\r\n n_cols = n_features // images_per_row\r\n display_grid = np.zeros((size * n_cols, images_per_row * size))\r\n\r\n # We'll tile each filter into this big horizontal grid\r\n for col in range(n_cols):\r\n for row in range(images_per_row):\r\n channel_image = layer_activation[0,\r\n :, :,\r\n col * images_per_row + row]\r\n # Post-process the feature to make it visually palatable\r\n channel_image -= channel_image.mean()\r\n channel_image /= channel_image.std()\r\n channel_image *= 64\r\n channel_image += 128\r\n channel_image = np.clip(channel_image, 0, 255).astype('uint8')\r\n display_grid[col * size: (col + 1) * size,\r\n row * size: (row + 1) * size] = channel_image\r\n\r\n # Display the grid\r\n scale = 1. 
/ size\r\n plt.figure(figsize=(scale * display_grid.shape[1],\r\n scale * display_grid.shape[0]))\r\n plt.title(layer_name)\r\n plt.grid(False)\r\n plt.imshow(display_grid, aspect='auto', cmap='viridis')\r\n\r\n plt.show()\r\n\r\n\r\nprint(len(model.layers))\r\n\r\n\r\n# We start with index 1 instead of 0, as input layer is at index 0\r\nlayers = [layer.output for layer in model.layers[1:11]]\r\n# We now initialize a model which takes an input and outputs the above chosen layers\r\nactivations_output = models.Model(inputs=model.input, outputs=layers)\r\n\r\n\r\nprint(layers)\r\n\r\n\r\n#Get the names of all the selected layers\r\n\r\nlayer_names = []\r\nfor layer in model.layers[1:11]:\r\n layer_names.append(layer.name)\r\nprint(layer_names)\r\n\r\n\r\n# Visualize the activations of intermediate layers from layer 1 to 10\r\n\r\nfor i in range(0,1):\r\n toss=random.randint(0,len(predict_images_list)-1)\r\n activations = get_activations(test_dir+'/'+predict_images_list[toss] ,activations_output)\r\n show_activations(activations, layer_names)\r\n\r\n\r\n# Get the index of activation_1 layer which has sparse activations\r\nind = layer_names.index('activation_1')\r\nsparse_activation = activations[ind]\r\na = sparse_activation[0, :, :, 13]\r\nprint(a)\r\n\r\nprint(all (np.isnan(a[j][k]) for j in range(a.shape[0]) for k in range(a.shape[1])))\r\n\r\n# Get the index of batch_normalization_1 layer which has sparse activations\r\nind = layer_names.index('batch_normalization_1')\r\nsparse_activation = activations[ind]\r\nb = sparse_activation[0, :, :, 13]\r\nprint(b)\r\n\r\n\r\n#Show the activation outputs of 1st, 2nd and 3rd Conv2D layer activations to compare how layers get abstract with depth\r\n\r\nfirst_convlayer_activation = activations[0]\r\nsecond_convlayer_activation = activations[3]\r\nthird_convlayer_activation = activations[6]\r\nf,ax = plt.subplots(1,3, figsize=(10,10))\r\nax[0].imshow(first_convlayer_activation[0, :, :, 3], cmap='viridis')\r\nax[0].axis('OFF')\r\nax[0].set_title('Conv2d_1')\r\nax[1].imshow(second_convlayer_activation[0, :, :, 3], cmap='viridis')\r\nax[1].axis('OFF')\r\nax[1].set_title('Conv2d_2')\r\nax[2].imshow(third_convlayer_activation[0, :, :, 3], cmap='viridis')\r\nax[2].axis('OFF')\r\nax[2].set_title('Conv2d_3')\r\n\r\n\r\ndef get_attribution(food):\r\n img = image.load_img(food, target_size=(299, 299))\r\n img = image.img_to_array(img)\r\n img /= 255.\r\n f, ax = plt.subplots(1, 3, figsize=(15, 15))\r\n ax[0].imshow(img)\r\n\r\n img = np.expand_dims(img, axis=0)\r\n\r\n preds = model.predict(img)\r\n class_id = np.argmax(preds[0])\r\n ax[0].set_title(\"Input Image\")\r\n class_output = model.output[:, class_id]\r\n last_conv_layer = model.get_layer(\"mixed10\")\r\n\r\n # grads = K.gradients(class_output, last_conv_layer.output)[0]\r\n # pooled_grads = K.mean(grads, axis=(0, 1, 2))\r\n # iterate = K.function([model.input], [pooled_grads, last_conv_layer.output[0]])\r\n # pooled_grads_value, conv_layer_output_value = iterate([img])\r\n # for i in range(2048):\r\n # conv_layer_output_value[:, :, i] *= pooled_grads_value[i]\r\n #\r\n # heatmap = np.mean(conv_layer_output_value, axis=-1)\r\n # heatmap = np.maximum(heatmap, 0)\r\n # heatmap /= np.max(heatmap)\r\n # ax[1].imshow(heatmap)\r\n # ax[1].set_title(\"Heat map\")\r\n\r\n # act_img = cv2.imread(food)\r\n # heatmap = cv2.resize(heatmap, (act_img.shape[1], act_img.shape[0]))\r\n # heatmap = np.uint8(255 * heatmap)\r\n # heatmap = cv2.applyColorMap(heatmap, cv2.COLORMAP_JET)\r\n # superimposed = 
cv2.addWeighted(act_img, 0.6, heatmap, 0.4, 0)\r\n # cv2.imwrite('classactivation.png', superimposed)\r\n # img_act = image.load_img('classactivation.png', target_size=(299, 299))\r\n # ax[2].imshow(img_act)\r\n # ax[2].set_title(\"Class Activation\")\r\n # plt.show()\r\n return preds\r\n\r\n\r\n\r\n#Getting Heat Maps in Image\r\nfor i in range(0,2):\r\n toss=random.randint(0,len(predict_images_list)-1)\r\n pred = get_attribution(test_dir+'/'+predict_images_list[toss])\r\n\r\n print(pred)\r\n\r\n\r\noutput_list=[]\r\n\r\nfor i in range(0,len(predict_images_list)):\r\n pred = get_attribution(test_dir+'/'+predict_images_list[i]).tolist()\r\n pred=pred[0]\r\n dance_form_id=pred.index(max(pred))\r\n print(pred.index(max(pred)))\r\n output_list.append([predict_images_list[i],dance_class_types[dance_form_id]])\r\n\r\n\r\ndfObj = pd.DataFrame(output_list, columns = ['Image','target'])\r\n\r\n\r\ndfObj.to_csv('test.csv',index=False)\r\n\r\n\r\n\r\n"
] |
[
[
"tensorflow.keras.models.load_model",
"matplotlib.pyplot.imshow",
"numpy.expand_dims",
"tensorflow.test.gpu_device_name",
"tensorflow.keras.preprocessing.image.load_img",
"pandas.DataFrame",
"pandas.read_csv",
"numpy.clip",
"tensorflow.keras.backend.function",
"tensorflow.keras.backend.square",
"numpy.argmax",
"numpy.zeros",
"tensorflow.keras.preprocessing.image.img_to_array",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.title",
"tensorflow.keras.models.Model",
"numpy.isnan",
"tensorflow.keras.backend.gradients",
"matplotlib.pyplot.show",
"numpy.random.random",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.grid",
"tensorflow.keras.backend.mean"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.3",
"1.1",
"1.5",
"1.2"
],
"scipy": [],
"tensorflow": [
"2.7",
"2.4",
"2.3",
"2.5",
"2.6"
]
}
] |
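The Model_Predict.py script stored above turns raw activation and gradient tensors into displayable images with a deprocess_image step. A self-contained restatement using NumPy only, keeping the same normalization (center, scale to std 0.1, shift, clip to [0, 1], convert to uint8); the random input exists only to make the sketch runnable:

    import numpy as np

    def deprocess_image(x):
        x = x - x.mean()
        x = x / (x.std() + 1e-5)
        x = x * 0.1            # std ~0.1 keeps most values inside the clip range
        x = x + 0.5
        x = np.clip(x, 0, 1)
        return np.clip(x * 255, 0, 255).astype('uint8')

    img = deprocess_image(np.random.randn(150, 150, 3))
    print(img.dtype, img.min(), img.max())   # uint8, values spread over 0..255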
EraYaN/FletcherFiltering
|
[
"cc1f0955b5c543d6ee465197c2fc238f4b578f08"
] |
[
"generate_data_tests.py"
] |
[
"# Copyright (c) 2019 Erwin de Haan. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n# This file is part of the FletcherFiltering project\nfrom fletcherfiltering.common.data_generation import generate_random_data\nimport pyarrow as pa\nimport time\nimport numpy as np\nfrom pathlib import Path\nfrom fletcherfiltering import settings\nfrom math import ceil,floor\n\nDATA_SIZE = 1*1000*1000*1000\n\n\n#name = 'Simple'\nname = 'Float'\n\n#schema = pa.schema([('pkid', pa.int32(), False)])\nschema = pa.schema([('pkid', pa.int32(), False),\n ('half1', pa.float16(), False),\n ('float1', pa.float32(), False),\n ('double1', pa.float64(), False)])\nschema_size = 0\nschema_size_bits = 0\nfor col in schema:\n schema_size += ceil(col.type.bit_width/8)\n schema_size_bits += col.type.bit_width\n\nprint(\"One row in the schema takes {} bytes or {} bits ({} bytes).\".format(schema_size, schema_size_bits, schema_size_bits/8))\n\nSIZE_PER_RECORD = schema_size\n\nDATA_COUNT = floor(DATA_SIZE/SIZE_PER_RECORD)\n\nprint(\"Generating {} rows of size {} for a total size of {} ({})\".format(DATA_COUNT, SIZE_PER_RECORD, DATA_COUNT*SIZE_PER_RECORD, DATA_SIZE))\n\nmetadata_in = {b'fletcher_mode': b'read',\n b'fletcher_name': settings.INPUT_NAME.encode('ascii')}\n# Add the metadata to the schema\nschema = schema.add_metadata(metadata_in)\n\nschema_pk = 'pkid'\n\nstart = time.time()\n\ndata = generate_random_data(schema, schema_pk, DATA_COUNT, columnar=True)\n\n\n\nrb_data = []\nfor col in schema:\n type_func = (lambda x: x)\n\n if col.type == pa.float16():\n type_func = np.float16\n elif col.type == pa.float32():\n type_func = np.float32\n elif col.type == pa.float64():\n type_func = np.float64\n elif col.type == pa.int8():\n type_func = np.int8\n elif col.type == pa.uint8():\n type_func = np.uint8\n elif col.type == pa.int16():\n type_func = np.int16\n elif col.type == pa.uint16():\n type_func = np.uint16\n elif col.type == pa.int32():\n type_func = np.int32\n elif col.type == pa.uint32():\n type_func = np.uint32\n elif col.type == pa.int64():\n type_func = np.int64\n elif col.type == pa.uint64():\n type_func = np.uint64\n elif pa.types.is_timestamp(col.type):\n type_func = 'datetime64[{}]'.format(col.type.unit)\n\n rb_data.append(pa.array(np.asarray(data[col.name], dtype=type_func), col.type))\n\n\n# Create a RecordBatch from the Arrays.\nrecordbatch = pa.RecordBatch.from_arrays(rb_data, schema)\n\n# Create an Arrow RecordBatchFileWriter.\nwriter = pa.RecordBatchFileWriter(Path('{0}{1}.rb'.format(name, settings.DATA_SUFFIX)),\n schema)\n# Write the RecordBatch.\nwriter.write(recordbatch)\n\nwriter.close()\n\nend = time.time()\n\nprint(end - start)\n"
] |
[
[
"numpy.asarray"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
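The generate_data_tests.py script stored above sizes its random dataset by converting the Arrow schema's bit widths into bytes per row and dividing a byte budget by that figure. The same arithmetic on a smaller schema and a smaller budget, both assumed here for illustration:

    import pyarrow as pa
    from math import ceil, floor

    schema = pa.schema([('pkid', pa.int32(), False),
                        ('float1', pa.float32(), False),
                        ('double1', pa.float64(), False)])

    bytes_per_row = sum(ceil(col.type.bit_width / 8) for col in schema)
    target_bytes = 1_000_000
    print(bytes_per_row, floor(target_bytes / bytes_per_row))   # 16 62500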
serhatataman/generate-bitbirds
|
[
"e571deaec06c8edb5eb783ca50662fd08dacce6b"
] |
[
"single_image_creator.py"
] |
[
"from PIL import Image\nimport numpy as np\n\nbg = (123, 209, 224)\nhr = (255, 171, 0)\nsk = (241, 194, 125)\njk = (121, 86, 156)\ney = (135, 181, 44)\nbd = (0, 0, 0)\nmt = (0, 0, 0)\neb = (255, 255, 255)\n\npixels_list = [\n [bg, bg, bg, bg, bg, bg, bg, bg, bg, bg, bg, bg, bg, bg, bg, bg, bg, bg, bg, bg, bg, bg, bg, bg, bg],\n [bg, bg, bg, bg, bg, bg, bg, bg, bg, bg, bg, bg, bg, bg, bg, bg, bg, bg, bg, bg, bg, bg, bg, bg, bg],\n [bg, bg, bg, bg, bg, bg, bg, bg, bg, bg, bg, bg, bg, bg, bg, bg, bg, bg, bg, bg, bg, bg, bg, bg, bg],\n [bg, bg, bg, bg, bg, bg, bg, bg, bg, bg, bg, bg, bg, bg, bg, bg, bg, bg, bg, bg, bg, bg, bg, bg, bg],\n [bg, bg, bg, bg, bg, bg, bg, bg, bg, bg, bg, bg, bg, bg, bg, bg, bg, bg, bg, bg, bg, bg, bg, bg, bg],\n [bg, bg, bg, bg, bg, bg, bg, bg, bg, bd, bd, bd, bd, bd, bd, bg, bg, bg, bg, bg, bg, bg, bg, bg, bg],\n [bg, bg, bg, bg, bg, bg, bg, bg, bd, hr, hr, hr, hr, hr, hr, bd, bg, bg, bg, bg, bg, bg, bg, bg, bg],\n [bg, bg, bg, bg, bg, bg, bg, bd, hr, hr, hr, hr, hr, hr, hr, hr, bd, bg, bg, bg, bg, bg, bg, bg, bg],\n [bg, bg, bg, bg, bg, bg, bg, bd, hr, sk, hr, sk, hr, sk, hr, sk, bd, bg, bg, bg, bg, bg, bg, bg, bg],\n [bg, bg, bg, bg, bg, bg, bg, bd, sk, sk, sk, sk, sk, sk, sk, sk, bd, bg, bg, bg, bg, bg, bg, bg, bg],\n [bg, bg, bg, bg, bg, bg, bg, bd, sk, sk, sk, eb, eb, sk, eb, eb, bd, bg, bg, bg, bg, bg, bg, bg, bg],\n [bg, bg, bg, bg, bg, bg, bg, bd, sk, sk, sk, ey, eb, sk, ey, eb, bd, bg, bg, bg, bg, bg, bg, bg, bg],\n [bg, bg, bg, bg, bg, bg, bg, bd, sk, sk, sk, sk, sk, sk, sk, sk, bd, bg, bg, bg, bg, bg, bg, bg, bg],\n [bg, bg, bg, bg, bg, bg, bg, bd, sk, sk, sk, sk, sk, sk, sk, sk, bd, bg, bg, bg, bg, bg, bg, bg, bg],\n [bg, bg, bg, bg, bg, bg, bg, bd, sk, sk, sk, sk, sk, sk, mt, sk, bd, bg, bg, bg, bg, bg, bg, bg, bg],\n [bg, bg, bg, bg, bg, bg, bg, bd, sk, sk, sk, sk, mt, mt, mt, sk, bd, bg, bg, bg, bg, bg, bg, bg, bg],\n [bg, bg, bg, bg, bg, bg, bg, bd, sk, sk, sk, sk, sk, sk, sk, sk, bd, bg, bg, bg, bg, bg, bg, bg, bg],\n [bg, bg, bg, bg, bg, bg, bg, bg, bd, sk, sk, sk, sk, sk, sk, sk, bd, bg, bg, bg, bg, bg, bg, bg, bg],\n [bg, bg, bg, bg, bg, bg, bg, bg, bg, bd, jk, sk, sk, jk, jk, bd, bg, bg, bg, bg, bg, bg, bg, bg, bg],\n [bg, bg, bg, bg, bg, bd, bd, bd, bd, bd, jk, sk, sk, jk, jk, bd, bd, bd, bd, bd, bg, bg, bg, bg, bg],\n [bg, bg, bg, bg, bg, bd, jk, jk, jk, jk, jk, sk, sk, jk, jk, jk, jk, jk, jk, bd, bd, bg, bg, bg, bg],\n [bg, bg, bg, bg, bd, bd, jk, jk, jk, jk, jk, sk, sk, jk, jk, jk, jk, jk, jk, jk, bd, bg, bg, bg, bg],\n [bg, bg, bg, bg, bd, jk, jk, jk, jk, jk, jk, sk, sk, jk, jk, jk, jk, jk, jk, jk, bd, bg, bg, bg, bg],\n [bg, bg, bg, bg, bg, bg, bg, bg, bg, bg, bg, bg, bg, bg, bg, bg, bg, bg, bg, bg, bg, bg, bg, bg, bg],\n [bg, bg, bg, bg, bg, bg, bg, bg, bg, bg, bg, bg, bg, bg, bg, bg, bg, bg, bg, bg, bg, bg, bg, bg, bg],\n]\n\navatar_array = np.array(pixels_list, dtype=np.uint8)\navatar_image = Image.fromarray(avatar_array)\navatar_image = avatar_image.resize((480, 480), resample=Image.NEAREST)\navatar_image.save(\"avatar.png\")\n"
] |
[
[
"numpy.array"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
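The single_image_creator.py script stored above builds its avatar by mapping a grid of RGB tuples through NumPy into a PIL image and upscaling with nearest-neighbour resampling so the pixel-art edges stay sharp. The same pattern reduced to a 2x2 checkerboard; the colours and output file name are arbitrary choices for the sketch:

    from PIL import Image
    import numpy as np

    bg, fg = (123, 209, 224), (0, 0, 0)
    grid = [[bg, fg],
            [fg, bg]]

    arr = np.array(grid, dtype=np.uint8)   # shape (2, 2, 3)
    img = Image.fromarray(arr).resize((64, 64), resample=Image.NEAREST)
    img.save("tiny_avatar.png")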
Tharky/missing_data_imputation
|
[
"60048269d8c970298d892880f3370198fd50cbc4"
] |
[
"imputation.py"
] |
[
"import timeit\r\nimport numpy as np\r\nimport math\r\nfrom collections import Counter\r\n#############################\r\n# Initialization / Settings #\r\n#############################\r\nsep = ',' # Separator\r\nfileName = \"kddn\" # File name\r\nfileNameLoss = fileName+\".5loss\" # File name with lost data\r\ncreateOutputFile = True\r\ncalculateMSE = True\r\n#############\r\n# Functions #\r\n#############\r\ndef printProgress(t,k,y,x):\r\n # Function that prints the progress.\r\n print(\"Info: Imputed\", t, \"out of\", k, \":\", y, \"-> (\", x, \")\")\r\n\r\n\r\ndef printLine():\r\n # Function that prints a line.\r\n print(\"--------------------------------------------\")\r\n\r\n\r\ndef elapsedStr():\r\n # Function that calculates elapsed time and returns it as a string. Needs init for global tT first.\r\n global tT\r\n t = abs(tT-timeit.default_timer())\r\n h = int(t / 3600)\r\n m = int((t - 3600 * h) / 60)\r\n s = round((t - 3600 * h) - 60 * m)\r\n tT = timeit.default_timer()\r\n if h+m+s < 0.1:\r\n strT = \"[really quick].\"\r\n else:\r\n strT = \"in [{:>02d}:{:>02d}:{:>02d}].\".format(h,m,s)\r\n return strT\r\n\r\n\r\ndef isfloat(s):\r\n # Function to check if value is float. Returns true if castable.\r\n try:\r\n float(s)\r\n return True\r\n except:\r\n return False\r\n\r\n\r\ndef give_id(v):\r\n # Function to give ids to strings. Helps to make numerical calculations easier. Needs global id lst and strings lst.\r\n global strID, strings\r\n if v in strings:\r\n return strings[v]\r\n else:\r\n print(\"NewID: {:>12s} replaced with id: {:<4d}\".format(v, strID))\r\n strings[v] = strID\r\n strID += 1\r\n return strID-1\r\n\r\n\r\ndef get_id(v):\r\n # Function that returns the string of the given id.\r\n global strings\r\n v = round(v)\r\n return next((st for st, k in strings.items() if k == v), None)\r\n\r\n\r\ndef mse():\r\n # Function that calculates M S Error.\r\n total = 0.0\r\n global original, imported, missing, miss\r\n for _, v in enumerate(missing):\r\n i, j = v\r\n x = imported[i][j]\r\n y = original[i][j]\r\n if isfloat(y):\r\n y = float(y)\r\n else:\r\n y = give_id(y)\r\n total += abs(x - y)\r\n return math.sqrt(total/miss)\r\n###################\r\n# File Operations #\r\n###################\r\ntT = timeit.default_timer() # Initialization of elapsed() function.\r\nwith open(fileNameLoss, 'r', errors='replace') as inputFile, open(fileName, 'r', errors='replace') as inputFileOrg:\r\n imported = [] # Imported file as 2d array\r\n row = 0 # Imported file's rows\r\n missing = [] # Imported missing data indexes\r\n importedNM = [] # Imported non missing array\r\n importedNM_index = [] # Imported non missing indexes (holds indexes of NM to rewrite later)\r\n tagList = [] # Holds tags at the end of lines (to exclude them from imputation)\r\n tagListNM = [] # Holds tags of non-missing lines (to use in LSE method)\r\n strings = {} # Holds strings as ids to rewrite later\r\n strID = 0 # Initial ID.\r\n stringColumns = [] # Holds if columns are strings or not\r\n style = [] # Holds input style to output similar to input (int/float)\r\n print(\"Info: Importing file [{}], please wait...\".format(fileName))\r\n for idx, l in enumerate(inputFile): #TODO: Make importing with numpy to get rid of redundant lists, many useless code and algorithms.\r\n l = l.replace('\\n', '') # Hardcoded to remove any unnecessary lines in a file.\r\n imported.append(l.split(sep))\r\n row += 1 # Cheap way to get row amount\r\n if calculateMSE:\r\n original = [] # Imported original file without missing 
values\r\n for idx_, l in enumerate(inputFileOrg):\r\n l = l.replace('\\n', '') # Hardcoded to remove any unnecessary lines in a file.\r\n original.append(l.split(sep))\r\ncol = len(imported[0]) - 1 # Cheap way to get column amount\r\nprint(\"Info: File has has {} rows and {} columns.\".format(row,col))\r\nfor idx in range(col): # Get value type (to rewrite later)\r\n i = imported[0][idx]\r\n if i.find('.') != -1: # Cheap way to check if string is float or not.\r\n style.append('f')\r\n elif i.find('.') == -1:\r\n style.append('d')\r\n if isfloat(i):\r\n stringColumns.append(False)\r\n else:\r\n stringColumns.append(True)\r\nfor i in range(row):\r\n tagList.append(imported[i][col]) # Get the tag of this row and then...\r\n del(imported[i][-1]) # ...remove it from the main list.\r\n missingFlag = False\r\n for j in range(col): # Scan for missing elements.\r\n if imported[i][j] != '': # If not missing, do conversions, give ids, etc.\r\n v = imported[i][j]\r\n if not stringColumns[j]: # If data is not in a string column...\r\n imported[i][j] = float(v) # ...cast it as a float and add it to the list...\r\n else:\r\n imported[i][j] = give_id(v) # ...if it is, give it an id and add the id to the list.\r\n else: # If found a missing string:\r\n missing.append( [i,j] ) # Add the index to missing array.\r\n missingFlag = True # Flag this row to make appropriate changes.\r\n if not missingFlag:\r\n importedNM_index.append(i) # Add indexes to the smaller NM array\r\n importedNM.append(imported[i]) # Add elements to the NM array\r\n tagListNM.append(tagList[i]) # Add the tag of that row to the NM tag array\r\ntags = Counter(tagList).most_common() # All tags\r\nmiss = len(missing)\r\nprint(\"Info: Data has {} missing elements\".format(miss))\r\ndataSet = np.array(imported) # Whole data set\r\ndataSetNM = np.array(importedNM) # Non missing data set\r\nprint(\"Info: File import completed\",elapsedStr())\r\nprintLine()\r\n##############\r\n# Imputation #\r\n##############\r\nchoice = int(input(\"Method?:\\n•Least Squares Data Imputation (1)\\n•Naive Bayes Imputation (2)\\n•Hot Deck Imputation (3)\\n•Imputation with Most Frequent Element (4)\\nSelection:\"))\r\nprint(\"Info: Imputing process started... 
This may take a long time...\")\r\nif choice == 1: # -------------- Least Squares Data Imputation --------------\r\n nonZero = dataSetNM # Gets non-zero columns\r\n nonZeroT = nonZero.transpose()\r\n tagSet = np.array([give_id(t) for t in tagList])\r\n tagSetNM = np.array([tagSet[i] for i in importedNM_index])\r\n B = np.dot(np.dot(np.linalg.pinv(np.dot(nonZeroT, nonZero)), nonZeroT), tagSetNM) # LSE formula ((Bt*B)^-1)*Bt*y\r\n for idx, v in enumerate(missing):\r\n i, j = v\r\n sumB = sum([b*imported[i][idx] for idx, b in enumerate(B) if idx != j]) # Sum of all elements in B except the missing column's\r\n imported[i][j] = (tagSet[i] - sumB) / B[j]\r\n printProgress(idx + 1,miss,v,imported[i][j])\r\nelif choice == 2: # -------------- Naive Bayes Imputation --------------\r\n for idx, v in enumerate(missing):\r\n i, j = v\r\n tagMiss = tagList[i] # Missing data's tag\r\n currentColumn = [r[j] for k,r in enumerate(importedNM) if tagListNM[k] == tagMiss]\r\n imported[i][j] = Counter(currentColumn).most_common(1)[0][0]\r\n # TODO: Generate frequency tables beforehand to make imputing faster\r\n printProgress(idx + 1, miss, v, imported[i][j])\r\nelif choice == 3: # -------------- Hot Deck Imputation --------------\r\n kHD = 20\r\n for idx, v in enumerate(missing): # For each missing element in data set\r\n i, j = v\r\n euclidean = []\r\n euclideanTotal = 0\r\n for r in range(len(importedNM)): # Loop all non-missing rows...\r\n for c in range(col): # ...and all of its columns...\r\n if c != j: # ...except itself...\r\n euclideanTotal += (imported[i][c] - importedNM[r][c])**2 # ...to calculate the euclidean distance of both...\r\n e = math.sqrt(euclideanTotal)\r\n euclidean.append( [e, importedNM_index[r]] ) # Append found euclidean and index of that in the original data set\r\n sorted(euclidean, key=lambda l: l[0], reverse=True) # Sorts the euclidean list by their first value\r\n lst = [imported[euclidean[r][1]][j] for r in range(kHD)] # Gets the list of first kHD elements of those values\r\n imported[i][j] = Counter(lst).most_common(1)[0][0] # Imputes the most common element from above list.\r\n printProgress(idx + 1, miss, v, imported[i][j])\r\nelif choice == 4: # -------------- Imputation with Most Frequent Element --------------\r\n for idx, v in enumerate(missing):\r\n i, j = v\r\n currentColumn = [r[j] for r in importedNM]\r\n imported[i][j] = Counter(currentColumn).most_common(1)[0][0] # Simply imputes the most common element of that column regardless of any other information.\r\n printProgress(idx + 1, miss, v, imported[i][j])\r\nelse:\r\n print(\"Error: Wrong input. No imputations done.\")\r\nprint(\"Info: Imputed list generated\",elapsedStr())\r\nprintLine()\r\nif calculateMSE:\r\n print(\"Info: MSE: {:.3f}%\".format(mse()))\r\n printLine()\r\n################\r\n# File writing #\r\n################\r\nif createOutputFile:\r\n with open(fileName + \".imputed\", 'w', errors='replace') as outputFile:\r\n print(\"Info: Generating output file...\")\r\n for i in range(row):\r\n for j in range(col):\r\n x = imported[i][j]\r\n if stringColumns[j]:\r\n imported[i][j] = get_id(x)\r\n else:\r\n if style[j] == 'f':\r\n x = \"{:.2f}\".format(x)\r\n elif style[j] == 'd':\r\n x = \"{:d}\".format(int(x))\r\n imported[i][j] = str(x)\r\n line = sep.join(imported[i]) + sep + tagList[i] + '\\n'\r\n outputFile.write(line)\r\n outputFile.truncate()\r\n print(\"Info: Output file written\",elapsedStr())\r\n printLine()\r\n"
] |
[
[
"numpy.dot",
"numpy.array"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
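For the "Least Squares Data Imputation" branch of the imputation.py script stored above, the coefficient step is B = pinv(X^T X) X^T y computed over the rows with no missing values; each missing entry is then back-solved from its row's tag. The coefficient step alone, on a tiny complete matrix invented for the example:

    import numpy as np

    X = np.array([[1.0, 2.0],
                  [2.0, 1.0],
                  [3.0, 4.0]])      # rows with no missing values
    y = np.array([5.0, 4.0, 11.0])  # the tags of those rows

    B = np.dot(np.dot(np.linalg.pinv(np.dot(X.T, X)), X.T), y)
    print(B)                        # [1. 2.], since y = 1*x1 + 2*x2 exactly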
adubowski/set-mlp-keras
|
[
"14d0e4cecdf8ebd7d9e3fc31d180c1c11f982a7b"
] |
[
"SET-MLP-Keras-Weights-Mask/set_mlp_keras_cifar10.py"
] |
[
"# Author: Decebal Constantin Mocanu et al.;\n# Proof of concept implementation of Sparse Evolutionary Training (SET) of Multi Layer Perceptron (MLP) on CIFAR10 using Keras and a mask over weights.\n# This implementation can be used to test SET in varying conditions, using the Keras framework versatility, e.g. various optimizers, activation layers, tensorflow\n# Also it can be easily adapted for Convolutional Neural Networks or other models which have dense layers\n# However, due the fact that the weights are stored in the standard Keras format (dense matrices), this implementation can not scale properly.\n# If you would like to build and SET-MLP with over 100000 neurons, please use the pure Python implementation from the folder \"SET-MLP-Sparse-Python-Data-Structures\"\n\n# This is a pre-alpha free software and was tested with Python 3.5.2, Keras 2.1.3, Keras_Contrib 0.0.2, Tensorflow 1.5.0, Numpy 1.14;\n# The code is distributed in the hope that it may be useful, but WITHOUT ANY WARRANTIES; The use of this software is entirely at the user's own risk;\n# For an easy understanding of the code functionality please read the following articles.\n\n# If you use parts of this code please cite the following articles:\n# @article{Mocanu2018SET,\n# author = {Mocanu, Decebal Constantin and Mocanu, Elena and Stone, Peter and Nguyen, Phuong H. and Gibescu, Madeleine and Liotta, Antonio},\n# journal = {Nature Communications},\n# title = {Scalable Training of Artificial Neural Networks with Adaptive Sparse Connectivity inspired by Network Science},\n# year = {2018},\n# doi = {10.1038/s41467-018-04316-3}\n# }\n\n# @Article{Mocanu2016XBM,\n# author=\"Mocanu, Decebal Constantin and Mocanu, Elena and Nguyen, Phuong H. and Gibescu, Madeleine and Liotta, Antonio\",\n# title=\"A topological insight into restricted Boltzmann machines\",\n# journal=\"Machine Learning\",\n# year=\"2016\",\n# volume=\"104\",\n# number=\"2\",\n# pages=\"243--270\",\n# doi=\"10.1007/s10994-016-5570-z\",\n# url=\"https://doi.org/10.1007/s10994-016-5570-z\"\n# }\n\n# @phdthesis{Mocanu2017PhDthesis,\n# title = \"Network computations in artificial intelligence\",\n# author = \"D.C. 
Mocanu\",\n# year = \"2017\",\n# isbn = \"978-90-386-4305-2\",\n# publisher = \"Eindhoven University of Technology\",\n# }\n\nfrom __future__ import division\nfrom __future__ import print_function\nfrom keras.preprocessing.image import ImageDataGenerator\nfrom keras.models import Sequential\nfrom keras.layers import Dense, Dropout, Activation, Flatten\n\n# from keras.utils.generic_utils import get_custom_objects\n#\n# def swish(x, beta = 1):\n# return x * K.sigmoid(beta * x)\n#\n# get_custom_objects().update({'swish': Activation(swish)})\n\nfrom keras import optimizers\nimport tensorflow as tf\nimport numpy as np\nimport gc\nfrom keras import backend as K\nfrom keras.datasets import cifar10 as dataset\n\ndataset_name = \"cifar10\"\nactivation_function = \"softplus\"\n\nfrom keras.utils import np_utils\n\n\nclass Constraint(object):\n\n def __call__(self, w):\n return w\n\n def get_config(self):\n return {}\n\n\nclass MaskWeights(Constraint):\n\n def __init__(self, mask):\n self.mask = mask\n self.mask = K.cast(self.mask, K.floatx())\n\n def __call__(self, w):\n w *= self.mask\n return w\n\n def get_config(self):\n return {'mask': self.mask}\n\n\ndef find_first_pos(array, value):\n idx = (np.abs(array - value)).argmin()\n return idx\n\n\ndef find_last_pos(array, value):\n idx = (np.abs(array - value))[::-1].argmin()\n return array.shape[0] - idx\n\n\ndef createWeightsMask(epsilon, noRows, noCols):\n # generate an Erdos Renyi sparse weights mask\n mask_weights = np.random.rand(noRows, noCols)\n prob = 1 - (epsilon * (noRows + noCols)) / (\n noRows * noCols) # normal tp have 8x connections - sparsity level, change epsilon\n # print(\"Sparsity: \" + str(prob))\n mask_weights[mask_weights < prob] = 0\n mask_weights[mask_weights >= prob] = 1\n noParameters = np.sum(mask_weights)\n print(\"Create Sparse Matrix: No parameters, NoRows, NoCols \", noParameters, noRows, noCols)\n return [noParameters, mask_weights]\n\n\nclass SET_MLP_CIFAR10:\n def __init__(self):\n # set model parameters\n self.epsilon = 10 # control the sparsity level as discussed in the paper - default:20\n self.zeta = 0.3 # the fraction of the weights removed\n self.batch_size = 100 # batch size\n self.maxepoches = 500 # number of epochs, default 1000, 500 should be fine as well\n self.learning_rate = 0.01 # SGD learning rate\n self.num_classes = 10 # number of classes\n self.momentum = 0.9 # SGD momentum\n\n # generate an Erdos Renyi sparse weights mask for each layer\n [self.noPar1, self.wm1] = createWeightsMask(self.epsilon, 32 * 32 * 3, 4000)\n [self.noPar2, self.wm2] = createWeightsMask(self.epsilon, 4000, 1000)\n [self.noPar3, self.wm3] = createWeightsMask(self.epsilon, 1000, 4000)\n\n # initialize layers weights\n self.w1 = None\n self.w2 = None\n self.w3 = None\n self.w4 = None\n\n # initialize weights for SReLu activation function\n self.wSRelu1 = None\n self.wSRelu2 = None\n self.wSRelu3 = None\n\n # create a SET-MLP model\n self.create_model()\n\n # train the SET-MLP model\n self.train()\n\n def create_model(self):\n # create a SET-MLP model for CIFAR10 with 3 hidden layers\n self.model = Sequential()\n self.model.add(Flatten(input_shape=(32, 32, 3)))\n self.model.add(Dense(4000, name=\"sparse_1\", kernel_constraint=MaskWeights(self.wm1), weights=self.w1, activation=activation_function))\n # self.model.add(tf.nn.swish(features=))\n self.model.add(Dropout(0.3))\n self.model.add(Dense(1000, name=\"sparse_2\", kernel_constraint=MaskWeights(self.wm2), weights=self.w2, activation=activation_function))\n # 
self.model.add(SReLU(name=\"srelu2\",weights=self.wSRelu2))\n self.model.add(Dropout(0.3))\n self.model.add(Dense(4000, name=\"sparse_3\", kernel_constraint=MaskWeights(self.wm3), weights=self.w3, activation=activation_function))\n # self.model.add(SReLU(name=\"srelu3\",weights=self.wSRelu3))\n self.model.add(Dropout(0.3))\n self.model.add(Dense(self.num_classes, name=\"dense_4\",\n weights=self.w4)) # please note that there is no need for a sparse output layer as the number of classes is much smaller than the number of input hidden neurons\n self.model.add(Activation('softmax'))\n\n def rewireMask(self, weights, noWeights):\n # rewire weight matrix\n\n # remove zeta largest negative and smallest positive weights\n values = np.sort(weights.ravel())\n firstZeroPos = find_first_pos(values, 0)\n lastZeroPos = find_last_pos(values, 0)\n largestNegative = values[int((1 - self.zeta) * firstZeroPos)]\n smallestPositive = values[\n int(min(values.shape[0] - 1, lastZeroPos + self.zeta * (values.shape[0] - lastZeroPos)))]\n rewiredWeights = weights.copy()\n rewiredWeights[rewiredWeights > smallestPositive] = 1\n rewiredWeights[rewiredWeights < largestNegative] = 1\n rewiredWeights[rewiredWeights != 1] = 0\n weightMaskCore = rewiredWeights.copy()\n\n # add zeta random weights\n nrAdd = 0\n noRewires = noWeights - np.sum(rewiredWeights)\n\n\n while (nrAdd < noRewires):\n # emptyI, emptyJ = np.where(rewiredWeights == 0)\n #\n # i = np.random.choice(emptyI)\n # j = np.random.choice(np.where(rewiredWeights[i] == 0)[0])\n\n\n i = np.random.randint(0, rewiredWeights.shape[0])\n j = np.random.randint(0, rewiredWeights.shape[1])\n if (rewiredWeights[i, j] == 0):\n rewiredWeights[i, j] = 1\n nrAdd += 1\n\n return [rewiredWeights, weightMaskCore]\n\n def weightsEvolution(self):\n # this represents the core of the SET procedure. 
It removes the weights closest to zero in each layer and add new random weights\n self.w1 = self.model.get_layer(\"sparse_1\").get_weights()\n self.w2 = self.model.get_layer(\"sparse_2\").get_weights()\n self.w3 = self.model.get_layer(\"sparse_3\").get_weights()\n self.w4 = self.model.get_layer(\"dense_4\").get_weights()\n\n # self.wSRelu1 = self.model.get_layer(\"srelu1\").get_weights()\n # self.wSRelu2 = self.model.get_layer(\"srelu2\").get_weights()\n # self.wSRelu3 = self.model.get_layer(\"srelu3\").get_weights()\n\n [self.wm1, self.wm1Core] = self.rewireMask(self.w1[0], self.noPar1)\n [self.wm2, self.wm2Core] = self.rewireMask(self.w2[0], self.noPar2)\n [self.wm3, self.wm3Core] = self.rewireMask(self.w3[0], self.noPar3)\n\n self.w1[0] = self.w1[0] * self.wm1Core\n self.w2[0] = self.w2[0] * self.wm2Core\n self.w3[0] = self.w3[0] * self.wm3Core\n\n def train(self):\n\n # read CIFAR10 data\n [x_train, x_test, y_train, y_test] = self.read_data()\n\n # data augmentation\n datagen = ImageDataGenerator(\n featurewise_center=False, # set input mean to 0 over the dataset\n samplewise_center=False, # set each sample mean to 0\n featurewise_std_normalization=False, # divide inputs by std of the dataset\n samplewise_std_normalization=False, # divide each input by its std\n zca_whitening=False, # apply ZCA whitening\n rotation_range=10, # randomly rotate images in the range (degrees, 0 to 180)\n width_shift_range=0.1, # randomly shift images horizontally (fraction of total width)\n height_shift_range=0.1, # randomly shift images vertically (fraction of total height)\n horizontal_flip=True, # randomly flip images\n vertical_flip=False) # randomly flip images\n datagen.fit(x_train)\n\n self.model.summary()\n\n # training process in a for loop\n self.loss_per_epoch = []\n self.acc_per_epoch = []\n self.val_loss_per_epoch = []\n self.val_acc_per_epoch = []\n for epoch in range(0, self.maxepoches):\n sgd = optimizers.SGD(lr=self.learning_rate, momentum=self.momentum)\n self.model.compile(loss='categorical_crossentropy', optimizer=sgd, metrics=['accuracy'])\n\n historytemp = self.model.fit_generator(datagen.flow(x_train, y_train,\n batch_size=self.batch_size),\n steps_per_epoch=x_train.shape[0] // self.batch_size,\n epochs=epoch,\n validation_data=(x_test, y_test),\n initial_epoch=epoch - 1)\n # print(historytemp.history)\n self.loss_per_epoch.append(historytemp.history['loss'][0])\n self.acc_per_epoch.append(historytemp.history['accuracy'][0])\n self.val_loss_per_epoch.append(historytemp.history['val_loss'][0])\n self.val_acc_per_epoch.append(historytemp.history['val_accuracy'][0])\n\n # ugly hack to avoid tensorflow memory increase for multiple fit_generator calls. 
Theano shall work more nicely this but it is outdated in general\n self.weightsEvolution()\n gc.collect()\n K.clear_session()\n self.create_model()\n\n self.loss_per_epoch = np.asarray(self.loss_per_epoch)\n self.acc_per_epoch = np.asarray(self.acc_per_epoch)\n self.val_loss_per_epoch = np.asarray(self.val_loss_per_epoch)\n self.val_acc_per_epoch = np.asarray(self.val_acc_per_epoch)\n\n def read_data(self):\n\n # read CIFAR10 data\n (x_train, y_train), (x_test, y_test) = dataset.load_data()\n y_train = np_utils.to_categorical(y_train, self.num_classes)\n y_test = np_utils.to_categorical(y_test, self.num_classes)\n x_train = x_train.astype('float32')\n x_test = x_test.astype('float32')\n\n # normalize data\n xTrainMean = np.mean(x_train, axis=0)\n xTtrainStd = np.std(x_train, axis=0)\n x_train = (x_train - xTrainMean) / xTtrainStd\n x_test = (x_test - xTrainMean) / xTtrainStd\n\n return [x_train, x_test, y_train, y_test]\n\n\nif __name__ == '__main__':\n K.clear_session()\n gc.collect()\n gpus = tf.config.experimental.list_physical_devices('GPU')\n if gpus:\n try:\n # Restrict TensorFlow to only use the fourth GPU\n tf.config.experimental.set_visible_devices(gpus[0], 'GPU')\n\n # Currently, memory growth needs to be the same across GPUs\n for gpu in gpus:\n tf.config.experimental.set_memory_growth(gpu, True)\n logical_gpus = tf.config.experimental.list_logical_devices('GPU')\n print(len(gpus), \"Physical GPUs,\", len(logical_gpus), \"Logical GPUs\")\n except RuntimeError as e:\n # Memory growth must be set before GPUs have been initialized\n print(e)\n\n # create and run a SET-MLP model on CIFAR10\n SET = SET_MLP_CIFAR10()\n\n # save accuracies over for all training epochs\n # in \"results\" folder you can find the output of running this file\n # if not os.path.exists(\"results/\" + dataset_name + \"/\" + activation_function + \"/set_mlp_e\" + str(SET.epsilon) + \"_loss.txt\"):\n np.savetxt(\"results/\" + dataset_name + \"/\" + activation_function + \"/set_mlp_e\" + str(SET.epsilon) + \"_loss.txt\", np.asarray(SET.loss_per_epoch))\n np.savetxt(\"results/\" + dataset_name + \"/\" + activation_function + \"/set_mlp_e\" + str(SET.epsilon) + \"_acc.txt\", np.asarray(SET.acc_per_epoch))\n np.savetxt(\"results/\" + dataset_name + \"/\" + activation_function + \"/set_mlp_e\" + str(SET.epsilon) + \"_val_loss.txt\", np.asarray(SET.val_loss_per_epoch))\n np.savetxt(\"results/\" + dataset_name + \"/\" + activation_function + \"/set_mlp_e\" + str(SET.epsilon) + \"_val_acc.txt\", np.asarray(SET.val_acc_per_epoch))\n\n # Cleaning up is needed due to Python memory leak\n del SET\n gc.collect()\n K.clear_session()\n tf.compat.v1.reset_default_graph()\n print(\"Done\")\n"
] |
[
[
"tensorflow.config.experimental.list_logical_devices",
"numpy.abs",
"tensorflow.config.experimental.set_memory_growth",
"numpy.asarray",
"tensorflow.config.experimental.list_physical_devices",
"numpy.std",
"numpy.mean",
"numpy.random.rand",
"tensorflow.compat.v1.reset_default_graph",
"tensorflow.config.experimental.set_visible_devices",
"numpy.sum",
"numpy.random.randint"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
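The `createWeightsMask` routine in the SET-MLP file above draws an Erdos-Renyi sparsity pattern whose expected density is epsilon * (noRows + noCols) / (noRows * noCols). A standalone equivalent is sketched below; the names and the use of NumPy's Generator API are illustrative choices, not the original implementation.

import numpy as np

def erdos_renyi_mask(epsilon, n_rows, n_cols, rng=None):
    # Binary mask with expected density epsilon*(n_rows+n_cols)/(n_rows*n_cols),
    # i.e. the keep probability implied by createWeightsMask above.
    rng = np.random.default_rng() if rng is None else rng
    keep_prob = epsilon * (n_rows + n_cols) / (n_rows * n_cols)
    mask = (rng.random((n_rows, n_cols)) < keep_prob).astype(np.float32)
    return mask, int(mask.sum())

# first sparse layer above: 32*32*3 inputs to 4000 units, epsilon=10
mask, n_params = erdos_renyi_mask(epsilon=10, n_rows=32 * 32 * 3, n_cols=4000)
print(mask.shape, n_params)   # roughly 0.6% of the 3072x4000 entries are kept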
morrocoy/hsi
|
[
"da6a2923dff831e927aaea04ba657ddcb1b7e4eb",
"da6a2923dff831e927aaea04ba657ddcb1b7e4eb"
] |
[
"hsi/gui/graphicsItems/BaseImagCtrlItem.py",
"hsi/core/hs_formats.py"
] |
[
"import sys\nimport copy\n\nimport numpy as np\nimport pyqtgraph as pg\n\nfrom ...bindings.Qt import QtWidgets, QtCore\nfrom ...log import logmanager\nfrom ...misc import check_is_an_array, check_class\n\nfrom .ColorBarItem import ColorBarItem\nfrom .InfiniteLine import InfiniteLine\n\nlogger = logmanager.getLogger(__name__)\n\n\n__all__ = ['BaseImagCtrlItem']\n\n\nclass BaseImagCtrlItem(pg.GraphicsWidget):\n\n sigCursorPositionChangeFinished = QtCore.Signal(object)\n sigCursorPositionChanged = QtCore.Signal(object)\n\n def __init__(self, *args, **kwargs):\n\n parent = kwargs.get('parent', None)\n pg.GraphicsWidget.__init__(self, parent)\n\n self.linkedImageControlItem = None\n\n self.plotItem = pg.PlotItem()\n\n self.imageItem = pg.ImageItem()\n self.plotItem.addItem(self.imageItem)\n\n self.cursorX = InfiniteLine(angle=90, movable=True, pen=(150, 150, 150), hoverPen=(255, 255, 255))\n self.cursorY = InfiniteLine(angle=0, movable=True, pen=(150, 150, 150), hoverPen=(255, 255, 255))\n self.cursorX.setPos(0)\n self.cursorY.setPos(0)\n self.cursorX.setZValue(10)\n self.cursorY.setZValue(11)\n self.cursorX.connect(self.cursorY)\n self.cursorY.connect(self.cursorX)\n\n self.plotItem.addItem(self.cursorX, ignoreBounds=True)\n self.plotItem.addItem(self.cursorY, ignoreBounds=True)\n self.plotViewBox = self.plotItem.getViewBox()\n\n self.mainLayout = QtWidgets.QGraphicsGridLayout()\n self.setLayout(self.mainLayout)\n self.mainLayout.setContentsMargins(1, 1, 1, 1)\n self.mainLayout.setSpacing(0)\n\n self.mainLayout.addItem(self.plotItem, 0, 0)\n\n # Connect signals\n self.cursorX.sigPositionChangeFinished.connect(self.cursorPositionChangeFinishedEvent)\n self.cursorY.sigPositionChangeFinished.connect(self.cursorPositionChangeFinishedEvent)\n self.cursorX.sigPositionChanged.connect(self.cursorPositionChangeEvent)\n self.cursorY.sigPositionChanged.connect(self.cursorPositionChangeEvent)\n\n\n def cursorPositionChangeFinishedEvent(self):\n # print(ev.pos())\n self.sigCursorPositionChangeFinished.emit(self)\n logger.debug(\"Emit cursorPositionChangeFinished\")\n\n\n def cursorPositionChangeEvent(self):\n # print(ev.pos())\n self.sigCursorPositionChanged.emit(self)\n\n # if not self.linkedImageControlItem is None:\n # x = self.cursorX.getXPos()\n # y = self.cursorY.getYPos()\n # self.linkedImageControlItem.cursorX.setPos(x)\n # self.linkedImageControlItem.cursorY.setPos(y)\n logger.debug(\"emit cursorPositionChanged\")\n\n\n def getCursorPos(self):\n x = self.cursorX.getXPos()\n y = self.cursorY.getYPos()\n return [x, y]\n\n\n def setCursorPos(self, pos):\n self.cursorX.setPos(pos[0])\n self.cursorY.setPos(pos[1])\n\n\n def setAspectLocked(self, lock=True):\n self.plotViewBox.setAspectLocked(lock)\n\n\n def invertY(self, enable=True):\n self.plotViewBox.invertY(enable)\n\n\n def invertX(self, enable=True):\n self.plotViewBox.invertX(enable)\n\n\n def autoRange(self, *args, **kwargs):\n self.plotViewBox.autoRange(*args, **kwargs)\n\n\n def setImage(self, data):\n \"\"\" Sets the image data\n \"\"\"\n if isinstance(data, list):\n data = np.array(data)\n if not isinstance(data, np.ndarray):\n raise Exception(\"Plot data must be ndarray.\")\n\n if data.ndim == 2:\n nrows, ncols = data.shape\n nchan = 1\n self.imageItem.setImage(data, axisOrder='row-major')\n elif data.ndim == 3:\n nrows, ncols, nchan = data.shape\n self.imageItem.setImage(data, axisOrder='row-major')\n else:\n raise Exception(\"Plot data must be 2D or 3D ndarray.\")\n\n self.cursorX.setBounds((0, ncols-1))\n self.cursorY.setBounds((0, 
nrows-1))\n\n # width = self.imageItem.width()\n # height = self.imageItem.width()\n # self.cursorX.setPos((width // 2))\n # self.cursorY.setPos((height // 2))\n\n\n def setXYLink(self, graphicsItems):\n if isinstance(graphicsItems, pg.PlotItem):\n self.plotItem.setXLink(graphicsItems)\n self.plotItem.setYLink(graphicsItems)\n self.linkedImageControlItem = None\n elif isinstance(graphicsItems, BaseImagCtrlItem):\n self.plotItem.setXLink(graphicsItems.plotItem)\n self.plotItem.setYLink(graphicsItems.plotItem)\n self.linkedImageControlItem = None\n # self.linkedImageControlItem = graphicsItems\n # graphicsItems.linkedImageControlItem = self\n # self.plotItem.setXLink(self.linkedImageControlItem.plotItem)\n # self.plotItem.setYLink(self.linkedImageControlItem.plotItem)\n else:\n raise TypeError(\"Unexpected type {}, was expecting {}\".format(\n type(graphicsItems), (pg.PlotItem, BaseImagCtrlItem)))\n\n\n\n\n",
"import numpy\n\n\nclass HSFormatFlag(object):\n _counter = -1\n _flags = []\n\n def __init__(self, key, format_id):\n \"\"\"Constructor\n\n Parameters\n ----------\n key : str\n The string identifier for the hsformat flag.\n format_id : int\n The id for the hsformat flag.\n \"\"\"\n self._key = key\n self._id = format_id\n\n @property\n def key(self):\n \"\"\"str: The flag label.\"\"\"\n return self._key\n\n @property\n def id(self):\n \"\"\"str: The flag id.\"\"\"\n return self._id\n\n @classmethod\n def from_str(cls, key):\n \"\"\"Get the hsformat flag from a string identifier.\"\"\"\n for flag in cls._flags:\n if key == flag.key:\n return flag\n return None\n\n @classmethod\n def has_flag(cls, flag):\n \"\"\"list: A list of available flags.\"\"\"\n if flag in cls._flags:\n return True\n else:\n return False\n\n @classmethod\n def get_flags(cls):\n \"\"\"Get a list of available flags.\"\"\"\n return cls._flags\n\n @classmethod\n def set(cls, key):\n \"\"\"Adds a new hsformat flag.\n\n Parameters\n ----------\n key : str\n The string identifier for the hsformat flag.\n \"\"\"\n cls._counter += 1\n flag = cls(key, cls._counter)\n cls._flags.append(flag)\n return flag\n\n\n# HSFormatIntensity = HSFormatFlag.fromKey(\"INTENSITY\")\n# HSFormatAbsorption = HSFormatFlag.fromKey(\"ABSORPTION\")\n# HSFormatExtinction = HSFormatFlag.fromKey(\"EXTINCTION\")\n# HSFormatRefraction = HSFormatFlag.fromKey(\"REFRACTION\")\n\nHSIntensity = HSFormatFlag.set(\"Intensity\")\n\"\"\"static hsi.HSFormatFlag :\n Intensity \n\"\"\"\n\nHSAbsorption = HSFormatFlag.set(\"Absorption\")\n\"\"\"static hsi.HSFormatFlag :\n Absorption \n\"\"\"\n\nHSExtinction = HSFormatFlag.set(\"Extinction\")\n\"\"\"static hsi.HSFormatFlag :\n Extinction \n\"\"\"\n\nHSRefraction = HSFormatFlag.set(\"Refraction\")\n\"\"\"static hsi.HSFormatFlag :\n Refraction \n\"\"\"\n\nHSFormatDefault = HSIntensity\n\n# HSFormatIntensity is HSFormatAbsorption\n# HSFormatIntensity.key\n# HSFormatIntensity.id\n#\n# HSFormatAbsorption.id\n#\n# flag = HSFormatFlag.from_str(\"Extinction\")\n# flag is HSFormatExtinction\n# flag is HSFormatIntensity\n\n\ndef convert(target_format, source_format, spec, wavelen=None):\n \"\"\"Convert spectral data between different formats.\n\n The formats may be one of\n\n - :class:`hsi.HSIntensity`\n - :class:`hsi.HSAbsorption`\n - :class:`hsi.HSExtinction`\n - :class:`hsi.HSRefraction`\n\n Parameters\n ----------\n target_format : HSFormatFlag\n The target hsformat.\n source_format : HSFormatFlag\n The source hsformat.\n spec : numpy.ndarray\n The spectral data.\n wavelen : list or numpy.ndarray, optional\n The wavelengths at which the spectral data are sampled. 
Required for\n conversions which involve the hsformat :class:`hsi.HSRefraction`.\n\n Returns\n -------\n numpy.ndarray\n The spectral data in the new hsformat.\n\n \"\"\"\n if spec is None:\n return None\n\n if isinstance(spec, list):\n spec = numpy.array(spec)\n if isinstance(wavelen, list):\n wavelen = numpy.array(wavelen)\n\n if not isinstance(spec, numpy.ndarray):\n raise Exception(\"convert: Argument 'spec' must be ndarray.\")\n\n if wavelen is None:\n if target_format is HSRefraction or source_format is HSRefraction:\n raise Exception(\"convert: Require argument 'wavelen'.\")\n else:\n if not isinstance(wavelen, numpy.ndarray) or wavelen.ndim > 1:\n raise Exception(\"convert: Argument 'wavelen' must be 1D ndarray.\")\n\n # if available reshape wavelen for broadcasting\n if wavelen is None:\n rwavelen = None\n else:\n ndim = spec.ndim\n if ndim > 1:\n axes = tuple(range(1, ndim))\n rwavelen = numpy.expand_dims(wavelen, axis=axes)\n else:\n rwavelen = wavelen\n\n # Absorption coefficient (ua) and extinction coefficient (eps):\n # ua = eps * log(10)\n # Complex refractive Index :n = n_r + i n_i\n # Absorption coefficient and imaginary refractive index:\n # ua = 4 pi n_i/ lambda\n # Intensity: I = exp(-ua * l) = 10 ** (-eps * l) = exp(-4 pi n_i / lambda)\n wscale = 1e9\n\n # from intensity\n if target_format is HSIntensity and source_format is HSIntensity:\n return spec\n if target_format is HSAbsorption and source_format is HSIntensity:\n spec[spec == 0] = 1.2e-16\n return -numpy.log(numpy.abs(spec))\n if target_format is HSExtinction and source_format is HSIntensity:\n spec[spec == 0] = 1.2e-16\n return -numpy.log10(numpy.abs(spec))\n if target_format is HSRefraction and source_format is HSIntensity:\n spec[spec == 0] = 1.2e-16\n return -numpy.log(numpy.abs(spec)) * (rwavelen * wscale) / (\n 4 * numpy.pi)\n\n # from absorption\n if target_format is HSIntensity and source_format is HSAbsorption:\n return numpy.exp(-spec)\n if target_format is HSAbsorption and source_format is HSAbsorption:\n return spec\n if target_format is HSExtinction and source_format is HSAbsorption:\n return spec / numpy.log(10)\n if target_format is HSRefraction and source_format is HSAbsorption:\n return spec * (rwavelen * wscale) / (4 * numpy.pi)\n\n # from extinction\n if target_format is HSIntensity and source_format is HSExtinction:\n return 10. ** (-spec)\n if target_format is HSAbsorption and source_format is HSExtinction:\n return spec * numpy.log(10)\n if target_format is HSExtinction and source_format is HSExtinction:\n return spec\n if target_format is HSRefraction and source_format is HSExtinction:\n return spec * numpy.log(10) * (rwavelen * wscale) / (4 * numpy.pi)\n\n # from refraction\n if target_format is HSIntensity and source_format is HSRefraction:\n return numpy.exp(-spec * (4 * numpy.pi) / (rwavelen * wscale))\n if target_format is HSAbsorption and source_format is HSRefraction:\n return spec * (4 * numpy.pi) / (rwavelen * wscale)\n if target_format is HSExtinction and source_format is HSRefraction:\n return spec * (4 * numpy.pi) / (rwavelen * wscale) / numpy.log(10)\n if target_format is HSRefraction and source_format is HSRefraction:\n return spec\n"
] |
[
[
"numpy.array"
],
[
"numpy.log",
"numpy.expand_dims",
"numpy.abs",
"numpy.array",
"numpy.exp"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
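The `convert` helper in hs_formats.py above rests on three relations: absorption ua = -ln(I), extinction eps = ua / ln(10), and imaginary refractive index n_i = ua * lambda / (4*pi) with the wavelength multiplied by the `wscale` factor of 1e9. A short worked example of those same relations, independent of the hsi package and assuming wavelengths given in metres, is sketched below with made-up values.

import numpy as np

intensity = np.array([1.0, 0.5, 0.1])          # example spectra
wavelen = np.array([500e-9, 600e-9, 700e-9])   # assumed to be in metres

absorption = -np.log(intensity)                          # HSIntensity -> HSAbsorption
extinction = absorption / np.log(10)                     # HSAbsorption -> HSExtinction
refraction = absorption * (wavelen * 1e9) / (4 * np.pi)  # HSAbsorption -> HSRefraction

# the conversions are invertible
assert np.allclose(np.exp(-absorption), intensity)
assert np.allclose(10.0 ** (-extinction), intensity)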
xychen-ocn/lagtraj
|
[
"fbcae751faa7f9b9f9a72d20abf71bb92e007bdd"
] |
[
"lagtraj/utils/gradient_calculation.py"
] |
[
"import numpy as np\nimport xarray as xr\n\nfrom .. import njit\nfrom .geometry import lat_dist, lon_dist\n\n\n@njit\ndef _boundary_gradients(x_array, y_array, val_array):\n \"\"\"Numba function to calculate gradients, dismissing filtered points\"\"\"\n len_temp = np.shape(val_array)[0]\n len_levels = np.shape(val_array)[1]\n len_lats = np.shape(val_array)[2]\n len_lons = np.shape(val_array)[3]\n x_gradient_array = np.empty((len_temp, len_levels))\n y_gradient_array = np.empty((len_temp, len_levels))\n for this_time in range(len_temp):\n for this_level in range(len_levels):\n dxval_tot = 0.0\n dyval_tot = 0.0\n dx_tot = 0.0\n dy_tot = 0.0\n # This calculates the x-gradient as a weighted average over latitude\n # The way the averaging is done now is such that the weight of a particular\n # latitude/longitude is proportional to the length of the segment over\n # which a gradient is calculated at that latitude/longitude.\n # This length varies, and may even be zero in extreme cases,\n # due to the use of a mask (and also a bit due to the lat-lon grid,\n # but that would mostly be notable close to the poles). This changes\n # ensures there is no inappropriately high weighting given when a\n # gradient is calculated over a short (or zero) distance.\n # The ~np.isnan operation is used to filter out masked values\n # The first and last filtered values are used\n for this_lat in range(len_lats):\n vals_at_lat = val_array[this_time, this_level, this_lat, :].flatten()\n x_at_lat = x_array[this_lat, :].flatten()\n vals_filtered = vals_at_lat[~np.isnan(vals_at_lat)]\n if len(vals_filtered) > 1:\n x_filtered = x_at_lat[~np.isnan(vals_at_lat)]\n dxval_tot = dxval_tot + vals_filtered[-1] - vals_filtered[0]\n dx_tot = dx_tot + x_filtered[-1] - x_filtered[0]\n # This similarly calculates the y-gradient weighted average\n for this_lon in range(len_lons):\n vals_at_lon = val_array[this_time, this_level, :, this_lon].flatten()\n y_at_lat = y_array[:, this_lon].flatten()\n vals_filtered = vals_at_lon[~np.isnan(vals_at_lon)]\n if len(vals_filtered) > 1:\n y_filtered = y_at_lat[~np.isnan(vals_at_lon)]\n dyval_tot = dyval_tot + vals_filtered[-1] - vals_filtered[0]\n dy_tot = dy_tot + y_filtered[-1] - y_filtered[0]\n # Average these gradients (not weighted at this point, but filtering out all nan values due to e.g. 
division by zero!)\n if abs(dx_tot) > 1e-4:\n x_gradient_array[this_time, this_level] = dxval_tot / dx_tot\n else:\n x_gradient_array[this_time, this_level] = np.nan\n if abs(dy_tot) > 1e-4:\n y_gradient_array[this_time, this_level] = dyval_tot / dy_tot\n else:\n y_gradient_array[this_time, this_level] = np.nan\n return x_gradient_array, y_gradient_array\n\n\n@njit\ndef _regression_gradients(x_array, y_array, val_array):\n \"\"\"Numba function for regression gradients\"\"\"\n len_temp = np.shape(val_array)[0]\n len_levels = np.shape(val_array)[1]\n x_gradient_array = np.empty((len_temp, len_levels))\n y_gradient_array = np.empty((len_temp, len_levels))\n x_flat = x_array.flatten()\n y_flat = y_array.flatten()\n ones_flat = np.ones(np.shape(x_flat))\n for this_time in range(len_temp):\n for this_level in range(len_levels):\n # For each level and time, put all valid (non-nan) x and y locations\n # as well as the corresponding data, in 1D arrays\n data_flat = val_array[this_time, this_level, :, :].flatten()\n data_flat_filter = np.expand_dims(data_flat[~np.isnan(data_flat)], axis=1)\n x_flat_filter = np.expand_dims(x_flat[~np.isnan(data_flat)], axis=1)\n y_flat_filter = np.expand_dims(y_flat[~np.isnan(data_flat)], axis=1)\n if (np.nanmin(x_flat_filter) < np.nanmax(x_flat_filter)) and (\n np.nanmin(y_flat_filter) < np.nanmax(y_flat_filter)\n ):\n ones_flat_filter = np.expand_dims(\n ones_flat[~np.isnan(data_flat)], axis=1\n )\n oxy_mat = np.hstack((ones_flat_filter, x_flat_filter, y_flat_filter))\n # Use the normal method to find the best fit of a plane through the data\n # At each individual model level\n theta = np.dot(\n np.dot(\n np.linalg.pinv(np.dot(oxy_mat.transpose(), oxy_mat)),\n oxy_mat.transpose(),\n ),\n data_flat_filter,\n )\n x_gradient_array[this_time, this_level] = theta[1][0]\n y_gradient_array[this_time, this_level] = theta[2][0]\n else:\n x_gradient_array[this_time, this_level] = np.nan\n y_gradient_array[this_time, this_level] = np.nan\n return x_gradient_array, y_gradient_array\n\n\ndef _era5_boundary_gradients(da_box, ds_ref_pt):\n \"\"\"Calculate gradients from boundary values\n Using distances along latitude and longitude axes\n Weight by box size?\"\"\"\n lat1_point, lon1_point = ds_ref_pt.lat.values, ds_ref_pt.lon.values\n lon2_mg, lat2_mg = np.meshgrid(da_box.lon.values, da_box.lat.values)\n # Use the distance at the actual latitude to calculate gradients between boundaries\n x_array = lon_dist(lon1_point, lon2_mg, lat2_mg)\n y_array = lat_dist(lat1_point, lat2_mg)\n val_array = da_box.values\n return _boundary_gradients(x_array, y_array, val_array)\n\n\ndef _era5_regression_gradients(da_box, ds_ref_pt):\n \"\"\"Calculate gradients function using local coordinate system and\n Regression, using the normal equation\"\"\"\n lat1_point, lon1_point = ds_ref_pt.lat.values, ds_ref_pt.lon.values\n lon2_mg, lat2_mg = np.meshgrid(da_box.lon.values, da_box.lat.values)\n # Use the center latitude for projection onto plane\n x_array = lon_dist(lon1_point, lon2_mg, lat1_point)\n y_array = lat_dist(lat1_point, lat2_mg)\n val_array = da_box.values\n return _regression_gradients(x_array, y_array, val_array)\n\n\ndef calc_horizontal_gradients(da_field, ds_ref_pt, method=\"regression\"):\n \"\"\"\n Compute horizontal gradients in `da_field` using `method`. 
Any nan-values\n in `da_field` will be ignored in the gradient calculation\n \"\"\"\n\n if method == \"regression\":\n x_gradient_array, y_gradient_array = _era5_regression_gradients(\n da_field, ds_ref_pt=ds_ref_pt\n )\n elif method == \"boundary\":\n x_gradient_array, y_gradient_array = _era5_boundary_gradients(\n da_field, ds_ref_pt=ds_ref_pt\n )\n else:\n raise NotImplementedError(f\"Gradient method `{method}` not implemented\")\n\n v = da_field.name\n da_dphidx = xr.DataArray(\n x_gradient_array,\n name=f\"d{v}dx\",\n dims=(\"time\", \"level\"),\n coords=dict(time=da_field.time, level=da_field.level),\n attrs=dict(\n long_name=f\"{da_field.long_name} x-gradient\",\n units=f\"{da_field.units} m**-1\",\n ),\n )\n da_dphidy = xr.DataArray(\n y_gradient_array,\n name=f\"d{v}dy\",\n dims=(\"time\", \"level\"),\n coords=dict(time=da_field.time, level=da_field.level),\n attrs=dict(\n long_name=f\"{da_field.long_name} y-gradient\",\n units=f\"{da_field.units} m**-1\",\n ),\n )\n\n return da_dphidx, da_dphidy\n"
] |
[
[
"numpy.nanmax",
"numpy.hstack",
"numpy.isnan",
"numpy.nanmin",
"numpy.shape",
"numpy.meshgrid",
"numpy.empty"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
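`_regression_gradients` in the lagtraj file above estimates horizontal gradients by fitting a plane val ~ theta0 + theta1*x + theta2*y through all non-NaN grid points with the normal equation theta = (A^T A)^-1 A^T v. A compact equivalent without numba is sketched below; the helper name is hypothetical, and `np.linalg.lstsq` replaces the explicit pseudo-inverse for numerical stability.

import numpy as np

def plane_gradients(x, y, values):
    # Fit values ~ theta0 + theta1*x + theta2*y over non-NaN points
    # and return the x- and y-gradient estimates (theta1, theta2).
    v = values.ravel()
    keep = ~np.isnan(v)
    a = np.column_stack([np.ones(keep.sum()), x.ravel()[keep], y.ravel()[keep]])
    theta, *_ = np.linalg.lstsq(a, v[keep], rcond=None)
    return theta[1], theta[2]

# synthetic plane with one masked point: recovers approximately (2e-3, -5e-4)
xx, yy = np.meshgrid(np.linspace(0.0, 1e4, 20), np.linspace(0.0, 1e4, 20))
field = 2e-3 * xx - 5e-4 * yy + 3.0
field[5, 5] = np.nan
print(plane_gradients(xx, yy, field))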
mcguires5/adversarial-robustness-toolbox
|
[
"f8b0552859eaf31c5b66e1d14d28b89178795ad0",
"f8b0552859eaf31c5b66e1d14d28b89178795ad0",
"f8b0552859eaf31c5b66e1d14d28b89178795ad0",
"f8b0552859eaf31c5b66e1d14d28b89178795ad0",
"f8b0552859eaf31c5b66e1d14d28b89178795ad0"
] |
[
"art/attacks/evasion/pixel_threshold.py",
"tests/defences/preprocessor/test_spatial_smoothing.py",
"tests/defences/preprocessor/test_mp3_compression.py",
"tests/attacks/test_elastic_net.py",
"art/attacks/evasion/deepfool.py"
] |
[
"# MIT License\n#\n# Copyright (C) The Adversarial Robustness Toolbox (ART) Authors 2020\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated\n# documentation files (the \"Software\"), to deal in the Software without restriction, including without limitation the\n# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit\n# persons to whom the Software is furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the\n# Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE\n# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,\n# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\"\"\"\nThis module implements the Threshold Attack and Pixel Attack.\nThe Pixel Attack is a generalisation of One Pixel Attack.\n\n| One Pixel Attack Paper link:\n https://ieeexplore.ieee.org/abstract/document/8601309/citations#citations\n (arXiv link: https://arxiv.org/pdf/1710.08864.pdf)\n| Pixel and Threshold Attack Paper link:\n https://arxiv.org/abs/1906.06026\n\"\"\"\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nimport logging\nfrom itertools import product\nfrom typing import List, Optional, Tuple, Union, TYPE_CHECKING\n\nimport numpy as np\n\n# Currently, a modified version of SciPy's differential evolution is used in\n# code. An ideal version would be using the import as follows,\n# from scipy.optimize import differential_evolution\n# In the meantime, the modified implementation is used which is defined in the\n# lines `453-1457`.\n\nfrom scipy._lib.six import xrange, string_types\nfrom scipy._lib._util import check_random_state\nfrom scipy.optimize.optimize import _status_message\nfrom scipy.optimize import OptimizeResult, minimize\nfrom tqdm import tqdm\n\nfrom art.attacks.attack import EvasionAttack\nfrom art.estimators.estimator import BaseEstimator, NeuralNetworkMixin\nfrom art.estimators.classification.classifier import ClassifierMixin\nfrom art.utils import compute_success, to_categorical, check_and_transform_label_format\n\nif TYPE_CHECKING:\n from art.estimators.classification.classifier import Classifier\n\nlogger = logging.getLogger(__name__)\n\n\nclass PixelThreshold(EvasionAttack):\n \"\"\"\n These attacks were originally implemented by Vargas et al. (2019) & Su et al.(2019).\n\n | One Pixel Attack Paper link:\n https://ieeexplore.ieee.org/abstract/document/8601309/citations#citations\n (arXiv link: https://arxiv.org/pdf/1710.08864.pdf)\n | Pixel and Threshold Attack Paper link:\n https://arxiv.org/abs/1906.06026\n \"\"\"\n\n attack_params = EvasionAttack.attack_params + [\"th\", \"es\", \"targeted\", \"verbose\"]\n _estimator_requirements = (BaseEstimator, NeuralNetworkMixin, ClassifierMixin)\n\n def __init__(self, classifier: \"Classifier\", th: Optional[int], es: int, targeted: bool, verbose: bool,) -> None:\n \"\"\"\n Create a :class:`.PixelThreshold` instance.\n\n :param classifier: A trained classifier.\n :param th: threshold value of the Pixel/ Threshold attack. 
th=None indicates finding a minimum threshold.\n :param es: Indicates whether the attack uses CMAES (0) or DE (1) as Evolutionary Strategy.\n :param targeted: Indicates whether the attack is targeted (True) or untargeted (False).\n :param verbose: Indicates whether to print verbose messages of ES used.\n \"\"\"\n super(PixelThreshold, self).__init__(estimator=classifier)\n\n self._project = True\n self.type_attack = -1\n self.th = th\n self.es = es\n self.targeted = targeted\n self.verbose = verbose\n PixelThreshold._check_params(self)\n\n if self.estimator.channels_first:\n self.img_rows = self.estimator.input_shape[-2]\n self.img_cols = self.estimator.input_shape[-1]\n self.img_channels = self.estimator.input_shape[-3]\n else:\n self.img_rows = self.estimator.input_shape[-3]\n self.img_cols = self.estimator.input_shape[-2]\n self.img_channels = self.estimator.input_shape[-1]\n\n def _check_params(self) -> None:\n if self.th is not None:\n if self.th <= 0:\n raise ValueError(\"The perturbation size `eps` has to be positive.\")\n if not isinstance(self.es, int):\n raise ValueError(\"The flag `es` has to be of type int.\")\n if not isinstance(self.targeted, bool):\n raise ValueError(\"The flag `targeted` has to be of type bool.\")\n if not isinstance(self.verbose, bool):\n raise ValueError(\"The flag `verbose` has to be of type bool.\")\n\n def generate(self, x: np.ndarray, y: Optional[np.ndarray] = None, max_iter: int = 100, **kwargs) -> np.ndarray:\n \"\"\"\n Generate adversarial samples and return them in an array.\n\n :param x: An array with the original inputs.\n :param y: Target values (class labels) one-hot-encoded of shape (nb_samples, nb_classes) or indices of shape\n (nb_samples,). Only provide this parameter if you'd like to use true labels when crafting adversarial\n samples. Otherwise, model predictions are used as labels to avoid the \"label leaking\" effect\n (explained in this paper: https://arxiv.org/abs/1611.01236). 
Default is `None`.\n :param max_iter: Maximum number of optimisation iterations.\n :return: An array holding the adversarial examples.\n \"\"\"\n y = check_and_transform_label_format(y, self.estimator.nb_classes, return_one_hot=False)\n\n if y is None:\n if self.targeted:\n raise ValueError(\"Target labels `y` need to be provided for a targeted attack.\")\n y = np.argmax(self.estimator.predict(x), axis=1)\n else:\n if len(y.shape) > 1:\n y = np.argmax(y, axis=1)\n\n if self.th is None:\n logger.info(\"Performing minimal perturbation Attack.\")\n\n if np.max(x) <= 1:\n x = x * 255.0\n\n adv_x_best = []\n for image, target_class in tqdm(zip(x, y), desc=\"Pixel threshold\"):\n if self.th is None:\n self.min_th = 127\n start, end = 1, 127\n while True:\n image_result: Union[List[np.ndarray], np.ndarray] = []\n threshold = (start + end) // 2\n success, trial_image_result = self._attack(image, target_class, threshold, max_iter)\n if image_result or success:\n image_result = trial_image_result\n if success:\n end = threshold - 1\n else:\n start = threshold + 1\n if success:\n self.min_th = threshold\n if end < start:\n if isinstance(image_result, list) and not image_result:\n success = False\n image_result = image\n break\n else:\n success, image_result = self._attack(image, target_class, self.th, max_iter)\n adv_x_best += [image_result]\n\n adv_x_best = np.array(adv_x_best)\n\n if np.max(x) <= 1:\n x = x / 255.0\n\n if y is not None:\n y = to_categorical(y, self.estimator.nb_classes)\n\n logger.info(\n \"Success rate of Attack: %.2f%%\", 100 * compute_success(self.estimator, x, y, adv_x_best, self.targeted, 1),\n )\n return adv_x_best\n\n def _get_bounds(self, img: np.ndarray, limit) -> Tuple[List[list], list]:\n \"\"\"\n Define the bounds for the image `img` within the limits `limit`.\n \"\"\"\n\n def bound_limit(value):\n return np.clip(value - limit, 0, 255), np.clip(value + limit, 0, 255)\n\n minbounds, maxbounds, bounds, initial = [], [], [], []\n\n for i, j, k in product(range(img.shape[-3]), range(img.shape[-2]), range(img.shape[-1])):\n temp = img[i, j, k]\n initial += [temp]\n bound = bound_limit(temp)\n if self.es == 0:\n minbounds += [bound[0]]\n maxbounds += [bound[1]]\n else:\n bounds += [bound]\n if self.es == 0:\n bounds = [minbounds, maxbounds]\n\n return bounds, initial\n\n def _perturb_image(self, x: np.ndarray, img: np.ndarray) -> np.ndarray:\n \"\"\"\n Perturbs the given image `img` with the given perturbation `x`.\n \"\"\"\n return img\n\n def _attack_success(self, adv_x, x, target_class):\n \"\"\"\n Checks whether the given perturbation `adv_x` for the image `img` is successful.\n \"\"\"\n predicted_class = np.argmax(self.estimator.predict(self._perturb_image(adv_x, x))[0])\n return bool(\n (self.targeted and predicted_class == target_class)\n or (not self.targeted and predicted_class != target_class)\n )\n\n def _attack(\n self, image: np.ndarray, target_class: np.ndarray, limit: int, max_iter: int\n ) -> Tuple[bool, np.ndarray]:\n \"\"\"\n Attack the given image `image` with the threshold `limit` for the `target_class` which is true label for\n untargeted attack and targeted label for targeted attack.\n \"\"\"\n bounds, initial = self._get_bounds(image, limit)\n\n def predict_fn(x):\n predictions = self.estimator.predict(self._perturb_image(x, image))[:, target_class]\n return predictions if not self.targeted else 1 - predictions\n\n def callback_fn(x, convergence=None):\n if self.es == 0:\n if self._attack_success(x.result[0], image, target_class):\n raise 
Exception(\"Attack Completed :) Earlier than expected\")\n else:\n return self._attack_success(x, image, target_class)\n\n if self.es == 0:\n from cma import CMAOptions\n\n opts = CMAOptions()\n if not self.verbose:\n opts.set(\"verbose\", -9)\n opts.set(\"verb_disp\", 40000)\n opts.set(\"verb_log\", 40000)\n opts.set(\"verb_time\", False)\n\n opts.set(\"bounds\", bounds)\n\n if self.type_attack == 0:\n std = 63\n else:\n std = limit\n\n from cma import CMAEvolutionStrategy\n\n strategy = CMAEvolutionStrategy(initial, std / 4, opts)\n\n try:\n strategy.optimize(\n predict_fn,\n maxfun=max(1, 400 // len(bounds)) * len(bounds) * 100,\n callback=callback_fn,\n iterations=1,\n )\n except Exception as exception:\n if self.verbose:\n print(exception)\n\n adv_x = strategy.result[0]\n else:\n strategy = differential_evolution(\n predict_fn,\n bounds,\n disp=self.verbose,\n maxiter=max_iter,\n popsize=max(1, 400 // len(bounds)),\n recombination=1,\n atol=-1,\n callback=callback_fn,\n polish=False,\n )\n adv_x = strategy.x\n\n if self._attack_success(adv_x, image, target_class):\n return True, self._perturb_image(adv_x, image)[0]\n else:\n return False, image\n\n\nclass PixelAttack(PixelThreshold):\n \"\"\"\n This attack was originally implemented by Vargas et al. (2019). It is generalisation of One Pixel Attack originally\n implemented by Su et al. (2019).\n\n | One Pixel Attack Paper link:\n https://ieeexplore.ieee.org/abstract/document/8601309/citations#citations\n (arXiv link: https://arxiv.org/pdf/1710.08864.pdf)\n | Pixel Attack Paper link:\n https://arxiv.org/abs/1906.06026\n \"\"\"\n\n def __init__(\n self,\n classifier: \"Classifier\",\n th: Optional[int] = None,\n es: int = 0,\n targeted: bool = False,\n verbose: bool = False,\n ) -> None:\n \"\"\"\n Create a :class:`.PixelAttack` instance.\n\n :param classifier: A trained classifier.\n :param th: threshold value of the Pixel/ Threshold attack. 
th=None indicates finding a minimum threshold.\n :param es: Indicates whether the attack uses CMAES (0) or DE (1) as Evolutionary Strategy.\n :param targeted: Indicates whether the attack is targeted (True) or untargeted (False).\n :param verbose: Indicates whether to print verbose messages of ES used.\n \"\"\"\n super(PixelAttack, self).__init__(classifier, th, es, targeted, verbose)\n self.type_attack = 0\n\n def _perturb_image(self, x: np.ndarray, img: np.ndarray) -> np.ndarray:\n \"\"\"\n Perturbs the given image `img` with the given perturbation `x`.\n \"\"\"\n if x.ndim < 2:\n x = np.array([x])\n imgs = np.tile(img, [len(x)] + [1] * (x.ndim + 1))\n x = x.astype(int)\n for adv, image in zip(x, imgs):\n for pixel in np.split(adv, len(adv) // (2 + self.img_channels)):\n x_pos, y_pos, *rgb = pixel\n if not self.estimator.channels_first:\n image[x_pos % self.img_rows, y_pos % self.img_cols] = rgb\n else:\n image[:, x_pos % self.img_rows, y_pos % self.img_cols] = rgb\n return imgs\n\n def _get_bounds(self, img: np.ndarray, limit) -> Tuple[List[list], list]:\n \"\"\"\n Define the bounds for the image `img` within the limits `limit`.\n \"\"\"\n initial: List[np.ndarray] = []\n bounds: List[List[int]]\n if self.es == 0:\n for count, (i, j) in enumerate(product(range(self.img_rows), range(self.img_cols))):\n initial += [i, j]\n for k in range(self.img_channels):\n if not self.estimator.channels_first:\n initial += [img[i, j, k]]\n else:\n initial += [img[k, i, j]]\n\n if count == limit - 1:\n break\n else:\n continue\n min_bounds = [0, 0]\n for _ in range(self.img_channels):\n min_bounds += [0]\n min_bounds = min_bounds * limit\n max_bounds = [self.img_rows, self.img_cols]\n for _ in range(self.img_channels):\n max_bounds += [255]\n max_bounds = max_bounds * limit\n bounds = [min_bounds, max_bounds]\n else:\n bounds = [[0, self.img_rows], [0, self.img_cols]]\n for _ in range(self.img_channels):\n bounds += [[0, 255]]\n bounds = bounds * limit\n return bounds, initial\n\n\nclass ThresholdAttack(PixelThreshold):\n \"\"\"\n This attack was originally implemented by Vargas et al. (2019).\n\n | Paper link:\n https://arxiv.org/abs/1906.06026\n \"\"\"\n\n def __init__(\n self,\n classifier: \"Classifier\",\n th: Optional[int] = None,\n es: int = 0,\n targeted: bool = False,\n verbose: bool = False,\n ) -> None:\n \"\"\"\n Create a :class:`.PixelThreshold` instance.\n\n :param classifier: A trained classifier.\n :param th: threshold value of the Pixel/ Threshold attack. 
th=None indicates finding a minimum threshold.\n :param es: Indicates whether the attack uses CMAES (0) or DE (1) as Evolutionary Strategy.\n :param targeted: Indicates whether the attack is targeted (True) or untargeted (False).\n :param verbose: Indicates whether to print verbose messages of ES used.\n \"\"\"\n super(ThresholdAttack, self).__init__(classifier, th, es, targeted, verbose)\n self.type_attack = 1\n\n def _perturb_image(self, x: np.ndarray, img: np.ndarray) -> np.ndarray:\n \"\"\"\n Perturbs the given image `img` with the given perturbation `x`.\n \"\"\"\n if x.ndim < 2:\n x = x[None, ...]\n imgs = np.tile(img, [len(x)] + [1] * (x.ndim + 1))\n x = x.astype(int)\n for adv, image in zip(x, imgs):\n for count, (i, j, k) in enumerate(\n product(range(image.shape[-3]), range(image.shape[-2]), range(image.shape[-1]),)\n ):\n image[i, j, k] = adv[count]\n return imgs\n\n\n# TODO: Make the attack compatible with current version of SciPy Optimize\n# Differential Evolution\n\n\"\"\"\nA slight modification to Scipy's implementation of differential evolution.\nTo speed up predictions, the entire parameters array is passed to `self.func`,\nwhere a neural network model can batch its computations and execute in parallel\nSearch for `CHANGES` to find all code changes.\n\nDan Kondratyuk 2018\n\nOriginal code adapted from\nhttps://github.com/scipy/scipy/blob/70e61dee181de23fdd8d893eaa9491100e2218d7/scipy/optimize/_differentialevolution.py\n----------\ndifferential_evolution:The differential evolution global optimization algorithm\nAdded by Andrew Nelson 2014\n\"\"\"\n\n# Copyright (c) 2001, 2002 Enthought, Inc.\n# All rights reserved.\n# Copyright (c) 2003-2017 SciPy Developers.\n# All rights reserved.\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n# a. Redistributions of source code must retain the above copyright notice,\n# this list of conditions and the following disclaimer.\n# b. Redistributions in binary form must reproduce the above copyright\n# notice, this list of conditions and the following disclaimer in the\n# documentation and/or other materials provided with the distribution.\n# c. Neither the name of Enthought nor the names of the SciPy Developers\n# may be used to endorse or promote products derived from this software\n# without specific prior written permission.\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\n# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS\n# BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,\n# OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF\n# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS\n# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN\n# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)\n# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF\n# THE POSSIBILITY OF SUCH DAMAGE.\n# SciPy bundles a number of libraries that are compatibly licensed. 
We list\n# these here.\n# Name: Numpydoc\n# Files: doc/sphinxext/numpydoc/*\n# License: 2-clause BSD\n# For details, see doc/sphinxext/LICENSE.txt\n# Name: scipy-sphinx-theme\n# Files: doc/scipy-sphinx-theme/*\n# License: 3-clause BSD, PSF and Apache 2.0\n# For details, see doc/sphinxext/LICENSE.txt\n# Name: Six\n# Files: scipy/_lib/six.py\n# License: MIT\n# For details, see the header inside scipy/_lib/six.py\n# Name: Decorator\n# Files: scipy/_lib/decorator.py\n# License: 2-clause BSD\n# For details, see the header inside scipy/_lib/decorator.py\n# Name: ID\n# Files: scipy/linalg/src/id_dist/*\n# License: 3-clause BSD\n# For details, see scipy/linalg/src/id_dist/doc/doc.tex\n# Name: L-BFGS-B\n# Files: scipy/optimize/lbfgsb/*\n# License: BSD license\n# For details, see scipy/optimize/lbfgsb/README\n# Name: SuperLU\n# Files: scipy/sparse/linalg/dsolve/SuperLU/*\n# License: 3-clause BSD\n# For details, see scipy/sparse/linalg/dsolve/SuperLU/License.txt\n# Name: ARPACK\n# Files: scipy/sparse/linalg/eigen/arpack/ARPACK/*\n# License: 3-clause BSD\n# For details, see scipy/sparse/linalg/eigen/arpack/ARPACK/COPYING\n# Name: Qhull\n# Files: scipy/spatial/qhull/*\n# License: Qhull license (BSD-like)\n# For details, see scipy/spatial/qhull/COPYING.txt\n# Name: Cephes\n# Files: scipy/special/cephes/*\n# License: 3-clause BSD\n# Distributed under 3-clause BSD license with permission from the author,\n# see https://lists.debian.org/debian-legal/2004/12/msg00295.html\n# Cephes Math Library Release 2.8: June, 2000\n# Copyright 1984, 1995, 2000 by Stephen L. Moshier\n# This software is derived from the Cephes Math Library and is\n# incorporated herein by permission of the author.\n# All rights reserved.\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n# * Redistributions of source code must retain the above copyright\n# notice, this list of conditions and the following disclaimer.\n# * Redistributions in binary form must reproduce the above copyright\n# notice, this list of conditions and the following disclaimer in the\n# documentation and/or other materials provided with the distribution.\n# * Neither the name of the <organization> nor the\n# names of its contributors may be used to endorse or promote products\n# derived from this software without specific prior written permission.\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND\n# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\n# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n# DISCLAIMED. 
IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY\n# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES\n# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\n# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND\n# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\n# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n# Name: Faddeeva\n# Files: scipy/special/Faddeeva.*\n# License: MIT\n# Copyright (c) 2012 Massachusetts Institute of Technology\n# Permission is hereby granted, free of charge, to any person obtaining\n# a copy of this software and associated documentation files (the\n# \"Software\"), to deal in the Software without restriction, including\n# without limitation the rights to use, copy, modify, merge, publish,\n# distribute, sublicense, and/or sell copies of the Software, and to\n# permit persons to whom the Software is furnished to do so, subject to\n# the following conditions:\n# The above copyright notice and this permission notice shall be\n# included in all copies or substantial portions of the Software.\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND,\n# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF\n# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND\n# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE\n# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION\n# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION\n# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n\n__all__ = [\"differential_evolution\"]\n\n_MACHEPS = np.finfo(np.float64).eps\n\n\ndef differential_evolution(\n func,\n bounds,\n args=(),\n strategy=\"best1bin\",\n maxiter=1000,\n popsize=15,\n tol=0.01,\n mutation=(0.5, 1),\n recombination=0.7,\n seed=None,\n callback=None,\n disp=False,\n polish=True,\n init=\"latinhypercube\",\n atol=0,\n):\n \"\"\"Finds the global minimum of a multivariate function.\n Differential Evolution is stochastic in nature (does not use gradient\n methods) to find the minimium, and can search large areas of candidate\n space, but often requires larger numbers of function evaluations than\n conventional gradient based techniques.\n The algorithm is due to Storn and Price [1]_.\n Parameters\n ----------\n func : callable\n The objective function to be minimized. Must be in the form\n ``f(x, *args)``, where ``x`` is the argument in the form of a 1-D array\n and ``args`` is a tuple of any additional fixed parameters needed to\n completely specify the function.\n bounds : sequence\n Bounds for variables. ``(min, max)`` pairs for each element in ``x``,\n defining the lower and upper bounds for the optimizing argument of\n `func`. It is required to have ``len(bounds) == len(x)``.\n ``len(bounds)`` is used to determine the number of parameters in ``x``.\n args : tuple, optional\n Any additional fixed parameters needed to\n completely specify the objective function.\n strategy : str, optional\n The differential evolution strategy to use. 
Should be one of:\n - 'best1bin'\n - 'best1exp'\n - 'rand1exp'\n - 'randtobest1exp'\n - 'currenttobest1exp'\n - 'best2exp'\n - 'rand2exp'\n - 'randtobest1bin'\n - 'currenttobest1bin'\n - 'best2bin'\n - 'rand2bin'\n - 'rand1bin'\n The default is 'best1bin'.\n maxiter : int, optional\n The maximum number of generations over which the entire population is\n evolved. The maximum number of function evaluations (with no polishing)\n is: ``(maxiter + 1) * popsize * len(x)``\n popsize : int, optional\n A multiplier for setting the total population size. The population has\n ``popsize * len(x)`` individuals (unless the initial population is\n supplied via the `init` keyword).\n tol : float, optional\n Relative tolerance for convergence, the solving stops when\n ``np.std(pop) <= atol + tol * np.abs(np.mean(population_energies))``,\n where and `atol` and `tol` are the absolute and relative tolerance\n respectively.\n mutation : float or tuple(float, float), optional\n The mutation constant. In the literature this is also known as\n differential weight, being denoted by F.\n If specified as a float it should be in the range [0, 2].\n If specified as a tuple ``(min, max)`` dithering is employed. Dithering\n randomly changes the mutation constant on a generation by generation\n basis. The mutation constant for that generation is taken from\n ``U[min, max)``. Dithering can help speed convergence significantly.\n Increasing the mutation constant increases the search radius, but will\n slow down convergence.\n recombination : float, optional\n The recombination constant, should be in the range [0, 1]. In the\n literature this is also known as the crossover probability, being\n denoted by CR. Increasing this value allows a larger number of mutants\n to progress into the next generation, but at the risk of population\n stability.\n seed : int or `np.random.RandomState`, optional\n If `seed` is not specified the `np.RandomState` singleton is used.\n If `seed` is an int, a new `np.random.RandomState` instance is used,\n seeded with seed.\n If `seed` is already a `np.random.RandomState instance`, then that\n `np.random.RandomState` instance is used.\n Specify `seed` for repeatable minimizations.\n disp : bool, optional\n Display status messages\n callback : callable, `callback(xk, convergence=val)`, optional\n A function to follow the progress of the minimization. ``xk`` is\n the current value of ``x0``. ``val`` represents the fractional\n value of the population convergence. When ``val`` is greater than one\n the function halts. If callback returns `True`, then the minimization\n is halted (any polishing is still carried out).\n polish : bool, optional\n If True (default), then `scipy.optimize.minimize` with the `L-BFGS-B`\n method is used to polish the best population member at the end, which\n can improve the minimization slightly.\n init : str or array-like, optional\n Specify which type of population initialization is performed. Should be\n one of:\n - 'latinhypercube'\n - 'random'\n - array specifying the initial population. The array should have\n shape ``(M, len(x))``, where len(x) is the number of parameters.\n `init` is clipped to `bounds` before use.\n The default is 'latinhypercube'. Latin Hypercube sampling tries to\n maximize coverage of the available parameter space. 'random'\n initializes the population randomly - this has the drawback that\n clustering can occur, preventing the whole of parameter space being\n covered. 
Use of an array to specify a population subset could be used,\n for example, to create a tight bunch of initial guesses in an location\n where the solution is known to exist, thereby reducing time for\n convergence.\n atol : float, optional\n Absolute tolerance for convergence, the solving stops when\n ``np.std(pop) <= atol + tol * np.abs(np.mean(population_energies))``,\n where and `atol` and `tol` are the absolute and relative tolerance\n respectively.\n Returns\n -------\n res : OptimizeResult\n The optimization result represented as a `OptimizeResult` object.\n Important attributes are: ``x`` the solution array, ``success`` a\n Boolean flag indicating if the optimizer exited successfully and\n ``message`` which describes the cause of the termination. See\n `OptimizeResult` for a description of other attributes. If `polish`\n was employed, and a lower minimum was obtained by the polishing, then\n OptimizeResult also contains the ``jac`` attribute.\n Notes\n -----\n Differential evolution is a stochastic population based method that is\n useful for global optimization problems. At each pass through the\n population the algorithm mutates each candidate solution by mixing with\n other candidate solutions to create a trial candidate. There are several\n strategies [2]_ for creating trial candidates, which suit some problems\n more than others. The 'best1bin' strategy is a good starting point for many\n systems. In this strategy two members of the population are randomly\n chosen. Their difference is used to mutate the best member (the `best` in\n `best1bin`), :math:`b_0`,\n so far:\n .. math::\n b' = b_0 + mutation * (population[rand0] - population[rand1])\n A trial vector is then constructed. Starting with a randomly chosen 'i'th\n parameter the trial is sequentially filled (in modulo) with parameters from\n `b'` or the original candidate. The choice of whether to use `b'` or the\n original candidate is made with a binomial distribution (the 'bin' in\n 'best1bin') - a random number in [0, 1) is generated. If this number is\n less than the `recombination` constant then the parameter is loaded from\n `b'`, otherwise it is loaded from the original candidate. The final\n parameter is always loaded from `b'`. Once the trial candidate is built\n its fitness is assessed. If the trial is better than the original candidate\n then it takes its place. If it is also better than the best overall\n candidate it also replaces that.\n To improve your chances of finding a global minimum use higher `popsize`\n values, with higher `mutation` and (dithering), but lower `recombination`\n values. This has the effect of widening the search radius, but slowing\n convergence.\n .. versionadded:: 0.15.0\n Examples\n --------\n Let us consider the problem of minimizing the Rosenbrock function. This\n function is implemented in `rosen` in `scipy.optimize`.\n >>> from scipy.optimize import rosen, differential_evolution\n >>> bounds = [(0,2), (0, 2), (0, 2), (0, 2), (0, 2)]\n >>> result = differential_evolution(rosen, bounds)\n >>> result.x, result.fun\n (array([1., 1., 1., 1., 1.]), 1.9216496320061384e-19)\n Next find the minimum of the Ackley function\n (http://en.wikipedia.org/wiki/Test_functions_for_optimization).\n >>> from scipy.optimize import differential_evolution\n >>> import numpy as np\n >>> def ackley(x):\n ... arg1 = -0.2 * np.sqrt(0.5 * (x[0] ** 2 + x[1] ** 2))\n ... arg2 = 0.5 * (np.cos(2. * np.pi * x[0]) + np.cos(2. * np.pi *x[1]))\n ... return -20. * np.exp(arg1) - np.exp(arg2) + 20. 
+ np.e\n >>> bounds = [(-5, 5), (-5, 5)]\n >>> result = differential_evolution(ackley, bounds)\n >>> result.x, result.fun\n (array([ 0., 0.]), 4.4408920985006262e-16)\n References\n ----------\n .. [1] Storn, R and Price, K, Differential Evolution - a Simple and\n Efficient Heuristic for Global Optimization over Continuous Spaces,\n Journal of Global Optimization, 1997, 11, 341 - 359.\n .. [2] http://www1.icsi.berkeley.edu/~storn/code.html\n .. [3] http://en.wikipedia.org/wiki/Differential_evolution\n \"\"\"\n\n solver = DifferentialEvolutionSolver(\n func,\n bounds,\n args=args,\n strategy=strategy,\n maxiter=maxiter,\n popsize=popsize,\n tol=tol,\n mutation=mutation,\n recombination=recombination,\n seed=seed,\n polish=polish,\n callback=callback,\n disp=disp,\n init=init,\n atol=atol,\n )\n return solver.solve()\n\n\nclass DifferentialEvolutionSolver:\n \"\"\"This class implements the differential evolution solver\n Parameters\n ----------\n func : callable\n The objective function to be minimized. Must be in the form\n ``f(x, *args)``, where ``x`` is the argument in the form of a 1-D array\n and ``args`` is a tuple of any additional fixed parameters needed to\n completely specify the function.\n bounds : sequence\n Bounds for variables. ``(min, max)`` pairs for each element in ``x``,\n defining the lower and upper bounds for the optimizing argument of\n `func`. It is required to have ``len(bounds) == len(x)``.\n ``len(bounds)`` is used to determine the number of parameters in ``x``.\n args : tuple, optional\n Any additional fixed parameters needed to\n completely specify the objective function.\n strategy : str, optional\n The differential evolution strategy to use. Should be one of:\n - 'best1bin'\n - 'best1exp'\n - 'rand1exp'\n - 'randtobest1exp'\n - 'currenttobest1exp'\n - 'best2exp'\n - 'rand2exp'\n - 'randtobest1bin'\n - 'currenttobest1bin'\n - 'best2bin'\n - 'rand2bin'\n - 'rand1bin'\n The default is 'best1bin'\n maxiter : int, optional\n The maximum number of generations over which the entire population is\n evolved. The maximum number of function evaluations (with no polishing)\n is: ``(maxiter + 1) * popsize * len(x)``\n popsize : int, optional\n A multiplier for setting the total population size. The population has\n ``popsize * len(x)`` individuals (unless the initial population is\n supplied via the `init` keyword).\n tol : float, optional\n Relative tolerance for convergence, the solving stops when\n ``np.std(pop) <= atol + tol * np.abs(np.mean(population_energies))``,\n where and `atol` and `tol` are the absolute and relative tolerance\n respectively.\n mutation : float or tuple(float, float), optional\n The mutation constant. In the literature this is also known as\n differential weight, being denoted by F.\n If specified as a float it should be in the range [0, 2].\n If specified as a tuple ``(min, max)`` dithering is employed. Dithering\n randomly changes the mutation constant on a generation by generation\n basis. The mutation constant for that generation is taken from\n U[min, max). Dithering can help speed convergence significantly.\n Increasing the mutation constant increases the search radius, but will\n slow down convergence.\n recombination : float, optional\n The recombination constant, should be in the range [0, 1]. In the\n literature this is also known as the crossover probability, being\n denoted by CR. 
Increasing this value allows a larger number of mutants\n to progress into the next generation, but at the risk of population\n stability.\n seed : int or `np.random.RandomState`, optional\n If `seed` is not specified the `np.random.RandomState` singleton is\n used.\n If `seed` is an int, a new `np.random.RandomState` instance is used,\n seeded with `seed`.\n If `seed` is already a `np.random.RandomState` instance, then that\n `np.random.RandomState` instance is used.\n Specify `seed` for repeatable minimizations.\n disp : bool, optional\n Display status messages\n callback : callable, `callback(xk, convergence=val)`, optional\n A function to follow the progress of the minimization. ``xk`` is\n the current value of ``x0``. ``val`` represents the fractional\n value of the population convergence. When ``val`` is greater than one\n the function halts. If callback returns `True`, then the minimization\n is halted (any polishing is still carried out).\n polish : bool, optional\n If True, then `scipy.optimize.minimize` with the `L-BFGS-B` method\n is used to polish the best population member at the end. This requires\n a few more function evaluations.\n maxfun : int, optional\n Set the maximum number of function evaluations. However, it probably\n makes more sense to set `maxiter` instead.\n init : str or array-like, optional\n Specify which type of population initialization is performed. Should be\n one of:\n - 'latinhypercube'\n - 'random'\n - array specifying the initial population. The array should have\n shape ``(M, len(x))``, where len(x) is the number of parameters.\n `init` is clipped to `bounds` before use.\n The default is 'latinhypercube'. Latin Hypercube sampling tries to\n maximize coverage of the available parameter space. 'random'\n initializes the population randomly - this has the drawback that\n clustering can occur, preventing the whole of parameter space being\n covered. 
Use of an array to specify a population could be used, for\n example, to create a tight bunch of initial guesses in an location\n where the solution is known to exist, thereby reducing time for\n convergence.\n atol : float, optional\n Absolute tolerance for convergence, the solving stops when\n ``np.std(pop) <= atol + tol * np.abs(np.mean(population_energies))``,\n where and `atol` and `tol` are the absolute and relative tolerance\n respectively.\n \"\"\"\n\n # Dispatch of mutation strategy method (binomial or exponential).\n _binomial = {\n \"best1bin\": \"_best1\",\n \"randtobest1bin\": \"_randtobest1\",\n \"currenttobest1bin\": \"_currenttobest1\",\n \"best2bin\": \"_best2\",\n \"rand2bin\": \"_rand2\",\n \"rand1bin\": \"_rand1\",\n }\n _exponential = {\n \"best1exp\": \"_best1\",\n \"rand1exp\": \"_rand1\",\n \"randtobest1exp\": \"_randtobest1\",\n \"currenttobest1exp\": \"_currenttobest1\",\n \"best2exp\": \"_best2\",\n \"rand2exp\": \"_rand2\",\n }\n\n __init_error_msg = (\n \"The population initialization method must be one of \"\n \"'latinhypercube' or 'random', or an array of shape \"\n \"(M, N) where N is the number of parameters and M>5\"\n )\n\n def __init__(\n self,\n func,\n bounds,\n args=(),\n strategy=\"best1bin\",\n maxiter=1000,\n popsize=15,\n tol=0.01,\n mutation=(0.5, 1),\n recombination=0.7,\n seed=None,\n maxfun=np.inf,\n callback=None,\n disp=False,\n polish=True,\n init=\"latinhypercube\",\n atol=0,\n ):\n\n if strategy in self._binomial:\n self.mutation_func = getattr(self, self._binomial[strategy])\n elif strategy in self._exponential:\n self.mutation_func = getattr(self, self._exponential[strategy])\n else:\n raise ValueError(\"Please select a valid mutation strategy\")\n self.strategy = strategy\n\n self.callback = callback\n self.polish = polish\n\n # relative and absolute tolerances for convergence\n self.tol, self.atol = tol, atol\n\n # Mutation constant should be in [0, 2). If specified as a sequence\n # then dithering is performed.\n self.scale = mutation\n if not np.all(np.isfinite(mutation)) or np.any(np.array(mutation) >= 2) or np.any(np.array(mutation) < 0):\n raise ValueError(\n \"The mutation constant must be a float in \"\n \"U[0, 2), or specified as a tuple(min, max)\"\n \" where min < max and min, max are in U[0, 2).\"\n )\n\n self.dither = None\n if hasattr(mutation, \"__iter__\") and len(mutation) > 1:\n self.dither = [mutation[0], mutation[1]]\n self.dither.sort()\n\n self.cross_over_probability = recombination\n\n self.func = func\n self.args = args\n\n # convert tuple of lower and upper bounds to limits\n # [(low_0, high_0), ..., (low_n, high_n]\n # -> [[low_0, ..., low_n], [high_0, ..., high_n]]\n self.limits = np.array(bounds, dtype=\"float\").T\n if np.size(self.limits, 0) != 2 or not np.all(np.isfinite(self.limits)):\n raise ValueError(\n \"bounds should be a sequence containing \" \"real valued (min, max) pairs for each value\" \" in x\"\n )\n\n if maxiter is None: # the default used to be None\n maxiter = 1000\n self.maxiter = maxiter\n if maxfun is None: # the default used to be None\n maxfun = np.inf\n self.maxfun = maxfun\n\n # population is scaled to between [0, 1].\n # We have to scale between parameter <-> population\n # save these arguments for _scale_parameter and\n # _unscale_parameter. 
This is an optimization\n self.__scale_arg1 = 0.5 * (self.limits[0] + self.limits[1])\n self.__scale_arg2 = np.fabs(self.limits[0] - self.limits[1])\n\n self.parameter_count = np.size(self.limits, 1)\n\n self.random_number_generator = check_random_state(seed)\n\n # default population initialization is a latin hypercube design, but\n # there are other population initializations possible.\n # the minimum is 5 because 'best2bin' requires a population that's at\n # least 5 long\n self.num_population_members = max(5, popsize * self.parameter_count)\n\n self.population_shape = (self.num_population_members, self.parameter_count)\n\n self._nfev = 0\n if isinstance(init, string_types):\n if init == \"latinhypercube\":\n self.init_population_lhs()\n elif init == \"random\":\n self.init_population_random()\n else:\n raise ValueError(self.__init_error_msg)\n else:\n self.init_population_array(init)\n\n self.disp = disp\n\n def init_population_lhs(self):\n \"\"\"\n Initializes the population with Latin Hypercube Sampling.\n Latin Hypercube Sampling ensures that each parameter is uniformly\n sampled over its range.\n \"\"\"\n rng = self.random_number_generator\n\n # Each parameter range needs to be sampled uniformly. The scaled\n # parameter range ([0, 1)) needs to be split into\n # `self.num_population_members` segments, each of which has the\n # following size:\n segsize = 1.0 / self.num_population_members\n\n # Within each segment we sample from a uniform random distribution.\n # We need to do this sampling for each parameter.\n samples = (\n segsize * rng.random_sample(self.population_shape)\n # Offset each segment to cover the entire parameter range\n # [0, 1)\n + np.linspace(0.0, 1.0, self.num_population_members, endpoint=False)[:, np.newaxis]\n )\n\n # Create an array for population of candidate solutions.\n self.population = np.zeros_like(samples)\n\n # Initialize population of candidate solutions by permutation of the\n # random samples.\n for j in range(self.parameter_count):\n order = rng.permutation(range(self.num_population_members))\n self.population[:, j] = samples[order, j]\n\n # reset population energies\n self.population_energies = np.ones(self.num_population_members) * np.inf\n\n # reset number of function evaluations counter\n self._nfev = 0\n\n def init_population_random(self):\n \"\"\"\n Initialises the population at random. This type of initialization\n can possess clustering, Latin Hypercube sampling is generally better.\n \"\"\"\n rng = self.random_number_generator\n self.population = rng.random_sample(self.population_shape)\n\n # reset population energies\n self.population_energies = np.ones(self.num_population_members) * np.inf\n\n # reset number of function evaluations counter\n self._nfev = 0\n\n def init_population_array(self, init):\n \"\"\"\n Initialises the population with a user specified population.\n Parameters\n ----------\n init : np.ndarray\n Array specifying subset of the initial population. 
The array should\n have shape (M, len(x)), where len(x) is the number of parameters.\n The population is clipped to the lower and upper `bounds`.\n \"\"\"\n # make sure you're using a float array\n popn = np.asfarray(init)\n\n if np.size(popn, 0) < 5 or popn.shape[1] != self.parameter_count or len(popn.shape) != 2:\n raise ValueError(\"The population supplied needs to have shape\" \" (M, len(x)), where M > 4.\")\n\n # scale values and clip to bounds, assigning to population\n self.population = np.clip(self._unscale_parameters(popn), 0, 1)\n\n self.num_population_members = np.size(self.population, 0)\n\n self.population_shape = (self.num_population_members, self.parameter_count)\n\n # reset population energies\n self.population_energies = np.ones(self.num_population_members) * np.inf\n\n # reset number of function evaluations counter\n self._nfev = 0\n\n @property\n def x(self):\n \"\"\"\n The best solution from the solver\n Returns\n -------\n x : ndarray\n The best solution from the solver.\n \"\"\"\n return self._scale_parameters(self.population[0])\n\n @property\n def convergence(self):\n \"\"\"\n The standard deviation of the population energies divided by their\n mean.\n \"\"\"\n return np.std(self.population_energies) / np.abs(np.mean(self.population_energies) + _MACHEPS)\n\n def solve(self):\n \"\"\"\n Runs the DifferentialEvolutionSolver.\n Returns\n -------\n res : OptimizeResult\n The optimization result represented as a ``OptimizeResult`` object.\n Important attributes are: ``x`` the solution array, ``success`` a\n Boolean flag indicating if the optimizer exited successfully and\n ``message`` which describes the cause of the termination. See\n `OptimizeResult` for a description of other attributes. If `polish`\n was employed, and a lower minimum was obtained by the polishing,\n then OptimizeResult also contains the ``jac`` attribute.\n \"\"\"\n nit, warning_flag = 0, False\n status_message = _status_message[\"success\"]\n\n # The population may have just been initialized (all entries are\n # np.inf). 
If it has you have to calculate the initial energies.\n # Although this is also done in the evolve generator it's possible\n # that someone can set maxiter=0, at which point we still want the\n # initial energies to be calculated (the following loop isn't run).\n if np.all(np.isinf(self.population_energies)):\n self._calculate_population_energies()\n\n # do the optimisation.\n for nit in xrange(1, self.maxiter + 1):\n # evolve the population by a generation\n try:\n next(self)\n except StopIteration:\n warning_flag = True\n status_message = _status_message[\"maxfev\"]\n break\n\n if self.disp:\n print(\"differential_evolution step %d: f(x)= %g\" % (nit, self.population_energies[0]))\n\n # should the solver terminate?\n convergence = self.convergence\n\n if (\n self.callback\n and self.callback(self._scale_parameters(self.population[0]), convergence=self.tol / convergence,)\n is True\n ):\n warning_flag = True\n status_message = \"callback function requested stop early \" \"by returning True\"\n break\n\n intol = np.std(self.population_energies) <= self.atol + self.tol * np.abs(np.mean(self.population_energies))\n if warning_flag or intol:\n break\n\n else:\n status_message = _status_message[\"maxiter\"]\n warning_flag = True\n\n de_result = OptimizeResult(\n x=self.x,\n fun=self.population_energies[0],\n nfev=self._nfev,\n nit=nit,\n message=status_message,\n success=(warning_flag is not True),\n )\n\n if self.polish:\n result = minimize(self.func, np.copy(de_result.x), method=\"L-BFGS-B\", bounds=self.limits.T, args=self.args,)\n\n self._nfev += result.nfev\n de_result.nfev = self._nfev\n\n if result.fun < de_result.fun:\n de_result.fun = result.fun\n de_result.x = result.x\n de_result.jac = result.jac\n # to keep internal state consistent\n self.population_energies[0] = result.fun\n self.population[0] = self._unscale_parameters(result.x)\n\n return de_result\n\n def _calculate_population_energies(self):\n \"\"\"\n Calculate the energies of all the population members at the same time.\n Puts the best member in first place. Useful if the population has just\n been initialised.\n \"\"\"\n\n ##############\n # CHANGES: self.func operates on the entire parameters array\n ##############\n itersize = max(0, min(len(self.population), self.maxfun - self._nfev + 1))\n candidates = self.population[:itersize]\n parameters = np.array([self._scale_parameters(c) for c in candidates]) # TODO: can be vectorized\n energies = self.func(parameters, *self.args)\n self.population_energies = energies\n self._nfev += itersize\n\n # for index, candidate in enumerate(self.population):\n # if self._nfev > self.maxfun:\n # break\n\n # parameters = self._scale_parameters(candidate)\n # self.population_energies[index] = self.func(parameters,\n # *self.args)\n # self._nfev += 1\n\n ##############\n ##############\n\n minval = np.argmin(self.population_energies)\n\n # put the lowest energy into the best solution position.\n lowest_energy = self.population_energies[minval]\n self.population_energies[minval] = self.population_energies[0]\n self.population_energies[0] = lowest_energy\n\n self.population[[0, minval], :] = self.population[[minval, 0], :]\n\n def __iter__(self):\n return self\n\n def __next__(self):\n \"\"\"\n Evolve the population by a single generation\n Returns\n -------\n x : ndarray\n The best solution from the solver.\n fun : float\n Value of objective function obtained from the best solution.\n \"\"\"\n # the population may have just been initialized (all entries are\n # np.inf). 
If it has you have to calculate the initial energies\n if np.all(np.isinf(self.population_energies)):\n self._calculate_population_energies()\n\n if self.dither is not None:\n self.scale = self.random_number_generator.rand() * (self.dither[1] - self.dither[0]) + self.dither[0]\n\n ##############\n # CHANGES: self.func operates on the entire parameters array\n ##############\n\n itersize = max(0, min(self.num_population_members, self.maxfun - self._nfev + 1))\n trials = np.array([self._mutate(c) for c in range(itersize)]) # TODO:can be vectorized\n for trial in trials:\n self._ensure_constraint(trial)\n parameters = np.array([self._scale_parameters(trial) for trial in trials])\n energies = self.func(parameters, *self.args)\n self._nfev += itersize\n\n for candidate, (energy, trial) in enumerate(zip(energies, trials)):\n # if the energy of the trial candidate is lower than the\n # original population member then replace it\n if energy < self.population_energies[candidate]:\n self.population[candidate] = trial\n self.population_energies[candidate] = energy\n\n # if the trial candidate also has a lower energy than the\n # best solution then replace that as well\n if energy < self.population_energies[0]:\n self.population_energies[0] = energy\n self.population[0] = trial\n\n # for candidate in range(self.num_population_members):\n # if self._nfev > self.maxfun:\n # raise StopIteration\n\n # # create a trial solution\n # trial = self._mutate(candidate)\n\n # # ensuring that it's in the range [0, 1)\n # self._ensure_constraint(trial)\n\n # # scale from [0, 1) to the actual parameter value\n # parameters = self._scale_parameters(trial)\n\n # # determine the energy of the objective function\n # energy = self.func(parameters, *self.args)\n # self._nfev += 1\n\n # # if the energy of the trial candidate is lower than the\n # # original population member then replace it\n # if energy < self.population_energies[candidate]:\n # self.population[candidate] = trial\n # self.population_energies[candidate] = energy\n\n # # if the trial candidate also has a lower energy than the\n # # best solution then replace that as well\n # if energy < self.population_energies[0]:\n # self.population_energies[0] = energy\n # self.population[0] = trial\n\n ##############\n ##############\n\n return self.x, self.population_energies[0]\n\n def next(self):\n \"\"\"\n Evolve the population by a single generation\n Returns\n -------\n x : ndarray\n The best solution from the solver.\n fun : float\n Value of objective function obtained from the best solution.\n \"\"\"\n # next() is required for compatibility with Python2.7.\n return self.__next__()\n\n def _scale_parameters(self, trial):\n \"\"\"\n scale from a number between 0 and 1 to parameters.\n \"\"\"\n return self.__scale_arg1 + (trial - 0.5) * self.__scale_arg2\n\n def _unscale_parameters(self, parameters):\n \"\"\"\n scale from parameters to a number between 0 and 1.\n \"\"\"\n return (parameters - self.__scale_arg1) / self.__scale_arg2 + 0.5\n\n def _ensure_constraint(self, trial):\n \"\"\"\n make sure the parameters lie between the limits\n \"\"\"\n for index in np.where((trial < 0) | (trial > 1))[0]:\n trial[index] = self.random_number_generator.rand()\n\n def _mutate(self, candidate):\n \"\"\"\n create a trial vector based on a mutation strategy\n \"\"\"\n trial = np.copy(self.population[candidate])\n\n rng = self.random_number_generator\n\n fill_point = rng.randint(0, self.parameter_count)\n\n if self.strategy in [\"currenttobest1exp\", \"currenttobest1bin\"]:\n 
bprime = self.mutation_func(candidate, self._select_samples(candidate, 5))\n else:\n bprime = self.mutation_func(self._select_samples(candidate, 5))\n\n if self.strategy in self._binomial:\n crossovers = rng.rand(self.parameter_count)\n crossovers = crossovers < self.cross_over_probability\n # the last one is always from the bprime vector for binomial\n # If you fill in modulo with a loop you have to set the last one to\n # true. If you don't use a loop then you can have any random entry\n # be True.\n crossovers[fill_point] = True\n trial = np.where(crossovers, bprime, trial)\n return trial\n\n elif self.strategy in self._exponential:\n i = 0\n while i < self.parameter_count and rng.rand() < self.cross_over_probability:\n trial[fill_point] = bprime[fill_point]\n fill_point = (fill_point + 1) % self.parameter_count\n i += 1\n\n return trial\n\n def _best1(self, samples):\n \"\"\"\n best1bin, best1exp\n \"\"\"\n r0, r1 = samples[:2]\n return self.population[0] + self.scale * (self.population[r0] - self.population[r1])\n\n def _rand1(self, samples):\n \"\"\"\n rand1bin, rand1exp\n \"\"\"\n r0, r1, r2 = samples[:3]\n return self.population[r0] + self.scale * (self.population[r1] - self.population[r2])\n\n def _randtobest1(self, samples):\n \"\"\"\n randtobest1bin, randtobest1exp\n \"\"\"\n r0, r1, r2 = samples[:3]\n bprime = np.copy(self.population[r0])\n bprime += self.scale * (self.population[0] - bprime)\n bprime += self.scale * (self.population[r1] - self.population[r2])\n return bprime\n\n def _currenttobest1(self, candidate, samples):\n \"\"\"\n currenttobest1bin, currenttobest1exp\n \"\"\"\n r0, r1 = samples[:2]\n bprime = self.population[candidate] + self.scale * (\n self.population[0] - self.population[candidate] + self.population[r0] - self.population[r1]\n )\n return bprime\n\n def _best2(self, samples):\n \"\"\"\n best2bin, best2exp\n \"\"\"\n r0, r1, r2, r3 = samples[:4]\n bprime = self.population[0] + self.scale * (\n self.population[r0] + self.population[r1] - self.population[r2] - self.population[r3]\n )\n\n return bprime\n\n def _rand2(self, samples):\n \"\"\"\n rand2bin, rand2exp\n \"\"\"\n r0, r1, r2, r3, r4 = samples\n bprime = self.population[r0] + self.scale * (\n self.population[r1] + self.population[r2] - self.population[r3] - self.population[r4]\n )\n\n return bprime\n\n def _select_samples(self, candidate, number_samples):\n \"\"\"\n obtain random integers from range(self.num_population_members),\n without replacement. You can't have the original candidate either.\n \"\"\"\n idxs = list(range(self.num_population_members))\n idxs.remove(candidate)\n self.random_number_generator.shuffle(idxs)\n idxs = idxs[:number_samples]\n return idxs\n",
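The differential-evolution file above describes the 'best1bin' strategy in its docstring (mutate the best member with a scaled difference of two random members, then apply binomial crossover). Below is a minimal standalone NumPy sketch of that trial-vector construction, written for illustration only; the function name `best1bin_trial` and its defaults are not part of the scipy code above.

```python
import numpy as np

def best1bin_trial(population, candidate_idx, mutation=0.8, recombination=0.7, rng=None):
    """Illustrative sketch of the 'best1bin' trial-vector construction.

    `population` is assumed to be scaled to [0, 1) and ordered so that
    population[0] is the best member, matching the solver's convention.
    """
    rng = rng or np.random.RandomState()
    n_params = population.shape[1]

    # pick two distinct members other than the current candidate
    choices = [i for i in range(len(population)) if i != candidate_idx]
    r0, r1 = rng.choice(choices, size=2, replace=False)

    # mutate the best member with the scaled difference of two others
    bprime = population[0] + mutation * (population[r0] - population[r1])

    # binomial crossover: each parameter comes from bprime with probability
    # `recombination`; one randomly chosen position always comes from bprime
    trial = population[candidate_idx].copy()
    crossovers = rng.rand(n_params) < recombination
    crossovers[rng.randint(n_params)] = True
    trial[crossovers] = bprime[crossovers]
    return trial

# example: a population of 6 members with 3 parameters each
pop = np.random.RandomState(0).random_sample((6, 3))
print(best1bin_trial(pop, candidate_idx=2))
```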
"# MIT License\n#\n# Copyright (C) The Adversarial Robustness Toolbox (ART) Authors 2018\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated\n# documentation files (the \"Software\"), to deal in the Software without restriction, including without limitation the\n# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit\n# persons to whom the Software is furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the\n# Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE\n# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,\n# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nimport logging\n\nimport numpy as np\nimport pytest\nfrom numpy.testing import assert_array_equal\n\nfrom art.defences.preprocessor import SpatialSmoothing\n\nlogger = logging.getLogger(__name__)\n\n\[email protected]\ndef image_batch(channels_first):\n \"\"\"\n Image fixture of shape NHWC and NCHW.\n \"\"\"\n test_input = np.repeat(np.array(range(6)).reshape(6, 1), 24, axis=1).reshape(2, 3, 4, 6)\n if not channels_first:\n test_input = np.transpose(test_input, (0, 2, 3, 1))\n test_output = test_input.copy()\n return test_input, test_output\n\n\[email protected]\ndef video_batch(channels_first):\n \"\"\"\n Video fixture of shape NFHWC and NCFHW.\n \"\"\"\n test_input = np.repeat(np.array(range(6)).reshape(6, 1), 24, axis=1).reshape(1, 3, 2, 4, 6)\n if not channels_first:\n test_input = np.transpose(test_input, (0, 2, 3, 4, 1))\n test_output = test_input.copy()\n return test_input, test_output\n\n\[email protected]\ndef tabular_batch():\n \"\"\"\n Create tabular data fixture of shape (batch_size, features).\n \"\"\"\n return np.zeros((2, 4))\n\n\nclass TestLocalSpatialSmoothing:\n \"\"\"\n Test SpatialSmoothing.\n \"\"\"\n\n def test_spatial_smoothing_median_filter_call(self):\n test_input = np.array([[[[1, 2], [3, 4]]]])\n test_output = np.array([[[[1, 2], [3, 3]]]])\n spatial_smoothing = SpatialSmoothing(channels_first=True, window_size=2)\n\n assert_array_equal(spatial_smoothing(test_input)[0], test_output)\n\n @pytest.mark.parametrize(\"channels_first\", [True, False])\n @pytest.mark.parametrize(\"window_size\", [1, 2, 10])\n def test_spatial_smoothing_image_data(self, image_batch, channels_first, window_size):\n test_input, test_output = image_batch\n spatial_smoothing = SpatialSmoothing(channels_first=channels_first, window_size=window_size)\n\n assert_array_equal(spatial_smoothing(test_input)[0], test_output)\n\n @pytest.mark.parametrize(\"channels_first\", [True, False])\n def test_spatial_smoothing_video_data(self, video_batch, channels_first):\n test_input, test_output = video_batch\n spatial_smoothing = SpatialSmoothing(channels_first=channels_first, window_size=2)\n\n assert_array_equal(spatial_smoothing(test_input)[0], test_output)\n\n def test_non_spatial_data_error(self, tabular_batch):\n test_input = tabular_batch\n spatial_smoothing = 
SpatialSmoothing(channels_first=True)\n\n exc_msg = \"Unrecognized input dimension. Spatial smoothing can only be applied to image and video data.\"\n with pytest.raises(ValueError, match=exc_msg):\n spatial_smoothing(test_input)\n\n def test_window_size_error(self):\n exc_msg = \"Sliding window size must be a positive integer.\"\n with pytest.raises(ValueError, match=exc_msg):\n SpatialSmoothing(window_size=0)\n\n def test_triple_clip_values_error(self):\n exc_msg = \"'clip_values' should be a tuple of 2 floats or arrays containing the allowed data range.\"\n with pytest.raises(ValueError, match=exc_msg):\n SpatialSmoothing(clip_values=(0, 1, 2))\n\n def test_relation_clip_values_error(self):\n exc_msg = \"Invalid 'clip_values': min >= max.\"\n with pytest.raises(ValueError, match=exc_msg):\n SpatialSmoothing(clip_values=(1, 0))\n\n\nif __name__ == \"__main__\":\n pytest.cmdline.main(\"-q -s {} --mlFramework=tensorflow --durations=0\".format(__file__).split(\" \"))\n",
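The SpatialSmoothing tests above exercise a local median filter over image and video batches. A rough sketch of that operation using `scipy.ndimage.median_filter` is shown below; the exact window placement and padding used by ART may differ, so this only illustrates the idea of smoothing each channel independently while leaving batch and channel axes untouched.

```python
import numpy as np
from scipy.ndimage import median_filter

# NCHW image batch, same shape as the image fixture above: 2 images, 3 channels, 4x6 pixels
x = np.repeat(np.arange(6).reshape(6, 1), 24, axis=1).reshape(2, 3, 4, 6).astype(float)

# size 1 along batch/channel axes, 3x3 median window along height/width
x_smoothed = median_filter(x, size=(1, 1, 3, 3), mode="reflect")

print(x.shape, x_smoothed.shape)  # shape is preserved: (2, 3, 4, 6) (2, 3, 4, 6)
```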
"# MIT License\n#\n# Copyright (C) The Adversarial Robustness Toolbox (ART) Authors 2020\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated\n# documentation files (the \"Software\"), to deal in the Software without restriction, including without limitation the\n# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit\n# persons to whom the Software is furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the\n# Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE\n# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,\n# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nimport logging\n\nimport numpy as np\nimport pytest\nfrom numpy.testing import assert_array_equal\n\nfrom art.defences.preprocessor import Mp3Compression\n\nlogger = logging.getLogger(__name__)\n\n\nclass AudioInput:\n \"\"\"\n Create audio batch.\n \"\"\"\n\n def __init__(self, channels_first, channels, sample_rate=44100, batch_size=2):\n self.channels_first = channels_first\n self.channels = channels\n self.sample_rate = sample_rate\n self.batch_size = batch_size\n\n def get_data(self):\n if self.channels_first:\n return np.zeros((self.batch_size, self.channels, self.sample_rate), dtype=np.int16)\n else:\n return np.zeros((self.batch_size, self.sample_rate, self.channels), dtype=np.int16)\n\n\[email protected](params=[1, 2], ids=[\"mono\", \"stereo\"])\ndef audio_batch(request, channels_first):\n \"\"\"\n Audio fixtures of shape `(batch_size, channels, samples)` or `(batch_size, samples, channels)`.\n \"\"\"\n channels = request.param\n audio_input = AudioInput(channels_first, channels)\n test_input = audio_input.get_data()\n test_output = test_input.copy()\n return test_input, test_output, audio_input.sample_rate\n\n\[email protected]\ndef image_batch():\n \"\"\"Create image fixture of shape (batch_size, channels, width, height).\"\"\"\n return np.zeros((2, 1, 4, 4))\n\n\nclass TestMp3Compression:\n \"\"\"Test Mp3Compresssion.\"\"\"\n\n def test_sample_rate_error(self):\n exc_msg = \"Sample rate be must a positive integer.\"\n with pytest.raises(ValueError, match=exc_msg):\n Mp3Compression(sample_rate=0)\n\n def test_non_temporal_data_error(self, image_batch):\n test_input = image_batch\n mp3compression = Mp3Compression(sample_rate=16000)\n\n exc_msg = \"Mp3 compression can only be applied to temporal data across at least one channel.\"\n with pytest.raises(ValueError, match=exc_msg):\n mp3compression(test_input)\n\n @pytest.mark.parametrize(\"channels_first\", [True, False])\n def test_mp3_compresssion(self, audio_batch, channels_first):\n test_input, test_output, sample_rate = audio_batch\n mp3compression = Mp3Compression(sample_rate=sample_rate, channels_first=channels_first)\n\n assert_array_equal(mp3compression(test_input)[0], test_output)\n\n\nif __name__ == \"__main__\":\n pytest.cmdline.main(\"-q -s {} --mlFramework=tensorflow 
--durations=0\".format(__file__).split(\" \"))\n",
"# MIT License\n#\n# Copyright (C) The Adversarial Robustness Toolbox (ART) Authors 2018\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated\n# documentation files (the \"Software\"), to deal in the Software without restriction, including without limitation the\n# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit\n# persons to whom the Software is furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the\n# Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE\n# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,\n# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nimport logging\nimport unittest\n\nimport keras.backend as k\nimport numpy as np\n\nfrom art.attacks.evasion.elastic_net import ElasticNet\nfrom art.estimators.classification.classifier import ClassGradientsMixin\nfrom art.estimators.classification.keras import KerasClassifier\nfrom art.utils import random_targets, to_categorical\nfrom tests.attacks.utils import backend_test_classifier_type_check_fail\nfrom tests.utils import (\n TestBase,\n get_image_classifier_kr,\n get_image_classifier_pt,\n get_image_classifier_tf,\n get_tabular_classifier_kr,\n get_tabular_classifier_pt,\n get_tabular_classifier_tf,\n master_seed,\n)\n\nlogger = logging.getLogger(__name__)\n\n\nclass TestElasticNet(TestBase):\n \"\"\"\n A unittest class for testing the ElasticNet attack.\n \"\"\"\n\n @classmethod\n def setUpClass(cls):\n master_seed(seed=1234)\n super().setUpClass()\n\n cls.n_train = 500\n cls.n_test = 10\n cls.x_train_mnist = cls.x_train_mnist[0 : cls.n_train]\n cls.y_train_mnist = cls.y_train_mnist[0 : cls.n_train]\n cls.x_test_mnist = cls.x_test_mnist[0 : cls.n_test]\n cls.y_test_mnist = cls.y_test_mnist[0 : cls.n_test]\n\n def setUp(self):\n master_seed(seed=1234)\n super().setUp()\n\n def test_tensorflow_failure_attack(self):\n \"\"\"\n Test the corner case when attack fails.\n :return:\n \"\"\"\n # Build TensorFlowClassifier\n tfc, sess = get_image_classifier_tf()\n\n # Failure attack\n ead = ElasticNet(\n classifier=tfc, targeted=True, max_iter=0, binary_search_steps=0, learning_rate=0, initial_const=1\n )\n params = {\"y\": random_targets(self.y_test_mnist, tfc.nb_classes)}\n x_test_adv = ead.generate(self.x_test_mnist, **params)\n self.assertLessEqual(np.amax(x_test_adv), 1.0)\n self.assertGreaterEqual(np.amin(x_test_adv), 0.0)\n np.testing.assert_almost_equal(self.x_test_mnist, x_test_adv, 3)\n\n # Clean-up session\n if sess is not None:\n sess.close()\n\n def test_tensorflow_mnist(self):\n \"\"\"\n First test with the TensorFlowClassifier.\n :return:\n \"\"\"\n x_test_original = self.x_test_mnist.copy()\n\n # Build TensorFlowClassifier\n tfc, sess = get_image_classifier_tf(from_logits=True)\n\n # First attack\n ead = ElasticNet(classifier=tfc, targeted=True, max_iter=2)\n params = {\"y\": random_targets(self.y_test_mnist, tfc.nb_classes)}\n x_test_adv = 
ead.generate(self.x_test_mnist, **params)\n expected_x_test_adv = np.asarray(\n [\n 0.45704955,\n 0.43627003,\n 0.57238287,\n 1.0,\n 0.11541145,\n 0.12619308,\n 0.48318917,\n 0.3457903,\n 0.17863746,\n 0.09060935,\n 0.0,\n 0.00963121,\n 0.0,\n 0.04749763,\n 0.4058206,\n 0.17860745,\n 0.0,\n 0.9153206,\n 0.84564775,\n 0.20603634,\n 0.10586322,\n 0.00947509,\n 0.0,\n 0.0,\n 0.0,\n 0.0,\n 0.0,\n 0.0,\n ]\n )\n np.testing.assert_array_almost_equal(x_test_adv[0, 14, :, 0], expected_x_test_adv, decimal=6)\n self.assertLessEqual(np.amax(x_test_adv), 1.0)\n self.assertGreaterEqual(np.amin(x_test_adv), 0.0)\n target = np.argmax(params[\"y\"], axis=1)\n y_pred_adv = np.argmax(tfc.predict(x_test_adv), axis=1)\n logger.debug(\"EAD target: %s\", target)\n logger.debug(\"EAD actual: %s\", y_pred_adv)\n logger.info(\"EAD success rate on MNIST: %.2f%%\", (100 * sum(target == y_pred_adv) / len(target)))\n self.assertTrue((target == y_pred_adv).any())\n\n # Second attack\n ead = ElasticNet(classifier=tfc, targeted=False, max_iter=2)\n params = {\"y\": random_targets(self.y_test_mnist, tfc.nb_classes)}\n x_test_adv = ead.generate(self.x_test_mnist, **params)\n self.assertLessEqual(np.amax(x_test_adv), 1.0)\n self.assertGreaterEqual(np.amin(x_test_adv), 0.0)\n target = np.argmax(params[\"y\"], axis=1)\n y_pred_adv = np.argmax(tfc.predict(x_test_adv), axis=1)\n logger.debug(\"EAD target: %s\", target)\n logger.debug(\"EAD actual: %s\", y_pred_adv)\n logger.info(\"EAD success rate on MNIST: %.2f%%\", (100 * sum(target != y_pred_adv) / float(len(target))))\n np.testing.assert_array_equal(y_pred_adv, np.asarray([7, 1, 1, 4, 4, 1, 4, 4, 4, 4]))\n\n # Third attack\n ead = ElasticNet(classifier=tfc, targeted=False, max_iter=2)\n params = {}\n x_test_adv = ead.generate(self.x_test_mnist, **params)\n expected_x_test_adv = np.asarray(\n [\n 0.22866514,\n 0.21826893,\n 0.22902338,\n 0.06268515,\n 0.0,\n 0.0,\n 0.04822975,\n 0.0,\n 0.0,\n 0.0,\n 0.05555382,\n 0.0,\n 0.0,\n 0.0,\n 0.38986346,\n 0.10653087,\n 0.32385707,\n 0.98043066,\n 0.75790393,\n 0.16486718,\n 0.16069527,\n 0.0,\n 0.0,\n 0.0,\n 0.0,\n 0.0,\n 0.0,\n 0.0,\n ]\n )\n np.testing.assert_array_almost_equal(x_test_adv[0, 14, :, 0], expected_x_test_adv, decimal=6)\n self.assertLessEqual(np.amax(x_test_adv), 1.0)\n self.assertGreaterEqual(np.amin(x_test_adv), 0.0)\n y_pred = np.argmax(tfc.predict(self.x_test_mnist), axis=1)\n y_pred_adv = np.argmax(tfc.predict(x_test_adv), axis=1)\n logger.debug(\"EAD target: %s\", y_pred)\n logger.debug(\"EAD actual: %s\", y_pred_adv)\n logger.info(\"EAD success rate: %.2f%%\", (100 * sum(y_pred != y_pred_adv) / float(len(y_pred))))\n np.testing.assert_array_equal(y_pred_adv, np.asarray([0, 4, 7, 9, 0, 7, 7, 3, 0, 7]))\n\n # First attack without batching\n ead_wob = ElasticNet(classifier=tfc, targeted=True, max_iter=2, batch_size=1)\n params = {\"y\": random_targets(self.y_test_mnist, tfc.nb_classes)}\n x_test_adv = ead_wob.generate(self.x_test_mnist, **params)\n expected_x_test_adv = np.asarray(\n [\n 0.3287169,\n 0.31374657,\n 0.42853343,\n 0.8994576,\n 0.19850709,\n 0.11997936,\n 0.5622535,\n 0.43854535,\n 0.19387433,\n 0.12516324,\n 0.0,\n 0.10933565,\n 0.02162433,\n 0.07120894,\n 0.95224255,\n 0.3072921,\n 0.48966524,\n 1.0,\n 0.3814998,\n 0.15782641,\n 0.52283823,\n 0.12852049,\n 0.0,\n 0.0,\n 0.0,\n 0.0,\n 0.0,\n 0.0,\n ]\n )\n np.testing.assert_array_almost_equal(x_test_adv[0, 14, :, 0], expected_x_test_adv, decimal=6)\n self.assertLessEqual(np.amax(x_test_adv), 1.0)\n self.assertGreaterEqual(np.amin(x_test_adv), 0.0)\n 
target = np.argmax(params[\"y\"], axis=1)\n y_pred_adv = np.argmax(tfc.predict(x_test_adv), axis=1)\n logger.debug(\"EAD target: %s\", target)\n logger.debug(\"EAD actual: %s\", y_pred_adv)\n logger.info(\"EAD success rate: %.2f%%\", (100 * sum(target == y_pred_adv) / float(len(target))))\n self.assertTrue((target == y_pred_adv).any())\n\n # Second attack without batching\n ead_wob = ElasticNet(classifier=tfc, targeted=False, max_iter=2, batch_size=1)\n params = {\"y\": random_targets(self.y_test_mnist, tfc.nb_classes)}\n x_test_adv = ead_wob.generate(self.x_test_mnist, **params)\n self.assertLessEqual(np.amax(x_test_adv), 1.0)\n self.assertGreaterEqual(np.amin(x_test_adv), 0.0)\n target = np.argmax(params[\"y\"], axis=1)\n y_pred_adv = np.argmax(tfc.predict(x_test_adv), axis=1)\n logger.debug(\"EAD target: %s\", target)\n logger.debug(\"EAD actual: %s\", y_pred_adv)\n logger.info(\"EAD success rate: %.2f%%\", (100 * sum(target != y_pred_adv) / float(len(target))))\n np.testing.assert_array_equal(y_pred_adv, np.asarray([7, 1, 1, 4, 4, 1, 4, 4, 4, 4]))\n\n # Check that x_test has not been modified by attack and classifier\n self.assertAlmostEqual(float(np.max(np.abs(x_test_original - self.x_test_mnist))), 0.0, delta=0.00001)\n\n # Close session\n if sess is not None:\n sess.close()\n\n def test_keras_mnist(self):\n \"\"\"\n Second test with the KerasClassifier.\n :return:\n \"\"\"\n x_test_original = self.x_test_mnist.copy()\n\n # Build KerasClassifier\n krc = get_image_classifier_kr()\n\n # First attack\n ead = ElasticNet(classifier=krc, targeted=True, max_iter=2)\n y_target = to_categorical(np.asarray([6, 6, 7, 4, 9, 7, 9, 0, 1, 0]), nb_classes=10)\n x_test_adv = ead.generate(self.x_test_mnist, y=y_target)\n expected_x_test_adv = np.asarray(\n [\n 0.0,\n 0.0,\n 0.0,\n 0.0,\n 0.0,\n 0.0,\n 0.0,\n 0.0,\n 0.0,\n 0.0,\n 0.00183569,\n 0.0,\n 0.0,\n 0.49765405,\n 1.0,\n 0.6467149,\n 0.0033755,\n 0.0052456,\n 0.0,\n 0.01104407,\n 0.00495547,\n 0.02747423,\n 0.0,\n 0.0,\n 0.0,\n 0.0,\n 0.0,\n 0.0,\n ]\n )\n np.testing.assert_array_almost_equal(x_test_adv[2, 14, :, 0], expected_x_test_adv, decimal=6)\n self.assertLessEqual(np.amax(x_test_adv), 1.0)\n self.assertGreaterEqual(np.amin(x_test_adv), 0.0)\n target = np.argmax(y_target, axis=1)\n y_pred_adv = np.argmax(krc.predict(x_test_adv), axis=1)\n logger.debug(\"EAD target: %s\", target)\n logger.debug(\"EAD actual: %s\", y_pred_adv)\n logger.info(\"EAD success rate: %.2f%%\", (100 * sum(target == y_pred_adv) / float(len(target))))\n self.assertTrue((target == y_pred_adv).any())\n\n # Second attack\n ead = ElasticNet(classifier=krc, targeted=False, max_iter=2)\n y_target = to_categorical(np.asarray([9, 5, 6, 7, 1, 6, 1, 5, 8, 5]), nb_classes=10)\n x_test_adv = ead.generate(self.x_test_mnist, y=y_target)\n self.assertLessEqual(np.amax(x_test_adv), 1.0)\n self.assertGreaterEqual(np.amin(x_test_adv), 0.0)\n y_pred_adv = np.argmax(krc.predict(x_test_adv), axis=1)\n logger.debug(\"EAD target: %s\", y_target)\n logger.debug(\"EAD actual: %s\", y_pred_adv)\n logger.info(\"EAD success rate: %.2f\", (100 * sum(target != y_pred_adv) / float(len(target))))\n self.assertTrue((target != y_pred_adv).any())\n np.testing.assert_array_equal(y_pred_adv, np.asarray([7, 1, 1, 4, 4, 1, 4, 4, 4, 4]))\n\n # Check that x_test has not been modified by attack and classifier\n self.assertAlmostEqual(float(np.max(np.abs(x_test_original - self.x_test_mnist))), 0.0, delta=0.00001)\n\n k.clear_session()\n\n def test_pytorch_mnist(self):\n \"\"\"\n Third test with the 
PyTorchClassifier.\n :return:\n \"\"\"\n x_test = np.reshape(self.x_test_mnist, (self.x_test_mnist.shape[0], 1, 28, 28)).astype(np.float32)\n x_test_original = x_test.copy()\n\n # Build PyTorchClassifier\n ptc = get_image_classifier_pt(from_logits=False)\n\n # First attack\n ead = ElasticNet(classifier=ptc, targeted=True, max_iter=2)\n params = {\"y\": random_targets(self.y_test_mnist, ptc.nb_classes)}\n x_test_adv = ead.generate(x_test, **params)\n expected_x_test_adv = np.asarray(\n [\n 0.01678124,\n 0.0,\n 0.0,\n 0.0,\n 0.0,\n 0.0,\n 0.0,\n 0.0,\n 0.00665895,\n 0.0,\n 0.11374763,\n 0.36250514,\n 0.5472948,\n 0.9308808,\n 1.0,\n 0.99920374,\n 0.86274165,\n 0.6346757,\n 0.5597227,\n 0.24191494,\n 0.25882354,\n 0.0091916,\n 0.0,\n 0.0,\n 0.0,\n 0.0,\n 0.0,\n 0.0,\n ]\n )\n np.testing.assert_array_almost_equal(x_test_adv[2, 0, :, 14], expected_x_test_adv, decimal=6)\n self.assertLessEqual(np.amax(x_test_adv), 1.0)\n self.assertGreaterEqual(np.amin(x_test_adv), 0.0)\n target = np.argmax(params[\"y\"], axis=1)\n y_pred_adv = np.argmax(ptc.predict(x_test_adv), axis=1)\n self.assertTrue((target == y_pred_adv).any())\n\n # Second attack\n ead = ElasticNet(classifier=ptc, targeted=False, max_iter=2)\n params = {\"y\": random_targets(self.y_test_mnist, ptc.nb_classes)}\n x_test_adv = ead.generate(x_test, **params)\n self.assertLessEqual(np.amax(x_test_adv), 1.0)\n self.assertGreaterEqual(np.amin(x_test_adv), 0.0)\n target = np.argmax(params[\"y\"], axis=1)\n y_pred_adv = np.argmax(ptc.predict(x_test_adv), axis=1)\n self.assertTrue((target != y_pred_adv).any())\n np.testing.assert_array_equal(y_pred_adv, np.asarray([7, 1, 1, 4, 4, 1, 4, 4, 4, 4]))\n\n # Check that x_test has not been modified by attack and classifier\n self.assertAlmostEqual(float(np.max(np.abs(x_test_original - x_test))), 0.0, delta=0.00001)\n\n def test_classifier_type_check_fail(self):\n backend_test_classifier_type_check_fail(ElasticNet, [ClassGradientsMixin])\n\n def test_keras_iris_clipped(self):\n classifier = get_tabular_classifier_kr()\n attack = ElasticNet(classifier, targeted=False, max_iter=10)\n x_test_adv = attack.generate(self.x_test_iris)\n expected_x_test_adv = np.asarray([0.85931635, 0.44633555, 0.65658355, 0.23840423])\n np.testing.assert_array_almost_equal(x_test_adv[0, :], expected_x_test_adv, decimal=6)\n self.assertLessEqual(np.amax(x_test_adv), 1.0)\n self.assertGreaterEqual(np.amin(x_test_adv), 0.0)\n predictions_adv = np.argmax(classifier.predict(x_test_adv), axis=1)\n np.testing.assert_array_equal(\n predictions_adv,\n np.asarray(\n [\n 1,\n 1,\n 1,\n 2,\n 1,\n 1,\n 1,\n 2,\n 1,\n 2,\n 1,\n 1,\n 1,\n 2,\n 1,\n 1,\n 2,\n 2,\n 1,\n 1,\n 1,\n 1,\n 1,\n 1,\n 1,\n 1,\n 1,\n 1,\n 2,\n 1,\n 2,\n 1,\n 2,\n 1,\n 0,\n 1,\n 1,\n 1,\n 2,\n 0,\n 2,\n 2,\n 1,\n 1,\n 2,\n ]\n ),\n )\n accuracy = 1.0 - np.sum(predictions_adv == np.argmax(self.y_test_iris, axis=1)) / self.y_test_iris.shape[0]\n logger.info(\"EAD success rate on Iris: %.2f%%\", (accuracy * 100))\n\n def test_keras_iris_unbounded(self):\n classifier = get_tabular_classifier_kr()\n\n # Recreate a classifier without clip values\n classifier = KerasClassifier(model=classifier._model, use_logits=False, channels_first=True)\n attack = ElasticNet(classifier, targeted=False, max_iter=10)\n x_test_adv = attack.generate(self.x_test_iris)\n expected_x_test_adv = np.asarray([0.85931635, 0.44633555, 0.65658355, 0.23840423])\n np.testing.assert_array_almost_equal(x_test_adv[0, :], expected_x_test_adv, decimal=6)\n predictions_adv = 
np.argmax(classifier.predict(x_test_adv), axis=1)\n np.testing.assert_array_equal(\n predictions_adv,\n np.asarray(\n [\n 1,\n 1,\n 1,\n 2,\n 1,\n 1,\n 1,\n 2,\n 1,\n 2,\n 1,\n 1,\n 1,\n 2,\n 1,\n 1,\n 2,\n 2,\n 1,\n 1,\n 1,\n 1,\n 1,\n 1,\n 1,\n 1,\n 1,\n 1,\n 2,\n 1,\n 2,\n 1,\n 2,\n 1,\n 0,\n 1,\n 1,\n 1,\n 2,\n 0,\n 2,\n 2,\n 1,\n 1,\n 2,\n ]\n ),\n )\n accuracy = 1.0 - np.sum(predictions_adv == np.argmax(self.y_test_iris, axis=1)) / self.y_test_iris.shape[0]\n logger.info(\"EAD success rate on Iris: %.2f%%\", (accuracy * 100))\n\n def test_tensorflow_iris(self):\n classifier, _ = get_tabular_classifier_tf()\n\n # Test untargeted attack\n attack = ElasticNet(classifier, targeted=False, max_iter=10)\n x_test_adv = attack.generate(self.x_test_iris)\n expected_x_test_adv = np.asarray([0.8479195, 0.42525578, 0.70166135, 0.28664514])\n np.testing.assert_array_almost_equal(x_test_adv[0, :], expected_x_test_adv, decimal=6)\n self.assertLessEqual(np.amax(x_test_adv), 1.0)\n self.assertGreaterEqual(np.amin(x_test_adv), 0.0)\n\n predictions_adv = np.argmax(classifier.predict(x_test_adv), axis=1)\n np.testing.assert_array_equal(\n predictions_adv,\n np.asarray(\n [\n 1,\n 2,\n 2,\n 2,\n 1,\n 1,\n 1,\n 2,\n 1,\n 2,\n 1,\n 1,\n 1,\n 2,\n 2,\n 2,\n 2,\n 2,\n 1,\n 1,\n 1,\n 1,\n 1,\n 1,\n 1,\n 2,\n 2,\n 2,\n 2,\n 2,\n 2,\n 1,\n 2,\n 1,\n 0,\n 2,\n 2,\n 1,\n 2,\n 0,\n 2,\n 2,\n 1,\n 1,\n 2,\n ]\n ),\n )\n accuracy = 1.0 - np.sum(predictions_adv == np.argmax(self.y_test_iris, axis=1)) / self.y_test_iris.shape[0]\n logger.info(\"EAD success rate on Iris: %.2f%%\", (accuracy * 100))\n\n # Test targeted attack\n targets = random_targets(self.y_test_iris, nb_classes=3)\n attack = ElasticNet(classifier, targeted=True, max_iter=10)\n x_test_adv = attack.generate(self.x_test_iris, **{\"y\": targets})\n expected_x_test_adv = np.asarray([0.8859426, 0.51877, 0.5014498, 0.05447771])\n np.testing.assert_array_almost_equal(x_test_adv[0, :], expected_x_test_adv, decimal=6)\n self.assertLessEqual(np.amax(x_test_adv), 1.0)\n self.assertGreaterEqual(np.amin(x_test_adv), 0.0)\n\n predictions_adv = np.argmax(classifier.predict(x_test_adv), axis=1)\n np.testing.assert_array_equal(\n predictions_adv,\n np.asarray(\n [\n 0,\n 0,\n 0,\n 2,\n 2,\n 2,\n 2,\n 2,\n 2,\n 2,\n 2,\n 2,\n 0,\n 2,\n 0,\n 0,\n 2,\n 2,\n 0,\n 2,\n 2,\n 2,\n 2,\n 2,\n 2,\n 0,\n 0,\n 0,\n 2,\n 0,\n 2,\n 2,\n 2,\n 2,\n 2,\n 0,\n 0,\n 0,\n 2,\n 2,\n 2,\n 2,\n 2,\n 0,\n 2,\n ]\n ),\n )\n\n accuracy = np.sum(predictions_adv == np.argmax(targets, axis=1)) / self.y_test_iris.shape[0]\n logger.info(\"Targeted EAD success rate on Iris: %.2f%%\", (accuracy * 100))\n\n def test_pytorch_iris(self):\n classifier = get_tabular_classifier_pt()\n attack = ElasticNet(classifier, targeted=False, max_iter=10)\n x_test_adv = attack.generate(self.x_test_iris.astype(np.float32))\n expected_x_test_adv = np.asarray([0.8479194, 0.42525578, 0.70166135, 0.28664517])\n np.testing.assert_array_almost_equal(x_test_adv[0, :], expected_x_test_adv, decimal=6)\n self.assertLessEqual(np.amax(x_test_adv), 1.0)\n self.assertGreaterEqual(np.amin(x_test_adv), 0.0)\n\n predictions_adv = np.argmax(classifier.predict(x_test_adv), axis=1)\n np.testing.assert_array_equal(\n predictions_adv,\n np.asarray(\n [\n 1,\n 2,\n 2,\n 2,\n 1,\n 1,\n 1,\n 2,\n 1,\n 2,\n 1,\n 1,\n 1,\n 2,\n 2,\n 2,\n 2,\n 2,\n 1,\n 1,\n 1,\n 1,\n 1,\n 1,\n 1,\n 2,\n 2,\n 2,\n 2,\n 2,\n 2,\n 1,\n 2,\n 1,\n 0,\n 2,\n 2,\n 1,\n 2,\n 0,\n 2,\n 2,\n 1,\n 1,\n 2,\n ]\n ),\n )\n\n accuracy = 1.0 - np.sum(predictions_adv == 
np.argmax(self.y_test_iris, axis=1)) / self.y_test_iris.shape[0]\n logger.info(\"EAD success rate on Iris: %.2f%%\", (accuracy * 100))\n\n def test_scikitlearn(self):\n from sklearn.linear_model import LogisticRegression\n from sklearn.svm import SVC, LinearSVC\n\n from art.estimators.classification.scikitlearn import SklearnClassifier\n\n scikitlearn_test_cases = [\n LogisticRegression(solver=\"lbfgs\", multi_class=\"auto\"),\n SVC(gamma=\"auto\"),\n LinearSVC(),\n ]\n\n x_test_original = self.x_test_iris.copy()\n\n for model in scikitlearn_test_cases:\n classifier = SklearnClassifier(model=model, clip_values=(0, 1))\n classifier.fit(x=self.x_test_iris, y=self.y_test_iris)\n\n # Test untargeted attack\n attack = ElasticNet(classifier, targeted=False, max_iter=2)\n x_test_adv = attack.generate(self.x_test_iris)\n self.assertFalse((self.x_test_iris == x_test_adv).all())\n self.assertLessEqual(np.amax(x_test_adv), 1.0)\n self.assertGreaterEqual(np.amin(x_test_adv), 0.0)\n\n predictions_adv = np.argmax(classifier.predict(x_test_adv), axis=1)\n self.assertFalse((np.argmax(self.y_test_iris, axis=1) == predictions_adv).all())\n accuracy = 1.0 - np.sum(predictions_adv == np.argmax(self.y_test_iris, axis=1)) / self.y_test_iris.shape[0]\n logger.info(\"EAD success rate of \" + classifier.__class__.__name__ + \" on Iris: %.2f%%\", (accuracy * 100))\n\n # Test targeted attack\n targets = random_targets(self.y_test_iris, nb_classes=3)\n attack = ElasticNet(classifier, targeted=True, max_iter=2)\n x_test_adv = attack.generate(self.x_test_iris, **{\"y\": targets})\n self.assertFalse((self.x_test_iris == x_test_adv).all())\n self.assertLessEqual(np.amax(x_test_adv), 1.0)\n self.assertGreaterEqual(np.amin(x_test_adv), 0.0)\n\n predictions_adv = np.argmax(classifier.predict(x_test_adv), axis=1)\n self.assertTrue((np.argmax(targets, axis=1) == predictions_adv).any())\n accuracy = np.sum(predictions_adv == np.argmax(targets, axis=1)) / self.y_test_iris.shape[0]\n logger.info(\n \"Targeted EAD success rate of \" + classifier.__class__.__name__ + \" on Iris: %.2f%%\", (accuracy * 100)\n )\n\n # Check that x_test has not been modified by attack and classifier\n self.assertAlmostEqual(float(np.max(np.abs(x_test_original - self.x_test_iris))), 0.0, delta=0.00001)\n\n\nif __name__ == \"__main__\":\n unittest.main()\n",
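The ElasticNet tests above repeat one evaluation pattern per backend: an untargeted run checked against the true labels and a targeted run checked against randomly drawn targets. The sketch below condenses that pattern into a helper; `classifier`, `x_test`, `y_test`, and `nb_classes` are assumed to be prepared as in the tests, and the helper name is hypothetical.

```python
import numpy as np

from art.attacks.evasion.elastic_net import ElasticNet
from art.utils import random_targets


def attack_success_rates(classifier, x_test, y_test, nb_classes):
    """Sketch of the untargeted/targeted evaluation pattern used in the tests above."""
    # untargeted: success means the prediction moved away from the true label
    attack = ElasticNet(classifier, targeted=False, max_iter=2)
    x_adv = attack.generate(x_test)
    preds_adv = np.argmax(classifier.predict(x_adv), axis=1)
    untargeted = np.mean(preds_adv != np.argmax(y_test, axis=1))

    # targeted: success means the prediction reached a randomly drawn target
    targets = random_targets(y_test, nb_classes=nb_classes)
    attack = ElasticNet(classifier, targeted=True, max_iter=2)
    x_adv = attack.generate(x_test, y=targets)
    preds_adv = np.argmax(classifier.predict(x_adv), axis=1)
    targeted = np.mean(preds_adv == np.argmax(targets, axis=1))

    return untargeted, targeted
```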
"# MIT License\n#\n# Copyright (C) The Adversarial Robustness Toolbox (ART) Authors 2018\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated\n# documentation files (the \"Software\"), to deal in the Software without restriction, including without limitation the\n# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit\n# persons to whom the Software is furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the\n# Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE\n# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,\n# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\"\"\"\nThis module implements the white-box attack `DeepFool`.\n\n| Paper link: https://arxiv.org/abs/1511.04599\n\"\"\"\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nimport logging\nfrom typing import Optional\n\nimport numpy as np\nfrom tqdm import trange\n\nfrom art.config import ART_NUMPY_DTYPE\nfrom art.estimators.classification.classifier import (\n ClassGradientsMixin,\n ClassifierGradients,\n)\nfrom art.attacks.attack import EvasionAttack\nfrom art.utils import compute_success, is_probability\n\nlogger = logging.getLogger(__name__)\n\n\nclass DeepFool(EvasionAttack):\n \"\"\"\n Implementation of the attack from Moosavi-Dezfooli et al. (2015).\n\n | Paper link: https://arxiv.org/abs/1511.04599\n \"\"\"\n\n attack_params = EvasionAttack.attack_params + [\n \"max_iter\",\n \"epsilon\",\n \"nb_grads\",\n \"batch_size\",\n \"verbose\",\n ]\n _estimator_requirements = (ClassGradientsMixin,)\n\n def __init__(\n self,\n classifier: ClassifierGradients,\n max_iter: int = 100,\n epsilon: float = 1e-6,\n nb_grads: int = 10,\n batch_size: int = 1,\n verbose: bool = True,\n ) -> None:\n \"\"\"\n Create a DeepFool attack instance.\n\n :param classifier: A trained classifier.\n :param max_iter: The maximum number of iterations.\n :param epsilon: Overshoot parameter.\n :param nb_grads: The number of class gradients (top nb_grads w.r.t. prediction) to compute. 
This way only the\n most likely classes are considered, speeding up the computation.\n :param batch_size: Batch size\n \"\"\"\n super(DeepFool, self).__init__(estimator=classifier)\n self.max_iter = max_iter\n self.epsilon = epsilon\n self.nb_grads = nb_grads\n self.batch_size = batch_size\n self.verbose = verbose\n self._check_params()\n if self.estimator.clip_values is None:\n logger.warning(\n \"The `clip_values` attribute of the estimator is `None`, therefore this instance of DeepFool will by \"\n \"default generate adversarial perturbations scaled for input values in the range [0, 1] but not clip \"\n \"the adversarial example.\"\n )\n\n def generate(self, x: np.ndarray, y: Optional[np.ndarray] = None, **kwargs) -> np.ndarray:\n \"\"\"\n Generate adversarial samples and return them in an array.\n\n :param x: An array with the original inputs to be attacked.\n :param y: An array with the original labels to be predicted.\n :return: An array holding the adversarial examples.\n \"\"\"\n x_adv = x.astype(ART_NUMPY_DTYPE)\n preds = self.estimator.predict(x, batch_size=self.batch_size)\n\n if is_probability(preds[0]):\n logger.warning(\n \"It seems that the attacked model is predicting probabilities. DeepFool expects logits as model output \"\n \"to achieve its full attack strength.\"\n )\n\n # Determine the class labels for which to compute the gradients\n use_grads_subset = self.nb_grads < self.estimator.nb_classes\n if use_grads_subset:\n # TODO compute set of unique labels per batch\n grad_labels = np.argsort(-preds, axis=1)[:, : self.nb_grads]\n labels_set = np.unique(grad_labels)\n else:\n labels_set = np.arange(self.estimator.nb_classes)\n sorter = np.arange(len(labels_set))\n\n # Pick a small scalar to avoid division by 0\n tol = 10e-8\n\n # Compute perturbation with implicit batching\n for batch_id in trange(\n int(np.ceil(x_adv.shape[0] / float(self.batch_size))), desc=\"DeepFool\", disable=not self.verbose\n ):\n batch_index_1, batch_index_2 = batch_id * self.batch_size, (batch_id + 1) * self.batch_size\n batch = x_adv[batch_index_1:batch_index_2].copy()\n\n # Get predictions and gradients for batch\n f_batch = preds[batch_index_1:batch_index_2]\n fk_hat = np.argmax(f_batch, axis=1)\n if use_grads_subset:\n # Compute gradients only for top predicted classes\n grd = np.array([self.estimator.class_gradient(batch, label=_) for _ in labels_set])\n grd = np.squeeze(np.swapaxes(grd, 0, 2), axis=0)\n else:\n # Compute gradients for all classes\n grd = self.estimator.class_gradient(batch)\n\n # Get current predictions\n active_indices = np.arange(len(batch))\n current_step = 0\n while active_indices.size > 0 and current_step < self.max_iter:\n # Compute difference in predictions and gradients only for selected top predictions\n labels_indices = sorter[np.searchsorted(labels_set, fk_hat, sorter=sorter)]\n grad_diff = grd - grd[np.arange(len(grd)), labels_indices][:, None]\n f_diff = f_batch[:, labels_set] - f_batch[np.arange(len(f_batch)), labels_indices][:, None]\n\n # Choose coordinate and compute perturbation\n norm = np.linalg.norm(grad_diff.reshape(len(grad_diff), len(labels_set), -1), axis=2) + tol\n value = np.abs(f_diff) / norm\n value[np.arange(len(value)), labels_indices] = np.inf\n l_var = np.argmin(value, axis=1)\n absolute1 = abs(f_diff[np.arange(len(f_diff)), l_var])\n draddiff = grad_diff[np.arange(len(grad_diff)), l_var].reshape(len(grad_diff), -1)\n pow1 = pow(np.linalg.norm(draddiff, axis=1), 2,) + tol\n r_var = absolute1 / pow1\n r_var = r_var.reshape((-1,) + (1,) * 
(len(x.shape) - 1))\n r_var = r_var * grad_diff[np.arange(len(grad_diff)), l_var]\n\n # Add perturbation and clip result\n if self.estimator.clip_values is not None:\n batch[active_indices] = np.clip(\n batch[active_indices]\n + r_var[active_indices] * (self.estimator.clip_values[1] - self.estimator.clip_values[0]),\n self.estimator.clip_values[0],\n self.estimator.clip_values[1],\n )\n else:\n batch[active_indices] += r_var[active_indices]\n\n # Recompute prediction for new x\n f_batch = self.estimator.predict(batch)\n fk_i_hat = np.argmax(f_batch, axis=1)\n\n # Recompute gradients for new x\n if use_grads_subset:\n # Compute gradients only for (originally) top predicted classes\n grd = np.array([self.estimator.class_gradient(batch, label=_) for _ in labels_set])\n grd = np.squeeze(np.swapaxes(grd, 0, 2), axis=0)\n else:\n # Compute gradients for all classes\n grd = self.estimator.class_gradient(batch)\n\n # Stop if misclassification has been achieved\n active_indices = np.where(fk_i_hat == fk_hat)[0]\n\n current_step += 1\n\n # Apply overshoot parameter\n x_adv1 = x_adv[batch_index_1:batch_index_2]\n x_adv2 = (1 + self.epsilon) * (batch - x_adv[batch_index_1:batch_index_2])\n x_adv[batch_index_1:batch_index_2] = x_adv1 + x_adv2\n if self.estimator.clip_values is not None:\n np.clip(\n x_adv[batch_index_1:batch_index_2],\n self.estimator.clip_values[0],\n self.estimator.clip_values[1],\n out=x_adv[batch_index_1:batch_index_2],\n )\n\n logger.info(\n \"Success rate of DeepFool attack: %.2f%%\",\n 100 * compute_success(self.estimator, x, y, x_adv, batch_size=self.batch_size),\n )\n return x_adv\n\n def _check_params(self) -> None:\n if not isinstance(self.max_iter, (int, np.int)) or self.max_iter <= 0:\n raise ValueError(\"The number of iterations must be a positive integer.\")\n\n if not isinstance(self.nb_grads, (int, np.int)) or self.nb_grads <= 0:\n raise ValueError(\"The number of class gradients to compute must be a positive integer.\")\n\n if self.epsilon < 0:\n raise ValueError(\"The overshoot parameter must not be negative.\")\n\n if self.batch_size <= 0:\n raise ValueError(\"The batch size `batch_size` has to be positive.\")\n"
] |
[
[
"numpy.linspace",
"numpy.max",
"numpy.zeros_like",
"numpy.argmin",
"numpy.mean",
"numpy.where",
"scipy.optimize.OptimizeResult",
"numpy.clip",
"numpy.finfo",
"numpy.asfarray",
"numpy.size",
"scipy._lib._util.check_random_state",
"numpy.copy",
"numpy.std",
"numpy.argmax",
"scipy._lib.six.xrange",
"numpy.array",
"numpy.isfinite",
"numpy.ones",
"numpy.isinf",
"numpy.fabs"
],
[
"numpy.array",
"numpy.zeros",
"numpy.transpose"
],
[
"numpy.zeros"
],
[
"numpy.amax",
"sklearn.linear_model.LogisticRegression",
"numpy.abs",
"numpy.asarray",
"numpy.amin",
"numpy.reshape",
"numpy.testing.assert_almost_equal",
"numpy.argmax",
"sklearn.svm.SVC",
"sklearn.svm.LinearSVC",
"numpy.testing.assert_array_almost_equal"
],
[
"numpy.swapaxes",
"numpy.abs",
"numpy.unique",
"numpy.clip",
"numpy.arange",
"numpy.linalg.norm",
"numpy.argmax",
"numpy.argmin",
"numpy.searchsorted",
"numpy.argsort",
"numpy.where"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
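The DeepFool implementation recorded in this row linearizes the classifier around the current input and steps toward the nearest decision boundary. A minimal NumPy-only sketch of that per-sample step, with hypothetical `logits` and `grads` inputs (per-class scores and per-class input gradients); it is an illustration, not taken from the ART code above:

import numpy as np

def deepfool_step(logits, grads, pred_label, tol=1e-8):
    # Differences relative to the currently predicted class
    f_diff = logits - logits[pred_label]              # shape (C,)
    w_diff = grads - grads[pred_label]                # shape (C, D)
    norms = np.linalg.norm(w_diff, axis=1) + tol
    ratio = np.abs(f_diff) / norms
    ratio[pred_label] = np.inf                        # never pick the current class
    l = np.argmin(ratio)                              # closest linearized boundary
    # Perturbation that approximately reaches boundary l
    return (np.abs(f_diff[l]) / norms[l] ** 2) * w_diff[l]

# Toy usage: 4 classes, 3 input features
rng = np.random.default_rng(0)
logits = rng.normal(size=4)
grads = rng.normal(size=(4, 3))
print(deepfool_step(logits, grads, pred_label=int(np.argmax(logits))))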
hayman42/ft-bert-pyt
|
[
"860e5a0e80d834ae8f663c1e4ffe9d70359e7897"
] |
[
"eval_diff.py"
] |
[
"from typing import OrderedDict\nimport torch\nimport torch.nn.functional as F\nfrom torch.utils.data import DataLoader\nfrom modeling import BertForSequenceClassification, BertConfig\nfrom transformers import AutoTokenizer\nimport datasets\nfrom tqdm import tqdm\nfrom time import time\nfrom quantize import quantize\nfrom quantized_modeling import BertQuantizedEncoder\nimport random\nfrom test_utils import *\n\n\ndef main():\n config = BertConfig.from_json_file(config_file)\n tokenizer = AutoTokenizer.from_pretrained(\n tokenizer_path, config=config)\n rawdata = datasets.load_dataset(\"glue\", \"mrpc\")[\"train\"]\n loader = DataLoader(rawdata, batch_size=n_samples, shuffle=True)\n\n if no_cuda:\n device = torch.device(\"cpu\")\n elif local_rank == -1:\n device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n else:\n torch.cuda.set_device(local_rank)\n device = torch.device(\"cuda\", local_rank)\n\n # set dropout prob to 0\n # config.hidden_dropout_prob = 0\n # config.attention_probs_dropout_prob = 0\n # get each encoder output\n config.output_all_encoded_layers = True\n state_dict = torch.load(init_checkpoint)\n\n if task_name == \"mrpc\" or task_name == \"qnli\":\n orig = BertForSequenceClassification(config, 2)\n quant = BertForSequenceClassification(config, 2)\n elif task_name == \"mnli\":\n orig = BertForSequenceClassification(config, 3)\n quant = BertForSequenceClassification(config, 3)\n apply_quantization(orig, config, state_dict)\n apply_quantization(quant, config, state_dict, quantization_schemes)\n orig.to(device)\n quant.to(device)\n print(quantization_schemes)\n\n orig.eval()\n quant.eval()\n\n if fp16:\n orig.half()\n quant.half()\n\n with torch.no_grad():\n a = time()\n for data in loader:\n processed_data = process_glue_mrpc_data(data, task_name, tokenizer, device)\n for i in range(1):\n eval_diff(orig, quant, processed_data, i)\n break\n print(\"total time:\", time() - a)\n\n\ntask_name = \"mrpc\"\nmodel_dir = f\"/workspace/ft-bert-pyt/model/bert-base-cased-{task_name}/\"\nconfig_file = model_dir + \"config.json\"\ninit_checkpoint = model_dir + \"pytorch_model.bin\"\nvocab_file = model_dir + \"vocab.txt\"\ntokenizer_config_path = model_dir + \"tokenizer_config.json\"\ntokenizer_path = model_dir\nlocal_rank = 0\nn_samples = 100\ndo_lower_case = False\nno_cuda = False\nfp16 = False\nquantization_schemes = [random.randint(0, 3) for i in range(12)]\nquantization_schemes = [3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]\n# quantization_schemes = [0, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0]\npos = 11\n\nif __name__ == \"__main__\":\n main()\n"
] |
[
[
"torch.cuda.set_device",
"torch.load",
"torch.utils.data.DataLoader",
"torch.no_grad",
"torch.cuda.is_available",
"torch.device"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
kansakitw/dragonpilotamd
|
[
"83295e6746e685b22e218bd0bd943df674e42a81"
] |
[
"selfdrive/controls/lib/longitudinal_planner.py"
] |
[
"#!/usr/bin/env python3\nimport math\nimport numpy as np\nfrom common.numpy_fast import interp\n\nimport cereal.messaging as messaging\nfrom cereal import log\nfrom common.realtime import DT_MDL\nfrom common.realtime import sec_since_boot\nfrom selfdrive.modeld.constants import T_IDXS\nfrom selfdrive.config import Conversions as CV\nfrom selfdrive.controls.lib.fcw import FCWChecker\nfrom selfdrive.controls.lib.longcontrol import LongCtrlState\nfrom selfdrive.controls.lib.lead_mpc import LeadMpc\nfrom selfdrive.controls.lib.long_mpc import LongitudinalMpc\nfrom selfdrive.controls.lib.limits_long_mpc import LimitsLongitudinalMpc\nfrom selfdrive.controls.lib.drive_helpers import V_CRUISE_MAX, CONTROL_N\nfrom selfdrive.controls.lib.vision_turn_controller import VisionTurnController\nfrom selfdrive.controls.lib.speed_limit_controller import SpeedLimitController, SpeedLimitResolver\nfrom selfdrive.controls.lib.turn_speed_controller import TurnSpeedController\nfrom selfdrive.controls.lib.events import Events\nfrom selfdrive.swaglog import cloudlog\n\nLON_MPC_STEP = 0.2 # first step is 0.2s\nAWARENESS_DECEL = -0.2 # car smoothly decel at .2m/s^2 when user is distracted\nA_CRUISE_MIN = -1.2\nA_CRUISE_MAX_VALS = [1.2, 1.2, 0.8, 0.6]\nA_CRUISE_MAX_BP = [0., 15., 25., 40.]\n\n# Lookup table for turns\n_A_TOTAL_MAX_V = [1.7, 3.2]\n_A_TOTAL_MAX_BP = [20., 40.]\n\nDP_FOLLOWING_DIST = {\n 0: 1.2,\n 1: 1.5,\n 2: 1.8,\n 3: 2.2,\n}\n\nDP_ACCEL_ECO = 0\nDP_ACCEL_NORMAL = 1\nDP_ACCEL_SPORT = 2\n\n# accel profile by @arne182 modified by @wer5lcy\n_DP_CRUISE_MIN_V = [-2.0, -1.8, -1.6, -1.4, -1.2]\n_DP_CRUISE_MIN_V_ECO = [-2.0, -1.6, -1.4, -1.2, -1.0]\n_DP_CRUISE_MIN_V_SPORT = [-3.0, -2.6, -2.3, -2.0, -1.0]\n_DP_CRUISE_MIN_BP = [0.0, 5.0, 10.0, 20.0, 55.0]\n\n_DP_CRUISE_MAX_V = [1.6, 1.4, 1.0, 0.6, 0.3]\n_DP_CRUISE_MAX_V_ECO = [1.5, 1.3, 0.8, 0.4, 0.2]\n_DP_CRUISE_MAX_V_SPORT = [3.0, 3.5, 3.0, 2.0, 2.0]\n_DP_CRUISE_MAX_BP = [0., 5., 10., 20., 55.]\n\ndef dp_calc_cruise_accel_limits(v_ego, dp_profile):\n if dp_profile == DP_ACCEL_ECO:\n a_cruise_min = interp(v_ego, _DP_CRUISE_MIN_BP, _DP_CRUISE_MIN_V_ECO)\n a_cruise_max = interp(v_ego, _DP_CRUISE_MAX_BP, _DP_CRUISE_MAX_V_ECO)\n elif dp_profile == DP_ACCEL_SPORT:\n a_cruise_min = interp(v_ego, _DP_CRUISE_MIN_BP, _DP_CRUISE_MIN_V_SPORT)\n a_cruise_max = interp(v_ego, _DP_CRUISE_MAX_BP, _DP_CRUISE_MAX_V_SPORT)\n else:\n a_cruise_min = interp(v_ego, _DP_CRUISE_MIN_BP, _DP_CRUISE_MIN_V)\n a_cruise_max = interp(v_ego, _DP_CRUISE_MAX_BP, _DP_CRUISE_MAX_V)\n return a_cruise_min, a_cruise_max\n\ndef get_max_accel(v_ego):\n return interp(v_ego, A_CRUISE_MAX_BP, A_CRUISE_MAX_VALS)\n\n\ndef limit_accel_in_turns(v_ego, angle_steers, a_target, CP):\n \"\"\"\n This function returns a limited long acceleration allowed, depending on the existing lateral acceleration\n this should avoid accelerating when losing the target in turns\n \"\"\"\n\n a_total_max = interp(v_ego, _A_TOTAL_MAX_BP, _A_TOTAL_MAX_V)\n a_y = v_ego**2 * angle_steers * CV.DEG_TO_RAD / (CP.steerRatio * CP.wheelbase)\n a_x_allowed = math.sqrt(max(a_total_max**2 - a_y**2, 0.))\n\n return [a_target[0], min(a_target[1], a_x_allowed)]\n\n\nclass Planner():\n def __init__(self, CP):\n self.CP = CP\n self.mpcs = {}\n self.mpcs['lead0'] = LeadMpc(0)\n self.mpcs['lead1'] = LeadMpc(1)\n self.mpcs['cruise'] = LongitudinalMpc()\n self.mpcs['custom'] = LimitsLongitudinalMpc()\n\n self.fcw = False\n self.fcw_checker = FCWChecker()\n\n self.v_desired = 0.0\n self.a_desired = 0.0\n self.longitudinalPlanSource = 'cruise'\n 
self.alpha = np.exp(-DT_MDL/2.0)\n self.lead_0 = log.ModelDataV2.LeadDataV3.new_message()\n self.lead_1 = log.ModelDataV2.LeadDataV3.new_message()\n\n self.v_desired_trajectory = np.zeros(CONTROL_N)\n self.a_desired_trajectory = np.zeros(CONTROL_N)\n\n # dp\n self.dp_accel_profile_ctrl = False\n self.dp_accel_profile = DP_ACCEL_ECO\n self.dp_following_profile_ctrl = False\n self.dp_following_profile = 3\n self.dp_following_dist = 2.2 # default val\n self.vision_turn_controller = VisionTurnController(CP)\n self.speed_limit_controller = SpeedLimitController()\n self.events = Events()\n self.turn_speed_controller = TurnSpeedController()\n\n def update(self, sm, CP):\n # dp\n self.dp_accel_profile_ctrl = sm['dragonConf'].dpAccelProfileCtrl\n self.dp_accel_profile = sm['dragonConf'].dpAccelProfile\n self.dp_following_profile_ctrl = sm['dragonConf'].dpFollowingProfileCtrl\n self.dp_following_profile = sm['dragonConf'].dpFollowingProfile\n self.dp_following_dist = DP_FOLLOWING_DIST[0 if not self.dp_following_profile_ctrl else self.dp_following_profile]\n self.mpcs['lead0'].set_following_distance(self.dp_following_dist)\n self.mpcs['lead1'].set_following_distance(self.dp_following_dist)\n\n cur_time = sec_since_boot()\n v_ego = sm['carState'].vEgo\n a_ego = sm['carState'].aEgo\n\n v_cruise_kph = sm['controlsState'].vCruise\n v_cruise_kph = min(v_cruise_kph, V_CRUISE_MAX)\n v_cruise = v_cruise_kph * CV.KPH_TO_MS\n\n long_control_state = sm['controlsState'].longControlState\n force_slow_decel = sm['controlsState'].forceDecel\n\n self.lead_0 = sm['radarState'].leadOne\n self.lead_1 = sm['radarState'].leadTwo\n\n enabled = (long_control_state == LongCtrlState.pid) or (long_control_state == LongCtrlState.stopping)\n if not enabled or sm['carState'].gasPressed:\n self.v_desired = v_ego\n self.a_desired = a_ego\n\n # Prevent divergence, smooth in current v_ego\n self.v_desired = self.alpha * self.v_desired + (1 - self.alpha) * v_ego\n self.v_desired = max(0.0, self.v_desired)\n\n # Get acceleration and active solutions for custom long mpc.\n a_mpc, active_mpc, c_source = self.mpc_solutions(enabled, self.v_desired, self.a_desired, v_cruise, sm)\n\n\n if not self.dp_accel_profile_ctrl:\n accel_limits = [A_CRUISE_MIN, get_max_accel(v_ego)]\n else:\n accel_limits = dp_calc_cruise_accel_limits(v_cruise, self.dp_accel_profile)\n accel_limits_turns = limit_accel_in_turns(v_ego, sm['carState'].steeringAngleDeg, accel_limits, self.CP)\n if force_slow_decel:\n # if required so, force a smooth deceleration\n accel_limits_turns[1] = min(accel_limits_turns[1], AWARENESS_DECEL)\n accel_limits_turns[0] = min(accel_limits_turns[0], accel_limits_turns[1])\n\n # clip limits, cannot init MPC outside of bounds\n accel_limits_turns[0] = min(accel_limits_turns[0], self.a_desired)\n accel_limits_turns[1] = max(accel_limits_turns[1], self.a_desired)\n self.mpcs['cruise'].set_accel_limits(accel_limits_turns[0], accel_limits_turns[1])\n\n # ensure lower accel limit (for braking) is lower than target acc for custom controllers.\n accel_limits = [min(accel_limits_turns[0], a_mpc['custom']), accel_limits_turns[1]]\n self.mpcs['custom'].set_accel_limits(accel_limits[0], accel_limits[1])\n\n next_a = np.inf\n for key in self.mpcs:\n self.mpcs[key].set_cur_state(self.v_desired, self.a_desired)\n self.mpcs[key].update(sm['carState'], sm['radarState'], v_cruise, a_mpc[key], active_mpc[key])\n # picks slowest solution from accel in ~0.2 seconds\n if self.mpcs[key].status and active_mpc[key] and self.mpcs[key].a_solution[5] < next_a:\n 
self.longitudinalPlanSource = c_source if key == 'custom' else key\n self.v_desired_trajectory = self.mpcs[key].v_solution[:CONTROL_N]\n self.a_desired_trajectory = self.mpcs[key].a_solution[:CONTROL_N]\n self.j_desired_trajectory = self.mpcs[key].j_solution[:CONTROL_N]\n next_a = self.mpcs[key].a_solution[5]\n\n # determine fcw\n if self.mpcs['lead0'].new_lead:\n self.fcw_checker.reset_lead(cur_time)\n blinkers = sm['carState'].leftBlinker or sm['carState'].rightBlinker\n self.fcw = self.fcw_checker.update(self.mpcs['lead0'].mpc_solution, cur_time,\n sm['controlsState'].active,\n v_ego, sm['carState'].aEgo,\n self.lead_1.dRel, self.lead_1.vLead, self.lead_1.aLeadK,\n self.lead_1.yRel, self.lead_1.vLat,\n self.lead_1.fcw, blinkers) and not sm['carState'].brakePressed\n if self.fcw:\n cloudlog.info(\"FCW triggered %s\", self.fcw_checker.counters)\n\n # Interpolate 0.05 seconds and save as starting point for next iteration\n a_prev = self.a_desired\n self.a_desired = float(interp(DT_MDL, T_IDXS[:CONTROL_N], self.a_desired_trajectory))\n self.v_desired = self.v_desired + DT_MDL * (self.a_desired + a_prev)/2.0\n\n def publish(self, sm, pm):\n plan_send = messaging.new_message('longitudinalPlan')\n\n plan_send.valid = sm.all_alive_and_valid(service_list=['carState', 'controlsState'])\n\n longitudinalPlan = plan_send.longitudinalPlan\n longitudinalPlan.modelMonoTime = sm.logMonoTime['modelV2']\n longitudinalPlan.processingDelay = (plan_send.logMonoTime / 1e9) - sm.logMonoTime['modelV2']\n\n longitudinalPlan.speeds = [float(x) for x in self.v_desired_trajectory]\n longitudinalPlan.accels = [float(x) for x in self.a_desired_trajectory]\n longitudinalPlan.jerks = [float(x) for x in self.j_desired_trajectory]\n\n longitudinalPlan.hasLead = self.mpcs['lead0'].status\n longitudinalPlan.longitudinalPlanSource = self.longitudinalPlanSource\n longitudinalPlan.fcw = self.fcw\n\n longitudinalPlan.visionTurnControllerState = self.vision_turn_controller.state\n longitudinalPlan.visionTurnSpeed = float(self.vision_turn_controller.v_turn)\n\n longitudinalPlan.speedLimitControlState = self.speed_limit_controller.state\n longitudinalPlan.speedLimit = float(self.speed_limit_controller.speed_limit)\n longitudinalPlan.speedLimitOffset = float(self.speed_limit_controller.speed_limit_offset)\n longitudinalPlan.distToSpeedLimit = float(self.speed_limit_controller.distance)\n longitudinalPlan.isMapSpeedLimit = bool(self.speed_limit_controller.source == SpeedLimitResolver.Source.map_data)\n longitudinalPlan.eventsDEPRECATED = self.events.to_msg()\n\n longitudinalPlan.turnSpeedControlState = self.turn_speed_controller.state\n longitudinalPlan.turnSpeed = float(self.turn_speed_controller.speed_limit)\n longitudinalPlan.distToTurn = float(self.turn_speed_controller.distance)\n longitudinalPlan.turnSign = int(self.turn_speed_controller.turn_sign)\n\n pm.send('longitudinalPlan', plan_send)\n\n def mpc_solutions(self, enabled, v_ego, a_ego, v_cruise, sm):\n # Update controllers\n self.vision_turn_controller.update(enabled, v_ego, a_ego, v_cruise, sm)\n self.events = Events()\n self.speed_limit_controller.update(enabled, v_ego, a_ego, sm, v_cruise, self.events)\n self.turn_speed_controller.update(enabled, v_ego, a_ego, sm)\n\n # Pick solution with lowest acceleration target.\n a_solutions = {None: float(\"inf\")}\n\n if self.vision_turn_controller.is_active:\n a_solutions['turn'] = self.vision_turn_controller.a_target\n\n if self.speed_limit_controller.is_active:\n a_solutions['limit'] = 
self.speed_limit_controller.a_target\n\n if self.turn_speed_controller.is_active:\n a_solutions['turnlimit'] = self.turn_speed_controller.a_target\n\n source = min(a_solutions, key=a_solutions.get)\n\n a_sol = {\n 'cruise': a_ego, # Irrelevant\n 'lead0': a_ego, # Irrelevant\n 'lead1': a_ego, # Irrelevant\n 'custom': 0. if source is None else a_solutions[source],\n }\n\n active_sol = {\n 'cruise': True, # Irrelevant\n 'lead0': True, # Irrelevant\n 'lead1': True, # Irrelevant\n 'custom': source is not None,\n }\n\n return a_sol, active_sol, source\n"
] |
[
[
"numpy.exp",
"numpy.zeros"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
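The longitudinal planner recorded in this row maps ego speed to acceleration limits through breakpoint tables via `interp`. A short sketch with plain `numpy.interp`, which gives the same clamped piecewise-linear lookup for these 1-D tables; the ECO-profile numbers are copied from the source above:

import numpy as np

_DP_CRUISE_MAX_BP = [0.0, 5.0, 10.0, 20.0, 55.0]   # ego speed breakpoints, m/s
_DP_CRUISE_MAX_V_ECO = [1.5, 1.3, 0.8, 0.4, 0.2]   # accel limits at those speeds, m/s^2

for v_ego in (0.0, 7.5, 30.0, 80.0):
    a_max = np.interp(v_ego, _DP_CRUISE_MAX_BP, _DP_CRUISE_MAX_V_ECO)
    print("v_ego=%5.1f m/s -> a_max=%.2f m/s^2" % (v_ego, a_max))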
sfjddrgrg/test
|
[
"92cea908acb4c700f8b2d74dd19ed12e78ede73a"
] |
[
"assignments/2018/assignment1/cs231n/classifiers/linear_classifier.py"
] |
[
"from __future__ import print_function\n\nimport numpy as np\nfrom cs231n.classifiers.linear_svm import *\nfrom cs231n.classifiers.softmax import *\n\nclass LinearClassifier(object):\n\n def __init__(self):\n self.W = None\n\n def train(self, X, y, learning_rate=1e-3, reg=1e-5, num_iters=100,\n batch_size=200, verbose=False):\n \"\"\"\n Train this linear classifier using stochastic gradient descent.\n\n Inputs:\n - X: A numpy array of shape (N, D) containing training data; there are N\n training samples each of dimension D.\n - y: A numpy array of shape (N,) containing training labels; y[i] = c\n means that X[i] has label 0 <= c < C for C classes.\n - learning_rate: (float) learning rate for optimization.\n - reg: (float) regularization strength.\n - num_iters: (integer) number of steps to take when optimizing\n - batch_size: (integer) number of training examples to use at each step.\n - verbose: (boolean) If true, print progress during optimization.\n\n Outputs:\n A list containing the value of the loss function at each training iteration.\n \"\"\"\n num_train, dim = X.shape\n num_classes = np.max(y) + 1 # assume y takes values 0...K-1 where K is number of classes\n if self.W is None:\n # lazily initialize W\n self.W = 0.001 * np.random.randn(dim, num_classes)\n\n # Run stochastic gradient descent to optimize W\n loss_history = []\n for it in range(num_iters):\n X_batch = None\n y_batch = None\n\n #########################################################################\n # TODO: #\n # Sample batch_size elements from the training data and their #\n # corresponding labels to use in this round of gradient descent. #\n # Store the data in X_batch and their corresponding labels in #\n # y_batch; after sampling X_batch should have shape (dim, batch_size) #\n # and y_batch should have shape (batch_size,) #\n # #\n # Hint: Use np.random.choice to generate indices. Sampling with #\n # replacement is faster than sampling without replacement. #\n #########################################################################\n #pass\n batch_idx = np.random.choice(num_train, batch_size, replace=True)\n X_batch = X[batch_idx]\n y_batch = y[batch_idx]\n #########################################################################\n # END OF YOUR CODE #\n #########################################################################\n\n # evaluate loss and gradient\n loss, grad = self.loss(X_batch, y_batch, reg)\n loss_history.append(loss)\n\n # perform parameter update\n #########################################################################\n # TODO: #\n # Update the weights using the gradient and the learning rate. #\n #########################################################################\n #pass\n self.W += -learning_rate * grad\n #########################################################################\n # END OF YOUR CODE #\n #########################################################################\n\n if verbose and it % 100 == 0:\n print('iteration %d / %d: loss %f' % (it, num_iters, loss))\n\n return loss_history\n\n def predict(self, X):\n \"\"\"\n Use the trained weights of this linear classifier to predict labels for\n data points.\n\n Inputs:\n - X: A numpy array of shape (N, D) containing training data; there are N\n training samples each of dimension D.\n\n Returns:\n - y_pred: Predicted labels for the data in X. 
y_pred is a 1-dimensional\n array of length N, and each element is an integer giving the predicted\n class.\n \"\"\"\n y_pred = np.zeros(X.shape[0])\n ###########################################################################\n # TODO: #\n # Implement this method. Store the predicted labels in y_pred. #\n ###########################################################################\n #pass\n scores = X.dot(self.W)\n y_pred = np.argmax(scores, axis=1)\n ###########################################################################\n # END OF YOUR CODE #\n ###########################################################################\n return y_pred\n \n def loss(self, X_batch, y_batch, reg):\n \"\"\"\n Compute the loss function and its derivative. \n Subclasses will override this.\n\n Inputs:\n - X_batch: A numpy array of shape (N, D) containing a minibatch of N\n data points; each point has dimension D.\n - y_batch: A numpy array of shape (N,) containing labels for the minibatch.\n - reg: (float) regularization strength.\n\n Returns: A tuple containing:\n - loss as a single float\n - gradient with respect to self.W; an array of the same shape as W\n \"\"\"\n pass\n\n\nclass LinearSVM(LinearClassifier):\n \"\"\" A subclass that uses the Multiclass SVM loss function \"\"\"\n\n def loss(self, X_batch, y_batch, reg):\n return svm_loss_vectorized(self.W, X_batch, y_batch, reg)\n\n\nclass Softmax(LinearClassifier):\n \"\"\" A subclass that uses the Softmax + Cross-entropy loss function \"\"\"\n\n def loss(self, X_batch, y_batch, reg):\n return softmax_loss_vectorized(self.W, X_batch, y_batch, reg)\n\n"
] |
[
[
"numpy.random.choice",
"numpy.max",
"numpy.argmax",
"numpy.random.randn",
"numpy.zeros"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
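The cs231n `train` method in this row fills its TODO blocks with `np.random.choice` minibatch sampling and a plain gradient-descent update. A self-contained sketch of the same loop on a toy problem; the squared-error loss here is only a stand-in for `svm_loss_vectorized` / `softmax_loss_vectorized`, which are not reproduced in this row:

import numpy as np

rng = np.random.default_rng(0)
N, D, C = 500, 10, 3
X = rng.normal(size=(N, D))
y = rng.integers(0, C, size=N)
W = 0.001 * rng.normal(size=(D, C))

learning_rate, batch_size = 1e-2, 64
for it in range(200):
    # Sample with replacement, as the hint in the source suggests
    idx = rng.choice(N, batch_size, replace=True)
    X_batch, y_batch = X[idx], y[idx]
    # Toy squared-error loss against one-hot targets (stand-in loss)
    scores = X_batch.dot(W)
    grad = X_batch.T.dot(scores - np.eye(C)[y_batch]) / batch_size
    W -= learning_rate * grad

y_pred = np.argmax(X.dot(W), axis=1)      # predict() pattern from the source
print("train accuracy:", np.mean(y_pred == y))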
SamProell/yarppg
|
[
"9e3c43b502423562a5cb20c670a5cd878ed30c71"
] |
[
"yarppg/rppg/rppg.py"
] |
[
"from datetime import datetime\nimport pathlib\n\nimport cv2\nimport numpy as np\nimport pandas as pd\nfrom PyQt5.QtCore import pyqtSignal, QObject\n\nfrom yarppg.rppg.camera import Camera\n\n\ndef write_dataframe(path, df):\n path = pathlib.Path(path)\n if path.suffix.lower() == \".csv\":\n df.to_csv(path, float_format=\"%.7f\", index=False)\n elif path.suffix.lower() in {\".pkl\", \".pickle\"}:\n df.to_pickle(path)\n elif path.suffix.lower() in {\".feather\"}:\n df.to_feather(path)\n else:\n raise IOError(\"Unknown file extension '{}'\".format(path.suffix))\n\n\nclass RPPG(QObject):\n new_update = pyqtSignal(float)\n _dummy_signal = pyqtSignal(float)\n\n def __init__(self, roi_detector, parent=None, video=0,\n hr_calculator=None):\n QObject.__init__(self, parent)\n self.roi = None\n self._processors = []\n self._roi_detector = roi_detector\n\n self._set_camera(video)\n\n self._dts = []\n self.last_update = datetime.now()\n\n self.output_frame = None\n self.hr_calculator = hr_calculator\n\n if self.hr_calculator is not None:\n self.new_hr = self.hr_calculator.new_hr\n else:\n self.new_hr = self._dummy_signal\n\n self.output_filename = None\n\n def _set_camera(self, video):\n self._cam = Camera(video=video, parent=self)\n self._cam.new_frame.connect(self.frame_received)\n\n def add_processor(self, processor):\n self._processors.append(processor)\n\n def frame_received(self, frame):\n self.output_frame = frame\n self.roi = self._roi_detector(frame)\n\n for processor in self._processors:\n processor(frame[self.roi[1]:self.roi[3], self.roi[0]:self.roi[2]])\n\n if self.hr_calculator is not None:\n self.hr_calculator.update(self)\n\n dt = self._update_time()\n self.new_update.emit(dt)\n\n def _update_time(self):\n dt = (datetime.now() - self.last_update).total_seconds()\n self.last_update = datetime.now()\n self._dts.append(dt)\n\n return dt\n\n def get_vs(self, n=None):\n for processor in self._processors:\n if n is None:\n yield np.array(processor.vs, copy=True)\n else:\n yield np.array(processor.vs[-n:], copy=True)\n\n def get_ts(self, n=None):\n if n is None:\n dts = self._dts\n else:\n dts = self._dts[-n:]\n return np.cumsum(dts)\n\n def get_fps(self, n=5):\n return 1/np.mean(self._dts[-n:])\n\n def save_signals(self):\n path = pathlib.Path(self.output_filename)\n path.parent.mkdir(parents=True, exist_ok=True)\n\n df = self.get_dataframe()\n write_dataframe(path)\n\n def get_dataframe(self):\n names = [\"ts\"] + [\"p%d\" % i for i in range(self.num_processors)]\n data = np.vstack((self.get_ts(),) + tuple(self.get_vs())).T\n\n return pd.DataFrame(data=data, columns=names)\n\n @property\n def num_processors(self):\n return len(self._processors)\n\n @property\n def processor_names(self):\n return [str(p) for p in self._processors]\n\n def start(self):\n self._cam.start()\n\n def finish(self):\n print(\"finishing up...\")\n if self.output_filename is not None:\n self.save_signals()\n self._cam.stop()\n"
] |
[
[
"numpy.array",
"numpy.mean",
"numpy.cumsum",
"pandas.DataFrame"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
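In the yarppg snapshot above, `RPPG.get_dataframe` stacks cumulative frame intervals with one column per processor, and `write_dataframe` dispatches on the file suffix; note that `save_signals` in this snapshot calls `write_dataframe(path)` without the `df` it just built, so that call would need both arguments to run. A small sketch of the dataframe assembly with hypothetical stand-ins for `self._dts` and `processor.vs`:

import numpy as np
import pandas as pd

dts = [0.033, 0.031, 0.035, 0.032]        # per-frame intervals in seconds
vs = [[0.10, 0.20, 0.15, 0.30],           # processor 0 signal
      [1.00, 0.90, 1.10, 1.05]]           # processor 1 signal

ts = np.cumsum(dts)                       # get_ts()
data = np.vstack((ts,) + tuple(np.array(v) for v in vs)).T
df = pd.DataFrame(data=data, columns=["ts", "p0", "p1"])
print(df)
print("fps estimate:", 1.0 / np.mean(dts[-5:]))   # get_fps(n=5)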
feimeng93/probabilistic-bvp-solver
|
[
"d6b38d4ff7b3ab6cf3003de30eb2f6eeb42c0beb"
] |
[
"experiments/work_precision.py"
] |
[
"\"\"\"Try out probsolve_bvp.\"\"\"\nimport time\n\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\nfrom probnum import diffeq, filtsmooth\nfrom probnum import random_variables as random_variables\nfrom probnum import random_variables, statespace\nfrom probnum._randomvariablelist import _RandomVariableList\nfrom probnumeval import timeseries\nfrom scipy.integrate import solve_bvp\nfrom tqdm import tqdm\n\nfrom bvps import bridges, bvp_solver, problem_examples\n\n# Easy aliases\nanees = timeseries.anees\nrmse = timeseries.root_mean_square_error\n\n\nTMAX = 1.0\nXI = 0.001\n\n\nbvp = problem_examples.bratus_second_order(tmax=1.0)\nbvp1st = bvp.to_first_order()\n\nTOL = 1e-5\n\ninitial_grid = np.linspace(bvp.t0, bvp.tmax, 300)\ninitial_guess = np.ones((bvp1st.dimension, len(initial_grid)))\n\nrefsol = solve_bvp(bvp1st.f, bvp1st.scipy_bc, initial_grid, initial_guess, tol=TOL)\nrefsol_fine = solve_bvp(\n bvp1st.f, bvp1st.scipy_bc, initial_grid, initial_guess, tol=1e-10\n)\nassert refsol_fine.success\nbvp.solution = refsol_fine.sol\n\nresults = {}\n\ntestlocations = np.linspace(bvp.t0, bvp.tmax, 50)\n\n\nfor q in [3, 4, 5]:\n print()\n print()\n print(\"q\", q)\n print()\n\n results[q] = {}\n\n ibm = statespace.IBM(\n ordint=q,\n spatialdim=1,\n forward_implementation=\"sqrt\",\n backward_implementation=\"sqrt\",\n )\n # ibm.equivalent_discretisation_preconditioned._proc_noise_cov_cholesky *= 1e5\n\n # initial_grid = np.linspace(bvp.t0, bvp.tmax, 2)\n\n # print(len(refsol.x))\n # reference_solution = lambda *args, **kwargs: refsol_fine.sol(*args, **kwargs)[\n # 0\n # ].T.reshape((-1, 1))\n # scipy_sol = lambda *args, **kwargs: refsol.sol(*args, **kwargs)[0].T.reshape(\n # (-1, 1)\n # )\n\n # error = rmse(scipy_sol, reference_solution, testlocations)\n # print(\"Scipyerror:\", error)\n\n evalgrid = np.linspace(bvp.t0, bvp.tmax, 250, endpoint=True)\n\n for tol_order in np.arange(1.0, 9.0):\n if q == 3:\n if tol_order > 7:\n tol_order = 7.0\n TOL = 10.0 ** (-tol_order)\n\n print(\"tol\", TOL)\n solver = bvp_solver.BVPSolver.from_default_values_std_refinement(\n ibm, initial_sigma_squared=1e2, normalise_with_interval_size=False\n )\n initial_grid = np.linspace(bvp.t0, bvp.tmax, 3)\n initial_guess = np.ones((len(initial_grid), bvp.dimension))\n\n initial_posterior, sigma_squared = solver.compute_initialisation(\n bvp, initial_grid, initial_guess=initial_guess, use_bridge=True\n )\n\n solution_gen = solver.solution_generator(\n bvp,\n atol=TOL,\n rtol=TOL,\n initial_posterior=initial_posterior,\n maxit_ieks=5,\n maxit_em=1,\n yield_ieks_iterations=False,\n )\n\n start_time = time.time()\n for post, ssq in solution_gen:\n print(len(post.locations))\n end_time = time.time() - start_time\n solution = diffeq.KalmanODESolution(post)\n\n testlocations = np.linspace(bvp.t0, bvp.tmax)\n reference_solution = lambda *args, **kwargs: bvp.solution(*args, **kwargs)[\n 0\n ].reshape((-1, 1))\n # plt.plot(testlocations, reference_solution(testlocations))\n # plt.plot(testlocations, solution(testlocations).mean[:, 0])\n # plt.show()\n solution_mean = (\n lambda *args, **kwargs: solution(*args, **kwargs)\n .mean[:, 0]\n .reshape((-1, 1))\n )\n\n print(ssq)\n chi2 = anees(solution, reference_solution, testlocations, damping=1e-30) / ssq\n\n initial_guess = np.ones((len(initial_grid), bvp1st.dimension)).T\n\n start_time_scipy = time.time()\n scipy_solution = solve_bvp(\n bvp1st.f, bvp1st.scipy_bc, initial_grid, initial_guess, tol=TOL\n )\n runtime_scipy = time.time() - 
start_time_scipy\n assert scipy_solution.success\n\n # How accurate would scipy be?\n scipy_sol_for_rmse = lambda *args: scipy_solution.sol(*args)[0][:, None]\n error_scipy = timeseries.root_mean_square_error(\n scipy_sol_for_rmse, reference_solution, testlocations\n )\n\n error = rmse(solution_mean, reference_solution, testlocations)\n results[q][TOL] = {}\n results[q][TOL][\"chi2\"] = chi2\n results[q][TOL][\"error\"] = error\n results[q][TOL][\"N\"] = len(solution.locations)\n results[q][TOL][\"time\"] = end_time\n results[q][TOL][\"scipy_error\"] = error_scipy\n results[q][TOL][\"scipy_N\"] = len(scipy_solution.x)\n results[q][TOL][\"scipy_time\"] = runtime_scipy\n\n print(chi2, error, end_time)\nprint(results)\n\nimport json\n\nwith open(\"./data/bratus_problem_work_precision.json\", \"w\") as outfile:\n json.dump(results, outfile)\n"
] |
[
[
"numpy.arange",
"scipy.integrate.solve_bvp",
"numpy.linspace"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"1.6",
"1.10",
"1.4",
"1.9",
"0.19",
"1.5",
"0.18",
"1.2",
"1.7",
"1.0",
"1.3",
"1.8"
],
"tensorflow": []
}
] |
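The work-precision script above uses SciPy's `solve_bvp` as its reference solver on a Bratu-type problem taken from `bvps.problem_examples`, whose definition is not reproduced in this row. A standalone sketch of that reference call, with the classic Bratu equation y'' + exp(y) = 0, y(0) = y(1) = 0 written out directly:

import numpy as np
from scipy.integrate import solve_bvp

def f(x, y):
    # State y[0] = u, y[1] = u'; first-order form of u'' + exp(u) = 0
    return np.vstack((y[1], -np.exp(y[0])))

def bc(ya, yb):
    # Boundary conditions u(0) = 0, u(1) = 0
    return np.array([ya[0], yb[0]])

x = np.linspace(0.0, 1.0, 300)
y0 = np.ones((2, x.size))
sol = solve_bvp(f, bc, x, y0, tol=1e-5)
print(sol.success, sol.x.size)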
weiliansong/cycle-consistent-vae
|
[
"d92f4cebcbe0a90bc2f3369853fec39084a11fa2"
] |
[
"networks.py"
] |
[
"import numpy as np\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torchvision import datasets\nfrom torch.autograd import Variable\nfrom torch.utils.data import DataLoader\n\nfrom itertools import cycle\nfrom collections import OrderedDict\nfrom utils import reparameterize, transform_config\n\n\nclass Encoder(nn.Module):\n def __init__(self, style_dim, class_dim):\n super(Encoder, self).__init__()\n\n self.conv_model = nn.Sequential(OrderedDict([\n ('convolution_1',\n nn.Conv2d(in_channels=3, out_channels=16, kernel_size=5, stride=2, padding=1, bias=True)),\n ('convolution_1_in', nn.InstanceNorm2d(num_features=16, track_running_stats=True)),\n ('ReLU_1', nn.ReLU(inplace=True)),\n\n ('convolution_2',\n nn.Conv2d(in_channels=16, out_channels=32, kernel_size=5, stride=2, padding=1, bias=True)),\n ('convolution_2_in', nn.InstanceNorm2d(num_features=32, track_running_stats=True)),\n ('ReLU_2', nn.ReLU(inplace=True)),\n\n ('convolution_3',\n nn.Conv2d(in_channels=32, out_channels=64, kernel_size=5, stride=2, padding=1, bias=True)),\n ('convolution_3_in', nn.InstanceNorm2d(num_features=64, track_running_stats=True)),\n ('ReLU_3', nn.ReLU(inplace=True))\n ]))\n\n # Style embeddings\n self.style_mu = nn.Linear(in_features=256, out_features=style_dim, bias=True)\n self.style_logvar = nn.Linear(in_features=256, out_features=style_dim, bias=True)\n\n # Class embeddings\n self.class_output = nn.Linear(in_features=256, out_features=class_dim, bias=True)\n\n def forward(self, x):\n x = self.conv_model(x)\n x = x.view(x.size(0), x.size(1) * x.size(2) * x.size(3))\n\n style_embeddings_mu = self.style_mu(x)\n style_embeddings_logvar = self.style_logvar(x)\n class_embeddings = self.class_output(x)\n\n return style_embeddings_mu, style_embeddings_logvar, class_embeddings\n\n\nclass Decoder(nn.Module):\n def __init__(self, style_dim, class_dim):\n super(Decoder, self).__init__()\n\n # Style embeddings input\n # NOTE This is interesting, the latent vectors are simply embeddings, used to retrive something bigger\n self.style_input = nn.Linear(in_features=style_dim, out_features=256, bias=True)\n\n # Class embeddings input\n # NOTE Same with the class latent as well, see above note\n self.class_input = nn.Linear(in_features=class_dim, out_features=256, bias=True)\n\n self.deconv_model = nn.Sequential(OrderedDict([\n ('deconvolution_1',\n nn.ConvTranspose2d(in_channels=128, out_channels=32, kernel_size=4, stride=2, padding=0, bias=True)),\n ('deconvolution_1_in', nn.InstanceNorm2d(num_features=32, track_running_stats=True)),\n ('LeakyReLU_1', nn.LeakyReLU(negative_slope=0.2, inplace=True)),\n\n ('deconvolution_2',\n nn.ConvTranspose2d(in_channels=32, out_channels=16, kernel_size=4, stride=2, padding=0, bias=True)),\n ('deconvolution_2_in', nn.InstanceNorm2d(num_features=16, track_running_stats=True)),\n ('LeakyReLU_2', nn.LeakyReLU(negative_slope=0.2, inplace=True)),\n\n ('deconvolution_3',\n nn.ConvTranspose2d(in_channels=16, out_channels=3, kernel_size=4, stride=2, padding=1, bias=True)),\n ('sigmoid_final', nn.Sigmoid())\n ]))\n\n def forward(self, style_embeddings, class_embeddings):\n style_embeddings = F.leaky_relu_(self.style_input(style_embeddings), negative_slope=0.2)\n class_embeddings = F.leaky_relu_(self.class_input(class_embeddings), negative_slope=0.2)\n\n x = torch.cat((style_embeddings, class_embeddings), dim=1)\n x = x.view(x.size(0), 128, 2, 2)\n x = self.deconv_model(x)\n\n return x\n\n\nclass Classifier(nn.Module):\n def __init__(self, z_dim, num_classes):\n 
super(Classifier, self).__init__()\n\n self.fc_model = nn.Sequential(OrderedDict([\n ('fc_1', nn.Linear(in_features=z_dim, out_features=256, bias=True)),\n ('fc_1_bn', nn.BatchNorm1d(num_features=256)),\n ('LeakyRelu_1', nn.LeakyReLU(negative_slope=0.2, inplace=True)),\n\n ('fc_2', nn.Linear(in_features=256, out_features=256, bias=True)),\n ('fc_2_bn', nn.BatchNorm1d(num_features=256)),\n ('LeakyRelu_2', nn.LeakyReLU(negative_slope=0.2, inplace=True)),\n\n ('fc_3', nn.Linear(in_features=256, out_features=num_classes, bias=True))\n ]))\n\n def forward(self, z):\n x = self.fc_model(z)\n\n return x\n\nclass Discriminator(nn.Module):\n def __init__(self):\n super(Discriminator, self).__init__()\n\n # Shape of the output of the generator\n img_shape = [3, 28, 28]\n\n self.dis_model = nn.Sequential(\n nn.Linear(int(np.prod(img_shape)), 512),\n nn.LeakyReLU(0.2, inplace=True),\n nn.Linear(512, 256),\n nn.LeakyReLU(0.2, inplace=True),\n nn.Linear(256, 1),\n nn.Sigmoid(),\n )\n\n def forward(self, img):\n img_flat = img.view(img.size(0), -1)\n validity = self.dis_model(img_flat)\n\n return validity\n\nif __name__ == '__main__':\n \"\"\"\n test for network outputs\n \"\"\"\n encoder = Encoder(16, 16)\n decoder = Decoder(16, 16)\n\n classifier = Classifier(z_dim=16, num_classes=10)\n\n mnist = datasets.MNIST(root='mnist', download=True, train=True, transform=transform_config)\n loader = cycle(DataLoader(mnist, batch_size=64, shuffle=True, num_workers=0, drop_last=True))\n\n image_batch, labels_batch = next(loader)\n\n mu, logvar, class_latent_space = encoder(Variable(image_batch))\n style_latent_space = reparameterize(training=True, mu=mu, logvar=logvar)\n\n reconstructed_image = decoder(style_latent_space, class_latent_space)\n classifier_pred = classifier(style_latent_space)\n\n print(reconstructed_image.size())\n print(classifier_pred.size())\n"
] |
[
[
"torch.nn.BatchNorm1d",
"torch.nn.ConvTranspose2d",
"torch.cat",
"torch.nn.Conv2d",
"torch.utils.data.DataLoader",
"torch.nn.Sigmoid",
"torch.nn.Linear",
"torch.nn.InstanceNorm2d",
"torch.nn.LeakyReLU",
"numpy.prod",
"torch.nn.ReLU",
"torch.autograd.Variable"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
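The encoder in this row returns `style_mu` / `style_logvar`, and the test block feeds them to a `reparameterize` helper imported from `utils` but not included here. A common implementation of that trick, given as an assumption about what the helper does rather than its actual source:

import torch

def reparameterize(training, mu, logvar):
    if not training:
        return mu                        # use the mean at eval time
    std = torch.exp(0.5 * logvar)        # log-variance -> standard deviation
    eps = torch.randn_like(std)          # epsilon ~ N(0, I)
    return mu + eps * std

mu = torch.zeros(4, 16)
logvar = torch.zeros(4, 16)
print(reparameterize(training=True, mu=mu, logvar=logvar).shape)  # torch.Size([4, 16])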
autoplot/python
|
[
"5392e329bc69f3ac040c5f4932a13b0ab7a9615a"
] |
[
"autoplot/autoplot.py"
] |
[
"from __future__ import print_function\n\ndef version():\n return '0.3.9'\n\ndef printNoNewline(s):\n print(s, end=' ')\n \ndef javaaddpath(url='', jdwpPort=-1):\n '''Start up JVM, import JAR at URL, and import the paths starting with org \n into the Python namespace.\n com= jpype.JPackage('com') \n can be used to the com package into the Python namespace.\n Example:\n org = javaaddpath('http://autoplot.org/devel/autoplot.jar')\n if no url is provided, then the default http://autoplot.org/latest/autoplot.jar is used.\n '''\n\n import os\n import jpype\n import tempfile\n\n # TODO: Use requests package.\n try:\n # For Python 3.0 and later\n from urllib.request import urlopen\n except ImportError:\n # Fall back to Python 2's urllib2\n from urllib2 import urlopen\n\n if url=='':\n url='http://autoplot.org/latest/autoplot.jar'\n\n file_name = url.split('/')[-1]\n u = urlopen(url)\n i = u.info()\n file_size = int(i.get(\"Content-Length\"))\n cacheFile = tempfile.gettempdir()+os.sep+file_name\n useCache = False\n if os.path.exists(cacheFile):\n cacheFileSize = os.path.getsize(cacheFile)\n print('cache file size: %d' % cacheFileSize)\n if cacheFileSize == file_size:\n useCache = True\n\n if useCache:\n print(\"Using existing file: %s\" % cacheFile)\n\n else:\n print(\"Downloading: %s Bytes: %s\" % (file_name, file_size))\n\n file_size_dl = 0\n block_sz = 8192\n\n f = open(cacheFile, 'wb')\n\n while True:\n buffer = u.read(block_sz)\n if not buffer:\n break\n\n file_size_dl += len(buffer)\n f.write(buffer)\n status = r\"%10d [%3.2f%%]\" % (file_size_dl, file_size_dl * 100. / file_size)\n status = status + chr(8)*(len(status)+1)\n status = '\\r' + status\n printNoNewline(status) # support Python 2.7 / 3.x\n\n print('')\n\n f.close()\n\n if not jpype.isJVMStarted():\n if jdwpPort > -1:\n print('Java is waiting for debugger at port %d' % jdwpPort)\n jpype.startJVM(jpype.getDefaultJVMPath(), '-Djava.class.path='+cacheFile, '-Xdebug',\n '-Xrunjdwp:transport=dt_socket,server=y,suspend=y,address=%d' % jdwpPort)\n\n else:\n print('Java is starting')\n jpype.startJVM(jpype.getDefaultJVMPath(), '-Djava.class.path='+cacheFile)\n else:\n print('Java is already running.')\n\n return jpype.JPackage(\"org\")\n\n\ndef to_ndarray(apds, name):\n 'extract the data identified by name to numpy array, using datetime64 for times.'\n import numpy as np\n import jpype\n org = jpype.JPackage('org')\n apds.setPreferredUnits('microseconds since 2000-01-01T00:00')\n u = org.das2.datum.Units.lookupUnits(apds.propertyAsString(name, 'UNITS'))\n if u.isConvertibleTo(org.das2.datum.Units.us2000):\n g_base = np.datetime64('2000-01-01T00:00:00Z')\n dd = apds.values(name)\n result = np.array([g_base + np.timedelta64(int(dd[i]*1000), 'ns') for i in range(len(dd))])\n else:\n dd = apds.values(name)\n result = np.array(dd)\n return result\n\n\ndef to_qdataset(X, Y=None, Z=None):\n '''convert the ndarrays or array like objects to Autoplot QDataSet objects.\n datetime64 are handled by converting to QDataSet with Units.us2000'''\n import jpype\n if not jpype.isJVMStarted():\n raise Exception('Java is not running, use javaaddpath')\n org = jpype.JPackage('org')\n dataset = org.das2.qds.ops.Ops.dataset\n link = org.das2.qds.ops.Ops.link\n transpose = org.das2.qds.ops.Ops.transpose\n import numpy as np\n\n if Y is None and Z is None:\n if isinstance(X, jpype.JavaObject):\n xds = X # assume it's a QDataSet already\n else:\n if not hasattr(X, 'dtype'):\n X = np.array(X)\n if (str(X.dtype).startswith('datetime64') or 
str(X.dtype).startswith('<M8')):\n g_base = np.datetime64('2000-01-01T00:00:00Z')\n X = (X - g_base) / np.timedelta64(1000, 'ns')\n xds = dataset(jpype.JArray(jpype.JDouble, X.ndim)(X.tolist()))\n xds.putProperty(org.das2.qds.QDataSet.UNITS,\n org.das2.datum.Units.us2000)\n else:\n xds = dataset(jpype.JArray(jpype.JDouble, X.ndim)(X.tolist()))\n if xds.rank() == 2:\n xds = transpose(xds)\n return xds\n elif Z is None:\n xds = to_qdataset(X)\n yds = to_qdataset(Y)\n return link(xds, yds)\n else:\n xds = to_qdataset(X)\n yds = to_qdataset(Y)\n zds = to_qdataset(Z)\n return link(xds, yds, zds)\n\n\ndef show_completions( s ):\n 'print completions for the given URI.'\n import jpype\n org= javaaddpath()\n sc= org.autoplot.ScriptContext\n xxs= sc.getCompletions( s )\n for x in xxs:\n print(x)\n \n\n#def applot(X, Y=None, Z=None):\n# 'plot Python arrays or ndarrays in Autoplot'\n# import jpype\n# if not jpype.isJVMStarted():\n# raise Exception('Java is not running, use javaaddpath')\n# ds = to_qdataset(X, Y, Z)\n# org = jpype.JPackage('org')\n# sc = org.autoplot.ScriptContext\n# sc.plot(ds)\n\ndef das2stream( dataStruct, filename, ytags=None, ascii=1, xunits='' ):\n\n print( 'writing das2stream to ' + filename + ' using ' + version() )\n import time\n\n streamHeader= [ '[00]xxxxxx<stream source=\"applot.pro\" localDate=\"'+time.asctime()+'\">', '</stream>' ]\n contentLength= -10 # don't include the packet tag and content length\n for i in range( len( streamHeader ) ):\n contentLength += len( streamHeader[i] ) + 1\n\n x= streamHeader[0]\n x= '[00]' + '%06d' % contentLength + x[10:]\n streamHeader[0]= x\n\n if ascii:\n xdatatype= 'ascii24'\n else:\n xdatatype= 'sun_real8'\n if ascii:\n datatype= 'ascii16'\n else:\n datatype='sun_real8'\n\n packetDescriptor= [ '[01]xxxxxx<packet>' ]\n tags= dataStruct['tags']\n nt= len(tags)\n packetDescriptor.append( ' <x type=\"'+xdatatype+'\" name=\"'+tags[0]+'\" units=\"'+xunits+'\" />' )\n\n totalItems=1\n\n format=['%24.12f']\n reclen= 4 + 24 + (nt-1) * 20\n i=0\n for tag in tags:\n d= dataStruct[tag]\n if ( i==0 ):\n name=''\n i=i+1\n continue\n else:\n name= tags[i] ### stream reader needs a default plane\n if ( isinstance( d, list ) ):\n rank= 1\n elif ( hasattr( d, \"shape\") ): # check for numpy\n rank= len(d.shape)\n\n if ( rank==1 ):\n packetDescriptor.append( ' <y type=\"'+datatype+'\" name=\"'+name+'\" units=\"\" idlname=\"'+tags[i]+'\" />' )\n\n if ( i<nt-1 ): format.append('%16.4e')\n else: format.append( '%15.3e' )\n totalItems= totalItems + 1\n else:\n if ytags==None: ytags= range(s[2])\n sytags= ','.join( [ \"%f\"%n for n in ytags ] )\n nitems= len(ytags)\n packetDescriptor.append( ' <yscan type=\"' +datatype+'\" name=\"' +name +'\" units=\"\" nitems=\"'+str(nitems) +'\" yTags=\"'+sytags+'\"' +' />' )\n \n for i in range(1,nitems):\n format.append('%16.4e')\n if ( i<nt-1 ):\n format.append('%16.4e')\n else:\n format.append('%15.4e')\n totalItems+= nitems\n i=i+1;\n\n packetDescriptor.append( '</packet>' )\n\n contentLength= -10 # don't include the packet tag and content length\n for i in range(0,len(packetDescriptor)):\n contentLength += len( packetDescriptor[i] ) + 1\n \n x= packetDescriptor[0]\n x= x[0:4]+'%06d' % contentLength + x[10:]\n packetDescriptor[0]= x\n\n unit= open( filename, 'wb' )\n\n for i in range(len(streamHeader)):\n unit.write( bytes(streamHeader[i],'utf8') )\n unit.write( bytes('\\n','utf8') )\n\n for i in range(len(packetDescriptor)):\n unit.write( bytes(packetDescriptor[i],'utf8') )\n unit.write( bytes('\\n','utf8') ) \n\n 
nr= len( dataStruct['x'] )\n \n keys= dataStruct.keys()\n\n newline= ascii\n for i in range(nr):\n unit.write( bytes(':01:','utf8') )\n for j in range(nt):\n tag= tags[j]\n if ( ascii ):\n rec= dataStruct[tag][i]\n if hasattr(rec, \"__len__\"):\n l= len(rec)\n for k in range(l):\n s= format[j] % rec[k]\n unit.write( bytes(s,'utf8') )\n if ( j==nt-1 ): newline=False\n else:\n s= format[j] % rec\n unit.write( bytes(s,'utf8') )\n else:\n import struct\n rec= dataStruct[tag][i]\n if hasattr(rec, \"__len__\"):\n l= len(rec)\n for j in range(l):\n unit.write( struct.pack( '>d', rec[j] ) )\n else:\n unit.write( struct.pack( '>d', rec ) )\n\n if ( newline ): unit.write( bytes('\\n','utf8') )\n\n unit.close() \n\n\ndef qstream(dataStruct, filename, ytags=None, ascii=True, xunits='', delta_plus=None, delta_minus=None):\n \"\"\"for rank 2, ytags must be specified ascii, boolean, use ascii transfer types\"\"\"\n tags = dataStruct['tags']\n nt = len(tags)\n name = tags[-1]\n tname = tags[0]\n\n print('writing qstream to ' + filename + ' using ' + version() )\n import time\n\n streamHeader = ['<stream dataset_id=\"'+name+'\" source=\"autoplot.py\" localDate=\"'+time.asctime()+'\">', '</stream>']\n contentLength= 0\n for i in range(len(streamHeader)):\n contentLength += len( streamHeader[i] ) + 1\n\n x = streamHeader[0]\n x = '[00]' + '%06d' % contentLength + x\n streamHeader[0] = x\n\n if ascii: \n xdatatype = 'ascii30'\n else: \n xdatatype = 'double'\n if ascii: \n datatype = 'ascii16'\n else: \n datatype = 'double'\n\n if ytags != None:\n ny = len(ytags)\n svals = str(ytags[0])\n for j in range(1,len(ytags)):\n svals = svals+','+str(ytags[j]).strip()\n\n dep1Descriptor = [ '<packet>' ]\n dep1Descriptor.append( ' <qdataset id=\"DEP1\" rank=\"1\" >' )\n dep1Descriptor.append( ' <properties>' )\n dep1Descriptor.append( ' <property name=\"NAME\" type=\"String\" value=\"DEP1\" />')\n dep1Descriptor.append( ' </properties>' )\n dep1Descriptor.append( ' <values encoding=\"'+datatype+'\" length=\"'+str(ny)+'\" values=\"'+svals+'\" />' )\n dep1Descriptor.append( ' </qdataset>' )\n dep1Descriptor.append( ' </packet>' )\n\n contentLength = 0 # don't include the packet tag and content length\n for i in range( len( dep1Descriptor ) ):\n contentLength += len( dep1Descriptor[i] ) + 1\n \n x = dep1Descriptor[0]\n x = '[02]' + '%06d' % contentLength + x\n dep1Descriptor[0] = x\n\n packetDescriptor = [ '[01]xxxxxx<packet>' ]\n\n nt = len(tags)\n packetDescriptor.append( ' <qdataset id=\"'+tname+'\" rank=\"1\" >' )\n packetDescriptor.append( ' <properties>' )\n packetDescriptor.append( ' <property name=\"NAME\" type=\"String\" value=\"'+tname+'\" />' )\n packetDescriptor.append( ' <property name=\"UNITS\" type=\"units\" value=\"'+xunits+'\" />' )\n packetDescriptor.append( ' </properties>' )\n packetDescriptor.append( ' <values encoding=\"'+xdatatype+'\" length=\"\" />' )\n packetDescriptor.append( ' </qdataset>' )\n\n totalItems = 1\n\n format = ['%30f']\n formats = {'x':format}\n \n reclen = 4 + 30 + (nt-1) * 20\n\n i = 1\n for tag in tags[1:]:\n formats1 = []\n d = dataStruct[tag]\n if isinstance(d, list):\n rank = 1\n elif hasattr(d, \"shape\"): # check for numpy\n rank = len(d.shape)\n\n name = tag ### stream reader needs a default plane\n if rank == 1:\n packetDescriptor.append( ' <qdataset id=\"'+name+'\" rank=\"1\" >' )\n packetDescriptor.append( ' <properties>' )\n packetDescriptor.append( ' <property name=\"NAME\" type=\"String\" value=\"'+name+'\" />' )\n packetDescriptor.append( ' <property name=\"DEPEND_0\" 
type=\"qdataset\" value=\"'+tname+'\" />' )\n if i == 1:\n if not delta_plus is None:\n packetDescriptor.append( ' <property name=\"DELTA_PLUS\" type=\"qdataset\" value=\"'+delta_plus+'\" />' )\n if not delta_minus is None:\n packetDescriptor.append( ' <property name=\"DELTA_MINUS\" type=\"qdataset\" value=\"'+delta_minus+'\" />' )\n packetDescriptor.append( ' </properties>' )\n packetDescriptor.append( ' <values encoding=\"'+datatype+'\" length=\"\" />' )\n packetDescriptor.append( ' </qdataset>' )\n if i<nt-1:\n formats1.append('%16.4e')\n else:\n formats1.append('%15.4e')\n totalItems += 1\n else:\n nitems = d.shape[1]\n packetDescriptor.append( ' <qdataset id=\"'+name+'\" rank=\"2\" >' )\n packetDescriptor.append( ' <properties>' )\n packetDescriptor.append( ' <property name=\"DEPEND_0\" type=\"qdataset\" value=\"'+tname+'\" />' )\n packetDescriptor.append( ' <property name=\"DEPEND_1\" type=\"qdataset\" value=\"DEP1\" />' )\n packetDescriptor.append( ' <property name=\"NAME\" type=\"String\" value=\"'+name+'\" />' )\n packetDescriptor.append( ' </properties>' )\n packetDescriptor.append( ' <values encoding=\"'+datatype+'\" length=\"'+str(nitems)+'\" />' )\n packetDescriptor.append( ' </qdataset>' )\n for i in range(0, nitems-1):\n formats1.append('%16.4e')\n if i<nt-1: \n formats1.append('%16.4e')\n else:\n formats1.append('%15.4e')\n totalItems += nitems\n i = i+1\n formats[tag] = formats1\n packetDescriptor.append( '</packet>' )\n\n contentLength = -10 # don't include the packet tag and content length\n for i in range(len(packetDescriptor) ):\n contentLength += len( packetDescriptor[i] ) + 1\n\n x = packetDescriptor[0]\n x = x[0:4] + '%06d' % contentLength + x[10:]\n packetDescriptor[0] = x\n\n unit = open(filename, 'wb')\n\n for i in range(len(streamHeader)):\n unit.write(bytes(streamHeader[i],'utf8'))\n unit.write(bytes('\\n','utf8'))\n\n for i in range(len(packetDescriptor)):\n unit.write(bytes(packetDescriptor[i],'utf8'))\n unit.write(bytes('\\n','utf8'))\n\n nr = len( dataStruct['x'] )\n\n if not ytags is None:\n for i in range(len(dep1Descriptor)):\n unit.write(bytes(dep1Descriptor[i],'utf8'))\n unit.write(bytes('\\n','utf8'))\n\n nr = len(dataStruct['x']) # number of records to output\n\n keys = dataStruct.keys()\n\n newline = False\n #import pdb\n #pdb.set_trace()\n \n for i in range(nr):\n unit.write(bytes(':01:','utf8'))\n for j in range(nt):\n tag = tags[j]\n format = formats[tag]\n if ascii:\n rec = dataStruct[tag][i]\n if hasattr(rec,'__len__'):\n l = len(rec)\n for k in range(l):\n print( format[k] )\n s = format[k] % rec[k]\n unit.write(bytes(s, 'utf8'))\n else:\n s = format[0] % rec\n unit.write(bytes(s, 'utf8'))\n if ( j == nt-1 ): \n newline = True\n else:\n import struct\n rec = dataStruct[tag][i]\n if hasattr(rec, '__len__'):\n l = len(rec)\n for j in range(l):\n unit.write(struct.pack('>d', rec[j]))\n else:\n unit.write(struct.pack('>d',rec))\n if ( newline ):\n unit.write(bytes('\\n', 'utf8'))\n unit.close()\n\n\ndef tryPortConnect( host, port ):\n print('tryPortConnect')\n import socket\n s = socket.socket()\n s.connect(('localhost',port))\n s.close()\n\n\ndef sendCommand( s, cmd ):\n s.send( bytes(cmd,'utf8') )\n print('done')\n\ndef applot( x=None, y=None, z=None, z4=None, xunits='', ylabel='', tmpfile=None, noplot=0, respawn=0, delta_plus=None, delta_minus=None ):\n '''\nNAME:\n plot\nPURPOSE:\n Plot to Autoplot instead of the direct graphics plotting, by creating a temporary file of the data and sending a plot\n command to Autoplot with the server turned 
on.\nARGUMENTS:\n X,Y,Z as with plot. If X is an integer, then it is the position in Autoplot, so that multiple plots can be sent to \n one Autoplot canvas.\nCALLING SEQUENCE:\n applot( X, Y )\n applot( X, Y, Z ) for a spectrogram\n\nKEYWORDS:\n tmpfile= explicitly set the file used to move data into Autoplot. This can also be used with /noplot\n noplot=True just make the tmpfile, don't actually try to plot.\n xunits= units as a string, especially like \"seconds since 2010-01-01T00:00\"\n ylabel='' label is currently ignored.\n delta_plus= array of positive lengths showing the upper limit of the 1-sigma confidence interval.\n delta_minus= array of positive lengths showing the lower limit of the 1-sigma confidence interval.\n'''\n\n port= 12345\n\n useDas2Stream=False\n\n if useDas2Stream: \n ext='d2s'\n else:\n ext='qds'\n\n if ( delta_plus!=None ):\n ext='qds'\n \n if tmpfile==None:\n import datetime\n dt= datetime.datetime.today()\n tag= dt.strftime(\"%Y%m%dT%H%M%S\")\n import glob\n ff= glob.glob( '/tmp/' + 'autoplot.' + tag + '.???.'+ext )\n seq= '.%03d.' % len(ff)\n tmpfile= '/tmp/' + 'autoplot.' + tag + seq + ext \n else:\n if ( tmpfile.index('.'+ext) != len(tmpfile)-4 ):\n tmpfile= tmpfile + '.'+ext # add the extension\n\n if ( not z4 is None ): \n np=4\n elif ( not z is None ): \n np=3\n elif( not y is None ): \n np=2\n elif( not x is None ): \n np=1\n else:\n raise Exception(\"no x, which must be specified\")\n \n # serialize the data to a das2stream in a temporary file\n if isinstance( x, int ):\n pos= x\n xx= y\n if ( not z is None ):\n yy= z\n if ( not z4 is None ):\n zz= z4\n np= np-1\n else:\n pos= -1\n xx= x\n if ( not y is None ):\n yy= y\n if ( not z is None ):\n zz= z\n \n ascii=1\n\n if ext=='qds':\n print('*****')\n print( type( xx ) )\n print( xunits )\n if np==3:\n data= { 'x':xx, 'z':zz, 'tags':['x','z'] }\n qstream( data, tmpfile, ytags=yy, xunits=xunits, ascii=ascii )\n elif np==2:\n if ( delta_plus!=None ):\n data= { 'x':xx, 'y':yy, 'delta_plus':delta_plus, 'delta_minus':delta_minus, 'tags':['x','y','delta_plus','delta_minus'] }\n qstream( data, tmpfile, ascii=ascii, xunits=xunits, delta_plus='delta_plus', delta_minus='delta_minus' )\n else:\n data= { 'x':xx, 'y':yy, 'tags':['x','y'] }\n qstream( data, tmpfile, ascii=ascii, xunits=xunits )\n else:\n ndim= len( xx.shape )\n if ndim==2:\n data= { 'x':range(len(xx)), 'z':xx, 'tags':['x','z'] }\n qstream( data, tmpfile, ytags=range(xx.shape[1]), ascii=ascii, xunits='' )\n else:\n if ( delta_plus!=None and delta_minus!=None ):\n data= { 'x':range(len(xx)), 'y':xx, 'delta_plus':delta_plus, 'delta_minus':delta_minus, 'tags':['x','y','delta_plus','delta_minus'] }\n qstream( data, tmpfile, ascii=ascii, xunits='', delta_plus='delta_plus', delta_minus='delta_minus' )\n else:\n data= { 'x':range(len(xx)), 'y':xx, 'tags':['x','y'] }\n qstream( data, tmpfile, ascii=ascii, xunits='' )\n \n else:\n if np==3:\n data= { 'x':xx, 'z':zz, 'tags':['x','z'] }\n das2stream( data, tmpfile, ytags=yy, xunits=xunits, ascii=ascii )\n elif np==2:\n data= { 'x':xx, 'y':yy, 'tags':['x','y'] }\n das2stream( data, tmpfile, ascii=ascii, xunits=xunits )\n else:\n rank=1\n if ( rank==2 ):\n data= { 'x':range(len(xx)), 'z':xx, 'tags':['x','z'] }\n das2stream( data, tmpfile, ytags=range(s[2]), ascii=ascii, xunits='' )\n else:\n data= { 'x':range(len(xx)), 'y':xx, 'tags':['x','y'] }\n das2stream( data, tmpfile, ascii=ascii, xunits='' )\n \n if noplot==1:\n return\n\n err= 0\n if ( err==0 ):\n import socket\n s = socket.socket()\n 
s.connect(('localhost',port))\n\n if ( pos>-1 ):\n cmd= 'plot( '+str(pos)+', \"file:'+tmpfile+'\" );\\n' # semicolon means no echo\n\n else:\n cmd= 'plot( \"file:'+tmpfile+'\" );\\n' # semicolon means no echo\n\n foo= sendCommand( s, cmd )\n s.shutdown(1)\n s.close()\n\n else:\n raise Exception( 'error encountered!' )\n \n"
] |
[
[
"numpy.timedelta64",
"numpy.array",
"numpy.datetime64"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
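`to_ndarray` in the autoplot module above converts values expressed in 'microseconds since 2000-01-01T00:00' into numpy datetime64 by adding nanosecond offsets to a 2000-01-01 base. The same conversion with plain NumPy and toy input values, no JVM or jpype involved:

import numpy as np

g_base = np.datetime64("2000-01-01T00:00:00")
us_since_2000 = [0.0, 1.5e6, 86400.0e6]       # 0 s, 1.5 s, 1 day in microseconds

times = np.array([g_base + np.timedelta64(int(us * 1000), "ns")
                  for us in us_since_2000])
print(times)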
alexhenrie/numpy
|
[
"662f973ba58563b268d009e67806aa1150ca1cb2",
"662f973ba58563b268d009e67806aa1150ca1cb2"
] |
[
"numpy/lib/utils.py",
"numpy/core/tests/test_dtype.py"
] |
[
"import os\nimport sys\nimport textwrap\nimport types\nimport re\nimport warnings\n\nfrom numpy.core.numerictypes import issubclass_, issubsctype, issubdtype\nfrom numpy.core.overrides import set_module\nfrom numpy.core import ndarray, ufunc, asarray\nimport numpy as np\n\n__all__ = [\n 'issubclass_', 'issubsctype', 'issubdtype', 'deprecate',\n 'deprecate_with_doc', 'get_include', 'info', 'source', 'who',\n 'lookfor', 'byte_bounds', 'safe_eval'\n ]\n\ndef get_include():\n \"\"\"\n Return the directory that contains the NumPy \\\\*.h header files.\n\n Extension modules that need to compile against NumPy should use this\n function to locate the appropriate include directory.\n\n Notes\n -----\n When using ``distutils``, for example in ``setup.py``.\n ::\n\n import numpy as np\n ...\n Extension('extension_name', ...\n include_dirs=[np.get_include()])\n ...\n\n \"\"\"\n import numpy\n if numpy.show_config is None:\n # running from numpy source directory\n d = os.path.join(os.path.dirname(numpy.__file__), 'core', 'include')\n else:\n # using installed numpy core headers\n import numpy.core as core\n d = os.path.join(os.path.dirname(core.__file__), 'include')\n return d\n\n\ndef _set_function_name(func, name):\n func.__name__ = name\n return func\n\n\nclass _Deprecate:\n \"\"\"\n Decorator class to deprecate old functions.\n\n Refer to `deprecate` for details.\n\n See Also\n --------\n deprecate\n\n \"\"\"\n\n def __init__(self, old_name=None, new_name=None, message=None):\n self.old_name = old_name\n self.new_name = new_name\n self.message = message\n\n def __call__(self, func, *args, **kwargs):\n \"\"\"\n Decorator call. Refer to ``decorate``.\n\n \"\"\"\n old_name = self.old_name\n new_name = self.new_name\n message = self.message\n\n if old_name is None:\n try:\n old_name = func.__name__\n except AttributeError:\n old_name = func.__name__\n if new_name is None:\n depdoc = \"`%s` is deprecated!\" % old_name\n else:\n depdoc = \"`%s` is deprecated, use `%s` instead!\" % \\\n (old_name, new_name)\n\n if message is not None:\n depdoc += \"\\n\" + message\n\n def newfunc(*args,**kwds):\n \"\"\"`arrayrange` is deprecated, use `arange` instead!\"\"\"\n warnings.warn(depdoc, DeprecationWarning, stacklevel=2)\n return func(*args, **kwds)\n\n newfunc = _set_function_name(newfunc, old_name)\n doc = func.__doc__\n if doc is None:\n doc = depdoc\n else:\n lines = doc.expandtabs().split('\\n')\n indent = _get_indent(lines[1:])\n if lines[0].lstrip():\n # Indent the original first line to let inspect.cleandoc()\n # dedent the docstring despite the deprecation notice.\n doc = indent * ' ' + doc\n else:\n # Remove the same leading blank lines as cleandoc() would.\n skip = len(lines[0]) + 1\n for line in lines[1:]:\n if len(line) > indent:\n break\n skip += len(line) + 1\n doc = doc[skip:]\n depdoc = textwrap.indent(depdoc, ' ' * indent)\n doc = '\\n\\n'.join([depdoc, doc])\n newfunc.__doc__ = doc\n try:\n d = func.__dict__\n except AttributeError:\n pass\n else:\n newfunc.__dict__.update(d)\n return newfunc\n\n\ndef _get_indent(lines):\n \"\"\"\n Determines the leading whitespace that could be removed from all the lines.\n \"\"\"\n indent = sys.maxsize\n for line in lines:\n content = len(line.lstrip())\n if content:\n indent = min(indent, len(line) - content)\n if indent == sys.maxsize:\n indent = 0\n return indent\n\n\ndef deprecate(*args, **kwargs):\n \"\"\"\n Issues a DeprecationWarning, adds warning to `old_name`'s\n docstring, rebinds ``old_name.__name__`` and returns the new\n function object.\n\n 
This function may also be used as a decorator.\n\n Parameters\n ----------\n func : function\n The function to be deprecated.\n old_name : str, optional\n The name of the function to be deprecated. Default is None, in\n which case the name of `func` is used.\n new_name : str, optional\n The new name for the function. Default is None, in which case the\n deprecation message is that `old_name` is deprecated. If given, the\n deprecation message is that `old_name` is deprecated and `new_name`\n should be used instead.\n message : str, optional\n Additional explanation of the deprecation. Displayed in the\n docstring after the warning.\n\n Returns\n -------\n old_func : function\n The deprecated function.\n\n Examples\n --------\n Note that ``olduint`` returns a value after printing Deprecation\n Warning:\n\n >>> olduint = np.deprecate(np.uint)\n DeprecationWarning: `uint64` is deprecated! # may vary\n >>> olduint(6)\n 6\n\n \"\"\"\n # Deprecate may be run as a function or as a decorator\n # If run as a function, we initialise the decorator class\n # and execute its __call__ method.\n\n if args:\n fn = args[0]\n args = args[1:]\n\n return _Deprecate(*args, **kwargs)(fn)\n else:\n return _Deprecate(*args, **kwargs)\n\n\ndef deprecate_with_doc(msg):\n \"\"\"\n Deprecates a function and includes the deprecation in its docstring.\n \n This function is used as a decorator. It returns an object that can be \n used to issue a DeprecationWarning, by passing the to-be decorated \n function as argument, this adds warning to the to-be decorated function's \n docstring and returns the new function object.\n \n See Also\n --------\n deprecate : Decorate a function such that it issues a `DeprecationWarning` \n \n Parameters\n ----------\n msg : str\n Additional explanation of the deprecation. Displayed in the \n docstring after the warning.\n\n Returns\n -------\n obj : object\n\n \"\"\"\n return _Deprecate(message=msg) \n\n\n#--------------------------------------------\n# Determine if two arrays can share memory\n#--------------------------------------------\n\ndef byte_bounds(a):\n \"\"\"\n Returns pointers to the end-points of an array.\n\n Parameters\n ----------\n a : ndarray\n Input array. It must conform to the Python-side of the array\n interface.\n\n Returns\n -------\n (low, high) : tuple of 2 integers\n The first integer is the first byte of the array, the second\n integer is just past the last byte of the array. 
If `a` is not\n contiguous it will not use every byte between the (`low`, `high`)\n values.\n\n Examples\n --------\n >>> I = np.eye(2, dtype='f'); I.dtype\n dtype('float32')\n >>> low, high = np.byte_bounds(I)\n >>> high - low == I.size*I.itemsize\n True\n >>> I = np.eye(2); I.dtype\n dtype('float64')\n >>> low, high = np.byte_bounds(I)\n >>> high - low == I.size*I.itemsize\n True\n\n \"\"\"\n ai = a.__array_interface__\n a_data = ai['data'][0]\n astrides = ai['strides']\n ashape = ai['shape']\n bytes_a = asarray(a).dtype.itemsize\n\n a_low = a_high = a_data\n if astrides is None:\n # contiguous case\n a_high += a.size * bytes_a\n else:\n for shape, stride in zip(ashape, astrides):\n if stride < 0:\n a_low += (shape-1)*stride\n else:\n a_high += (shape-1)*stride\n a_high += bytes_a\n return a_low, a_high\n\n\n#-----------------------------------------------------------------------------\n# Function for output and information on the variables used.\n#-----------------------------------------------------------------------------\n\n\ndef who(vardict=None):\n \"\"\"\n Print the NumPy arrays in the given dictionary.\n\n If there is no dictionary passed in or `vardict` is None then returns\n NumPy arrays in the globals() dictionary (all NumPy arrays in the\n namespace).\n\n Parameters\n ----------\n vardict : dict, optional\n A dictionary possibly containing ndarrays. Default is globals().\n\n Returns\n -------\n out : None\n Returns 'None'.\n\n Notes\n -----\n Prints out the name, shape, bytes and type of all of the ndarrays\n present in `vardict`.\n\n Examples\n --------\n >>> a = np.arange(10)\n >>> b = np.ones(20)\n >>> np.who()\n Name Shape Bytes Type\n ===========================================================\n a 10 80 int64\n b 20 160 float64\n Upper bound on total bytes = 240\n\n >>> d = {'x': np.arange(2.0), 'y': np.arange(3.0), 'txt': 'Some str',\n ... 
'idx':5}\n >>> np.who(d)\n Name Shape Bytes Type\n ===========================================================\n x 2 16 float64\n y 3 24 float64\n Upper bound on total bytes = 40\n\n \"\"\"\n if vardict is None:\n frame = sys._getframe().f_back\n vardict = frame.f_globals\n sta = []\n cache = {}\n for name in vardict.keys():\n if isinstance(vardict[name], ndarray):\n var = vardict[name]\n idv = id(var)\n if idv in cache.keys():\n namestr = name + \" (%s)\" % cache[idv]\n original = 0\n else:\n cache[idv] = name\n namestr = name\n original = 1\n shapestr = \" x \".join(map(str, var.shape))\n bytestr = str(var.nbytes)\n sta.append([namestr, shapestr, bytestr, var.dtype.name,\n original])\n\n maxname = 0\n maxshape = 0\n maxbyte = 0\n totalbytes = 0\n for k in range(len(sta)):\n val = sta[k]\n if maxname < len(val[0]):\n maxname = len(val[0])\n if maxshape < len(val[1]):\n maxshape = len(val[1])\n if maxbyte < len(val[2]):\n maxbyte = len(val[2])\n if val[4]:\n totalbytes += int(val[2])\n\n if len(sta) > 0:\n sp1 = max(10, maxname)\n sp2 = max(10, maxshape)\n sp3 = max(10, maxbyte)\n prval = \"Name %s Shape %s Bytes %s Type\" % (sp1*' ', sp2*' ', sp3*' ')\n print(prval + \"\\n\" + \"=\"*(len(prval)+5) + \"\\n\")\n\n for k in range(len(sta)):\n val = sta[k]\n print(\"%s %s %s %s %s %s %s\" % (val[0], ' '*(sp1-len(val[0])+4),\n val[1], ' '*(sp2-len(val[1])+5),\n val[2], ' '*(sp3-len(val[2])+5),\n val[3]))\n print(\"\\nUpper bound on total bytes = %d\" % totalbytes)\n return\n\n#-----------------------------------------------------------------------------\n\n\n# NOTE: pydoc defines a help function which works similarly to this\n# except it uses a pager to take over the screen.\n\n# combine name and arguments and split to multiple lines of width\n# characters. 
End lines on a comma and begin argument list indented with\n# the rest of the arguments.\ndef _split_line(name, arguments, width):\n firstwidth = len(name)\n k = firstwidth\n newstr = name\n sepstr = \", \"\n arglist = arguments.split(sepstr)\n for argument in arglist:\n if k == firstwidth:\n addstr = \"\"\n else:\n addstr = sepstr\n k = k + len(argument) + len(addstr)\n if k > width:\n k = firstwidth + 1 + len(argument)\n newstr = newstr + \",\\n\" + \" \"*(firstwidth+2) + argument\n else:\n newstr = newstr + addstr + argument\n return newstr\n\n_namedict = None\n_dictlist = None\n\n# Traverse all module directories underneath globals\n# to see if something is defined\ndef _makenamedict(module='numpy'):\n module = __import__(module, globals(), locals(), [])\n thedict = {module.__name__:module.__dict__}\n dictlist = [module.__name__]\n totraverse = [module.__dict__]\n while True:\n if len(totraverse) == 0:\n break\n thisdict = totraverse.pop(0)\n for x in thisdict.keys():\n if isinstance(thisdict[x], types.ModuleType):\n modname = thisdict[x].__name__\n if modname not in dictlist:\n moddict = thisdict[x].__dict__\n dictlist.append(modname)\n totraverse.append(moddict)\n thedict[modname] = moddict\n return thedict, dictlist\n\n\ndef _info(obj, output=sys.stdout):\n \"\"\"Provide information about ndarray obj.\n\n Parameters\n ----------\n obj : ndarray\n Must be ndarray, not checked.\n output\n Where printed output goes.\n\n Notes\n -----\n Copied over from the numarray module prior to its removal.\n Adapted somewhat as only numpy is an option now.\n\n Called by info.\n\n \"\"\"\n extra = \"\"\n tic = \"\"\n bp = lambda x: x\n cls = getattr(obj, '__class__', type(obj))\n nm = getattr(cls, '__name__', cls)\n strides = obj.strides\n endian = obj.dtype.byteorder\n\n print(\"class: \", nm, file=output)\n print(\"shape: \", obj.shape, file=output)\n print(\"strides: \", strides, file=output)\n print(\"itemsize: \", obj.itemsize, file=output)\n print(\"aligned: \", bp(obj.flags.aligned), file=output)\n print(\"contiguous: \", bp(obj.flags.contiguous), file=output)\n print(\"fortran: \", obj.flags.fortran, file=output)\n print(\n \"data pointer: %s%s\" % (hex(obj.ctypes._as_parameter_.value), extra),\n file=output\n )\n print(\"byteorder: \", end=' ', file=output)\n if endian in ['|', '=']:\n print(\"%s%s%s\" % (tic, sys.byteorder, tic), file=output)\n byteswap = False\n elif endian == '>':\n print(\"%sbig%s\" % (tic, tic), file=output)\n byteswap = sys.byteorder != \"big\"\n else:\n print(\"%slittle%s\" % (tic, tic), file=output)\n byteswap = sys.byteorder != \"little\"\n print(\"byteswap: \", bp(byteswap), file=output)\n print(\"type: %s\" % obj.dtype, file=output)\n\n\n@set_module('numpy')\ndef info(object=None, maxwidth=76, output=sys.stdout, toplevel='numpy'):\n \"\"\"\n Get help information for a function, class, or module.\n\n Parameters\n ----------\n object : object or str, optional\n Input object or name to get information about. If `object` is a\n numpy object, its docstring is given. If it is a string, available\n modules are searched for matching objects. If None, information\n about `info` itself is returned.\n maxwidth : int, optional\n Printing width.\n output : file like object, optional\n File like object that the output is written to, default is\n ``stdout``. 
The object has to be opened in 'w' or 'a' mode.\n toplevel : str, optional\n Start search at this level.\n\n See Also\n --------\n source, lookfor\n\n Notes\n -----\n When used interactively with an object, ``np.info(obj)`` is equivalent\n to ``help(obj)`` on the Python prompt or ``obj?`` on the IPython\n prompt.\n\n Examples\n --------\n >>> np.info(np.polyval) # doctest: +SKIP\n polyval(p, x)\n Evaluate the polynomial p at x.\n ...\n\n When using a string for `object` it is possible to get multiple results.\n\n >>> np.info('fft') # doctest: +SKIP\n *** Found in numpy ***\n Core FFT routines\n ...\n *** Found in numpy.fft ***\n fft(a, n=None, axis=-1)\n ...\n *** Repeat reference found in numpy.fft.fftpack ***\n *** Total of 3 references found. ***\n\n \"\"\"\n global _namedict, _dictlist\n # Local import to speed up numpy's import time.\n import pydoc\n import inspect\n\n if (hasattr(object, '_ppimport_importer') or\n hasattr(object, '_ppimport_module')):\n object = object._ppimport_module\n elif hasattr(object, '_ppimport_attr'):\n object = object._ppimport_attr\n\n if object is None:\n info(info)\n elif isinstance(object, ndarray):\n _info(object, output=output)\n elif isinstance(object, str):\n if _namedict is None:\n _namedict, _dictlist = _makenamedict(toplevel)\n numfound = 0\n objlist = []\n for namestr in _dictlist:\n try:\n obj = _namedict[namestr][object]\n if id(obj) in objlist:\n print(\"\\n \"\n \"*** Repeat reference found in %s *** \" % namestr,\n file=output\n )\n else:\n objlist.append(id(obj))\n print(\" *** Found in %s ***\" % namestr, file=output)\n info(obj)\n print(\"-\"*maxwidth, file=output)\n numfound += 1\n except KeyError:\n pass\n if numfound == 0:\n print(\"Help for %s not found.\" % object, file=output)\n else:\n print(\"\\n \"\n \"*** Total of %d references found. ***\" % numfound,\n file=output\n )\n\n elif inspect.isfunction(object) or inspect.ismethod(object):\n name = object.__name__\n try:\n arguments = str(inspect.signature(object))\n except Exception:\n arguments = \"()\"\n\n if len(name+arguments) > maxwidth:\n argstr = _split_line(name, arguments, maxwidth)\n else:\n argstr = name + arguments\n\n print(\" \" + argstr + \"\\n\", file=output)\n print(inspect.getdoc(object), file=output)\n\n elif inspect.isclass(object):\n name = object.__name__\n try:\n arguments = str(inspect.signature(object))\n except Exception:\n arguments = \"()\"\n\n if len(name+arguments) > maxwidth:\n argstr = _split_line(name, arguments, maxwidth)\n else:\n argstr = name + arguments\n\n print(\" \" + argstr + \"\\n\", file=output)\n doc1 = inspect.getdoc(object)\n if doc1 is None:\n if hasattr(object, '__init__'):\n print(inspect.getdoc(object.__init__), file=output)\n else:\n print(inspect.getdoc(object), file=output)\n\n methods = pydoc.allmethods(object)\n\n public_methods = [meth for meth in methods if meth[0] != '_']\n if public_methods:\n print(\"\\n\\nMethods:\\n\", file=output)\n for meth in public_methods:\n thisobj = getattr(object, meth, None)\n if thisobj is not None:\n methstr, other = pydoc.splitdoc(\n inspect.getdoc(thisobj) or \"None\"\n )\n print(\" %s -- %s\" % (meth, methstr), file=output)\n\n elif hasattr(object, '__doc__'):\n print(inspect.getdoc(object), file=output)\n\n\n@set_module('numpy')\ndef source(object, output=sys.stdout):\n \"\"\"\n Print or write to a file the source code for a NumPy object.\n\n The source code is only returned for objects written in Python. 
Many\n functions and classes are defined in C and will therefore not return\n useful information.\n\n Parameters\n ----------\n object : numpy object\n Input object. This can be any object (function, class, module,\n ...).\n output : file object, optional\n If `output` not supplied then source code is printed to screen\n (sys.stdout). File object must be created with either write 'w' or\n append 'a' modes.\n\n See Also\n --------\n lookfor, info\n\n Examples\n --------\n >>> np.source(np.interp) #doctest: +SKIP\n In file: /usr/lib/python2.6/dist-packages/numpy/lib/function_base.py\n def interp(x, xp, fp, left=None, right=None):\n \\\"\\\"\\\".... (full docstring printed)\\\"\\\"\\\"\n if isinstance(x, (float, int, number)):\n return compiled_interp([x], xp, fp, left, right).item()\n else:\n return compiled_interp(x, xp, fp, left, right)\n\n The source code is only returned for objects written in Python.\n\n >>> np.source(np.array) #doctest: +SKIP\n Not available for this object.\n\n \"\"\"\n # Local import to speed up numpy's import time.\n import inspect\n try:\n print(\"In file: %s\\n\" % inspect.getsourcefile(object), file=output)\n print(inspect.getsource(object), file=output)\n except Exception:\n print(\"Not available for this object.\", file=output)\n\n\n# Cache for lookfor: {id(module): {name: (docstring, kind, index), ...}...}\n# where kind: \"func\", \"class\", \"module\", \"object\"\n# and index: index in breadth-first namespace traversal\n_lookfor_caches = {}\n\n# regexp whose match indicates that the string may contain a function\n# signature\n_function_signature_re = re.compile(r\"[a-z0-9_]+\\(.*[,=].*\\)\", re.I)\n\n\n@set_module('numpy')\ndef lookfor(what, module=None, import_modules=True, regenerate=False,\n output=None):\n \"\"\"\n Do a keyword search on docstrings.\n\n A list of objects that matched the search is displayed,\n sorted by relevance. All given keywords need to be found in the\n docstring for it to be returned as a result, but the order does\n not matter.\n\n Parameters\n ----------\n what : str\n String containing words to look for.\n module : str or list, optional\n Name of module(s) whose docstrings to go through.\n import_modules : bool, optional\n Whether to import sub-modules in packages. Default is True.\n regenerate : bool, optional\n Whether to re-generate the docstring cache. Default is False.\n output : file-like, optional\n File-like object to write the output to. 
If omitted, use a pager.\n\n See Also\n --------\n source, info\n\n Notes\n -----\n Relevance is determined only roughly, by checking if the keywords occur\n in the function name, at the start of a docstring, etc.\n\n Examples\n --------\n >>> np.lookfor('binary representation') # doctest: +SKIP\n Search results for 'binary representation'\n ------------------------------------------\n numpy.binary_repr\n Return the binary representation of the input number as a string.\n numpy.core.setup_common.long_double_representation\n Given a binary dump as given by GNU od -b, look for long double\n numpy.base_repr\n Return a string representation of a number in the given base system.\n ...\n\n \"\"\"\n import pydoc\n\n # Cache\n cache = _lookfor_generate_cache(module, import_modules, regenerate)\n\n # Search\n # XXX: maybe using a real stemming search engine would be better?\n found = []\n whats = str(what).lower().split()\n if not whats:\n return\n\n for name, (docstring, kind, index) in cache.items():\n if kind in ('module', 'object'):\n # don't show modules or objects\n continue\n doc = docstring.lower()\n if all(w in doc for w in whats):\n found.append(name)\n\n # Relevance sort\n # XXX: this is full Harrison-Stetson heuristics now,\n # XXX: it probably could be improved\n\n kind_relevance = {'func': 1000, 'class': 1000,\n 'module': -1000, 'object': -1000}\n\n def relevance(name, docstr, kind, index):\n r = 0\n # do the keywords occur within the start of the docstring?\n first_doc = \"\\n\".join(docstr.lower().strip().split(\"\\n\")[:3])\n r += sum([200 for w in whats if w in first_doc])\n # do the keywords occur in the function name?\n r += sum([30 for w in whats if w in name])\n # is the full name long?\n r += -len(name) * 5\n # is the object of bad type?\n r += kind_relevance.get(kind, -1000)\n # is the object deep in namespace hierarchy?\n r += -name.count('.') * 10\n r += max(-index / 100, -100)\n return r\n\n def relevance_value(a):\n return relevance(a, *cache[a])\n found.sort(key=relevance_value)\n\n # Pretty-print\n s = \"Search results for '%s'\" % (' '.join(whats))\n help_text = [s, \"-\"*len(s)]\n for name in found[::-1]:\n doc, kind, ix = cache[name]\n\n doclines = [line.strip() for line in doc.strip().split(\"\\n\")\n if line.strip()]\n\n # find a suitable short description\n try:\n first_doc = doclines[0].strip()\n if _function_signature_re.search(first_doc):\n first_doc = doclines[1].strip()\n except IndexError:\n first_doc = \"\"\n help_text.append(\"%s\\n %s\" % (name, first_doc))\n\n if not found:\n help_text.append(\"Nothing found.\")\n\n # Output\n if output is not None:\n output.write(\"\\n\".join(help_text))\n elif len(help_text) > 10:\n pager = pydoc.getpager()\n pager(\"\\n\".join(help_text))\n else:\n print(\"\\n\".join(help_text))\n\ndef _lookfor_generate_cache(module, import_modules, regenerate):\n \"\"\"\n Generate docstring cache for given module.\n\n Parameters\n ----------\n module : str, None, module\n Module for which to generate docstring cache\n import_modules : bool\n Whether to import sub-modules in packages.\n regenerate : bool\n Re-generate the docstring cache\n\n Returns\n -------\n cache : dict {obj_full_name: (docstring, kind, index), ...}\n Docstring cache for the module, either cached one (regenerate=False)\n or newly generated.\n\n \"\"\"\n # Local import to speed up numpy's import time.\n import inspect\n\n from io import StringIO\n\n if module is None:\n module = \"numpy\"\n\n if isinstance(module, str):\n try:\n __import__(module)\n except 
ImportError:\n return {}\n module = sys.modules[module]\n elif isinstance(module, list) or isinstance(module, tuple):\n cache = {}\n for mod in module:\n cache.update(_lookfor_generate_cache(mod, import_modules,\n regenerate))\n return cache\n\n if id(module) in _lookfor_caches and not regenerate:\n return _lookfor_caches[id(module)]\n\n # walk items and collect docstrings\n cache = {}\n _lookfor_caches[id(module)] = cache\n seen = {}\n index = 0\n stack = [(module.__name__, module)]\n while stack:\n name, item = stack.pop(0)\n if id(item) in seen:\n continue\n seen[id(item)] = True\n\n index += 1\n kind = \"object\"\n\n if inspect.ismodule(item):\n kind = \"module\"\n try:\n _all = item.__all__\n except AttributeError:\n _all = None\n\n # import sub-packages\n if import_modules and hasattr(item, '__path__'):\n for pth in item.__path__:\n for mod_path in os.listdir(pth):\n this_py = os.path.join(pth, mod_path)\n init_py = os.path.join(pth, mod_path, '__init__.py')\n if (os.path.isfile(this_py) and\n mod_path.endswith('.py')):\n to_import = mod_path[:-3]\n elif os.path.isfile(init_py):\n to_import = mod_path\n else:\n continue\n if to_import == '__init__':\n continue\n\n try:\n old_stdout = sys.stdout\n old_stderr = sys.stderr\n try:\n sys.stdout = StringIO()\n sys.stderr = StringIO()\n __import__(\"%s.%s\" % (name, to_import))\n finally:\n sys.stdout = old_stdout\n sys.stderr = old_stderr\n # Catch SystemExit, too\n except BaseException:\n continue\n\n for n, v in _getmembers(item):\n try:\n item_name = getattr(v, '__name__', \"%s.%s\" % (name, n))\n mod_name = getattr(v, '__module__', None)\n except NameError:\n # ref. SWIG's global cvars\n # NameError: Unknown C global variable\n item_name = \"%s.%s\" % (name, n)\n mod_name = None\n if '.' not in item_name and mod_name:\n item_name = \"%s.%s\" % (mod_name, item_name)\n\n if not item_name.startswith(name + '.'):\n # don't crawl \"foreign\" objects\n if isinstance(v, ufunc):\n # ... 
unless they are ufuncs\n pass\n else:\n continue\n elif not (inspect.ismodule(v) or _all is None or n in _all):\n continue\n stack.append((\"%s.%s\" % (name, n), v))\n elif inspect.isclass(item):\n kind = \"class\"\n for n, v in _getmembers(item):\n stack.append((\"%s.%s\" % (name, n), v))\n elif hasattr(item, \"__call__\"):\n kind = \"func\"\n\n try:\n doc = inspect.getdoc(item)\n except NameError:\n # ref SWIG's NameError: Unknown C global variable\n doc = None\n if doc is not None:\n cache[name] = (doc, kind, index)\n\n return cache\n\ndef _getmembers(item):\n import inspect\n try:\n members = inspect.getmembers(item)\n except Exception:\n members = [(x, getattr(item, x)) for x in dir(item)\n if hasattr(item, x)]\n return members\n\n\ndef safe_eval(source):\n \"\"\"\n Protected string evaluation.\n\n Evaluate a string containing a Python literal expression without\n allowing the execution of arbitrary non-literal code.\n\n Parameters\n ----------\n source : str\n The string to evaluate.\n\n Returns\n -------\n obj : object\n The result of evaluating `source`.\n\n Raises\n ------\n SyntaxError\n If the code has invalid Python syntax, or if it contains\n non-literal code.\n\n Examples\n --------\n >>> np.safe_eval('1')\n 1\n >>> np.safe_eval('[1, 2, 3]')\n [1, 2, 3]\n >>> np.safe_eval('{\"foo\": (\"bar\", 10.0)}')\n {'foo': ('bar', 10.0)}\n\n >>> np.safe_eval('import os')\n Traceback (most recent call last):\n ...\n SyntaxError: invalid syntax\n\n >>> np.safe_eval('open(\"/home/user/.ssh/id_dsa\").read()')\n Traceback (most recent call last):\n ...\n ValueError: malformed node or string: <_ast.Call object at 0x...>\n\n \"\"\"\n # Local import to speed up numpy's import time.\n import ast\n return ast.literal_eval(source)\n\n\ndef _median_nancheck(data, result, axis, out):\n \"\"\"\n Utility function to check median result from data for NaN values at the end\n and return NaN in that case. Input result can also be a MaskedArray.\n\n Parameters\n ----------\n data : array\n Input data to median function\n result : Array or MaskedArray\n Result of median function\n axis : {int, sequence of int, None}, optional\n Axis or axes along which the median was computed.\n out : ndarray, optional\n Output array in which to place the result.\n Returns\n -------\n median : scalar or ndarray\n Median or NaN in axes which contained NaN in the input.\n \"\"\"\n if data.size == 0:\n return result\n data = np.moveaxis(data, axis, -1)\n n = np.isnan(data[..., -1])\n # masked NaN values are ok\n if np.ma.isMaskedArray(n):\n n = n.filled(False)\n if result.ndim == 0:\n if n == True:\n if out is not None:\n out[...] = data.dtype.type(np.nan)\n result = out\n else:\n result = data.dtype.type(np.nan)\n elif np.count_nonzero(n.ravel()) > 0:\n result[n] = np.nan\n return result\n\n#-----------------------------------------------------------------------------\n",
"import sys\nimport operator\nimport pytest\nimport ctypes\nimport gc\nimport warnings\n\nimport numpy as np\nfrom numpy.core._rational_tests import rational\nfrom numpy.core._multiarray_tests import create_custom_field_dtype\nfrom numpy.testing import (\n assert_, assert_equal, assert_array_equal, assert_raises, HAS_REFCOUNT)\nfrom numpy.compat import pickle\nfrom itertools import permutations\n\ndef assert_dtype_equal(a, b):\n assert_equal(a, b)\n assert_equal(hash(a), hash(b),\n \"two equivalent types do not hash to the same value !\")\n\ndef assert_dtype_not_equal(a, b):\n assert_(a != b)\n assert_(hash(a) != hash(b),\n \"two different types hash to the same value !\")\n\nclass TestBuiltin:\n @pytest.mark.parametrize('t', [int, float, complex, np.int32, str, object,\n np.compat.unicode])\n def test_run(self, t):\n \"\"\"Only test hash runs at all.\"\"\"\n dt = np.dtype(t)\n hash(dt)\n\n @pytest.mark.parametrize('t', [int, float])\n def test_dtype(self, t):\n # Make sure equivalent byte order char hash the same (e.g. < and = on\n # little endian)\n dt = np.dtype(t)\n dt2 = dt.newbyteorder(\"<\")\n dt3 = dt.newbyteorder(\">\")\n if dt == dt2:\n assert_(dt.byteorder != dt2.byteorder, \"bogus test\")\n assert_dtype_equal(dt, dt2)\n else:\n assert_(dt.byteorder != dt3.byteorder, \"bogus test\")\n assert_dtype_equal(dt, dt3)\n\n def test_equivalent_dtype_hashing(self):\n # Make sure equivalent dtypes with different type num hash equal\n uintp = np.dtype(np.uintp)\n if uintp.itemsize == 4:\n left = uintp\n right = np.dtype(np.uint32)\n else:\n left = uintp\n right = np.dtype(np.ulonglong)\n assert_(left == right)\n assert_(hash(left) == hash(right))\n\n def test_invalid_types(self):\n # Make sure invalid type strings raise an error\n\n assert_raises(TypeError, np.dtype, 'O3')\n assert_raises(TypeError, np.dtype, 'O5')\n assert_raises(TypeError, np.dtype, 'O7')\n assert_raises(TypeError, np.dtype, 'b3')\n assert_raises(TypeError, np.dtype, 'h4')\n assert_raises(TypeError, np.dtype, 'I5')\n assert_raises(TypeError, np.dtype, 'e3')\n assert_raises(TypeError, np.dtype, 'f5')\n\n if np.dtype('g').itemsize == 8 or np.dtype('g').itemsize == 16:\n assert_raises(TypeError, np.dtype, 'g12')\n elif np.dtype('g').itemsize == 12:\n assert_raises(TypeError, np.dtype, 'g16')\n\n if np.dtype('l').itemsize == 8:\n assert_raises(TypeError, np.dtype, 'l4')\n assert_raises(TypeError, np.dtype, 'L4')\n else:\n assert_raises(TypeError, np.dtype, 'l8')\n assert_raises(TypeError, np.dtype, 'L8')\n\n if np.dtype('q').itemsize == 8:\n assert_raises(TypeError, np.dtype, 'q4')\n assert_raises(TypeError, np.dtype, 'Q4')\n else:\n assert_raises(TypeError, np.dtype, 'q8')\n assert_raises(TypeError, np.dtype, 'Q8')\n\n @pytest.mark.parametrize(\"dtype\",\n ['Bool', 'Complex32', 'Complex64', 'Float16', 'Float32', 'Float64',\n 'Int8', 'Int16', 'Int32', 'Int64', 'Object0', 'Timedelta64',\n 'UInt8', 'UInt16', 'UInt32', 'UInt64', 'Void0',\n \"Float128\", \"Complex128\"])\n def test_numeric_style_types_are_invalid(self, dtype):\n with assert_raises(TypeError):\n np.dtype(dtype)\n\n @pytest.mark.parametrize(\n 'value',\n ['m8', 'M8', 'datetime64', 'timedelta64',\n 'i4, (2,3)f8, f4', 'a3, 3u8, (3,4)a10',\n '>f', '<f', '=f', '|f',\n ])\n def test_dtype_bytes_str_equivalence(self, value):\n bytes_value = value.encode('ascii')\n from_bytes = np.dtype(bytes_value)\n from_str = np.dtype(value)\n assert_dtype_equal(from_bytes, from_str)\n\n def test_dtype_from_bytes(self):\n # Empty bytes object\n assert_raises(TypeError, np.dtype, 
b'')\n # Byte order indicator, but no type\n assert_raises(TypeError, np.dtype, b'|')\n\n # Single character with ordinal < NPY_NTYPES returns\n # type by index into _builtin_descrs\n assert_dtype_equal(np.dtype(bytes([0])), np.dtype('bool'))\n assert_dtype_equal(np.dtype(bytes([17])), np.dtype(object))\n\n # Single character where value is a valid type code\n assert_dtype_equal(np.dtype(b'f'), np.dtype('float32'))\n\n # Bytes with non-ascii values raise errors\n assert_raises(TypeError, np.dtype, b'\\xff')\n assert_raises(TypeError, np.dtype, b's\\xff')\n\n def test_bad_param(self):\n # Can't give a size that's too small\n assert_raises(ValueError, np.dtype,\n {'names':['f0', 'f1'],\n 'formats':['i4', 'i1'],\n 'offsets':[0, 4],\n 'itemsize':4})\n # If alignment is enabled, the alignment (4) must divide the itemsize\n assert_raises(ValueError, np.dtype,\n {'names':['f0', 'f1'],\n 'formats':['i4', 'i1'],\n 'offsets':[0, 4],\n 'itemsize':9}, align=True)\n # If alignment is enabled, the individual fields must be aligned\n assert_raises(ValueError, np.dtype,\n {'names':['f0', 'f1'],\n 'formats':['i1', 'f4'],\n 'offsets':[0, 2]}, align=True)\n\n def test_field_order_equality(self):\n x = np.dtype({'names': ['A', 'B'],\n 'formats': ['i4', 'f4'],\n 'offsets': [0, 4]})\n y = np.dtype({'names': ['B', 'A'],\n 'formats': ['f4', 'i4'],\n 'offsets': [4, 0]})\n assert_equal(x == y, False)\n # But it is currently an equivalent cast:\n assert np.can_cast(x, y, casting=\"equiv\")\n\n\nclass TestRecord:\n def test_equivalent_record(self):\n \"\"\"Test whether equivalent record dtypes hash the same.\"\"\"\n a = np.dtype([('yo', int)])\n b = np.dtype([('yo', int)])\n assert_dtype_equal(a, b)\n\n def test_different_names(self):\n # In theory, they may hash the same (collision) ?\n a = np.dtype([('yo', int)])\n b = np.dtype([('ye', int)])\n assert_dtype_not_equal(a, b)\n\n def test_different_titles(self):\n # In theory, they may hash the same (collision) ?\n a = np.dtype({'names': ['r', 'b'],\n 'formats': ['u1', 'u1'],\n 'titles': ['Red pixel', 'Blue pixel']})\n b = np.dtype({'names': ['r', 'b'],\n 'formats': ['u1', 'u1'],\n 'titles': ['RRed pixel', 'Blue pixel']})\n assert_dtype_not_equal(a, b)\n\n @pytest.mark.skipif(not HAS_REFCOUNT, reason=\"Python lacks refcounts\")\n def test_refcount_dictionary_setting(self):\n names = [\"name1\"]\n formats = [\"f8\"]\n titles = [\"t1\"]\n offsets = [0]\n d = dict(names=names, formats=formats, titles=titles, offsets=offsets)\n refcounts = {k: sys.getrefcount(i) for k, i in d.items()}\n np.dtype(d)\n refcounts_new = {k: sys.getrefcount(i) for k, i in d.items()}\n assert refcounts == refcounts_new\n\n def test_mutate(self):\n # Mutating a dtype should reset the cached hash value\n a = np.dtype([('yo', int)])\n b = np.dtype([('yo', int)])\n c = np.dtype([('ye', int)])\n assert_dtype_equal(a, b)\n assert_dtype_not_equal(a, c)\n a.names = ['ye']\n assert_dtype_equal(a, c)\n assert_dtype_not_equal(a, b)\n state = b.__reduce__()[2]\n a.__setstate__(state)\n assert_dtype_equal(a, b)\n assert_dtype_not_equal(a, c)\n\n def test_not_lists(self):\n \"\"\"Test if an appropriate exception is raised when passing bad values to\n the dtype constructor.\n \"\"\"\n assert_raises(TypeError, np.dtype,\n dict(names={'A', 'B'}, formats=['f8', 'i4']))\n assert_raises(TypeError, np.dtype,\n dict(names=['A', 'B'], formats={'f8', 'i4'}))\n\n def test_aligned_size(self):\n # Check that structured dtypes get padded to an aligned size\n dt = np.dtype('i4, i1', align=True)\n assert_equal(dt.itemsize, 
8)\n dt = np.dtype([('f0', 'i4'), ('f1', 'i1')], align=True)\n assert_equal(dt.itemsize, 8)\n dt = np.dtype({'names':['f0', 'f1'],\n 'formats':['i4', 'u1'],\n 'offsets':[0, 4]}, align=True)\n assert_equal(dt.itemsize, 8)\n dt = np.dtype({'f0': ('i4', 0), 'f1':('u1', 4)}, align=True)\n assert_equal(dt.itemsize, 8)\n # Nesting should preserve that alignment\n dt1 = np.dtype([('f0', 'i4'),\n ('f1', [('f1', 'i1'), ('f2', 'i4'), ('f3', 'i1')]),\n ('f2', 'i1')], align=True)\n assert_equal(dt1.itemsize, 20)\n dt2 = np.dtype({'names':['f0', 'f1', 'f2'],\n 'formats':['i4',\n [('f1', 'i1'), ('f2', 'i4'), ('f3', 'i1')],\n 'i1'],\n 'offsets':[0, 4, 16]}, align=True)\n assert_equal(dt2.itemsize, 20)\n dt3 = np.dtype({'f0': ('i4', 0),\n 'f1': ([('f1', 'i1'), ('f2', 'i4'), ('f3', 'i1')], 4),\n 'f2': ('i1', 16)}, align=True)\n assert_equal(dt3.itemsize, 20)\n assert_equal(dt1, dt2)\n assert_equal(dt2, dt3)\n # Nesting should preserve packing\n dt1 = np.dtype([('f0', 'i4'),\n ('f1', [('f1', 'i1'), ('f2', 'i4'), ('f3', 'i1')]),\n ('f2', 'i1')], align=False)\n assert_equal(dt1.itemsize, 11)\n dt2 = np.dtype({'names':['f0', 'f1', 'f2'],\n 'formats':['i4',\n [('f1', 'i1'), ('f2', 'i4'), ('f3', 'i1')],\n 'i1'],\n 'offsets':[0, 4, 10]}, align=False)\n assert_equal(dt2.itemsize, 11)\n dt3 = np.dtype({'f0': ('i4', 0),\n 'f1': ([('f1', 'i1'), ('f2', 'i4'), ('f3', 'i1')], 4),\n 'f2': ('i1', 10)}, align=False)\n assert_equal(dt3.itemsize, 11)\n assert_equal(dt1, dt2)\n assert_equal(dt2, dt3)\n # Array of subtype should preserve alignment\n dt1 = np.dtype([('a', '|i1'),\n ('b', [('f0', '<i2'),\n ('f1', '<f4')], 2)], align=True)\n assert_equal(dt1.descr, [('a', '|i1'), ('', '|V3'),\n ('b', [('f0', '<i2'), ('', '|V2'),\n ('f1', '<f4')], (2,))])\n\n def test_union_struct(self):\n # Should be able to create union dtypes\n dt = np.dtype({'names':['f0', 'f1', 'f2'], 'formats':['<u4', '<u2', '<u2'],\n 'offsets':[0, 0, 2]}, align=True)\n assert_equal(dt.itemsize, 4)\n a = np.array([3], dtype='<u4').view(dt)\n a['f1'] = 10\n a['f2'] = 36\n assert_equal(a['f0'], 10 + 36*256*256)\n # Should be able to specify fields out of order\n dt = np.dtype({'names':['f0', 'f1', 'f2'], 'formats':['<u4', '<u2', '<u2'],\n 'offsets':[4, 0, 2]}, align=True)\n assert_equal(dt.itemsize, 8)\n # field name should not matter: assignment is by position\n dt2 = np.dtype({'names':['f2', 'f0', 'f1'],\n 'formats':['<u4', '<u2', '<u2'],\n 'offsets':[4, 0, 2]}, align=True)\n vals = [(0, 1, 2), (3, -1, 4)]\n vals2 = [(0, 1, 2), (3, -1, 4)]\n a = np.array(vals, dt)\n b = np.array(vals2, dt2)\n assert_equal(a.astype(dt2), b)\n assert_equal(b.astype(dt), a)\n assert_equal(a.view(dt2), b)\n assert_equal(b.view(dt), a)\n # Should not be able to overlap objects with other types\n assert_raises(TypeError, np.dtype,\n {'names':['f0', 'f1'],\n 'formats':['O', 'i1'],\n 'offsets':[0, 2]})\n assert_raises(TypeError, np.dtype,\n {'names':['f0', 'f1'],\n 'formats':['i4', 'O'],\n 'offsets':[0, 3]})\n assert_raises(TypeError, np.dtype,\n {'names':['f0', 'f1'],\n 'formats':[[('a', 'O')], 'i1'],\n 'offsets':[0, 2]})\n assert_raises(TypeError, np.dtype,\n {'names':['f0', 'f1'],\n 'formats':['i4', [('a', 'O')]],\n 'offsets':[0, 3]})\n # Out of order should still be ok, however\n dt = np.dtype({'names':['f0', 'f1'],\n 'formats':['i1', 'O'],\n 'offsets':[np.dtype('intp').itemsize, 0]})\n\n @pytest.mark.parametrize([\"obj\", \"dtype\", \"expected\"],\n [([], (\"(2)f4,\"), np.empty((0, 2), dtype=\"f4\")),\n (3, \"(3)f4,\", [3, 3, 3]),\n (np.float64(2), \"(2)f4,\", [2, 2]),\n ([((0, 
1), (1, 2)), ((2,),)], '(2,2)f4', None),\n ([\"1\", \"2\"], \"(2)i,\", None)])\n def test_subarray_list(self, obj, dtype, expected):\n dtype = np.dtype(dtype)\n res = np.array(obj, dtype=dtype)\n\n if expected is None:\n # iterate the 1-d list to fill the array\n expected = np.empty(len(obj), dtype=dtype)\n for i in range(len(expected)):\n expected[i] = obj[i]\n\n assert_array_equal(res, expected)\n\n def test_comma_datetime(self):\n dt = np.dtype('M8[D],datetime64[Y],i8')\n assert_equal(dt, np.dtype([('f0', 'M8[D]'),\n ('f1', 'datetime64[Y]'),\n ('f2', 'i8')]))\n\n def test_from_dictproxy(self):\n # Tests for PR #5920\n dt = np.dtype({'names': ['a', 'b'], 'formats': ['i4', 'f4']})\n assert_dtype_equal(dt, np.dtype(dt.fields))\n dt2 = np.dtype((np.void, dt.fields))\n assert_equal(dt2.fields, dt.fields)\n\n def test_from_dict_with_zero_width_field(self):\n # Regression test for #6430 / #2196\n dt = np.dtype([('val1', np.float32, (0,)), ('val2', int)])\n dt2 = np.dtype({'names': ['val1', 'val2'],\n 'formats': [(np.float32, (0,)), int]})\n\n assert_dtype_equal(dt, dt2)\n assert_equal(dt.fields['val1'][0].itemsize, 0)\n assert_equal(dt.itemsize, dt.fields['val2'][0].itemsize)\n\n def test_bool_commastring(self):\n d = np.dtype('?,?,?') # raises?\n assert_equal(len(d.names), 3)\n for n in d.names:\n assert_equal(d.fields[n][0], np.dtype('?'))\n\n def test_nonint_offsets(self):\n # gh-8059\n def make_dtype(off):\n return np.dtype({'names': ['A'], 'formats': ['i4'],\n 'offsets': [off]})\n\n assert_raises(TypeError, make_dtype, 'ASD')\n assert_raises(OverflowError, make_dtype, 2**70)\n assert_raises(TypeError, make_dtype, 2.3)\n assert_raises(ValueError, make_dtype, -10)\n\n # no errors here:\n dt = make_dtype(np.uint32(0))\n np.zeros(1, dtype=dt)[0].item()\n\n def test_fields_by_index(self):\n dt = np.dtype([('a', np.int8), ('b', np.float32, 3)])\n assert_dtype_equal(dt[0], np.dtype(np.int8))\n assert_dtype_equal(dt[1], np.dtype((np.float32, 3)))\n assert_dtype_equal(dt[-1], dt[1])\n assert_dtype_equal(dt[-2], dt[0])\n assert_raises(IndexError, lambda: dt[-3])\n\n assert_raises(TypeError, operator.getitem, dt, 3.0)\n\n assert_equal(dt[1], dt[np.int8(1)])\n\n @pytest.mark.parametrize('align_flag',[False, True])\n def test_multifield_index(self, align_flag):\n # indexing with a list produces subfields\n # the align flag should be preserved\n dt = np.dtype([\n (('title', 'col1'), '<U20'), ('A', '<f8'), ('B', '<f8')\n ], align=align_flag)\n\n dt_sub = dt[['B', 'col1']]\n assert_equal(\n dt_sub,\n np.dtype({\n 'names': ['B', 'col1'],\n 'formats': ['<f8', '<U20'],\n 'offsets': [88, 0],\n 'titles': [None, 'title'],\n 'itemsize': 96\n })\n )\n assert_equal(dt_sub.isalignedstruct, align_flag)\n\n dt_sub = dt[['B']]\n assert_equal(\n dt_sub,\n np.dtype({\n 'names': ['B'],\n 'formats': ['<f8'],\n 'offsets': [88],\n 'itemsize': 96\n })\n )\n assert_equal(dt_sub.isalignedstruct, align_flag)\n\n dt_sub = dt[[]]\n assert_equal(\n dt_sub,\n np.dtype({\n 'names': [],\n 'formats': [],\n 'offsets': [],\n 'itemsize': 96\n })\n )\n assert_equal(dt_sub.isalignedstruct, align_flag)\n\n assert_raises(TypeError, operator.getitem, dt, ())\n assert_raises(TypeError, operator.getitem, dt, [1, 2, 3])\n assert_raises(TypeError, operator.getitem, dt, ['col1', 2])\n assert_raises(KeyError, operator.getitem, dt, ['fake'])\n assert_raises(KeyError, operator.getitem, dt, ['title'])\n assert_raises(ValueError, operator.getitem, dt, ['col1', 'col1'])\n\n def test_partial_dict(self):\n # 'names' is missing\n assert_raises(ValueError, 
np.dtype,\n {'formats': ['i4', 'i4'], 'f0': ('i4', 0), 'f1':('i4', 4)})\n\n def test_fieldless_views(self):\n a = np.zeros(2, dtype={'names':[], 'formats':[], 'offsets':[],\n 'itemsize':8})\n assert_raises(ValueError, a.view, np.dtype([]))\n\n d = np.dtype((np.dtype([]), 10))\n assert_equal(d.shape, (10,))\n assert_equal(d.itemsize, 0)\n assert_equal(d.base, np.dtype([]))\n\n arr = np.fromiter((() for i in range(10)), [])\n assert_equal(arr.dtype, np.dtype([]))\n assert_raises(ValueError, np.frombuffer, b'', dtype=[])\n assert_equal(np.frombuffer(b'', dtype=[], count=2),\n np.empty(2, dtype=[]))\n\n assert_raises(ValueError, np.dtype, ([], 'f8'))\n assert_raises(ValueError, np.zeros(1, dtype='i4').view, [])\n\n assert_equal(np.zeros(2, dtype=[]) == np.zeros(2, dtype=[]),\n np.ones(2, dtype=bool))\n\n assert_equal(np.zeros((1, 2), dtype=[]) == a,\n np.ones((1, 2), dtype=bool))\n\n\nclass TestSubarray:\n def test_single_subarray(self):\n a = np.dtype((int, (2)))\n b = np.dtype((int, (2,)))\n assert_dtype_equal(a, b)\n\n assert_equal(type(a.subdtype[1]), tuple)\n assert_equal(type(b.subdtype[1]), tuple)\n\n def test_equivalent_record(self):\n \"\"\"Test whether equivalent subarray dtypes hash the same.\"\"\"\n a = np.dtype((int, (2, 3)))\n b = np.dtype((int, (2, 3)))\n assert_dtype_equal(a, b)\n\n def test_nonequivalent_record(self):\n \"\"\"Test whether different subarray dtypes hash differently.\"\"\"\n a = np.dtype((int, (2, 3)))\n b = np.dtype((int, (3, 2)))\n assert_dtype_not_equal(a, b)\n\n a = np.dtype((int, (2, 3)))\n b = np.dtype((int, (2, 2)))\n assert_dtype_not_equal(a, b)\n\n a = np.dtype((int, (1, 2, 3)))\n b = np.dtype((int, (1, 2)))\n assert_dtype_not_equal(a, b)\n\n def test_shape_equal(self):\n \"\"\"Test some data types that are equal\"\"\"\n assert_dtype_equal(np.dtype('f8'), np.dtype(('f8', tuple())))\n # FutureWarning during deprecation period; after it is passed this\n # should instead check that \"(1)f8\" == \"1f8\" == (\"f8\", 1).\n with pytest.warns(FutureWarning):\n assert_dtype_equal(np.dtype('f8'), np.dtype(('f8', 1)))\n assert_dtype_equal(np.dtype((int, 2)), np.dtype((int, (2,))))\n assert_dtype_equal(np.dtype(('<f4', (3, 2))), np.dtype(('<f4', (3, 2))))\n d = ([('a', 'f4', (1, 2)), ('b', 'f8', (3, 1))], (3, 2))\n assert_dtype_equal(np.dtype(d), np.dtype(d))\n\n def test_shape_simple(self):\n \"\"\"Test some simple cases that shouldn't be equal\"\"\"\n assert_dtype_not_equal(np.dtype('f8'), np.dtype(('f8', (1,))))\n assert_dtype_not_equal(np.dtype(('f8', (1,))), np.dtype(('f8', (1, 1))))\n assert_dtype_not_equal(np.dtype(('f4', (3, 2))), np.dtype(('f4', (2, 3))))\n\n def test_shape_monster(self):\n \"\"\"Test some more complicated cases that shouldn't be equal\"\"\"\n assert_dtype_not_equal(\n np.dtype(([('a', 'f4', (2, 1)), ('b', 'f8', (1, 3))], (2, 2))),\n np.dtype(([('a', 'f4', (1, 2)), ('b', 'f8', (1, 3))], (2, 2))))\n assert_dtype_not_equal(\n np.dtype(([('a', 'f4', (2, 1)), ('b', 'f8', (1, 3))], (2, 2))),\n np.dtype(([('a', 'f4', (2, 1)), ('b', 'i8', (1, 3))], (2, 2))))\n assert_dtype_not_equal(\n np.dtype(([('a', 'f4', (2, 1)), ('b', 'f8', (1, 3))], (2, 2))),\n np.dtype(([('e', 'f8', (1, 3)), ('d', 'f4', (2, 1))], (2, 2))))\n assert_dtype_not_equal(\n np.dtype(([('a', [('a', 'i4', 6)], (2, 1)), ('b', 'f8', (1, 3))], (2, 2))),\n np.dtype(([('a', [('a', 'u4', 6)], (2, 1)), ('b', 'f8', (1, 3))], (2, 2))))\n\n def test_shape_sequence(self):\n # Any sequence of integers should work as shape, but the result\n # should be a tuple (immutable) of base type 
integers.\n a = np.array([1, 2, 3], dtype=np.int16)\n l = [1, 2, 3]\n # Array gets converted\n dt = np.dtype([('a', 'f4', a)])\n assert_(isinstance(dt['a'].shape, tuple))\n assert_(isinstance(dt['a'].shape[0], int))\n # List gets converted\n dt = np.dtype([('a', 'f4', l)])\n assert_(isinstance(dt['a'].shape, tuple))\n #\n\n class IntLike:\n def __index__(self):\n return 3\n\n def __int__(self):\n # (a PyNumber_Check fails without __int__)\n return 3\n\n dt = np.dtype([('a', 'f4', IntLike())])\n assert_(isinstance(dt['a'].shape, tuple))\n assert_(isinstance(dt['a'].shape[0], int))\n dt = np.dtype([('a', 'f4', (IntLike(),))])\n assert_(isinstance(dt['a'].shape, tuple))\n assert_(isinstance(dt['a'].shape[0], int))\n\n def test_shape_matches_ndim(self):\n dt = np.dtype([('a', 'f4', ())])\n assert_equal(dt['a'].shape, ())\n assert_equal(dt['a'].ndim, 0)\n\n dt = np.dtype([('a', 'f4')])\n assert_equal(dt['a'].shape, ())\n assert_equal(dt['a'].ndim, 0)\n\n dt = np.dtype([('a', 'f4', 4)])\n assert_equal(dt['a'].shape, (4,))\n assert_equal(dt['a'].ndim, 1)\n\n dt = np.dtype([('a', 'f4', (1, 2, 3))])\n assert_equal(dt['a'].shape, (1, 2, 3))\n assert_equal(dt['a'].ndim, 3)\n\n def test_shape_invalid(self):\n # Check that the shape is valid.\n max_int = np.iinfo(np.intc).max\n max_intp = np.iinfo(np.intp).max\n # Too large values (the datatype is part of this)\n assert_raises(ValueError, np.dtype, [('a', 'f4', max_int // 4 + 1)])\n assert_raises(ValueError, np.dtype, [('a', 'f4', max_int + 1)])\n assert_raises(ValueError, np.dtype, [('a', 'f4', (max_int, 2))])\n # Takes a different code path (fails earlier:\n assert_raises(ValueError, np.dtype, [('a', 'f4', max_intp + 1)])\n # Negative values\n assert_raises(ValueError, np.dtype, [('a', 'f4', -1)])\n assert_raises(ValueError, np.dtype, [('a', 'f4', (-1, -1))])\n\n def test_alignment(self):\n #Check that subarrays are aligned\n t1 = np.dtype('(1,)i4', align=True)\n t2 = np.dtype('2i4', align=True)\n assert_equal(t1.alignment, t2.alignment)\n\n\ndef iter_struct_object_dtypes():\n \"\"\"\n Iterates over a few complex dtypes and object pattern which\n fill the array with a given object (defaults to a singleton).\n\n Yields\n ------\n dtype : dtype\n pattern : tuple\n Structured tuple for use with `np.array`.\n count : int\n Number of objects stored in the dtype.\n singleton : object\n A singleton object. 
The returned pattern is constructed so that\n all objects inside the datatype are set to the singleton.\n \"\"\"\n obj = object()\n\n dt = np.dtype([('b', 'O', (2, 3))])\n p = ([[obj] * 3] * 2,)\n yield pytest.param(dt, p, 6, obj, id=\"<subarray>\")\n\n dt = np.dtype([('a', 'i4'), ('b', 'O', (2, 3))])\n p = (0, [[obj] * 3] * 2)\n yield pytest.param(dt, p, 6, obj, id=\"<subarray in field>\")\n\n dt = np.dtype([('a', 'i4'),\n ('b', [('ba', 'O'), ('bb', 'i1')], (2, 3))])\n p = (0, [[(obj, 0)] * 3] * 2)\n yield pytest.param(dt, p, 6, obj, id=\"<structured subarray 1>\")\n\n dt = np.dtype([('a', 'i4'),\n ('b', [('ba', 'O'), ('bb', 'O')], (2, 3))])\n p = (0, [[(obj, obj)] * 3] * 2)\n yield pytest.param(dt, p, 12, obj, id=\"<structured subarray 2>\")\n\n\[email protected](not HAS_REFCOUNT, reason=\"Python lacks refcounts\")\nclass TestStructuredObjectRefcounting:\n \"\"\"These tests cover various uses of complicated structured types which\n include objects and thus require reference counting.\n \"\"\"\n @pytest.mark.parametrize(['dt', 'pat', 'count', 'singleton'],\n iter_struct_object_dtypes())\n @pytest.mark.parametrize([\"creation_func\", \"creation_obj\"], [\n pytest.param(np.empty, None,\n # None is probably used for too many things\n marks=pytest.mark.skip(\"unreliable due to python's behaviour\")),\n (np.ones, 1),\n (np.zeros, 0)])\n def test_structured_object_create_delete(self, dt, pat, count, singleton,\n creation_func, creation_obj):\n \"\"\"Structured object reference counting in creation and deletion\"\"\"\n # The test assumes that 0, 1, and None are singletons.\n gc.collect()\n before = sys.getrefcount(creation_obj)\n arr = creation_func(3, dt)\n\n now = sys.getrefcount(creation_obj)\n assert now - before == count * 3\n del arr\n now = sys.getrefcount(creation_obj)\n assert now == before\n\n @pytest.mark.parametrize(['dt', 'pat', 'count', 'singleton'],\n iter_struct_object_dtypes())\n def test_structured_object_item_setting(self, dt, pat, count, singleton):\n \"\"\"Structured object reference counting for simple item setting\"\"\"\n one = 1\n\n gc.collect()\n before = sys.getrefcount(singleton)\n arr = np.array([pat] * 3, dt)\n assert sys.getrefcount(singleton) - before == count * 3\n # Fill with `1` and check that it was replaced correctly:\n before2 = sys.getrefcount(one)\n arr[...] 
= one\n after2 = sys.getrefcount(one)\n assert after2 - before2 == count * 3\n del arr\n gc.collect()\n assert sys.getrefcount(one) == before2\n assert sys.getrefcount(singleton) == before\n\n @pytest.mark.parametrize(['dt', 'pat', 'count', 'singleton'],\n iter_struct_object_dtypes())\n @pytest.mark.parametrize(\n ['shape', 'index', 'items_changed'],\n [((3,), ([0, 2],), 2),\n ((3, 2), ([0, 2], slice(None)), 4),\n ((3, 2), ([0, 2], [1]), 2),\n ((3,), ([True, False, True]), 2)])\n def test_structured_object_indexing(self, shape, index, items_changed,\n dt, pat, count, singleton):\n \"\"\"Structured object reference counting for advanced indexing.\"\"\"\n zero = 0\n one = 1\n\n arr = np.zeros(shape, dt)\n\n gc.collect()\n before_zero = sys.getrefcount(zero)\n before_one = sys.getrefcount(one)\n # Test item getting:\n part = arr[index]\n after_zero = sys.getrefcount(zero)\n assert after_zero - before_zero == count * items_changed\n del part\n # Test item setting:\n arr[index] = one\n gc.collect()\n after_zero = sys.getrefcount(zero)\n after_one = sys.getrefcount(one)\n assert before_zero - after_zero == count * items_changed\n assert after_one - before_one == count * items_changed\n\n @pytest.mark.parametrize(['dt', 'pat', 'count', 'singleton'],\n iter_struct_object_dtypes())\n def test_structured_object_take_and_repeat(self, dt, pat, count, singleton):\n \"\"\"Structured object reference counting for specialized functions.\n The older functions such as take and repeat use different code paths\n then item setting (when writing this).\n \"\"\"\n indices = [0, 1]\n\n arr = np.array([pat] * 3, dt)\n gc.collect()\n before = sys.getrefcount(singleton)\n res = arr.take(indices)\n after = sys.getrefcount(singleton)\n assert after - before == count * 2\n new = res.repeat(10)\n gc.collect()\n after_repeat = sys.getrefcount(singleton)\n assert after_repeat - after == count * 2 * 10\n\n\nclass TestStructuredDtypeSparseFields:\n \"\"\"Tests subarray fields which contain sparse dtypes so that\n not all memory is used by the dtype work. Such dtype's should\n leave the underlying memory unchanged.\n \"\"\"\n dtype = np.dtype([('a', {'names':['aa', 'ab'], 'formats':['f', 'f'],\n 'offsets':[0, 4]}, (2, 3))])\n sparse_dtype = np.dtype([('a', {'names':['ab'], 'formats':['f'],\n 'offsets':[4]}, (2, 3))])\n\n @pytest.mark.xfail(reason=\"inaccessible data is changed see gh-12686.\")\n @pytest.mark.valgrind_error(reason=\"reads from uninitialized buffers.\")\n def test_sparse_field_assignment(self):\n arr = np.zeros(3, self.dtype)\n sparse_arr = arr.view(self.sparse_dtype)\n\n sparse_arr[...] 
= np.finfo(np.float32).max\n # dtype is reduced when accessing the field, so shape is (3, 2, 3):\n assert_array_equal(arr[\"a\"][\"aa\"], np.zeros((3, 2, 3)))\n\n def test_sparse_field_assignment_fancy(self):\n # Fancy assignment goes to the copyswap function for complex types:\n arr = np.zeros(3, self.dtype)\n sparse_arr = arr.view(self.sparse_dtype)\n\n sparse_arr[[0, 1, 2]] = np.finfo(np.float32).max\n # dtype is reduced when accessing the field, so shape is (3, 2, 3):\n assert_array_equal(arr[\"a\"][\"aa\"], np.zeros((3, 2, 3)))\n\n\nclass TestMonsterType:\n \"\"\"Test deeply nested subtypes.\"\"\"\n\n def test1(self):\n simple1 = np.dtype({'names': ['r', 'b'], 'formats': ['u1', 'u1'],\n 'titles': ['Red pixel', 'Blue pixel']})\n a = np.dtype([('yo', int), ('ye', simple1),\n ('yi', np.dtype((int, (3, 2))))])\n b = np.dtype([('yo', int), ('ye', simple1),\n ('yi', np.dtype((int, (3, 2))))])\n assert_dtype_equal(a, b)\n\n c = np.dtype([('yo', int), ('ye', simple1),\n ('yi', np.dtype((a, (3, 2))))])\n d = np.dtype([('yo', int), ('ye', simple1),\n ('yi', np.dtype((a, (3, 2))))])\n assert_dtype_equal(c, d)\n\n def test_list_recursion(self):\n l = list()\n l.append(('f', l))\n with pytest.raises(RecursionError):\n np.dtype(l)\n\n def test_tuple_recursion(self):\n d = np.int32\n for i in range(100000):\n d = (d, (1,))\n with pytest.raises(RecursionError):\n np.dtype(d)\n\n def test_dict_recursion(self):\n d = dict(names=['self'], formats=[None], offsets=[0])\n d['formats'][0] = d\n with pytest.raises(RecursionError):\n np.dtype(d)\n\n\nclass TestMetadata:\n def test_no_metadata(self):\n d = np.dtype(int)\n assert_(d.metadata is None)\n\n def test_metadata_takes_dict(self):\n d = np.dtype(int, metadata={'datum': 1})\n assert_(d.metadata == {'datum': 1})\n\n def test_metadata_rejects_nondict(self):\n assert_raises(TypeError, np.dtype, int, metadata='datum')\n assert_raises(TypeError, np.dtype, int, metadata=1)\n assert_raises(TypeError, np.dtype, int, metadata=None)\n\n def test_nested_metadata(self):\n d = np.dtype([('a', np.dtype(int, metadata={'datum': 1}))])\n assert_(d['a'].metadata == {'datum': 1})\n\n def test_base_metadata_copied(self):\n d = np.dtype((np.void, np.dtype('i4,i4', metadata={'datum': 1})))\n assert_(d.metadata == {'datum': 1})\n\nclass TestString:\n def test_complex_dtype_str(self):\n dt = np.dtype([('top', [('tiles', ('>f4', (64, 64)), (1,)),\n ('rtile', '>f4', (64, 36))], (3,)),\n ('bottom', [('bleft', ('>f4', (8, 64)), (1,)),\n ('bright', '>f4', (8, 36))])])\n assert_equal(str(dt),\n \"[('top', [('tiles', ('>f4', (64, 64)), (1,)), \"\n \"('rtile', '>f4', (64, 36))], (3,)), \"\n \"('bottom', [('bleft', ('>f4', (8, 64)), (1,)), \"\n \"('bright', '>f4', (8, 36))])]\")\n\n # If the sticky aligned flag is set to True, it makes the\n # str() function use a dict representation with an 'aligned' flag\n dt = np.dtype([('top', [('tiles', ('>f4', (64, 64)), (1,)),\n ('rtile', '>f4', (64, 36))],\n (3,)),\n ('bottom', [('bleft', ('>f4', (8, 64)), (1,)),\n ('bright', '>f4', (8, 36))])],\n align=True)\n assert_equal(str(dt),\n \"{'names':['top','bottom'], \"\n \"'formats':[([('tiles', ('>f4', (64, 64)), (1,)), \"\n \"('rtile', '>f4', (64, 36))], (3,)),\"\n \"[('bleft', ('>f4', (8, 64)), (1,)), \"\n \"('bright', '>f4', (8, 36))]], \"\n \"'offsets':[0,76800], \"\n \"'itemsize':80000, \"\n \"'aligned':True}\")\n assert_equal(np.dtype(eval(str(dt))), dt)\n\n dt = np.dtype({'names': ['r', 'g', 'b'], 'formats': ['u1', 'u1', 'u1'],\n 'offsets': [0, 1, 2],\n 'titles': ['Red pixel', 'Green 
pixel', 'Blue pixel']})\n assert_equal(str(dt),\n \"[(('Red pixel', 'r'), 'u1'), \"\n \"(('Green pixel', 'g'), 'u1'), \"\n \"(('Blue pixel', 'b'), 'u1')]\")\n\n dt = np.dtype({'names': ['rgba', 'r', 'g', 'b'],\n 'formats': ['<u4', 'u1', 'u1', 'u1'],\n 'offsets': [0, 0, 1, 2],\n 'titles': ['Color', 'Red pixel',\n 'Green pixel', 'Blue pixel']})\n assert_equal(str(dt),\n \"{'names':['rgba','r','g','b'],\"\n \" 'formats':['<u4','u1','u1','u1'],\"\n \" 'offsets':[0,0,1,2],\"\n \" 'titles':['Color','Red pixel',\"\n \"'Green pixel','Blue pixel'],\"\n \" 'itemsize':4}\")\n\n dt = np.dtype({'names': ['r', 'b'], 'formats': ['u1', 'u1'],\n 'offsets': [0, 2],\n 'titles': ['Red pixel', 'Blue pixel']})\n assert_equal(str(dt),\n \"{'names':['r','b'],\"\n \" 'formats':['u1','u1'],\"\n \" 'offsets':[0,2],\"\n \" 'titles':['Red pixel','Blue pixel'],\"\n \" 'itemsize':3}\")\n\n dt = np.dtype([('a', '<m8[D]'), ('b', '<M8[us]')])\n assert_equal(str(dt),\n \"[('a', '<m8[D]'), ('b', '<M8[us]')]\")\n\n def test_repr_structured(self):\n dt = np.dtype([('top', [('tiles', ('>f4', (64, 64)), (1,)),\n ('rtile', '>f4', (64, 36))], (3,)),\n ('bottom', [('bleft', ('>f4', (8, 64)), (1,)),\n ('bright', '>f4', (8, 36))])])\n assert_equal(repr(dt),\n \"dtype([('top', [('tiles', ('>f4', (64, 64)), (1,)), \"\n \"('rtile', '>f4', (64, 36))], (3,)), \"\n \"('bottom', [('bleft', ('>f4', (8, 64)), (1,)), \"\n \"('bright', '>f4', (8, 36))])])\")\n\n dt = np.dtype({'names': ['r', 'g', 'b'], 'formats': ['u1', 'u1', 'u1'],\n 'offsets': [0, 1, 2],\n 'titles': ['Red pixel', 'Green pixel', 'Blue pixel']},\n align=True)\n assert_equal(repr(dt),\n \"dtype([(('Red pixel', 'r'), 'u1'), \"\n \"(('Green pixel', 'g'), 'u1'), \"\n \"(('Blue pixel', 'b'), 'u1')], align=True)\")\n\n def test_repr_structured_not_packed(self):\n dt = np.dtype({'names': ['rgba', 'r', 'g', 'b'],\n 'formats': ['<u4', 'u1', 'u1', 'u1'],\n 'offsets': [0, 0, 1, 2],\n 'titles': ['Color', 'Red pixel',\n 'Green pixel', 'Blue pixel']}, align=True)\n assert_equal(repr(dt),\n \"dtype({'names':['rgba','r','g','b'],\"\n \" 'formats':['<u4','u1','u1','u1'],\"\n \" 'offsets':[0,0,1,2],\"\n \" 'titles':['Color','Red pixel',\"\n \"'Green pixel','Blue pixel'],\"\n \" 'itemsize':4}, align=True)\")\n\n dt = np.dtype({'names': ['r', 'b'], 'formats': ['u1', 'u1'],\n 'offsets': [0, 2],\n 'titles': ['Red pixel', 'Blue pixel'],\n 'itemsize': 4})\n assert_equal(repr(dt),\n \"dtype({'names':['r','b'], \"\n \"'formats':['u1','u1'], \"\n \"'offsets':[0,2], \"\n \"'titles':['Red pixel','Blue pixel'], \"\n \"'itemsize':4})\")\n\n def test_repr_structured_datetime(self):\n dt = np.dtype([('a', '<M8[D]'), ('b', '<m8[us]')])\n assert_equal(repr(dt),\n \"dtype([('a', '<M8[D]'), ('b', '<m8[us]')])\")\n\n def test_repr_str_subarray(self):\n dt = np.dtype(('<i2', (1,)))\n assert_equal(repr(dt), \"dtype(('<i2', (1,)))\")\n assert_equal(str(dt), \"('<i2', (1,))\")\n\n def test_base_dtype_with_object_type(self):\n # Issue gh-2798, should not error.\n np.array(['a'], dtype=\"O\").astype((\"O\", [(\"name\", \"O\")]))\n\n def test_empty_string_to_object(self):\n # Pull request #4722\n np.array([\"\", \"\"]).astype(object)\n\n def test_void_subclass_unsized(self):\n dt = np.dtype(np.record)\n assert_equal(repr(dt), \"dtype('V')\")\n assert_equal(str(dt), '|V0')\n assert_equal(dt.name, 'record')\n\n def test_void_subclass_sized(self):\n dt = np.dtype((np.record, 2))\n assert_equal(repr(dt), \"dtype('V2')\")\n assert_equal(str(dt), '|V2')\n assert_equal(dt.name, 'record16')\n\n def 
test_void_subclass_fields(self):\n dt = np.dtype((np.record, [('a', '<u2')]))\n assert_equal(repr(dt), \"dtype((numpy.record, [('a', '<u2')]))\")\n assert_equal(str(dt), \"(numpy.record, [('a', '<u2')])\")\n assert_equal(dt.name, 'record16')\n\n\nclass TestDtypeAttributeDeletion:\n\n def test_dtype_non_writable_attributes_deletion(self):\n dt = np.dtype(np.double)\n attr = [\"subdtype\", \"descr\", \"str\", \"name\", \"base\", \"shape\",\n \"isbuiltin\", \"isnative\", \"isalignedstruct\", \"fields\",\n \"metadata\", \"hasobject\"]\n\n for s in attr:\n assert_raises(AttributeError, delattr, dt, s)\n\n def test_dtype_writable_attributes_deletion(self):\n dt = np.dtype(np.double)\n attr = [\"names\"]\n for s in attr:\n assert_raises(AttributeError, delattr, dt, s)\n\n\nclass TestDtypeAttributes:\n def test_descr_has_trailing_void(self):\n # see gh-6359\n dtype = np.dtype({\n 'names': ['A', 'B'],\n 'formats': ['f4', 'f4'],\n 'offsets': [0, 8],\n 'itemsize': 16})\n new_dtype = np.dtype(dtype.descr)\n assert_equal(new_dtype.itemsize, 16)\n\n def test_name_dtype_subclass(self):\n # Ticket #4357\n class user_def_subcls(np.void):\n pass\n assert_equal(np.dtype(user_def_subcls).name, 'user_def_subcls')\n\n\nclass TestPickling:\n\n def check_pickling(self, dtype):\n for proto in range(pickle.HIGHEST_PROTOCOL + 1):\n pickled = pickle.loads(pickle.dumps(dtype, proto))\n assert_equal(pickled, dtype)\n assert_equal(pickled.descr, dtype.descr)\n if dtype.metadata is not None:\n assert_equal(pickled.metadata, dtype.metadata)\n # Check the reconstructed dtype is functional\n x = np.zeros(3, dtype=dtype)\n y = np.zeros(3, dtype=pickled)\n assert_equal(x, y)\n assert_equal(x[0], y[0])\n\n @pytest.mark.parametrize('t', [int, float, complex, np.int32, str, object,\n np.compat.unicode, bool])\n def test_builtin(self, t):\n self.check_pickling(np.dtype(t))\n\n def test_structured(self):\n dt = np.dtype(([('a', '>f4', (2, 1)), ('b', '<f8', (1, 3))], (2, 2)))\n self.check_pickling(dt)\n\n def test_structured_aligned(self):\n dt = np.dtype('i4, i1', align=True)\n self.check_pickling(dt)\n\n def test_structured_unaligned(self):\n dt = np.dtype('i4, i1', align=False)\n self.check_pickling(dt)\n\n def test_structured_padded(self):\n dt = np.dtype({\n 'names': ['A', 'B'],\n 'formats': ['f4', 'f4'],\n 'offsets': [0, 8],\n 'itemsize': 16})\n self.check_pickling(dt)\n\n def test_structured_titles(self):\n dt = np.dtype({'names': ['r', 'b'],\n 'formats': ['u1', 'u1'],\n 'titles': ['Red pixel', 'Blue pixel']})\n self.check_pickling(dt)\n\n @pytest.mark.parametrize('base', ['m8', 'M8'])\n @pytest.mark.parametrize('unit', ['', 'Y', 'M', 'W', 'D', 'h', 'm', 's',\n 'ms', 'us', 'ns', 'ps', 'fs', 'as'])\n def test_datetime(self, base, unit):\n dt = np.dtype('%s[%s]' % (base, unit) if unit else base)\n self.check_pickling(dt)\n if unit:\n dt = np.dtype('%s[7%s]' % (base, unit))\n self.check_pickling(dt)\n\n def test_metadata(self):\n dt = np.dtype(int, metadata={'datum': 1})\n self.check_pickling(dt)\n\n\ndef test_rational_dtype():\n # test for bug gh-5719\n a = np.array([1111], dtype=rational).astype\n assert_raises(OverflowError, a, 'int8')\n\n # test that dtype detection finds user-defined types\n x = rational(1)\n assert_equal(np.array([x,x]).dtype, np.dtype(rational))\n\n\ndef test_dtypes_are_true():\n # test for gh-6294\n assert bool(np.dtype('f8'))\n assert bool(np.dtype('i8'))\n assert bool(np.dtype([('a', 'i8'), ('b', 'f4')]))\n\n\ndef test_invalid_dtype_string():\n # test for gh-10440\n assert_raises(TypeError, np.dtype, 
'f8,i8,[f8,i8]')\n assert_raises(TypeError, np.dtype, u'Fl\\xfcgel')\n\n\ndef test_keyword_argument():\n # test for https://github.com/numpy/numpy/pull/16574#issuecomment-642660971\n assert np.dtype(dtype=np.float64) == np.dtype(np.float64)\n\n\nclass TestFromDTypeAttribute:\n def test_simple(self):\n class dt:\n dtype = np.dtype(\"f8\")\n\n assert np.dtype(dt) == np.float64\n assert np.dtype(dt()) == np.float64\n\n def test_recursion(self):\n class dt:\n pass\n\n dt.dtype = dt\n with pytest.raises(RecursionError):\n np.dtype(dt)\n\n dt_instance = dt()\n dt_instance.dtype = dt\n with pytest.raises(RecursionError):\n np.dtype(dt_instance)\n\n def test_void_subtype(self):\n class dt(np.void):\n # This code path is fully untested before, so it is unclear\n # what this should be useful for. Note that if np.void is used\n # numpy will think we are deallocating a base type [1.17, 2019-02].\n dtype = np.dtype(\"f,f\")\n\n np.dtype(dt)\n np.dtype(dt(1))\n\n def test_void_subtype_recursion(self):\n class vdt(np.void):\n pass\n\n vdt.dtype = vdt\n\n with pytest.raises(RecursionError):\n np.dtype(vdt)\n\n with pytest.raises(RecursionError):\n np.dtype(vdt(1))\n\n\nclass TestDTypeClasses:\n @pytest.mark.parametrize(\"dtype\", list(np.typecodes['All']) + [rational])\n def test_basic_dtypes_subclass_properties(self, dtype):\n # Note: Except for the isinstance and type checks, these attributes\n # are considered currently private and may change.\n dtype = np.dtype(dtype)\n assert isinstance(dtype, np.dtype)\n assert type(dtype) is not np.dtype\n assert type(dtype).__name__ == f\"dtype[{dtype.type.__name__}]\"\n assert type(dtype).__module__ == \"numpy\"\n assert not type(dtype)._abstract\n\n # the flexible dtypes and datetime/timedelta have additional parameters\n # which are more than just storage information, these would need to be\n # given when creating a dtype:\n parametric = (np.void, np.str_, np.bytes_, np.datetime64, np.timedelta64)\n if dtype.type not in parametric:\n assert not type(dtype)._parametric\n assert type(dtype)() is dtype\n else:\n assert type(dtype)._parametric\n with assert_raises(TypeError):\n type(dtype)()\n\n def test_dtype_superclass(self):\n assert type(np.dtype) is not type\n assert isinstance(np.dtype, type)\n\n assert type(np.dtype).__name__ == \"_DTypeMeta\"\n assert type(np.dtype).__module__ == \"numpy\"\n assert np.dtype._abstract\n\n\nclass TestFromCTypes:\n\n @staticmethod\n def check(ctype, dtype):\n dtype = np.dtype(dtype)\n assert_equal(np.dtype(ctype), dtype)\n assert_equal(np.dtype(ctype()), dtype)\n\n def test_array(self):\n c8 = ctypes.c_uint8\n self.check( 3 * c8, (np.uint8, (3,)))\n self.check( 1 * c8, (np.uint8, (1,)))\n self.check( 0 * c8, (np.uint8, (0,)))\n self.check(1 * (3 * c8), ((np.uint8, (3,)), (1,)))\n self.check(3 * (1 * c8), ((np.uint8, (1,)), (3,)))\n\n def test_padded_structure(self):\n class PaddedStruct(ctypes.Structure):\n _fields_ = [\n ('a', ctypes.c_uint8),\n ('b', ctypes.c_uint16)\n ]\n expected = np.dtype([\n ('a', np.uint8),\n ('b', np.uint16)\n ], align=True)\n self.check(PaddedStruct, expected)\n\n def test_bit_fields(self):\n class BitfieldStruct(ctypes.Structure):\n _fields_ = [\n ('a', ctypes.c_uint8, 7),\n ('b', ctypes.c_uint8, 1)\n ]\n assert_raises(TypeError, np.dtype, BitfieldStruct)\n assert_raises(TypeError, np.dtype, BitfieldStruct())\n\n def test_pointer(self):\n p_uint8 = ctypes.POINTER(ctypes.c_uint8)\n assert_raises(TypeError, np.dtype, p_uint8)\n\n def test_void_pointer(self):\n self.check(ctypes.c_void_p, np.uintp)\n\n 
def test_union(self):\n class Union(ctypes.Union):\n _fields_ = [\n ('a', ctypes.c_uint8),\n ('b', ctypes.c_uint16),\n ]\n expected = np.dtype(dict(\n names=['a', 'b'],\n formats=[np.uint8, np.uint16],\n offsets=[0, 0],\n itemsize=2\n ))\n self.check(Union, expected)\n\n def test_union_with_struct_packed(self):\n class Struct(ctypes.Structure):\n _pack_ = 1\n _fields_ = [\n ('one', ctypes.c_uint8),\n ('two', ctypes.c_uint32)\n ]\n\n class Union(ctypes.Union):\n _fields_ = [\n ('a', ctypes.c_uint8),\n ('b', ctypes.c_uint16),\n ('c', ctypes.c_uint32),\n ('d', Struct),\n ]\n expected = np.dtype(dict(\n names=['a', 'b', 'c', 'd'],\n formats=['u1', np.uint16, np.uint32, [('one', 'u1'), ('two', np.uint32)]],\n offsets=[0, 0, 0, 0],\n itemsize=ctypes.sizeof(Union)\n ))\n self.check(Union, expected)\n\n def test_union_packed(self):\n class Struct(ctypes.Structure):\n _fields_ = [\n ('one', ctypes.c_uint8),\n ('two', ctypes.c_uint32)\n ]\n _pack_ = 1\n class Union(ctypes.Union):\n _pack_ = 1\n _fields_ = [\n ('a', ctypes.c_uint8),\n ('b', ctypes.c_uint16),\n ('c', ctypes.c_uint32),\n ('d', Struct),\n ]\n expected = np.dtype(dict(\n names=['a', 'b', 'c', 'd'],\n formats=['u1', np.uint16, np.uint32, [('one', 'u1'), ('two', np.uint32)]],\n offsets=[0, 0, 0, 0],\n itemsize=ctypes.sizeof(Union)\n ))\n self.check(Union, expected)\n\n def test_packed_structure(self):\n class PackedStructure(ctypes.Structure):\n _pack_ = 1\n _fields_ = [\n ('a', ctypes.c_uint8),\n ('b', ctypes.c_uint16)\n ]\n expected = np.dtype([\n ('a', np.uint8),\n ('b', np.uint16)\n ])\n self.check(PackedStructure, expected)\n\n def test_large_packed_structure(self):\n class PackedStructure(ctypes.Structure):\n _pack_ = 2\n _fields_ = [\n ('a', ctypes.c_uint8),\n ('b', ctypes.c_uint16),\n ('c', ctypes.c_uint8),\n ('d', ctypes.c_uint16),\n ('e', ctypes.c_uint32),\n ('f', ctypes.c_uint32),\n ('g', ctypes.c_uint8)\n ]\n expected = np.dtype(dict(\n formats=[np.uint8, np.uint16, np.uint8, np.uint16, np.uint32, np.uint32, np.uint8 ],\n offsets=[0, 2, 4, 6, 8, 12, 16],\n names=['a', 'b', 'c', 'd', 'e', 'f', 'g'],\n itemsize=18))\n self.check(PackedStructure, expected)\n\n def test_big_endian_structure_packed(self):\n class BigEndStruct(ctypes.BigEndianStructure):\n _fields_ = [\n ('one', ctypes.c_uint8),\n ('two', ctypes.c_uint32)\n ]\n _pack_ = 1\n expected = np.dtype([('one', 'u1'), ('two', '>u4')])\n self.check(BigEndStruct, expected)\n\n def test_little_endian_structure_packed(self):\n class LittleEndStruct(ctypes.LittleEndianStructure):\n _fields_ = [\n ('one', ctypes.c_uint8),\n ('two', ctypes.c_uint32)\n ]\n _pack_ = 1\n expected = np.dtype([('one', 'u1'), ('two', '<u4')])\n self.check(LittleEndStruct, expected)\n\n def test_little_endian_structure(self):\n class PaddedStruct(ctypes.LittleEndianStructure):\n _fields_ = [\n ('a', ctypes.c_uint8),\n ('b', ctypes.c_uint16)\n ]\n expected = np.dtype([\n ('a', '<B'),\n ('b', '<H')\n ], align=True)\n self.check(PaddedStruct, expected)\n\n def test_big_endian_structure(self):\n class PaddedStruct(ctypes.BigEndianStructure):\n _fields_ = [\n ('a', ctypes.c_uint8),\n ('b', ctypes.c_uint16)\n ]\n expected = np.dtype([\n ('a', '>B'),\n ('b', '>H')\n ], align=True)\n self.check(PaddedStruct, expected)\n\n def test_simple_endian_types(self):\n self.check(ctypes.c_uint16.__ctype_le__, np.dtype('<u2'))\n self.check(ctypes.c_uint16.__ctype_be__, np.dtype('>u2'))\n self.check(ctypes.c_uint8.__ctype_le__, np.dtype('u1'))\n self.check(ctypes.c_uint8.__ctype_be__, np.dtype('u1'))\n\n all_types = 
set(np.typecodes['All'])\n all_pairs = permutations(all_types, 2)\n\n @pytest.mark.parametrize(\"pair\", all_pairs)\n def test_pairs(self, pair):\n \"\"\"\n Check that np.dtype('x,y') matches [np.dtype('x'), np.dtype('y')]\n Example: np.dtype('d,I') -> dtype([('f0', '<f8'), ('f1', '<u4')])\n \"\"\"\n # gh-5645: check that np.dtype('i,L') can be used\n pair_type = np.dtype('{},{}'.format(*pair))\n expected = np.dtype([('f0', pair[0]), ('f1', pair[1])])\n assert_equal(pair_type, expected)\n\n\nclass TestUserDType:\n @pytest.mark.leaks_references(reason=\"dynamically creates custom dtype.\")\n def test_custom_structured_dtype(self):\n class mytype:\n pass\n\n blueprint = np.dtype([(\"field\", object)])\n dt = create_custom_field_dtype(blueprint, mytype, 0)\n assert dt.type == mytype\n # We cannot (currently) *create* this dtype with `np.dtype` because\n # mytype does not inherit from `np.generic`. This seems like an\n # unnecessary restriction, but one that has been around forever:\n assert np.dtype(mytype) == np.dtype(\"O\")\n\n def test_custom_structured_dtype_errors(self):\n class mytype:\n pass\n\n blueprint = np.dtype([(\"field\", object)])\n\n with pytest.raises(ValueError):\n # Tests what happens if fields are unset during creation\n # which is currently rejected due to the containing object\n # (see PyArray_RegisterDataType).\n create_custom_field_dtype(blueprint, mytype, 1)\n\n with pytest.raises(RuntimeError):\n # Tests that a dtype must have its type field set up to np.dtype\n # or in this case a builtin instance.\n create_custom_field_dtype(blueprint, mytype, 2)\n"
] |
[
[
"numpy.ma.isMaskedArray",
"numpy.isnan",
"numpy.core.overrides.set_module",
"numpy.core.asarray",
"numpy.moveaxis"
],
[
"numpy.can_cast",
"numpy.dtype",
"numpy.iinfo",
"numpy.testing.assert_equal",
"numpy.core._multiarray_tests.create_custom_field_dtype",
"numpy.uint32",
"numpy.compat.pickle.dumps",
"numpy.core._rational_tests.rational",
"numpy.int8",
"numpy.finfo",
"numpy.frombuffer",
"numpy.zeros",
"numpy.testing.assert_raises",
"numpy.testing.assert_",
"numpy.array",
"numpy.ones",
"numpy.testing.assert_array_equal",
"numpy.float64",
"numpy.empty"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
tmcgilchrist/ocaml-arrow
|
[
"627eac81cf6a4195a25fc300821ad9d51fd9cd40"
] |
[
"bin/gen-parquet.py"
] |
[
"import numpy as np\nimport pandas as pd\n\nnum_rows = 10**6\ndf = pd.DataFrame({\n 'x': range(num_rows),\n 'foo': ['foo'] * num_rows,\n 'bar': [f'barbar{d}' for d in range(num_rows)],\n})\n\nfor period in range(2, 21):\n df[f'baz{period}'] = [f'{d % period}-baz-periodic' for d in range(num_rows)]\n\ndf['x2'] = df['x'] * df['x']\ndf['cos_x'] = np.cos(df['x'])\ndf['sin_x'] = np.sin(df['x'])\n\nfor c in range(50):\n df[f'cos_x{c}'] = df['cos_x'] + c\n\ndf.to_parquet('foo.parquet')\n"
] |
[
[
"numpy.cos",
"numpy.sin"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
TheWorstOne/numpy-formulas
|
[
"093657d4a23dfe82685595254aae50e0c6e46afb"
] |
[
"ImpurityMeasures/entropy_c.py"
] |
[
"import numpy as np\nfrom numpy.core.numeric import zeros_like\nimport pandas as pd\n\n# [TODO] This code was made in a hurry. \n# It can be improved, someday I will. Please excuse me\n\ndata = {\n \"a3\": [1.0, 6.0, 5.0, 4.0, 7.0, 3.0,8.0,7.0,5.0],\n \"class\": [\"CP\", \"CP\", \"CN\", \"CP\", \"CN\", \"CN\", \"CN\", \"CP\", \"CN\"]\n}\n\ndivision = np.array([2.0, 3.5, 4.5, 5.5, 6.5, 7.5])\n\ndf = pd.DataFrame(data)\n\ndf.sort_values(by=[\"a3\"], inplace=True)\n\nprint(df)\n\nE_father = 0.9911\n\nfor i in division:\n print(\"------------------------------------------------------\")\n print(\"Split in \", str(i),\"\\n\")\n dfi = df.copy()\n dfi[\"a3\"] = dfi[\"a3\"].apply(lambda x: \"C0\" if x <= i else \"C1\")\n confusion = pd.crosstab(dfi[\"a3\"], dfi[\"class\"], margins=True, margins_name=\"Total\")\n print(confusion)\n index = confusion.index\n confusion = confusion.values\n\n a = confusion[0,0]/confusion[0,-1]\n b = confusion[0,1]/confusion[0,-1]\n E0 = -(a*np.log2(a, out=np.zeros_like(a), where=(a!=0))) - (b*np.log2(b, out=np.zeros_like(b), where=(b!=0)))\n print(\"\\nEntropy of {}:\\t\\t{}\".format(index[0],E0))\n \n c = confusion[1,0]/confusion[1,-1]\n d = confusion[1,1]/confusion[1,-1]\n E1 = -(c*np.log2(c, out=np.zeros_like(c), where=(c!=0))) - (d*np.log2(d, out=np.zeros_like(d), where=(d!=0)))\n print(\"Entropy of {}:\\t\\t{}\".format(index[1],E1))\n\n C0 = confusion[0,-1]/confusion[-1,-1]\n C1 = confusion[1,-1]/confusion[-1,-1]\n InfGain = E_father - ((C0*E0)+(C1*E1))\n print(\"Information Gain:\\t{}\".format(InfGain))\n\n"
] |
[
[
"pandas.crosstab",
"numpy.array",
"numpy.zeros_like",
"pandas.DataFrame"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"1.3",
"0.19",
"1.1",
"1.5",
"0.24",
"0.20",
"1.0",
"0.25",
"1.2"
],
"scipy": [],
"tensorflow": []
}
] |
brunnovicente/SeedKmeans
|
[
"2a71adb5f40c4fceb32fae40f03189e6773094bf"
] |
[
"SEEDEDKmeans.py"
] |
[
"import pandas as pd\nimport numpy as np\nfrom sklearn.cluster import KMeans\n\nclass SKmeans:\n \n def __init__(self, n_grupos = 3):\n self.n_grupos = n_grupos\n \n def fit(self, L, U, y):\n sementes = self.gerarSementes(L, y)\n self.kmeans = KMeans(n_clusters = self.n_grupos, init = sementes, n_init= 1)\n self.kmeans.fit(U)\n \n def predict(self, X):\n return self.kmeans.predict(X)\n \n def gerarSementes(self, X, y):\n data = pd.DataFrame(X)\n data['classe'] = y\n grouped = data.groupby(['classe']).mean()\n return grouped.get_values() \n "
] |
[
[
"sklearn.cluster.KMeans",
"pandas.DataFrame"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
markxiao/APPFL
|
[
"2940f01695b84d8239368e5d1fc3133c7f7a05ae"
] |
[
"appfl/protos/operator.py"
] |
[
"import logging\nfrom collections import OrderedDict\nimport hydra\nfrom omegaconf import DictConfig\n\nimport torch\nfrom torch.utils.data import DataLoader\nfrom torch.nn import CrossEntropyLoss\nimport torchvision\nfrom torchvision.transforms import ToTensor\n\nimport numpy as np\nimport copy\n\nfrom appfl.misc.utils import *\nfrom appfl.algorithm.iadmm import *\nfrom appfl.algorithm.fedavg import *\nfrom .federated_learning_pb2 import Job\n\nclass FLOperator():\n def __init__(self, cfg, model, test_dataset, num_clients):\n self.logger = logging.getLogger(__name__)\n self.operator_id = cfg.operator.id\n self.cfg = cfg\n self.num_clients = num_clients\n self.num_epochs = cfg.num_epochs\n self.round_number = 1\n self.best_accuracy = 0.0\n self.device = \"cpu\"\n self.client_states = {}\n self.client_learning_status = {}\n self.servicer = None # Takes care of communication via gRPC\n\n self.dataloader = DataLoader(test_dataset,\n num_workers=0,\n batch_size=cfg.test_data_batch_size,\n shuffle=cfg.test_data_shuffle)\n self.fed_server = eval(cfg.fed.servername)(\n copy.deepcopy(model), num_clients, self.device, **cfg.fed.args)\n\n def get_tensor(self, name):\n return np.array(self.fed_server.model.state_dict()[name]) if name in self.fed_server.model.state_dict() else None\n\n def get_job(self):\n job_todo = Job.TRAIN\n if self.round_number > self.num_epochs:\n job_todo = Job.QUIT\n return min(self.round_number, self.num_epochs), job_todo\n\n def update_weights(self):\n self.logger.info(f\"[Round: {self.round_number: 04}] Updating model weights\")\n self.fed_server.update(self.fed_server.model.state_dict(), self.client_states)\n\n if self.cfg.validation == True:\n test_loss, accuracy = validation(self.fed_server, self.dataloader)\n\n if accuracy > self.best_accuracy:\n self.best_accuracy = accuracy\n\n self.logger.info(\n f\"[Round: {self.round_number: 04}] Test set: Average loss: {test_loss:.4f}, Accuracy: {accuracy:.2f}%, Best Accuracy: {self.best_accuracy:.2f}%\"\n )\n self.round_number += 1\n\n def is_round_finished(self):\n return all((c+1,self.round_number) in self.client_learning_status for c in range(0,self.num_clients))\n\n def send_learning_results(self, client_id, round_number, tensor_list):\n results = {}\n for tensor in tensor_list:\n name = tensor.name\n shape = tuple(tensor.data_shape)\n flat = np.frombuffer(tensor.data_bytes, dtype=np.float32)\n nparray = np.reshape(flat, newshape=shape, order='C')\n results[name] = torch.from_numpy(nparray)\n self.client_states[client_id] = results\n self.client_learning_status[(client_id,round_number)] = True\n\n if self.is_round_finished():\n self.logger.info(f\"[Round: {self.round_number: 04}] Finished; all clients have sent their results.\")\n self.update_weights()\n"
] |
[
[
"numpy.reshape",
"numpy.frombuffer",
"torch.utils.data.DataLoader",
"torch.from_numpy"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
henrykrumb/paperscraper
|
[
"31abb49701b90bfb5107b46e82941068d242ec38"
] |
[
"paperscraper/postprocessing.py"
] |
[
"import logging\nimport sys\nfrom typing import List, Dict\n\nimport numpy as np\nimport pandas as pd\n\nlogging.basicConfig(stream=sys.stdout, level=logging.DEBUG)\nlogger = logging.getLogger(__name__)\n\n\ndef aggregate_paper(\n data: List[Dict[str, str]],\n start_year: int = 2016,\n bins_per_year: int = 4,\n filtering: bool = False,\n filter_keys: List = list(),\n unwanted_keys: List = list(),\n return_filtered: bool = False,\n filter_abstract: bool = True,\n last_year: int = 2021,\n):\n \"\"\"Consumes a list of unstructured keyword results from a .jsonl and\n aggregates papers into several bins per year.\n\n Args:\n data (List[Dict[str,str]]): Content of a .jsonl file, i.e., a list of\n dictionaries, one per paper.\n start_year (int, optional): First year of interest. Defaults to 2016.\n bins_per_year (int, optional): Defaults to 4 (quarterly aggregation).\n filtering (bool, optional): Whether or not all papers in .jsonl are\n perceived as matches or whether an additional sanity checking for\n the keywords is performed in abstract/title. Defaults to False.\n filter_keys (list, optional): List of str used for filtering. Only\n applies if filtering is True. Defaults to empty list.\n unwanted_keys (list, optional): List of str that must not occur in either\n title or abstract. Only applies if filtering is True.\n return_filtered (bool, optional): Whether the filtered matches are also\n returned. Only applies if filtering is True. Defaults to False.\n filer_abstract (bool, optional): Whether the keyword is searched in the abstract\n or not. Defaults to True.\n last_year (int, optional): Most recent year for the aggregation. Defaults\n to current year. All newer entries are discarded.\n\n Returns:\n bins (np.array): Vector of length number of years (2020 - start_year) x\n bins_per_year.\n \"\"\"\n\n if not isinstance(data, list):\n raise ValueError(f\"Expected list, received {type(data)}\")\n if not isinstance(bins_per_year, int):\n raise ValueError(f\"Expected int, received {type(bins_per_year)}\")\n if 12 % bins_per_year != 0:\n raise ValueError(f\"Cant split year into {bins_per_year} bins\")\n\n num_years = last_year - start_year + 1\n bins = np.zeros((num_years * bins_per_year))\n\n if len(data) == 0:\n return bins if not return_filtered else (bins, [])\n\n # Remove duplicate entries (keep only the first one)\n df = pd.DataFrame(data).sort_values(by=\"date\", ascending=True)\n data = df.drop_duplicates(subset=\"title\", keep=\"first\").to_dict(\"records\")\n\n dates = [dd[\"date\"] for dd in data]\n\n filtered = []\n for paper, date in zip(data, dates):\n year = int(date.split(\"-\")[0])\n if year < start_year or year > last_year:\n continue\n\n # At least one synonym per keyword needs to be in either title or\n # abstract.\n if filtering and filter_keys != list():\n\n # Filter out papers which undesired terms\n unwanted = False\n for unwanted_key in unwanted_keys:\n if unwanted_key.lower() in paper[\"title\"].lower():\n unwanted = True\n if (\n filter_abstract\n and paper[\"abstract\"] is not None\n and unwanted_key.lower() in paper[\"abstract\"].lower()\n ):\n unwanted = True\n if unwanted:\n continue\n\n got_keys = []\n for key_term in filter_keys:\n got_key = False\n if not isinstance(key_term, list):\n key_term = [key_term]\n for key in key_term:\n if key.lower() in paper[\"title\"].lower():\n got_key = True\n if (\n filter_abstract\n and paper[\"abstract\"] is not None\n and key.lower() in paper[\"abstract\"].lower()\n ):\n got_key = True\n got_keys.append(got_key)\n\n if 
len(got_keys) != sum(got_keys):\n continue\n\n filtered.append(paper)\n\n if len(date.split(\"-\")) < 2:\n logger.warning(\n f\"Paper without month {date}, randomly assigned month.\"\n f\"{paper['title']}\"\n )\n month = np.random.choice(12)\n else:\n month = int(date.split(\"-\")[1])\n\n year_bin = year - start_year\n month_bin = int(np.floor((month - 1) / (12 / bins_per_year)))\n bins[year_bin * bins_per_year + month_bin] += 1\n\n if return_filtered:\n return bins, filtered\n else:\n return bins\n"
] |
[
[
"numpy.floor",
"numpy.zeros",
"pandas.DataFrame",
"numpy.random.choice"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
chunchentu/MagNet
|
[
"72be5ebf7f3271b3b037c345ba457c93f5a01462",
"72be5ebf7f3271b3b037c345ba457c93f5a01462",
"5c80091dcf2b80d6d22af8e5e1b103218c36e889"
] |
[
"setup_mnist.py",
"setup_face.py",
"defensive_models.py"
] |
[
"## setup_mnist.py -- mnist data and model loading code\n##\n## Copyright (C) 2016, Nicholas Carlini <[email protected]>.\n##\n## This program is licenced under the BSD 2-Clause licence,\n## contained in the LICENCE file in this directory.\n\n## Modified for MagNet's use.\n\n# from __future__ import print_function\n# from future.standard_library import install_aliases\n# install_aliases()\n\nimport numpy as np\nimport os\nimport gzip\nimport urllib.request\n\nfrom keras.models import load_model\n\ndef extract_data(filename, num_images):\n with gzip.open(filename) as bytestream:\n bytestream.read(16)\n buf = bytestream.read(num_images*28*28)\n data = np.frombuffer(buf, dtype=np.uint8).astype(np.float32)\n data = (data / 255) - 0.5\n data = data.reshape(num_images, 28, 28, 1)\n return data\n\ndef extract_labels(filename, num_images):\n with gzip.open(filename) as bytestream:\n bytestream.read(8)\n buf = bytestream.read(1 * num_images)\n labels = np.frombuffer(buf, dtype=np.uint8)\n return (np.arange(10) == labels[:, None]).astype(np.float32)\n\nclass MNIST:\n def __init__(self):\n if not os.path.exists(\"data\"):\n os.mkdir(\"data\")\n files = [\"train-images-idx3-ubyte.gz\",\n \"t10k-images-idx3-ubyte.gz\",\n \"train-labels-idx1-ubyte.gz\",\n \"t10k-labels-idx1-ubyte.gz\"]\n for name in files:\n urllib.request.urlretrieve('http://yann.lecun.com/exdb/mnist/' + name, \"data/\"+name)\n\n train_data = extract_data(\"data/train-images-idx3-ubyte.gz\", 60000)+0.5\n train_labels = extract_labels(\"data/train-labels-idx1-ubyte.gz\", 60000)\n self.test_data = extract_data(\"data/t10k-images-idx3-ubyte.gz\", 10000)+0.5\n self.test_labels = extract_labels(\"data/t10k-labels-idx1-ubyte.gz\", 10000)\n \n VALIDATION_SIZE = 5000\n \n self.validation_data = train_data[:VALIDATION_SIZE, :, :, :]\n self.validation_labels = train_labels[:VALIDATION_SIZE]\n self.train_data = train_data[VALIDATION_SIZE:, :, :, :]\n self.train_labels = train_labels[VALIDATION_SIZE:]\n\n @staticmethod\n def print():\n return \"MNIST\"\n\n\nclass MNISTModel:\n def __init__(self, restore, session=None):\n self.num_channels = 1\n self.image_size = 28\n self.num_labels = 10\n self.model = load_model(restore)\n\n def predict(self, data):\n return self.model(data)\n",
"## setup_mnist.py -- mnist data and model loading code\n##\n## Copyright (C) 2016, Nicholas Carlini <[email protected]>.\n##\n## This program is licenced under the BSD 2-Clause licence,\n## contained in the LICENCE file in this directory.\n\n## Modified for MagNet's use.\n\n# from __future__ import print_function\n# from future.standard_library import install_aliases\n# install_aliases()\n\nimport numpy as np\nimport os\nimport gzip\nimport urllib.request\nimport scipy.io\n\nfrom keras.models import load_model\n\ndef extract_data(filename, num_images):\n with gzip.open(filename) as bytestream:\n bytestream.read(16)\n buf = bytestream.read(num_images*28*28)\n data = np.frombuffer(buf, dtype=np.uint8).astype(np.float32)\n data = (data / 255) - 0.5\n data = data.reshape(num_images, 28, 28, 1)\n return data\n\ndef extract_labels(filename, num_images):\n with gzip.open(filename) as bytestream:\n bytestream.read(8)\n buf = bytestream.read(1 * num_images)\n labels = np.frombuffer(buf, dtype=np.uint8)\n return (np.arange(10) == labels[:, None]).astype(np.float32)\n\nclass FACE:\n def __init__(self):\n\n d = scipy.io.loadmat(\"face_data.mat\")\n\n # train_data = extract_data(\"data/train-images-idx3-ubyte.gz\", 60000)+0.5\n # train_labels = extract_labels(\"data/train-labels-idx1-ubyte.gz\", 60000)\n # self.test_data = extract_data(\"data/t10k-images-idx3-ubyte.gz\", 10000)+0.5\n # self.test_labels = extract_labels(\"data/t10k-labels-idx1-ubyte.gz\", 10000)\n \n # VALIDATION_SIZE = 5000\n \n # self.validation_data = train_data[:VALIDATION_SIZE, :, :, :]\n # self.validation_labels = train_labels[:VALIDATION_SIZE]\n # self.train_data = train_data[VALIDATION_SIZE:, :, :, :]\n # self.train_labels = train_labels[VALIDATION_SIZE:]\n\n self.train_data = d[\"true_train_x\"]\n self.train_labels = d[\"true_train_y\"]\n self.test_data = d[\"true_test_x\"]\n self.test_labels = d[\"true_test_y\"]\n self.validation_data = self.test_data\n self.validation_labels = self.test_labels\n self.train_adv = d[\"adv_train_x\"]\n self.test_adv = d[\"adv_test_x\"]\n self.train_data = np.concatenate((self.train_data, self.train_adv), axis=0)\n\n\n @staticmethod\n def print():\n return \"Face\"\n\n\nclass MNISTModel:\n def __init__(self, restore, session=None):\n self.num_channels = 1\n self.image_size = 32\n self.num_labels = 10\n self.model = load_model(restore)\n\n def predict(self, data):\n return self.model(data)\n",
"## defensive_models.py -- defines several flavors of autoencoders for defense\n##\n## Copyright (C) 2017, Dongyu Meng <[email protected]>.\n##\n## This program is licenced under the BSD 2-Clause licence,\n## contained in the LICENCE file in this directory.\n\nimport os\nimport numpy as np\nfrom keras.layers.core import Lambda\nfrom keras.layers.merge import Average, add\nfrom keras.layers import Input, Conv2D, MaxPooling2D, UpSampling2D, AveragePooling2D\nfrom keras.models import Model\nimport keras.regularizers as regs\n\n\nclass DenoisingAutoEncoder:\n def __init__(self, image_shape,\n structure,\n v_noise=0.0,\n activation=\"relu\",\n model_dir=\"./defensive_models/\",\n reg_strength=0.0):\n \"\"\"\n Denoising autoencoder.\n\n image_shape: Shape of input image. e.g. 28, 28, 1.\n structure: Structure of autoencoder.\n v_noise: Volume of noise while training.\n activation: What activation function to use.\n model_dir: Where to save / load model from.\n reg_strength: Strength of L2 regularization.\n \"\"\"\n h, w, c = image_shape\n self.image_shape = image_shape\n self.model_dir = model_dir\n self.v_noise = v_noise\n\n input_img = Input(shape=self.image_shape)\n x = input_img\n\n for layer in structure:\n if isinstance(layer, int):\n x = Conv2D(layer, (3, 3), activation=activation, padding=\"same\",\n activity_regularizer=regs.l2(reg_strength))(x)\n elif layer == \"max\":\n x = MaxPooling2D((2, 2), padding=\"same\")(x)\n elif layer == \"average\":\n x = AveragePooling2D((2, 2), padding=\"same\")(x)\n else:\n print(layer, \"is not recognized!\")\n exit(0)\n\n for layer in reversed(structure):\n if isinstance(layer, int):\n x = Conv2D(layer, (3, 3), activation=activation, padding=\"same\",\n activity_regularizer=regs.l2(reg_strength))(x)\n elif layer == \"max\" or layer == \"average\":\n x = UpSampling2D((2, 2))(x)\n\n decoded = Conv2D(c, (3, 3), activation='sigmoid', padding='same',\n activity_regularizer=regs.l2(reg_strength))(x)\n self.model = Model(input_img, decoded)\n\n def train(self, data, archive_name, num_epochs=100, batch_size=256,\n if_save=True):\n self.model.compile(loss='mean_squared_error',\n metrics=['mean_squared_error'],\n optimizer='adam')\n\n noise = self.v_noise * np.random.normal(size=np.shape(data.train_data))\n noisy_train_data = data.train_data + noise\n noisy_train_data = np.clip(noisy_train_data, 0.0, 1.0)\n\n self.model.fit(noisy_train_data, data.train_data,\n batch_size=batch_size,\n validation_data=(data.validation_data, data.validation_data),\n epochs=num_epochs,\n shuffle=True)\n\n if if_save: self.model.save(os.path.join(self.model_dir, archive_name))\n\n def load(self, archive_name, model_dir=None):\n if model_dir is None: model_dir = self.model_dir\n self.model.load_weights(os.path.join(model_dir, archive_name))\n\n\nclass PackedAutoEncoder:\n def __init__(self, image_shape, structure, data,\n v_noise=0.1, n_pack=2, pre_epochs=3, activation=\"relu\",\n model_dir=\"./defensive_models/\"):\n \"\"\"\n Train different autoencoders.\n Demo code for graybox scenario.\n\n pre_epochs: How many epochs do we train before fine-tuning.\n n_pack: Number of autoencoders we want to train at once.\n \"\"\"\n self.v_noise = v_noise\n self.n_pack = n_pack\n self.model_dir = model_dir\n pack = []\n\n for i in range(n_pack):\n dae = DenoisingAutoEncoder(image_shape, structure, v_noise=v_noise,\n activation=activation, model_dir=model_dir)\n dae.train(data, \"\", if_save=False, num_epochs=pre_epochs)\n pack.append(dae.model)\n\n shared_input = Input(shape=image_shape, 
name=\"shared_input\")\n outputs = [dae(shared_input) for dae in pack]\n avg_output = Average()(outputs)\n delta_outputs = [add([avg_output, Lambda(lambda x: -x)(output)])\n for output in outputs]\n\n self.model = Model(inputs=shared_input, outputs=outputs+delta_outputs)\n\n def train(self, data, archive_name, alpha, num_epochs=10, batch_size=128):\n noise = self.v_noise * np.random.normal(size=np.shape(data.train_data))\n noisy_train_data = data.train_data + noise\n noisy_train_data = np.clip(noisy_train_data, 0.0, 1.0)\n\n train_zeros = [np.zeros_like(data.train_data)] * self.n_pack\n val_zeros = [np.zeros_like(data.validation_data)] * self.n_pack\n\n self.model.compile(loss=\"mean_squared_error\", optimizer=\"adam\",\n loss_weights=[1.0]*self.n_pack + [-alpha]*self.n_pack)\n\n self.model.fit(noisy_train_data,\n [data.train_data]*self.n_pack + train_zeros,\n batch_size=batch_size,\n validation_data=(data.validation_data,\n [data.validation_data]*self.n_pack+val_zeros),\n epochs=num_epochs,\n shuffle=True)\n\n for i in range(self.n_pack):\n model = Model(self.model.input, self.model.outputs[i])\n model.save(os.path.join(self.model_dir, archive_name+\"_\"+str(i)))\n\n def load(self, archive_name, model_dir=None):\n if model_dir is None: model_dir = self.model_dir\n self.model.load_weights(os.path.join(model_dir, archive_name))\n\n"
] |
[
[
"numpy.frombuffer",
"numpy.arange"
],
[
"numpy.concatenate",
"numpy.frombuffer",
"numpy.arange"
],
[
"numpy.shape",
"numpy.zeros_like",
"numpy.clip"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
delira-dev/vision_torch
|
[
"d944aa67d319bd63a2add5cb89e8308413943de6",
"d944aa67d319bd63a2add5cb89e8308413943de6",
"d944aa67d319bd63a2add5cb89e8308413943de6"
] |
[
"deliravision/torch/models/gans/conditional/models.py",
"deliravision/torch/models/gans/coupled/cogan.py",
"tests/models/test_backbones_torch.py"
] |
[
"import torch\nfrom functools import reduce\nfrom operator import mul\n\n\nclass Generator(torch.nn.Module):\n \"\"\"\n A very simple generator model to generate images of specific classes\n \"\"\"\n def __init__(self, n_classes, img_shape, latent_dim):\n \"\"\"\n\n Parameters\n ----------\n n_classes : int\n the total number of classes\n img_shape : tuple\n the shape of the input images (including channel-dimension,\n excluding batch-dimension)\n latent_dim : int\n the size of the latent dimension\n\n \"\"\"\n super().__init__()\n\n self.label_emb = torch.nn.Embedding(n_classes, n_classes)\n\n def block(in_feat, out_feat, normalize=True):\n layers = [torch.nn.Linear(in_feat, out_feat)]\n if normalize:\n layers.append(torch.nn.BatchNorm1d(out_feat, 0.8))\n layers.append(torch.nn.LeakyReLU(0.2, inplace=True))\n return layers\n\n self.model = torch.nn.Sequential(\n *block(latent_dim + n_classes, 128, normalize=False),\n *block(128, 256),\n *block(256, 512),\n *block(512, 1024),\n torch.nn.Linear(1024, int(reduce(mul, img_shape))),\n torch. nn.Tanh()\n )\n\n self._img_shape = img_shape\n\n def forward(self, noise, labels):\n \"\"\"\n Forwards a single noise batch with the according labels through the\n generator network\n\n Parameters\n ----------\n noise : :class:`torch.Tensor`\n the noise batch\n labels : :class:`torch.Tensor`\n the label batch\n\n Returns\n -------\n :class:`torch.Tensor`\n the generated image\n\n \"\"\"\n # Concatenate label embedding and image to produce input\n gen_input = torch.cat((self.label_emb(labels).squeeze(1), noise), -1)\n img = self.model(gen_input)\n img = img.view(img.size(0), *self._img_shape)\n return img\n\n\nclass Discriminator(torch.nn.Module):\n \"\"\"\n A very simple discriminator network for conditionally generated images\n \"\"\"\n def __init__(self, n_classes, img_shape):\n \"\"\"\n\n Parameters\n ----------\n n_classes : int\n the total number of classes\n img_shape : tuple\n the shape of the input images (including channel-dimension,\n excluding batch-dimension)\n\n \"\"\"\n super().__init__()\n\n self.label_embedding = torch.nn.Embedding(n_classes, n_classes)\n\n self.model = torch.nn.Sequential(\n torch.nn.Linear(n_classes + int(reduce(mul, img_shape)), 512),\n torch.nn.LeakyReLU(0.2, inplace=True),\n torch.nn.Linear(512, 512),\n torch.nn.Dropout(0.4),\n torch.nn.LeakyReLU(0.2, inplace=True),\n torch.nn.Linear(512, 512),\n torch.nn.Dropout(0.4),\n torch.nn.LeakyReLU(0.2, inplace=True),\n torch.nn.Linear(512, 1),\n )\n\n def forward(self, img, labels):\n \"\"\"\n Feeds an image and label batch through the discriminator network\n\n Parameters\n ----------\n img : :class:`torch.Tensor`\n the image batch\n labels : :class:`torch.Tensor`\n the label batch\n\n Returns\n -------\n :class:`torch.Tensor`\n the discriminative result\n\n \"\"\"\n # Concatenate label embedding and image to produce input\n d_in = torch.cat((img.view(img.size(0), -1),\n self.label_embedding(labels).squeeze(1)), -1)\n validity = self.model(d_in)\n return validity\n",
"from delira.models import AbstractPyTorchNetwork\nimport torch\n\nfrom deliravision.models.gans.coupled.models import CoupledDiscriminators, \\\n CoupledGenerators\n\n\nclass CoupledGAN(AbstractPyTorchNetwork):\n \"\"\"\n An implementation of of coupled generative adversarial networks, which are\n capable of generating images with the same content in different domains.\n\n References\n ----------\n `Paper <https://arxiv.org/abs/1606.07536>`_\n\n Warnings\n --------\n This Network is designed for training only; if you want to predict from an\n already trained network, it might be best, to split this network into its\n parts (i. e. separating the discriminator from the generator). This will\n give a significant boost in inference speed and a significant decrease in\n memory consumption, since no memory is allocated for additional weights of\n the unused parts and no inference is done for them. If this whole network\n is used, inferences might be done multiple times per network, to obtain\n all necessary (intermediate) outputs for training.\n\n \"\"\"\n def __init__(self, img_size, in_channels, latent_dim,\n generator_cls=CoupledGenerators,\n discriminator_cls=CoupledDiscriminators):\n \"\"\"\n\n Parameters\n ----------\n img_size : int\n image size\n in_channels : int\n number of input channels\n latent_dim : int\n size of the latent dimension\n generator_cls :\n class implementing the actual coupled generator topology\n discriminator_cls :\n class implementing the actual coupled discriminator topology\n\n \"\"\"\n\n super().__init__()\n\n self.generator = generator_cls(img_size, latent_dim, in_channels)\n self.discriminator = discriminator_cls(in_channels, img_size)\n self._latent_dim = latent_dim\n\n def forward(self, imgs_a, imgs_b, noise=None):\n \"\"\"\n\n Parameters\n ----------\n imgs_a : :class:`torch.Tensor`\n images of domain A\n imgs_b : :class:`torch.Tensor`\n images of domain B\n noise : :class:`torch.Tensor`\n noise vector for image generation (will be sampled from normal\n dirstibution if not given)\n\n Returns\n -------\n dict\n dictionary containing all (intermediate) outputs necessary for loss\n calculation and training\n\n \"\"\"\n if noise is None:\n noise = torch.randn(imgs_a.size(0), self._latent_dim,\n device=imgs_a.device, dtype=imgs_a.dtype)\n\n gen_imgs_a, gen_imgs_b = self.generator(noise)\n\n discr_fake_a, discr_fake_b = self.discriminator(gen_imgs_a,\n gen_imgs_b)\n\n discr_real_a, discr_real_b = self.discriminator(imgs_a, imgs_b)\n\n return {\"gen_imgs_a\": gen_imgs_a, \"gen_imgs_b\": gen_imgs_b,\n \"discr_fake_a\": discr_fake_a, \"discr_fake_b\": discr_fake_b,\n \"discr_real_a\": discr_real_a, \"discr_real_b\": discr_real_b}\n\n @staticmethod\n def closure(model, data_dict: dict, optimizers: dict, losses=None,\n metrics=None, fold=0, **kwargs):\n\n \"\"\"\n Function which handles prediction from batch, logging, loss calculation\n and optimizer step\n\n Parameters\n ----------\n model : :class:`delira.models.AbstractPyTorchNetwork`\n model to forward data through\n data_dict : dict\n dictionary containing the data\n optimizers : dict\n dictionary containing all optimizers to perform parameter update\n losses : dict\n Functions or classes to calculate losses\n metrics : dict\n Functions or classes to calculate other metrics\n fold : int\n Current Fold in Crossvalidation (default: 0)\n kwargs : dict\n additional keyword arguments\n\n Returns\n -------\n dict\n Metric values (with same keys as input dict metrics); will always\n be empty here\n dict\n Loss values 
(with same keys as input dict losses)\n dict\n Arbitrary number of predictions\n\n \"\"\"\n\n metric_vals, loss_vals = {}, {}\n\n preds = model(data_dict[\"data_a\"], data_dict[\"data_b\"])\n\n loss_gen_a = losses[\"adversarial\"](preds[\"discr_fake_a\"], True)\n loss_gen_b = losses[\"adversarial\"](preds[\"discr_fake_b\"], True)\n\n loss_vals[\"gen_a\"] = loss_gen_a.item()\n loss_vals[\"gen_b\"] = loss_gen_b.item()\n\n loss_generator = (loss_gen_a + loss_gen_b) / 2\n loss_vals[\"generator\"] = loss_generator.item()\n\n optimizers[\"generator\"].zero_grad()\n loss_generator.backward(retain_graph=True)\n optimizers[\"generator\"].step()\n\n loss_discr_real_a = losses[\"adversarial\"](preds[\"discr_real_a\"], True)\n loss_discr_real_b = losses[\"adversarial\"](preds[\"discr_real_b\"], True)\n loss_discr_fake_a = losses[\"adversarial\"](preds[\"discr_fake_a\"], False)\n loss_discr_fake_b = losses[\"adversarial\"](preds[\"discr_fake_b\"], False)\n\n loss_discrimintaor = (loss_discr_real_a +\n loss_discr_real_b +\n loss_discr_fake_a +\n loss_discr_fake_b) / 4\n\n loss_vals.update({\n \"loss_discr_real_a\": loss_discr_real_a.item(),\n \"loss_discr_real_b\": loss_discr_real_b.item(),\n \"loss_discr_fake_a\": loss_discr_fake_a.item(),\n \"loss_discr_fake_b\": loss_discr_fake_b.item(),\n \"discriminator\": loss_discrimintaor.item()\n })\n\n optimizers[\"discriminator\"].zero_grad()\n loss_discrimintaor.backward()\n optimizers[\"discriminator\"].step()\n\n # zero gradients again just to make sure, gradients aren't carried to\n # next iteration (won't affect training since gradients are zeroed\n # before every backprop step, but would result in way higher memory\n # consumption)\n for k, v in optimizers.items():\n v.zero_grad()\n\n return metric_vals, loss_vals, {k: v.detach()\n for k, v in preds.items()}\n\n @staticmethod\n def prepare_batch(batch: dict, input_device, output_device):\n for k, v in batch.items():\n batch[k] = torch.from_numpy(v).to(torch.float).to(input_device)\n\n return batch",
"import unittest\nimport gc\n\n\nclass TestBackbones(unittest.TestCase):\n\n def setUp(self) -> None:\n test_cases = []\n\n # TODO: Add MobileNet to tests\n from deliravision.torch.models.backbones import SqueezeNetTorch, AlexNetTorch\n from deliravision.torch.models.model_fns import create_vgg_torch, create_resnet_torch, create_densenet_torch, \\\n create_resnext_torch, create_seresnext_torch, create_seresnet_torch\n\n test_cases.append({\n \"network_cls\": SqueezeNetTorch,\n \"network_kwargs\": {'version': 1.0, \"num_classes\": 1000,\n \"in_channels\": 3, \"n_dim\": 2,\n \"pool_type\": \"Max\", \"p_dropout\": 0.5,\n },\n \"input_shape\": (5, 3, 224, 224),\n \"name\": \"SqueezeNet1.0\"\n })\n test_cases.append({\n \"network_cls\": SqueezeNetTorch,\n \"network_kwargs\": {'version': 1.1, \"num_classes\": 1000,\n \"in_channels\": 3, \"n_dim\": 2,\n \"pool_type\": \"Max\", \"p_dropout\": 0.5,\n },\n \"input_shape\": (5, 3, 224, 224),\n \"name\": \"SqueezeNet1.1\"\n })\n test_cases.append({\n \"network_cls\": AlexNetTorch,\n \"network_kwargs\": {\"num_classes\": 1000, \"in_channels\": 3,\n \"n_dim\": 2, \"pool_type\": \"Max\",\n },\n \"input_shape\": (5, 3, 224, 224),\n \"name\": \"AlexNet\"\n })\n test_cases.append({\n \"network_cls\": create_vgg_torch,\n \"network_kwargs\": {\"num_layers\": 11,\n },\n \"input_shape\": (5, 3, 224, 224),\n \"name\": \"VGG11\"\n })\n test_cases.append({\n \"network_cls\": create_vgg_torch,\n \"network_kwargs\": {\"num_layers\": 13,\n },\n \"input_shape\": (5, 3, 224, 224),\n \"name\": \"VGG13\"\n })\n test_cases.append({\n \"network_cls\": create_vgg_torch,\n \"network_kwargs\": {\"num_layers\": 16,\n },\n \"input_shape\": (5, 3, 224, 224),\n \"name\": \"VGG16\"\n })\n test_cases.append({\n \"network_cls\": create_vgg_torch,\n \"network_kwargs\": {\"num_layers\": 19,\n },\n \"input_shape\": (5, 3, 224, 224),\n \"name\": \"VGG19\"\n })\n test_cases.append({\n \"network_cls\": create_resnet_torch,\n \"network_kwargs\": {\"num_layers\": 18,\n \"zero_init_residual\": True,\n },\n \"input_shape\": (5, 3, 224, 224),\n \"name\": \"ResNet18\"\n })\n test_cases.append({\n \"network_cls\": create_resnet_torch,\n \"network_kwargs\": {\"num_layers\": 34,\n \"zero_init_residual\": True,\n },\n \"input_shape\": (5, 3, 224, 224),\n \"name\": \"ResNet34\"\n })\n test_cases.append({\n \"network_cls\": create_resnet_torch,\n \"network_kwargs\": {\"num_layers\": 26,\n \"zero_init_residual\": True,\n },\n \"input_shape\": (5, 3, 224, 224),\n \"name\": \"ResNet26\"\n })\n test_cases.append({\n \"network_cls\": create_resnet_torch,\n \"network_kwargs\": {\"num_layers\": 50,\n \"zero_init_residual\": True,\n },\n \"input_shape\": (5, 3, 224, 224),\n \"name\": \"ResNet50\"\n })\n test_cases.append({\n \"network_cls\": create_resnet_torch,\n \"network_kwargs\": {\"num_layers\": 50,\n \"zero_init_residual\": True,\n \"deep_start\": True,\n \"avg_down\": True,\n },\n \"input_shape\": (5, 3, 224, 224),\n \"name\": \"ResNet50V2\"\n })\n test_cases.append({\n \"network_cls\": create_resnet_torch,\n \"network_kwargs\": {\"num_layers\": 101,\n \"zero_init_residual\": True,\n },\n \"input_shape\": (5, 3, 224, 224),\n \"name\": \"ResNet101\"\n })\n test_cases.append({\n \"network_cls\": create_resnet_torch,\n \"network_kwargs\": {\"num_layers\": 152,\n \"zero_init_residual\": True,\n },\n \"input_shape\": (5, 3, 224, 224),\n \"name\": \"ResNet152\"\n })\n test_cases.append({\n \"network_cls\": create_densenet_torch,\n \"network_kwargs\": {\"num_layers\": 121,\n \"drop_rate\": 0.2,\n },\n 
\"input_shape\": (5, 3, 224, 224),\n \"name\": \"DenseNet121\"\n })\n test_cases.append({\n \"network_cls\": create_densenet_torch,\n \"network_kwargs\": {\"num_layers\": 161,\n \"drop_rate\": 0.2,\n },\n \"input_shape\": (5, 3, 224, 224),\n \"name\": \"DenseNet161\"\n })\n test_cases.append({\n \"network_cls\": create_densenet_torch,\n \"network_kwargs\": {\"num_layers\": 169,\n \"drop_rate\": 0.2,\n },\n \"input_shape\": (5, 3, 224, 224),\n \"name\": \"DenseNet169\"\n })\n test_cases.append({\n \"network_cls\": create_densenet_torch,\n \"network_kwargs\": {\"num_layers\": 201,\n \"drop_rate\": 0.2,\n },\n \"input_shape\": (5, 3, 224, 224),\n \"name\": \"DenseNet201\"\n })\n test_cases.append({\n \"network_cls\": create_resnext_torch,\n \"network_kwargs\": {\"num_layers\": 26,\n },\n \"input_shape\": (5, 3, 224, 224),\n \"name\": \"ResNeXt26\"\n })\n test_cases.append({\n \"network_cls\": create_resnext_torch,\n \"network_kwargs\": {\"num_layers\": 26,\n \"deep_start\": True,\n \"avg_down\": True,\n },\n \"input_shape\": (5, 3, 224, 224),\n \"name\": \"ResNeXt26V2\"\n })\n test_cases.append({\n \"network_cls\": create_resnext_torch,\n \"network_kwargs\": {\"num_layers\": 50,\n },\n \"input_shape\": (5, 3, 224, 224),\n \"name\": \"ResNeXt50\"\n })\n test_cases.append({\n \"network_cls\": create_resnext_torch,\n \"network_kwargs\": {\"num_layers\": 101,\n },\n \"input_shape\": (5, 3, 224, 224),\n \"name\": \"ResNeXt101\"\n })\n test_cases.append({\n \"network_cls\": create_resnext_torch,\n \"network_kwargs\": {\"num_layers\": 152,\n },\n \"input_shape\": (5, 3, 224, 224),\n \"name\": \"ResNeXt152\"\n })\n test_cases.append({\n \"network_cls\": create_seresnet_torch,\n \"network_kwargs\": {\"num_layers\": 18,\n },\n \"input_shape\": (5, 3, 224, 224),\n \"name\": \"SEResNet18\"\n })\n test_cases.append({\n \"network_cls\": create_seresnet_torch,\n \"network_kwargs\": {\"num_layers\": 18,\n \"deep_start\": True,\n \"avg_down\": True\n },\n \"input_shape\": (5, 3, 224, 224),\n \"name\": \"SEResNet18V2\"\n })\n test_cases.append({\n \"network_cls\": create_seresnet_torch,\n \"network_kwargs\": {\"num_layers\": 34,\n },\n \"input_shape\": (5, 3, 224, 224),\n \"name\": \"SEResNet34\"\n })\n test_cases.append({\n \"network_cls\": create_seresnet_torch,\n \"network_kwargs\": {\"num_layers\": 26},\n \"input_shape\": (5, 3, 224, 224),\n \"name\": \"SEResNet26\"\n })\n test_cases.append({\n \"network_cls\": create_seresnet_torch,\n \"network_kwargs\": {\"num_layers\": 50,\n },\n \"input_shape\": (5, 3, 224, 224),\n \"name\": \"SEResNet50\"\n })\n test_cases.append({\n \"network_cls\": create_seresnet_torch,\n \"network_kwargs\": {\"num_layers\": 101,\n },\n \"input_shape\": (5, 3, 224, 224),\n \"name\": \"SEResNet101\"\n })\n test_cases.append({\n \"network_cls\": create_seresnet_torch,\n \"network_kwargs\": {\"num_layers\": 152,\n },\n \"input_shape\": (5, 3, 224, 224),\n \"name\": \"SEResNet152\"\n })\n\n test_cases.append({\n \"network_cls\": create_seresnext_torch,\n \"network_kwargs\": {\"num_layers\": 26,\n \"cardinality\": 1,\n },\n \"input_shape\": (5, 3, 224, 224),\n \"name\": \"SEResNeXt26\"\n })\n test_cases.append({\n \"network_cls\": create_seresnext_torch,\n \"network_kwargs\": {\"num_layers\": 26,\n \"deep_start\": True,\n \"avg_down\": True,\n },\n \"input_shape\": (5, 3, 224, 224),\n \"name\": \"SEResNeXt26V2\"\n })\n test_cases.append({\n \"network_cls\": create_seresnext_torch,\n \"network_kwargs\": {\"num_layers\": 50,\n },\n \"input_shape\": (5, 3, 224, 224),\n \"name\": 
\"SEResNeXt50\"\n })\n test_cases.append({\n \"network_cls\": create_seresnext_torch,\n \"network_kwargs\": {\"num_layers\": 101,\n },\n \"input_shape\": (5, 3, 224, 224),\n \"name\": \"SEResNeXt101\"\n })\n test_cases.append({\n \"network_cls\": create_seresnext_torch,\n \"network_kwargs\": {\"num_layers\": 152,\n },\n \"input_shape\": (5, 3, 224, 224),\n \"name\": \"SEResNeXt152\"\n })\n\n self.test_cases = test_cases\n\n def test_models_forward(self):\n import torch\n device = torch.device(\"cpu\")\n gpu_available = False\n\n # if torch.cuda.is_available():\n # gpu_available = True\n # device = torch.device(\"cuda:0\")\n\n print(\"Testing Model Inference:\")\n for case in self.test_cases:\n with self.subTest(case=case):\n with torch.no_grad():\n\n model = case[\"network_cls\"](**case[\"network_kwargs\"]\n ).to(device)\n input_tensor = torch.rand(case[\"input_shape\"]\n ).to(device)\n\n result = model(input_tensor)\n\n self.assertIsInstance(result, dict)\n\n if gpu_available:\n torch.cuda.synchronize()\n torch.cuda.empty_cache()\n del model\n del input_tensor\n del result\n gc.collect()\n print(\"\\t%s\" % case[\"name\"])\n\n\nif __name__ == '__main__':\n from multiprocessing import freeze_support\n freeze_support()\n unittest.main()\n"
] |
[
[
"torch.nn.BatchNorm1d",
"torch.nn.Dropout",
"torch.nn.Embedding",
"torch.nn.Tanh",
"torch.nn.Linear",
"torch.nn.LeakyReLU"
],
[
"torch.from_numpy"
],
[
"torch.cuda.synchronize",
"torch.cuda.empty_cache",
"torch.no_grad",
"torch.rand",
"torch.device"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
ye-zs/statsmodels
|
[
"19248ecae8c23c7ca8499ce6fb9cf19a931d7c7f"
] |
[
"statsmodels/tsa/statespace/mlemodel.py"
] |
[
"# -*- coding: utf-8 -*-\n\"\"\"\nState Space Model\n\nAuthor: Chad Fulton\nLicense: Simplified-BSD\n\"\"\"\nimport contextlib\nimport warnings\n\nimport datetime as dt\nfrom types import SimpleNamespace\nimport numpy as np\nimport pandas as pd\nfrom scipy.stats import norm\n\nfrom statsmodels.tools.tools import pinv_extended, Bunch\nfrom statsmodels.tools.sm_exceptions import PrecisionWarning, ValueWarning\nfrom statsmodels.tools.numdiff import (_get_epsilon, approx_hess_cs,\n approx_fprime_cs, approx_fprime)\nfrom statsmodels.tools.decorators import cache_readonly\nfrom statsmodels.tools.eval_measures import aic, aicc, bic, hqic\n\nimport statsmodels.base.wrapper as wrap\n\nimport statsmodels.tsa.base.prediction as pred\n\nfrom statsmodels.base.data import PandasData\nimport statsmodels.tsa.base.tsa_model as tsbase\n\nfrom .news import NewsResults, NewsResultsWrapper\nfrom .simulation_smoother import SimulationSmoother\nfrom .kalman_smoother import SmootherResults\nfrom .kalman_filter import INVERT_UNIVARIATE, SOLVE_LU, MEMORY_CONSERVE\nfrom .initialization import Initialization\nfrom .tools import prepare_exog, concat\n\n\ndef _handle_args(names, defaults, *args, **kwargs):\n output_args = []\n # We need to handle positional arguments in two ways, in case this was\n # called by a Scipy optimization routine\n if len(args) > 0:\n # the fit() method will pass a dictionary\n if isinstance(args[0], dict):\n flags = args[0]\n # otherwise, a user may have just used positional arguments...\n else:\n flags = dict(zip(names, args))\n for i in range(len(names)):\n output_args.append(flags.get(names[i], defaults[i]))\n\n for name, value in flags.items():\n if name in kwargs:\n raise TypeError(\"loglike() got multiple values for keyword\"\n \" argument '%s'\" % name)\n else:\n for i in range(len(names)):\n output_args.append(kwargs.pop(names[i], defaults[i]))\n\n return tuple(output_args) + (kwargs,)\n\n\ndef _check_index(desired_index, dta, title='data'):\n given_index = None\n if isinstance(dta, (pd.Series, pd.DataFrame)):\n given_index = dta.index\n if given_index is not None and not desired_index.equals(given_index):\n desired_freq = getattr(desired_index, 'freq', None)\n given_freq = getattr(given_index, 'freq', None)\n if ((desired_freq is not None or given_freq is not None) and\n desired_freq != given_freq):\n raise ValueError('Given %s does not have an index'\n ' that extends the index of the'\n ' model. Expected index frequency is'\n ' \"%s\", but got \"%s\".'\n % (title, desired_freq, given_freq))\n else:\n raise ValueError('Given %s does not have an index'\n ' that extends the index of the'\n ' model.' % title)\n\n\nclass MLEModel(tsbase.TimeSeriesModel):\n r\"\"\"\n State space model for maximum likelihood estimation\n\n Parameters\n ----------\n endog : array_like\n The observed time-series process :math:`y`\n k_states : int\n The dimension of the unobserved state process.\n exog : array_like, optional\n Array of exogenous regressors, shaped nobs x k. Default is no\n exogenous regressors.\n dates : array_like of datetime, optional\n An array-like object of datetime objects. If a Pandas object is given\n for endog, it is assumed to have a DateIndex.\n freq : str, optional\n The frequency of the time-series. A Pandas offset or 'B', 'D', 'W',\n 'M', 'A', or 'Q'. This is optional if dates are given.\n **kwargs\n Keyword arguments may be used to provide default values for state space\n matrices or for Kalman filtering options. 
See `Representation`, and\n `KalmanFilter` for more details.\n\n Attributes\n ----------\n ssm : statsmodels.tsa.statespace.kalman_filter.KalmanFilter\n Underlying state space representation.\n\n See Also\n --------\n statsmodels.tsa.statespace.mlemodel.MLEResults\n statsmodels.tsa.statespace.kalman_filter.KalmanFilter\n statsmodels.tsa.statespace.representation.Representation\n\n Notes\n -----\n This class wraps the state space model with Kalman filtering to add in\n functionality for maximum likelihood estimation. In particular, it adds\n the concept of updating the state space representation based on a defined\n set of parameters, through the `update` method or `updater` attribute (see\n below for more details on which to use when), and it adds a `fit` method\n which uses a numerical optimizer to select the parameters that maximize\n the likelihood of the model.\n\n The `start_params` `update` method must be overridden in the\n child class (and the `transform` and `untransform` methods, if needed).\n \"\"\"\n\n def __init__(self, endog, k_states, exog=None, dates=None, freq=None,\n **kwargs):\n # Initialize the model base\n super(MLEModel, self).__init__(endog=endog, exog=exog,\n dates=dates, freq=freq,\n missing='none')\n\n # Store kwargs to recreate model\n self._init_kwargs = kwargs\n\n # Prepared the endog array: C-ordered, shape=(nobs x k_endog)\n self.endog, self.exog = self.prepare_data()\n\n # Dimensions\n self.nobs = self.endog.shape[0]\n self.k_states = k_states\n\n # Initialize the state-space representation\n self.initialize_statespace(**kwargs)\n\n # Setup holder for fixed parameters\n self._has_fixed_params = False\n self._fixed_params = None\n self._params_index = None\n self._fixed_params_index = None\n self._free_params_index = None\n\n def prepare_data(self):\n \"\"\"\n Prepare data for use in the state space representation\n \"\"\"\n endog = np.array(self.data.orig_endog, order='C')\n exog = self.data.orig_exog\n if exog is not None:\n exog = np.array(exog)\n\n # Base class may allow 1-dim data, whereas we need 2-dim\n if endog.ndim == 1:\n endog.shape = (endog.shape[0], 1) # this will be C-contiguous\n\n return endog, exog\n\n def initialize_statespace(self, **kwargs):\n \"\"\"\n Initialize the state space representation\n\n Parameters\n ----------\n **kwargs\n Additional keyword arguments to pass to the state space class\n constructor.\n \"\"\"\n # (Now self.endog is C-ordered and in long format (nobs x k_endog). 
To\n # get F-ordered and in wide format just need to transpose)\n endog = self.endog.T\n\n # Instantiate the state space object\n self.ssm = SimulationSmoother(endog.shape[0], self.k_states,\n nobs=endog.shape[1], **kwargs)\n # Bind the data to the model\n self.ssm.bind(endog)\n\n # Other dimensions, now that `ssm` is available\n self.k_endog = self.ssm.k_endog\n\n def _get_index_with_final_state(self):\n # The index we inherit from `TimeSeriesModel` will only cover the\n # data sample itself, but we will also need an index value for the\n # final state which is the next time step to the last datapoint.\n # This method figures out an appropriate value for the three types of\n # supported indexes: date-based, Int64Index, or RangeIndex\n if self._index_dates:\n if isinstance(self._index, pd.DatetimeIndex):\n index = pd.date_range(\n start=self._index[0], periods=len(self._index) + 1,\n freq=self._index.freq)\n elif isinstance(self._index, pd.PeriodIndex):\n index = pd.period_range(\n start=self._index[0], periods=len(self._index) + 1,\n freq=self._index.freq)\n else:\n raise NotImplementedError\n elif isinstance(self._index, pd.RangeIndex):\n # COMPAT: pd.RangeIndex does not have start, stop, step prior to\n # pandas 0.25\n try:\n start = self._index.start\n stop = self._index.stop\n step = self._index.step\n except AttributeError:\n start = self._index._start\n stop = self._index._stop\n step = self._index._step\n index = pd.RangeIndex(start, stop + step, step)\n elif isinstance(self._index, pd.Int64Index):\n # The only valid Int64Index is a full, incrementing index, so this\n # is general\n value = self._index[-1] + 1\n index = pd.Int64Index(self._index.tolist() + [value])\n else:\n raise NotImplementedError\n return index\n\n def __setitem__(self, key, value):\n return self.ssm.__setitem__(key, value)\n\n def __getitem__(self, key):\n return self.ssm.__getitem__(key)\n\n def _get_init_kwds(self):\n # Get keywords based on model attributes\n kwds = super(MLEModel, self)._get_init_kwds()\n\n for key, value in kwds.items():\n if value is None and hasattr(self.ssm, key):\n kwds[key] = getattr(self.ssm, key)\n\n return kwds\n\n def clone(self, endog, exog=None, **kwargs):\n \"\"\"\n Clone state space model with new data and optionally new specification\n\n Parameters\n ----------\n endog : array_like\n The observed time-series process :math:`y`\n k_states : int\n The dimension of the unobserved state process.\n exog : array_like, optional\n Array of exogenous regressors, shaped nobs x k. 
Default is no\n exogenous regressors.\n kwargs\n Keyword arguments to pass to the new model class to change the\n model specification.\n\n Returns\n -------\n model : MLEModel subclass\n\n Notes\n -----\n This method must be implemented\n \"\"\"\n raise NotImplementedError('This method is not implemented in the base'\n ' class and must be set up by each specific'\n ' model.')\n\n def _clone_from_init_kwds(self, endog, **kwargs):\n # Cannot make this the default, because there is extra work required\n # for subclasses to make _get_init_kwds useful.\n use_kwargs = self._get_init_kwds()\n use_kwargs.update(kwargs)\n\n # Check for `exog`\n if getattr(self, 'k_exog', 0) > 0 and kwargs.get('exog', None) is None:\n raise ValueError('Cloning a model with an exogenous component'\n ' requires specifying a new exogenous array using'\n ' the `exog` argument.')\n\n mod = self.__class__(endog, **use_kwargs)\n return mod\n\n def set_filter_method(self, filter_method=None, **kwargs):\n \"\"\"\n Set the filtering method\n\n The filtering method controls aspects of which Kalman filtering\n approach will be used.\n\n Parameters\n ----------\n filter_method : int, optional\n Bitmask value to set the filter method to. See notes for details.\n **kwargs\n Keyword arguments may be used to influence the filter method by\n setting individual boolean flags. See notes for details.\n\n Notes\n -----\n This method is rarely used. See the corresponding function in the\n `KalmanFilter` class for details.\n \"\"\"\n self.ssm.set_filter_method(filter_method, **kwargs)\n\n def set_inversion_method(self, inversion_method=None, **kwargs):\n \"\"\"\n Set the inversion method\n\n The Kalman filter may contain one matrix inversion: that of the\n forecast error covariance matrix. The inversion method controls how and\n if that inverse is performed.\n\n Parameters\n ----------\n inversion_method : int, optional\n Bitmask value to set the inversion method to. See notes for\n details.\n **kwargs\n Keyword arguments may be used to influence the inversion method by\n setting individual boolean flags. See notes for details.\n\n Notes\n -----\n This method is rarely used. See the corresponding function in the\n `KalmanFilter` class for details.\n \"\"\"\n self.ssm.set_inversion_method(inversion_method, **kwargs)\n\n def set_stability_method(self, stability_method=None, **kwargs):\n \"\"\"\n Set the numerical stability method\n\n The Kalman filter is a recursive algorithm that may in some cases\n suffer issues with numerical stability. The stability method controls\n what, if any, measures are taken to promote stability.\n\n Parameters\n ----------\n stability_method : int, optional\n Bitmask value to set the stability method to. See notes for\n details.\n **kwargs\n Keyword arguments may be used to influence the stability method by\n setting individual boolean flags. See notes for details.\n\n Notes\n -----\n This method is rarely used. See the corresponding function in the\n `KalmanFilter` class for details.\n \"\"\"\n self.ssm.set_stability_method(stability_method, **kwargs)\n\n def set_conserve_memory(self, conserve_memory=None, **kwargs):\n \"\"\"\n Set the memory conservation method\n\n By default, the Kalman filter computes a number of intermediate\n matrices at each iteration. The memory conservation options control\n which of those matrices are stored.\n\n Parameters\n ----------\n conserve_memory : int, optional\n Bitmask value to set the memory conservation method to. 
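(For example, the `MEMORY_CONSERVE` flag from `statsmodels.tsa.statespace.kalman_filter`, which this class itself applies when `low_memory=True` is passed to `fit`, could be supplied here.) 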
See notes\n for details.\n **kwargs\n Keyword arguments may be used to influence the memory conservation\n method by setting individual boolean flags.\n\n Notes\n -----\n This method is rarely used. See the corresponding function in the\n `KalmanFilter` class for details.\n \"\"\"\n self.ssm.set_conserve_memory(conserve_memory, **kwargs)\n\n def set_smoother_output(self, smoother_output=None, **kwargs):\n \"\"\"\n Set the smoother output\n\n The smoother can produce several types of results. The smoother output\n variable controls which are calculated and returned.\n\n Parameters\n ----------\n smoother_output : int, optional\n Bitmask value to set the smoother output to. See notes for details.\n **kwargs\n Keyword arguments may be used to influence the smoother output by\n setting individual boolean flags.\n\n Notes\n -----\n This method is rarely used. See the corresponding function in the\n `KalmanSmoother` class for details.\n \"\"\"\n self.ssm.set_smoother_output(smoother_output, **kwargs)\n\n def initialize_known(self, initial_state, initial_state_cov):\n \"\"\"Initialize known\"\"\"\n self.ssm.initialize_known(initial_state, initial_state_cov)\n\n def initialize_approximate_diffuse(self, variance=None):\n \"\"\"Initialize approximate diffuse\"\"\"\n self.ssm.initialize_approximate_diffuse(variance)\n\n def initialize_stationary(self):\n \"\"\"Initialize stationary\"\"\"\n self.ssm.initialize_stationary()\n\n @property\n def initialization(self):\n return self.ssm.initialization\n\n @initialization.setter\n def initialization(self, value):\n self.ssm.initialization = value\n\n @property\n def initial_variance(self):\n return self.ssm.initial_variance\n\n @initial_variance.setter\n def initial_variance(self, value):\n self.ssm.initial_variance = value\n\n @property\n def loglikelihood_burn(self):\n return self.ssm.loglikelihood_burn\n\n @loglikelihood_burn.setter\n def loglikelihood_burn(self, value):\n self.ssm.loglikelihood_burn = value\n\n @property\n def tolerance(self):\n return self.ssm.tolerance\n\n @tolerance.setter\n def tolerance(self, value):\n self.ssm.tolerance = value\n\n def _validate_can_fix_params(self, param_names):\n for param_name in param_names:\n if param_name not in self.param_names:\n raise ValueError('Invalid parameter name passed: \"%s\".'\n % param_name)\n\n @contextlib.contextmanager\n def fix_params(self, params):\n \"\"\"\n Fix parameters to specific values (context manager)\n\n Parameters\n ----------\n params : dict\n Dictionary describing the fixed parameter values, of the form\n `param_name: fixed_value`. 
See the `param_names` property for valid\n parameter names.\n\n Examples\n --------\n >>> mod = sm.tsa.SARIMAX(endog, order=(1, 0, 1))\n >>> with mod.fix_params({'ar.L1': 0.5}):\n res = mod.fit()\n \"\"\"\n k_params = len(self.param_names)\n # Initialization (this is done here rather than in the constructor\n # because param_names may not be available at that point)\n if self._fixed_params is None:\n self._fixed_params = {}\n self._params_index = dict(\n zip(self.param_names, np.arange(k_params)))\n\n # Cache the current fixed parameters\n cache_fixed_params = self._fixed_params.copy()\n cache_has_fixed_params = self._has_fixed_params\n cache_fixed_params_index = self._fixed_params_index\n cache_free_params_index = self._free_params_index\n\n # Validate parameter names and values\n self._validate_can_fix_params(set(params.keys()))\n\n # Set the new fixed parameters, keeping the order as given by\n # param_names\n self._fixed_params.update(params)\n self._fixed_params = dict([\n (name, self._fixed_params[name]) for name in self.param_names\n if name in self._fixed_params])\n\n # Update associated values\n self._has_fixed_params = True\n self._fixed_params_index = [self._params_index[key]\n for key in self._fixed_params.keys()]\n self._free_params_index = list(\n set(np.arange(k_params)).difference(self._fixed_params_index))\n\n try:\n yield\n finally:\n # Reset the fixed parameters\n self._has_fixed_params = cache_has_fixed_params\n self._fixed_params = cache_fixed_params\n self._fixed_params_index = cache_fixed_params_index\n self._free_params_index = cache_free_params_index\n\n def fit(self, start_params=None, transformed=True, includes_fixed=False,\n cov_type=None, cov_kwds=None, method='lbfgs', maxiter=50,\n full_output=1, disp=5, callback=None, return_params=False,\n optim_score=None, optim_complex_step=None, optim_hessian=None,\n flags=None, low_memory=False, **kwargs):\n \"\"\"\n Fits the model by maximum likelihood via Kalman filter.\n\n Parameters\n ----------\n start_params : array_like, optional\n Initial guess of the solution for the loglikelihood maximization.\n If None, the default is given by Model.start_params.\n transformed : bool, optional\n Whether or not `start_params` is already transformed. Default is\n True.\n includes_fixed : bool, optional\n If parameters were previously fixed with the `fix_params` method,\n this argument describes whether or not `start_params` also includes\n the fixed parameters, in addition to the free parameters. Default\n is False.\n cov_type : str, optional\n The `cov_type` keyword governs the method for calculating the\n covariance matrix of parameter estimates. Can be one of:\n\n - 'opg' for the outer product of gradient estimator\n - 'oim' for the observed information matrix estimator, calculated\n using the method of Harvey (1989)\n - 'approx' for the observed information matrix estimator,\n calculated using a numerical approximation of the Hessian matrix.\n - 'robust' for an approximate (quasi-maximum likelihood) covariance\n matrix that may be valid even in the presence of some\n misspecifications. 
Intermediate calculations use the 'oim'\n method.\n - 'robust_approx' is the same as 'robust' except that the\n intermediate calculations use the 'approx' method.\n - 'none' for no covariance matrix calculation.\n\n Default is 'opg' unless memory conservation is used to avoid\n computing the loglikelihood values for each observation, in which\n case the default is 'approx'.\n cov_kwds : dict or None, optional\n A dictionary of arguments affecting covariance matrix computation.\n\n **opg, oim, approx, robust, robust_approx**\n\n - 'approx_complex_step' : bool, optional - If True, numerical\n approximations are computed using complex-step methods. If False,\n numerical approximations are computed using finite difference\n methods. Default is True.\n - 'approx_centered' : bool, optional - If True, numerical\n approximations computed using finite difference methods use a\n centered approximation. Default is False.\n method : str, optional\n The `method` determines which solver from `scipy.optimize`\n is used, and it can be chosen from among the following strings:\n\n - 'newton' for Newton-Raphson, 'nm' for Nelder-Mead\n - 'bfgs' for Broyden-Fletcher-Goldfarb-Shanno (BFGS)\n - 'lbfgs' for limited-memory BFGS with optional box constraints\n - 'powell' for modified Powell's method\n - 'cg' for conjugate gradient\n - 'ncg' for Newton-conjugate gradient\n - 'basinhopping' for global basin-hopping solver\n\n The explicit arguments in `fit` are passed to the solver,\n with the exception of the basin-hopping solver. Each\n solver has several optional arguments that are not the same across\n solvers. See the notes section below (or scipy.optimize) for the\n available arguments and for the list of explicit arguments that the\n basin-hopping solver supports.\n maxiter : int, optional\n The maximum number of iterations to perform.\n full_output : bool, optional\n Set to True to have all available output in the Results object's\n mle_retvals attribute. The output is dependent on the solver.\n See LikelihoodModelResults notes section for more information.\n disp : bool, optional\n Set to True to print convergence messages.\n callback : callable callback(xk), optional\n Called after each iteration, as callback(xk), where xk is the\n current parameter vector.\n return_params : bool, optional\n Whether or not to return only the array of maximizing parameters.\n Default is False.\n optim_score : {'harvey', 'approx'} or None, optional\n The method by which the score vector is calculated. 'harvey' uses\n the method from Harvey (1989), 'approx' uses either finite\n difference or complex step differentiation depending upon the\n value of `optim_complex_step`, and None uses the built-in gradient\n approximation of the optimizer. Default is None. This keyword is\n only relevant if the optimization method uses the score.\n optim_complex_step : bool, optional\n Whether or not to use complex step differentiation when\n approximating the score; if False, finite difference approximation\n is used. Default is True. This keyword is only relevant if\n `optim_score` is set to 'harvey' or 'approx'.\n optim_hessian : {'opg','oim','approx'}, optional\n The method by which the Hessian is numerically approximated. 'opg'\n uses outer product of gradients, 'oim' uses the information\n matrix formula from Harvey (1989), and 'approx' uses numerical\n approximation. 
This keyword is only relevant if the\n optimization method uses the Hessian matrix.\n low_memory : bool, optional\n If set to True, techniques are applied to substantially reduce\n memory usage. If used, some features of the results object will\n not be available (including smoothed results and in-sample\n prediction), although out-of-sample forecasting is possible.\n Default is False.\n **kwargs\n Additional keyword arguments to pass to the optimizer.\n\n Returns\n -------\n MLEResults\n\n See Also\n --------\n statsmodels.base.model.LikelihoodModel.fit\n statsmodels.tsa.statespace.mlemodel.MLEResults\n \"\"\"\n if start_params is None:\n start_params = self.start_params\n transformed = True\n includes_fixed = True\n\n # Update the score method\n if optim_score is None and method == 'lbfgs':\n kwargs.setdefault('approx_grad', True)\n kwargs.setdefault('epsilon', 1e-5)\n elif optim_score is None:\n optim_score = 'approx'\n\n # Check for complex step differentiation\n if optim_complex_step is None:\n optim_complex_step = not self.ssm._complex_endog\n elif optim_complex_step and self.ssm._complex_endog:\n raise ValueError('Cannot use complex step derivatives when data'\n ' or parameters are complex.')\n\n # Standardize starting parameters\n start_params = self.handle_params(start_params, transformed=True,\n includes_fixed=includes_fixed)\n\n # Unconstrain the starting parameters\n if transformed:\n start_params = self.untransform_params(start_params)\n\n # Remove any fixed parameters\n if self._has_fixed_params:\n start_params = start_params[self._free_params_index]\n\n # If all parameters are fixed, we are done\n if self._has_fixed_params and len(start_params) == 0:\n mlefit = Bunch(params=[], mle_retvals=None,\n mle_settings=None)\n else:\n # Maximum likelihood estimation\n if flags is None:\n flags = {}\n flags.update({\n 'transformed': False,\n 'includes_fixed': False,\n 'score_method': optim_score,\n 'approx_complex_step': optim_complex_step\n })\n if optim_hessian is not None:\n flags['hessian_method'] = optim_hessian\n fargs = (flags,)\n mlefit = super(MLEModel, self).fit(start_params, method=method,\n fargs=fargs,\n maxiter=maxiter,\n full_output=full_output,\n disp=disp, callback=callback,\n skip_hessian=True, **kwargs)\n\n # Just return the fitted parameters if requested\n if return_params:\n return self.handle_params(mlefit.params, transformed=False,\n includes_fixed=False)\n # Otherwise construct the results class if desired\n else:\n # Handle memory conservation option\n if low_memory:\n conserve_memory = self.ssm.conserve_memory\n self.ssm.set_conserve_memory(MEMORY_CONSERVE)\n\n # Perform filtering / smoothing\n if (self.ssm.memory_no_predicted or self.ssm.memory_no_gain\n or self.ssm.memory_no_smoothing):\n func = self.filter\n else:\n func = self.smooth\n res = func(mlefit.params, transformed=False, includes_fixed=False,\n cov_type=cov_type, cov_kwds=cov_kwds)\n\n res.mlefit = mlefit\n res.mle_retvals = mlefit.mle_retvals\n res.mle_settings = mlefit.mle_settings\n\n # Reset memory conservation\n if low_memory:\n self.ssm.set_conserve_memory(conserve_memory)\n\n return res\n\n def fit_constrained(self, constraints, start_params=None, **fit_kwds):\n \"\"\"\n Fit the model with some parameters subject to equality constraints.\n\n Parameters\n ----------\n constraints : dict\n Dictionary of constraints, of the form `param_name: fixed_value`.\n See the `param_names` property for valid parameter names.\n start_params : array_like, optional\n Initial guess of the solution for the 
loglikelihood maximization.\n If None, the default is given by Model.start_params.\n **fit_kwds : keyword arguments\n fit_kwds are used in the optimization of the remaining parameters.\n\n Returns\n -------\n results : Results instance\n\n Examples\n --------\n >>> mod = sm.tsa.SARIMAX(endog, order=(1, 0, 1))\n >>> res = mod.fit_constrained({'ar.L1': 0.5})\n \"\"\"\n with self.fix_params(constraints):\n res = self.fit(start_params, **fit_kwds)\n return res\n\n @property\n def _res_classes(self):\n return {'fit': (MLEResults, MLEResultsWrapper)}\n\n def _wrap_results(self, params, result, return_raw, cov_type=None,\n cov_kwds=None, results_class=None, wrapper_class=None):\n if not return_raw:\n # Wrap in a results object\n result_kwargs = {}\n if cov_type is not None:\n result_kwargs['cov_type'] = cov_type\n if cov_kwds is not None:\n result_kwargs['cov_kwds'] = cov_kwds\n\n if results_class is None:\n results_class = self._res_classes['fit'][0]\n if wrapper_class is None:\n wrapper_class = self._res_classes['fit'][1]\n\n res = results_class(self, params, result, **result_kwargs)\n result = wrapper_class(res)\n return result\n\n def filter(self, params, transformed=True, includes_fixed=False,\n complex_step=False, cov_type=None, cov_kwds=None,\n return_ssm=False, results_class=None,\n results_wrapper_class=None, low_memory=False, **kwargs):\n \"\"\"\n Kalman filtering\n\n Parameters\n ----------\n params : array_like\n Array of parameters at which to evaluate the loglikelihood\n function.\n transformed : bool, optional\n Whether or not `params` is already transformed. Default is True.\n return_ssm : bool,optional\n Whether or not to return only the state space output or a full\n results object. Default is to return a full results object.\n cov_type : str, optional\n See `MLEResults.fit` for a description of covariance matrix types\n for results object.\n cov_kwds : dict or None, optional\n See `MLEResults.get_robustcov_results` for a description required\n keywords for alternative covariance estimators\n low_memory : bool, optional\n If set to True, techniques are applied to substantially reduce\n memory usage. If used, some features of the results object will\n not be available (including in-sample prediction), although\n out-of-sample forecasting is possible. Default is False.\n **kwargs\n Additional keyword arguments to pass to the Kalman filter. 
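As a sketch of the kind of keyword that gets forwarded (mirroring what this method's own body does), one could pass `conserve_memory=MEMORY_CONSERVE` or `inversion_method=INVERT_UNIVARIATE | SOLVE_LU`, which are handed through to `self.ssm.filter`. 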
See\n `KalmanFilter.filter` for more details.\n \"\"\"\n params = self.handle_params(params, transformed=transformed,\n includes_fixed=includes_fixed)\n self.update(params, transformed=True, includes_fixed=True,\n complex_step=complex_step)\n\n # Save the parameter names\n self.data.param_names = self.param_names\n\n if complex_step:\n kwargs['inversion_method'] = INVERT_UNIVARIATE | SOLVE_LU\n\n # Handle memory conservation\n if low_memory:\n kwargs['conserve_memory'] = MEMORY_CONSERVE\n\n # Get the state space output\n result = self.ssm.filter(complex_step=complex_step, **kwargs)\n\n # Wrap in a results object\n return self._wrap_results(params, result, return_ssm, cov_type,\n cov_kwds, results_class,\n results_wrapper_class)\n\n def smooth(self, params, transformed=True, includes_fixed=False,\n complex_step=False, cov_type=None, cov_kwds=None,\n return_ssm=False, results_class=None,\n results_wrapper_class=None, **kwargs):\n \"\"\"\n Kalman smoothing\n\n Parameters\n ----------\n params : array_like\n Array of parameters at which to evaluate the loglikelihood\n function.\n transformed : bool, optional\n Whether or not `params` is already transformed. Default is True.\n return_ssm : bool,optional\n Whether or not to return only the state space output or a full\n results object. Default is to return a full results object.\n cov_type : str, optional\n See `MLEResults.fit` for a description of covariance matrix types\n for results object.\n cov_kwds : dict or None, optional\n See `MLEResults.get_robustcov_results` for a description required\n keywords for alternative covariance estimators\n **kwargs\n Additional keyword arguments to pass to the Kalman filter. See\n `KalmanFilter.filter` for more details.\n \"\"\"\n params = self.handle_params(params, transformed=transformed,\n includes_fixed=includes_fixed)\n self.update(params, transformed=True, includes_fixed=True,\n complex_step=complex_step)\n\n # Save the parameter names\n self.data.param_names = self.param_names\n\n if complex_step:\n kwargs['inversion_method'] = INVERT_UNIVARIATE | SOLVE_LU\n\n # Get the state space output\n result = self.ssm.smooth(complex_step=complex_step, **kwargs)\n\n # Wrap in a results object\n return self._wrap_results(params, result, return_ssm, cov_type,\n cov_kwds, results_class,\n results_wrapper_class)\n\n _loglike_param_names = ['transformed', 'includes_fixed', 'complex_step']\n _loglike_param_defaults = [True, False, False]\n\n def loglike(self, params, *args, **kwargs):\n \"\"\"\n Loglikelihood evaluation\n\n Parameters\n ----------\n params : array_like\n Array of parameters at which to evaluate the loglikelihood\n function.\n transformed : bool, optional\n Whether or not `params` is already transformed. Default is True.\n **kwargs\n Additional keyword arguments to pass to the Kalman filter. See\n `KalmanFilter.filter` for more details.\n\n See Also\n --------\n update : modifies the internal state of the state space model to\n reflect new params\n\n Notes\n -----\n [1]_ recommend maximizing the average likelihood to avoid scale issues;\n this is done automatically by the base Model fit method.\n\n References\n ----------\n .. [1] Koopman, Siem Jan, Neil Shephard, and Jurgen A. Doornik. 1999.\n Statistical Algorithms for Models in State Space Using SsfPack 2.2.\n Econometrics Journal 2 (1): 107-60. 
doi:10.1111/1368-423X.00023.\n \"\"\"\n transformed, includes_fixed, complex_step, kwargs = _handle_args(\n MLEModel._loglike_param_names, MLEModel._loglike_param_defaults,\n *args, **kwargs)\n\n params = self.handle_params(params, transformed=transformed,\n includes_fixed=includes_fixed)\n self.update(params, transformed=True, includes_fixed=True,\n complex_step=complex_step)\n\n if complex_step:\n kwargs['inversion_method'] = INVERT_UNIVARIATE | SOLVE_LU\n\n loglike = self.ssm.loglike(complex_step=complex_step, **kwargs)\n\n # Koopman, Shephard, and Doornik recommend maximizing the average\n # likelihood to avoid scale issues, but the averaging is done\n # automatically in the base model `fit` method\n return loglike\n\n def loglikeobs(self, params, transformed=True, includes_fixed=False,\n complex_step=False, **kwargs):\n \"\"\"\n Loglikelihood evaluation\n\n Parameters\n ----------\n params : array_like\n Array of parameters at which to evaluate the loglikelihood\n function.\n transformed : bool, optional\n Whether or not `params` is already transformed. Default is True.\n **kwargs\n Additional keyword arguments to pass to the Kalman filter. See\n `KalmanFilter.filter` for more details.\n\n See Also\n --------\n update : modifies the internal state of the Model to reflect new params\n\n Notes\n -----\n [1]_ recommend maximizing the average likelihood to avoid scale issues;\n this is done automatically by the base Model fit method.\n\n References\n ----------\n .. [1] Koopman, Siem Jan, Neil Shephard, and Jurgen A. Doornik. 1999.\n Statistical Algorithms for Models in State Space Using SsfPack 2.2.\n Econometrics Journal 2 (1): 107-60. doi:10.1111/1368-423X.00023.\n \"\"\"\n params = self.handle_params(params, transformed=transformed,\n includes_fixed=includes_fixed)\n\n # If we're using complex-step differentiation, then we cannot use\n # Cholesky factorization\n if complex_step:\n kwargs['inversion_method'] = INVERT_UNIVARIATE | SOLVE_LU\n\n self.update(params, transformed=True, includes_fixed=True,\n complex_step=complex_step)\n\n return self.ssm.loglikeobs(complex_step=complex_step, **kwargs)\n\n def simulation_smoother(self, simulation_output=None, **kwargs):\n r\"\"\"\n Retrieve a simulation smoother for the state space model.\n\n Parameters\n ----------\n simulation_output : int, optional\n Determines which simulation smoother output is calculated.\n Default is all (including state and disturbances).\n **kwargs\n Additional keyword arguments, used to set the simulation output.\n See `set_simulation_output` for more details.\n\n Returns\n -------\n SimulationSmoothResults\n \"\"\"\n return self.ssm.simulation_smoother(\n simulation_output=simulation_output, **kwargs)\n\n def _forecasts_error_partial_derivatives(self, params, transformed=True,\n includes_fixed=False,\n approx_complex_step=None,\n approx_centered=False,\n res=None, **kwargs):\n params = np.array(params, ndmin=1)\n\n # We cannot use complex-step differentiation with non-transformed\n # parameters\n if approx_complex_step is None:\n approx_complex_step = transformed\n if not transformed and approx_complex_step:\n raise ValueError(\"Cannot use complex-step approximations to\"\n \" calculate the observed_information_matrix\"\n \" with untransformed parameters.\")\n\n # If we're using complex-step differentiation, then we cannot use\n # Cholesky factorization\n if approx_complex_step:\n kwargs['inversion_method'] = INVERT_UNIVARIATE | SOLVE_LU\n\n # Get values at the params themselves\n if res is None:\n 
self.update(params, transformed=transformed,\n includes_fixed=includes_fixed,\n complex_step=approx_complex_step)\n res = self.ssm.filter(complex_step=approx_complex_step, **kwargs)\n\n # Setup\n n = len(params)\n\n # Compute partial derivatives w.r.t. forecast error and forecast\n # error covariance\n partials_forecasts_error = (\n np.zeros((self.k_endog, self.nobs, n))\n )\n partials_forecasts_error_cov = (\n np.zeros((self.k_endog, self.k_endog, self.nobs, n))\n )\n if approx_complex_step:\n epsilon = _get_epsilon(params, 2, None, n)\n increments = np.identity(n) * 1j * epsilon\n\n for i, ih in enumerate(increments):\n self.update(params + ih, transformed=transformed,\n includes_fixed=includes_fixed,\n complex_step=True)\n _res = self.ssm.filter(complex_step=True, **kwargs)\n\n partials_forecasts_error[:, :, i] = (\n _res.forecasts_error.imag / epsilon[i]\n )\n\n partials_forecasts_error_cov[:, :, :, i] = (\n _res.forecasts_error_cov.imag / epsilon[i]\n )\n elif not approx_centered:\n epsilon = _get_epsilon(params, 2, None, n)\n ei = np.zeros((n,), float)\n for i in range(n):\n ei[i] = epsilon[i]\n self.update(params + ei, transformed=transformed,\n includes_fixed=includes_fixed, complex_step=False)\n _res = self.ssm.filter(complex_step=False, **kwargs)\n\n partials_forecasts_error[:, :, i] = (\n _res.forecasts_error - res.forecasts_error) / epsilon[i]\n\n partials_forecasts_error_cov[:, :, :, i] = (\n _res.forecasts_error_cov -\n res.forecasts_error_cov) / epsilon[i]\n ei[i] = 0.0\n else:\n epsilon = _get_epsilon(params, 3, None, n) / 2.\n ei = np.zeros((n,), float)\n for i in range(n):\n ei[i] = epsilon[i]\n\n self.update(params + ei, transformed=transformed,\n includes_fixed=includes_fixed, complex_step=False)\n _res1 = self.ssm.filter(complex_step=False, **kwargs)\n\n self.update(params - ei, transformed=transformed,\n includes_fixed=includes_fixed, complex_step=False)\n _res2 = self.ssm.filter(complex_step=False, **kwargs)\n\n partials_forecasts_error[:, :, i] = (\n (_res1.forecasts_error - _res2.forecasts_error) /\n (2 * epsilon[i]))\n\n partials_forecasts_error_cov[:, :, :, i] = (\n (_res1.forecasts_error_cov - _res2.forecasts_error_cov) /\n (2 * epsilon[i]))\n\n ei[i] = 0.0\n\n return partials_forecasts_error, partials_forecasts_error_cov\n\n def observed_information_matrix(self, params, transformed=True,\n includes_fixed=False,\n approx_complex_step=None,\n approx_centered=False, **kwargs):\n \"\"\"\n Observed information matrix\n\n Parameters\n ----------\n params : array_like, optional\n Array of parameters at which to evaluate the loglikelihood\n function.\n **kwargs\n Additional keyword arguments to pass to the Kalman filter. See\n `KalmanFilter.filter` for more details.\n\n Notes\n -----\n This method is from Harvey (1989), which shows that the information\n matrix only depends on terms from the gradient. This implementation is\n partially analytic and partially numeric approximation, therefore,\n because it uses the analytic formula for the information matrix, with\n numerically computed elements of the gradient.\n\n References\n ----------\n Harvey, Andrew C. 
1990.\n Forecasting, Structural Time Series Models and the Kalman Filter.\n Cambridge University Press.\n \"\"\"\n params = np.array(params, ndmin=1)\n\n # Setup\n n = len(params)\n\n # We cannot use complex-step differentiation with non-transformed\n # parameters\n if approx_complex_step is None:\n approx_complex_step = transformed\n if not transformed and approx_complex_step:\n raise ValueError(\"Cannot use complex-step approximations to\"\n \" calculate the observed_information_matrix\"\n \" with untransformed parameters.\")\n\n # Get values at the params themselves\n params = self.handle_params(params, transformed=transformed,\n includes_fixed=includes_fixed)\n self.update(params, transformed=True, includes_fixed=True,\n complex_step=approx_complex_step)\n # If we're using complex-step differentiation, then we cannot use\n # Cholesky factorization\n if approx_complex_step:\n kwargs['inversion_method'] = INVERT_UNIVARIATE | SOLVE_LU\n res = self.ssm.filter(complex_step=approx_complex_step, **kwargs)\n dtype = self.ssm.dtype\n\n # Save this for inversion later\n inv_forecasts_error_cov = res.forecasts_error_cov.copy()\n\n partials_forecasts_error, partials_forecasts_error_cov = (\n self._forecasts_error_partial_derivatives(\n params, transformed=transformed, includes_fixed=includes_fixed,\n approx_complex_step=approx_complex_step,\n approx_centered=approx_centered, res=res, **kwargs))\n\n # Compute the information matrix\n tmp = np.zeros((self.k_endog, self.k_endog, self.nobs, n), dtype=dtype)\n\n information_matrix = np.zeros((n, n), dtype=dtype)\n d = np.maximum(self.ssm.loglikelihood_burn, res.nobs_diffuse)\n for t in range(d, self.nobs):\n inv_forecasts_error_cov[:, :, t] = (\n np.linalg.inv(res.forecasts_error_cov[:, :, t])\n )\n for i in range(n):\n tmp[:, :, t, i] = np.dot(\n inv_forecasts_error_cov[:, :, t],\n partials_forecasts_error_cov[:, :, t, i]\n )\n for i in range(n):\n for j in range(n):\n information_matrix[i, j] += (\n 0.5 * np.trace(np.dot(tmp[:, :, t, i],\n tmp[:, :, t, j]))\n )\n information_matrix[i, j] += np.inner(\n partials_forecasts_error[:, t, i],\n np.dot(inv_forecasts_error_cov[:, :, t],\n partials_forecasts_error[:, t, j])\n )\n return information_matrix / (self.nobs - self.ssm.loglikelihood_burn)\n\n def opg_information_matrix(self, params, transformed=True,\n includes_fixed=False, approx_complex_step=None,\n **kwargs):\n \"\"\"\n Outer product of gradients information matrix\n\n Parameters\n ----------\n params : array_like, optional\n Array of parameters at which to evaluate the loglikelihood\n function.\n **kwargs\n Additional arguments to the `loglikeobs` method.\n\n References\n ----------\n Berndt, Ernst R., Bronwyn Hall, Robert Hall, and Jerry Hausman. 1974.\n Estimation and Inference in Nonlinear Structural Models.\n NBER Chapters. 
National Bureau of Economic Research, Inc.\n \"\"\"\n # We cannot use complex-step differentiation with non-transformed\n # parameters\n if approx_complex_step is None:\n approx_complex_step = transformed\n if not transformed and approx_complex_step:\n raise ValueError(\"Cannot use complex-step approximations to\"\n \" calculate the observed_information_matrix\"\n \" with untransformed parameters.\")\n\n score_obs = self.score_obs(params, transformed=transformed,\n includes_fixed=includes_fixed,\n approx_complex_step=approx_complex_step,\n **kwargs).transpose()\n return (\n np.inner(score_obs, score_obs) /\n (self.nobs - self.ssm.loglikelihood_burn)\n )\n\n def _score_complex_step(self, params, **kwargs):\n # the default epsilon can be too small\n # inversion_method = INVERT_UNIVARIATE | SOLVE_LU\n epsilon = _get_epsilon(params, 2., None, len(params))\n kwargs['transformed'] = True\n kwargs['complex_step'] = True\n return approx_fprime_cs(params, self.loglike, epsilon=epsilon,\n kwargs=kwargs)\n\n def _score_finite_difference(self, params, approx_centered=False,\n **kwargs):\n kwargs['transformed'] = True\n return approx_fprime(params, self.loglike, kwargs=kwargs,\n centered=approx_centered)\n\n def _score_harvey(self, params, approx_complex_step=True, **kwargs):\n score_obs = self._score_obs_harvey(\n params, approx_complex_step=approx_complex_step, **kwargs)\n return np.sum(score_obs, axis=0)\n\n def _score_obs_harvey(self, params, approx_complex_step=True,\n approx_centered=False, includes_fixed=False,\n **kwargs):\n \"\"\"\n Score\n\n Parameters\n ----------\n params : array_like, optional\n Array of parameters at which to evaluate the loglikelihood\n function.\n **kwargs\n Additional keyword arguments to pass to the Kalman filter. See\n `KalmanFilter.filter` for more details.\n\n Notes\n -----\n This method is from Harvey (1989), section 3.4.5\n\n References\n ----------\n Harvey, Andrew C. 1990.\n Forecasting, Structural Time Series Models and the Kalman Filter.\n Cambridge University Press.\n \"\"\"\n params = np.array(params, ndmin=1)\n n = len(params)\n\n # Get values at the params themselves\n self.update(params, transformed=True, includes_fixed=includes_fixed,\n complex_step=approx_complex_step)\n if approx_complex_step:\n kwargs['inversion_method'] = INVERT_UNIVARIATE | SOLVE_LU\n if 'transformed' in kwargs:\n del kwargs['transformed']\n res = self.ssm.filter(complex_step=approx_complex_step, **kwargs)\n\n # Get forecasts error partials\n partials_forecasts_error, partials_forecasts_error_cov = (\n self._forecasts_error_partial_derivatives(\n params, transformed=True, includes_fixed=includes_fixed,\n approx_complex_step=approx_complex_step,\n approx_centered=approx_centered, res=res, **kwargs))\n\n # Compute partial derivatives w.r.t. 
likelihood function\n partials = np.zeros((self.nobs, n))\n k_endog = self.k_endog\n for t in range(self.nobs):\n inv_forecasts_error_cov = np.linalg.inv(\n res.forecasts_error_cov[:, :, t])\n\n for i in range(n):\n partials[t, i] += np.trace(np.dot(\n np.dot(inv_forecasts_error_cov,\n partials_forecasts_error_cov[:, :, t, i]),\n (np.eye(k_endog) -\n np.dot(inv_forecasts_error_cov,\n np.outer(res.forecasts_error[:, t],\n res.forecasts_error[:, t])))))\n # 2 * dv / di * F^{-1} v_t\n # where x = F^{-1} v_t or F x = v\n partials[t, i] += 2 * np.dot(\n partials_forecasts_error[:, t, i],\n np.dot(inv_forecasts_error_cov, res.forecasts_error[:, t]))\n\n return -partials / 2.\n\n _score_param_names = ['transformed', 'includes_fixed', 'score_method',\n 'approx_complex_step', 'approx_centered']\n _score_param_defaults = [True, False, 'approx', None, False]\n\n def score(self, params, *args, **kwargs):\n \"\"\"\n Compute the score function at params.\n\n Parameters\n ----------\n params : array_like\n Array of parameters at which to evaluate the score.\n *args\n Additional positional arguments to the `loglike` method.\n **kwargs\n Additional keyword arguments to the `loglike` method.\n\n Returns\n -------\n score : ndarray\n Score, evaluated at `params`.\n\n Notes\n -----\n This is a numerical approximation, calculated using first-order complex\n step differentiation on the `loglike` method.\n\n Both args and kwargs are necessary because the optimizer from\n `fit` must call this function and only supports passing arguments via\n args (for example `scipy.optimize.fmin_l_bfgs`).\n \"\"\"\n (transformed, includes_fixed, method, approx_complex_step,\n approx_centered, kwargs) = (\n _handle_args(MLEModel._score_param_names,\n MLEModel._score_param_defaults, *args, **kwargs))\n # For fit() calls, the method is called 'score_method' (to distinguish\n # it from the method used for fit) but generally in kwargs the method\n # will just be called 'method'\n if 'method' in kwargs:\n method = kwargs.pop('method')\n\n if approx_complex_step is None:\n approx_complex_step = not self.ssm._complex_endog\n if approx_complex_step and self.ssm._complex_endog:\n raise ValueError('Cannot use complex step derivatives when data'\n ' or parameters are complex.')\n\n out = self.handle_params(\n params, transformed=transformed, includes_fixed=includes_fixed,\n return_jacobian=not transformed)\n if transformed:\n params = out\n else:\n params, transform_score = out\n\n if method == 'harvey':\n kwargs['includes_fixed'] = True\n score = self._score_harvey(\n params, approx_complex_step=approx_complex_step, **kwargs)\n elif method == 'approx' and approx_complex_step:\n kwargs['includes_fixed'] = True\n score = self._score_complex_step(params, **kwargs)\n elif method == 'approx':\n kwargs['includes_fixed'] = True\n score = self._score_finite_difference(\n params, approx_centered=approx_centered, **kwargs)\n else:\n raise NotImplementedError('Invalid score method.')\n\n if not transformed:\n score = np.dot(transform_score, score)\n\n if self._has_fixed_params and not includes_fixed:\n score = score[self._free_params_index]\n\n return score\n\n def score_obs(self, params, method='approx', transformed=True,\n includes_fixed=False, approx_complex_step=None,\n approx_centered=False, **kwargs):\n \"\"\"\n Compute the score per observation, evaluated at params\n\n Parameters\n ----------\n params : array_like\n Array of parameters at which to evaluate the score.\n **kwargs\n Additional arguments to the `loglike` method.\n\n Returns\n 
-------\n score : ndarray\n Score per observation, evaluated at `params`.\n\n Notes\n -----\n This is a numerical approximation, calculated using first-order complex\n step differentiation on the `loglikeobs` method.\n \"\"\"\n if not transformed and approx_complex_step:\n raise ValueError(\"Cannot use complex-step approximations to\"\n \" calculate the score at each observation\"\n \" with untransformed parameters.\")\n\n if approx_complex_step is None:\n approx_complex_step = not self.ssm._complex_endog\n if approx_complex_step and self.ssm._complex_endog:\n raise ValueError('Cannot use complex step derivatives when data'\n ' or parameters are complex.')\n\n params = self.handle_params(params, transformed=True,\n includes_fixed=includes_fixed)\n kwargs['transformed'] = transformed\n kwargs['includes_fixed'] = True\n\n if method == 'harvey':\n score = self._score_obs_harvey(\n params, approx_complex_step=approx_complex_step, **kwargs)\n elif method == 'approx' and approx_complex_step:\n # the default epsilon can be too small\n epsilon = _get_epsilon(params, 2., None, len(params))\n kwargs['complex_step'] = True\n score = approx_fprime_cs(params, self.loglikeobs, epsilon=epsilon,\n kwargs=kwargs)\n elif method == 'approx':\n score = approx_fprime(params, self.loglikeobs, kwargs=kwargs,\n centered=approx_centered)\n else:\n raise NotImplementedError('Invalid scoreobs method.')\n\n return score\n\n _hessian_param_names = ['transformed', 'hessian_method',\n 'approx_complex_step', 'approx_centered']\n _hessian_param_defaults = [True, 'approx', None, False]\n\n def hessian(self, params, *args, **kwargs):\n r\"\"\"\n Hessian matrix of the likelihood function, evaluated at the given\n parameters\n\n Parameters\n ----------\n params : array_like\n Array of parameters at which to evaluate the hessian.\n *args\n Additional positional arguments to the `loglike` method.\n **kwargs\n Additional keyword arguments to the `loglike` method.\n\n Returns\n -------\n hessian : ndarray\n Hessian matrix evaluated at `params`\n\n Notes\n -----\n This is a numerical approximation.\n\n Both args and kwargs are necessary because the optimizer from\n `fit` must call this function and only supports passing arguments via\n args (for example `scipy.optimize.fmin_l_bfgs`).\n \"\"\"\n transformed, method, approx_complex_step, approx_centered, kwargs = (\n _handle_args(MLEModel._hessian_param_names,\n MLEModel._hessian_param_defaults,\n *args, **kwargs))\n # For fit() calls, the method is called 'hessian_method' (to\n # distinguish it from the method used for fit) but generally in kwargs\n # the method will just be called 'method'\n if 'method' in kwargs:\n method = kwargs.pop('method')\n\n if not transformed and approx_complex_step:\n raise ValueError(\"Cannot use complex-step approximations to\"\n \" calculate the hessian with untransformed\"\n \" parameters.\")\n\n if approx_complex_step is None:\n approx_complex_step = not self.ssm._complex_endog\n if approx_complex_step and self.ssm._complex_endog:\n raise ValueError('Cannot use complex step derivatives when data'\n ' or parameters are complex.')\n\n if method == 'oim':\n hessian = self._hessian_oim(\n params, transformed=transformed,\n approx_complex_step=approx_complex_step,\n approx_centered=approx_centered, **kwargs)\n elif method == 'opg':\n hessian = self._hessian_opg(\n params, transformed=transformed,\n approx_complex_step=approx_complex_step,\n approx_centered=approx_centered, **kwargs)\n elif method == 'approx' and approx_complex_step:\n hessian = 
self._hessian_complex_step(\n params, transformed=transformed, **kwargs)\n elif method == 'approx':\n hessian = self._hessian_finite_difference(\n params, transformed=transformed,\n approx_centered=approx_centered, **kwargs)\n else:\n raise NotImplementedError('Invalid Hessian calculation method.')\n return hessian\n\n def _hessian_oim(self, params, **kwargs):\n \"\"\"\n Hessian matrix computed using the Harvey (1989) information matrix\n \"\"\"\n return -self.observed_information_matrix(params, **kwargs)\n\n def _hessian_opg(self, params, **kwargs):\n \"\"\"\n Hessian matrix computed using the outer product of gradients\n information matrix\n \"\"\"\n return -self.opg_information_matrix(params, **kwargs)\n\n def _hessian_finite_difference(self, params, approx_centered=False,\n **kwargs):\n params = np.array(params, ndmin=1)\n\n warnings.warn('Calculation of the Hessian using finite differences'\n ' is usually subject to substantial approximation'\n ' errors.', PrecisionWarning)\n\n if not approx_centered:\n epsilon = _get_epsilon(params, 3, None, len(params))\n else:\n epsilon = _get_epsilon(params, 4, None, len(params)) / 2\n hessian = approx_fprime(params, self._score_finite_difference,\n epsilon=epsilon, kwargs=kwargs,\n centered=approx_centered)\n\n return hessian / (self.nobs - self.ssm.loglikelihood_burn)\n\n def _hessian_complex_step(self, params, **kwargs):\n \"\"\"\n Hessian matrix computed by second-order complex-step differentiation\n on the `loglike` function.\n \"\"\"\n # the default epsilon can be too small\n epsilon = _get_epsilon(params, 3., None, len(params))\n kwargs['transformed'] = True\n kwargs['complex_step'] = True\n hessian = approx_hess_cs(\n params, self.loglike, epsilon=epsilon, kwargs=kwargs)\n\n return hessian / (self.nobs - self.ssm.loglikelihood_burn)\n\n @property\n def start_params(self):\n \"\"\"\n (array) Starting parameters for maximum likelihood estimation.\n \"\"\"\n if hasattr(self, '_start_params'):\n return self._start_params\n else:\n raise NotImplementedError\n\n @property\n def param_names(self):\n \"\"\"\n (list of str) List of human readable parameter names (for parameters\n actually included in the model).\n \"\"\"\n if hasattr(self, '_param_names'):\n return self._param_names\n else:\n try:\n names = ['param.%d' % i for i in range(len(self.start_params))]\n except NotImplementedError:\n names = []\n return names\n\n @property\n def state_names(self):\n \"\"\"\n (list of str) List of human readable names for unobserved states.\n \"\"\"\n if hasattr(self, '_state_names'):\n return self._state_names\n else:\n names = ['state.%d' % i for i in range(self.k_states)]\n return names\n\n def transform_jacobian(self, unconstrained, approx_centered=False):\n \"\"\"\n Jacobian matrix for the parameter transformation function\n\n Parameters\n ----------\n unconstrained : array_like\n Array of unconstrained parameters used by the optimizer.\n\n Returns\n -------\n jacobian : ndarray\n Jacobian matrix of the transformation, evaluated at `unconstrained`\n\n See Also\n --------\n transform_params\n\n Notes\n -----\n This is a numerical approximation using finite differences. 
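(As a toy check of what is computed: for a transformation that squares each element, the Jacobian evaluated at `unconstrained` is approximately `np.diag(2 * unconstrained)`.) 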
Note that\n in general complex step methods cannot be used because it is not\n guaranteed that the `transform_params` method is a real function (e.g.\n if Cholesky decomposition is used).\n \"\"\"\n return approx_fprime(unconstrained, self.transform_params,\n centered=approx_centered)\n\n def transform_params(self, unconstrained):\n \"\"\"\n Transform unconstrained parameters used by the optimizer to constrained\n parameters used in likelihood evaluation\n\n Parameters\n ----------\n unconstrained : array_like\n Array of unconstrained parameters used by the optimizer, to be\n transformed.\n\n Returns\n -------\n constrained : array_like\n Array of constrained parameters which may be used in likelihood\n evaluation.\n\n Notes\n -----\n This is a noop in the base class, subclasses should override where\n appropriate.\n \"\"\"\n return np.array(unconstrained, ndmin=1)\n\n def untransform_params(self, constrained):\n \"\"\"\n Transform constrained parameters used in likelihood evaluation\n to unconstrained parameters used by the optimizer\n\n Parameters\n ----------\n constrained : array_like\n Array of constrained parameters used in likelihood evaluation, to\n be transformed.\n\n Returns\n -------\n unconstrained : array_like\n Array of unconstrained parameters used by the optimizer.\n\n Notes\n -----\n This is a noop in the base class, subclasses should override where\n appropriate.\n \"\"\"\n return np.array(constrained, ndmin=1)\n\n def handle_params(self, params, transformed=True, includes_fixed=False,\n return_jacobian=False):\n params = np.array(params, ndmin=1)\n\n # Never want integer dtype, so convert to floats\n if np.issubdtype(params.dtype, np.integer):\n params = params.astype(np.float64)\n\n if not includes_fixed and self._has_fixed_params:\n k_params = len(self.param_names)\n new_params = np.zeros(k_params, dtype=params.dtype) * np.nan\n new_params[self._free_params_index] = params\n params = new_params\n\n if not transformed:\n # It may be the case that the transformation relies on having\n # \"some\" (non-NaN) values for the fixed parameters, even if we will\n # not actually be transforming the fixed parameters (as they will)\n # be set below regardless\n if not includes_fixed and self._has_fixed_params:\n params[self._fixed_params_index] = (\n list(self._fixed_params.values()))\n\n if return_jacobian:\n transform_score = self.transform_jacobian(params)\n params = self.transform_params(params)\n\n if not includes_fixed and self._has_fixed_params:\n params[self._fixed_params_index] = (\n list(self._fixed_params.values()))\n\n return (params, transform_score) if return_jacobian else params\n\n def update(self, params, transformed=True, includes_fixed=False,\n complex_step=False):\n \"\"\"\n Update the parameters of the model\n\n Parameters\n ----------\n params : array_like\n Array of new parameters.\n transformed : bool, optional\n Whether or not `params` is already transformed. If set to False,\n `transform_params` is called. 
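(Because this base implementation handles that transformation, the usual subclass pattern is to delegate to it first, e.g. `params = super().update(params, transformed=transformed)`, and only then write the values into the system matrices, for example `self['design', 0, 0] = params[0]`; this is a sketch of intended usage rather than a required API.) 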
Default is True.\n\n Returns\n -------\n params : array_like\n Array of parameters.\n\n Notes\n -----\n Since Model is a base class, this method should be overridden by\n subclasses to perform actual updating steps.\n \"\"\"\n return self.handle_params(params=params, transformed=transformed,\n includes_fixed=includes_fixed)\n\n def _validate_out_of_sample_exog(self, exog, out_of_sample):\n \"\"\"\n Validate given `exog` as satisfactory for out-of-sample operations\n\n Parameters\n ----------\n exog : array_like or None\n New observations of exogenous regressors, if applicable.\n out_of_sample : int\n Number of new observations required.\n\n Returns\n -------\n exog : array or None\n A numpy array of shape (out_of_sample, k_exog) if the model\n contains an `exog` component, or None if it does not.\n \"\"\"\n if out_of_sample and self.k_exog > 0:\n if exog is None:\n raise ValueError('Out-of-sample operations in a model'\n ' with a regression component require'\n ' additional exogenous values via the'\n ' `exog` argument.')\n exog = np.array(exog)\n required_exog_shape = (out_of_sample, self.k_exog)\n try:\n exog = exog.reshape(required_exog_shape)\n except ValueError:\n raise ValueError('Provided exogenous values are not of the'\n ' appropriate shape. Required %s, got %s.'\n % (str(required_exog_shape),\n str(exog.shape)))\n elif self.k_exog > 0:\n exog = None\n warnings.warn('Exogenous array provided, but additional data'\n ' is not required. `exog` argument ignored.',\n ValueWarning)\n\n return exog\n\n def _get_extension_time_varying_matrices(\n self, params, exog, out_of_sample, extend_kwargs=None,\n transformed=True, includes_fixed=False, **kwargs):\n \"\"\"\n Get updated time-varying state space system matrices\n\n Parameters\n ----------\n params : array_like\n Array of parameters used to construct the time-varying system\n matrices.\n exog : array_like or None\n New observations of exogenous regressors, if applicable.\n out_of_sample : int\n Number of new observations required.\n extend_kwargs : dict, optional\n Dictionary of keyword arguments to pass to the state space model\n constructor. For example, for an SARIMAX state space model, this\n could be used to pass the `concentrate_scale=True` keyword\n argument. Any arguments that are not explicitly set in this\n dictionary will be copied from the current model instance.\n transformed : bool, optional\n Whether or not `start_params` is already transformed. Default is\n True.\n includes_fixed : bool, optional\n If parameters were previously fixed with the `fix_params` method,\n this argument describes whether or not `start_params` also includes\n the fixed parameters, in addition to the free parameters. 
Default\n is False.\n \"\"\"\n # Get the appropriate exog for the extended sample\n exog = self._validate_out_of_sample_exog(exog, out_of_sample)\n\n # Create extended model\n if extend_kwargs is None:\n extend_kwargs = {}\n\n # Handle trend offset for extended model\n if getattr(self, 'k_trend', 0) > 0 and hasattr(self, 'trend_offset'):\n extend_kwargs.setdefault(\n 'trend_offset', self.trend_offset + self.nobs)\n\n mod_extend = self.clone(\n endog=np.zeros((out_of_sample, self.k_endog)), exog=exog,\n **extend_kwargs)\n mod_extend.update(params, transformed=transformed,\n includes_fixed=includes_fixed)\n\n # Retrieve the extensions to the time-varying system matrices and\n # put them in kwargs\n for name in self.ssm.shapes.keys():\n if name == 'obs' or name in kwargs:\n continue\n if getattr(self.ssm, name).shape[-1] > 1:\n mat = getattr(mod_extend.ssm, name)\n kwargs[name] = mat[..., -out_of_sample:]\n\n return kwargs\n\n def simulate(self, params, nsimulations, measurement_shocks=None,\n state_shocks=None, initial_state=None, anchor=None,\n repetitions=None, exog=None, extend_model=None,\n extend_kwargs=None, transformed=True, includes_fixed=False,\n **kwargs):\n r\"\"\"\n Simulate a new time series following the state space model\n\n Parameters\n ----------\n params : array_like\n Array of parameters to use in constructing the state space\n representation to use when simulating.\n nsimulations : int\n The number of observations to simulate. If the model is\n time-invariant this can be any number. If the model is\n time-varying, then this number must be less than or equal to the\n number of observations.\n measurement_shocks : array_like, optional\n If specified, these are the shocks to the measurement equation,\n :math:`\\varepsilon_t`. If unspecified, these are automatically\n generated using a pseudo-random number generator. If specified,\n must be shaped `nsimulations` x `k_endog`, where `k_endog` is the\n same as in the state space model.\n state_shocks : array_like, optional\n If specified, these are the shocks to the state equation,\n :math:`\\eta_t`. If unspecified, these are automatically\n generated using a pseudo-random number generator. If specified,\n must be shaped `nsimulations` x `k_posdef` where `k_posdef` is the\n same as in the state space model.\n initial_state : array_like, optional\n If specified, this is the initial state vector to use in\n simulation, which should be shaped (`k_states` x 1), where\n `k_states` is the same as in the state space model. If unspecified,\n but the model has been initialized, then that initialization is\n used. This must be specified if `anchor` is anything other than\n \"start\" or 0 (or else you can use the `simulate` method on a\n results object rather than on the model object).\n anchor : int, str, or datetime, optional\n First period for simulation. The simulation will be conditional on\n all existing datapoints prior to the `anchor`. Type depends on the\n index of the given `endog` in the model. Two special cases are the\n strings 'start' and 'end'. `start` refers to beginning the\n simulation at the first period of the sample, and `end` refers to\n beginning the simulation at the first period after the sample.\n Integer values can run from 0 to `nobs`, or can be negative to\n apply negative indexing. Finally, if a date/time index was provided\n to the model, then this argument can be a date string to parse or a\n datetime type. Default is 'start'.\n repetitions : int, optional\n Number of simulated paths to generate. 
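(For instance, `repetitions=1000` produces 1000 simulated paths, stacked along the last axis of the returned array as described under Returns.) 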
Default is 1 simulated path.\n exog : array_like, optional\n New observations of exogenous regressors, if applicable.\n transformed : bool, optional\n Whether or not `params` is already transformed. Default is\n True.\n includes_fixed : bool, optional\n If parameters were previously fixed with the `fix_params` method,\n this argument describes whether or not `params` also includes\n the fixed parameters, in addition to the free parameters. Default\n is False.\n\n Returns\n -------\n simulated_obs : ndarray\n An array of simulated observations. If `repetitions=None`, then it\n will be shaped (nsimulations x k_endog) or (nsimulations,) if\n `k_endog=1`. Otherwise it will be shaped\n (nsimulations x k_endog x repetitions). If the model was given\n Pandas input then the output will be a Pandas object. If\n `k_endog > 1` and `repetitions` is not None, then the output will\n be a Pandas DataFrame that has a MultiIndex for the columns, with\n the first level containing the names of the `endog` variables and\n the second level containing the repetition number.\n \"\"\"\n # Make sure the model class has the current parameters\n self.update(params, transformed=transformed,\n includes_fixed=includes_fixed)\n\n # Get the starting location\n if anchor is None or anchor == 'start':\n iloc = 0\n elif anchor == 'end':\n iloc = self.nobs\n else:\n iloc, _, _ = self._get_index_loc(anchor)\n if isinstance(iloc, slice):\n iloc = iloc.start\n\n if iloc < 0:\n iloc = self.nobs + iloc\n if iloc > self.nobs:\n raise ValueError('Cannot anchor simulation outside of the sample.')\n\n if iloc > 0 and initial_state is None:\n raise ValueError('If `anchor` is after the start of the sample,'\n ' must provide a value for `initial_state`.')\n\n # Get updated time-varying system matrices in **kwargs, if necessary\n out_of_sample = max(iloc + nsimulations - self.nobs, 0)\n if extend_model is None:\n extend_model = self.exog is not None or not self.ssm.time_invariant\n if out_of_sample and extend_model:\n kwargs = self._get_extension_time_varying_matrices(\n params, exog, out_of_sample, extend_kwargs,\n transformed=transformed, includes_fixed=includes_fixed,\n **kwargs)\n\n # Standardize the dimensions of the initial state\n if initial_state is not None:\n initial_state = np.array(initial_state)\n if initial_state.ndim < 2:\n initial_state = np.atleast_2d(initial_state).T\n\n # Construct a model that represents the simulation period\n end = min(self.nobs, iloc + nsimulations)\n nextend = iloc + nsimulations - end\n sim_model = self.ssm.extend(np.empty((nextend, self.k_endog)),\n start=iloc, end=end, **kwargs)\n\n # Simulate the data\n _repetitions = 1 if repetitions is None else repetitions\n sim = np.zeros((nsimulations, self.k_endog, _repetitions))\n\n for i in range(_repetitions):\n initial_state_variates = None\n if initial_state is not None:\n if initial_state.shape[1] == 1:\n initial_state_variates = initial_state[:, 0]\n else:\n initial_state_variates = initial_state[:, i]\n\n # TODO: allow specifying measurement / state shocks for each\n # repetition?\n\n out, _ = sim_model.simulate(\n nsimulations, measurement_shocks, state_shocks,\n initial_state_variates)\n\n sim[:, :, i] = out\n\n # Wrap data / squeeze where appropriate\n use_pandas = isinstance(self.data, PandasData)\n index = None\n if use_pandas:\n _, _, _, index = self._get_prediction_index(\n iloc, iloc + nsimulations - 1)\n # If `repetitions` isn't set, we squeeze the last dimension(s)\n if repetitions is None:\n if self.k_endog == 1:\n sim = sim[:, 0, 0]\n 
if use_pandas:\n                    sim = pd.Series(sim, index=index, name=self.endog_names)\n            else:\n                sim = sim[:, :, 0]\n                if use_pandas:\n                    sim = pd.DataFrame(sim, index=index,\n                                       columns=self.endog_names)\n        elif use_pandas:\n            shape = sim.shape\n            endog_names = self.endog_names\n            if not isinstance(endog_names, list):\n                endog_names = [endog_names]\n            columns = pd.MultiIndex.from_product([endog_names,\n                                                  np.arange(shape[2])])\n            sim = pd.DataFrame(sim.reshape(shape[0], shape[1] * shape[2]),\n                               index=index, columns=columns)\n\n        return sim\n\n    def impulse_responses(self, params, steps=1, impulse=0,\n                          orthogonalized=False, cumulative=False, anchor=None,\n                          exog=None, extend_model=None, extend_kwargs=None,\n                          transformed=True, includes_fixed=False, **kwargs):\n        \"\"\"\n        Impulse response function\n\n        Parameters\n        ----------\n        params : array_like\n            Array of model parameters.\n        steps : int, optional\n            The number of steps for which impulse responses are calculated.\n            Default is 1. Note that for time-invariant models, the initial\n            impulse is not counted as a step, so if `steps=1`, the output will\n            have 2 entries.\n        impulse : int or array_like\n            If an integer, the state innovation to pulse; must be between 0\n            and `k_posdef-1`. Alternatively, a custom impulse vector may be\n            provided; must be shaped `k_posdef x 1`.\n        orthogonalized : bool, optional\n            Whether or not to perform impulse using orthogonalized innovations.\n            Note that this will also affect custom `impulse` vectors. Default\n            is False.\n        cumulative : bool, optional\n            Whether or not to return cumulative impulse responses. Default is\n            False.\n        anchor : int, str, or datetime, optional\n            Time point within the sample for the state innovation impulse. Type\n            depends on the index of the given `endog` in the model. Two special\n            cases are the strings 'start' and 'end', which refer to setting the\n            impulse at the first and last points of the sample, respectively.\n            Integer values can run from 0 to `nobs - 1`, or can be negative to\n            apply negative indexing. Finally, if a date/time index was provided\n            to the model, then this argument can be a date string to parse or a\n            datetime type. Default is 'start'.\n        exog : array_like, optional\n            New observations of exogenous regressors for out-of-sample periods,\n            if applicable.\n        transformed : bool, optional\n            Whether or not `params` is already transformed. Default is\n            True.\n        includes_fixed : bool, optional\n            If parameters were previously fixed with the `fix_params` method,\n            this argument describes whether or not `params` also includes\n            the fixed parameters, in addition to the free parameters. Default\n            is False.\n        **kwargs\n            If the model has time-varying design or transition matrices and the\n            combination of `anchor` and `steps` implies creating impulse\n            responses for the out-of-sample period, then these matrices must\n            have updated values provided for the out-of-sample steps. For\n            example, if `design` is a time-varying component, `nobs` is 10,\n            `anchor=1`, and `steps` is 15, a (`k_endog` x `k_states` x 7)\n            matrix must be provided with the new design matrix values.\n\n        Returns\n        -------\n        impulse_responses : ndarray\n            Responses for each endogenous variable due to the impulse\n            given by the `impulse` argument. 
For a time-invariant model, the\n impulse responses are given for `steps + 1` elements (this gives\n the \"initial impulse\" followed by `steps` responses for the\n important cases of VAR and SARIMAX models), while for time-varying\n models the impulse responses are only given for `steps` elements\n (to avoid having to unexpectedly provide updated time-varying\n matrices).\n\n Notes\n -----\n Intercepts in the measurement and state equation are ignored when\n calculating impulse responses.\n\n TODO: add an option to allow changing the ordering for the\n orthogonalized option. Will require permuting matrices when\n constructing the extended model.\n \"\"\"\n # Make sure the model class has the current parameters\n self.update(params, transformed=transformed,\n includes_fixed=includes_fixed)\n\n # For time-invariant models, add an additional `step`. This is the\n # default for time-invariant models based on the expected behavior for\n # ARIMA and VAR models: we want to record the initial impulse and also\n # `steps` values of the responses afterwards.\n # Note: we don't modify `steps` itself, because\n # `KalmanFilter.impulse_responses` also adds an additional step in this\n # case (this is so that there isn't different behavior when calling\n # this method versus that method). We just need to also keep track of\n # this here because we need to generate the correct extended model.\n additional_steps = 0\n if (self.ssm._design.shape[2] == 1 and\n self.ssm._transition.shape[2] == 1 and\n self.ssm._selection.shape[2] == 1):\n additional_steps = 1\n\n # Get the starting location\n if anchor is None or anchor == 'start':\n iloc = 0\n elif anchor == 'end':\n iloc = self.nobs - 1\n else:\n iloc, _, _ = self._get_index_loc(anchor)\n if isinstance(iloc, slice):\n iloc = iloc.start\n\n if iloc < 0:\n iloc = self.nobs + iloc\n if iloc >= self.nobs:\n raise ValueError('Cannot anchor impulse responses outside of the'\n ' sample.')\n\n time_invariant = (\n self.ssm._design.shape[2] == self.ssm._obs_cov.shape[2] ==\n self.ssm._transition.shape[2] == self.ssm._selection.shape[2] ==\n self.ssm._state_cov.shape[2] == 1)\n\n # Get updated time-varying system matrices in **kwargs, if necessary\n # (Note: KalmanFilter adds 1 to steps to account for the first impulse)\n out_of_sample = max(\n iloc + (steps + additional_steps + 1) - self.nobs, 0)\n if extend_model is None:\n extend_model = self.exog is not None and not time_invariant\n if out_of_sample and extend_model:\n kwargs = self._get_extension_time_varying_matrices(\n params, exog, out_of_sample, extend_kwargs,\n transformed=transformed, includes_fixed=includes_fixed,\n **kwargs)\n\n # Special handling for matrix terms that are time-varying but\n # irrelevant for impulse response functions. 
Must be set since\n # ssm.extend() requires that we pass new matrices for these, but they\n # are ignored for IRF purposes.\n end = min(self.nobs, iloc + steps + additional_steps)\n nextend = iloc + (steps + additional_steps + 1) - end\n if ('obs_intercept' not in kwargs and\n self.ssm._obs_intercept.shape[1] > 1):\n kwargs['obs_intercept'] = np.zeros((self.k_endog, nextend))\n if ('state_intercept' not in kwargs and\n self.ssm._state_intercept.shape[1] > 1):\n kwargs['state_intercept'] = np.zeros((self.k_states, nextend))\n if 'obs_cov' not in kwargs and self.ssm._obs_cov.shape[2] > 1:\n kwargs['obs_cov'] = np.zeros((self.k_endog, self.k_endog, nextend))\n # Special handling for matrix terms that are time-varying but\n # only the value at the anchor matters for IRF purposes.\n if 'state_cov' not in kwargs and self.ssm._state_cov.shape[2] > 1:\n tmp = np.zeros((self.ssm.k_posdef, self.ssm.k_posdef, nextend))\n tmp[:] = self['state_cov', :, :, iloc:iloc + 1]\n kwargs['state_cov'] = tmp\n if 'selection' not in kwargs and self.ssm._selection.shape[2] > 1:\n tmp = np.zeros((self.k_states, self.ssm.k_posdef, nextend))\n tmp[:] = self['selection', :, :, iloc:iloc + 1]\n kwargs['selection'] = tmp\n\n # Construct a model that represents the simulation period\n sim_model = self.ssm.extend(np.empty((nextend, self.k_endog)),\n start=iloc, end=end, **kwargs)\n\n # Compute the impulse responses\n irfs = sim_model.impulse_responses(\n steps, impulse, orthogonalized, cumulative)\n\n # IRF is (nobs x k_endog); do not want to squeeze in case of steps = 1\n if irfs.shape[1] == 1:\n irfs = irfs[:, 0]\n\n return irfs\n\n @classmethod\n def from_formula(cls, formula, data, subset=None):\n \"\"\"\n Not implemented for state space models\n \"\"\"\n raise NotImplementedError\n\n\nclass MLEResults(tsbase.TimeSeriesModelResults):\n r\"\"\"\n Class to hold results from fitting a state space model.\n\n Parameters\n ----------\n model : MLEModel instance\n The fitted model instance\n params : ndarray\n Fitted parameters\n filter_results : KalmanFilter instance\n The underlying state space model and Kalman filter output\n\n Attributes\n ----------\n model : Model instance\n A reference to the model that was fit.\n filter_results : KalmanFilter instance\n The underlying state space model and Kalman filter output\n nobs : float\n The number of observations used to fit the model.\n params : ndarray\n The parameters of the model.\n scale : float\n This is currently set to 1.0 unless the model uses concentrated\n filtering.\n\n See Also\n --------\n MLEModel\n statsmodels.tsa.statespace.kalman_filter.FilterResults\n statsmodels.tsa.statespace.representation.FrozenRepresentation\n \"\"\"\n def __init__(self, model, params, results, cov_type=None, cov_kwds=None,\n **kwargs):\n self.data = model.data\n scale = results.scale\n\n tsbase.TimeSeriesModelResults.__init__(self, model, params,\n normalized_cov_params=None,\n scale=scale)\n\n # Save the fixed parameters\n self._has_fixed_params = self.model._has_fixed_params\n self._fixed_params_index = self.model._fixed_params_index\n self._free_params_index = self.model._free_params_index\n # TODO: seems like maybe self.fixed_params should be the dictionary\n # itself, not just the keys?\n if self._has_fixed_params:\n self._fixed_params = self.model._fixed_params.copy()\n self.fixed_params = list(self._fixed_params.keys())\n else:\n self._fixed_params = None\n self.fixed_params = []\n self.param_names = [\n '%s (fixed)' % name if name in self.fixed_params else name\n for name in 
(self.data.param_names or [])]\n\n # Save the state space representation output\n self.filter_results = results\n if isinstance(results, SmootherResults):\n self.smoother_results = results\n else:\n self.smoother_results = None\n\n # Dimensions\n self.nobs = self.filter_results.nobs\n self.nobs_diffuse = self.filter_results.nobs_diffuse\n if self.nobs_diffuse > 0 and self.loglikelihood_burn > 0:\n warnings.warn('Care should be used when applying a loglikelihood'\n ' burn to a model with exact diffuse initialization.'\n ' Some results objects, e.g. degrees of freedom,'\n ' expect only one of the two to be set.')\n # This only excludes explicitly burned (usually approximate diffuse)\n # periods but does not exclude exact diffuse periods. This is\n # because the loglikelihood remains valid for the initial periods in\n # the exact diffuse case (see DK, 2012, section 7.2) and so also do\n # e.g. information criteria (see DK, 2012, section 7.4) and the score\n # vector (see DK, 2012, section 7.3.3, equation 7.15).\n # However, other objects should be excluded in the diffuse periods\n # (e.g. the diffuse forecast errors, so in some cases a different\n # nobs_effective will have to be computed and used)\n self.nobs_effective = self.nobs - self.loglikelihood_burn\n\n P = self.filter_results.initial_diffuse_state_cov\n self.k_diffuse_states = 0 if P is None else np.sum(np.diagonal(P) == 1)\n\n # Degrees of freedom (see DK 2012, section 7.4)\n k_free_params = self.params.size - len(self.fixed_params)\n self.df_model = (k_free_params + self.k_diffuse_states\n + self.filter_results.filter_concentrated)\n self.df_resid = self.nobs_effective - self.df_model\n\n # Setup covariance matrix notes dictionary\n if not hasattr(self, 'cov_kwds'):\n self.cov_kwds = {}\n if cov_type is None:\n cov_type = 'approx' if results.memory_no_likelihood else 'opg'\n self.cov_type = cov_type\n\n # Setup the cache\n self._cache = {}\n\n # Handle covariance matrix calculation\n if cov_kwds is None:\n cov_kwds = {}\n self._cov_approx_complex_step = (\n cov_kwds.pop('approx_complex_step', True))\n self._cov_approx_centered = cov_kwds.pop('approx_centered', False)\n try:\n self._rank = None\n self._get_robustcov_results(cov_type=cov_type, use_self=True,\n **cov_kwds)\n except np.linalg.LinAlgError:\n self._rank = 0\n k_params = len(self.params)\n self.cov_params_default = np.zeros((k_params, k_params)) * np.nan\n self.cov_kwds['cov_type'] = (\n 'Covariance matrix could not be calculated: singular.'\n ' information matrix.')\n self.model.update(self.params, transformed=True, includes_fixed=True)\n\n # References of filter and smoother output\n extra_arrays = [\n 'filtered_state', 'filtered_state_cov', 'predicted_state',\n 'predicted_state_cov', 'forecasts', 'forecasts_error',\n 'forecasts_error_cov', 'standardized_forecasts_error',\n 'forecasts_error_diffuse_cov', 'predicted_diffuse_state_cov',\n 'scaled_smoothed_estimator',\n 'scaled_smoothed_estimator_cov', 'smoothing_error',\n 'smoothed_state',\n 'smoothed_state_cov', 'smoothed_state_autocov',\n 'smoothed_measurement_disturbance',\n 'smoothed_state_disturbance',\n 'smoothed_measurement_disturbance_cov',\n 'smoothed_state_disturbance_cov']\n for name in extra_arrays:\n setattr(self, name, getattr(self.filter_results, name, None))\n\n # Remove too-short results when memory conservation was used\n if self.filter_results.memory_no_forecast_mean:\n self.forecasts = None\n self.forecasts_error = None\n if self.filter_results.memory_no_forecast_cov:\n self.forecasts_error_cov = 
None\n if self.filter_results.memory_no_predicted_mean:\n self.predicted_state = None\n if self.filter_results.memory_no_predicted_cov:\n self.predicted_state_cov = None\n if self.filter_results.memory_no_filtered_mean:\n self.filtered_state = None\n if self.filter_results.memory_no_filtered_cov:\n self.filtered_state_cov = None\n if self.filter_results.memory_no_gain:\n pass\n if self.filter_results.memory_no_smoothing:\n pass\n if self.filter_results.memory_no_std_forecast:\n self.standardized_forecasts_error = None\n\n # Save more convenient access to states\n # (will create a private attribute _states here and provide actual\n # access via a getter, so that we can e.g. issue a warning in the case\n # that a useless Pandas index was given in the model specification)\n self._states = SimpleNamespace()\n\n use_pandas = isinstance(self.data, PandasData)\n index = self.model._index\n columns = self.model.state_names\n\n # Predicted states\n # Note: a complication here is that we also include the initial values\n # here, so that we need an extended index in the Pandas case\n if (self.predicted_state is None or\n self.filter_results.memory_no_predicted_mean):\n self._states.predicted = None\n elif use_pandas:\n extended_index = self.model._get_index_with_final_state()\n self._states.predicted = pd.DataFrame(\n self.predicted_state.T, index=extended_index, columns=columns)\n else:\n self._states.predicted = self.predicted_state.T\n if (self.predicted_state_cov is None or\n self.filter_results.memory_no_predicted_cov):\n self._states.predicted_cov = None\n elif use_pandas:\n extended_index = self.model._get_index_with_final_state()\n tmp = np.transpose(self.predicted_state_cov, (2, 0, 1))\n self._states.predicted_cov = pd.DataFrame(\n np.reshape(tmp, (tmp.shape[0] * tmp.shape[1], tmp.shape[2])),\n index=pd.MultiIndex.from_product(\n [extended_index, columns]).swaplevel(),\n columns=columns)\n else:\n self._states.predicted_cov = np.transpose(\n self.predicted_state_cov, (2, 0, 1))\n\n # Filtered states\n if (self.filtered_state is None or\n self.filter_results.memory_no_filtered_mean):\n self._states.filtered = None\n elif use_pandas:\n self._states.filtered = pd.DataFrame(\n self.filtered_state.T, index=index, columns=columns)\n else:\n self._states.filtered = self.filtered_state.T\n if (self.filtered_state_cov is None or\n self.filter_results.memory_no_filtered_cov):\n self._states.filtered_cov = None\n elif use_pandas:\n tmp = np.transpose(self.filtered_state_cov, (2, 0, 1))\n self._states.filtered_cov = pd.DataFrame(\n np.reshape(tmp, (tmp.shape[0] * tmp.shape[1], tmp.shape[2])),\n index=pd.MultiIndex.from_product([index, columns]).swaplevel(),\n columns=columns)\n else:\n self._states.filtered_cov = np.transpose(\n self.filtered_state_cov, (2, 0, 1))\n\n # Smoothed states\n if self.smoothed_state is None:\n self._states.smoothed = None\n elif use_pandas:\n self._states.smoothed = pd.DataFrame(\n self.smoothed_state.T, index=index, columns=columns)\n else:\n self._states.smoothed = self.smoothed_state.T\n if self.smoothed_state_cov is None:\n self._states.smoothed_cov = None\n elif use_pandas:\n tmp = np.transpose(self.smoothed_state_cov, (2, 0, 1))\n self._states.smoothed_cov = pd.DataFrame(\n np.reshape(tmp, (tmp.shape[0] * tmp.shape[1], tmp.shape[2])),\n index=pd.MultiIndex.from_product([index, columns]).swaplevel(),\n columns=columns)\n else:\n self._states.smoothed_cov = np.transpose(\n self.smoothed_state_cov, (2, 0, 1))\n\n # Handle removing data\n self._data_attr_model = 
getattr(self, '_data_attr_model', [])\n self._data_attr_model.extend(['ssm'])\n self._data_attr.extend(extra_arrays)\n self._data_attr.extend(['filter_results', 'smoother_results'])\n\n def _get_robustcov_results(self, cov_type='opg', **kwargs):\n \"\"\"\n Create new results instance with specified covariance estimator as\n default\n\n Note: creating new results instance currently not supported.\n\n Parameters\n ----------\n cov_type : str\n the type of covariance matrix estimator to use. See Notes below\n kwargs : depends on cov_type\n Required or optional arguments for covariance calculation.\n See Notes below.\n\n Returns\n -------\n results : results instance\n This method creates a new results instance with the requested\n covariance as the default covariance of the parameters.\n Inferential statistics like p-values and hypothesis tests will be\n based on this covariance matrix.\n\n Notes\n -----\n The following covariance types and required or optional arguments are\n currently available:\n\n - 'opg' for the outer product of gradient estimator\n - 'oim' for the observed information matrix estimator, calculated\n using the method of Harvey (1989)\n - 'approx' for the observed information matrix estimator,\n calculated using a numerical approximation of the Hessian matrix.\n Uses complex step approximation by default, or uses finite\n differences if `approx_complex_step=False` in the `cov_kwds`\n dictionary.\n - 'robust' for an approximate (quasi-maximum likelihood) covariance\n matrix that may be valid even in the presence of some\n misspecifications. Intermediate calculations use the 'oim'\n method.\n - 'robust_approx' is the same as 'robust' except that the\n intermediate calculations use the 'approx' method.\n - 'none' for no covariance matrix calculation.\n \"\"\"\n from statsmodels.base.covtype import descriptions\n\n use_self = kwargs.pop('use_self', False)\n if use_self:\n res = self\n else:\n raise NotImplementedError\n res = self.__class__(\n self.model, self.params,\n normalized_cov_params=self.normalized_cov_params,\n scale=self.scale)\n\n # Set the new covariance type\n res.cov_type = cov_type\n res.cov_kwds = {}\n\n # Calculate the new covariance matrix\n approx_complex_step = self._cov_approx_complex_step\n if approx_complex_step:\n approx_type_str = 'complex-step'\n elif self._cov_approx_centered:\n approx_type_str = 'centered finite differences'\n else:\n approx_type_str = 'finite differences'\n\n k_params = len(self.params)\n if k_params == 0:\n res.cov_params_default = np.zeros((0, 0))\n res._rank = 0\n res.cov_kwds['description'] = 'No parameters estimated.'\n elif cov_type == 'custom':\n res.cov_type = kwargs['custom_cov_type']\n res.cov_params_default = kwargs['custom_cov_params']\n res.cov_kwds['description'] = kwargs['custom_description']\n if len(self.fixed_params) > 0:\n mask = np.ix_(self._free_params_index, self._free_params_index)\n else:\n mask = np.s_[...]\n res._rank = np.linalg.matrix_rank(res.cov_params_default[mask])\n elif cov_type == 'none':\n res.cov_params_default = np.zeros((k_params, k_params)) * np.nan\n res._rank = np.nan\n res.cov_kwds['description'] = descriptions['none']\n elif self.cov_type == 'approx':\n res.cov_params_default = res.cov_params_approx\n res.cov_kwds['description'] = descriptions['approx'].format(\n approx_type=approx_type_str)\n elif self.cov_type == 'oim':\n res.cov_params_default = res.cov_params_oim\n res.cov_kwds['description'] = descriptions['OIM'].format(\n approx_type=approx_type_str)\n elif self.cov_type == 
'opg':\n res.cov_params_default = res.cov_params_opg\n res.cov_kwds['description'] = descriptions['OPG'].format(\n approx_type=approx_type_str)\n elif self.cov_type == 'robust' or self.cov_type == 'robust_oim':\n res.cov_params_default = res.cov_params_robust_oim\n res.cov_kwds['description'] = descriptions['robust-OIM'].format(\n approx_type=approx_type_str)\n elif self.cov_type == 'robust_approx':\n res.cov_params_default = res.cov_params_robust_approx\n res.cov_kwds['description'] = descriptions['robust-approx'].format(\n approx_type=approx_type_str)\n else:\n raise NotImplementedError('Invalid covariance matrix type.')\n\n return res\n\n @cache_readonly\n def aic(self):\n \"\"\"\n (float) Akaike Information Criterion\n \"\"\"\n return aic(self.llf, self.nobs_effective, self.df_model)\n\n @cache_readonly\n def aicc(self):\n \"\"\"\n (float) Akaike Information Criterion with small sample correction\n \"\"\"\n return aicc(self.llf, self.nobs_effective, self.df_model)\n\n @cache_readonly\n def bic(self):\n \"\"\"\n (float) Bayes Information Criterion\n \"\"\"\n return bic(self.llf, self.nobs_effective, self.df_model)\n\n def _cov_params_approx(self, approx_complex_step=True,\n approx_centered=False):\n evaluated_hessian = self.nobs_effective * self.model.hessian(\n params=self.params, transformed=True, includes_fixed=True,\n method='approx', approx_complex_step=approx_complex_step,\n approx_centered=approx_centered)\n # TODO: Case with \"not approx_complex_step\" is not hit in\n # tests as of 2017-05-19\n\n if len(self.fixed_params) > 0:\n mask = np.ix_(self._free_params_index, self._free_params_index)\n (tmp, singular_values) = pinv_extended(evaluated_hessian[mask])\n neg_cov = np.zeros_like(evaluated_hessian) * np.nan\n neg_cov[mask] = tmp\n else:\n (neg_cov, singular_values) = pinv_extended(evaluated_hessian)\n\n self.model.update(self.params, transformed=True, includes_fixed=True)\n if self._rank is None:\n self._rank = np.linalg.matrix_rank(np.diag(singular_values))\n return -neg_cov\n\n @cache_readonly\n def cov_params_approx(self):\n \"\"\"\n (array) The variance / covariance matrix. Computed using the numerical\n Hessian approximated by complex step or finite differences methods.\n \"\"\"\n return self._cov_params_approx(self._cov_approx_complex_step,\n self._cov_approx_centered)\n\n def _cov_params_oim(self, approx_complex_step=True, approx_centered=False):\n evaluated_hessian = self.nobs_effective * self.model.hessian(\n self.params, hessian_method='oim', transformed=True,\n includes_fixed=True, approx_complex_step=approx_complex_step,\n approx_centered=approx_centered)\n\n if len(self.fixed_params) > 0:\n mask = np.ix_(self._free_params_index, self._free_params_index)\n (tmp, singular_values) = pinv_extended(evaluated_hessian[mask])\n neg_cov = np.zeros_like(evaluated_hessian) * np.nan\n neg_cov[mask] = tmp\n else:\n (neg_cov, singular_values) = pinv_extended(evaluated_hessian)\n\n self.model.update(self.params, transformed=True, includes_fixed=True)\n if self._rank is None:\n self._rank = np.linalg.matrix_rank(np.diag(singular_values))\n return -neg_cov\n\n @cache_readonly\n def cov_params_oim(self):\n \"\"\"\n (array) The variance / covariance matrix. 
Computed using the method\n from Harvey (1989).\n \"\"\"\n return self._cov_params_oim(self._cov_approx_complex_step,\n self._cov_approx_centered)\n\n def _cov_params_opg(self, approx_complex_step=True, approx_centered=False):\n evaluated_hessian = self.nobs_effective * self.model._hessian_opg(\n self.params, transformed=True, includes_fixed=True,\n approx_complex_step=approx_complex_step,\n approx_centered=approx_centered)\n\n no_free_params = (self._free_params_index is not None and\n len(self._free_params_index) == 0)\n\n if no_free_params:\n neg_cov = np.zeros_like(evaluated_hessian) * np.nan\n singular_values = np.empty(0)\n elif len(self.fixed_params) > 0:\n mask = np.ix_(self._free_params_index, self._free_params_index)\n (tmp, singular_values) = pinv_extended(evaluated_hessian[mask])\n neg_cov = np.zeros_like(evaluated_hessian) * np.nan\n neg_cov[mask] = tmp\n else:\n (neg_cov, singular_values) = pinv_extended(evaluated_hessian)\n\n self.model.update(self.params, transformed=True, includes_fixed=True)\n if self._rank is None:\n if no_free_params:\n self._rank = 0\n else:\n self._rank = np.linalg.matrix_rank(np.diag(singular_values))\n return -neg_cov\n\n @cache_readonly\n def cov_params_opg(self):\n \"\"\"\n (array) The variance / covariance matrix. Computed using the outer\n product of gradients method.\n \"\"\"\n return self._cov_params_opg(self._cov_approx_complex_step,\n self._cov_approx_centered)\n\n @cache_readonly\n def cov_params_robust(self):\n \"\"\"\n (array) The QMLE variance / covariance matrix. Alias for\n `cov_params_robust_oim`\n \"\"\"\n return self.cov_params_robust_oim\n\n def _cov_params_robust_oim(self, approx_complex_step=True,\n approx_centered=False):\n cov_opg = self._cov_params_opg(approx_complex_step=approx_complex_step,\n approx_centered=approx_centered)\n\n evaluated_hessian = self.nobs_effective * self.model.hessian(\n self.params, hessian_method='oim', transformed=True,\n includes_fixed=True, approx_complex_step=approx_complex_step,\n approx_centered=approx_centered)\n\n if len(self.fixed_params) > 0:\n mask = np.ix_(self._free_params_index, self._free_params_index)\n cov_params = np.zeros_like(evaluated_hessian) * np.nan\n\n cov_opg = cov_opg[mask]\n evaluated_hessian = evaluated_hessian[mask]\n\n tmp, singular_values = pinv_extended(\n np.dot(np.dot(evaluated_hessian, cov_opg), evaluated_hessian))\n\n cov_params[mask] = tmp\n else:\n (cov_params, singular_values) = pinv_extended(\n np.dot(np.dot(evaluated_hessian, cov_opg), evaluated_hessian))\n\n self.model.update(self.params, transformed=True, includes_fixed=True)\n if self._rank is None:\n self._rank = np.linalg.matrix_rank(np.diag(singular_values))\n return cov_params\n\n @cache_readonly\n def cov_params_robust_oim(self):\n \"\"\"\n (array) The QMLE variance / covariance matrix. 
Computed using the\n method from Harvey (1989) as the evaluated hessian.\n \"\"\"\n return self._cov_params_robust_oim(self._cov_approx_complex_step,\n self._cov_approx_centered)\n\n def _cov_params_robust_approx(self, approx_complex_step=True,\n approx_centered=False):\n cov_opg = self._cov_params_opg(approx_complex_step=approx_complex_step,\n approx_centered=approx_centered)\n\n evaluated_hessian = self.nobs_effective * self.model.hessian(\n self.params, transformed=True, includes_fixed=True,\n method='approx', approx_complex_step=approx_complex_step)\n # TODO: Case with \"not approx_complex_step\" is not\n # hit in tests as of 2017-05-19\n\n if len(self.fixed_params) > 0:\n mask = np.ix_(self._free_params_index, self._free_params_index)\n cov_params = np.zeros_like(evaluated_hessian) * np.nan\n\n cov_opg = cov_opg[mask]\n evaluated_hessian = evaluated_hessian[mask]\n\n tmp, singular_values = pinv_extended(\n np.dot(np.dot(evaluated_hessian, cov_opg), evaluated_hessian))\n\n cov_params[mask] = tmp\n else:\n (cov_params, singular_values) = pinv_extended(\n np.dot(np.dot(evaluated_hessian, cov_opg), evaluated_hessian))\n\n self.model.update(self.params, transformed=True, includes_fixed=True)\n if self._rank is None:\n self._rank = np.linalg.matrix_rank(np.diag(singular_values))\n return cov_params\n\n @cache_readonly\n def cov_params_robust_approx(self):\n \"\"\"\n (array) The QMLE variance / covariance matrix. Computed using the\n numerical Hessian as the evaluated hessian.\n \"\"\"\n return self._cov_params_robust_approx(self._cov_approx_complex_step,\n self._cov_approx_centered)\n\n def info_criteria(self, criteria, method='standard'):\n r\"\"\"\n Information criteria\n\n Parameters\n ----------\n criteria : {'aic', 'bic', 'hqic'}\n The information criteria to compute.\n method : {'standard', 'lutkepohl'}\n The method for information criteria computation. Default is\n 'standard' method; 'lutkepohl' computes the information criteria\n as in Lütkepohl (2007). See Notes for formulas.\n\n Notes\n -----\n The `'standard'` formulas are:\n\n .. math::\n\n AIC & = -2 \\log L(Y_n | \\hat \\psi) + 2 k \\\\\n BIC & = -2 \\log L(Y_n | \\hat \\psi) + k \\log n \\\\\n HQIC & = -2 \\log L(Y_n | \\hat \\psi) + 2 k \\log \\log n \\\\\n\n where :math:`\\hat \\psi` are the maximum likelihood estimates of the\n parameters, :math:`n` is the number of observations, and `k` is the\n number of estimated parameters.\n\n Note that the `'standard'` formulas are returned from the `aic`, `bic`,\n and `hqic` results attributes.\n\n The `'lutkepohl'` formulas are (Lütkepohl, 2010):\n\n .. math::\n\n AIC_L & = \\log | Q | + \\frac{2 k}{n} \\\\\n BIC_L & = \\log | Q | + \\frac{k \\log n}{n} \\\\\n HQIC_L & = \\log | Q | + \\frac{2 k \\log \\log n}{n} \\\\\n\n where :math:`Q` is the state covariance matrix. Note that the Lütkepohl\n definitions do not apply to all state space models, and should be used\n with care outside of SARIMAX and VARMAX models.\n\n References\n ----------\n .. [*] Lütkepohl, Helmut. 2007. 
*New Introduction to Multiple Time*\n *Series Analysis.* Berlin: Springer.\n \"\"\"\n criteria = criteria.lower()\n method = method.lower()\n\n if method == 'standard':\n out = getattr(self, criteria)\n elif method == 'lutkepohl':\n if self.filter_results.state_cov.shape[-1] > 1:\n raise ValueError('Cannot compute Lütkepohl statistics for'\n ' models with time-varying state covariance'\n ' matrix.')\n\n cov = self.filter_results.state_cov[:, :, 0]\n if criteria == 'aic':\n out = np.squeeze(np.linalg.slogdet(cov)[1] +\n 2 * self.df_model / self.nobs_effective)\n elif criteria == 'bic':\n out = np.squeeze(np.linalg.slogdet(cov)[1] +\n self.df_model * np.log(self.nobs_effective) /\n self.nobs_effective)\n elif criteria == 'hqic':\n out = np.squeeze(np.linalg.slogdet(cov)[1] +\n 2 * self.df_model *\n np.log(np.log(self.nobs_effective)) /\n self.nobs_effective)\n else:\n raise ValueError('Invalid information criteria')\n\n else:\n raise ValueError('Invalid information criteria computation method')\n\n return out\n\n @cache_readonly\n def fittedvalues(self):\n \"\"\"\n (array) The predicted values of the model. An (nobs x k_endog) array.\n \"\"\"\n # This is a (k_endog x nobs array; do not want to squeeze in case of\n # the corner case where nobs = 1 (mostly a concern in the predict or\n # forecast functions, but here also to maintain consistency)\n fittedvalues = self.forecasts\n if fittedvalues is None:\n pass\n elif fittedvalues.shape[0] == 1:\n fittedvalues = fittedvalues[0, :]\n else:\n fittedvalues = fittedvalues.T\n return fittedvalues\n\n @cache_readonly\n def hqic(self):\n \"\"\"\n (float) Hannan-Quinn Information Criterion\n \"\"\"\n # return (-2 * self.llf +\n # 2 * np.log(np.log(self.nobs_effective)) * self.df_model)\n return hqic(self.llf, self.nobs_effective, self.df_model)\n\n @cache_readonly\n def llf_obs(self):\n \"\"\"\n (float) The value of the log-likelihood function evaluated at `params`.\n \"\"\"\n return self.filter_results.llf_obs\n\n @cache_readonly\n def llf(self):\n \"\"\"\n (float) The value of the log-likelihood function evaluated at `params`.\n \"\"\"\n return self.filter_results.llf\n\n @cache_readonly\n def loglikelihood_burn(self):\n \"\"\"\n (float) The number of observations during which the likelihood is not\n evaluated.\n \"\"\"\n return self.filter_results.loglikelihood_burn\n\n @cache_readonly\n def mae(self):\n \"\"\"\n (float) Mean absolute error\n \"\"\"\n return np.mean(np.abs(self.resid))\n\n @cache_readonly\n def mse(self):\n \"\"\"\n (float) Mean squared error\n \"\"\"\n return self.sse / self.nobs\n\n @cache_readonly\n def pvalues(self):\n \"\"\"\n (array) The p-values associated with the z-statistics of the\n coefficients. Note that the coefficients are assumed to have a Normal\n distribution.\n \"\"\"\n pvalues = np.zeros_like(self.zvalues) * np.nan\n mask = np.ones_like(pvalues, dtype=bool)\n mask[self._free_params_index] = True\n mask &= ~np.isnan(self.zvalues)\n pvalues[mask] = norm.sf(np.abs(self.zvalues[mask])) * 2\n return pvalues\n\n @cache_readonly\n def resid(self):\n \"\"\"\n (array) The model residuals. 
An (nobs x k_endog) array.\n \"\"\"\n # This is a (k_endog x nobs array; do not want to squeeze in case of\n # the corner case where nobs = 1 (mostly a concern in the predict or\n # forecast functions, but here also to maintain consistency)\n resid = self.forecasts_error\n if resid is None:\n pass\n elif resid.shape[0] == 1:\n resid = resid[0, :]\n else:\n resid = resid.T\n return resid\n\n @property\n def states(self):\n if self.model._index_generated and not self.model._index_none:\n warnings.warn('No supported index is available. The `states`'\n ' DataFrame uses a generated integer index',\n ValueWarning)\n return self._states\n\n @cache_readonly\n def sse(self):\n \"\"\"\n (float) Sum of squared errors\n \"\"\"\n return np.sum(self.resid**2)\n\n @cache_readonly\n def zvalues(self):\n \"\"\"\n (array) The z-statistics for the coefficients.\n \"\"\"\n return self.params / self.bse\n\n def test_normality(self, method):\n \"\"\"\n Test for normality of standardized residuals.\n\n Null hypothesis is normality.\n\n Parameters\n ----------\n method : {'jarquebera', None}\n The statistical test for normality. Must be 'jarquebera' for\n Jarque-Bera normality test. If None, an attempt is made to select\n an appropriate test.\n\n See Also\n --------\n statsmodels.stats.stattools.jarque_bera\n The Jarque-Bera test of normality.\n\n Notes\n -----\n Let `d` = max(loglikelihood_burn, nobs_diffuse); this test is\n calculated ignoring the first `d` residuals.\n\n In the case of missing data, the maintained hypothesis is that the\n data are missing completely at random. This test is then run on the\n standardized residuals excluding those corresponding to missing\n observations.\n \"\"\"\n if method is None:\n method = 'jarquebera'\n\n if self.standardized_forecasts_error is None:\n raise ValueError('Cannot compute test statistic when standardized'\n ' forecast errors have not been computed.')\n\n if method == 'jarquebera':\n from statsmodels.stats.stattools import jarque_bera\n d = np.maximum(self.loglikelihood_burn, self.nobs_diffuse)\n output = []\n for i in range(self.model.k_endog):\n resid = self.filter_results.standardized_forecasts_error[i, d:]\n mask = ~np.isnan(resid)\n output.append(jarque_bera(resid[mask]))\n else:\n raise NotImplementedError('Invalid normality test method.')\n\n return np.array(output)\n\n def test_heteroskedasticity(self, method, alternative='two-sided',\n use_f=True):\n r\"\"\"\n Test for heteroskedasticity of standardized residuals\n\n Tests whether the sum-of-squares in the first third of the sample is\n significantly different than the sum-of-squares in the last third\n of the sample. Analogous to a Goldfeld-Quandt test. The null hypothesis\n is of no heteroskedasticity.\n\n Parameters\n ----------\n method : {'breakvar', None}\n The statistical test for heteroskedasticity. Must be 'breakvar'\n for test of a break in the variance. If None, an attempt is\n made to select an appropriate test.\n alternative : str, 'increasing', 'decreasing' or 'two-sided'\n This specifies the alternative for the p-value calculation. Default\n is two-sided.\n use_f : bool, optional\n Whether or not to compare against the asymptotic distribution\n (chi-squared) or the approximate small-sample distribution (F).\n Default is True (i.e. default is to compare against an F\n distribution).\n\n Returns\n -------\n output : ndarray\n An array with `(test_statistic, pvalue)` for each endogenous\n variable. The array is then sized `(k_endog, 2)`. 
If the method is\n called as `het = res.test_heteroskedasticity()`, then `het[0]` is\n an array of size 2 corresponding to the first endogenous variable,\n where `het[0][0]` is the test statistic, and `het[0][1]` is the\n p-value.\n\n Notes\n -----\n The null hypothesis is of no heteroskedasticity. That means different\n things depending on which alternative is selected:\n\n - Increasing: Null hypothesis is that the variance is not increasing\n throughout the sample; that the sum-of-squares in the later\n subsample is *not* greater than the sum-of-squares in the earlier\n subsample.\n - Decreasing: Null hypothesis is that the variance is not decreasing\n throughout the sample; that the sum-of-squares in the earlier\n subsample is *not* greater than the sum-of-squares in the later\n subsample.\n - Two-sided: Null hypothesis is that the variance is not changing\n throughout the sample. Both that the sum-of-squares in the earlier\n subsample is not greater than the sum-of-squares in the later\n subsample *and* that the sum-of-squares in the later subsample is\n not greater than the sum-of-squares in the earlier subsample.\n\n For :math:`h = [T/3]`, the test statistic is:\n\n .. math::\n\n H(h) = \\sum_{t=T-h+1}^T \\tilde v_t^2\n \\Bigg / \\sum_{t=d+1}^{d+1+h} \\tilde v_t^2\n\n where :math:`d` = max(loglikelihood_burn, nobs_diffuse)` (usually\n corresponding to diffuse initialization under either the approximate\n or exact approach).\n\n This statistic can be tested against an :math:`F(h,h)` distribution.\n Alternatively, :math:`h H(h)` is asymptotically distributed according\n to :math:`\\chi_h^2`; this second test can be applied by passing\n `asymptotic=True` as an argument.\n\n See section 5.4 of [1]_ for the above formula and discussion, as well\n as additional details.\n\n TODO\n\n - Allow specification of :math:`h`\n\n References\n ----------\n .. [1] Harvey, Andrew C. 1990. *Forecasting, Structural Time Series*\n *Models and the Kalman Filter.* Cambridge University Press.\n \"\"\"\n if method is None:\n method = 'breakvar'\n\n if self.standardized_forecasts_error is None:\n raise ValueError('Cannot compute test statistic when standardized'\n ' forecast errors have not been computed.')\n\n if method == 'breakvar':\n # Store some values\n squared_resid = self.filter_results.standardized_forecasts_error**2\n d = np.maximum(self.loglikelihood_burn, self.nobs_diffuse)\n # This differs from self.nobs_effective because here we want to\n # exclude exact diffuse periods, whereas self.nobs_effective only\n # excludes explicitly burned (usually approximate diffuse) periods.\n nobs_effective = self.nobs - d\n\n test_statistics = []\n p_values = []\n for i in range(self.model.k_endog):\n h = int(np.round(nobs_effective / 3))\n numer_resid = squared_resid[i, -h:]\n numer_resid = numer_resid[~np.isnan(numer_resid)]\n numer_dof = len(numer_resid)\n\n denom_resid = squared_resid[i, d:d+h]\n denom_resid = denom_resid[~np.isnan(denom_resid)]\n denom_dof = len(denom_resid)\n\n if numer_dof < 2:\n warnings.warn('Early subset of data for variable %d'\n ' has too few non-missing observations to'\n ' calculate test statistic.' % i)\n numer_resid = np.nan\n if denom_dof < 2:\n warnings.warn('Later subset of data for variable %d'\n ' has too few non-missing observations to'\n ' calculate test statistic.' 
% i)\n                    denom_resid = np.nan\n\n                test_statistic = np.sum(numer_resid) / np.sum(denom_resid)\n\n                # Setup functions to calculate the p-values\n                if use_f:\n                    from scipy.stats import f\n                    pval_lower = lambda test_statistics: f.cdf(  # noqa:E731\n                        test_statistics, numer_dof, denom_dof)\n                    pval_upper = lambda test_statistics: f.sf(  # noqa:E731\n                        test_statistics, numer_dof, denom_dof)\n                else:\n                    from scipy.stats import chi2\n                    pval_lower = lambda test_statistics: chi2.cdf(  # noqa:E731\n                        numer_dof * test_statistics, denom_dof)\n                    pval_upper = lambda test_statistics: chi2.sf(  # noqa:E731\n                        numer_dof * test_statistics, denom_dof)\n\n                # Calculate the one- or two-sided p-values\n                alternative = alternative.lower()\n                if alternative in ['i', 'inc', 'increasing']:\n                    p_value = pval_upper(test_statistic)\n                elif alternative in ['d', 'dec', 'decreasing']:\n                    test_statistic = 1. / test_statistic\n                    p_value = pval_upper(test_statistic)\n                elif alternative in ['2', '2-sided', 'two-sided']:\n                    p_value = 2 * np.minimum(\n                        pval_lower(test_statistic),\n                        pval_upper(test_statistic)\n                    )\n                else:\n                    raise ValueError('Invalid alternative.')\n\n                test_statistics.append(test_statistic)\n                p_values.append(p_value)\n\n            output = np.c_[test_statistics, p_values]\n        else:\n            raise NotImplementedError('Invalid heteroskedasticity test'\n                                      ' method.')\n\n        return output\n\n    def test_serial_correlation(self, method, lags=None):\n        \"\"\"\n        Ljung-Box test for no serial correlation of standardized residuals\n\n        Null hypothesis is no serial correlation.\n\n        Parameters\n        ----------\n        method : {'ljungbox', 'boxpierce', None}\n            The statistical test for serial correlation. If None, an attempt is\n            made to select an appropriate test.\n        lags : None, int or array_like\n            If lags is an integer then this is taken to be the largest lag\n            that is included, and the test result is reported for all smaller\n            lag lengths.\n            If lags is a list or array, then all lags are included up to the\n            largest lag in the list, however only the tests for the lags in the\n            list are reported.\n            If lags is None, then the default maxlag is 12*(nobs/100)^{1/4}.\n            After 0.12 the default maxlag will change to min(10, nobs // 5) for\n            non-seasonal models and min(2*m, nobs // 5) for seasonal time\n            series where m is the seasonal period.\n\n        Returns\n        -------\n        output : ndarray\n            An array with `(test_statistic, pvalue)` for each endogenous\n            variable and each lag. The array is then sized\n            `(k_endog, 2, lags)`. 
If the method is called as\n `ljungbox = res.test_serial_correlation()`, then `ljungbox[i]`\n holds the results of the Ljung-Box test (as would be returned by\n `statsmodels.stats.diagnostic.acorr_ljungbox`) for the `i` th\n endogenous variable.\n\n See Also\n --------\n statsmodels.stats.diagnostic.acorr_ljungbox\n Ljung-Box test for serial correlation.\n\n Notes\n -----\n Let `d` = max(loglikelihood_burn, nobs_diffuse); this test is\n calculated ignoring the first `d` residuals.\n\n Output is nan for any endogenous variable which has missing values.\n \"\"\"\n if method is None:\n method = 'ljungbox'\n\n if self.standardized_forecasts_error is None:\n raise ValueError('Cannot compute test statistic when standardized'\n ' forecast errors have not been computed.')\n\n if method == 'ljungbox' or method == 'boxpierce':\n from statsmodels.stats.diagnostic import acorr_ljungbox\n d = np.maximum(self.loglikelihood_burn, self.nobs_diffuse)\n # This differs from self.nobs_effective because here we want to\n # exclude exact diffuse periods, whereas self.nobs_effective only\n # excludes explicitly burned (usually approximate diffuse) periods.\n nobs_effective = self.nobs - d\n output = []\n\n # Default lags for acorr_ljungbox is 40, but may not always have\n # that many observations\n if lags is None:\n seasonal_periods = getattr(self.model, \"seasonal_periods\", 0)\n if seasonal_periods:\n lags = min(2 * seasonal_periods, nobs_effective // 5)\n else:\n lags = min(10, nobs_effective // 5)\n\n warnings.warn(\n \"The default value of lags is changing. After 0.12, \"\n \"this value will become min(10, nobs//5) for non-seasonal \"\n \"time series and min (2*m, nobs//5) for seasonal time \"\n \"series. Directly set lags to silence this warning.\",\n FutureWarning\n )\n\n for i in range(self.model.k_endog):\n results = acorr_ljungbox(\n self.filter_results.standardized_forecasts_error[i][d:],\n lags=lags, boxpierce=(method == 'boxpierce'),\n return_df=False)\n if method == 'ljungbox':\n output.append(results[0:2])\n else:\n output.append(results[2:])\n\n output = np.c_[output]\n else:\n raise NotImplementedError('Invalid serial correlation test'\n ' method.')\n return output\n\n def get_prediction(self, start=None, end=None, dynamic=False,\n index=None, exog=None, extend_model=None,\n extend_kwargs=None, **kwargs):\n \"\"\"\n In-sample prediction and out-of-sample forecasting\n\n Parameters\n ----------\n start : int, str, or datetime, optional\n Zero-indexed observation number at which to start forecasting,\n i.e., the first forecast is start. Can also be a date string to\n parse or a datetime type. Default is the the zeroth observation.\n end : int, str, or datetime, optional\n Zero-indexed observation number at which to end forecasting, i.e.,\n the last forecast is end. Can also be a date string to\n parse or a datetime type. However, if the dates index does not\n have a fixed frequency, end must be an integer index if you\n want out of sample prediction. Default is the last observation in\n the sample.\n dynamic : bool, int, str, or datetime, optional\n Integer offset relative to `start` at which to begin dynamic\n prediction. 
Can also be an absolute date string to parse or a\n            datetime type (these are not interpreted as offsets).\n            Prior to this observation, true endogenous values will be used for\n            prediction; starting with this observation and continuing through\n            the end of prediction, forecasted endogenous values will be used\n            instead.\n        **kwargs\n            Additional arguments may be required for forecasting beyond the end\n            of the sample. See `FilterResults.predict` for more details.\n\n        Returns\n        -------\n        predictions : PredictionResults\n            PredictionResults instance containing in-sample predictions and\n            out-of-sample forecasts.\n        \"\"\"\n        if start is None:\n            start = 0\n\n        # Handle start, end, dynamic\n        start, end, out_of_sample, prediction_index = (\n            self.model._get_prediction_index(start, end, index))\n\n        # Handle `dynamic`\n        if isinstance(dynamic, (str, dt.datetime, pd.Timestamp)):\n            dynamic, _, _ = self.model._get_index_loc(dynamic)\n            # Convert to offset relative to start\n            dynamic = dynamic - start\n\n        # If we have out-of-sample forecasting and `exog` or in general any\n        # kind of time-varying state space model, then we need to create an\n        # extended model to get updated state space system matrices\n        if extend_model is None:\n            extend_model = (self.model.exog is not None or\n                            not self.filter_results.time_invariant)\n        if out_of_sample and extend_model:\n            kwargs = self.model._get_extension_time_varying_matrices(\n                self.params, exog, out_of_sample, extend_kwargs,\n                transformed=True, includes_fixed=True, **kwargs)\n\n        # Make sure the model class has the current parameters\n        self.model.update(self.params, transformed=True, includes_fixed=True)\n\n        # Perform the prediction\n        # This is a (k_endog x npredictions) array; do not want to squeeze in\n        # case of npredictions = 1\n        prediction_results = self.filter_results.predict(\n            start, end + out_of_sample + 1, dynamic, **kwargs)\n\n        # Return a new mlemodel.PredictionResults object\n        return PredictionResultsWrapper(PredictionResults(\n            self, prediction_results, row_labels=prediction_index))\n\n    def get_forecast(self, steps=1, **kwargs):\n        \"\"\"\n        Out-of-sample forecasts and prediction intervals\n\n        Parameters\n        ----------\n        steps : int, str, or datetime, optional\n            If an integer, the number of steps to forecast from the end of the\n            sample. Can also be a date string to parse or a datetime type.\n            However, if the dates index does not have a fixed frequency, steps\n            must be an integer. Default is 1.\n        **kwargs\n            Additional arguments may be required for forecasting beyond the end\n            of the sample. See `FilterResults.predict` for more details.\n\n        Returns\n        -------\n        predictions : PredictionResults\n            PredictionResults instance containing in-sample predictions and\n            out-of-sample forecasts.\n        \"\"\"\n        if isinstance(steps, int):\n            end = self.nobs + steps - 1\n        else:\n            end = steps\n        return self.get_prediction(start=self.nobs, end=end, **kwargs)\n\n    def predict(self, start=None, end=None, dynamic=False, **kwargs):\n        \"\"\"\n        In-sample prediction and out-of-sample forecasting\n\n        Parameters\n        ----------\n        start : int, str, or datetime, optional\n            Zero-indexed observation number at which to start forecasting,\n            i.e., the first forecast is start. Can also be a date string to\n            parse or a datetime type. Default is the zeroth observation.\n        end : int, str, or datetime, optional\n            Zero-indexed observation number at which to end forecasting, i.e.,\n            the last forecast is end. Can also be a date string to\n            parse or a datetime type. 
However, if the dates index does not\n have a fixed frequency, end must be an integer index if you\n want out of sample prediction. Default is the last observation in\n the sample.\n dynamic : bool, int, str, or datetime, optional\n Integer offset relative to `start` at which to begin dynamic\n prediction. Can also be an absolute date string to parse or a\n datetime type (these are not interpreted as offsets).\n Prior to this observation, true endogenous values will be used for\n prediction; starting with this observation and continuing through\n the end of prediction, forecasted endogenous values will be used\n instead.\n **kwargs\n Additional arguments may required for forecasting beyond the end\n of the sample. See `FilterResults.predict` for more details.\n\n Returns\n -------\n forecast : array_like\n Array of out of in-sample predictions and / or out-of-sample\n forecasts. An (npredict x k_endog) array.\n\n See Also\n --------\n forecast\n Out-of-sample forecasts\n get_prediction\n Prediction results and confidence intervals\n \"\"\"\n # Perform the prediction\n prediction_results = self.get_prediction(start, end, dynamic, **kwargs)\n return prediction_results.predicted_mean\n\n def forecast(self, steps=1, **kwargs):\n \"\"\"\n Out-of-sample forecasts\n\n Parameters\n ----------\n steps : int, str, or datetime, optional\n If an integer, the number of steps to forecast from the end of the\n sample. Can also be a date string to parse or a datetime type.\n However, if the dates index does not have a fixed frequency, steps\n must be an integer. Default\n **kwargs\n Additional arguments may required for forecasting beyond the end\n of the sample. See `FilterResults.predict` for more details.\n\n Returns\n -------\n forecast : PredictionResults\n PredictionResults instance containing in-sample predictions and\n out-of-sample forecasts.\n \"\"\"\n if isinstance(steps, int):\n end = self.nobs + steps - 1\n else:\n end = steps\n return self.predict(start=self.nobs, end=end, **kwargs)\n\n def simulate(self, nsimulations, measurement_shocks=None,\n state_shocks=None, initial_state=None, anchor=None,\n repetitions=None, exog=None, extend_model=None,\n extend_kwargs=None, **kwargs):\n r\"\"\"\n Simulate a new time series following the state space model\n\n Parameters\n ----------\n nsimulations : int\n The number of observations to simulate. If the model is\n time-invariant this can be any number. If the model is\n time-varying, then this number must be less than or equal to the\n number\n measurement_shocks : array_like, optional\n If specified, these are the shocks to the measurement equation,\n :math:`\\varepsilon_t`. If unspecified, these are automatically\n generated using a pseudo-random number generator. If specified,\n must be shaped `nsimulations` x `k_endog`, where `k_endog` is the\n same as in the state space model.\n state_shocks : array_like, optional\n If specified, these are the shocks to the state equation,\n :math:`\\eta_t`. If unspecified, these are automatically\n generated using a pseudo-random number generator. If specified,\n must be shaped `nsimulations` x `k_posdef` where `k_posdef` is the\n same as in the state space model.\n initial_state : array_like, optional\n If specified, this is the initial state vector to use in\n simulation, which should be shaped (`k_states` x 1), where\n `k_states` is the same as in the state space model. If unspecified,\n but the model has been initialized, then that initialization is\n used. 
This must be specified if `anchor` is anything other than\n \"start\" or 0.\n anchor : int, str, or datetime, optional\n Starting point from which to begin the simulations; type depends on\n the index of the given `endog` model. Two special cases are the\n strings 'start' and 'end', which refer to starting at the beginning\n and end of the sample, respectively. If a date/time index was\n provided to the model, then this argument can be a date string to\n parse or a datetime type. Otherwise, an integer index should be\n given. Default is 'start'.\n repetitions : int, optional\n Number of simulated paths to generate. Default is 1 simulated path.\n exog : array_like, optional\n New observations of exogenous regressors, if applicable.\n\n Returns\n -------\n simulated_obs : ndarray\n An array of simulated observations. If `repetitions=None`, then it\n will be shaped (nsimulations x k_endog) or (nsimulations,) if\n `k_endog=1`. Otherwise it will be shaped\n (nsimulations x k_endog x repetitions). If the model was given\n Pandas input then the output will be a Pandas object. If\n `k_endog > 1` and `repetitions` is not None, then the output will\n be a Pandas DataFrame that has a MultiIndex for the columns, with\n the first level containing the names of the `endog` variables and\n the second level containing the repetition number.\n \"\"\"\n # Get the starting location\n if anchor is None or anchor == 'start':\n iloc = 0\n elif anchor == 'end':\n iloc = self.nobs\n else:\n iloc, _, _ = self.model._get_index_loc(anchor)\n if isinstance(iloc, slice):\n iloc = iloc.start\n\n if iloc < 0:\n iloc = self.nobs + iloc\n if iloc > self.nobs:\n raise ValueError('Cannot anchor simulation outside of the sample.')\n\n # Setup the initial state\n if initial_state is None:\n initial_state_moments = (\n self.predicted_state[:, iloc],\n self.predicted_state_cov[:, :, iloc])\n\n _repetitions = 1 if repetitions is None else repetitions\n\n initial_state = np.random.multivariate_normal(\n *initial_state_moments, size=_repetitions).T\n\n scale = self.scale if self.filter_results.filter_concentrated else None\n with self.model.ssm.fixed_scale(scale):\n sim = self.model.simulate(\n self.params, nsimulations,\n measurement_shocks=measurement_shocks,\n state_shocks=state_shocks, initial_state=initial_state,\n anchor=anchor, repetitions=repetitions, exog=exog,\n transformed=True, includes_fixed=True,\n extend_model=extend_model, extend_kwargs=extend_kwargs,\n **kwargs)\n\n return sim\n\n def impulse_responses(self, steps=1, impulse=0, orthogonalized=False,\n cumulative=False, **kwargs):\n \"\"\"\n Impulse response function\n\n Parameters\n ----------\n steps : int, optional\n The number of steps for which impulse responses are calculated.\n Default is 1. Note that for time-invariant models, the initial\n impulse is not counted as a step, so if `steps=1`, the output will\n have 2 entries.\n impulse : int or array_like\n If an integer, the state innovation to pulse; must be between 0\n and `k_posdef-1`. Alternatively, a custom impulse vector may be\n provided; must be shaped `k_posdef x 1`.\n orthogonalized : bool, optional\n Whether or not to perform impulse using orthogonalized innovations.\n Note that this will also affect custum `impulse` vectors. Default\n is False.\n cumulative : bool, optional\n Whether or not to return cumulative impulse responses. Default is\n False.\n anchor : int, str, or datetime, optional\n Time point within the sample for the state innovation impulse. 
Type\n depends on the index of the given `endog` in the model. Two special\n cases are the strings 'start' and 'end', which refer to setting the\n impulse at the first and last points of the sample, respectively.\n Integer values can run from 0 to `nobs - 1`, or can be negative to\n apply negative indexing. Finally, if a date/time index was provided\n to the model, then this argument can be a date string to parse or a\n datetime type. Default is 'start'.\n exog : array_like, optional\n New observations of exogenous regressors, if applicable.\n **kwargs\n If the model has time-varying design or transition matrices and the\n combination of `anchor` and `steps` implies creating impulse\n responses for the out-of-sample period, then these matrices must\n have updated values provided for the out-of-sample steps. For\n example, if `design` is a time-varying component, `nobs` is 10,\n `anchor=1`, and `steps` is 15, a (`k_endog` x `k_states` x 7)\n matrix must be provided with the new design matrix values.\n\n Returns\n -------\n impulse_responses : ndarray\n Responses for each endogenous variable due to the impulse\n given by the `impulse` argument. For a time-invariant model, the\n impulse responses are given for `steps + 1` elements (this gives\n the \"initial impulse\" followed by `steps` responses for the\n important cases of VAR and SARIMAX models), while for time-varying\n models the impulse responses are only given for `steps` elements\n (to avoid having to unexpectedly provide updated time-varying\n matrices).\n\n Notes\n -----\n Intercepts in the measurement and state equation are ignored when\n calculating impulse responses.\n \"\"\"\n scale = self.scale if self.filter_results.filter_concentrated else None\n with self.model.ssm.fixed_scale(scale):\n irfs = self.model.impulse_responses(self.params, steps, impulse,\n orthogonalized, cumulative,\n **kwargs)\n return irfs\n\n def _apply(self, mod, refit=False, fit_kwargs=None, **kwargs):\n if fit_kwargs is None:\n fit_kwargs = {}\n\n if refit:\n fit_kwargs.setdefault('start_params', self.params)\n if self._has_fixed_params:\n fit_kwargs.setdefault('includes_fixed', True)\n res = mod.fit_constrained(self._fixed_params, **fit_kwargs)\n else:\n res = mod.fit(**fit_kwargs)\n else:\n if 'cov_type' in fit_kwargs:\n raise ValueError('Cannot specify covariance type in'\n ' `fit_kwargs` unless refitting'\n ' parameters (not available in extend).')\n if 'cov_kwds' in fit_kwargs:\n raise ValueError('Cannot specify covariance keyword arguments'\n ' in `fit_kwargs` unless refitting'\n ' parameters (not available in extend).')\n\n fit_kwargs['cov_type'] = 'custom'\n fit_kwargs['cov_kwds'] = {\n 'custom_cov_type': self.cov_type,\n 'custom_cov_params': self.cov_params_default,\n 'custom_description': ('Parameters and standard errors'\n ' were estimated using a different'\n ' dataset and were then applied to this'\n ' dataset. 
%s'\n % self.cov_kwds['description'])}\n\n if self.smoother_results is not None:\n func = mod.smooth\n else:\n func = mod.filter\n\n if self._has_fixed_params:\n with mod.fix_params(self._fixed_params):\n fit_kwargs.setdefault('includes_fixed', True)\n res = func(self.params, **fit_kwargs)\n else:\n res = func(self.params, **fit_kwargs)\n\n return res\n\n def _news_previous_results(self, previous, start, end, periods):\n # Compute the news\n out = self.smoother_results.news(previous.smoother_results,\n start=start, end=end)\n return out\n\n def _news_updated_results(self, updated, start, end, periods):\n return updated._news_previous_results(self, start, end, periods)\n\n def _news_previous_data(self, endog, start, end, periods, exog):\n previous = self.apply(endog, exog=exog, copy_initialization=True)\n return self._news_previous_results(previous, start, end, periods)\n\n def _news_updated_data(self, endog, start, end, periods, exog):\n updated = self.apply(endog, exog=exog, copy_initialization=True)\n return self._news_updated_results(updated, start, end, periods)\n\n def news(self, comparison, start=None, end=None, periods=None, exog=None,\n return_raw=False, comparison_type=None, **kwargs):\n \"\"\"\n Compute impacts from updated data (news and revisions)\n\n Parameters\n ----------\n comparison : array_like or MLEResults\n An updated dataset with updated and/or revised data from which the\n news can be computed, or an updated or previous results object\n to use in computing the news.\n start : int, str, or datetime, optional\n The first period of impacts from news and revisions to compute.\n Can also be a date string to parse or a datetime type. Default is\n the first out-of-sample observation.\n end : int, str, or datetime, optional\n The last period of impacts from news and revisions to compute.\n Can also be a date string to parse or a datetime type. Default is\n the first out-of-sample observation.\n periods : int, optional\n The number of periods of impacts from news and revisions to\n compute.\n exog : array_like, optional\n Array of exogenous regressors for the out-of-sample period, if\n applicable.\n return_raw : bool, optional\n Whether or not to return only the specific output or a full\n results object. Default is to return a full results object.\n comparison_type : {None, 'previous', 'updated'}\n\n References\n ----------\n .. [1] Bańbura, Marta, and Michele Modugno.\n \"Maximum likelihood estimation of factor models on datasets with\n arbitrary pattern of missing data.\"\n Journal of Applied Econometrics 29, no. 1 (2014): 133-160.\n .. [2] Bańbura, Marta, Domenico Giannone, and Lucrezia Reichlin.\n \"Nowcasting.\"\n The Oxford Handbook of Economic Forecasting. July 8, 2011.\n .. [3] Bańbura, Marta, Domenico Giannone, Michele Modugno, and Lucrezia\n Reichlin.\n \"Now-casting and the real-time data flow.\"\n In Handbook of economic forecasting, vol. 2, pp. 
195-237.\n Elsevier, 2013.\n \"\"\"\n # Validate input\n if self.smoother_results is None:\n raise ValueError('Cannot compute news without Kalman smoother'\n ' results.')\n\n # If we were given data, create a new results object\n comparison_dataset = not isinstance(\n comparison, (MLEResults, MLEResultsWrapper))\n if comparison_dataset:\n # If `exog` is longer than `comparison`, then we extend it to match\n nobs_endog = len(comparison)\n nobs_exog = len(exog) if exog is not None else nobs_endog\n\n if nobs_exog > nobs_endog:\n _, _, _, ix = self.model._get_prediction_index(\n start=0, end=nobs_exog - 1)\n # TODO: check that the index of `comparison` matches the model\n comparison = np.asarray(comparison)\n if comparison.ndim < 2:\n comparison = np.atleast_2d(comparison).T\n if (comparison.ndim != 2 or\n comparison.shape[1] != self.model.k_endog):\n raise ValueError('Invalid shape for `comparison`. Must'\n f' contain {self.model.k_endog} columns.')\n extra = np.zeros((nobs_exog - nobs_endog,\n self.model.k_endog)) * np.nan\n comparison = pd.DataFrame(\n np.concatenate([comparison, extra], axis=0), index=ix,\n columns=self.model.endog_names)\n\n # Get the results object\n comparison = self.apply(comparison, exog=exog,\n copy_initialization=True, **kwargs)\n\n # Now, figure out the `updated` versus `previous` results objects\n nmissing = self.filter_results.missing.sum()\n nmissing_comparison = comparison.filter_results.missing.sum()\n if (comparison_type == 'updated' or (comparison_type is None and (\n comparison.nobs > self.nobs or\n nmissing > nmissing_comparison))):\n updated = comparison\n previous = self\n elif (comparison_type == 'previous' or (comparison_type is None and (\n comparison.nobs < self.nobs or\n nmissing < nmissing_comparison))):\n updated = self\n previous = comparison\n else:\n raise ValueError('Could not automatically determine the type'\n ' of comparison requested to compute the'\n ' News, so it must be specified as \"updated\"'\n ' or \"previous\", using the `comparison_type`'\n ' keyword argument')\n\n # Check that the index of `updated` is a superset of the\n # index of `previous`\n # Note: the try/except block is for Pandas < 0.25, in which\n # `PeriodIndex.difference` raises a ValueError if the argument is not\n # also a `PeriodIndex`.\n try:\n diff = previous.model._index.difference(updated.model._index)\n except ValueError:\n diff = [True]\n if len(diff) > 0:\n raise ValueError('The index associated with the updated results is'\n ' not a superset of the index associated with the'\n ' previous results, and so these datasets do not'\n ' appear to be related. Can only compute the'\n ' news by comparing this results set to previous'\n ' results objects.')\n\n # Handle start, end, periods\n # There doesn't seem to be any universal defaults that both (a) make\n # sense for all data update combinations, and (b) work with both\n # time-invariant and time-varying models. So we require that the user\n # specify exactly two of start, end, periods.\n if start is None and end is None and periods is None:\n start = previous.nobs - 1\n end = previous.nobs - 1\n if int(start is None) + int(end is None) + int(periods is None) != 1:\n raise ValueError('Of the three parameters: start, end, and'\n ' periods, exactly two must be specified')\n # If we have the `periods` object, we need to convert `start`/`end` to\n # integers so that we can compute the other one. 
That's because\n # _get_prediction_index doesn't support a `periods` argument\n elif start is not None and periods is not None:\n start, _, _, _ = self.model._get_prediction_index(start, start)\n end = start + (periods - 1)\n elif end is not None and periods is not None:\n _, end, _, _ = self.model._get_prediction_index(end, end)\n start = end - (periods - 1)\n elif start is not None and end is not None:\n pass\n\n # Get the integer-based start, end and the prediction index\n start, end, out_of_sample, prediction_index = (\n updated.model._get_prediction_index(start, end))\n end = end + out_of_sample\n\n # News results will always use Pandas, so if the model's data was not\n # from Pandas, we'll create an index, as if the model's data had been\n # given a default Pandas index.\n if prediction_index is None:\n prediction_index = pd.RangeIndex(start=start, stop=end + 1)\n\n # For time-varying models try to create an appended `updated` model\n # with NaN values. Do not extend the model if this was already done\n # above (i.e. the case that `comparison` was a new dataset), because\n # in that case `exog` and `kwargs` should have\n # been set with the input `comparison` dataset in mind, and so would be\n # useless here. Ultimately, we've already extended `updated` as far\n # as we can. So raise an exception in that case with a useful message.\n # However, we still want to try to accommodate extending the model here\n # if it is possible.\n # Note that we do not need to extend time-invariant models, because\n # `KalmanSmoother.news` can itself handle any impact dates for\n # time-invariant models.\n time_varying = not (previous.filter_results.time_invariant or\n updated.filter_results.time_invariant)\n if time_varying and end >= updated.nobs:\n # If we the given `comparison` was a dataset and either `exog` or\n # `kwargs` was set, then we assume that we cannot create an updated\n # time-varying model (because then we can't tell if `kwargs` and\n # `exog` arguments are meant to apply to the `comparison` dataset\n # or to this extension)\n if comparison_dataset and (exog is not None or len(kwargs) > 0):\n if comparison is updated:\n raise ValueError('If providing an updated dataset as the'\n ' `comparison` with a time-varying model,'\n ' then the `end` period cannot be beyond'\n ' the end of that updated dataset.')\n else:\n raise ValueError('If providing an previous dataset as the'\n ' `comparison` with a time-varying model,'\n ' then the `end` period cannot be beyond'\n ' the end of the (updated) results'\n ' object.')\n\n # Try to extend `updated`\n updated_orig = updated\n # TODO: `append` should fix this k_endog=1 issue for us\n # TODO: is the + 1 necessary?\n if self.model.k_endog > 1:\n extra = np.zeros((end - updated.nobs + 1,\n self.model.k_endog)) * np.nan\n else:\n extra = np.zeros((end - updated.nobs + 1,)) * np.nan\n updated = updated_orig.append(extra, exog=exog, **kwargs)\n\n # Compute the news\n news_results = (\n updated._news_previous_results(previous, start, end + 1, periods))\n\n if not return_raw:\n news_results = NewsResultsWrapper(\n NewsResults(news_results, self, updated, previous,\n row_labels=prediction_index))\n return news_results\n\n def append(self, endog, exog=None, refit=False, fit_kwargs=None, **kwargs):\n \"\"\"\n Recreate the results object with new data appended to the original data\n\n Creates a new result object applied to a dataset that is created by\n appending new data to the end of the model's original data. 
The new\n results can then be used for analysis or forecasting.\n\n Parameters\n ----------\n endog : array_like\n New observations from the modeled time-series process.\n exog : array_like, optional\n New observations of exogenous regressors, if applicable.\n refit : bool, optional\n Whether to re-fit the parameters, based on the combined dataset.\n Default is False (so parameters from the current results object\n are used to create the new results object).\n fit_kwargs : dict, optional\n Keyword arguments to pass to `fit` (if `refit=True`) or `filter` /\n `smooth`.\n **kwargs\n Keyword arguments may be used to modify model specification\n arguments when created the new model object.\n\n Returns\n -------\n results\n Updated Results object, that includes results from both the\n original dataset and the new dataset.\n\n Notes\n -----\n The `endog` and `exog` arguments to this method must be formatted in\n the same was (e.g. Pandas Series versus Numpy array) as were the\n `endog` and `exog` arrays passed to the original model.\n\n The `endog` argument to this method should consist of new observations\n that occurred directly after the last element of `endog`. For any other\n kind of dataset, see the `apply` method.\n\n This method will apply filtering to all of the original data as well\n as to the new data. To apply filtering only to the new data (which\n can be much faster if the original dataset is large), see the `extend`\n method.\n\n See Also\n --------\n statsmodels.tsa.statespace.mlemodel.MLEResults.extend\n statsmodels.tsa.statespace.mlemodel.MLEResults.apply\n\n Examples\n --------\n >>> index = pd.period_range(start='2000', periods=2, freq='A')\n >>> original_observations = pd.Series([1.2, 1.5], index=index)\n >>> mod = sm.tsa.SARIMAX(original_observations)\n >>> res = mod.fit()\n >>> print(res.params)\n ar.L1 0.9756\n sigma2 0.0889\n dtype: float64\n >>> print(res.fittedvalues)\n 2000 0.0000\n 2001 1.1707\n Freq: A-DEC, dtype: float64\n >>> print(res.forecast(1))\n 2002 1.4634\n Freq: A-DEC, dtype: float64\n\n >>> new_index = pd.period_range(start='2002', periods=1, freq='A')\n >>> new_observations = pd.Series([0.9], index=new_index)\n >>> updated_res = res.append(new_observations)\n >>> print(updated_res.params)\n ar.L1 0.9756\n sigma2 0.0889\n dtype: float64\n >>> print(updated_res.fittedvalues)\n 2000 0.0000\n 2001 1.1707\n 2002 1.4634\n Freq: A-DEC, dtype: float64\n >>> print(updated_res.forecast(1))\n 2003 0.878\n Freq: A-DEC, dtype: float64\n \"\"\"\n start = self.nobs\n end = self.nobs + len(endog) - 1\n _, _, _, append_ix = self.model._get_prediction_index(start, end)\n\n # Check the index of the new data\n if isinstance(self.model.data, PandasData):\n _check_index(append_ix, endog, '`endog`')\n\n # Concatenate the new data to original data\n new_endog = concat([self.model.data.orig_endog, endog], axis=0,\n allow_mix=True)\n\n # Handle `exog`\n if exog is not None:\n _, exog = prepare_exog(exog)\n _check_index(append_ix, exog, '`exog`')\n\n new_exog = concat([self.model.data.orig_exog, exog], axis=0,\n allow_mix=True)\n else:\n new_exog = None\n\n # Create a continuous index for the combined data\n if isinstance(self.model.data, PandasData):\n start = 0\n end = len(new_endog) - 1\n _, _, _, new_index = self.model._get_prediction_index(start, end)\n\n # Standardize `endog` to have the right index and columns\n columns = self.model.endog_names\n if not isinstance(columns, list):\n columns = [columns]\n new_endog = pd.DataFrame(new_endog, index=new_index,\n 
columns=columns)\n\n # Standardize `exog` to have the right index\n if new_exog is not None:\n new_exog = pd.DataFrame(new_exog, index=new_index,\n columns=self.model.exog_names)\n\n mod = self.model.clone(new_endog, exog=new_exog, **kwargs)\n res = self._apply(mod, refit=refit, fit_kwargs=fit_kwargs, **kwargs)\n\n return res\n\n def extend(self, endog, exog=None, fit_kwargs=None, **kwargs):\n \"\"\"\n Recreate the results object for new data that extends the original data\n\n Creates a new result object applied to a new dataset that is assumed to\n follow directly from the end of the model's original data. The new\n results can then be used for analysis or forecasting.\n\n Parameters\n ----------\n endog : array_like\n New observations from the modeled time-series process.\n exog : array_like, optional\n New observations of exogenous regressors, if applicable.\n fit_kwargs : dict, optional\n Keyword arguments to pass to `filter` or `smooth`.\n **kwargs\n Keyword arguments may be used to modify model specification\n arguments when created the new model object.\n\n Returns\n -------\n results\n Updated Results object, that includes results only for the new\n dataset.\n\n See Also\n --------\n statsmodels.tsa.statespace.mlemodel.MLEResults.append\n statsmodels.tsa.statespace.mlemodel.MLEResults.apply\n\n Notes\n -----\n The `endog` argument to this method should consist of new observations\n that occurred directly after the last element of the model's original\n `endog` array. For any other kind of dataset, see the `apply` method.\n\n This method will apply filtering only to the new data provided by the\n `endog` argument, which can be much faster than re-filtering the entire\n dataset. However, the returned results object will only have results\n for the new data. 
To retrieve results for both the new data and the\n original data, see the `append` method.\n\n Examples\n --------\n >>> index = pd.period_range(start='2000', periods=2, freq='A')\n >>> original_observations = pd.Series([1.2, 1.5], index=index)\n >>> mod = sm.tsa.SARIMAX(original_observations)\n >>> res = mod.fit()\n >>> print(res.params)\n ar.L1 0.9756\n sigma2 0.0889\n dtype: float64\n >>> print(res.fittedvalues)\n 2000 0.0000\n 2001 1.1707\n Freq: A-DEC, dtype: float64\n >>> print(res.forecast(1))\n 2002 1.4634\n Freq: A-DEC, dtype: float64\n\n >>> new_index = pd.period_range(start='2002', periods=1, freq='A')\n >>> new_observations = pd.Series([0.9], index=new_index)\n >>> updated_res = res.extend(new_observations)\n >>> print(updated_res.params)\n ar.L1 0.9756\n sigma2 0.0889\n dtype: float64\n >>> print(updated_res.fittedvalues)\n 2002 1.4634\n Freq: A-DEC, dtype: float64\n >>> print(updated_res.forecast(1))\n 2003 0.878\n Freq: A-DEC, dtype: float64\n \"\"\"\n start = self.nobs\n end = self.nobs + len(endog) - 1\n _, _, _, extend_ix = self.model._get_prediction_index(start, end)\n\n if isinstance(self.model.data, PandasData):\n _check_index(extend_ix, endog, '`endog`')\n\n # Standardize `endog` to have the right index and columns\n columns = self.model.endog_names\n if not isinstance(columns, list):\n columns = [columns]\n endog = pd.DataFrame(endog, index=extend_ix, columns=columns)\n # Extend the current fit result to additional data\n mod = self.model.clone(endog, exog=exog, **kwargs)\n mod.ssm.initialization = Initialization(\n mod.k_states, 'known', constant=self.predicted_state[..., -1],\n stationary_cov=self.predicted_state_cov[..., -1])\n res = self._apply(mod, refit=False, fit_kwargs=fit_kwargs, **kwargs)\n\n return res\n\n def apply(self, endog, exog=None, refit=False, fit_kwargs=None,\n copy_initialization=False, **kwargs):\n \"\"\"\n Apply the fitted parameters to new data unrelated to the original data\n\n Creates a new result object using the current fitted parameters,\n applied to a completely new dataset that is assumed to be unrelated to\n the model's original data. The new results can then be used for\n analysis or forecasting.\n\n Parameters\n ----------\n endog : array_like\n New observations from the modeled time-series process.\n exog : array_like, optional\n New observations of exogenous regressors, if applicable.\n refit : bool, optional\n Whether to re-fit the parameters, using the new dataset.\n Default is False (so parameters from the current results object\n are used to create the new results object).\n copy_initialization : bool, optional\n Whether or not to copy the current model's initialization to the\n new model. Default is True.\n fit_kwargs : dict, optional\n Keyword arguments to pass to `fit` (if `refit=True`) or `filter` /\n `smooth`.\n **kwargs\n Keyword arguments may be used to modify model specification\n arguments when created the new model object.\n\n Returns\n -------\n results\n Updated Results object, that includes results only for the new\n dataset.\n\n See Also\n --------\n statsmodels.tsa.statespace.mlemodel.MLEResults.append\n statsmodels.tsa.statespace.mlemodel.MLEResults.apply\n\n Notes\n -----\n The `endog` argument to this method should consist of new observations\n that are unrelated to the original model's `endog` dataset. 
For\n observations that continue that original dataset by follow directly\n after its last element, see the `append` and `extend` methods.\n\n Examples\n --------\n >>> index = pd.period_range(start='2000', periods=2, freq='A')\n >>> original_observations = pd.Series([1.2, 1.5], index=index)\n >>> mod = sm.tsa.SARIMAX(original_observations)\n >>> res = mod.fit()\n >>> print(res.params)\n ar.L1 0.9756\n sigma2 0.0889\n dtype: float64\n >>> print(res.fittedvalues)\n 2000 0.0000\n 2001 1.1707\n Freq: A-DEC, dtype: float64\n >>> print(res.forecast(1))\n 2002 1.4634\n Freq: A-DEC, dtype: float64\n\n >>> new_index = pd.period_range(start='1980', periods=3, freq='A')\n >>> new_observations = pd.Series([1.4, 0.3, 1.2], index=new_index)\n >>> new_res = res.apply(new_observations)\n >>> print(new_res.params)\n ar.L1 0.9756\n sigma2 0.0889\n dtype: float64\n >>> print(new_res.fittedvalues)\n 1980 1.1707\n 1981 1.3659\n 1982 0.2927\n Freq: A-DEC, dtype: float64\n Freq: A-DEC, dtype: float64\n >>> print(new_res.forecast(1))\n 1983 1.1707\n Freq: A-DEC, dtype: float64\n \"\"\"\n mod = self.model.clone(endog, exog=exog, **kwargs)\n if copy_initialization:\n mod.ssm.initialization = self.model.initialization\n res = self._apply(mod, refit=refit, fit_kwargs=fit_kwargs, **kwargs)\n\n return res\n\n def plot_diagnostics(self, variable=0, lags=10, fig=None, figsize=None):\n \"\"\"\n Diagnostic plots for standardized residuals of one endogenous variable\n\n Parameters\n ----------\n variable : int, optional\n Index of the endogenous variable for which the diagnostic plots\n should be created. Default is 0.\n lags : int, optional\n Number of lags to include in the correlogram. Default is 10.\n fig : Figure, optional\n If given, subplots are created in this figure instead of in a new\n figure. Note that the 2x2 grid will be created in the provided\n figure using `fig.add_subplot()`.\n figsize : tuple, optional\n If a figure is created, this argument allows specifying a size.\n The tuple is (width, height).\n\n Returns\n -------\n Figure\n Figure instance with diagnostic plots\n\n See Also\n --------\n statsmodels.graphics.gofplots.qqplot\n statsmodels.graphics.tsaplots.plot_acf\n\n Notes\n -----\n Produces a 2x2 plot grid with the following plots (ordered clockwise\n from top left):\n\n 1. Standardized residuals over time\n 2. Histogram plus estimated density of standardized residuals, along\n with a Normal(0,1) density plotted for reference.\n 3. Normal Q-Q plot, with Normal reference line.\n 4. 
Correlogram\n \"\"\"\n from statsmodels.graphics.utils import _import_mpl, create_mpl_fig\n _import_mpl()\n fig = create_mpl_fig(fig, figsize)\n # Eliminate residuals associated with burned or diffuse likelihoods\n d = np.maximum(self.loglikelihood_burn, self.nobs_diffuse)\n resid = self.filter_results.standardized_forecasts_error[variable, d:]\n\n # Top-left: residuals vs time\n ax = fig.add_subplot(221)\n if hasattr(self.data, 'dates') and self.data.dates is not None:\n x = self.data.dates[d:]._mpl_repr()\n else:\n x = np.arange(len(resid))\n ax.plot(x, resid)\n ax.hlines(0, x[0], x[-1], alpha=0.5)\n ax.set_xlim(x[0], x[-1])\n ax.set_title('Standardized residual')\n\n # Top-right: histogram, Gaussian kernel density, Normal density\n # Can only do histogram and Gaussian kernel density on the non-null\n # elements\n resid_nonmissing = resid[~(np.isnan(resid))]\n ax = fig.add_subplot(222)\n\n # gh5792: Remove except after support for matplotlib>2.1 required\n try:\n ax.hist(resid_nonmissing, density=True, label='Hist')\n except AttributeError:\n ax.hist(resid_nonmissing, normed=True, label='Hist')\n\n from scipy.stats import gaussian_kde, norm\n kde = gaussian_kde(resid_nonmissing)\n xlim = (-1.96*2, 1.96*2)\n x = np.linspace(xlim[0], xlim[1])\n ax.plot(x, kde(x), label='KDE')\n ax.plot(x, norm.pdf(x), label='N(0,1)')\n ax.set_xlim(xlim)\n ax.legend()\n ax.set_title('Histogram plus estimated density')\n\n # Bottom-left: QQ plot\n ax = fig.add_subplot(223)\n from statsmodels.graphics.gofplots import qqplot\n qqplot(resid_nonmissing, line='s', ax=ax)\n ax.set_title('Normal Q-Q')\n\n # Bottom-right: Correlogram\n ax = fig.add_subplot(224)\n from statsmodels.graphics.tsaplots import plot_acf\n plot_acf(resid, ax=ax, lags=lags)\n ax.set_title('Correlogram')\n\n ax.set_ylim(-1, 1)\n\n return fig\n\n def summary(self, alpha=.05, start=None, title=None, model_name=None,\n display_params=True):\n \"\"\"\n Summarize the Model\n\n Parameters\n ----------\n alpha : float, optional\n Significance level for the confidence intervals. Default is 0.05.\n start : int, optional\n Integer of the start observation. Default is 0.\n model_name : str\n The name of the model used. 
Default is to use model class name.\n\n Returns\n -------\n summary : Summary instance\n This holds the summary table and text, which can be printed or\n converted to various output formats.\n\n See Also\n --------\n statsmodels.iolib.summary.Summary\n \"\"\"\n from statsmodels.iolib.summary import Summary\n\n # Model specification results\n model = self.model\n if title is None:\n title = 'Statespace Model Results'\n\n if start is None:\n start = 0\n if self.model._index_dates:\n ix = self.model._index\n d = ix[start]\n sample = ['%02d-%02d-%02d' % (d.month, d.day, d.year)]\n d = ix[-1]\n sample += ['- ' + '%02d-%02d-%02d' % (d.month, d.day, d.year)]\n else:\n sample = [str(start), ' - ' + str(self.nobs)]\n\n # Standardize the model name as a list of str\n if model_name is None:\n model_name = model.__class__.__name__\n\n # Diagnostic tests results\n try:\n het = self.test_heteroskedasticity(method='breakvar')\n except Exception: # FIXME: catch something specific\n het = np.array([[np.nan]*2])\n try:\n lb = self.test_serial_correlation(method='ljungbox')\n except Exception: # FIXME: catch something specific\n lb = np.array([[np.nan]*2]).reshape(1, 2, 1)\n try:\n jb = self.test_normality(method='jarquebera')\n except Exception: # FIXME: catch something specific\n jb = np.array([[np.nan]*4])\n\n # Create the tables\n if not isinstance(model_name, list):\n model_name = [model_name]\n\n top_left = [('Dep. Variable:', None)]\n top_left.append(('Model:', [model_name[0]]))\n for i in range(1, len(model_name)):\n top_left.append(('', ['+ ' + model_name[i]]))\n top_left += [\n ('Date:', None),\n ('Time:', None),\n ('Sample:', [sample[0]]),\n ('', [sample[1]])\n ]\n\n top_right = [\n ('No. Observations:', [self.nobs]),\n ('Log Likelihood', [\"%#5.3f\" % self.llf]),\n ]\n if hasattr(self, 'rsquared'):\n top_right.append(('R-squared:', [\"%#8.3f\" % self.rsquared]))\n top_right += [\n ('AIC', [\"%#5.3f\" % self.aic]),\n ('BIC', [\"%#5.3f\" % self.bic]),\n ('HQIC', [\"%#5.3f\" % self.hqic])]\n if (self.filter_results is not None and\n self.filter_results.filter_concentrated):\n top_right.append(('Scale', [\"%#5.3f\" % self.scale]))\n\n if hasattr(self, 'cov_type'):\n top_left.append(('Covariance Type:', [self.cov_type]))\n\n format_str = lambda array: [ # noqa:E731\n ', '.join(['{0:.2f}'.format(i) for i in array])\n ]\n diagn_left = [('Ljung-Box (Q):', format_str(lb[:, 0, -1])),\n ('Prob(Q):', format_str(lb[:, 1, -1])),\n ('Heteroskedasticity (H):', format_str(het[:, 0])),\n ('Prob(H) (two-sided):', format_str(het[:, 1]))\n ]\n\n diagn_right = [('Jarque-Bera (JB):', format_str(jb[:, 0])),\n ('Prob(JB):', format_str(jb[:, 1])),\n ('Skew:', format_str(jb[:, 2])),\n ('Kurtosis:', format_str(jb[:, 3]))\n ]\n\n summary = Summary()\n summary.add_table_2cols(self, gleft=top_left, gright=top_right,\n title=title)\n if len(self.params) > 0 and display_params:\n summary.add_table_params(self, alpha=alpha,\n xname=self.param_names, use_t=False)\n summary.add_table_2cols(self, gleft=diagn_left, gright=diagn_right,\n title=\"\")\n\n # Add warnings/notes, added to text format only\n etext = []\n if hasattr(self, 'cov_type') and 'description' in self.cov_kwds:\n etext.append(self.cov_kwds['description'])\n if self._rank < (len(self.params) - len(self.fixed_params)):\n cov_params = self.cov_params()\n if len(self.fixed_params) > 0:\n mask = np.ix_(self._free_params_index, self._free_params_index)\n cov_params = cov_params[mask]\n etext.append(\"Covariance matrix is singular or near-singular,\"\n \" with condition 
number %6.3g. Standard errors may be\"\n \" unstable.\" % np.linalg.cond(cov_params))\n\n if etext:\n etext = [\"[{0}] {1}\".format(i + 1, text)\n for i, text in enumerate(etext)]\n etext.insert(0, \"Warnings:\")\n summary.add_extra_txt(etext)\n\n return summary\n\n\nclass MLEResultsWrapper(wrap.ResultsWrapper):\n _attrs = {\n 'zvalues': 'columns',\n 'cov_params_approx': 'cov',\n 'cov_params_default': 'cov',\n 'cov_params_oim': 'cov',\n 'cov_params_opg': 'cov',\n 'cov_params_robust': 'cov',\n 'cov_params_robust_approx': 'cov',\n 'cov_params_robust_oim': 'cov',\n }\n _wrap_attrs = wrap.union_dicts(tsbase.TimeSeriesResultsWrapper._wrap_attrs,\n _attrs)\n _methods = {\n 'forecast': 'dates',\n 'impulse_responses': 'ynames'\n }\n _wrap_methods = wrap.union_dicts(\n tsbase.TimeSeriesResultsWrapper._wrap_methods, _methods)\nwrap.populate_wrapper(MLEResultsWrapper, MLEResults) # noqa:E305\n\n\nclass PredictionResults(pred.PredictionResults):\n \"\"\"\n\n Parameters\n ----------\n prediction_results : kalman_filter.PredictionResults instance\n Results object from prediction after fitting or filtering a state space\n model.\n row_labels : iterable\n Row labels for the predicted data.\n\n Attributes\n ----------\n \"\"\"\n def __init__(self, model, prediction_results, row_labels=None):\n if model.model.k_endog == 1:\n endog = pd.Series(prediction_results.endog[0],\n name=model.model.endog_names)\n else:\n endog = pd.DataFrame(prediction_results.endog.T,\n columns=model.model.endog_names)\n self.model = Bunch(data=model.data.__class__(\n endog=endog, predict_dates=row_labels))\n self.prediction_results = prediction_results\n\n # Get required values\n k_endog, nobs = prediction_results.endog.shape\n if not prediction_results.results.memory_no_forecast_mean:\n predicted_mean = self.prediction_results.forecasts\n else:\n predicted_mean = np.zeros((k_endog, nobs)) * np.nan\n\n if predicted_mean.shape[0] == 1:\n predicted_mean = predicted_mean[0, :]\n else:\n predicted_mean = predicted_mean.transpose()\n\n if not prediction_results.results.memory_no_forecast_cov:\n var_pred_mean = self.prediction_results.forecasts_error_cov\n else:\n var_pred_mean = np.zeros((k_endog, k_endog, nobs)) * np.nan\n\n if var_pred_mean.shape[0] == 1:\n var_pred_mean = var_pred_mean[0, 0, :]\n else:\n var_pred_mean = var_pred_mean.transpose()\n\n # Initialize\n super(PredictionResults, self).__init__(predicted_mean, var_pred_mean,\n dist='norm',\n row_labels=row_labels)\n\n def conf_int(self, method='endpoint', alpha=0.05, **kwds):\n # TODO: this performs metadata wrapping, and that should be handled\n # by attach_* methods. 
However, they do not currently support\n # this use case.\n conf_int = super(PredictionResults, self).conf_int(alpha, **kwds)\n\n # Create a dataframe\n if self._row_labels is not None:\n conf_int = pd.DataFrame(conf_int, index=self.row_labels)\n\n # Attach the endog names\n ynames = self.model.data.ynames\n if not type(ynames) == list:\n ynames = [ynames]\n names = (['lower {0}'.format(name) for name in ynames] +\n ['upper {0}'.format(name) for name in ynames])\n conf_int.columns = names\n\n return conf_int\n\n def summary_frame(self, endog=0, alpha=0.05):\n # TODO: finish and cleanup\n # import pandas as pd\n # ci_obs = self.conf_int(alpha=alpha, obs=True) # need to split\n ci_mean = np.asarray(self.conf_int(alpha=alpha))\n to_include = {}\n if self.predicted_mean.ndim == 1:\n yname = self.model.data.ynames\n to_include['mean'] = self.predicted_mean\n to_include['mean_se'] = self.se_mean\n k_endog = 1\n else:\n yname = self.model.data.ynames[endog]\n to_include['mean'] = self.predicted_mean[:, endog]\n to_include['mean_se'] = self.se_mean[:, endog]\n k_endog = self.predicted_mean.shape[1]\n to_include['mean_ci_lower'] = ci_mean[:, endog]\n to_include['mean_ci_upper'] = ci_mean[:, k_endog + endog]\n\n # pandas dict does not handle 2d_array\n # data = np.column_stack(list(to_include.values()))\n # names = ....\n res = pd.DataFrame(to_include, index=self._row_labels,\n columns=list(to_include.keys()))\n res.columns.name = yname\n return res\n\n\nclass PredictionResultsWrapper(wrap.ResultsWrapper):\n _attrs = {\n 'predicted_mean': 'dates',\n 'se_mean': 'dates',\n 't_values': 'dates',\n }\n _wrap_attrs = wrap.union_dicts(_attrs)\n\n _methods = {}\n _wrap_methods = wrap.union_dicts(_methods)\nwrap.populate_wrapper(PredictionResultsWrapper, PredictionResults) # noqa:E305\n"
] |
[
[
"numpy.diag",
"numpy.dot",
"pandas.Series",
"numpy.linspace",
"numpy.linalg.matrix_rank",
"pandas.RangeIndex",
"numpy.random.multivariate_normal",
"numpy.issubdtype",
"numpy.asarray",
"pandas.DataFrame",
"numpy.round",
"numpy.concatenate",
"scipy.stats.gaussian_kde",
"numpy.zeros_like",
"numpy.ix_",
"numpy.ones_like",
"numpy.reshape",
"numpy.arange",
"numpy.eye",
"numpy.linalg.cond",
"numpy.linalg.slogdet",
"numpy.outer",
"numpy.zeros",
"numpy.log",
"numpy.linalg.inv",
"numpy.isnan",
"scipy.stats.chi2.sf",
"numpy.atleast_2d",
"numpy.identity",
"pandas.MultiIndex.from_product",
"numpy.transpose",
"numpy.array",
"numpy.sum",
"numpy.diagonal",
"numpy.maximum",
"numpy.inner",
"numpy.abs",
"scipy.stats.norm.pdf",
"scipy.stats.f.cdf",
"scipy.stats.chi2.cdf",
"scipy.stats.f.sf",
"numpy.empty"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"1.3",
"0.19",
"1.1",
"1.5",
"0.24",
"0.20",
"1.0",
"0.25",
"1.2"
],
"scipy": [],
"tensorflow": []
}
] |
zaydalameddine/NetflixStockProfile_2017
|
[
"35f3faee7d349191b75dfe087a4f6d8272063971"
] |
[
"Data + Script/script.py"
] |
[
"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Mar 26 15:43:43 2020\n\n@author: Zayd Alameddine\n\"\"\"\n\nfrom matplotlib import pyplot as plt\nimport pandas as pd\nimport seaborn as sns\n\nnetflix_stocks = pd.read_csv('NFLX.csv')\ndowjones_stocks = pd.read_csv('DJI.csv')\nnetflix_stocks_quarterly = pd.read_csv('NFLX_daily_by_quarter.csv')\n\n# renamed this column to make it easier to use since the Adj Close is really just the actual price of the stock\nnetflix_stocks.rename(columns = {'Adj Close': 'Price'}, inplace = True)\ndowjones_stocks.rename(columns = {'Adj Close': 'Price'}, inplace = True)\nnetflix_stocks_quarterly.rename(columns = {'Adj Close': 'Price'}, inplace = True)\n\n# Visualizing the distribution of the Netflix quarterly stock\nax = sns.violinplot(x = 'Quarter', y = 'Price', data = netflix_stocks_quarterly)\nax.set_title('Distribution of 2017 Netflix Stock Prices by Quarter')\nax.set(xlabel='Closing Stock Price', ylabel = 'Business Quarters in 2017')\n\nplt.show()\nplt.savefig('QuarterlyDistribution.png')\n# deletes the previous plot\nplt.clf()\n\n# Charting the performance of the earnings per chare (EPS) by graphing the estimated Yahoo projected for the Quarter compared to the actual earnings for that quarter\n\n# All the information that will be needed to plot the chart\nx_positions = [1, 2, 3, 4]\nchart_labels = [\"1Q2017\",\"2Q2017\",\"3Q2017\",\"4Q2017\"]\nearnings_actual =[.4, .15,.29,.41]\nearnings_estimate = [.37,.15,.32,.41 ]\n\n# Ploting the actual vs estimated earnings\nplt.scatter(x_positions, earnings_actual, c='red', alpha = 0.5)\nplt.scatter(x_positions, earnings_estimate, c='blue', alpha = 0.5)\n\n# Labeling the plot\nplt.legend(['Actual', 'Estimate'])\nplt.xticks(x_positions, chart_labels)\nplt.title('Earnings Per Share in Cents')\n\nplt.show()\nplt.savefig('EarningsPerShare.png')\n# deletes the previous plot\nplt.clf()\n\n# Plot of earnings and revenue reported by Netflix\n\n# The metrics below are in billions of dollars\nrevenue_by_quarter = [2.79, 2.98,3.29,3.7]\nearnings_by_quarter = [.0656,.12959,.18552,.29012]\nquarter_labels = [\"2Q2017\",\"3Q2017\",\"4Q2017\", \"1Q2018\"]\n\n# Revenue\nn = 1 # This is our first dataset (out of 2)\nt = 2 # Number of dataset\nd = 4 # Number of sets of bars\nw = 0.8 # Width of each bar\nbars1_x = [t*element + w*n for element\n in range(d)]\n\n\n\n# Earnings\nn = 2 # This is our second dataset (out of 2)\nt = 2 # Number of dataset\nd = 4 # Number of sets of bars\nw = 0.8 # Width of each bar\nbars2_x = [t*element + w*n for element\n in range(d)]\n\nmiddle_x = [ (a + b) / 2.0 for a, b in zip(bars1_x, bars2_x)]\nlabels = [\"Revenue\", \"Earnings\"]\n\nplt.bar(bars1_x, revenue_by_quarter)\nplt.bar(bars2_x, earnings_by_quarter)\n\n# Adding titles and labels to the plot \nplt.legend(labels)\nplt.title('Earnings and Revenue Per Quarter')\n\nplt.xticks(middle_x, quarter_labels)\n\nplt.show()\nplt.savefig('Earnings&RevenuePerQuarter')\n# deletes the previous plot\nplt.clf()\n\n# Comparing the Netflix stock to the Dow Jones Industrial Average in 2017\n\n# Left plot Netflix\nax1 = plt.subplot(1, 2, 1)\nplt.plot(netflix_stocks['Date'], netflix_stocks['Price'])\n\nax1.set_title('Netflix')\nax1.set(xlabel='Date', ylabel = 'Stock Price')\n\n# Right plot Dow Jones\nax2 = plt.subplot(1, 2, 2)\nplt.plot(dowjones_stocks['Date'], dowjones_stocks['Price'])\n\nax2.set_title('Dow Jones')\nax2.set(xlabel='Date', ylabel = 'Stock 
Price')\n\n\nplt.subplots_adjust(wspace=.5)\n\nplt.show()\nplt.savefig('NetflixDowJonesComparison.png')\n\nprint(netflix_stocks.head(5))\nprint(dowjones_stocks.head(5))\nprint(netflix_stocks_quarterly.head(5))\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n"
] |
[
[
"matplotlib.pyplot.legend",
"pandas.read_csv",
"matplotlib.pyplot.scatter",
"matplotlib.pyplot.title",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.clf",
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.bar",
"matplotlib.pyplot.subplots_adjust",
"matplotlib.pyplot.xticks",
"matplotlib.pyplot.show"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |