repo_name | hexsha | file_path | code | apis |
---|---|---|---|---|
maccam912/Oddyssey | [
"a9d1eca9ea1dfabd9873eb842eae03f2ed83d405"
] | [
"src/GameManager/gui/subscreen.py"
] | [
"import numpy as np\n\nclass SubScreen():\n def __init__(self, x, y, width, height, curses):\n self.x = x\n self.y = y\n self.width = width\n self.height = height\n self.curses = curses\n \n def put_char(self, x, y, char=' ', foreground='white', background='transparent'):\n if x < self.width and x >= self.x and y < self.height and y >= self.y:\n self.curses.put_char(self.x + x, self.y + y, char, foreground, background)\n else:\n raise ValueError('Error: Out of SubScreen boundary.')\n \n def put_message(self, x, y , message, foreground='white', background='transparent', auto=True, align='left'):\n self.curses.put_message(self.x + x, self.y + y , message, foreground, background, auto, align, box_x=self.x, box_y=self.y, box_width=self.width, box_height=self.height)\n \n def fill_char(self, char=' ', foreground='white', background='transparent'):\n for i in range(self.x, self.x + self.width):\n for j in range(self.y, self.y + self.height):\n self.curses.put_char(i, j, char, foreground, background) \n\nclass MessageScreen(SubScreen):\n def __init__(self, x, y, width, height, curses):\n super(MessageScreen, self).__init__(x, y, width, height, curses)\n self.initialization()\n \n def initialization(self):\n self.message_id = 1\n self.message_size = self.height\n self.message_storage = ['']*self.message_size\n self.color_storage = ['transparent']*self.message_size\n self.idx_storage = ['']*self.message_size\n \n def add_message(self, message, color='white'):\n idx = '[%d] '%(self.message_id)\n message = message\n self.message_id += 1\n self.message_storage.append(message)\n self.color_storage.append(color)\n self.idx_storage.append(idx)\n \n self.message_storage.pop(0)\n self.color_storage.pop(0)\n self.idx_storage.pop(0)\n \n def draw(self):\n self.fill_char()\n for i in range(len(self.message_storage)):\n self.put_message(0, i, self.idx_storage[i], foreground='white', background='transparent', auto=True, align='left')\n self.put_message(len(self.idx_storage[i]), i , self.message_storage[i], foreground=self.color_storage[i], background='transparent', auto=True, align='left')\n\nclass PlayerInfoScreen(SubScreen):\n def __init__(self, x, y, width, height, curses, player):\n super(PlayerInfoScreen, self).__init__(x, y, width, height, curses)\n self.player = player\n self.initialization()\n \n def initialization(self):\n self.full_health_bar_length = 15\n self.draw()\n \n def draw(self):\n # Draw background\n self.fill_char(char='█', foreground='peru', background='transparent')\n # Draw HP bar\n health = self.player.current_health\n interval = self.player.health / self.full_health_bar_length / 3\n level = int(np.ceil(health / interval))\n \n health_title = 'HP '\n \n if level % 3 == 0:\n remainder = ''\n elif level % 3 == 1:\n remainder = '░'\n elif level % 3 == 2: \n remainder = '▒'\n \n health_message = '█' * int((level - level%3)/3) + remainder\n self.put_message(0, 0, health_title, foreground='red', background='peru', auto=True, align='left')\n self.put_message(len(health_title), 0, ' '*self.full_health_bar_length, foreground='red', background='transparent', auto=True, align='left')\n self.put_message(len(health_title), 0, health_message, foreground='red', background='transparent', auto=True, align='left')\n "
] | [
[
"numpy.ceil"
]
] |
davidliyutong/Flint | [
"4e2552dac8d781c21e8998ad68bbf1b986b09258"
] | [
"test/test43_tf_official.py"
] | [
"from models import Linear3\nfrom core.Optimizers import sgd, bgd\nfrom core.Functions import one_hot_f\nimport numpy as np\nfrom tensorflow import keras\nfrom core.Dataloader import batch_iterator\n\n\ndef test(model, test_inputs, test_labels):\n num_of_sample = test_inputs.shape[0]\n cnt_correct, cnt_tot = 0, 0\n for i in range(num_of_sample):\n test_input = test_inputs[i:i + 1]\n test_label = test_labels[i]\n res = model.forward_prop(test_input)\n if np.argmax(res) == np.argmax(test_label):\n cnt_correct += 1\n cnt_tot += 1\n\n return cnt_correct / cnt_tot\n\n\nfashion_mnist = keras.datasets.fashion_mnist\n(train_images, train_labels), (test_images, test_labels) = fashion_mnist.load_data()\n\ntrain_images = np.expand_dims(train_images / 255, axis=-1)\ntest_images = np.expand_dims(test_images / 255, axis=-1)\ntrain_labels = one_hot_f(train_labels, num_classes=10)\ntest_labels = one_hot_f(test_labels, num_classes=10)\n\nLinear3.compile()\nLinear3.cuda()\ntrain_iterator = batch_iterator(batch_sz=256)\noptimizer = bgd(0.01)\noptimizer.fit(Linear3, train_images, train_labels, train_iterator, epoch=50)\nLinear3.save('Linear3_cuda')\n"
] | [
[
"numpy.expand_dims",
"numpy.argmax"
]
] |
vishalbelsare/neupy | [
"684313cdaddcad326f2169384fb15ec3aa29d991"
] | [
"tests/layers/test_reshape_layer.py"
] | [
"import numpy as np\n\nfrom neupy import layers\n\nfrom base import BaseTestCase\n\n\nclass ReshapeLayerTestCase(BaseTestCase):\n def test_reshape_layer_1d_shape(self):\n x = np.random.random((5, 4, 3, 2, 1))\n\n input_layer = layers.Input((4, 3, 2, 1))\n reshape_layer = layers.Reshape()\n input_layer > reshape_layer\n\n y = reshape_layer.output(x).eval()\n self.assertEqual(y.shape, (5, 4 * 3 * 2 * 1))\n\n def test_reshape_layer_2d_shape(self):\n x = np.random.random((5, 20))\n\n input_layer = layers.Input(20)\n reshape_layer = layers.Reshape((4, 5))\n input_layer > reshape_layer\n\n y = reshape_layer.output(x).eval()\n self.assertEqual(y.shape, (5, 4, 5))\n"
] | [
[
"numpy.random.random"
]
] |
baender/gimli | [
"eb9a2204669cf11209b9577472f61ac70217a191",
"eb9a2204669cf11209b9577472f61ac70217a191",
"eb9a2204669cf11209b9577472f61ac70217a191",
"eb9a2204669cf11209b9577472f61ac70217a191"
] | [
"pygimli/physics/traveltime/raplot.py",
"pygimli/utils/cache.py",
"doc/examples/3_dc_and_ip/plot_03_ert_2_layer_mod.py",
"doc/examples/2_seismics/plot_03_rays_layered_and_gradient_models.py"
] | [
"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"\"WRITEME\"\"\"\n\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nimport pygimli as pg\n\nfrom pygimli.viewer.mpl import createColorBar # , updateColorBar\n\nfrom .ratools import shotReceiverDistances\n\n\ndef drawTravelTimeData(ax, data, t=None):\n \"\"\"\n Draw first arrival traveltime data into mpl ax a.\n data of type \\ref DataContainer must contain sensorIdx 's' and 'g'\n and thus being numbered internally [0..n)\n \"\"\"\n x = pg.x(data.sensorPositions())\n # z = pg.z(data.sensorPositions())\n\n shots = pg.unique(pg.sort(data('s')))\n geoph = pg.unique(pg.sort(data('g')))\n\n startOffsetIDX = 0\n\n if min(min(shots), min(geoph)) == 1:\n startOffsetIDX = 1\n\n tShow = data('t')\n if t is not None:\n tShow = t\n\n ax.set_xlim([min(x), max(x)])\n ax.set_ylim([max(tShow), -0.002])\n ax.figure.show()\n\n for shot in shots:\n gIdx = pg.find(data('s') == shot)\n sensorIdx = [int(i__ - startOffsetIDX) for i__ in data('g')[gIdx]]\n ax.plot(x[sensorIdx], tShow[gIdx], 'x-')\n\n yPixel = ax.transData.inverted().transform_point((1, 1))[1] - \\\n ax.transData.inverted().transform_point((0, 0))[1]\n xPixel = ax.transData.inverted().transform_point((1, 1))[0] - \\\n ax.transData.inverted().transform_point((0, 0))[0]\n\n # draw shot points\n ax.plot(x[[int(i__ - startOffsetIDX) for i__ in shots]],\n np.zeros(len(shots)) + 8. * yPixel, 'gv', markersize=8)\n\n # draw geophone points\n ax.plot(x[[int(i__ - startOffsetIDX) for i__ in geoph]],\n np.zeros(len(geoph)) + 3. * yPixel, 'r^', markersize=8)\n\n ax.grid()\n ax.set_ylim([max(tShow), +16. * yPixel])\n ax.set_xlim([min(x) - 5. * xPixel, max(x) + 5. * xPixel])\n\n ax.set_xlabel('x-Coordinate [m]')\n ax.set_ylabel('Traveltime [ms]')\n\n\ndef plotFirstPicks(ax, data, tt=None, plotva=False, marker='x-'):\n \"\"\"Naming convention. drawFOO(ax, ... 
)\"\"\"\n pg.deprecated(\"use drawFirstPicks\")\n return drawFirstPicks(ax=ax, data=data, tt=tt, plotva=plotva,\n marker=marker)\n\n\ndef drawFirstPicks(ax, data, tt=None, plotva=False, marker='x-'):\n \"\"\"plot first arrivals as lines\"\"\"\n px = pg.x(data)\n gx = np.array([px[int(g)] for g in data(\"g\")])\n sx = np.array([px[int(s)] for s in data(\"s\")])\n if tt is None:\n tt = np.array(data(\"t\"))\n if plotva:\n tt = np.absolute(gx - sx) / tt\n\n uns = np.unique(sx)\n\n cols = plt.cm.tab10(np.arange(10))\n\n for i, si in enumerate(uns):\n ti = tt[sx == si]\n gi = gx[sx == si]\n ii = gi.argsort()\n ax.plot(gi[ii], ti[ii], marker, color=cols[i % 10])\n ax.plot(si, 0., 's', color=cols[i % 10], markersize=8)\n\n ax.grid(True)\n if plotva:\n ax.set_ylabel(\"Apparent velocity (m/s)\")\n else:\n ax.set_ylabel(\"Traveltime (s)\")\n ax.set_xlabel(\"x (m)\")\n ax.invert_yaxis()\n\n\ndef _getOffset(data, full=False):\n \"\"\"Return vector of offsets (in m) between shot and receiver.\"\"\"\n pg.deprecated('use shotReceiverDistances') # 190429 ??\n return shotReceiverDistances(data, full)\n\n\ndef showVA(data, usePos=True, ax=None, **kwargs):\n \"\"\"Show apparent velocity as image plot\n\n Parameters\n ----------\n data : pg.DataContainer()\n Datacontainer with 's' and 'g' Sensorindieces and 't' traveltimes.\n \"\"\"\n ax, _ = pg.show(ax=ax)\n gci = drawVA(ax, data=data, usePos=usePos, **kwargs)\n\n cBar = createColorBar(gci, **kwargs)\n\n return gci, cBar\n\n\ndef drawVA(ax, data, vals=None, usePos=True, pseudosection=False, **kwargs):\n \"\"\"Draw apparent velocities as matrix into ax\n\n Parameters\n ----------\n ax : mpl.Axes\n\n data : pg.DataContainer()\n Datacontainer with 's' and 'g' Sensorindieces and 't' traveltimes.\n\n usePos: bool [True]\n Use sensor positions for axes tick labels\n\n pseudosection : bool [False]\n Show in pseudosection style.\n\n vals : iterable\n Traveltimes, if None data need to contain 't' values.\n \"\"\"\n if isinstance(vals, str):\n vals = data(vals)\n \n if vals is None:\n vals = data('t')\n\n px = pg.x(data)\n gx = np.asarray([px[g] for g in data.id(\"g\")])\n sx = np.asarray([px[s] for s in data.id(\"s\")])\n\n offset = shotReceiverDistances(data, full=True)\n\n if min(vals) < 1e-10:\n print(vals)\n pg.error('zero traveltimes found.')\n va = offset / vals\n\n if pseudosection:\n midpoint = (gx + sx) / 2\n gci = pg.viewer.mpl.dataview.drawVecMatrix(ax, midpoint, offset, va,\n queeze=True,\n label=pg.unit('as'))\n else:\n gci = pg.viewer.mpl.dataview.drawVecMatrix(ax, gx, sx, va,\n squeeze=True,\n label=pg.unit('as'))\n\n # A = np.ones((data.sensorCount(), data.sensorCount())) * np.nan\n # for i in range(data.size()):\n # A[int(data('s')[i]), int(data('g')[i])] = va[i]\n # gci = ax.imshow(A, interpolation='nearest')\n # ax.grid(True)\n\n if usePos:\n xt = np.arange(0, data.sensorCount(), 50)\n ax.set_xticks(xt)\n ax.set_xticklabels([str(int(px[xti])) for xti in xt])\n ax.set_yticks(xt)\n ax.set_yticklabels([str(int(px[xti])) for xti in xt])\n\n return gci\n\n\ndef plotLines(ax, line_filename, step=1):\n xz = np.loadtxt(line_filename)\n n_points = xz.shape[0]\n if step == 2:\n for i in range(0, n_points, step):\n x = xz[i:i + step, 0]\n z = xz[i:i + step, 1]\n ax.plot(x, z, 'k-')\n if step == 1:\n ax.plot(xz[:, 0], xz[:, 1], 'k-')\n",
"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"Caching manager with function decorator.\n\nInput supports python base types and all pg.core objects with .hash() method.\nOutput supports DataContainerERT, ...\n\nTODO:\n\n * Output types:\n numpy.ndarray, pg.Mesh. pg.Vector, pg.Matrix\n\nTo use just add the decorator.\n\[email protected]\ndef myLongRunningStuff(*args, **kwargs):\n #...\n return results\n\"\"\"\nimport sys\nimport os\nimport inspect\nimport hashlib\nimport json\nimport time\n\nimport numpy as np\n\nimport pygimli as pg\n\n\ndef strHash(string):\n return int(hashlib.sha224(string.encode()).hexdigest()[:16], 16)\n\nclass Cache(object):\n def __init__(self, hashValue):\n self._value = None\n self._hash = hashValue\n self._name = CacheManager().cachingPath(str(self._hash))\n self._info = None\n self.restore()\n\n @property\n def info(self):\n if self._info is None:\n self._info = {'type': '',\n 'file': '',\n 'date': 0,\n 'dur': 0.0,\n 'restored': 0,\n 'codeinfo': '',\n 'version': '',\n 'args': '',\n 'kwargs': {},\n }\n return self._info\n\n @info.setter\n def info(self, i):\n self._info = i\n\n @property\n def value(self):\n return self._value\n\n @value.setter\n def value(self, v):\n self.info['type'] = str(type(v).__name__)\n\n # if len(self.info['type']) != 1:\n # pg.error('only single return caches supported for now.')\n # return\n\n self.info['file'] = self._name\n\n self.updateCacheInfo()\n\n if self.info['type'] == 'Mesh':\n pg.info('Save Mesh binary v2')\n v.saveBinaryV2(self._name)\n elif self.info['type'] == 'RVector':\n pg.info('Save RVector binary')\n v.save(self._name, format=pg.core.Binary)\n elif self.info['type'] == 'ndarray':\n pg.info('Save ndarray')\n np.save(self._name, v, allow_pickle=True)\n else:\n np.save(self._name, v, allow_pickle=True)\n # pg.warn('ascii save of type', self.info['type'], 'might by dangerous')\n # v.save(self._name)\n\n self._value = v\n pg.info('Cache stored:', self._name)\n\n def updateCacheInfo(self):\n with open(self._name + '.json', 'w') as of:\n json.dump(self.info, of, sort_keys=False,\n indent=4, separators=(',', ': '))\n\n def restore(self):\n \"\"\"Read data from json infos\"\"\"\n if os.path.exists(self._name + '.json'):\n\n # Fricking mpl kills locale setting to system default .. 
this went\n # horrible wrong for german 'decimal_point': ','\n pg.checkAndFixLocaleDecimal_point(verbose=False)\n\n try:\n with open(self._name + '.json') as file:\n self.info = json.load(file)\n\n # if len(self.info['type']) != 1:\n # pg.error('only single return caches supported for now.')\n\n if self.info['type'] == 'DataContainerERT':\n self._value = pg.DataContainerERT(self.info['file'],\n removeInvalid=False)\n # print(self._value)\n elif self.info['type'] == 'RVector':\n self._value = pg.Vector()\n self._value.load(self.info['file'], format=pg.core.Binary)\n elif self.info['type'] == 'Mesh':\n pg.tic()\n self._value = pg.Mesh()\n self._value.loadBinaryV2(self.info['file'] + '.bms')\n pg.debug(\"Restoring cache took:\", pg.dur(), \"s\")\n elif self.info['type'] == 'ndarray':\n self._value = np.load(self.info['file'] + '.npy',\n allow_pickle=True)\n else:\n self._value = np.load(self.info['file'] + '.npy',\n allow_pickle=True)\n\n if self.value is not None:\n self.info['restored'] = self.info['restored'] + 1\n self.updateCacheInfo()\n pg.info('Cache {3} restored ({1}s x {0}): {2}'.\\\n format(self.info['restored'],\n round(self.info['dur'], 1),\n self._name, self.info['codeinfo']))\n else:\n # default try numpy\n pg.warn('Could not restore cache of type {0}.'.format(self.info['type']))\n\n pg.debug(\"Restoring cache took:\", pg.dur(), \"s\")\n except Exception as e:\n import traceback\n traceback.print_exc(file=sys.stdout)\n print(self.info)\n pg.error('Cache restoring failed.')\n\n#@pg.singleton\nclass CacheManager(object):\n __instance = None\n __has_init = False\n\n def __new__(cls):\n if cls.__instance is None:\n cls.__instance = object.__new__(cls)\n return cls.__instance\n\n def __init__(self):\n if not self.__has_init:\n self._caches = {}\n self.__has_init = True\n\n @staticmethod\n def instance(cls):\n return cls.__instance__\n\n def cachingPath(self, fName):\n \"\"\"Create a path name for the cache\"\"\"\n if pg.rc[\"globalCache\"]:\n path = pg.getCachePath()\n else:\n path = \".cache\"\n if not os.path.exists(path):\n os.mkdir(path)\n return os.path.join(path, fName)\n\n def functInfo(self, funct):\n \"\"\"Return unique info string about the called function.\"\"\"\n return funct.__code__.co_filename + \":\" + funct.__qualname__\n\n def hash(self, funct, *args, **kwargs):\n \"\"\"\"Create a hash value\"\"\"\n pg.tic()\n functInfo = self.functInfo(funct)\n funcHash = strHash(functInfo)\n versionHash = strHash(pg.versionStr())\n codeHash = strHash(inspect.getsource(funct))\n\n argHash = 0\n for a in args:\n if isinstance(a, str):\n argHash = argHash ^ strHash(a)\n elif isinstance(a, list):\n for item in a:\n if isinstance(item, str):\n argHash = argHash ^ strHash(item)\n else:\n argHash = argHash ^ hash(item)\n else:\n argHash = argHash ^ hash(a)\n\n for k, v in kwargs.items():\n if isinstance(v, str):\n argHash = argHash ^ strHash(v)\n else:\n argHash = argHash ^ hash(v)\n\n pg.debug(\"Hashing took:\", pg.dur(), \"s\")\n return funcHash ^ versionHash ^ codeHash ^ argHash\n\n def cache(self, funct, *args, **kwargs):\n \"\"\" Create a unique cache \"\"\"\n hashVal = self.hash(funct, *args, **kwargs)\n\n cached = Cache(hashVal)\n cached.info['codeinfo'] = self.functInfo(funct)\n cached.info['version'] = pg.versionStr()\n cached.info['args'] = str(args)\n cached.info['kwargs'] = str(kwargs)\n\n return cached\n\n\ndef cache(funct):\n \"\"\"Cache decorator.\"\"\"\n def wrapper(*args, **kwargs):\n\n if '--noCache' in sys.argv or '-N' in sys.argv:\n return funct(*args, **kwargs)\n\n 
cache = CacheManager().cache(funct, *args, **kwargs)\n if cache.value is not None:\n return cache.value\n else:\n # pg.tic will not work because there is only one global __swatch__\n sw = pg.core.Stopwatch(True)\n rv = funct(*args, **kwargs)\n cache.info['date'] = time.time()\n cache.info['dur'] = sw.duration()\n try:\n cache.value = rv\n except Exception as e:\n print(e)\n pg.warn(\"Can't cache:\", rv)\n return rv\n return wrapper\n",
"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nr\"\"\"\n2D FEM modelling on two-layer example\n-------------------------------------\n\nCompare 2D FEM modelling with 1D VES sounding with and without complex\nresistivity values.\n\"\"\"\nimport numpy as np\n\nimport pygimli as pg\nimport pygimli.meshtools as mt\nfrom pygimli.physics.ert import simulate as simulateERT\nfrom pygimli.physics.ert import VESModelling, VESCModelling\nfrom pygimli.physics.ert import createERTData\n\n###############################################################################\n# First we create a data configuration of a 1D Schlumberger sounding with\n# 20 electrodes and and increasing MN/2 electrode spacing from 1m to 24m.\nscheme = createERTData(pg.utils.grange(start=1, end=24, dx=1, n=10, log=True),\n sounding=True)\n\n###############################################################################\n# First we create a geometry that covers the sought geometry.\n# We start with a 2 dimensional simulation world\n# of a bounding box [-200, -100] [200, 0], the layer at -5m and some suitable\n# requested cell sizes.\nplc = mt.createWorld(start=[-200, -100], end=[200, 0],\n layers=[-10], area=[5.0, 500])\n\n###############################################################################\n# To achieve a necessary numerical accuracy, we need some local mesh refinement\n# in the vicinity of the electrodes. However, since we don't need the\n# electrode (aka sensor) positions to be present as nodes in the geometry, we only add forced mesh\n# nodes near the electrode positions, right below the earths surface.\nfor s in scheme.sensors():\n plc.createNode(s + [0.0, -0.2])\n\n# Now we can create our forward modeling mesh.\nmesh = mt.createMesh(plc, quality=33)\n\npg.show(mesh, data=mesh.cellMarkers(), label='Marker', showMesh=True)\n\n###############################################################################\n# It is usually a good idea to calculate with a p2-refined mesh.\n# However, you should be careful for larger meshes since the numerical efford\n# will be highly increased.\nmesh = mesh.createP2()\n\n###############################################################################\n# Perform the modeling using the static convenience call for ERT.\n# Res is the resistivity mapping regarding the regions of the given geometry.\n# Region with marker 1 is the upper layer, maker 2 is the background\ndata = simulateERT(mesh, res=[[1, 100.0], [2, 1.0]],\n scheme=scheme, verbose=False)\n\n###############################################################################\n# 1D VES\nx = pg.x(scheme)\nab2 = (x[scheme('b')] - x[scheme('a')])/2\nmn2 = (x[scheme('n')] - x[scheme('m')])/2\nves = VESModelling(ab2=ab2, mn2=mn2)\n\n###############################################################################\n# Plot results\nfig, ax = pg.plt.subplots(1, 1)\nax.plot(ab2, data('rhoa'), '-o', label='2D (FEM)')\nax.plot(ab2, ves.response([10.0, 100.0, 1.0]), '-x', label='1D (VES)')\nax.set_xlabel('AB/2 (m)')\nax.set_ylabel('Apparent resistivity ($\\Omega$m)')\nax.grid(1)\nax.legend()\n\n###############################################################################\n# We can easily repeat the above example using a complex resistivity model.\n# defining amplitude and phase in negative mrad.\namps = np.array([100.0, 1.0])\nphases = np.array([1.0, 10.0])\nres = amps - 1j * amps * np.sin(phases/1000.)\ndata = simulateERT(mesh, res=[[1, res[0]], [2, res[1]]],\n scheme=scheme, verbose=False)\n\nves = VESCModelling(ab2=ab2, mn2=mn2)\nrc = 
ves.response([10.0, 100.0, 1.0, phases[0]/1000, phases[1]/1000])\n\n###############################################################################\n# We can apply the default drawing routines for 1D VES data as well.\nfig, ax = pg.plt.subplots(1, 1)\nves.drawData(ax, pg.cat(data('rhoa'), -data('phia')),\n labels=[r'$\\varrho_a$ 2D FEM', r'$\\varphi_a$ 2D FEM'],\n marker='o', linestyle='none')\nves.drawData(ax, rc,\n labels=[r'$\\varrho_a$ 1D VES', r'$\\varphi_a$ 1D VES'],\n marker=None)\n\nnp.testing.assert_approx_equal(data('rhoa')[0], 30.66351249, significant=5)\nnp.testing.assert_approx_equal(-data('phia')[0], 0.00132173865, significant=5)\n\n",
"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"\nRaypaths in layered and gradient models\n=======================================\n\nThis example performs raytracing for a two-layer and a vertical gradient model\nand compares the resulting traveltimes to existing analytical solutions. An\napproximation of the raypath is found by finding the shortest-path through a\ngrid of nodes. The possible angular coverage is small when only corner points\nof a cell (primary nodes) are used for this purpose. The angular coverage, and\nhence the numerical accuracy of traveltime calculations, can be significantly\nimproved by a few secondary nodes along the cell edges. Details can be found in\n`Giroux & Larouche (2013) <https://doi.org/10.1016/j.cageo.2012.12.005>`_.\n\"\"\"\n# sphinx_gallery_thumbnail_number = 3\nfrom math import asin, tan\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pygimli as pg\nimport pygimli.meshtools as mt\nfrom pygimli.viewer.mpl import drawMesh\n\nfrom pygimli.physics import TravelTimeManager\n\n###############################################################################\n# Two-layer model\n# ---------------\n# We start by building a regular grid.\n\nmesh_layered = mt.createGrid(\n np.arange(-20, 155, step=5, dtype=float), np.linspace(-60, 0, 13))\n\n###############################################################################\n# We now construct the velocity vector for the two-layer case by iterating over\n# the cells. Cells above 25 m depth are assigned :math:`v = 1000` m/s and cells\n# below are assigned :math:`v = 3000` m/s.\n\nvel_layered = np.zeros(mesh_layered.cellCount())\nfor cell in mesh_layered.cells():\n if cell.center().y() < -25:\n vel = 3000.0\n else:\n vel = 1000.0\n vel_layered[cell.id()] = vel\n\npg.show(mesh_layered, vel_layered, label=\"Velocity (m/s)\")\n\n###############################################################################\n# We now define the analytical solution. The traveltime at a given offset `x`\n# is the minimum of the direct and critically refracted wave, where the latter\n# is governed by Snell's law.\n\ndef analyticalSolution2Layer(x, zlay=25, v1=1000, v2=3000):\n \"\"\"Analytical solution for 2 layer case.\"\"\"\n tdirect = np.abs(x) / v1 # direct wave\n alfa = asin(v1 / v2) # critically refracted wave angle\n xreflec = tan(alfa) * zlay * 2. # first critically refracted\n trefrac = (x - xreflec) / v2 + xreflec * v2 / v1**2\n return np.minimum(tdirect, trefrac)\n\n\n###############################################################################\n# Vertical gradient model\n# -----------------------\n# We first create an unstructured mesh:\n\nsensors = np.arange(131, step=10.0)\nplc = mt.createWorld([-20, -60], [150, 0], worldMarker=False)\nfor pos in sensors:\n plc.createNode([pos, 0.0])\nmesh_gradient = mt.createMesh(plc, quality=33, area=3)\n\n###############################################################################\n# A vertical gradient model, i.e. :math:`v(z) = a + bz`, is defined per cell.\n\na = 1000\nb = 100\n\nvel_gradient = []\nfor node in mesh_gradient.nodes():\n vel_gradient.append(a + b * abs(node.y()))\nvel_gradient = pg.meshtools.nodeDataToCellData(mesh_gradient,\n np.array(vel_gradient))\npg.show(mesh_gradient, vel_gradient, label=\"Velocity (m/s)\")\n\n###############################################################################\n# The traveltime for a gradient velocity model is given by:\n#\n# .. 
math::\n#\n# v = \\left|b^{-1}cosh^{-1}\\left(1 + \\frac{b^2 x^2}{2a^2}\\right)\\right|\n#\n\ndef analyticalSolutionGradient(x, a=1000, b=100):\n \"\"\"Analytical solution for gradient model.\"\"\"\n tdirect = np.abs(x) / a # direct wave\n tmp = 1 + ((b**2 * np.abs(x)**2) / (2 * a**2))\n trefrac = np.abs(b**-1 * np.arccosh(tmp))\n return np.minimum(tdirect, trefrac)\n\n###############################################################################\n# The loop below calculates the travel times and makes the comparison plot.\n\nfig, ax = plt.subplots(3, 2, figsize=(10, 10), sharex=True)\n\nfor j, (case, mesh, vel) in enumerate(zip([\"layered\", \"gradient\"],\n [mesh_layered, mesh_gradient],\n [vel_layered, vel_gradient])):\n pg.boxprint(case)\n if case == \"gradient\":\n ana = analyticalSolutionGradient\n elif case == \"layered\":\n ana = analyticalSolution2Layer\n for boundary in mesh.boundaries():\n boundary.setMarker(0)\n\n xmin, xmax = mesh.xmin(), mesh.xmax()\n mesh.createNeighborInfos()\n\n # In order to use the Dijkstra, we extract the surface positions >0\n mx = pg.x(mesh)\n my = pg.y(mesh)\n px = np.sort(mx[my == 0.0])\n\n # A data container with index arrays named s (shot) and g (geophones) is\n # created and filled with the positions and shot/geophone indices.\n data = pg.DataContainer()\n data.registerSensorIndex('s')\n data.registerSensorIndex('g')\n\n for i, pxi in enumerate(px):\n data.createSensor([pxi, 0.0])\n if pxi == 0.0:\n source = i\n\n nData = len(px)\n data.resize(nData)\n data['s'] = [source] * nData # only one shot at first sensor\n data['g'] = range(nData) # and all sensors are receiver geophones\n\n # Draw initial mesh with velocity distribution\n pg.show(mesh, vel, ax=ax[0, j], label=\"Velocity (m/s)\", hold=True,\n logScale=False, cMap=\"summer_r\", coverage=0.7)\n drawMesh(ax[0, j], mesh, color=\"white\", lw=0.21)\n\n # We compare the accuracy for 0-5 secondary nodes\n sec_nodes = [0, 1, 5]\n t_all = []\n durations = []\n paths = []\n\n mgr = TravelTimeManager()\n\n cols = [\"orangered\", \"blue\", \"black\"]\n recs = [1, 3, 8, 13]\n\n for i, n in enumerate(sec_nodes):\n\n # Perform traveltime calculations and log time with pg.tic() & pg.toc()\n pg.tic()\n res = mgr.simulate(vel=vel, scheme=data, mesh=mesh, secNodes=n)\n # We need to copy res['t'] here because res['t'] is a reference to\n # an array in res, and res will be removed in the next iteration.\n # Unfortunately, we don't have any reverence counting for core objects yet.\n t_all.append(res['t'].array())\n durations.append(pg.dur())\n pg.toc(\"Raytracing with %d secondary nodes:\" % n)\n\n for r, p in enumerate(recs):\n if r == 0:\n lab = \"Raypath with %d sec nodes\" % n\n else:\n lab = None\n\n recNode = mgr.fop.mesh().findNearestNode([sensors[p], 0.0])\n sourceNode = mgr.fop.mesh().findNearestNode([0.0, 0.0])\n\n path = mgr.fop.dijkstra.shortestPath(sourceNode, recNode)\n points = mgr.fop.mesh().positions(withSecNodes=True)[path]\n ax[0, j].plot(pg.x(points), pg.y(points), cols[i], label=lab)\n\n t_ana = ana(px)\n\n # Upper subplot\n ax[1, j].plot(px, t_ana * 1000, label=\"Analytical solution\")\n\n for i, n in enumerate(sec_nodes):\n ax[1, j].plot(px, t_all[i] * 1000,\n label=\"Dijkstra (%d sec nodes, %.2f s)\" % (n, durations[i]))\n\n ax[2, j].plot(px, np.zeros_like(px), label=\"Zero line\") # to keep color cycle\n\n for i, n in enumerate(sec_nodes):\n ax[2, j].plot(px, np.abs(t_all[i] - t_ana) * 1000)\n\n ax[1, j].legend()\n\n # Draw sensor positions for the selected receivers\n for p in recs:\n 
ax[0, j].plot(sensors[p], 0.0, \"kv\", ms=10)\n ax[0, j].plot(0.0, 0.0, \"ro\", ms=10)\n ax[0, j].set_ylim(mesh.ymin(), 2)\n\nax[0, 0].set_title(\"Two-layer model\")\nax[0, 1].set_title(\"Vertical gradient model\")\nax[0, 0].legend()\nax[0, 0].set_ylabel(\"y (m)\")\nax[1, 0].set_ylabel(\"Traveltime (ms)\")\nax[2, 0].set_ylabel(\"Absolute difference to\\nanalytical solution (ms)\")\nax[2, 0].set_xlabel(\"x (m)\")\nfig.tight_layout()\n"
] | [
[
"numpy.arange",
"numpy.absolute",
"numpy.loadtxt",
"numpy.unique"
],
[
"numpy.load",
"numpy.save"
],
[
"numpy.array",
"numpy.sin"
],
[
"numpy.minimum",
"numpy.abs",
"numpy.linspace",
"numpy.arange",
"numpy.arccosh",
"matplotlib.pyplot.subplots",
"numpy.sort",
"numpy.zeros_like",
"numpy.array"
]
] |
saraswat/TensorLog | [
"c56cebfa33b5123d5340a7b429e333da09d223d8",
"c56cebfa33b5123d5340a7b429e333da09d223d8"
] | [
"tensorlog/testxcomp.py",
"tensorlog/dataset.py"
] | [
"# (C) William W. Cohen and Carnegie Mellon University, 2017\n\nimport logging\nimport numpy as np\nimport os\nimport unittest\nimport sys\nimport collections\nimport tempfile\n\nfrom tensorlog import xctargets\n\nif xctargets.tf:\n import tensorflow as tf\n from tensorlog import tensorflowxcomp\nelse: \n tensorflowxcomp=None\nif xctargets.theano:\n import theano\n from tensorlog import theanoxcomp\nelse:\n theanoxcomp=None\n\nfrom tensorlog import bpcompiler\nfrom tensorlog import comline\nfrom tensorlog import dataset\nfrom tensorlog import declare\nfrom tensorlog import matrixdb\nfrom tensorlog import learn\nfrom tensorlog import mutil\nfrom tensorlog import parser\nfrom tensorlog import program\nfrom tensorlog import simple\nfrom tensorlog import testtensorlog\nfrom tensorlog import funs\nfrom tensorlog import ops\nfrom tensorlog import learnxcomp as learnxc\nfrom tensorlog.expt import Expt\n\nif xctargets.tf:\n tf.logging.set_verbosity(tf.logging.WARN)\n \nTESTED_COMPILERS = []\nTESTED_LEARNERS = {}\nif xctargets.theano:\n for c in [\n theanoxcomp.DenseMatDenseMsgCrossCompiler,\n theanoxcomp.SparseMatDenseMsgCrossCompiler\n ]:\n TESTED_COMPILERS.append(c)\n TESTED_LEARNERS[c]=theanoxcomp.FixedRateGDLearner\nif xctargets.tf:\n for c in [\n tensorflowxcomp.DenseMatDenseMsgCrossCompiler,\n tensorflowxcomp.SparseMatDenseMsgCrossCompiler,\n ]:\n TESTED_COMPILERS.append(c)\n TESTED_LEARNERS[c]=tensorflowxcomp.FixedRateGDLearner\n \nRUN_OLD_INFERENCE_TESTS = False\nSAVE_SUMMARIES = False\n\ndef close_cross_compiler(xc):\n xc.close()\n if xctargets.tf and isinstance(xc,tensorflowxcomp.TensorFlowCrossCompiler):\n tf.reset_default_graph()\n\n\nclass TestXCSmallProofs(testtensorlog.TestSmallProofs):\n\n def test_if(self):\n self.xcomp_check(['p(X,Y):-spouse(X,Y).'], 'p(i,o)', 'william', {'susan':1.0})\n\n def test_failure(self):\n self.xcomp_check(['p(X,Y):-spouse(X,Y).'], 'p(i,o)', 'lottie', {matrixdb.NULL_ENTITY_NAME:1.0})\n\n def test_reverse_if(self):\n self.xcomp_check(['p(X,Y):-sister(Y,X).'], 'p(i,o)', 'rachel', {'william':1.0})\n\n def test_or(self):\n self.xcomp_check(['p(X,Y):-spouse(X,Y).', 'p(X,Y):-sister(X,Y).'], 'p(i,o)', 'william',\n {'susan':1.0, 'rachel':1.0, 'lottie':1.0, 'sarah':1.0})\n\n def test_chain(self):\n self.xcomp_check(['p(X,Z):-spouse(X,Y),sister(Y,Z).'], 'p(i,o)', 'susan',\n {'rachel':1.0, 'lottie':1.0, 'sarah':1.0})\n self.xcomp_check(['p(X,Z):-sister(X,Y),child(Y,Z).'], 'p(i,o)', 'william',\n {'charlotte':1.0, 'lucas':1.0, 'poppy':1.0, 'caroline':1.0, 'elizabeth':1.0})\n\n def test_mid(self):\n self.xcomp_check(['p(X,Y):-sister(X,Y),child(Y,Z).'], 'p(i,o)', 'william',\n {'sarah': 1.0, 'rachel': 2.0, 'lottie': 2.0})\n\n def test_nest(self):\n self.xcomp_check(['s(X,Y):-spouse(X,Y).','t(X,Z):-spouse(X,Y),s(Y,Z).'], 't(i,o)', 'susan', {'susan': 1.0})\n\n def test_back1(self):\n # fails for tensorflowxcomp\n self.xcomp_check(['p(X,Y):-spouse(X,Y),sister(X,Z).'], 'p(i,o)', 'william', {'susan': 3.0})\n\n def test_back2(self):\n self.xcomp_check(['p(X,Y):-spouse(X,Y),sister(X,Z1),sister(X,Z2).'],'p(i,o)','william',{'susan': 9.0})\n\n def test_rec1(self):\n program.DEFAULT_MAXDEPTH=4\n self.xcomp_check(['p(X,Y):-spouse(X,Y).','p(X,Y):-p(Y,X).'], 'p(i,o)','william',{'susan': 5.0})\n program.DEFAULT_MAXDEPTH=10\n self.xcomp_check(['p(X,Y):-spouse(X,Y).','p(X,Y):-p(Y,X).'], 'p(i,o)','william',{'susan': 11.0})\n\n def test_const_output(self):\n self.xcomp_check(['sis(X,W):-assign(W,william),child(X,Y).'], 'sis(i,o)', 'sarah', {'william': 1.0})\n 
self.xcomp_check(['sis(X,W):-assign(W,william),child(X,Y).'], 'sis(i,o)', 'lottie', {'william': 2.0})\n\n def test_const_chain1(self):\n self.xcomp_check(['p(X,S) :- assign(S,susan),sister(X,Y),child(Y,Z).'],'p(i,o)','william',{'susan': 5.0})\n\n def test_const_chain2(self):\n self.xcomp_check(['p(X,Pos) :- assign(Pos,pos),child(X,Y),young(Y).'],'p(i,o)','sarah',{'pos':1.0})\n self.xcomp_check(['p(X,Pos) :- assign(Pos,pos),child(X,Y),young(Y).'],'p(i,o)','lottie',{'pos':2.0})\n\n def test_alt_chain(self):\n self.xcomp_check(['p(X,W) :- spouse(X,W),sister(X,Y),child(Y,Z).'],'p(i,o)','william',{'susan': 5.0})\n pass\n\n def test_proppr1(self):\n w = 7*self.db.onehot('r1')+3*self.db.onehot('r2')\n self.proppr_xcomp_check(w,['p(X,Y):-sister(X,Y) {r1}.','p(X,Y):-spouse(X,Y) {r2}.'],'p(i,o)',\n 'william', {'sarah': 7.0, 'rachel': 7.0, 'lottie': 7.0, 'susan': 3.0})\n\n def test_proppr2(self):\n w = 3*self.db.onehot('r2')\n self.proppr_xcomp_check(w,['p(X,Y):-spouse(Y,X) {r2}.'],'p(i,o)',\n 'susan', {'william': 3.0})\n\n def test_reuse1(self):\n self.xcomp_check(['p(X,Y) :- r(X,Z),r(Z,Y).', 'r(X,Y):-spouse(X,Y).'], 'p(i,o)', 'william',\n {'william':1.0})\n\n def _removeZeros(self, sdict):\n if True: return sdict\n e = sdict[None]\n ret = dict([ (k,v-e) for (k,v) in list(sdict.items()) if v != e])\n z = sum(ret.values())\n for k in ret: ret[k] = ret[k]/z\n return ret\n\n def xcomp_check(self,ruleStrings,mode_string,input_symbol,expected_result_dict,compare=False):\n self._xcomp_check('vanilla',None,ruleStrings,mode_string,input_symbol,expected_result_dict,compare)\n\n def proppr_xcomp_check(self,weightVec,ruleStrings,mode_string,input_symbol,expected_result_dict):\n self._xcomp_check('proppr',weightVec,ruleStrings,mode_string,input_symbol,expected_result_dict)\n\n def _xcomp_check(self,progType,weightVec,ruleStrings,mode_string,input_symbol,expected_result_dict,compare=False):\n # run the base class check to see that the inference is correct\n if RUN_OLD_INFERENCE_TESTS:\n if progType=='proppr':\n self.proppr_inference_check(weightVec,ruleStrings,mode_string,input_symbol,expected_result_dict)\n else:\n self.inference_check(ruleStrings,mode_string,input_symbol,expected_result_dict)\n # setup the next round of tests by compiling a tensorlog\n # Program - this code is lifted from the testtensorlog\n # inference routines\n print('xcomp inference for mode',mode_string,'on input',input_symbol)\n testtensorlog.softmax_normalize(expected_result_dict)\n rules = parser.RuleCollection()\n for r in ruleStrings:\n rules.add(parser.Parser().parseRule(r))\n if progType=='proppr':\n prog = program.ProPPRProgram(db=self.db,rules=rules,weights=weightVec)\n else:\n prog = program.Program(db=self.db,rules=rules)\n for compilerClass in TESTED_COMPILERS:\n #cross-compile the function\n xc = compilerClass(prog)\n # evaluate the function and get the output y\n #xc.show()\n print('== performing eval with',compilerClass,'==')\n inferenceFun = xc.inferenceFunction(mode_string)\n y = inferenceFun(prog.db.onehot(input_symbol))\n # print 'input',xc.getInputName(mode_string),'args,fun\n # =',xc.inference(mode_string) theano output will a be (probably\n # dense) message, so just compare and check that the maximal\n # elements from these two dicts are the same\n actual_result_dict = self.db.rowAsSymbolDict(y)\n self.check_maxes_in_dicts(actual_result_dict, expected_result_dict)\n # check it's normalized\n l1_error = abs(sum(actual_result_dict.values()) - 1.0)\n #print 
'l1_error',l1_error,'actual_result_dict',actual_result_dict,'expected_result_dict',expected_result_dict\n self.assertTrue( l1_error < 0.0001)\n # also test proofCountFun\n proofCountFun = xc.proofCountFunction(mode_string)\n pc = proofCountFun(prog.db.onehot(input_symbol))\n # theano output will a be (probably dense) message, so\n # just compare that maximal elements from these two dicts\n # are the same\n pc_result_dict = self.db.rowAsSymbolDict(pc)\n if len(pc_result_dict)>0:\n self.check_maxes_in_dicts(pc_result_dict, expected_result_dict)\n print('== eval checks passed ==')\n close_cross_compiler(xc)\n\n def check_maxes_in_dicts(self,actual,expected):\n def maximalElements(d):\n m = max(d.values())\n return set(k for k in d if d[k]==m)\n actualMaxes = maximalElements(actual)\n expectedMaxes = maximalElements(expected)\n print('actual',actualMaxes,'expected',expectedMaxes)\n for a in actualMaxes:\n self.assertTrue(a in expectedMaxes)\n for a in expectedMaxes:\n self.assertTrue(a in actualMaxes)\n\n\nclass TestXCGrad(testtensorlog.TestGrad):\n\n def setUp(self):\n self.db = matrixdb.MatrixDB.loadFile(os.path.join(testtensorlog.TEST_DATA_DIR,'fam.cfacts'))\n\n def test_if(self):\n rules = ['p(X,Y):-sister(X,Y).']\n mode = 'p(i,o)'\n params = [('sister',2)]\n self.xgrad_check(rules, mode, params,\n [('william',['rachel','sarah'])],\n {'sister(william,rachel)': +1,'sister(william,sarah)': +1,'sister(william,lottie)': -1})\n self.xgrad_check(rules, mode, params,\n [('william',['lottie'])],\n {'sister(william,rachel)': -1,'sister(william,lottie)': +1})\n\n def test_if2(self):\n rules = ['p(X,Y):-sister(X,Y).']\n mode = 'p(i,o)'\n params = [('sister',2)]\n self.xgrad_check(rules, mode, params,\n [('william',['rachel','sarah']), ('william',['rachel','sarah'])],\n {'sister(william,rachel)': +1,'sister(william,sarah)': +1,'sister(william,lottie)': -1})\n self.xgrad_check(rules, mode, params,\n [('william',['lottie']), ('william',['lottie'])],\n {'sister(william,rachel)': -1,'sister(william,lottie)': +1})\n\n def test_reverse_if(self):\n rules = ['p(X,Y):-parent(Y,X).']\n mode = 'p(i,o)'\n params = [('parent',2)]\n self.xgrad_check(rules, mode, params,\n [('lottie',['charlotte'])],\n {'parent(charlotte,lottie)': +1,'parent(lucas,lottie)': -1})\n\n def test_chain1(self):\n rules = ['p(X,Z):-sister(X,Y),child(Y,Z).']\n mode = 'p(i,o)'\n self.xgrad_check(rules,mode,\n [('sister',2)],\n [('william',['caroline','elizabeth'])],\n {'sister(william,rachel)': +1,'sister(william,lottie)': -1})\n self.xgrad_check(rules,mode,\n [('child',2)],\n [('william',['caroline','elizabeth'])],\n {'child(rachel,elizabeth)': +1,'child(lottie,lucas)': -1})\n self.xgrad_check(rules,mode,\n [('child',2),('sister',2)],\n [('william',['caroline','elizabeth'])],\n {'child(rachel,elizabeth)': +1,'child(lottie,lucas)': -1, 'sister(william,rachel)': +1,'sister(william,lottie)': -1})\n\n def test_chain2(self):\n rules = ['p(X,Z):-spouse(X,Y),sister(Y,Z).']\n mode = 'p(i,o)'\n self.xgrad_check(rules,mode,\n [('sister',2)],\n [('susan',['rachel'])],\n {'sister(william,rachel)': +1,'sister(william,lottie)': -1})\n\n\n def test_call1(self):\n rules = ['q(X,Y):-sister(X,Y).','p(Z,W):-q(Z,W).']\n mode = 'p(i,o)'\n params = [('sister',2)]\n self.xgrad_check(rules, mode, params,\n [('william',['rachel','sarah'])],\n {'sister(william,rachel)': +1,'sister(william,sarah)': +1,'sister(william,lottie)': -1})\n self.xgrad_check(rules, mode, params,\n [('william',['lottie'])],\n {'sister(william,rachel)': -1,'sister(william,lottie)': +1})\n\n 
def test_call2(self):\n rules = ['q(X,Y):-sister(X,Y).','p(Z,W):-r(Z,W).','r(Z,W):-q(Z,W).']\n mode = 'p(i,o)'\n params = [('sister',2)]\n self.xgrad_check(rules, mode, params,\n [('william',['rachel','sarah'])],\n {'sister(william,rachel)': +1,'sister(william,sarah)': +1,'sister(william,lottie)': -1})\n self.xgrad_check(rules, mode, params,\n [('william',['lottie'])],\n {'sister(william,rachel)': -1,'sister(william,lottie)': +1})\n\n def test_split(self):\n rules = ['p(X,Y):-sister(X,Y),child(Y,Z),young(Z).']\n mode = 'p(i,o)'\n params = [('child',2)]\n self.xgrad_check(rules, mode, params,\n [('william',['lottie'])],\n {'child(lottie,lucas)': +1,'child(lottie,charlotte)': +1,'child(sarah,poppy)': -1})\n params = [('sister',2)]\n self.xgrad_check(rules, mode, params,\n [('william',['lottie'])],\n {'sister(william,lottie)': +1,'sister(william,sarah)': -1})\n\n def test_or(self):\n rules = ['p(X,Y):-child(X,Y).', 'p(X,Y):-sister(X,Y).']\n mode = 'p(i,o)'\n params = [('sister',2)]\n self.xgrad_check(rules, mode, params,\n [('william',['charlie','rachel'])],\n {'sister(william,rachel)': +1,'sister(william,sarah)': -1,'sister(william,lottie)': -1})\n params = [('child',2)]\n self.xgrad_check(rules, mode, params,\n [('william',['charlie','rachel'])],\n {'child(william,charlie)': +1,'child(william,josh)': -1})\n params = [('child',2),('sister',2)]\n self.xgrad_check(rules, mode, params,\n [('william',['charlie','rachel'])],\n {'child(william,charlie)': +1,'child(william,josh)': -1,'sister(william,rachel)': +1,'sister(william,sarah)': -1})\n\n\n def test_weighted_vec(self):\n rules = ['p(X,Y):-sister(X,Y),assign(R,r1),feat(R).','p(X,Y):-child(X,Y),assign(R,r2),feat(R).']\n mode = 'p(i,o)'\n params = [('sister',2)]\n self.xgrad_check(rules, mode, params,\n [('william',['rachel','charlie'])],\n {'sister(william,rachel)': +1,'sister(william,sarah)': -1})\n params = [('child',2)]\n self.xgrad_check(rules, mode, params,\n [('william',['rachel','charlie'])],\n {'child(william,charlie)': +1,'child(william,josh)': -1})\n params = [('feat',1)]\n self.xgrad_check(rules, mode, params,\n [('william',['josh','charlie'])],\n {'feat(r1)': -1,'feat(r2)': +1})\n self.xgrad_check(rules, mode, params,\n [('william',['rachel','sarah','lottie'])],\n {'feat(r1)': +1,'feat(r2)': -1})\n\n def learnxc_check(self,rule_strings,mode_string,params,xyPairs,expected):\n print(\"XLearner loss/grad eval\")\n rules = testtensorlog.rules_from_strings(rule_strings)\n prog = program.Program(db=self.db,rules=rules)\n mode = declare.ModeDeclaration(mode_string)\n prog.db.clearParameterMarkings()\n for (functor,arity) in params:\n prog.db.markAsParameter(functor,arity)\n # TODO: not working yet for mini-batches so check each example\n # individually\n for x,ys in xyPairs:\n data = testtensorlog.DataBuffer(self.db)\n data.add_data_symbols(x,ys)\n for compilerClass in TESTED_COMPILERS:\n xc = compilerClass(prog)\n print('learner check for compiler',xc.__class__)\n learner = learnxc.XLearner(prog,xc)\n paramsWithUpdates = learner.crossEntropyGrad(mode,data.get_x(),data.get_y())\n updates_with_string_keys = {}\n for (functor,arity),up in paramsWithUpdates:\n print('testxcomp update for',functor,arity,'is',up)\n upDict = prog.db.matrixAsPredicateFacts(functor,arity,up)\n print('upDict',upDict)\n for fact,grad_of_fact in list(upDict.items()):\n # need to flip for cross-compilers\n updates_with_string_keys[str(fact)] = -grad_of_fact\n self.check_directions(updates_with_string_keys,expected)\n \n\n def 
xgrad_check(self,rule_strings,mode_string,params,xyPairs,expected):\n print(\"direct loss/grad eval\")\n rules = testtensorlog.rules_from_strings(rule_strings)\n prog = program.Program(db=self.db,rules=rules)\n prog.db.clearParameterMarkings()\n for (functor,arity) in params:\n prog.db.markAsParameter(functor,arity)\n for x,ys in xyPairs:\n data = testtensorlog.DataBuffer(self.db)\n data.add_data_symbols(x,ys)\n for compilerClass in TESTED_COMPILERS:\n xc = compilerClass(prog)\n print('grad check for compiler',xc.__class__)\n gradFun = xc.dataLossGradFunction(mode_string)\n updates_with_string_keys = {}\n paramsWithUpdates = gradFun(data.get_x(),data.get_y())\n for (functor,arity),up in paramsWithUpdates:\n upDict = prog.db.matrixAsPredicateFacts(functor,arity,up)\n for fact,grad_of_fact in list(upDict.items()):\n # need to flip for cross-compilers\n updates_with_string_keys[str(fact)] = -grad_of_fact\n self.check_directions(updates_with_string_keys,expected)\n self.learnxc_check(rule_strings,mode_string,params,xyPairs,expected)\n close_cross_compiler(xc)\n\nclass TestXCProPPR(testtensorlog.TestProPPR):\n\n def setUp(self):\n super(TestXCProPPR,self).setUp()\n \n def debug(self):\n return self\n\n def evalxc(self,xc,input):\n inferenceFun = xc.inferenceFunction('predict/io')\n print(inferenceFun)\n rawPred = inferenceFun(input)\n # trim small numbers to zero\n pred = mutil.mapData(lambda d:np.clip((d - 1e-5),0.00,9999.99), rawPred)\n pred.eliminate_zeros()\n return pred\n\n def testNativeRow(self):\n for compilerClass in TESTED_COMPILERS:\n xc = compilerClass(self.prog)\n for i in range(self.numExamples):\n pred = self.evalxc(xc, self.X.getrow(i))\n d = self.prog.db.rowAsSymbolDict(pred)\n uniform = {'pos':0.5,'neg':0.5}\n self.check_dicts(d,uniform)\n close_cross_compiler(xc)\n\n def testNativeMatrix(self):\n\n for compilerClass in TESTED_COMPILERS:\n xc = compilerClass(self.prog)\n xc.ensureCompiled(self.mode,inputs=None)\n pred = self.prog.eval(self.mode,[self.X])\n d0 = self.prog.db.matrixAsSymbolDict(pred)\n for i,d in list(d0.items()):\n uniform = {'pos':0.5,'neg':0.5,}\n self.check_dicts(d,uniform)\n close_cross_compiler(xc)\n\n def testGradVector(self):\n data = testtensorlog.DataBuffer(self.prog.db)\n X,Y = testtensorlog.matrixAsTrainingData(self.labeledData,'train',2)\n learner = learn.OnePredFixedRateGDLearner(self.prog)\n for compilerClass in TESTED_COMPILERS:\n xc = compilerClass(self.prog)\n self.prog.db.markAsParameter('weighted',1)\n #xc.compile(self.mode)\n gradFun = xc.dataLossGradFunction('predict/io')\n for i in range(X.shape[0]):\n print(\"example\",i)\n \n updates = learner.crossEntropyGrad(declare.ModeDeclaration('predict(i,o)'),X[i],Y[i])\n w0 = updates[('weighted',1)].sum(axis=0)\n print(w0)\n \n updates = gradFun(X[i],Y[i])\n paramKey,w = updates[0]\n print(w)\n # w is different from the w in the corresponding testtensorlog test,\n # which is a crossEntropy gradient for each example, but it should have\n # opposite directions\n nrow,ncol = w.shape\n for i in range(nrow):\n for j in range(ncol):\n self.assertTrue((w[i,j]==0) == (w0[i,j]==0))\n self.assertTrue(w[i,j] * w0[i,j] <= 0)\n\n def testGradMatrix(self):\n data = testtensorlog.DataBuffer(self.prog.db)\n X,Y = testtensorlog.matrixAsTrainingData(self.labeledData,'train',2)\n learner = learn.OnePredFixedRateGDLearner(self.prog)\n updates = learner.crossEntropyGrad(declare.ModeDeclaration('predict(i,o)'),X,Y)\n w0 = updates[('weighted',1)].sum(axis=0)\n for compilerClass in TESTED_COMPILERS:\n xc = 
compilerClass(self.prog)\n self.prog.db.markAsParameter('weighted',1)\n #xc.compile(self.mode)\n gradFun = xc.dataLossGradFunction('predict/io')\n updates = gradFun(X,Y)\n paramKey,w = updates[0]\n # w is different from the w in the corresponding testtensorlog test,\n # which is a crossEntropy gradient for each example, but it should have\n # opposite directions\n nrow,ncol = w.shape\n for i in range(nrow):\n for j in range(ncol):\n self.assertTrue((w[i,j]==0) == (w0[i,j]==0),\"i=%d,j=%d,w=%g,w0=%g\"%(i,j,w[i,j],w0[i,j]))\n self.assertTrue(w[i,j] * w0[i,j] <= 0.0,\"i=%d,j=%d,w=%g,w0=%g\"%(i,j,w[i,j],w0[i,j]))\n close_cross_compiler(xc)\n\n def testMultiLearn1(self):\n pass\n\n def testLearn(self):\n mode = declare.ModeDeclaration('predict(i,o)')\n modestr = 'predict/io'\n X,Y = testtensorlog.matrixAsTrainingData(self.labeledData,'train',2)\n for compilerClass in TESTED_COMPILERS:\n self.prog.setRuleWeights()\n self.prog.setFeatureWeights()\n if SAVE_SUMMARIES:\n xc = compilerClass(self.prog,compilerClass.__name__+\".summary\")\n else:\n xc = compilerClass(self.prog)\n self.prog.db.markAsParameter('weighted',1)\n \n v = self.prog.db.getParameter('weighted',1)\n d = self.prog.db.rowAsSymbolDict(v)\n # sanity check a couple of values\n self.assertTrue(d['little_pos'] == d['little_neg'])\n self.assertTrue(d['big_pos'] == d['big_neg'])\n \n# optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.1)\n learner = TESTED_LEARNERS[compilerClass](self.prog,xc=xc,rate=0.1,epochs=20)\n\n lossFun = xc.dataLossFunction('predict/io')\n loss0 = lossFun(X,Y)\n print('initial train data loss',loss0)\n TX,TY = testtensorlog.matrixAsTrainingData(self.labeledData,'test',2)\n loss1 = lossFun(TX,TY)\n print('initial test data loss',loss1)\n P = learner.predict('predict/io',X)\n #acc0 = xc.accuracy('predict/io',X,Y)\n acc0 = learner.accuracy(Y,P)\n print('initial train accuracy',acc0)\n TP = learner.predict('predict/io',TX)\n #acc1 = xc.accuracy('predict/io',TX,TY)\n acc1 = learner.accuracy(TY,TP)\n print('initial test accuracy',acc1)\n\n print('params to optimize',xc.prog.getParamList())\n print('vars to optimize',xc.getParamVariables('predict/io'))\n \n# xc.optimizeDataLoss('predict/io', optimizer, X, Y, epochs=20)\n learner.trainMode('predict/io',X,Y)\n\n loss2 = lossFun(X,Y)\n print('final train data loss',loss2)\n loss3 = lossFun(TX,TY)\n print('final test data loss',loss3)\n P2 = learner.predict('predict/io',X)\n #acc2 = xc.accuracy('predict/io',X,Y)\n acc2 = learner.accuracy(Y,P2)\n print('final train accuracy',acc2)\n TP2 = learner.predict('predict/io',TX)\n #acc3 = xc.accuracy('predict/io',TX,TY)\n acc3 = learner.accuracy(TY,TP2)\n print('final test accuracy',acc3)\n\n\n xc.exportAllLearnedParams()\n v = self.prog.db.getParameter('weighted',1)\n d = self.prog.db.rowAsSymbolDict(v)\n # sanity check a couple of values\n self.assertTrue(d['little_pos'] > d['little_neg'])\n self.assertTrue(d['big_pos'] < d['big_neg'])\n close_cross_compiler(xc)\n\n self.assertTrue(acc2>=acc0)\n self.assertTrue(acc3>=acc1)\n\n self.assertTrue(loss2<loss0)\n self.assertTrue(loss2<loss1)\n \n self.assertTrue(acc2>=0.9)\n self.assertTrue(acc2==1.0)\n \n def testDatasetPredict(self):\n mode = declare.ModeDeclaration('predict(i,o)')\n modestr = 'predict/io'\n X,Y = testtensorlog.matrixAsTrainingData(self.labeledData,'train',2)\n for compilerClass in TESTED_COMPILERS:\n self.prog.setRuleWeights()\n self.prog.setFeatureWeights()\n if SAVE_SUMMARIES:\n xc = compilerClass(self.prog,compilerClass.__name__+\".summary\")\n else:\n 
xc = compilerClass(self.prog)\n self.prog.db.markAsParameter('weighted',1)\n \n learner = TESTED_LEARNERS[compilerClass](self.prog,xc=xc,rate=0.1,epochs=20)\n P = learner.predict(mode,X)\n print(\"X\",X.shape)\n print(\"P\",P.shape)\n self.assertTrue(X.shape==P.shape)\n P = learner.datasetPredict(dataset.Dataset({mode:X},{mode:Y}))\n print(\"X\",X.shape)\n print(\"P\",P.getX(mode).shape)\n self.assertTrue(X.shape==P.getX(mode).shape)\n \n return xc,learner,X,Y,P\n\n def testExptScaffold(self):\n mode = declare.ModeDeclaration('predict(i,o)')\n X,Y = testtensorlog.matrixAsTrainingData(self.labeledData,'train',2)\n TX,TY = testtensorlog.matrixAsTrainingData(self.labeledData,'test',2)\n self.prog.setAllWeights()\n for compilerClass in TESTED_COMPILERS:\n xc = compilerClass(self.prog)\n learner = TESTED_LEARNERS[compilerClass](self.prog,xc=xc,rate=0.1,epochs=20)\n Expt({'prog':self.prog,\n 'trainData':dataset.Dataset({mode:X},{mode:Y}),\n 'testData':dataset.Dataset({mode:TX},{mode:TY}),\n 'targetMode':mode,\n 'learner':learner\n }).run()\n\n @unittest.skipUnless(xctargets.tf,\"Tensorflow not available\")\n def testExpt(self):\n mode = declare.ModeDeclaration('predict(i,o)')\n X,Y = testtensorlog.matrixAsTrainingData(self.labeledData,'train',2)\n TX,TY = testtensorlog.matrixAsTrainingData(self.labeledData,'test',2)\n for compilerClass in [tensorflowxcomp.DenseMatDenseMsgCrossCompiler,\n tensorflowxcomp.SparseMatDenseMsgCrossCompiler]:\n xc = compilerClass(self.prog)\n xc.runExpt(\n prog=self.prog,\n trainData=dataset.Dataset({mode:X},{mode:Y}),\n testData=dataset.Dataset({mode:TX},{mode:TY}),\n targetMode=mode)\n close_cross_compiler(xc)\n\nclass TestXCOpGen(unittest.TestCase):\n\n # TODO tests for other xcompilers?\n @unittest.skipUnless(xctargets.tf,\"Tensorflow not available\")\n def testTCToyTypes(self):\n matrixdb.conf.ignore_types = False\n tlog = simple.Compiler(\n db=os.path.join(testtensorlog.TEST_DATA_DIR,\"textcattoy3.cfacts\"),\n prog=os.path.join(testtensorlog.TEST_DATA_DIR,\"textcat3.ppr\"))\n trainData = tlog.load_small_dataset(os.path.join(testtensorlog.TEST_DATA_DIR,\"toytrain.exam\"))\n mode = list(trainData.keys())[0]\n docs,labels = trainData[mode]\n xc = tlog.get_cross_compiler()\n ops = xc.possibleOps(docs,'doc')\n print('doc ops',ops)\n self.assertTrue(len(ops)==1)\n (words,wordType) = ops[0]\n self.assertTrue(wordType=='word')\n ops = xc.possibleOps(words,'word')\n self.assertTrue(len(ops)==3)\n pairs = None\n for (expr,exprType) in ops:\n if exprType=='labelWordPair':\n pairs = expr\n break\n self.assertTrue(pairs is not None)\n ops = xc.possibleOps(pairs,'labelWordPair')\n self.assertTrue(len(ops)==2)\n for (expr,exprType) in ops:\n self.assertTrue(exprType=='word')\n close_cross_compiler(xc)\n\n @unittest.skipUnless(xctargets.tf,\"Tensorflow not available\")\n def testTCToyIgnoringTypes(self):\n matrixdb.conf.ignore_types = True\n tlog = simple.Compiler(\n db=os.path.join(testtensorlog.TEST_DATA_DIR,\"textcattoy3.cfacts\"),\n prog=os.path.join(testtensorlog.TEST_DATA_DIR,\"textcat3.ppr\"))\n trainData = tlog.load_small_dataset(os.path.join(testtensorlog.TEST_DATA_DIR,\"toytrain.exam\"))\n mode = list(trainData.keys())[0]\n docs,labels = trainData[mode]\n xc = tlog.get_cross_compiler()\n ops = xc.possibleOps(docs)\n binary_predicates = [functor for (functor,arity) in tlog.db.matEncoding if arity==2]\n self.assertTrue(len(ops) == len(binary_predicates)*2)\n for x in ops:\n # ops should just be tensors\n self.assertFalse(isinstance(x,tuple))\n 
close_cross_compiler(xc)\n\nclass TestXCExpt(unittest.TestCase):\n\n\n def testTCToyTypes_wscaffold(self):\n matrixdb.conf.ignore_types = False\n optdict,args = comline.parseCommandLine(\n [\"--db\", os.path.join(testtensorlog.TEST_DATA_DIR,\"textcattoy3.cfacts\"),\n \"--prog\", os.path.join(testtensorlog.TEST_DATA_DIR,\"textcat3.ppr\"),\n \"--trainData\", os.path.join(testtensorlog.TEST_DATA_DIR,\"toytrain.exam\"),\n \"--testData\", os.path.join(testtensorlog.TEST_DATA_DIR,\"toytest.exam\"),\n \"--proppr\"])\n \n optdict['prog'].setAllWeights()\n for compilerClass in TESTED_COMPILERS:\n xc = compilerClass(optdict['prog'])\n learner = TESTED_LEARNERS[compilerClass](optdict['prog'],xc)\n Expt({\n 'prog':optdict['prog'],\n 'trainData':optdict['trainData'],\n 'testData':optdict['testData'],\n 'learner':learner,\n 'targetMode':declare.asMode(\"predict/io\")\n }).run()\n pbDoc = xc.db.onehot('pb','doc')\n self.checkXC(xc,'predict/io',pbDoc,{'negPair':115,'posPair':115,'hasWord':59,'weighted':115,'label':5})\n # some checks on the output of pprint\n lines = xc.pprint('predict/io')\n self.assertTrue(lines[0].find(\"SoftMaxFunction\") >= 0)\n self.assertTrue(lines[1].find(\"SumFunction\") >= 0)\n self.assertEqual(len(lines), 16)\n # some checks on misc xcomp API\n self.assertEqual(xc.inferenceOutputType('predict/io'),'label')\n pbId = xc.asSymbolId('pb',typeName='doc')\n pbSym = xc.asSymbol(pbId,typeName='doc')\n self.assertEqual(pbSym,'pb')\n self.assertEqual(xc.asSymbolId('this does not appear in the data',typeName='doc'), -1)\n\n @unittest.skipUnless(xctargets.tf,\"Tensorflow not available\")\n def testTCToyTypes(self):\n matrixdb.conf.ignore_types = False\n optdict,args = comline.parseCommandLine(\n [\"--db\", os.path.join(testtensorlog.TEST_DATA_DIR,\"textcattoy3.cfacts\"),\n \"--prog\", os.path.join(testtensorlog.TEST_DATA_DIR,\"textcat3.ppr\"),\n \"--trainData\", os.path.join(testtensorlog.TEST_DATA_DIR,\"toytrain.exam\"),\n \"--testData\", os.path.join(testtensorlog.TEST_DATA_DIR,\"toytest.exam\"),\n \"--proppr\"])\n for compilerClass in [tensorflowxcomp.DenseMatDenseMsgCrossCompiler,\n tensorflowxcomp.SparseMatDenseMsgCrossCompiler]:\n xc = compilerClass(optdict['prog'])\n xc.runExpt(\n prog=optdict['prog'],\n trainData=optdict['trainData'],\n testData=optdict['testData'],\n targetMode=declare.asMode(\"predict/io\"))\n\n # check trainability\n for (functor,arity) in xc.db.matEncoding:\n v = xc.parameterFromDBToVariable(functor,arity)\n if v is not None:\n vIsTrainable = (v in tf.trainable_variables())\n vIsParameter = ((functor,arity) in xc.db.paramSet)\n self.assertEqual(vIsTrainable,vIsParameter)\n\n pbDoc = xc.db.onehot('pb','doc')\n self.checkXC(xc,'predict/io',pbDoc,{'negPair':115,'posPair':115,'hasWord':59,'weighted':115,'label':5})\n # some checks on the output of pprint\n lines = xc.pprint('predict/io')\n self.assertTrue(lines[0].find(\"SoftMaxFunction\") >= 0)\n self.assertTrue(lines[1].find(\"SumFunction\") >= 0)\n self.assertEqual(len(lines), 16)\n # some checks on misc xcomp API\n self.assertEqual(xc.inferenceOutputType('predict/io'),'label')\n pbId = xc.asSymbolId('pb',typeName='doc')\n pbSym = xc.asSymbol(pbId,typeName='doc')\n self.assertEqual(pbSym,'pb')\n self.assertEqual(xc.asSymbolId('this does not appear in the data',typeName='doc'), -1)\n close_cross_compiler(xc)\n\n\n def testTCToyIgnoringTypes_wscaffold(self):\n matrixdb.conf.ignore_types = True\n optdict,args = comline.parseCommandLine(\n [\"--db\", 
os.path.join(testtensorlog.TEST_DATA_DIR,\"textcattoy3.cfacts\"),\n \"--prog\", os.path.join(testtensorlog.TEST_DATA_DIR,\"textcat3.ppr\"),\n \"--trainData\", os.path.join(testtensorlog.TEST_DATA_DIR,\"toytrain.exam\"),\n \"--testData\", os.path.join(testtensorlog.TEST_DATA_DIR,\"toytest.exam\"),\n \"--proppr\"])\n optdict['prog'].setAllWeights()\n for compilerClass in TESTED_COMPILERS:\n xc = compilerClass(optdict['prog'])\n learner = TESTED_LEARNERS[compilerClass](optdict['prog'],xc)\n Expt({\n 'prog':optdict['prog'],\n 'trainData':optdict['trainData'],\n 'testData':optdict['testData'],\n 'learner':learner,\n 'targetMode':declare.asMode(\"predict/io\")\n }).run()\n pbDoc = xc.db.onehot('pb')\n self.checkXC(xc,'predict/io',pbDoc,collections.defaultdict(lambda:191))\n\n @unittest.skipUnless(xctargets.tf,\"Tensorflow not available\")\n def testTCToyIgnoringTypes(self):\n matrixdb.conf.ignore_types = True\n optdict,args = comline.parseCommandLine(\n [\"--db\", os.path.join(testtensorlog.TEST_DATA_DIR,\"textcattoy3.cfacts\"),\n \"--prog\", os.path.join(testtensorlog.TEST_DATA_DIR,\"textcat3.ppr\"),\n \"--trainData\", os.path.join(testtensorlog.TEST_DATA_DIR,\"toytrain.exam\"),\n \"--testData\", os.path.join(testtensorlog.TEST_DATA_DIR,\"toytest.exam\"),\n \"--proppr\"])\n for compilerClass in [tensorflowxcomp.DenseMatDenseMsgCrossCompiler,\n tensorflowxcomp.SparseMatDenseMsgCrossCompiler]:\n xc = compilerClass(optdict['prog'])\n xc.runExpt(\n prog=optdict['prog'],\n trainData=optdict['trainData'],\n testData=optdict['testData'],\n targetMode=declare.asMode(\"predict/io\"))\n pbDoc = xc.db.onehot('pb')\n self.checkXC(xc,'predict/io',pbDoc,collections.defaultdict(lambda:191))\n close_cross_compiler(xc)\n\n def checkXC(self,xc,mode,rawInput,expectedCols):\n print('matrixdb.conf.ignore_types',matrixdb.conf.ignore_types)\n db = xc.db\n for (functor,arity),mat in list(db.matEncoding.items()):\n print(functor,arity,'shape',mat.shape)\n r,c = mat.shape\n self.assertEqual(c,expectedCols[functor])\n inferenceFun = xc.inferenceFunction(mode)\n y = inferenceFun(rawInput)\n r,c = y.shape\n self.assertEqual(c,expectedCols['label'])\n\nclass TestMultiModeXC(unittest.TestCase):\n\n def setUp(self):\n self.db = matrixdb.MatrixDB.loadFile(\n os.path.join(testtensorlog.TEST_DATA_DIR,'matchtoy.cfacts'))\n self.prog = program.ProPPRProgram.loadRules(\n os.path.join(testtensorlog.TEST_DATA_DIR,\"matchtoy.ppr\"),db=self.db)\n self.dset = dataset.Dataset.loadExamples(\n self.db, os.path.join(testtensorlog.TEST_DATA_DIR,'matchtoy-train.exam'),proppr=False)\n self.prog.setAllWeights()\n\n def testInScaffold(self):\n print(TESTED_COMPILERS)\n self.assertTrue(self.dset.modesToLearn() > 1)\n self.prog.setAllWeights()\n for compilerClass in TESTED_COMPILERS:\n print(compilerClass)\n xc = compilerClass(self.prog)\n # compile everything\n for mode in self.dset.modesToLearn():\n xc.ensureCompiled(mode)\n learner = TESTED_LEARNERS[compilerClass](self.prog,xc)\n testAcc,testXent = Expt({\n 'prog':self.prog,\n 'trainData':self.dset,\n 'testData':self.dset,\n 'learner':learner,\n 'savedTestPredictions':'TestMultiModeXC.testInScaffold.%s.solutions.txt'%compilerClass.__name__\n }).run()\n print(testAcc)\n\n @unittest.skipUnless(xctargets.tf,\"Tensorflow not available\")\n def testIt(self):\n self.assertTrue(self.dset.modesToLearn() > 1)\n for compilerClass in [tensorflowxcomp.DenseMatDenseMsgCrossCompiler,\n tensorflowxcomp.SparseMatDenseMsgCrossCompiler]:\n xc = compilerClass(self.prog)\n # compile everything\n for mode in 
self.dset.modesToLearn():\n xc.ensureCompiled(mode,inputs=None)\n # check the variables\n optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.1)\n session = tf.Session()\n session.run(tf.global_variables_initializer())\n # set up for training\n trainStep = {}\n for mode in self.dset.modesToLearn():\n (dataLossArgs,dataLossExpr) = xc.dataLoss(mode)\n trainStep[mode] = optimizer.minimize(dataLossExpr, var_list=xc.getParamVariables(mode))\n # train\n for i in range(2): #epochs\n for mode in self.dset.modesToLearn():\n X = self.dset.getX(mode)\n Y = self.dset.getY(mode)\n fd = xc.getFeedDict(mode,X,Y,wrapped=False)\n session.run(trainStep[mode],feed_dict=fd)\n # test\n for mode in self.dset.modesToLearn():\n X = self.dset.getX(mode)\n Y = self.dset.getY(mode)\n Y_ = xc.inferenceFunction(mode)(X)\n acc = xc.accuracy(mode,X,Y)\n print('mode',mode,'acc',acc)\n session.close()\n close_cross_compiler(xc)\n\nclass TestMatParams(unittest.TestCase):\n\n def setUp(self):\n self.cacheDir = tempfile.mkdtemp()\n\n def cacheFile(self,fileName):\n return os.path.join(self.cacheDir,fileName)\n\n def testMToyMatParam(self):\n tlog = simple.Compiler(\n db=os.path.join(testtensorlog.TEST_DATA_DIR,\"matchtoy.cfacts\"),\n prog=os.path.join(testtensorlog.TEST_DATA_DIR,\"matchtoy.ppr\"))\n trainData = tlog.load_dataset(os.path.join(testtensorlog.TEST_DATA_DIR,\"matchtoy-train.exam\"))\n tlog.db.markAsParameter('dabbrev',2)\n factDict = tlog.db.matrixAsPredicateFacts('dabbrev',2,tlog.db.matEncoding[('dabbrev',2)])\n print('before learning',len(factDict),'dabbrevs')\n self.assertTrue(len(factDict)==5)\n for f in sorted(factDict.keys()):\n print('>',str(f),factDict[f])\n\n # expt pipeline\n mode = list(trainData.keys())[0]\n TX,TY = trainData[mode]\n inference = tlog.inference(mode)\n trueY = tf.placeholder(tf.float32, shape=TY.shape, name='tensorlog/trueY')\n loss = tlog.loss(mode)\n optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.1)\n train_step = optimizer.minimize(loss)\n train_batch_fd = {tlog.input_placeholder_name(mode):TX, tlog.target_output_placeholder_name(mode):TY}\n session = tf.Session()\n session.run(tf.global_variables_initializer())\n for i in range(5):\n print('epoch',i+1)\n session.run(train_step, feed_dict=train_batch_fd)\n tlog.set_all_db_params_to_learned_values(session)\n# params = {'prog':prog,'trainData':trainData, 'testData':testData}\n# result = expt.Expt(params).run()\n# factDict = db.matrixAsPredicateFacts('dabbrev',2,db.matEncoding[('dabbrev',2)])\n# print 'after learning',len(factDict),'dabbrevs'\n# for f in sorted(factDict.keys()):\n# print '>',str(f),factDict[f]\n# self.assertTrue(len(factDict)>5)\n\[email protected](xctargets.tf,\"Tensorflow not available\")\nclass TestSimple(unittest.TestCase):\n\n def testEmptyRules(self):\n # should not throw an error\n tlog = simple.Compiler(\n db=os.path.join(testtensorlog.TEST_DATA_DIR,\"textcattoy3.cfacts\"))\n\n def testIncrementalDBLoad(self):\n b = simple.Builder()\n predict,label,hasWord,posPair,negPair = b.predicates(\"predict,label,hasWord,posPair,negPair\")\n doc_t,label_t,word_t,labelWordPair_t = b.types(\"doc_t,label_t,word_t,labelWordPair_t\")\n b.schema += predict(doc_t,label_t) & label(label_t)\n b.schema += hasWord(doc_t,word_t) & posPair(word_t,labelWordPair_t) & negPair(word_t,labelWordPair_t)\n for basename in \"textcattoy_corpus.cfacts textcattoy_labels.cfacts textcattoy_pairs.cfacts\".split(\" \"):\n b.db += os.path.join(testtensorlog.TEST_DATA_DIR, basename)\n tlog = simple.Compiler(db=b.db)\n for 
(functor,arity,nnz) in [('hasWord',2,99),('label',1,2),('negPair',2,56)]:\n m = tlog.db.matEncoding[(functor,arity)]\n self.assertTrue(m.nnz == nnz)\n\n def testBatch(self):\n tlog = simple.Compiler(\n db=os.path.join(testtensorlog.TEST_DATA_DIR,\"textcattoy3.cfacts\"),\n prog=os.path.join(testtensorlog.TEST_DATA_DIR,\"textcat3.ppr\"))\n trainData = tlog.load_dataset(os.path.join(testtensorlog.TEST_DATA_DIR,\"toytrain.exam\"))\n testData = tlog.load_dataset(os.path.join(testtensorlog.TEST_DATA_DIR,\"toytest.exam\"))\n mode = list(trainData.keys())[0]\n TX,TY = trainData[mode]\n UX,UY = testData[mode]\n inference = tlog.inference(mode)\n trueY = tf.placeholder(tf.float32, shape=UY.shape, name='tensorlog/trueY')\n correct = tf.equal(tf.argmax(trueY,1), tf.argmax(inference,1))\n accuracy = tf.reduce_mean(tf.cast(correct, tf.float32))\n test_batch_fd = {tlog.input_placeholder_name(mode):UX, trueY.name:UY}\n loss = tlog.loss(mode)\n optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.1)\n train_step = optimizer.minimize(loss)\n train_batch_fd = {tlog.input_placeholder_name(mode):TX, tlog.target_output_placeholder_name(mode):TY}\n session = tf.Session()\n session.run(tf.global_variables_initializer())\n acc0 = session.run(accuracy, feed_dict=test_batch_fd)\n print('initial accuracy',acc0)\n self.assertTrue(acc0<0.6)\n for i in range(10):\n print('epoch',i+1)\n session.run(train_step, feed_dict=train_batch_fd)\n acc1 = session.run(accuracy, feed_dict=test_batch_fd)\n print('final accuracy',acc1)\n self.assertTrue(acc1>=0.9)\n # test a round-trip serialization\n # saves the db\n cacheDir = tempfile.mkdtemp()\n db_file = os.path.join(cacheDir,'simple.db')\n tlog.set_all_db_params_to_learned_values(session)\n tlog.serialize_db(db_file)\n # load everything into a new graph and don't reset the learned params\n new_graph = tf.Graph()\n with new_graph.as_default():\n tlog2 = simple.Compiler(\n db=db_file,\n prog=os.path.join(testtensorlog.TEST_DATA_DIR,\"textcat3.ppr\"),\n autoset_db_params=False)\n # reconstruct the accuracy measure\n inference2 = tlog2.inference(mode)\n trueY2 = tf.placeholder(tf.float32, shape=UY.shape, name='tensorlog/trueY2')\n correct2 = tf.equal(tf.argmax(trueY2,1), tf.argmax(inference2,1))\n accuracy2 = tf.reduce_mean(tf.cast(correct2, tf.float32))\n # eval accuracy in a new session\n session2 = tf.Session()\n session2.run(tf.global_variables_initializer())\n test_batch_fd2 = {tlog2.input_placeholder_name(mode):UX, trueY2.name:UY}\n acc3 = session2.run(accuracy2, feed_dict=test_batch_fd2)\n print('accuracy after round-trip serialization',acc3)\n self.assertTrue(acc3>=0.9)\n session.close()\n\n def testMinibatch(self):\n tlog = simple.Compiler(\n db=os.path.join(testtensorlog.TEST_DATA_DIR,\"textcattoy3.cfacts\"),\n prog=os.path.join(testtensorlog.TEST_DATA_DIR,\"textcat3.ppr\"))\n self.runTextCatLearner(tlog)\n\n def runTextCatLearner(self,tlog):\n trainData = tlog.load_dataset(os.path.join(testtensorlog.TEST_DATA_DIR,\"toytrain.exam\"))\n testData = tlog.load_dataset(os.path.join(testtensorlog.TEST_DATA_DIR,\"toytest.exam\"))\n mode = list(trainData.keys())[0]\n UX,UY = testData[mode]\n inference = tlog.inference(mode)\n trueY = tf.placeholder(tf.float32, shape=UY.shape, name='tensorlog/trueY')\n correct = tf.equal(tf.argmax(trueY,1), tf.argmax(inference,1))\n accuracy = tf.reduce_mean(tf.cast(correct, tf.float32))\n test_batch_fd = {tlog.input_placeholder_name(mode):UX, trueY.name:UY}\n loss = tlog.loss(mode)\n optimizer = 
tf.train.GradientDescentOptimizer(learning_rate=0.1)\n train_step = optimizer.minimize(loss)\n session = tf.Session()\n session.run(tf.global_variables_initializer())\n acc0 = session.run(accuracy, feed_dict=test_batch_fd)\n print('initial accuracy',acc0)\n self.assertTrue(acc0<0.6)\n for i in range(10):\n print('epoch',i+1, end=' ')\n for mode,(TX,TY) in tlog.minibatches(trainData,batch_size=2):\n print('.', end=' ')\n train_minibatch_fd = {tlog.input_placeholder_name(mode):TX, tlog.target_output_placeholder_name(mode):TY}\n session.run(train_step, feed_dict=train_minibatch_fd)\n print('epoch',i+1,'finished')\n acc1 = session.run(accuracy, feed_dict=test_batch_fd)\n print('final accuracy',acc1)\n self.assertTrue(acc1>=0.9)\n session.close()\n\n def testBuilder1(self):\n b = simple.Builder()\n X,Y,Z = b.variables(\"X Y Z\")\n aunt,parent,sister,wife = b.predicates(\"aunt parent sister wife\")\n uncle = b.predicate(\"uncle\")\n b += aunt(X,Y) <= uncle(X,Z) & wife(Z,Y)\n b += aunt(X,Y) <= parent(X,Z) & sister(Z,Y)\n r1 = b.rule_id(\"ruleid_t\",\"r1\")\n r2 = b.rule_id(\"ruleid_t\",\"r2\")\n b += aunt(X,Y) <= uncle(X,Z) & wife(Z,Y) // r1\n b += aunt(X,Y) <= parent(X,Z) & sister(Z,Y) // r2\n feature,description = b.predicates(\"feature description\")\n weight = b.predicate(\"weight\")\n F = b.variable(\"F\")\n D = b.variable(\"D\")\n b += aunt(X,Y) <= uncle(X,Z) & wife(Z,Y) // (weight(F) | description(X,D) & feature(X,F))\n b.rules.listing()\n rs = b.rules.rulesFor(parser.Goal('aunt',[X,Y]))\n self.assertEqual(str(rs[0]), \"aunt(X,Y) :- uncle(X,Z), wife(Z,Y).\")\n self.assertEqual(str(rs[1]), \"aunt(X,Y) :- parent(X,Z), sister(Z,Y).\")\n self.assertEqual(str(rs[2]), \"aunt(X,Y) :- uncle(X,Z), wife(Z,Y) {weight(R1) : assign(R1,r1,ruleid_t)}.\")\n self.assertEqual(str(rs[3]), \"aunt(X,Y) :- parent(X,Z), sister(Z,Y) {weight(R2) : assign(R2,r2,ruleid_t)}.\")\n self.assertEqual(str(rs[4]), \"aunt(X,Y) :- uncle(X,Z), wife(Z,Y) {weight(F) : description(X,D),feature(X,F)}.\")\n\n def testBuilder2(self):\n b = simple.Builder()\n predict,assign,weighted,hasWord,posPair,negPair = b.predicates(\"predict assign weighted hasWord posPair negPair\")\n X,Pos,Neg,F,W = b.variables(\"X Pos Neg F W\")\n b += predict(X,Pos) <= assign(Pos,'pos','label') // (weighted(F) | hasWord(X,W) & posPair(W,F))\n b += predict(X,Neg) <= assign(Neg,'neg','label') // (weighted(F) | hasWord(X,W) & negPair(W,F))\n dbSpec = os.path.join(testtensorlog.TEST_DATA_DIR,\"textcattoy3.cfacts\")\n self.runTextCatLearner(simple.Compiler(db=dbSpec,prog=b.rules))\n\n def testBuilder3(self):\n b = simple.Builder()\n predict,assign,weighted,hasWord,posPair,negPair,label = b.predicates(\"predict assign weighted hasWord posPair negPair label\")\n doc_t,label_t,word_t,labelWordPair_t = b.types(\"doc_t label_t word_t labelWordPair_t\")\n\n b.schema += predict(doc_t,label_t)\n b.schema += hasWord(doc_t,word_t)\n b.schema += posPair(word_t,labelWordPair_t)\n b.schema += negPair(word_t,labelWordPair_t)\n b.schema += label(label_t)\n\n X,Pos,Neg,F,W = b.variables(\"X Pos Neg F W\")\n b.rules += predict(X,Pos) <= assign(Pos,'pos','label_t') // (weighted(F) | hasWord(X,W) & posPair(W,F))\n b.rules += predict(X,Neg) <= assign(Neg,'neg','label_t') // (weighted(F) | hasWord(X,W) & negPair(W,F))\n\n # use the untyped version of the facts to make sure the schema works\n b.db = os.path.join(testtensorlog.TEST_DATA_DIR,\"textcattoy.cfacts\")\n\n self.runTextCatLearner(simple.Compiler(db=b.db, prog=b.rules))\n\nclass 
TestReparameterizationAndTypedLoading(unittest.TestCase):\n\n def testBugWasFixed(self):\n # use the untyped version of the facts to make sure the schema works\n db = matrixdb.MatrixDB()\n db.addLines([\"# :- r(lo_or_hi_t)\\n\",\n \"\\t\".join(\"r low 0.1\".split()) + \"\\n\",\n \"\\t\".join(\"r hi 0.9\".split()) + \"\\n\"])\n db.markAsParameter('r',1)\n prog = program.Program(db=db)\n typeName = db.schema.getArgType(\"r\",1,0)\n idLow = db.schema.getId(typeName,\"low\")\n idHi = db.schema.getId(typeName,\"hi\")\n db_r = db.matEncoding[('r',1)]\n self.approxEqual(db_r[0,idLow], 0.1)\n self.approxEqual(db_r[0,idHi], 0.9)\n\n xc = tensorflowxcomp.SparseMatDenseMsgCrossCompiler(prog)\n v_r = xc._vector(declare.asMode(\"r(i)\"))\n\n session = tf.Session()\n session.run(tf.global_variables_initializer())\n xc.exportAllLearnedParams()\n print('exported to xc',db.matEncoding[('r',1)])\n db_r = db.matEncoding[('r',1)]\n self.approxEqual(db_r[0,idLow], 0.1)\n self.approxEqual(db_r[0,idHi], 0.9)\n\n def approxEqual(self,a,b):\n self.assertTrue(abs(float(a)-b) < 0.0001)\n\nclass TestPlugins(unittest.TestCase):\n\n def test_identity_io(self):\n ruleStrings = ['predict(X,Y) :- assign(Pos,pos,label),udp1(Pos,Y) {weighted(F): hasWord(X,W),posPair(W,F)}.',\n 'predict(X,Y) :- assign(Neg,neg,label),udp1(Neg,Y) {weighted(F): hasWord(X,W),negPair(W,F)}.']\n plugins = program.Plugins()\n plugins.define('udp1/io', lambda x:x, lambda inputType:'label')\n self.check_learning_with_udp(ruleStrings,plugins)\n\n def test_identity_oi(self):\n ruleStrings = ['predict(X,Y) :- assign(Pos,pos,label),udp2(Y,Pos) {weighted(F): hasWord(X,W),posPair(W,F)}.',\n 'predict(X,Y) :- assign(Neg,neg,label),udp2(Y,Neg) {weighted(F): hasWord(X,W),negPair(W,F)}.']\n plugins = program.Plugins()\n plugins.define('udp2/oi', lambda x:x, lambda inputType:'label')\n self.check_learning_with_udp(ruleStrings,plugins)\n\n def test_double_io1(self):\n ruleStrings = ['predict(X,Y) :- assign(Pos,pos,label),udp3(Pos,Y) {weighted(F): hasWord(X,W),posPair(W,F)}.',\n 'predict(X,Y) :- assign(Neg,neg,label),udp3(Neg,Y) {weighted(F): hasWord(X,W),negPair(W,F)}.']\n plugins = program.Plugins()\n plugins.define('udp3/io', lambda x:2*x, lambda inputType:'label')\n self.check_learning_with_udp(ruleStrings,plugins)\n\n def test_double_io2(self):\n ruleStrings = ['predict(X,Pos) :- assign(Pos,pos,label) {weighted(F): hasWord(X,W),double(W,W2),posPair(W2,F)}.',\n 'predict(X,Neg) :- assign(Neg,neg,label) {weighted(F2): hasWord(X,W),negPair(W,F),double(F,F2)}.']\n plugins = program.Plugins()\n plugins.define('double/io', lambda x:2*x, lambda inputType:inputType)\n self.check_learning_with_udp(ruleStrings,plugins)\n\n def test_kw_i(self):\n ruleStrings = ['predict(X,Pos) :- assign(Pos,pos,label),hasWord(X,W),poskw(W).',\n 'predict(X,Neg) :- assign(Neg,neg,label),hasWord(X,W),negkw(W).']\n plugins = program.Plugins()\n db = matrixdb.MatrixDB.loadFile(os.path.join(testtensorlog.TEST_DATA_DIR,\"textcattoy3.cfacts\"))\n poskw_v = (db.onehot('little','word') + db.onehot('red','word')).todense()\n negkw_v = (db.onehot('big','word') + db.onehot('job','word') + db.onehot('huge','word')).todense()\n plugins.define('poskw/i', lambda:poskw_v, lambda:'word')\n plugins.define('negkw/i', lambda:negkw_v, lambda:'word')\n self.check_udp(ruleStrings,plugins)\n \n def check_udp(self,ruleStrings,plugins):\n db = matrixdb.MatrixDB.loadFile(os.path.join(testtensorlog.TEST_DATA_DIR,\"textcattoy3.cfacts\"))\n rules = testtensorlog.rules_from_strings(ruleStrings)\n prog = 
program.ProPPRProgram(rules=rules,db=db,plugins=plugins)\n mode = declare.asMode(\"predict/io\")\n prog.compile(mode)\n fun = prog.function[(mode,0)]\n print(\"\\n\".join(fun.pprint()))\n tlog = simple.Compiler(db=db, prog=prog)\n testData = tlog.load_dataset(os.path.join(testtensorlog.TEST_DATA_DIR,\"toytest.exam\"))\n mode = list(testData.keys())[0]\n UX,UY = testData[mode]\n inference = tlog.inference(mode)\n trueY = tf.placeholder(tf.float32, shape=UY.shape, name='tensorlog/trueY')\n correct = tf.equal(tf.argmax(trueY,1), tf.argmax(inference,1))\n accuracy = tf.reduce_mean(tf.cast(correct, tf.float32))\n test_batch_fd = {tlog.input_placeholder_name(mode):UX, trueY.name:UY}\n session = tf.Session()\n session.run(tf.global_variables_initializer())\n acc1 = session.run(accuracy, feed_dict=test_batch_fd)\n print('final accuracy',acc1)\n session.close()\n\n\n # TOFIX needs some work to pass\n # - you can't do polytree BP with multiple inputs\n # - so there's not a simple fix\n # - probably do this: (1) treat inputs to leftmost userDef as outputs (2) run message-passing for those outputs\n # (3) add the user def operator (4) repeat .... (5) when there are no more plugins\n def notest_isect_iio(self):\n bpcompiler.conf.trace = True\n ruleStrings = ['predict(X,Y) :- hasWord(X,W),posPair(W,P1),negPair(W,P2),isect(P1,P2,Y).']\n plugins = program.Plugins()\n plugins.define('isect/iio', lambda x1,x2:x1*x2, lambda t1,t2:t1)\n self.assertTrue(plugins.isDefined(declare.asMode('isect/iio')))\n self.check_learning_with_udp(ruleStrings,plugins)\n \n def argmax(self):\n bpcompiler.conf.trace = True\n ruleStrings = ['predict(X,Y):-olympics(X,Z),nations(Z),argmax(Z,Y).']\n plugins = program.Plugins()\n plugins.define('argmax/io',lambda x1:tf.nn.softmax(x1), lambda t1:t1)\n db = matrixdb.MatrixDB.loadFile(os.path.join(testtensorlog.TEST_DATA_DIR,'argmax.cfacts'))\n rules = testtensorlog.rules_from_strings(ruleStrings)\n prog = program.ProPPRProgram(rules=rules,db=db,plugins=plugins)\n prog.setAllWeights()\n mode = declare.asMode(\"predict/io\")\n prog.compile(mode)\n fun = prog.function[(mode,0)]\n print(\"\\n\".join(fun.pprint()))\n tlog = simple.Compiler(db=db, prog=prog)\n \n data = tlog.load_dataset(os.path.join(testtensorlog.TEST_DATA_DIR,\"argmax.exam\"))\n mode = list(data.keys())[0]\n UX,UY = data[mode]\n inference = tlog.inference(mode)\n trueY = tf.placeholder(tf.float32, shape=UY.shape, name='tensorlog/trueY')\n correct = tf.equal(tf.argmax(trueY,1), tf.argmax(inference,1))\n accuracy = tf.reduce_mean(tf.cast(correct, tf.float32))\n test_batch_fd = {tlog.input_placeholder_name(mode):UX, trueY.name:UY}\n session = tf.Session()\n session.run(tf.global_variables_initializer())\n acc0 = session.run(accuracy, feed_dict=test_batch_fd)\n print('initial accuracy',acc0)\n self.assertTrue(acc0>0.9)\n session.close()\n \n# acc0 = session.run(inference, feed_dict=test_batch_fd)\n# print \"inference results:\"\n# print acc0\n# print np.argmax(acc0,1)\n# print \"trueY:\"\n# print UY\n# print np.argmax(UY,1)\n\n @unittest.skipUnless(xctargets.tf,\"Tensorflow not available\")\n def check_learning_with_udp(self,ruleStrings,plugins,dbfile=os.path.join(testtensorlog.TEST_DATA_DIR,\"textcattoy3.cfacts\")):\n db = matrixdb.MatrixDB.loadFile(dbfile)\n rules = testtensorlog.rules_from_strings(ruleStrings)\n prog = program.ProPPRProgram(rules=rules,db=db,plugins=plugins)\n prog.setAllWeights()\n mode = declare.asMode(\"predict/io\")\n prog.compile(mode)\n fun = prog.function[(mode,0)]\n 
print(\"\\n\".join(fun.pprint()))\n tlog = simple.Compiler(db=db, prog=prog)\n\n trainData = tlog.load_dataset(os.path.join(testtensorlog.TEST_DATA_DIR,\"toytrain.exam\"))\n testData = tlog.load_dataset(os.path.join(testtensorlog.TEST_DATA_DIR,\"toytest.exam\"))\n mode = list(trainData.keys())[0]\n TX,TY = trainData[mode]\n UX,UY = testData[mode]\n inference = tlog.inference(mode)\n trueY = tf.placeholder(tf.float32, shape=UY.shape, name='tensorlog/trueY')\n correct = tf.equal(tf.argmax(trueY,1), tf.argmax(inference,1))\n accuracy = tf.reduce_mean(tf.cast(correct, tf.float32))\n test_batch_fd = {tlog.input_placeholder_name(mode):UX, trueY.name:UY}\n loss = tlog.loss(mode)\n optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.1)\n train_step = optimizer.minimize(loss)\n train_batch_fd = {tlog.input_placeholder_name(mode):TX, tlog.target_output_placeholder_name(mode):TY}\n session = tf.Session()\n session.run(tf.global_variables_initializer())\n acc0 = session.run(accuracy, feed_dict=test_batch_fd)\n print('initial accuracy',acc0)\n self.assertTrue(acc0<0.6)\n for i in range(10):\n print('epoch',i+1)\n session.run(train_step, feed_dict=train_batch_fd)\n acc1 = session.run(accuracy, feed_dict=test_batch_fd)\n print('final accuracy',acc1)\n self.assertTrue(acc1>=0.9)\n session.close()\n\n\nif __name__ == \"__main__\":\n logging.basicConfig(level=logging.INFO)\n\n # default is to test on everything adding command line arguments\n # 'tensorflow' 'theano' 'sparse' 'dense' filters the list (so\n # 'testxcomp.py tensorflow sparse' will run just\n # tensorflowxcomp.SparseMatDenseMsgCrossCompiler)\n\n if 'theano' in sys.argv[1:]:\n TESTED_COMPILERS = [c for c in TESTED_COMPILERS if c.__module__.endswith(\"theanoxcomp\")]\n if 'tensorflow' in sys.argv[1:]:\n TESTED_COMPILERS = [c for c in TESTED_COMPILERS if c.__module__.endswith(\"tensorflowxcomp\")]\n if 'dense' in sys.argv[1:]:\n TESTED_COMPILERS = [c for c in TESTED_COMPILERS if c.__name__.startswith(\"Dense\")]\n if 'sparse' in sys.argv[1:]:\n TESTED_COMPILERS = [c for c in TESTED_COMPILERS if c.__name__.startswith(\"Sparse\")]\n sys.argv = [a for a in sys.argv if a not in \"theano tensorflow dense sparse\".split()]\n print('TESTED_COMPILERS',TESTED_COMPILERS)\n \n unittest.main()\n",
"# (C) William W. Cohen and Carnegie Mellon University, 2016\n\nimport sys\nimport re\nimport math\nimport os.path\nimport collections\nimport scipy.sparse as SS\nimport scipy.io as SIO\nimport numpy as NP\nimport numpy.random as NR\nimport logging\n\nfrom tensorlog import config\nfrom tensorlog import mutil\nfrom tensorlog import matrixdb\nfrom tensorlog import declare\nfrom tensorlog import util\n\nconf = config.Config()\nconf.normalize_outputs = True; conf.help.normalize_outputs = \"In .exam files, l1-normalize the weights of valid outputs\"\n\n#\n# dealing with labeled training data\n#\n\nclass Dataset(object):\n\n def __init__(self,xDict,yDict):\n # dict which maps mode declaration to X matrices for training\n self.xDict = xDict\n # likewise for Y matrices\n self.yDict = yDict\n\n def isSinglePredicate(self):\n \"\"\"Returns true if all the examples are for a single predicate.\"\"\"\n return len(list(self.xDict.keys()))==1\n\n def extractMode(self,mode):\n \"\"\"Return a new Dataset that just contains this mode.\"\"\"\n assert mode in self.xDict, 'dataset does not contain mode %s' % str(mode)\n return Dataset({mode:self.xDict[mode]}, {mode:self.yDict[mode]})\n\n def modesToLearn(self):\n \"\"\"Return list of modes associated with the data.\"\"\"\n return list(self.xDict.keys())\n\n def hasMode(self,mode):\n \"\"\"True if there are examples of the mode in the dataset.\"\"\"\n return (mode in self.yDict and mode in self.xDict)\n\n def getX(self,mode):\n \"\"\"Get a matrix of all inputs for the mode.\"\"\"\n return self.xDict[mode]\n\n def getY(self,mode):\n \"\"\"Get a matrix of all desired outputs for the mode.\"\"\"\n return self.yDict[mode]\n\n def size(self):\n return sum([m.nnz for m in list(self.xDict.values())]) + sum([m.nnz for m in list(self.yDict.values())])\n\n def shuffle(self):\n for mode in self.xDict:\n shuffledRowNums = NP.arange(mutil.numRows(self.xDict[mode]))\n NR.shuffle(shuffledRowNums)\n self.xDict[mode] = mutil.shuffleRows(self.xDict[mode],shuffledRowNums)\n self.yDict[mode] = mutil.shuffleRows(self.yDict[mode],shuffledRowNums)\n\n def minibatchIterator(self,batchSize=100,shuffleFirst=True):\n \"\"\"Iterate over triples (mode,X',Y') where X' and Y' are sets of\n batchSize rows from the full data for mode, randomly selected\n (without replacement) from the dataset.\"\"\"\n # randomize the order of the examples\n if shuffleFirst: self.shuffle()\n # then sample an ordering of the modes\n modeList = self.modesToLearn()\n modeSampleDict = {}\n for modeIndex,mode in enumerate(modeList):\n numBatches = int(math.ceil( mutil.numRows(self.getX(mode)) / float(batchSize) ))\n modeSampleDict[mode] = NP.ones(numBatches,dtype='int')*modeIndex\n modeSamples = NP.concatenate(list(modeSampleDict.values()))\n NR.shuffle(modeSamples)\n # finally produce the minibatches\n currentOffset = [0] * len(modeList)\n for modeIndex in modeSamples:\n mode = modeList[modeIndex]\n lo = currentOffset[modeIndex]\n bX = mutil.selectRows(self.getX(mode),lo,lo+batchSize)\n bY = mutil.selectRows(self.getY(mode),lo,lo+batchSize)\n currentOffset[modeIndex] += batchSize\n yield mode,bX,bY\n\n def pprint(self):\n return ['%s: X %s Y %s' % (str(mode),mutil.pprintSummary(self.xDict[mode]),mutil.pprintSummary(self.yDict[mode])) for mode in self.xDict]\n\n #\n # i/o and conversions\n #\n\n def serialize(self,dir):\n \"\"\"Save the dataset on disk.\"\"\"\n if not os.path.exists(dir):\n os.mkdir(dir)\n dx = dict([(str(k_v[0]),k_v[1]) for k_v in list(self.xDict.items())])\n dy = dict([(str(k_v[0]),k_v[1]) for k_v 
in list(self.yDict.items())])\n SIO.savemat(os.path.join(dir,\"xDict\"),dx,do_compression=True)\n SIO.savemat(os.path.join(dir,\"yDict\"),dy,do_compression=True)\n\n @staticmethod\n def deserialize(dir):\n \"\"\"Recover a saved dataset.\"\"\"\n logging.info('deserializing dataset file '+ dir)\n xDict = {}\n yDict = {}\n SIO.loadmat(os.path.join(dir,\"xDict\"),xDict)\n SIO.loadmat(os.path.join(dir,\"yDict\"),yDict)\n #serialization converts modes to strings so convert them\n #back.... it also converts matrices to csr\n for d in (xDict,yDict):\n for stringKey,mat in list(d.items()):\n del d[stringKey]\n if not stringKey.startswith('__'):\n d[declare.asMode(stringKey)] = SS.csr_matrix(mat)\n dset = Dataset(xDict,yDict)\n logging.info('deserialized dataset has %d modes and %d non-zeros' % (len(dset.modesToLearn()), dset.size()))\n return dset\n\n\n @staticmethod\n def uncacheExamples(dsetFile,db,exampleFile,proppr=True):\n \"\"\"Build a dataset file from an examples file, serialize it, and\n return the de-serialized dataset. Or if that's not necessary,\n just deserialize it.\n \"\"\"\n if not os.path.exists(dsetFile) or os.path.getmtime(exampleFile)>os.path.getmtime(dsetFile):\n logging.info('serializing examples in %s to %s' % (exampleFile,dsetFile))\n dset = Dataset.loadExamples(db,exampleFile,proppr=proppr)\n dset.serialize(dsetFile)\n os.utime(dsetFile,None) #update the modification time for the directory\n return dset\n else:\n return Dataset.deserialize(dsetFile)\n\n @staticmethod\n def uncacheMatrix(dsetFile,db,functorToLearn,functorInDB):\n \"\"\"Build a dataset file from a DB matrix as specified with loadMatrix\n and serialize it. Or if that's not necessary, just\n deserialize it.\n \"\"\"\n if not os.path.exists(dsetFile):\n print(('preparing examples from',functorToLearn,'...'))\n dset = Dataset.loadMatrix(db,functorToLearn,functorInDB)\n print(('serializing dsetFile',dsetFile,'...'))\n dset.serialize(dsetFile)\n return dset\n else:\n print(('de-serializing dsetFile',dsetFile,'...'))\n return Dataset.deserialize(dsetFile)\n\n # TODO remove or make type-aware\n @staticmethod\n def loadMatrix(db,functorToLearn,functorInDB):\n \"\"\"Convert a DB matrix containing pairs x,f(x) to training data for a\n learner. 
For each row x with non-zero entries, copy that row\n to Y, and and also append a one-hot representation of x to the\n corresponding row of X.\n \"\"\"\n assert db.isTypeless(),'cannot run loadMatrix on database with defined types'\n functorToLearn = declare.asMode(functorToLearn)\n xrows = []\n yrows = []\n m = db.matEncoding[(functorInDB,2)].tocoo()\n n = db.dim()\n for i in range(len(m.data)):\n x = m.row[i]\n xrows.append(SS.csr_matrix( ([1.0],([0],[x])), shape=(1,n) ))\n rx = m.getrow(x)\n yrows.append(rx * (1.0/rx.sum()))\n return Dataset({functorToLearn:mutil.stack(xrows)},{functorToLearn:mutil.stack(yrows)})\n\n @staticmethod\n def _parseLine(line,proppr=True):\n #returns mode, x, positive y's where x and ys are symbols\n if not line.strip() or line[0]=='#':\n return None,None,None\n parts = line.strip().split(\"\\t\")\n if not proppr:\n assert len(parts)>=2, 'bad line: %r parts %r' % (line,parts)\n return declare.asMode(parts[0]+\"/io\"),parts[1],parts[2:]\n else:\n regex = re.compile('(\\w+)\\((\\w+),(\\w+)\\)')\n mx = regex.search(parts[0])\n if not mx:\n return None,None,None\n else:\n mode = declare.asMode(mx.group(1)+\"/io\")\n x = mx.group(2)\n pos = []\n for ans in parts[1:]:\n label = ans[0]\n my = regex.search(ans[1:])\n assert my,'problem at line '+line\n assert my.group(1)==mode.functor,'mismatched modes %s %s at line %s' % (my.group(1),mode,line)\n assert my.group(2)==x,'mismatched x\\'s at line '+line\n if label=='+':\n pos.append(my.group(3))\n return mode,x,pos\n\n @staticmethod\n def loadProPPRExamples(db,fileName):\n \"\"\"Convert a proppr-style foo.examples file to a two dictionaries of\n modename->matrix pairs, one for the Xs, one for the Ys\"\"\"\n return Dataset.loadExamples(db,fileName,proppr=True)\n\n @staticmethod\n def loadExamples(db,fileName,proppr=False):\n \"\"\"Convert foo.exam file, where each line is of the form\n\n functor <TAB> x <TAB> y1 ... 
yk\n\n to two dictionaries of modename->matrix pairs, one for the Xs,\n one for the Ys.\n\n \"\"\"\n logging.info('loading examples from '+ str(fileName))\n\n # map from relation to lists that buffer data,row\n # index,colindex information for each of the X,Y matrices\n xDatabuf = collections.defaultdict(list)\n xRowbuf = collections.defaultdict(list)\n xColbuf = collections.defaultdict(list)\n yDatabuf = collections.defaultdict(list)\n yRowbuf = collections.defaultdict(list)\n yColbuf = collections.defaultdict(list)\n xsResult = {}\n ysResult = {}\n def getId(typeName,symbol):\n s = symbol if db.schema.hasId(typeName,symbol) else matrixdb.OOV_ENTITY_NAME\n return db.schema.getId(typeName,s)\n for line in util.linesIn(fileName):\n pred,x,ys = Dataset._parseLine(line,proppr=proppr)\n if pred:\n xType = db.schema.getDomain(pred.getFunctor(),2)\n yType = db.schema.getRange(pred.getFunctor(),2)\n row_index = len(xDatabuf[pred])\n xDatabuf[pred].append(1.0)\n xRowbuf[pred].append(row_index)\n xColbuf[pred].append(getId(xType,x))\n for y in ys:\n yDatabuf[pred].append( 1.0/len(ys) if conf.normalize_outputs else 1.0)\n yRowbuf[pred].append(row_index)\n yColbuf[pred].append(getId(yType,y))\n for pred in list(xDatabuf.keys()):\n xType = db.schema.getDomain(pred.getFunctor(),2)\n yType = db.schema.getRange(pred.getFunctor(),2)\n nrows = len(xDatabuf[pred])\n coo_x = SS.coo_matrix((xDatabuf[pred],(xRowbuf[pred],xColbuf[pred])), shape=(nrows,db.dim(xType)))\n xsResult[pred] = SS.csr_matrix(coo_x,dtype='float32')\n coo_y = SS.coo_matrix((yDatabuf[pred],(yRowbuf[pred],yColbuf[pred])), shape=(nrows,db.dim(yType)))\n ysResult[pred] = SS.csr_matrix(coo_y,dtype='float32')\n dset = Dataset(xsResult,ysResult)\n logging.info('loaded dataset has %d modes and %d non-zeros' % (len(dset.modesToLearn()), dset.size()))\n logging.info('in loaded dataset, example normalization (so sum_{y} score[pred(x,y)] == 1) is %r' % conf.normalize_outputs)\n return dset\n\n #TODO refactor to also save examples in form: 'functor X Y1\n #... Yk'\n def saveProPPRExamples(self,fileName,db,append=False,mode=None):\n \"\"\"Convert X and Y to ProPPR examples and store in a file.\"\"\"\n fp = open(fileName,'a' if append else 'w')\n modeKeys = [mode] if mode else list(self.xDict.keys())\n for mode in modeKeys:\n assert mode in self.yDict, \"No mode '%s' in yDict\" % mode\n functor,arity = mode.getFunctor(),mode.getArity()\n dx = db.matrixAsSymbolDict(self.xDict[mode],db.schema.getDomain(functor,arity))\n dy = db.matrixAsSymbolDict(self.yDict[mode],db.schema.getRange(functor,arity))\n theoryPred = mode.functor\n for i in range(max(dx.keys())+1):\n dix = dx[i]\n diy = dy[i]\n assert len(list(dix.keys()))==1,'X row %d is not onehot: %r' % (i,dix)\n x = list(dix.keys())[0]\n fp.write('%s(%s,Y)' % (theoryPred,x))\n for y in list(diy.keys()):\n fp.write('\\t+%s(%s,%s)' % (theoryPred,x,y))\n fp.write('\\n')\n\nif __name__ == \"__main__\":\n usage = 'usage: python -m dataset.py --serialize foo.cfacts|foo.db bar.exam|bar.examples glob.dset'\n if sys.argv[1]=='--serialize':\n assert len(sys.argv)==5,usage\n dbFile = sys.argv[2]\n examFile = sys.argv[3]\n dsetFile = sys.argv[4]\n if dbFile.endswith(\".cfacts\"):\n db = matrixdb.MatrixDB.loadFile(dbFile)\n elif dbFile.endswith(\".db\"):\n db = matrixdb.MatrixDB.deserialize(dbFile)\n else:\n assert False,usage\n assert examFile.endswith(\".examples\") or examFile.endswith(\".exam\"),usage\n dset = Dataset.loadExamples(db,examFile,proppr=examFile.endswith(\".examples\"))\n dset.serialize(dsetFile)\n"
] | [
[
"tensorflow.Graph",
"tensorflow.nn.softmax",
"numpy.clip",
"tensorflow.cast",
"tensorflow.placeholder",
"tensorflow.global_variables_initializer",
"tensorflow.reset_default_graph",
"tensorflow.train.GradientDescentOptimizer",
"tensorflow.logging.set_verbosity",
"tensorflow.Session",
"tensorflow.trainable_variables",
"tensorflow.argmax"
],
[
"scipy.sparse.csr_matrix",
"numpy.random.shuffle",
"numpy.ones"
]
] |
Mu-L/kedro | [
"a925fd59187a642e124527f0f1097e92ea8d1819"
] | [
"tests/extras/datasets/pandas/test_json_dataset.py"
] | [
"from pathlib import Path, PurePosixPath\n\nimport pandas as pd\nimport pytest\nfrom adlfs import AzureBlobFileSystem\nfrom fsspec.implementations.http import HTTPFileSystem\nfrom fsspec.implementations.local import LocalFileSystem\nfrom gcsfs import GCSFileSystem\nfrom pandas.testing import assert_frame_equal\nfrom s3fs.core import S3FileSystem\n\nfrom kedro.extras.datasets.pandas import JSONDataSet\nfrom kedro.io import DataSetError\nfrom kedro.io.core import PROTOCOL_DELIMITER, Version\n\n\[email protected]\ndef filepath_json(tmp_path):\n return (tmp_path / \"test.json\").as_posix()\n\n\[email protected]\ndef json_data_set(filepath_json, load_args, save_args, fs_args):\n return JSONDataSet(\n filepath=filepath_json,\n load_args=load_args,\n save_args=save_args,\n fs_args=fs_args,\n )\n\n\[email protected]\ndef versioned_json_data_set(filepath_json, load_version, save_version):\n return JSONDataSet(\n filepath=filepath_json, version=Version(load_version, save_version)\n )\n\n\[email protected]\ndef dummy_dataframe():\n return pd.DataFrame({\"col1\": [1, 2], \"col2\": [4, 5], \"col3\": [5, 6]})\n\n\nclass TestJSONDataSet:\n def test_save_and_load(self, json_data_set, dummy_dataframe):\n \"\"\"Test saving and reloading the data set.\"\"\"\n json_data_set.save(dummy_dataframe)\n reloaded = json_data_set.load()\n assert_frame_equal(dummy_dataframe, reloaded)\n\n def test_exists(self, json_data_set, dummy_dataframe):\n \"\"\"Test `exists` method invocation for both existing and\n nonexistent data set.\"\"\"\n assert not json_data_set.exists()\n json_data_set.save(dummy_dataframe)\n assert json_data_set.exists()\n\n @pytest.mark.parametrize(\n \"load_args\", [{\"k1\": \"v1\", \"index\": \"value\"}], indirect=True\n )\n def test_load_extra_params(self, json_data_set, load_args):\n \"\"\"Test overriding the default load arguments.\"\"\"\n for key, value in load_args.items():\n assert json_data_set._load_args[key] == value\n\n @pytest.mark.parametrize(\n \"save_args\", [{\"k1\": \"v1\", \"index\": \"value\"}], indirect=True\n )\n def test_save_extra_params(self, json_data_set, save_args):\n \"\"\"Test overriding the default save arguments.\"\"\"\n for key, value in save_args.items():\n assert json_data_set._save_args[key] == value\n\n @pytest.mark.parametrize(\n \"load_args,save_args\",\n [\n ({\"storage_options\": {\"a\": \"b\"}}, {}),\n ({}, {\"storage_options\": {\"a\": \"b\"}}),\n ({\"storage_options\": {\"a\": \"b\"}}, {\"storage_options\": {\"x\": \"y\"}}),\n ],\n )\n def test_storage_options_dropped(self, load_args, save_args, caplog, tmp_path):\n filepath = str(tmp_path / \"test.csv\")\n\n ds = JSONDataSet(filepath=filepath, load_args=load_args, save_args=save_args)\n\n records = [r for r in caplog.records if r.levelname == \"WARNING\"]\n expected_log_message = (\n f\"Dropping 'storage_options' for {filepath}, \"\n f\"please specify them under 'fs_args' or 'credentials'.\"\n )\n assert records[0].getMessage() == expected_log_message\n assert \"storage_options\" not in ds._save_args\n assert \"storage_options\" not in ds._load_args\n\n def test_load_missing_file(self, json_data_set):\n \"\"\"Check the error when trying to load missing file.\"\"\"\n pattern = r\"Failed while loading data from data set JSONDataSet\\(.*\\)\"\n with pytest.raises(DataSetError, match=pattern):\n json_data_set.load()\n\n @pytest.mark.parametrize(\n \"filepath,instance_type,credentials,load_path\",\n [\n (\"s3://bucket/file.json\", S3FileSystem, {}, \"s3://bucket/file.json\"),\n (\"file:///tmp/test.json\", 
LocalFileSystem, {}, \"/tmp/test.json\"),\n (\"/tmp/test.json\", LocalFileSystem, {}, \"/tmp/test.json\"),\n (\"gcs://bucket/file.json\", GCSFileSystem, {}, \"gcs://bucket/file.json\"),\n (\n \"https://example.com/file.json\",\n HTTPFileSystem,\n {},\n \"https://example.com/file.json\",\n ),\n (\n \"abfs://bucket/file.csv\",\n AzureBlobFileSystem,\n {\"account_name\": \"test\", \"account_key\": \"test\"},\n \"abfs://bucket/file.csv\",\n ),\n ],\n )\n def test_protocol_usage(\n self, filepath, instance_type, credentials, load_path, mocker\n ):\n data_set = JSONDataSet(filepath=filepath, credentials=credentials)\n assert isinstance(data_set._fs, instance_type)\n\n path = filepath.split(PROTOCOL_DELIMITER, 1)[-1]\n\n assert str(data_set._filepath) == path\n assert isinstance(data_set._filepath, PurePosixPath)\n\n mock_pandas_call = mocker.patch(\"pandas.read_json\")\n data_set.load()\n assert mock_pandas_call.call_count == 1\n assert mock_pandas_call.call_args_list[0][0][0] == load_path\n\n def test_catalog_release(self, mocker):\n fs_mock = mocker.patch(\"fsspec.filesystem\").return_value\n filepath = \"test.json\"\n data_set = JSONDataSet(filepath=filepath)\n data_set.release()\n fs_mock.invalidate_cache.assert_called_once_with(filepath)\n\n\nclass TestJSONDataSetVersioned:\n def test_version_str_repr(self, load_version, save_version):\n \"\"\"Test that version is in string representation of the class instance\n when applicable.\"\"\"\n filepath = \"test.json\"\n ds = JSONDataSet(filepath=filepath)\n ds_versioned = JSONDataSet(\n filepath=filepath, version=Version(load_version, save_version)\n )\n assert filepath in str(ds)\n assert \"version\" not in str(ds)\n\n assert filepath in str(ds_versioned)\n ver_str = f\"version=Version(load={load_version}, save='{save_version}')\"\n assert ver_str in str(ds_versioned)\n assert \"JSONDataSet\" in str(ds_versioned)\n assert \"JSONDataSet\" in str(ds)\n assert \"protocol\" in str(ds_versioned)\n assert \"protocol\" in str(ds)\n\n def test_save_and_load(self, versioned_json_data_set, dummy_dataframe):\n \"\"\"Test that saved and reloaded data matches the original one for\n the versioned data set.\"\"\"\n versioned_json_data_set.save(dummy_dataframe)\n reloaded_df = versioned_json_data_set.load()\n assert_frame_equal(dummy_dataframe, reloaded_df)\n\n def test_no_versions(self, versioned_json_data_set):\n \"\"\"Check the error if no versions are available for load.\"\"\"\n pattern = r\"Did not find any versions for JSONDataSet\\(.+\\)\"\n with pytest.raises(DataSetError, match=pattern):\n versioned_json_data_set.load()\n\n def test_exists(self, versioned_json_data_set, dummy_dataframe):\n \"\"\"Test `exists` method invocation for versioned data set.\"\"\"\n assert not versioned_json_data_set.exists()\n versioned_json_data_set.save(dummy_dataframe)\n assert versioned_json_data_set.exists()\n\n def test_prevent_overwrite(self, versioned_json_data_set, dummy_dataframe):\n \"\"\"Check the error when attempting to override the data set if the\n corresponding hdf file for a given save version already exists.\"\"\"\n versioned_json_data_set.save(dummy_dataframe)\n pattern = (\n r\"Save path \\'.+\\' for JSONDataSet\\(.+\\) must \"\n r\"not exist if versioning is enabled\\.\"\n )\n with pytest.raises(DataSetError, match=pattern):\n versioned_json_data_set.save(dummy_dataframe)\n\n @pytest.mark.parametrize(\n \"load_version\", [\"2019-01-01T23.59.59.999Z\"], indirect=True\n )\n @pytest.mark.parametrize(\n \"save_version\", [\"2019-01-02T00.00.00.000Z\"], 
indirect=True\n )\n def test_save_version_warning(\n self, versioned_json_data_set, load_version, save_version, dummy_dataframe\n ):\n \"\"\"Check the warning when saving to the path that differs from\n the subsequent load path.\"\"\"\n pattern = (\n rf\"Save version '{save_version}' did not match load version \"\n rf\"'{load_version}' for JSONDataSet\\(.+\\)\"\n )\n with pytest.warns(UserWarning, match=pattern):\n versioned_json_data_set.save(dummy_dataframe)\n\n def test_http_filesystem_no_versioning(self):\n pattern = r\"HTTP\\(s\\) DataSet doesn't support versioning\\.\"\n\n with pytest.raises(DataSetError, match=pattern):\n JSONDataSet(\n filepath=\"https://example.com/file.json\", version=Version(None, None)\n )\n\n def test_versioning_existing_dataset(\n self, json_data_set, versioned_json_data_set, dummy_dataframe\n ):\n \"\"\"Check the error when attempting to save a versioned dataset on top of an\n already existing (non-versioned) dataset.\"\"\"\n json_data_set.save(dummy_dataframe)\n assert json_data_set.exists()\n assert json_data_set._filepath == versioned_json_data_set._filepath\n pattern = (\n f\"(?=.*file with the same name already exists in the directory)\"\n f\"(?=.*{versioned_json_data_set._filepath.parent.as_posix()})\"\n )\n with pytest.raises(DataSetError, match=pattern):\n versioned_json_data_set.save(dummy_dataframe)\n\n # Remove non-versioned dataset and try again\n Path(json_data_set._filepath.as_posix()).unlink()\n versioned_json_data_set.save(dummy_dataframe)\n assert versioned_json_data_set.exists()\n"
] | [
[
"pandas.testing.assert_frame_equal",
"pandas.DataFrame"
]
] |
acezen/graph-learn | [
"77bd92f960e4d178a3606444684f7f04c7f5b738"
] | [
"examples/data/cora.py"
] | [
"# Copyright 2020 Alibaba Group Holding Limited. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# =============================================================================\n\"\"\"Preprocess cora dataset and generate node, edge, train, val, test table.\nUsed by GCN, GAT, GraphSage supervised training.\n\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\nimport numpy as np\nimport scipy.sparse as sp\n\nfrom utils import download, extract\n\ndef preprocess(dataset):\n # process node table\n node_table = \"{}/node_table\".format(dataset)\n edge_table = \"{}/edge_table\".format(dataset)\n edge_table_with_self_loop = '{}/edge_table_with_self_loop'.format(dataset)\n train_table = \"{}/train_table\".format(dataset)\n val_table = \"{}/val_table\".format(dataset)\n test_table = \"{}/test_table\".format(dataset)\n\n idx_features_labels = np.genfromtxt(dataset + \"/cora.content\",\n dtype=np.dtype(str))\n if not os.path.exists(edge_table_with_self_loop):\n idx = np.array(idx_features_labels[:, 0], dtype=np.int32)\n\n features = sp.csr_matrix(idx_features_labels[:, 1:-1],\n dtype=np.float32)\n features = feature_normalize(features)\n features = np.array(features.todense())\n labels = encode_label(idx_features_labels[:, -1])\n node_idxs = []\n\n with open(node_table, 'w') as f:\n f.write(\"id:int64\" + \"\\t\" + \"label:int64\" + \"\\t\" + \"feature:string\" + \"\\n\")\n for i in range(idx.shape[0]):\n f.write(str(idx[i]) + \"\\t\" + str(labels[i]) +\n \"\\t\" + str(\":\".join(map(str, features[i]))) + \"\\n\")\n node_idxs.append(str(idx[i]))\n\n with open(train_table, 'w') as f:\n f.write(\"id:int64\" + \"\\t\" + \"weight:float\" + \"\\n\")\n for i in range(140):\n f.write(str(idx[i]) + \"\\t\" + str(1.0) + \"\\n\")\n with open(val_table, 'w') as f:\n f.write(\"id:int64\" + \"\\t\" + \"weight:float\" + \"\\n\")\n for i in range(200, 500):\n f.write(str(idx[i]) + \"\\t\" + str(1.0) + \"\\n\")\n with open(test_table, 'w') as f:\n f.write(\"id:int64\" + \"\\t\" + \"weight:float\" + \"\\n\")\n for i in range(500, 1500):\n f.write(str(idx[i]) + \"\\t\" + str(1.0) + \"\\n\")\n\n # process edge table\n edges = np.genfromtxt(dataset + \"/cora.cites\", dtype=np.int32)\n with open(edge_table, 'w') as f:\n f.write(\"src_id: int64\" + \"\\t\"\n + \"dst_id: int64\" + \"\\t\"\n + \"weight: double\" + \"\\n\")\n for i in range(edges.shape[0]):\n f.write(str(edges[i][0]) + \"\\t\" + str(edges[i][1]) + \"\\t\" + \"0.0\" + \"\\n\")\n\n with open(edge_table_with_self_loop, 'w') as f:\n f.write(\"src_id: int64\" + \"\\t\"\n + \"dst_id: int64\" + \"\\t\"\n + \"weight: double\" + \"\\n\")\n for i in range(edges.shape[0]):\n if edges[i][0] != edges[i][1]:\n f.write(str(edges[i][0]) + \"\\t\" + str(edges[i][1]) + \"\\t\" + \"0.0\" + \"\\n\")\n for idx in node_idxs:\n f.write(idx + '\\t' + idx + '\\t' + '0.0' + '\\n')\n\n print(\"Data Process Done.\")\n return\n print(\"Data {} has exist.\".format(dataset))\n\ndef 
encode_label(labels):\n classes = list(sorted(set(labels)))\n classes_dict = {c: i for i, c in\n enumerate(classes)}\n labels_int64 = np.array(list(map(classes_dict.get, labels)),\n dtype=np.int64)\n return labels_int64\n\ndef feature_normalize(sparse_matrix):\n \"\"\"Normalize sparse matrix feature by row.\n Reference:\n DGL(https://github.com/dmlc/dgl).\n \"\"\"\n row_sum = np.array(sparse_matrix.sum(1))\n row_norm = np.power(row_sum, -1).flatten()\n row_norm[np.isinf(row_norm)] = 0.\n row_matrix_norm = sp.diags(row_norm)\n sparse_matrix = row_matrix_norm.dot(sparse_matrix)\n return sparse_matrix\n\nif __name__ == \"__main__\":\n download('http://graph-learn-dataset.oss-cn-zhangjiakou.aliyuncs.com/cora.zip', 'cora.zip')\n extract('cora.zip', 'cora')\n preprocess('cora')\n"
] | [
[
"numpy.power",
"scipy.sparse.diags",
"numpy.dtype",
"scipy.sparse.csr_matrix",
"numpy.genfromtxt",
"numpy.array",
"numpy.isinf"
]
] |
zjzh/chainer | [
"e9da1423255c58c37be9733f51b158aa9b39dc93",
"e9da1423255c58c37be9733f51b158aa9b39dc93",
"e9da1423255c58c37be9733f51b158aa9b39dc93",
"e9da1423255c58c37be9733f51b158aa9b39dc93",
"e9da1423255c58c37be9733f51b158aa9b39dc93",
"e9da1423255c58c37be9733f51b158aa9b39dc93"
] | [
"tests/chainer_tests/functions_tests/pooling_tests/test_unpooling_2d.py",
"tests/chainerx_tests/unit_tests/routines_tests/test_misc.py",
"chainer/datasets/sub_dataset.py",
"tests/chainer_tests/initializer_tests/test_constant.py",
"tests/chainer_tests/links_tests/rnn_tests/test_link_n_step_lstm.py",
"chainer/links/caffe/caffe_function.py"
] | [
"import unittest\n\nimport numpy\nimport six\n\nimport chainer\nfrom chainer.backends import cuda\nfrom chainer import functions\nfrom chainer import gradient_check\nfrom chainer import testing\nfrom chainer.testing import attr\nfrom chainer_tests.functions_tests.pooling_tests import pooling_nd_helper\n\n\[email protected](*testing.product_dict(\n [\n # we assume insize as (2, 1)\n # standard output size which is estimated with get_deconv_outsize\n # function\n {'cover_all': False, 'outsize': (4, 2)},\n {'cover_all': True, 'outsize': (3, 1)},\n {'cover_all': False, 'outsize': None, 'expected_outsize': (4, 2)},\n {'cover_all': True, 'outsize': None, 'expected_outsize': (3, 1)},\n # another sizes which can be outsize of insize (2, 1)\n {'cover_all': False, 'outsize': (5, 2)},\n {'cover_all': True, 'outsize': (4, 2)},\n ],\n [\n {'dtype': numpy.float16},\n {'dtype': numpy.float32},\n {'dtype': numpy.float64},\n ],\n))\nclass TestUnpooling2D(unittest.TestCase):\n\n def setUp(self):\n self.N = 2\n self.n_channels = 3\n inh, inw = 2, 1\n self.x = pooling_nd_helper.shuffled_linspace(\n (self.N, self.n_channels, inh, inw), self.dtype)\n\n self.ksize = 2\n outh, outw = self.outsize or self.expected_outsize\n self.gy = numpy.random.uniform(\n -1, 1, (self.N, self.n_channels, outh, outw)).astype(self.dtype)\n self.check_backward_options = {'atol': 1e-4, 'rtol': 1e-3}\n self.check_double_backward_options = {}\n if self.dtype == numpy.float16:\n self.check_backward_options = {'atol': 2e-3, 'rtol': 2e-2}\n self.check_double_backward_options = {'atol': 3e-3, 'rtol': 3e-2}\n self.ggx = numpy.random.uniform(\n -1, 1, self.x.shape).astype(self.dtype)\n\n def check_forward(self, x_data):\n x = chainer.Variable(x_data)\n y = functions.unpooling_2d(x, self.ksize, outsize=self.outsize,\n cover_all=self.cover_all)\n self.assertEqual(y.data.dtype, self.dtype)\n y_data = cuda.to_cpu(y.data)\n\n self.assertEqual(self.gy.shape, y_data.shape)\n for i in six.moves.range(self.N):\n for c in six.moves.range(self.n_channels):\n outsize = self.outsize or self.expected_outsize\n assert y_data.shape[2:] == outsize\n if outsize == (5, 2):\n expect = numpy.zeros(outsize, dtype=self.dtype)\n expect[:2, :] = self.x[i, c, 0, 0]\n expect[2:4, :] = self.x[i, c, 1, 0]\n elif outsize == (4, 2):\n expect = numpy.array([\n [self.x[i, c, 0, 0], self.x[i, c, 0, 0]],\n [self.x[i, c, 0, 0], self.x[i, c, 0, 0]],\n [self.x[i, c, 1, 0], self.x[i, c, 1, 0]],\n [self.x[i, c, 1, 0], self.x[i, c, 1, 0]],\n ])\n elif outsize == (3, 1):\n expect = numpy.array([\n [self.x[i, c, 0, 0]],\n [self.x[i, c, 0, 0]],\n [self.x[i, c, 1, 0]],\n ])\n else:\n raise ValueError('Unsupported outsize: {}'.format(outsize))\n testing.assert_allclose(expect, y_data[i, c])\n\n def test_forward_cpu(self):\n self.check_forward(self.x)\n\n @attr.gpu\n def test_forward_gpu(self):\n self.check_forward(cuda.to_gpu(self.x))\n\n def check_backward(self, x_data, y_grad):\n def f(x):\n return functions.unpooling_2d(x, self.ksize, outsize=self.outsize,\n cover_all=self.cover_all)\n gradient_check.check_backward(\n f, x_data, y_grad, dtype=numpy.float64,\n **self.check_backward_options)\n\n def test_backward_cpu(self):\n self.check_backward(self.x, self.gy)\n\n @attr.gpu\n def test_backward_gpu(self):\n self.check_backward(cuda.to_gpu(self.x), cuda.to_gpu(self.gy))\n\n def check_double_backward(self, x_data, y_grad, x_grad_grad,\n use_cudnn='always'):\n def f(x):\n return functions.unpooling_2d(x, self.ksize, outsize=self.outsize,\n cover_all=self.cover_all)\n with 
chainer.using_config('use_cudnn', use_cudnn):\n gradient_check.check_double_backward(\n f, x_data, y_grad, x_grad_grad, dtype=numpy.float64,\n **self.check_double_backward_options)\n\n def test_double_backward_cpu(self):\n self.check_double_backward(\n self.x, self.gy, self.ggx, 'never')\n\n @attr.gpu\n def test_double_backward_gpu(self):\n self.check_double_backward(\n cuda.to_gpu(self.x), cuda.to_gpu(self.gy), cuda.to_gpu(self.ggx))\n\n @attr.gpu\n def test_double_backward_gpu_non_contiguous(self):\n self.check_double_backward(\n cuda.cupy.asfortranarray(cuda.to_gpu(self.x)),\n cuda.cupy.asfortranarray(cuda.to_gpu(self.gy)),\n cuda.cupy.asfortranarray(cuda.to_gpu(self.ggx)))\n\n @attr.gpu\n def test_double_backward_gpu_no_cudnn(self):\n self.check_double_backward(\n cuda.to_gpu(self.x), cuda.to_gpu(self.gy), cuda.to_gpu(self.ggx),\n 'never')\n\n\[email protected](*testing.product_dict(\n [\n {'insize': (2, 1), 'outsize': (4, 2), 'ksize': 2, 'pad': 0},\n {'insize': (4, 5), 'outsize': (4, 6), 'ksize': 2, 'pad': 2},\n ],\n [\n {'dtype': numpy.float16},\n {'dtype': numpy.float32},\n {'dtype': numpy.float64},\n ],\n))\nclass TestIntegerScaleUnpooling2D(unittest.TestCase):\n\n def setUp(self):\n self.N = 2\n self.n_channels = 3\n inh, inw = self.insize\n self.x = pooling_nd_helper.shuffled_linspace(\n (self.N, self.n_channels, inh, inw), self.dtype)\n\n outh, outw = self.outsize or self.expected_outsize\n self.gy = numpy.random.uniform(\n -1, 1, (self.N, self.n_channels, outh, outw)).astype(self.dtype)\n self.check_backward_options = {'atol': 1e-4, 'rtol': 1e-3}\n self.check_double_backward_options = {}\n if self.dtype == numpy.float16:\n self.check_backward_options = {'atol': 2e-3, 'rtol': 2e-2}\n self.check_double_backward_options = {'atol': 3e-3, 'rtol': 3e-2}\n self.ggx = numpy.random.uniform(\n -1, 1, self.x.shape).astype(self.dtype)\n\n def check_forward(self, x_data):\n x = chainer.Variable(x_data)\n y = functions.unpooling_2d(\n x, self.ksize, outsize=self.outsize, pad=self.pad)\n self.assertEqual(y.data.dtype, self.dtype)\n y_data = cuda.to_cpu(y.data)\n\n self.assertEqual(self.gy.shape, y_data.shape)\n for i in six.moves.range(self.N):\n for c in six.moves.range(self.n_channels):\n outsize = self.outsize or self.expected_outsize\n assert y_data.shape[2:] == outsize\n if outsize == (4, 2):\n expect = numpy.array([\n [self.x[i, c, 0, 0], self.x[i, c, 0, 0]],\n [self.x[i, c, 0, 0], self.x[i, c, 0, 0]],\n [self.x[i, c, 1, 0], self.x[i, c, 1, 0]],\n [self.x[i, c, 1, 0], self.x[i, c, 1, 0]],\n ])\n elif outsize == (4, 6):\n expect = numpy.array([\n [self.x[i, c, 1, 1], self.x[i, c, 1, 1],\n self.x[i, c, 1, 2], self.x[i, c, 1, 2],\n self.x[i, c, 1, 3], self.x[i, c, 1, 3]],\n [self.x[i, c, 1, 1], self.x[i, c, 1, 1],\n self.x[i, c, 1, 2], self.x[i, c, 1, 2],\n self.x[i, c, 1, 3], self.x[i, c, 1, 3]],\n [self.x[i, c, 2, 1], self.x[i, c, 2, 1],\n self.x[i, c, 2, 2], self.x[i, c, 2, 2],\n self.x[i, c, 2, 3], self.x[i, c, 2, 3]],\n [self.x[i, c, 2, 1], self.x[i, c, 2, 1],\n self.x[i, c, 2, 2], self.x[i, c, 2, 2],\n self.x[i, c, 2, 3], self.x[i, c, 2, 3]],\n ])\n else:\n raise ValueError('Unsupported outsize: {}'.format(outsize))\n testing.assert_allclose(expect, y_data[i, c])\n\n def test_forward_cpu(self):\n self.check_forward(self.x)\n\n @attr.gpu\n def test_forward_gpu(self):\n self.check_forward(cuda.to_gpu(self.x))\n\n def check_backward(self, x_data, y_grad):\n def f(x):\n return functions.unpooling_2d(x, self.ksize, outsize=self.outsize,\n pad=self.pad)\n gradient_check.check_backward(\n 
f, x_data, y_grad, dtype=numpy.float64,\n **self.check_backward_options)\n\n def test_backward_cpu(self):\n self.check_backward(self.x, self.gy)\n\n @attr.gpu\n def test_backward_gpu(self):\n self.check_backward(cuda.to_gpu(self.x), cuda.to_gpu(self.gy))\n\n def check_double_backward(self, x_data, y_grad, x_grad_grad,\n use_cudnn='always'):\n def f(x):\n return functions.unpooling_2d(x, self.ksize, outsize=self.outsize,\n pad=self.pad)\n with chainer.using_config('use_cudnn', use_cudnn):\n gradient_check.check_double_backward(\n f, x_data, y_grad, x_grad_grad, dtype=numpy.float64,\n **self.check_double_backward_options)\n\n def test_double_backward_cpu(self):\n self.check_double_backward(\n self.x, self.gy, self.ggx, 'never')\n\n @attr.gpu\n def test_double_backward_gpu(self):\n self.check_double_backward(\n cuda.to_gpu(self.x), cuda.to_gpu(self.gy), cuda.to_gpu(self.ggx))\n\n @attr.gpu\n def test_double_backward_gpu_non_contiguous(self):\n self.check_double_backward(\n cuda.cupy.asfortranarray(cuda.to_gpu(self.x)),\n cuda.cupy.asfortranarray(cuda.to_gpu(self.gy)),\n cuda.cupy.asfortranarray(cuda.to_gpu(self.ggx)))\n\n @attr.gpu\n def test_double_backward_gpu_no_cudnn(self):\n self.check_double_backward(\n cuda.to_gpu(self.x), cuda.to_gpu(self.gy), cuda.to_gpu(self.ggx),\n 'never')\n\n\[email protected](*testing.product({\n 'dtype': [numpy.float16, numpy.float32, numpy.float64],\n 'h': [5],\n 'k': [3],\n 's': [3],\n 'p': [0],\n 'cover_all': [True, False],\n}))\nclass TestMaxPoolingUnpooling(unittest.TestCase):\n\n def check_left_inverse(self, xp, use_cudnn='never'):\n x = xp.arange(self.h * self.h).reshape(\n (1, 1, self.h, self.h)).astype(self.dtype)\n with chainer.using_config('use_cudnn', use_cudnn):\n y = chainer.functions.unpooling_2d(\n x, self.k, self.s, self.p, None, self.cover_all)\n x_ = chainer.functions.max_pooling_2d(\n y, self.k, self.s, self.p, self.cover_all).data\n\n self.assertEqual(x.shape, x_.shape)\n self.assertEqual(x.dtype, x_.dtype)\n chainer.testing.assert_allclose(x, x_)\n\n def test_left_inverse_cpu(self):\n self.check_left_inverse(numpy)\n\n @attr.gpu\n def test_left_inverse_cupy(self):\n self.check_left_inverse(cuda.cupy)\n\n @attr.gpu\n def test_left_inverse_cudnn(self):\n self.check_left_inverse(cuda.cupy, 'always')\n\n\[email protected](*testing.product({\n 'dtype': [numpy.float16, numpy.float32, numpy.float64],\n 'h': [5],\n 'k': [3],\n 's': [3],\n 'p': [0],\n}))\nclass TestAveragePoolingUnpooling(unittest.TestCase):\n\n def check_left_inverse(self, xp, use_cudnn='never'):\n x = xp.arange(self.h * self.h).reshape(\n (1, 1, self.h, self.h)).astype(self.dtype)\n with chainer.using_config('use_cudnn', use_cudnn):\n # average_pooling_2d does not have cover_all option\n # as max_pooling_2d has.\n y = chainer.functions.unpooling_2d(\n x, self.k, self.s, self.p, None, False)\n x_ = chainer.functions.average_pooling_2d(\n y, self.k, self.s, self.p).data\n\n self.assertEqual(x.shape, x_.shape)\n self.assertEqual(x.dtype, x_.dtype)\n chainer.testing.assert_allclose(x, x_)\n\n def test_left_inverse_cpu(self):\n self.check_left_inverse(numpy)\n\n @attr.gpu\n def test_left_inverse_cupy(self):\n self.check_left_inverse(cuda.cupy)\n\n @attr.gpu\n def test_left_inverse_cudnn(self):\n self.check_left_inverse(cuda.cupy, 'always')\n\n\ntesting.run_module(__name__, __file__)\n",
"import chainer\nimport numpy\nimport pytest\n\nimport chainerx\nimport chainerx.testing\n\nfrom chainerx_tests import array_utils\nfrom chainerx_tests import dtype_utils\nfrom chainerx_tests import math_utils\nfrom chainerx_tests import op_utils\n\n\n@op_utils.op_test(['native:0', 'cuda:0'])\[email protected](*(\n # Special shapes\n chainer.testing.product({\n 'shape': [(), (1,), (1, 1, 1), (2, 3)],\n 'in_dtypes,out_dtype': math_utils.in_out_dtypes_math_functions,\n 'input': [1, 3],\n })\n # Special shapes (array.size = 0)\n + chainer.testing.product({\n 'shape': [(0,), (2, 0, 3)],\n 'in_dtypes,out_dtype': math_utils.in_out_dtypes_math_functions,\n 'input': [1, 3],\n 'check_numpy_strides_compliance': [False],\n })\n # Special values\n + chainer.testing.product({\n 'shape': [(2, 3)],\n 'in_dtypes,out_dtype': math_utils.in_out_float_dtypes_math_functions,\n 'input': [float('inf'), -float('inf'), float('nan'), -1, 0],\n 'skip_backward_test': [True],\n 'skip_double_backward_test': [True],\n })\n))\nclass TestSqrt(math_utils.UnaryMathTestBase, op_utils.NumpyOpTest):\n\n def func(self, xp, a):\n return xp.sqrt(a)\n\n\n@op_utils.op_test(['native:0', 'cuda:0'])\[email protected](*(\n # Special shapes\n chainer.testing.product({\n 'shape': [(), (0,), (1,), (2, 0, 3), (1, 1, 1), (2, 3)],\n 'in_dtypes,out_dtype': dtype_utils.make_same_in_out_dtypes(\n 1, chainerx.testing.numeric_dtypes),\n 'input': ['random'],\n 'contiguous': [None, 'C'],\n })\n # Special values\n + chainer.testing.product({\n 'shape': [(2, 3)],\n 'in_dtypes,out_dtype': dtype_utils.make_same_in_out_dtypes(\n 1, chainerx.testing.float_dtypes),\n 'input': [float('inf'), -float('inf'), float('nan')],\n 'skip_backward_test': [True],\n 'skip_double_backward_test': [True],\n })\n))\nclass TestSquare(math_utils.UnaryMathTestBase, op_utils.NumpyOpTest):\n\n def func(self, xp, a):\n return xp.square(a)\n\n\[email protected]_device(['native:0', 'cuda:0'])\ndef test_square_invalid_dtypes(device):\n shape = (3, 2)\n bool_array = chainerx.array(array_utils.uniform(shape, 'bool_'))\n with pytest.raises(chainerx.DtypeError):\n chainerx.square(bool_array)\n\n\n@op_utils.op_test(['native:0', 'cuda:0'])\[email protected](*(\n chainer.testing.product({\n 'shape': [(), (0,), (1,), (2, 0, 3), (1, 1, 1), (2, 3)],\n 'in_dtypes,out_dtype': math_utils.in_out_float_dtypes_math_functions,\n 'input': ['random'],\n 'contiguous': [None, 'C'],\n 'is_module': [True, False],\n })\n + chainer.testing.product({\n 'shape': [(2, 3)],\n 'in_dtypes,out_dtype': math_utils.in_out_float_dtypes_math_functions,\n 'input': [float('inf'), -float('inf'), float('nan')],\n 'skip_backward_test': [True],\n 'skip_double_backward_test': [True],\n 'is_module': [True, False],\n })\n))\nclass TestAbs(math_utils.UnaryMathTestBase, op_utils.NumpyOpTest):\n\n dodge_nondifferentiable = True\n\n def func(self, xp, a):\n # Check correct alias.\n assert chainerx.abs is chainerx.absolute\n\n # Check computed result.\n if self.is_module:\n return xp.abs(a)\n else:\n return abs(a)\n\n\n@op_utils.op_test(['native:0', 'cuda:0'])\[email protected](*(\n # Special shapes\n chainer.testing.product({\n 'shape': [(), (0,), (1,), (2, 0, 3), (1, 1, 1), (2, 3)],\n 'in_dtypes,out_dtype': math_utils.in_out_dtypes_math_functions,\n 'input': [-2.5, -1.5, -0.1, 0.1, 1.5, 2.5],\n 'contiguous': [None, 'C'],\n })\n # Special values\n + chainer.testing.product({\n 'shape': [(2, 3)],\n 'in_dtypes,out_dtype': math_utils.in_out_float_dtypes_math_functions,\n 'input': [float('inf'), -float('inf'), float('nan')],\n 
'skip_backward_test': [True],\n 'skip_double_backward_test': [True],\n })\n))\nclass TestFabs(math_utils.UnaryMathTestBase, op_utils.NumpyOpTest):\n\n def func(self, xp, a):\n return xp.fabs(a)\n\n\[email protected]_chainerx_array_equal()\[email protected]_device(['native:0', 'cuda:0'])\[email protected]('input', [\n numpy.asarray(0.5),\n numpy.asarray(-1.2),\n numpy.asarray(10.9),\n numpy.asarray(-10.6),\n numpy.asarray(0.),\n numpy.asarray(float('inf')),\n numpy.asarray(-float('inf')),\n numpy.asarray(float('nan')),\n numpy.full((), 2.1),\n numpy.full((0,), 2),\n numpy.full((2, 3), 0),\n numpy.full((2, 3), 2.6),\n numpy.full((1, 1), -1.01),\n numpy.full((1, 1), 1.99),\n])\[email protected]('dtypes', [\n (('int8',), 'int8'),\n (('int16',), 'int16'),\n (('int32',), 'int32'),\n (('int64',), 'int64'),\n (('float16',), 'float16'),\n (('float32',), 'float32'),\n (('float64',), 'float64'),\n])\ndef test_sign(xp, device, input, dtypes):\n (in_dtype, ), out_dtype = dtypes\n a = xp.array(input.astype(in_dtype))\n return xp.sign(a)\n\n\n@op_utils.op_test(['native:0', 'cuda:0'])\[email protected](*(\n # Special shapes\n chainer.testing.product({\n 'in_shapes': math_utils.shapes_combination_binary,\n 'in_dtypes,out_dtype': (\n dtype_utils.make_same_in_out_dtypes(\n 2, chainerx.testing.all_dtypes)),\n 'input_lhs': ['random'],\n 'input_rhs': ['random'],\n 'is_module': [False],\n })\n # Dtype combinations\n + chainer.testing.product({\n 'in_shapes': [((2, 3), (2, 3))],\n 'in_dtypes,out_dtype': dtype_utils.result_comparable_dtypes_two_arrays,\n 'input_lhs': ['random'],\n 'input_rhs': ['random'],\n 'is_module': [False],\n })\n # is_module\n + chainer.testing.product({\n 'in_shapes': [((2, 3), (2, 3))],\n 'in_dtypes,out_dtype': (\n dtype_utils.make_same_in_out_dtypes(\n 2, chainerx.testing.all_dtypes)),\n 'input_lhs': ['random'],\n 'input_rhs': ['random'],\n 'is_module': [True, False],\n })\n # TODO(aksub99): Add tests for inf and NaN.\n))\nclass TestMaximum(math_utils.BinaryMathTestBase, op_utils.NumpyOpTest):\n\n def generate_inputs(self):\n for _ in range(10):\n a, b = super().generate_inputs()\n if ((a.dtype.kind in 'biu' and b.dtype.kind in 'biu') or\n (numpy.abs(a - b) > 0.01).all()):\n return a, b\n assert False, 'Couldn\\'t construct a test case.'\n\n def func(self, xp, a, b):\n return xp.maximum(a, b)\n\n\[email protected]_device(['native:0', 'cuda:0'])\[email protected]('dtype', chainerx.testing.numeric_dtypes)\ndef test_maximum_invalid_dtypes(device, dtype):\n shape = (3, 2)\n bool_array = chainerx.array(array_utils.uniform(shape, 'bool_'))\n numeric_array = chainerx.array(array_utils.uniform(shape, dtype))\n with pytest.raises(chainerx.DtypeError):\n chainerx.maximum(bool_array, numeric_array)\n with pytest.raises(chainerx.DtypeError):\n chainerx.maximum(numeric_array, bool_array)\n\n\n@op_utils.op_test(['native:0', 'cuda:0'])\[email protected](*(\n # Special shapes\n chainer.testing.product({\n 'shape': [(), (0,), (1,), (2, 0, 3), (1, 1, 1), (2, 3)],\n 'in_dtypes,scalar_type,out_dtype': (\n dtype_utils.result_comparable_dtypes_array_scalar),\n 'input': ['random'],\n 'scalar_value': [0, 1],\n 'is_scalar_rhs': [False],\n })\n # Differentiable cases\n + chainer.testing.product({\n 'in_dtypes,scalar_type,out_dtype': (\n dtype_utils.result_comparable_dtypes_array_scalar),\n 'input': [numpy.array([1, 3, 3, 4])],\n 'scalar_value': [0, 2, 5],\n 'is_scalar_rhs': [False, True],\n })\n # Non-differentiable cases\n + chainer.testing.product({\n 'in_dtypes,scalar_type,out_dtype': (\n 
dtype_utils.result_comparable_dtypes_array_scalar),\n 'input': [numpy.array([1, 3, 3, 4])],\n 'scalar_value': [1, 3, 4],\n 'is_scalar_rhs': [False, True],\n 'skip_backward_test': [True],\n 'skip_double_backward_test': [True],\n })\n # Special float values\n + chainer.testing.product({\n 'in_dtypes,scalar_type,out_dtype': (\n dtype_utils.result_float_dtypes_array_scalar),\n # TODO(imanishi): Add test for NaN.\n 'input': [numpy.array([0, float('inf'), -float('inf')])],\n 'scalar_value': [-1, 0, 1, float('inf'), -float('inf')],\n 'is_scalar_rhs': [False],\n 'skip_backward_test': [True],\n 'skip_double_backward_test': [True],\n })\n))\nclass TestMaximumScalar(math_utils.MathScalarTestBase, op_utils.NumpyOpTest):\n\n dodge_nondifferentiable = True\n\n def func_scalar(self, xp, a, scalar):\n if self.is_scalar_rhs:\n return xp.maximum(a, scalar)\n else:\n return xp.maximum(scalar, a)\n\n\n@op_utils.op_test(['native:0', 'cuda:0'])\[email protected](*(\n # Special shapes\n chainer.testing.product({\n 'in_shapes': math_utils.shapes_combination_binary,\n 'in_dtypes,out_dtype': (\n dtype_utils.make_same_in_out_dtypes(\n 2, chainerx.testing.numeric_dtypes)),\n 'input_lhs': ['random'],\n 'input_rhs': ['random'],\n 'is_module': [False],\n })\n # Dtype combinations\n + chainer.testing.product({\n 'in_shapes': [((2, 3), (2, 3))],\n 'in_dtypes,out_dtype': dtype_utils.result_comparable_dtypes_two_arrays,\n 'input_lhs': ['random'],\n 'input_rhs': ['random'],\n 'is_module': [False],\n })\n # is_module\n + chainer.testing.product({\n 'in_shapes': [((2, 3), (2, 3))],\n 'in_dtypes,out_dtype': (\n dtype_utils.make_same_in_out_dtypes(\n 2, chainerx.testing.all_dtypes)),\n 'input_lhs': ['random'],\n 'input_rhs': ['random'],\n 'is_module': [True, False],\n })\n # TODO(aksub99): Add tests for inf and NaN.\n))\nclass TestMinimum(math_utils.BinaryMathTestBase, op_utils.NumpyOpTest):\n\n def generate_inputs(self):\n for _ in range(10):\n a, b = super().generate_inputs()\n if ((a.dtype.kind in 'biu' and b.dtype.kind in 'biu') or\n (numpy.abs(a - b) > 0.01).all()):\n return a, b\n assert False, 'Couldn\\'t construct a test case.'\n\n def func(self, xp, a, b):\n return xp.minimum(a, b)\n\n\[email protected]_device(['native:0', 'cuda:0'])\[email protected]('dtype', chainerx.testing.numeric_dtypes)\ndef test_minimum_invalid_dtypes(device, dtype):\n shape = (3, 2)\n bool_array = chainerx.array(array_utils.uniform(shape, 'bool_'))\n numeric_array = chainerx.array(array_utils.uniform(shape, dtype))\n with pytest.raises(chainerx.DtypeError):\n chainerx.minimum(bool_array, numeric_array)\n with pytest.raises(chainerx.DtypeError):\n chainerx.minimum(numeric_array, bool_array)\n\n\n@op_utils.op_test(['native:0', 'cuda:0'])\[email protected](*(\n # Special shapes\n chainer.testing.product({\n 'shape': [(), (0,), (1,), (2, 0, 3), (1, 1, 1), (2, 3)],\n 'in_dtypes,scalar_type,out_dtype': (\n dtype_utils.result_comparable_dtypes_array_scalar),\n 'input': ['random'],\n 'scalar_value': [1],\n 'is_scalar_rhs': [False],\n })\n # Differentiable cases\n + chainer.testing.product({\n 'in_dtypes,scalar_type,out_dtype': (\n dtype_utils.result_comparable_dtypes_array_scalar),\n 'input': [numpy.array([1, 3, 3, 4])],\n 'scalar_value': [0, 2, 5],\n 'is_scalar_rhs': [False, True],\n })\n # Non-differentiable cases\n + chainer.testing.product({\n 'in_dtypes,scalar_type,out_dtype': (\n dtype_utils.result_comparable_dtypes_array_scalar),\n 'input': [numpy.array([1, 3, 3, 4])],\n 'scalar_value': [1, 3, 4],\n 'is_scalar_rhs': [False, True],\n 
'skip_backward_test': [True],\n 'skip_double_backward_test': [True],\n })\n # Special float values\n + chainer.testing.product({\n 'in_dtypes,scalar_type,out_dtype': (\n dtype_utils.result_float_dtypes_array_scalar),\n # TODO(imanishi): Add test for NaN.\n 'input': [numpy.array([0, float('inf'), -float('inf')])],\n 'scalar_value': [-1, 0, 1, float('inf'), -float('inf')],\n 'is_scalar_rhs': [False],\n 'skip_backward_test': [True],\n 'skip_double_backward_test': [True],\n })\n))\nclass TestMinimumScalar(math_utils.MathScalarTestBase, op_utils.NumpyOpTest):\n\n dodge_nondifferentiable = True\n\n def func_scalar(self, xp, a, scalar):\n if self.is_scalar_rhs:\n return xp.minimum(a, scalar)\n else:\n return xp.minimum(scalar, a)\n",
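# Hedged illustration, not part of the test file above: the element-wise ops these
# tests exercise (sqrt, square, the abs/absolute alias, maximum/minimum with array
# or scalar arguments), called directly on chainerx arrays. Assumes a chainerx
# build with the native backend; the toy inputs are hypothetical.
import chainerx as chx

a = chx.array([1.0, -2.0, 3.0])
b = chx.array([0.5, 0.5, 4.0])
print(chx.sqrt(chx.square(a)))   # -> [1., 2., 3.]
assert chx.abs is chx.absolute   # the alias checked in TestAbs
print(chx.maximum(a, 0))         # array-scalar form, as in TestMaximumScalar
print(chx.minimum(a, b))         # array-array form, as in TestMinimum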
"import numpy\nimport six\nimport warnings\nfrom chainer.dataset import dataset_mixin\n\n\nclass SubDataset(dataset_mixin.DatasetMixin):\n\n \"\"\"Subset of a base dataset.\n\n SubDataset defines a subset of a given base dataset. The subset is defined\n as an interval of indexes, optionally with a given permutation.\n\n If ``order`` is given, then the ``i``-th example of this dataset is the\n ``order[start + i]``-th example of the base dataset, where ``i`` is a\n non-negative integer. If ``order`` is not given, then the ``i``-th example\n of this dataset is the ``start + i``-th example of the base dataset.\n Negative indexing is also allowed: in this case, the term ``start + i`` is\n replaced by ``finish + i``.\n\n SubDataset is often used to split a dataset into training and validation\n subsets. The training set is used for training, while the validation set is\n used to track the generalization performance, i.e. how the learned model\n works well on unseen data. We can tune hyperparameters (e.g. number of\n hidden units, weight initializers, learning rate, etc.) by comparing the\n validation performance. Note that we often use another set called test set\n to measure the quality of the tuned hyperparameter, which can be made by\n nesting multiple SubDatasets.\n\n There are two ways to make training-validation splits. One is a single\n split, where the dataset is split just into two subsets. It can be done by\n :func:`split_dataset` or :func:`split_dataset_random`. The other one is a\n :math:`k`-fold cross validation, in which the dataset is divided into\n :math:`k` subsets, and :math:`k` different splits are generated using each\n of the :math:`k` subsets as a validation set and the rest as a training\n set. It can be done by :func:`get_cross_validation_datasets`.\n\n Args:\n dataset: Base dataset.\n start (int): The first index in the interval.\n finish (int): The next-to-the-last index in the interval.\n order (sequence of ints): Permutation of indexes in the base dataset.\n If this is ``None``, then the ascending order of indexes is used.\n\n \"\"\"\n\n def __init__(self, dataset, start, finish, order=None):\n if start < 0 or finish > len(dataset):\n raise ValueError('subset overruns the base dataset.')\n self._dataset = dataset\n self._start = start\n self._finish = finish\n self._size = finish - start\n if order is not None and len(order) != len(dataset):\n msg = ('order option must have the same length as the base '\n 'dataset: len(order) = {} while len(dataset) = {}'.format(\n len(order), len(dataset)))\n raise ValueError(msg)\n self._order = order\n\n def __len__(self):\n return self._size\n\n def get_example(self, i):\n if i >= 0:\n if i >= self._size:\n raise IndexError('dataset index out of range')\n index = self._start + i\n else:\n if i < -self._size:\n raise IndexError('dataset index out of range')\n index = self._finish + i\n\n if self._order is not None:\n index = self._order[index]\n return self._dataset[index]\n\n\ndef split_dataset(dataset, split_at, order=None):\n \"\"\"Splits a dataset into two subsets.\n\n This function creates two instances of :class:`SubDataset`. These instances\n do not share any examples, and they together cover all examples of the\n original dataset.\n\n Args:\n dataset: Dataset to split.\n split_at (int): Position at which the base dataset is split.\n order (sequence of ints): Permutation of indexes in the base dataset.\n See the documentation of :class:`SubDataset` for details.\n\n Returns:\n tuple: Two :class:`SubDataset` objects. 
The first subset represents the\n examples of indexes ``order[:split_at]`` while the second subset\n represents the examples of indexes ``order[split_at:]``.\n\n \"\"\"\n n_examples = len(dataset)\n if not isinstance(split_at, (six.integer_types, numpy.integer)):\n raise TypeError('split_at must be int, got {} instead'\n .format(type(split_at)))\n if split_at < 0:\n raise ValueError('split_at must be non-negative')\n if split_at > n_examples:\n raise ValueError('split_at exceeds the dataset size')\n subset1 = SubDataset(dataset, 0, split_at, order)\n subset2 = SubDataset(dataset, split_at, n_examples, order)\n return subset1, subset2\n\n\ndef split_dataset_random(dataset, first_size, seed=None):\n \"\"\"Splits a dataset into two subsets randomly.\n\n This function creates two instances of :class:`SubDataset`. These instances\n do not share any examples, and they together cover all examples of the\n original dataset. The split is automatically done randomly.\n\n Args:\n dataset: Dataset to split.\n first_size (int): Size of the first subset.\n seed (int): Seed the generator used for the permutation of indexes.\n If an integer being convertible to 32 bit unsigned integers is\n specified, it is guaranteed that each sample\n in the given dataset always belongs to a specific subset.\n If ``None``, the permutation is changed randomly.\n\n Returns:\n tuple: Two :class:`SubDataset` objects. The first subset contains\n ``first_size`` examples randomly chosen from the dataset without\n replacement, and the second subset contains the rest of the\n dataset.\n\n \"\"\"\n order = numpy.random.RandomState(seed).permutation(len(dataset))\n return split_dataset(dataset, first_size, order)\n\n\ndef split_dataset_n(dataset, n, order=None):\n \"\"\"Splits a dataset into ``n`` subsets.\n\n Args:\n dataset: Dataset to split.\n n(int): The number of subsets.\n order (sequence of ints): Permutation of indexes in the base dataset.\n See the documentation of :class:`SubDataset` for details.\n\n Returns:\n list: List of ``n`` :class:`SubDataset` objects.\n Each subset contains the examples of indexes\n ``order[i * (len(dataset) // n):(i + 1) * (len(dataset) // n)]``\n .\n\n \"\"\"\n n_examples = len(dataset)\n sub_size = n_examples // n\n return [SubDataset(dataset, sub_size * i, sub_size * (i + 1), order)\n for i in six.moves.range(n)]\n\n\ndef split_dataset_n_random(dataset, n, seed=None):\n \"\"\"Splits a dataset into ``n`` subsets randomly.\n\n Args:\n dataset: Dataset to split.\n n(int): The number of subsets.\n seed (int): Seed the generator used for the permutation of indexes.\n If an integer being convertible to 32 bit unsigned integers is\n specified, it is guaranteed that each sample\n in the given dataset always belongs to a specific subset.\n If ``None``, the permutation is changed randomly.\n\n Returns:\n list: List of ``n`` :class:`SubDataset` objects.\n Each subset contains ``len(dataset) // n`` examples randomly chosen\n from the dataset without replacement.\n\n \"\"\"\n n_examples = len(dataset)\n sub_size = n_examples // n\n order = numpy.random.RandomState(seed).permutation(len(dataset))\n return [SubDataset(dataset, sub_size * i, sub_size * (i + 1), order)\n for i in six.moves.range(n)]\n\n\ndef get_cross_validation_datasets(dataset, n_folds=None, order=None, **kwargs):\n \"\"\"Creates a set of training/test splits for cross validation.\n\n This function generates ``n_folds`` splits of the given dataset. 
The first\n part of each split corresponds to the training dataset, while the second\n part to the test dataset. No pairs of test datasets share any examples, and\n all test datasets together cover the whole base dataset. Each test dataset\n contains almost same number of examples (the numbers may differ up to 1).\n\n Args:\n dataset: Dataset to split.\n n_fold(int): *(deprecated)*\n `n_fold` is now deprecated for consistency of naming choice.\n Please use `n_folds` instead.\n n_folds (int): Number of splits for cross validation.\n order (sequence of ints): Order of indexes with which each split is\n determined. If it is ``None``, then no permutation is used.\n\n Returns:\n list of tuples: List of dataset splits.\n\n \"\"\"\n\n if 'n_fold' in kwargs:\n warnings.warn(\n 'Argument `n_fold` is deprecated. '\n 'Please use `n_folds` instead',\n DeprecationWarning)\n n_folds = kwargs['n_fold']\n if order is None:\n order = numpy.arange(len(dataset))\n else:\n order = numpy.array(order) # copy\n\n whole_size = len(dataset)\n borders = [whole_size * i // n_folds for i in six.moves.range(n_folds + 1)]\n test_sizes = [borders[i + 1] - borders[i]\n for i in six.moves.range(n_folds)]\n\n splits = []\n for test_size in reversed(test_sizes):\n size = whole_size - test_size\n splits.append(split_dataset(dataset, size, order))\n new_order = numpy.empty_like(order)\n new_order[:test_size] = order[-test_size:]\n new_order[test_size:] = order[:-test_size]\n order = new_order\n\n return splits\n\n\ndef get_cross_validation_datasets_random(dataset, n_folds, seed=None,\n **kwargs):\n \"\"\"Creates a set of training/test splits for cross validation randomly.\n\n This function acts almost same as :func:`get_cross_validation_dataset`,\n except automatically generating random permutation.\n\n Args:\n dataset: Dataset to split.\n n_fold (int): *(deprecated)*\n `n_fold` is now deprecated for consistency of naming choice.\n Please use `n_folds` instead.\n n_folds (int): Number of splits for cross validation.\n seed (int): Seed the generator used for the permutation of indexes.\n If an integer beging convertible to 32 bit unsigned integers is\n specified, it is guaranteed that each sample\n in the given dataset always belongs to a specific subset.\n If ``None``, the permutation is changed randomly.\n\n Returns:\n list of tuples: List of dataset splits.\n\n \"\"\"\n if 'n_fold' in kwargs:\n warnings.warn(\n 'Argument `n_fold` is deprecated. '\n 'Please use `n_folds` instead',\n DeprecationWarning)\n n_folds = kwargs['n_fold']\n order = numpy.random.RandomState(seed).permutation(len(dataset))\n return get_cross_validation_datasets(dataset, n_folds, order)\n",
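# Hedged usage sketch, not part of the original sub_dataset.py above: how the
# splitting helpers it defines are typically called. Any sequence with __len__ and
# __getitem__ can serve as the base dataset; the toy list below is hypothetical.
from chainer.datasets import (
    get_cross_validation_datasets, split_dataset_random)

data = list(range(10))
train, valid = split_dataset_random(data, first_size=7, seed=0)
print(len(train), len(valid))    # -> 7 3

for fold, (train_part, test_part) in enumerate(
        get_cross_validation_datasets(data, n_folds=5)):
    print(fold, len(train_part), len(test_part))   # -> 8 / 2 in every fold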
"import unittest\n\nimport numpy\n\nimport chainer\nfrom chainer import backend\nfrom chainer import initializers\nfrom chainer import testing\n\n\[email protected](*testing.product({\n 'dtype': [numpy.float16, numpy.float32, numpy.float64],\n}))\[email protected]_backend_tests(\n None,\n [\n {},\n {'use_ideep': 'always'},\n {'use_cuda': True, 'cuda_device': 0},\n {'use_cuda': True, 'cuda_device': 1},\n {'use_chainerx': True, 'chainerx_device': 'native:0'},\n {'use_chainerx': True, 'chainerx_device': 'cuda:0'},\n {'use_chainerx': True, 'chainerx_device': 'cuda:1'},\n ]\n)\nclass TestIdentity(unittest.TestCase):\n\n scale = 0.1\n shape = (2, 2)\n\n def setUp(self):\n self.check_options = {}\n if self.dtype == numpy.float16:\n self.check_options = {'atol': 1e-4, 'rtol': 1e-3}\n\n def check_initializer(self, w):\n initializer = initializers.Identity(scale=self.scale)\n initializer(w)\n testing.assert_allclose(\n w, self.scale * numpy.identity(len(self.shape)),\n **self.check_options)\n\n def test_initializer(self, backend_config):\n w = numpy.empty(self.shape, dtype=self.dtype)\n w = backend_config.get_array(w)\n with chainer.using_device(backend_config.device):\n self.check_initializer(w)\n\n def check_shaped_initializer(self, backend_config):\n initializer = initializers.Identity(\n scale=self.scale, dtype=self.dtype)\n xp = backend_config.xp\n w = initializers.generate_array(initializer, self.shape, xp)\n self.assertIs(backend.get_array_module(w), xp)\n self.assertTupleEqual(w.shape, self.shape)\n self.assertEqual(w.dtype, self.dtype)\n testing.assert_allclose(\n w, self.scale * numpy.identity(len(self.shape)),\n **self.check_options)\n\n def test_shaped_initializer(self, backend_config):\n with chainer.using_device(backend_config.device):\n self.check_shaped_initializer(backend_config)\n\n\[email protected](\n {'shape': (2, 3)},\n {'shape': (2, 2, 4)},\n {'shape': ()},\n {'shape': 0})\nclass TestIdentityInvalid(unittest.TestCase):\n\n def setUp(self):\n self.initializer = initializers.Identity()\n\n def test_invalid_shape(self):\n w = numpy.empty(self.shape, dtype=numpy.float32)\n with self.assertRaises(ValueError):\n self.initializer(w)\n\n\[email protected](*testing.product({\n 'dtype': [numpy.float16, numpy.float32, numpy.float64],\n}))\[email protected]_backend_tests(\n None,\n [\n {},\n {'use_ideep': 'always'},\n {'use_cuda': True, 'cuda_device': 0},\n {'use_cuda': True, 'cuda_device': 1},\n {'use_chainerx': True, 'chainerx_device': 'native:0'},\n {'use_chainerx': True, 'chainerx_device': 'cuda:0'},\n {'use_chainerx': True, 'chainerx_device': 'cuda:1'},\n ]\n)\nclass TestConstant(unittest.TestCase):\n\n fill_value = 0.1\n shape = (2, 3)\n\n def setUp(self):\n self.check_options = {}\n if self.dtype == numpy.float16:\n self.check_options = {'atol': 1e-4, 'rtol': 1e-3}\n\n def check_initializer(self, w):\n initializer = initializers.Constant(fill_value=self.fill_value)\n initializer(w)\n testing.assert_allclose(\n w, numpy.full(self.shape, self.fill_value),\n **self.check_options)\n\n def test_initializer(self, backend_config):\n w = numpy.empty(self.shape, dtype=self.dtype)\n w = backend_config.get_array(w)\n with chainer.using_device(backend_config.device):\n self.check_initializer(w)\n\n def check_shaped_initializer(self, backend_config):\n initializer = initializers.Constant(\n fill_value=self.fill_value, dtype=self.dtype)\n xp = backend_config.xp\n w = initializers.generate_array(initializer, self.shape, xp)\n self.assertIs(backend.get_array_module(w), xp)\n 
self.assertTupleEqual(w.shape, self.shape)\n self.assertEqual(w.dtype, self.dtype)\n testing.assert_allclose(\n w, numpy.full(self.shape, self.fill_value),\n **self.check_options)\n\n def test_shaped_initializer(self, backend_config):\n with chainer.using_device(backend_config.device):\n self.check_shaped_initializer(backend_config)\n\n\ntesting.run_module(__name__, __file__)\n",
"import unittest\n\nimport numpy\n\nimport chainer\nfrom chainer.backends import cuda\nfrom chainer import gradient_check\nfrom chainer import links\nfrom chainer import testing\nfrom chainer.testing import attr\n\n\ndef sigmoid(x):\n return numpy.tanh(x * 0.5) * 0.5 + 0.5\n\n\[email protected](*testing.product({\n 'hidden_none': [True, False],\n}))\nclass TestNStepLSTM(unittest.TestCase):\n\n lengths = [3, 1, 2]\n n_layers = 2\n in_size = 3\n out_size = 2\n dropout = 0.0\n\n def setUp(self):\n shape = (self.n_layers, len(self.lengths), self.out_size)\n if self.hidden_none:\n self.h = self.c = numpy.zeros(shape, 'f')\n else:\n self.h = numpy.random.uniform(-1, 1, shape).astype('f')\n self.c = numpy.random.uniform(-1, 1, shape).astype('f')\n self.xs = [\n numpy.random.uniform(-1, 1, (l, self.in_size)).astype('f')\n for l in self.lengths]\n\n self.gh = numpy.random.uniform(-1, 1, shape).astype('f')\n self.gc = numpy.random.uniform(-1, 1, shape).astype('f')\n self.gys = [\n numpy.random.uniform(-1, 1, (l, self.out_size)).astype('f')\n for l in self.lengths]\n self.rnn = links.NStepLSTM(\n self.n_layers, self.in_size, self.out_size, self.dropout)\n\n for layer in self.rnn:\n for p in layer.params():\n p.array[...] = numpy.random.uniform(-1, 1, p.shape)\n self.rnn.cleargrads()\n\n def check_forward(self, h_data, c_data, xs_data):\n if self.hidden_none:\n h = c = None\n else:\n h = chainer.Variable(h_data)\n c = chainer.Variable(c_data)\n xs = [chainer.Variable(x) for x in xs_data]\n hy, cy, ys = self.rnn(h, c, xs)\n\n assert hy.shape == h_data.shape\n assert cy.shape == c_data.shape\n assert len(xs) == len(ys)\n for x, y in zip(xs, ys):\n assert len(x) == len(y)\n assert y.shape[1] == self.out_size\n\n with testing.assert_warns(DeprecationWarning):\n self.rnn.to_cpu()\n\n for batch, seq in enumerate(self.xs):\n for layer in range(self.n_layers):\n p = self.rnn[layer]\n h_prev = self.h[layer, batch]\n c_prev = self.c[layer, batch]\n hs = []\n for x in seq:\n i = sigmoid(\n x.dot(p.w0.array.T) + h_prev.dot(p.w4.array.T) +\n p.b0.array + p.b4.array)\n f = sigmoid(\n x.dot(p.w1.array.T) + h_prev.dot(p.w5.array.T) +\n p.b1.array + p.b5.array)\n c_bar = numpy.tanh(\n x.dot(p.w2.array.T) + h_prev.dot(p.w6.array.T) +\n p.b2.array + p.b6.array)\n o = sigmoid(\n x.dot(p.w3.array.T) + h_prev.dot(p.w7.array.T) +\n p.b3.array + p.b7.array)\n e_c = (f * c_prev + i * c_bar)\n e_h = o * numpy.tanh(e_c)\n\n h_prev = e_h\n c_prev = e_c\n hs.append(e_h)\n\n seq = hs\n testing.assert_allclose(hy.array[layer, batch], h_prev)\n testing.assert_allclose(cy.array[layer, batch], c_prev)\n\n for y, ey in zip(ys[batch].array, seq):\n testing.assert_allclose(y, ey)\n\n def test_forward_cpu_train(self):\n with chainer.using_config('train', True):\n self.check_forward(self.h, self.c, self.xs)\n\n @attr.gpu\n def test_forward_gpu_train(self):\n with testing.assert_warns(DeprecationWarning):\n self.rnn.to_gpu()\n with chainer.using_config('use_cudnn', 'always'), \\\n chainer.using_config('train', True):\n self.check_forward(\n cuda.to_gpu(self.h),\n cuda.to_gpu(self.c),\n [cuda.to_gpu(x) for x in self.xs])\n\n def test_forward_cpu_test(self):\n with chainer.using_config('train', False):\n self.check_forward(self.h, self.c, self.xs)\n\n @attr.gpu\n def test_forward_gpu_test(self):\n with testing.assert_warns(DeprecationWarning):\n self.rnn.to_gpu()\n with chainer.using_config('use_cudnn', 'always'), \\\n chainer.using_config('train', False):\n self.check_forward(\n cuda.to_gpu(self.h),\n cuda.to_gpu(self.c),\n [cuda.to_gpu(x) 
for x in self.xs])\n\n @attr.multi_gpu(2)\n def test_forward_nonzero_gpu_test(self):\n # Issue #5347\n # to_gpu should work without setting the current device\n with testing.assert_warns(DeprecationWarning):\n self.rnn.to_gpu(1)\n with chainer.using_config('use_cudnn', 'always'), \\\n chainer.using_config('train', False):\n self.check_forward(\n cuda.to_gpu(self.h, 1),\n cuda.to_gpu(self.c, 1),\n [cuda.to_gpu(x, 1) for x in self.xs])\n\n def check_multi_gpu_forward(self, train=True):\n # See chainer/chainer#6262\n # NStepLSTM w/ cudnn & dropout should work on not current device\n msg = None\n rnn = self.rnn.copy('copy')\n rnn.dropout = .5\n with cuda.get_device_from_id(1):\n if self.hidden_none:\n h = None\n else:\n h = cuda.to_gpu(self.h)\n c = cuda.to_gpu(self.c)\n xs = [cuda.to_gpu(x) for x in self.xs]\n with testing.assert_warns(DeprecationWarning):\n rnn = rnn.to_gpu()\n with cuda.get_device_from_id(0),\\\n chainer.using_config('train', train),\\\n chainer.using_config('use_cudnn', 'always'):\n try:\n rnn(h, c, xs)\n except Exception as e:\n msg = e\n assert msg is None\n\n @attr.cudnn\n @attr.multi_gpu(2)\n def test_multi_gpu_forward_training(self):\n self.check_multi_gpu_forward(True)\n\n @attr.cudnn\n @attr.multi_gpu(2)\n def test_multi_gpu_forward_test(self):\n self.check_multi_gpu_forward(False)\n\n def check_backward(\n self, h_data, c_data, xs_data, gh_data, gc_data, gys_data):\n\n def fun(*args):\n if self.hidden_none:\n h = c = None\n xs = args\n else:\n h, c = args[:2]\n xs = args[2:]\n hy, cy, ys = self.rnn(h, c, xs)\n return tuple([hy, cy] + list(ys))\n\n params = []\n for layer in self.rnn:\n for p in layer.params():\n params.append(p)\n\n if self.hidden_none:\n in_data = xs_data\n else:\n in_data = [h_data, c_data] + xs_data\n gradient_check.check_backward(\n fun, tuple(in_data),\n tuple([gh_data, gc_data] + gys_data),\n tuple(params), eps=1e-2, rtol=1e-3, atol=1e-3)\n\n def test_backward_cpu(self):\n self.check_backward(\n self.h, self.c, self.xs, self.gh, self.gc, self.gys)\n\n @attr.gpu\n def test_backward_gpu(self):\n with testing.assert_warns(DeprecationWarning):\n self.rnn.to_gpu()\n with chainer.using_config('use_cudnn', 'always'):\n self.check_backward(\n cuda.to_gpu(self.h),\n cuda.to_gpu(self.c),\n [cuda.to_gpu(x) for x in self.xs],\n cuda.to_gpu(self.gh),\n cuda.to_gpu(self.gc),\n [cuda.to_gpu(gy) for gy in self.gys])\n\n def test_n_cells(self):\n self.assertEqual(self.rnn.n_cells, 2)\n assert self.rnn.n_cells == 2\n\n\[email protected](*testing.product({\n 'hidden_none': [True, False],\n}))\nclass TestNStepBiLSTM(unittest.TestCase):\n\n lengths = [3, 1, 2]\n n_layers = 2\n in_size = 3\n out_size = 2\n dropout = 0.0\n\n def setUp(self):\n shape = (self.n_layers * 2, len(self.lengths), self.out_size)\n if self.hidden_none:\n self.h = self.c = numpy.zeros(shape, 'f')\n else:\n self.h = numpy.random.uniform(-1, 1, shape).astype('f')\n self.c = numpy.random.uniform(-1, 1, shape).astype('f')\n self.xs = [\n numpy.random.uniform(-1, 1, (l, self.in_size)).astype('f')\n for l in self.lengths]\n\n self.gh = numpy.random.uniform(-1, 1, shape).astype('f')\n self.gc = numpy.random.uniform(-1, 1, shape).astype('f')\n self.gys = [\n numpy.random.uniform(-1, 1, (l, self.out_size * 2)).astype('f')\n for l in self.lengths]\n self.rnn = links.NStepBiLSTM(\n self.n_layers, self.in_size, self.out_size, self.dropout)\n\n for layer in self.rnn:\n for p in layer.params():\n p.array[...] 
= numpy.random.uniform(-1, 1, p.shape)\n self.rnn.cleargrads()\n\n def check_forward(self, h_data, c_data, xs_data):\n if self.hidden_none:\n h = c = None\n else:\n h = chainer.Variable(h_data)\n c = chainer.Variable(c_data)\n xs = [chainer.Variable(x) for x in xs_data]\n hy, cy, ys = self.rnn(h, c, xs)\n\n assert hy.shape == h_data.shape\n assert cy.shape == c_data.shape\n assert len(xs) == len(ys)\n for x, y in zip(xs, ys):\n assert len(x) == len(y)\n assert y.shape[1] == self.out_size * 2\n\n with testing.assert_warns(DeprecationWarning):\n self.rnn.to_cpu()\n\n for batch, seq in enumerate(self.xs):\n for layer in range(self.n_layers):\n # forward\n di = 0\n layer_idx = layer * 2 + di\n p = self.rnn[layer_idx]\n h_prev = self.h[layer_idx, batch]\n c_prev = self.c[layer_idx, batch]\n hs_f = []\n for x in seq:\n i = sigmoid(x.dot(p.w0.array.T) +\n h_prev.dot(p.w4.array.T) +\n p.b0.array + p.b4.array)\n f = sigmoid(x.dot(p.w1.array.T) +\n h_prev.dot(p.w5.array.T) +\n p.b1.array + p.b5.array)\n c_bar = numpy.tanh(x.dot(p.w2.array.T) +\n h_prev.dot(p.w6.array.T) +\n p.b2.array + p.b6.array)\n o = sigmoid(\n x.dot(p.w3.array.T) + h_prev.dot(p.w7.array.T) +\n p.b3.array + p.b7.array)\n e_c = (f * c_prev + i * c_bar)\n e_h = o * numpy.tanh(e_c)\n\n h_prev = e_h\n c_prev = e_c\n hs_f.append(e_h)\n\n testing.assert_allclose(hy.array[layer_idx, batch], h_prev)\n testing.assert_allclose(cy.array[layer_idx, batch], c_prev)\n\n # backward\n di = 1\n layer_idx = layer * 2 + di\n p = self.rnn[layer_idx]\n h_prev = self.h[layer_idx, batch]\n c_prev = self.c[layer_idx, batch]\n hs_b = []\n for x in reversed(seq):\n i = sigmoid(x.dot(p.w0.array.T) +\n h_prev.dot(p.w4.array.T) +\n p.b0.array + p.b4.array)\n f = sigmoid(x.dot(p.w1.array.T) +\n h_prev.dot(p.w5.array.T) +\n p.b1.array + p.b5.array)\n c_bar = numpy.tanh(x.dot(p.w2.array.T) +\n h_prev.dot(p.w6.array.T) +\n p.b2.array + p.b6.array)\n o = sigmoid(\n x.dot(p.w3.array.T) + h_prev.dot(p.w7.array.T) +\n p.b3.array + p.b7.array)\n e_c = (f * c_prev + i * c_bar)\n e_h = o * numpy.tanh(e_c)\n\n h_prev = e_h\n c_prev = e_c\n hs_b.append(e_h)\n\n testing.assert_allclose(hy.array[layer_idx, batch], h_prev)\n testing.assert_allclose(cy.array[layer_idx, batch], c_prev)\n\n hs_b.reverse()\n seq = [numpy.concatenate([hfi, hbi], axis=0) for (hfi, hbi)\n in zip(hs_f, hs_b)]\n\n for y, ey in zip(ys[batch].array, seq):\n testing.assert_allclose(y, ey)\n\n def test_forward_cpu_train(self):\n with chainer.using_config('train', True):\n self.check_forward(self.h, self.c, self.xs)\n\n @attr.gpu\n def test_forward_gpu_train(self):\n with testing.assert_warns(DeprecationWarning):\n self.rnn.to_gpu()\n with chainer.using_config('use_cudnn', 'always'), \\\n chainer.using_config('train', True):\n self.check_forward(\n cuda.to_gpu(self.h),\n cuda.to_gpu(self.c),\n [cuda.to_gpu(x) for x in self.xs])\n\n def test_forward_cpu_test(self):\n with chainer.using_config('train', False):\n self.check_forward(self.h, self.c, self.xs)\n\n @attr.gpu\n def test_forward_gpu_test(self):\n with testing.assert_warns(DeprecationWarning):\n self.rnn.to_gpu()\n with chainer.using_config('use_cudnn', 'always'), \\\n chainer.using_config('train', False):\n self.check_forward(\n cuda.to_gpu(self.h),\n cuda.to_gpu(self.c),\n [cuda.to_gpu(x) for x in self.xs])\n\n def check_multi_gpu_forward(self, train=True):\n # See chainer/chainer#6262\n # NStepBiLSTM w/ cudnn & dropout should work on not current device\n msg = None\n rnn = self.rnn.copy('copy')\n rnn.dropout = .5\n with 
cuda.get_device_from_id(1):\n if self.hidden_none:\n h = None\n else:\n h = cuda.to_gpu(self.h)\n c = cuda.to_gpu(self.c)\n xs = [cuda.to_gpu(x) for x in self.xs]\n with testing.assert_warns(DeprecationWarning):\n rnn = rnn.to_gpu()\n with cuda.get_device_from_id(0),\\\n chainer.using_config('train', train),\\\n chainer.using_config('use_cudnn', 'always'):\n try:\n rnn(h, c, xs)\n except Exception as e:\n msg = e\n assert msg is None\n\n @attr.gpu\n @attr.multi_gpu(2)\n def test_multi_gpu_forward_training(self):\n self.check_multi_gpu_forward(True)\n\n @attr.gpu\n @attr.multi_gpu(2)\n def test_multi_gpu_forward_test(self):\n self.check_multi_gpu_forward(False)\n\n def check_backward(\n self, h_data, c_data, xs_data, gh_data, gc_data, gys_data):\n\n def fun(*args):\n if self.hidden_none:\n h = c = None\n xs = args\n else:\n h, c = args[:2]\n xs = args[2:]\n hy, cy, ys = self.rnn(h, c, xs)\n return tuple([hy, cy] + list(ys))\n\n params = []\n for layer in self.rnn:\n for p in layer.params():\n params.append(p)\n\n if self.hidden_none:\n in_data = xs_data\n else:\n in_data = [h_data, c_data] + xs_data\n gradient_check.check_backward(\n fun, tuple(in_data),\n tuple([gh_data, gc_data] + gys_data),\n tuple(params), eps=1e-2, rtol=1e-3, atol=1e-3)\n\n def test_backward_cpu(self):\n self.check_backward(\n self.h, self.c, self.xs, self.gh, self.gc, self.gys)\n\n @attr.gpu\n def test_backward_gpu(self):\n with testing.assert_warns(DeprecationWarning):\n self.rnn.to_gpu()\n with chainer.using_config('use_cudnn', 'auto'):\n self.check_backward(\n cuda.to_gpu(self.h),\n cuda.to_gpu(self.c),\n [cuda.to_gpu(x) for x in self.xs],\n cuda.to_gpu(self.gh),\n cuda.to_gpu(self.gc),\n [cuda.to_gpu(gy) for gy in self.gys])\n\n def test_n_cells(self):\n assert self.rnn.n_cells == 2\n\n\ntesting.run_module(__name__, __file__)\n",
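# Hedged illustration, not from the test file above: one LSTM step written with the
# same reference equations the tests use to check NStepLSTM (gates i, f, o, candidate
# c_bar, then c = f*c_prev + i*c_bar and h = o*tanh(c)). The toy weights below are
# hypothetical stand-ins for the w0..w7 / b0..b7 parameters of a single layer.
import numpy

def sigmoid(x):
    return numpy.tanh(x * 0.5) * 0.5 + 0.5

rng = numpy.random.RandomState(0)
in_size, out_size = 3, 2
x = rng.uniform(-1, 1, in_size).astype('f')
h_prev = numpy.zeros(out_size, 'f')
c_prev = numpy.zeros(out_size, 'f')
w_x = [rng.uniform(-1, 1, (out_size, in_size)).astype('f') for _ in range(4)]
w_h = [rng.uniform(-1, 1, (out_size, out_size)).astype('f') for _ in range(4)]
b = [numpy.zeros(out_size, 'f') for _ in range(8)]

i = sigmoid(x.dot(w_x[0].T) + h_prev.dot(w_h[0].T) + b[0] + b[4])
f = sigmoid(x.dot(w_x[1].T) + h_prev.dot(w_h[1].T) + b[1] + b[5])
c_bar = numpy.tanh(x.dot(w_x[2].T) + h_prev.dot(w_h[2].T) + b[2] + b[6])
o = sigmoid(x.dot(w_x[3].T) + h_prev.dot(w_h[3].T) + b[3] + b[7])
c = f * c_prev + i * c_bar
h = o * numpy.tanh(c)
print(h, c)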
"import warnings\n\nimport numpy\nimport six\n\nfrom chainer import configuration\nfrom chainer import functions\nfrom chainer import initializer\nfrom chainer import link\nfrom chainer.links.caffe.protobuf3 import caffe_pb2 as caffe_pb\nfrom chainer.links.connection import convolution_2d\nfrom chainer.links.connection import deconvolution_2d\nfrom chainer.links.connection import linear\nfrom chainer.links.connection import scale\nfrom chainer.links.normalization import batch_normalization\nfrom chainer.utils import argument\n\n\ntry:\n # This method is undocumented, but is required to read large size of\n # model files when a user uses cpp-implementation.\n from google.protobuf.pyext import _message\n _message.SetAllowOversizeProtos(True)\nexcept ImportError:\n pass\n\n_type_to_method = {}\n_oldname_to_method = {}\n\n\ndef _layer(typ, oldname):\n def decorator(meth):\n global _type_to_method\n _type_to_method[typ] = meth\n if oldname is not None:\n typevalue = getattr(caffe_pb.V1LayerParameter, oldname)\n _oldname_to_method[typevalue] = meth\n return meth\n return decorator\n\n\nclass _Blob(initializer.Initializer):\n\n chunk_size = 1024 * 1024\n\n def __init__(self, blob):\n super(_Blob, self).__init__()\n self.data = blob.data\n\n def __call__(self, array):\n array = array.ravel()\n size = len(array)\n indices = list(range(0, size, self.chunk_size))\n\n # Rather than accessing Protobuf's RepeatedScalar fields directly,\n # creating a intermediate list by indexing is more efficient due to\n # the implementation of the Python extension of Protobuf.\n # To avoid allocating excessively large lists, we limit the length\n # of lists by `chunk_size`.\n for start, end in zip(indices, indices[1:] + [size]):\n array[start:end] = self.data[start:end]\n\n\nclass _ConvolutionBlob(_Blob):\n\n def __init__(self, blob, group):\n super(_ConvolutionBlob, self).__init__(blob)\n self.group = group\n\n def __call__(self, array):\n n_out, n_in = array.shape[:2]\n\n part_out = n_out // self.group\n part_in = n_in // self.group\n\n array[...] = 0\n\n part_size = len(self.data) // self.group\n for i in six.moves.range(self.group):\n out_slice = slice(i * part_out, (i + 1) * part_out)\n in_slice = slice(i * part_in, (i + 1) * part_in)\n w = array[out_slice, in_slice]\n\n data = numpy.array(self.data[i * part_size:(i + 1) * part_size])\n w[:] = data.reshape(w.shape)\n\n\nclass CaffeFunction(link.Chain):\n\n \"\"\"Caffe emulator based on the model file of Caffe.\n\n Given a protocol buffers file of a Caffe model, this class loads and\n emulates it on :class:`~chainer.Variable` objects. It supports the official\n reference models provided by BVLC.\n\n .. note::\n\n CaffeFunction ignores the following layers:\n\n - Layers that CaffeFunction does not support (including data layers)\n - Layers that have no top blobs\n - Layers whose bottom blobs are incomplete (i.e., some or all of them\n are not given nor computed)\n\n .. warning::\n\n It does not support full compatibility against Caffe. Some layers and\n configurations are not implemented in Chainer yet, though the reference\n models provided by the BVLC team are supported except data layers.\n\n .. admonition:: Example\n\n Consider we want to extract the (unnormalized) log class probability\n of given images using BVLC reference CaffeNet. The model can be\n downloaded from:\n\n http://dl.caffe.berkeleyvision.org/bvlc_reference_caffenet.caffemodel\n\n We want to compute the ``fc8`` blob from the ``data`` blob. 
It is simply\n written as follows::\n\n # Load the model\n func = CaffeFunction('path/to/bvlc_reference_caffenet.caffemodel')\n\n # Minibatch of size 10\n x_data = numpy.ndarray((10, 3, 227, 227), dtype=numpy.float32)\n ... # (Fill the minibatch here)\n\n # Forward the pre-trained net\n x = Variable(x_data)\n y, = func(inputs={'data': x}, outputs=['fc8'])\n\n The result ``y`` contains the Variable corresponding to the ``fc8``\n blob. The computational graph is memorized as a usual forward\n computation in Chainer, so we can run backprop through this pre-trained\n net.\n\n Args:\n model_path (str): Path to the binary-proto model file of Caffe.\n\n Attributes:\n forwards (dict): A mapping from layer names to corresponding functions.\n\n \"\"\"\n\n def __init__(self, model_path):\n super(CaffeFunction, self).__init__()\n\n net = caffe_pb.NetParameter()\n with open(model_path, 'rb') as model_file:\n net.MergeFromString(model_file.read())\n\n self.forwards = {}\n self.split_map = {}\n self.layers = []\n\n if net.layer:\n for layer in net.layer:\n meth = _type_to_method.get(layer.type)\n if meth:\n meth(self, layer)\n else:\n warnings.warn(\n 'Skip the layer \"%s\", since CaffeFunction does not '\n 'support %s layer' % (layer.name, layer.type))\n else: # v1 format\n for layer in net.layers:\n meth = _oldname_to_method.get(layer.type)\n if meth:\n meth(self, layer)\n else:\n warnings.warn(\n 'Skip the layer \"%s\", since CaffeFunction does not '\n 'support it' % layer.name)\n\n def forward(self, inputs, outputs, disable=(), **kwargs):\n \"\"\"forward(self, inputs, outputs, disable=())\n\n Executes a sub-network of the network.\n\n This function acts as an interpreter of the network definition for\n Caffe. On execution, it interprets each layer one by one, and if the\n bottom blobs are already computed, then emulates the layer and stores\n output blobs as :class:`~chainer.Variable` objects.\n\n Args:\n inputs (dict): A dictionary whose key-value pairs indicate initial\n correspondences between blob names and\n :class:`~chainer.Variable` objects.\n outputs (Iterable): A list of blob names whose corresponding\n :class:`~chainer.Variable` objects are returned.\n disable (Iterable): A list of layer names that will be ignored\n during the forward computation.\n\n Returns:\n tuple: A tuple of output :class:`~chainer.Variable` objects\n corresponding to elements of the `outputs` argument.\n\n \"\"\"\n if kwargs:\n argument.check_unexpected_kwargs(\n kwargs, train='train argument is not supported anymore. 
'\n 'Use chainer.using_config')\n argument.assert_kwargs_empty(kwargs)\n\n variables = dict(inputs)\n disable = set(disable)\n for func_name, bottom, top in self.layers:\n if (func_name in disable or\n func_name not in self.forwards or\n any(blob not in variables for blob in bottom)):\n continue\n\n func = self.forwards[func_name]\n input_vars = tuple(variables[blob] for blob in bottom)\n output_vars = func(*input_vars)\n if not isinstance(output_vars, (tuple, list)):\n output_vars = output_vars,\n for var, name in zip(output_vars, top):\n variables[name] = var\n\n self.variables = variables\n return tuple(variables[blob] for blob in outputs)\n\n def _add_layer(self, layer):\n bottom = []\n for blob_name in layer.bottom:\n bottom.append(self.split_map.get(blob_name, blob_name))\n self.layers.append((layer.name, bottom, list(layer.top)))\n\n @_layer('Concat', 'CONCAT')\n def _setup_concat(self, layer):\n param = layer.concat_param\n axis = param.axis\n if axis == 1 and param.concat_dim != 1:\n axis = param.concat_dim\n\n self.forwards[layer.name] = _ListArgumentFcuntion(\n functions.concat, axis=axis)\n self._add_layer(layer)\n\n @_layer('Convolution', 'CONVOLUTION')\n def _setup_convolution(self, layer):\n blobs = layer.blobs\n param = layer.convolution_param\n ksize = _get_ksize(param)\n stride = _get_stride(param)\n pad = _get_pad(param)\n num = _get_num(blobs[0])\n channels = _get_channels(blobs[0])\n bias_term = param.bias_term\n\n n_in = channels * param.group\n n_out = num\n\n func = convolution_2d.Convolution2D(\n n_in, n_out, ksize, stride, pad, nobias=not bias_term,\n initialW=_ConvolutionBlob(blobs[0], param.group),\n initial_bias=_Blob(blobs[1]) if bias_term else None)\n\n with self.init_scope():\n setattr(self, layer.name, func)\n self.forwards[layer.name] = _CallChildLink(self, layer.name)\n self._add_layer(layer)\n\n @_layer('Deconvolution', 'DECONVOLUTION')\n def _setup_deconvolution(self, layer):\n blobs = layer.blobs\n param = layer.convolution_param\n ksize = _get_ksize(param)\n stride = _get_stride(param)\n pad = _get_pad(param)\n num = _get_num(blobs[0])\n channels = _get_channels(blobs[0])\n bias_term = param.bias_term\n\n n_in = num\n n_out = channels * param.group\n\n func = deconvolution_2d.Deconvolution2D(\n n_in, n_out, ksize, stride, pad, nobias=not bias_term,\n initialW=_ConvolutionBlob(blobs[0], param.group),\n initial_bias=_Blob(blobs[1]) if bias_term else None)\n\n with self.init_scope():\n setattr(self, layer.name, func)\n self.forwards[layer.name] = _CallChildLink(self, layer.name)\n self._add_layer(layer)\n\n @_layer('Data', 'DATA')\n def _setup_data(self, layer):\n # We silently skip the data layer.\n pass\n\n @_layer('Dropout', 'DROPOUT')\n def _setup_dropout(self, layer):\n param = layer.dropout_param\n\n self.forwards[layer.name] = _SingleArgumentFunction(\n functions.dropout, ratio=param.dropout_ratio)\n self._add_layer(layer)\n\n @_layer('InnerProduct', 'INNER_PRODUCT')\n def _setup_inner_product(self, layer):\n param = layer.inner_product_param\n bias_term = param.bias_term\n if param.axis != 1:\n raise RuntimeError(\n 'Non-default axis in InnerProduct is not supported')\n\n blobs = layer.blobs\n width, height = _get_width(blobs[0]), _get_height(blobs[0])\n\n func = linear.Linear(\n width, height, nobias=not bias_term,\n initialW=_Blob(blobs[0]),\n initial_bias=_Blob(blobs[1]) if bias_term else None)\n\n with self.init_scope():\n setattr(self, layer.name, func)\n self.forwards[layer.name] = _CallChildLink(self, layer.name)\n 
self._add_layer(layer)\n\n @_layer('LRN', 'LRN')\n def _setup_lrn(self, layer):\n param = layer.lrn_param\n if param.norm_region != param.ACROSS_CHANNELS:\n raise RuntimeError('Within-channel LRN is not supported')\n\n fwd = _SingleArgumentFunction(\n functions.local_response_normalization,\n n=param.local_size, k=param.k,\n alpha=param.alpha / param.local_size, beta=param.beta)\n self.forwards[layer.name] = fwd\n self._add_layer(layer)\n\n @_layer('Pooling', 'POOLING')\n def _setup_pooling(self, layer):\n param = layer.pooling_param\n ksize = _get_ksize(param)\n stride = _get_stride(param)\n pad = _get_pad(param)\n\n if param.pool == param.MAX:\n func = functions.max_pooling_2d\n elif param.pool == param.AVE:\n func = functions.average_pooling_2d\n else:\n raise RuntimeError('Stochastic pooling is not supported')\n\n if param.global_pooling and not ksize:\n # if global_pooling is set but no kernel size, the kernel size\n # is computed dynamically to cover the whole input feature map\n def _func(x, stride, pad):\n return func(x, x.shape[2:], stride=stride, pad=pad)\n fw = _SingleArgumentFunction(_func, stride=stride, pad=pad)\n else:\n fw = _SingleArgumentFunction(func, ksize, stride=stride, pad=pad)\n self.forwards[layer.name] = fw\n self._add_layer(layer)\n\n @_layer('ReLU', 'RELU')\n def _setup_relu(self, layer):\n slope = layer.relu_param.negative_slope\n\n if slope != 0:\n fw = _SingleArgumentFunction(functions.leaky_relu, slope=slope)\n else:\n fw = functions.relu\n\n self.forwards[layer.name] = fw\n self._add_layer(layer)\n\n @_layer('Reshape', None)\n def _setup_reshape(self, layer):\n shape = layer.reshape_param.shape.dim\n\n fw = _SingleArgumentFunction(functions.reshape, shape=shape)\n\n self.forwards[layer.name] = fw\n self._add_layer(layer)\n\n @_layer('BatchNorm', None)\n def _setup_batchnorm(self, layer):\n # Get layer parameters.\n blobs = layer.blobs\n param = layer.batch_norm_param\n use_global_stats = param.use_global_stats\n decay = param.moving_average_fraction\n eps = param.eps\n size = int(blobs[0].shape.dim[0]) # Get channel dim from mean blob.\n\n # Make BatchNormalization link.\n func = batch_normalization.BatchNormalization(\n size, decay=decay, eps=eps, use_gamma=False, use_beta=False)\n\n _Blob(blobs[0])(func.avg_mean)\n _Blob(blobs[1])(func.avg_var)\n\n # Scale the means and variances if a scaling factor is appended to the\n # blobs to correctly mimic to the behavior of Caffe. 
See\n # https://github.com/BVLC/caffe/issues/4885\n if len(blobs) >= 3:\n scaling_factor = blobs[2].data\n func.avg_mean /= scaling_factor[0]\n func.avg_var /= scaling_factor[0]\n\n with self.init_scope():\n setattr(self, layer.name, func)\n\n # Add layer.\n if use_global_stats:\n func_class = _SingleArgumentFunctionTestMode\n else:\n func_class = _SingleArgumentFunction\n fwd = func_class(_CallChildLink(self, layer.name), finetune=False)\n self.forwards[layer.name] = fwd\n self._add_layer(layer)\n\n @_layer('Eltwise', 'ELTWISE')\n def _setup_eltwise(self, layer):\n # stable_prod_grad parameter is not supported now.\n operation = layer.eltwise_param.operation\n coeffs = layer.eltwise_param.coeff or None\n self.forwards[layer.name] = _EltwiseFunction(operation, coeffs)\n self._add_layer(layer)\n\n @_layer('Scale', None)\n def _setup_scale(self, layer):\n # Following parameters are not supported now:\n # - negative axis\n # - num_axes\n # - filler\n # - bias_filler\n\n # Get layer parameters.\n bottom = layer.bottom\n blobs = layer.blobs\n axis = layer.scale_param.axis\n bias_term = layer.scale_param.bias_term\n\n # Case of only one bottom where W is learnt parameter.\n if len(bottom) == 1:\n W_shape = blobs[0].shape.dim\n func = scale.Scale(axis, W_shape, bias_term)\n _Blob(blobs[0])(func.W.data)\n if bias_term:\n _Blob(blobs[1])(func.bias.b.data)\n # Case of two bottoms where W is given as a bottom.\n else:\n shape = blobs[0].shape.dim if bias_term else None\n func = scale.Scale(\n axis, bias_term=bias_term, bias_shape=shape)\n if bias_term:\n _Blob(blobs[0])(func.bias.b.data)\n\n # Add layer.\n with self.init_scope():\n setattr(self, layer.name, func)\n self.forwards[layer.name] = _CallChildLink(self, layer.name)\n self._add_layer(layer)\n\n @_layer('Slice', 'SLICE')\n def _setup_slice(self, layer):\n if layer.slice_param.HasField('axis'):\n axis = layer.slice_param.axis\n elif layer.slice_param.HasField('slice_dim'):\n axis = layer.slice_param.slice_dim\n else:\n axis = 1\n\n if layer.slice_param.slice_point:\n indices_or_sections = list(layer.slice_param.slice_point)\n else:\n indices_or_sections = len(list(layer.top))\n\n self.forwards[layer.name] = _SingleArgumentFunction(\n functions.split_axis,\n indices_or_sections=indices_or_sections,\n axis=axis\n )\n\n self._add_layer(layer)\n\n @_layer('Softmax', 'SOFTMAX')\n def _setup_softmax(self, layer):\n if layer.softmax_param.axis != 1:\n raise RuntimeError(\n 'Softmax along non-channel axis is not supported')\n\n if layer.softmax_param.engine == 0: # DEFAULT\n fw = functions.softmax\n elif layer.softmax_param.engine == 1: # CAFFE\n fw = _SingleArgumentFunctionWithCudnn(False, functions.softmax)\n elif layer.softmax_param.engine == 2: # CUDNN\n fw = _SingleArgumentFunctionWithCudnn(True, functions.softmax)\n\n self.forwards[layer.name] = fw\n self._add_layer(layer)\n\n @_layer('Sigmoid', 'SIGMOID')\n def _setup_sigmoid(self, layer):\n if layer.sigmoid_param.engine == 0: # DEFAULT\n fw = functions.sigmoid\n elif layer.sigmoid_param.engine == 1: # CAFFE\n fw = _SingleArgumentFunctionWithCudnn(False, functions.sigmoid)\n elif layer.sigmoid_param.engine == 2: # CUDNN\n fw = _SingleArgumentFunctionWithCudnn(True, functions.sigmoid)\n\n self.forwards[layer.name] = fw\n self._add_layer(layer)\n\n @_layer('SoftmaxWithLoss', 'SOFTMAX_LOSS')\n def _setup_softmax_with_loss(self, layer):\n if layer.softmax_param.axis != 1:\n raise RuntimeError(\n 'Softmax along non-channel axis is not supported')\n\n self.forwards[layer.name] = 
functions.softmax_cross_entropy\n self._add_layer(layer)\n\n @_layer('Split', 'SPLIT')\n def _setup_split(self, layer):\n for top in layer.top:\n self.split_map[top] = layer.bottom[0]\n\n\n# Internal functions\n\ndef _get_ksize(param):\n if param.kernel_h > 0:\n return param.kernel_h, param.kernel_w\n elif type(param.kernel_size) == int:\n return param.kernel_size\n elif len(param.kernel_size) == 1:\n return param.kernel_size[0]\n else:\n return param.kernel_size\n\n\ndef _get_stride(param):\n if param.stride_h > 0:\n return param.stride_h, param.stride_w\n elif type(param.stride) == int:\n return param.stride\n elif len(param.stride) == 0:\n return 1\n elif len(param.stride) == 1:\n return param.stride[0]\n else:\n return param.stride\n\n\ndef _get_pad(param):\n if param.pad_h > 0 or param.pad_w > 0:\n return param.pad_h, param.pad_w\n elif type(param.pad) == int:\n return param.pad\n elif len(param.pad) == 0:\n return 0\n elif len(param.pad) == 1:\n return param.pad[0]\n else:\n return param.pad\n\n\ndef _get_num(blob):\n if blob.num > 0:\n return blob.num\n else:\n return blob.shape.dim[0]\n\n\ndef _get_channels(blob):\n if blob.channels > 0:\n return blob.channels\n else:\n return blob.shape.dim[1]\n\n\ndef _get_height(blob):\n if blob.height > 0:\n return blob.height\n elif len(blob.shape.dim) == 2:\n return blob.shape.dim[0]\n elif len(blob.shape.dim) == 4:\n return blob.shape.dim[2]\n else:\n raise RuntimeError(\n '{}-dimensional array is not supported'.format(\n len(blob.shape.dim)))\n\n\ndef _get_width(blob):\n if blob.width > 0:\n return blob.width\n elif len(blob.shape.dim) == 2:\n return blob.shape.dim[1]\n elif len(blob.shape.dim) == 4:\n return blob.shape.dim[3]\n else:\n raise RuntimeError(\n '{}-dimensional array is not supported'.format(\n len(blob.shape.dim)))\n\n\n# Internal class\n# __call__ must return Variable or tuple\n\nclass _SingleArgumentFunction(object):\n\n def __init__(self, func, *args, **kwargs):\n self.func = func\n self.args = args\n self.kwargs = kwargs\n\n def __call__(self, x):\n return self.func(x, *self.args, **self.kwargs)\n\n\nclass _SingleArgumentFunctionTestMode(_SingleArgumentFunction):\n\n def __call__(self, x):\n with configuration.using_config('train', False):\n return super(_SingleArgumentFunctionTestMode, self).__call__(x)\n\n\nclass _ListArgumentFcuntion(object):\n\n def __init__(self, func, **kwargs):\n self.func = func\n self.kwargs = kwargs\n\n def __call__(self, *xs):\n return self.func(xs, **self.kwargs)\n\n\nclass _SingleArgumentFunctionWithCudnn(_SingleArgumentFunction):\n\n def __init__(self, use_cudnn, func, *args, **kwargs):\n super(_SingleArgumentFunctionWithCudnn, self).__init__(\n func, *args, **kwargs)\n self.use_cudnn = use_cudnn\n\n def __call__(self, x):\n with configuration.using_config('use_cudnn', self.use_cudnn):\n return super(_SingleArgumentFunctionWithCudnn, self).__call__(x)\n\n\nclass _CallChildLink(object):\n\n def __init__(self, caffe_func, name):\n self.name = name\n self.caffe_func = caffe_func\n\n def __call__(self, *xs, **kwargs):\n return self.caffe_func[self.name](*xs, **kwargs)\n\n\nclass _EltwiseFunction(object):\n\n def __init__(self, operation, coeffs=None):\n if coeffs is not None:\n assert len(coeffs) > 0\n self.operation = operation\n self.coeffs = coeffs\n\n def __call__(self, *xs):\n operation = self.operation\n\n if operation == 0: # PROD\n return six.moves.reduce(lambda x, y: x * y, xs),\n\n elif operation == 1: # SUM\n coeffs = self.coeffs\n if coeffs is not None:\n assert len(xs) == 
len(coeffs)\n xs = [x * coeff for x, coeff in zip(xs, coeffs)]\n return six.moves.reduce(lambda x, y: x + y, xs),\n\n elif operation == 2: # MAX\n return six.moves.reduce(lambda x, y: functions.maximum(x, y), xs),\n\n else:\n raise ValueError('Invalid EltwiseParameter.EltwiseOp value.')\n"
] | [
[
"numpy.random.uniform",
"numpy.array",
"numpy.zeros"
],
[
"numpy.asarray",
"numpy.array",
"numpy.abs",
"numpy.full"
],
[
"numpy.empty_like",
"numpy.array",
"numpy.random.RandomState"
],
[
"numpy.empty",
"numpy.full"
],
[
"numpy.concatenate",
"numpy.random.uniform",
"numpy.tanh",
"numpy.zeros"
],
[
"numpy.array"
]
] |
Substancia/FDTD-Huygens-metasurface | [
"dfb46f43c0653b394b63e7af92a331ae4824d9be",
"dfb46f43c0653b394b63e7af92a331ae4824d9be"
] | [
"quartz_sphere.py",
"remote_stuff/fdtd_output_2021-5-29-2-27-17 (planoconvex_R_200_d_20)_(dBmap_loop_test)/dBmap_loop.py"
] | [
"from fdtd_venv import fdtd_mod as fdtd\nfrom numpy import arange, flip, meshgrid, array\nfrom matplotlib.pyplot import plot, show\n\ndef main():\n\tgrid = fdtd.Grid(shape=(200, 200, 1), grid_spacing=155e-9)\n\n\tlens_width = 10\n\tlens_order = 3\n\tlens_radius = 25\n\tx, y = arange(-90, 90, 1), arange(lens_radius-lens_order*lens_width/2, lens_radius, 1)\n\tX, Y = meshgrid(x, y)\n\tlens_mask = X**2 + Y**2 <= lens_radius**2\n\tfor j, col in enumerate(lens_mask.T):\n\t\tfor i, val in enumerate(flip(col)):\n\t\t\tif val:\n\t\t\t\tgrid[50+i%(lens_width//2):50+lens_width-i%(lens_width//2), j+10:j+11, 0] = fdtd.Object(permittivity=1.5**2, name=str(i)+\",\"+str(j))\n\t\t\t\tbreak\n\n\tgrid[25, 80:120, 0] = fdtd.LineSource(period=1550e-9/3e8, name=\"source\")\n\n\tgrid[30:130, 100, 0] = fdtd.LineDetector(name=\"LineDetector\")\n\tgrid[30:130, 75:125, 0] = fdtd.BlockDetector(name=\"BlockDetector\")\n\n\tgrid[0:10, :, :] = fdtd.PML(name=\"pml_xlow\")\n\tgrid[-10:, :, :] = fdtd.PML(name=\"pml_xhigh\")\n\tgrid[:, 0:10, :] = fdtd.PML(name=\"pml_ylow\")\n\tgrid[:, -10:, :] = fdtd.PML(name=\"pml_yhigh\")\n\n\tgrid.run(total_time=300)\n\tgrid.visualize(z=0, show=True)\n\t#E_val = array(grid.detector.detector_values()['E'])\n\t#arr = []\n\t#for i in range(100):\n\t\t#temp = E_val[:, i, 2]\n\t\t#arr.append(max(temp) - min(temp))\n\t#print(\"Max index:\", 30+arr.index(max(arr)))\n\t#plot(arange(30, 130, 1), arr)\n\t#show()\n\tfdtd.dB_map_2D(array(grid.detectors[1].detector_values()['E'][200:]))\n\nif __name__ == \"__main__\":\n\tmain()\n",
"from matplotlib.pyplot import figure, imshow, colorbar, show, title\nfrom sys import argv\nfrom numpy import log10, load\nfrom time import time\nfrom tqdm import tqdm\n\ndef dBmap(File, interpolation, no_show=False, return_peak=False):\n\tz, chosenDimension, nOfBlocks = 0, 2, 1\n\tdf = load(File)\n\ta = []\n\tsample = df[[x for x in df if x[-4:] == \" (E)\"][nOfBlocks-1]]\n\tfor i in tqdm(range(len(sample[0]))):\n\t\ta.append([])\n\t\tfor j in range(len(sample[0][0])):\n\t\t\ttemp = [x[i][j][z][chosenDimension] for x in sample]\n\t\t\ta[i].append(max(temp) - min(temp))\n\t\n\tpeakVal, minVal = max(map(max, a)), min(map(min, a))\n\tif return_peak:\n\t\treturn [[[i, j] for j, y in enumerate(x) if y == peakVal] for i, x in enumerate(a) if peakVal in x]\n\telse:\n\t\tprint(\"Peak at:\", [[[i, j] for j, y in enumerate(x) if y == peakVal] for i, x in enumerate(a) if peakVal in x])\n\ta = 10*log10([[y/minVal for y in x] for x in a])\n\n\tif not no_show:\n\t\t#figure(figsize=(15, 15))\n\t\ttitle(\"dB map of Electrical waves in detector region\")\n\t\timshow(a, cmap=\"inferno\", interpolation=interpolation)\n\t\tcbar = colorbar()\n\t\tcbar.ax.set_ylabel(\"dB scale\", rotation=270)\n\t\tshow()\n\nif __name__ == \"__main__\":\n\tstart_time = time()\n\tif argv[1][0:7] == \"file://\":\n\t\targv[1] = argv[1][7:]\n\tif len(argv) == 2:\n\t\targv.append(\"spline16\")\n\tlongest_f = 0\n\tfor i in range(100, 400):\n\t\tf = dBmap(argv[1], argv[2], no_show=True, return_peak=True)\n\t\tprint(\"range_start:\", i, \"f:\", f)\n\t\tif f[-1][-1][0] > longest_f:\n\t\t\tf = longest_f\n\tprint(\"Longest f:\", longest_f)\n\tend_time = time()\n\tprint(\"Runtime:\", end_time-start_time)\n"
] | [
[
"numpy.arange",
"numpy.meshgrid",
"numpy.flip"
],
[
"matplotlib.pyplot.imshow",
"matplotlib.pyplot.title",
"matplotlib.pyplot.colorbar",
"numpy.log10",
"numpy.load",
"matplotlib.pyplot.show"
]
] |
medicode/tensor2tensor | [
"3386fa537957fcf8133536322fcadec0630dde11",
"3386fa537957fcf8133536322fcadec0630dde11"
] | [
"tensor2tensor/utils/usr_dir.py",
"tensor2tensor/bin/t2t_datagen.py"
] | [
"# coding=utf-8\n# Copyright 2018 The Tensor2Tensor Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Utility to load code from an external user-supplied directory.\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport importlib\nimport os\nimport sys\nimport tensorflow as tf\n\nINTERNAL_USR_DIR_PACKAGE = \"t2t_usr_dir_internal\"\n\n\ndef import_usr_dir(usr_dir):\n \"\"\"Import module at usr_dir, if provided.\"\"\"\n if not usr_dir:\n return\n if usr_dir == INTERNAL_USR_DIR_PACKAGE:\n # The package has been installed with pip under this name for Cloud ML\n # Engine so just import it.\n importlib.import_module(INTERNAL_USR_DIR_PACKAGE)\n return\n\n dir_path = os.path.abspath(os.path.expanduser(usr_dir).rstrip(\"/\"))\n containing_dir, module_name = os.path.split(dir_path)\n tf.logging.info(\"Importing user module %s from path %s\", module_name,\n containing_dir)\n sys.path.insert(0, containing_dir)\n importlib.import_module(module_name)\n sys.path.pop(0)\n",
"# coding=utf-8\n# Copyright 2018 The Tensor2Tensor Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Produces the training and dev data for --problem into --data_dir.\n\nProduces sharded and shuffled TFRecord files of tensorflow.Example protocol\nbuffers for a variety of registered datasets.\n\nAll Problems are registered with @registry.register_problem or are in\n_SUPPORTED_PROBLEM_GENERATORS in this file. Each entry maps a string name\n(selectable on the command-line with --problem) to a function that takes 2\narguments - input_directory and mode (one of \"train\" or \"dev\") - and yields for\neach training example a dictionary mapping string feature names to lists of\n{string, int, float}. The generator will be run once for each mode.\n\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport multiprocessing\nimport os\nimport random\nimport tempfile\n\n# Dependency imports\n\n# Fathom\nimport fathomt2t\nimport fathomairflow.dags.dag_management.xcom_manipulation as xcom\nfrom fathomtf.services.model_management import fathom_t2t_model_setup\nimport absl\n\nimport numpy as np\n\nfrom tensor2tensor import problems as problems_lib # pylint: disable=unused-import\nfrom tensor2tensor.data_generators import generator_utils\nfrom tensor2tensor.utils import registry\nfrom tensor2tensor.utils import usr_dir\n\ntry:\n # pylint: disable=g-import-not-at-top\n from tensor2tensor.data_generators import algorithmic_math\n from tensor2tensor.data_generators import audio\n from tensor2tensor.data_generators import snli\n from tensor2tensor.data_generators import wsj_parsing\n # pylint: enable=g-import-not-at-top\nexcept ImportError:\n pass\n\n# Improrting here to prevent pylint from ungrouped-imports warning.\nimport tensorflow as tf # pylint: disable=g-import-not-at-top\n\n\nflags = tf.flags\nFLAGS = flags.FLAGS\n\n# Fathom\n# TODO: find some more permanent and clean solution to the problem of\n# flags being defined in multiple places. Cleanest is probably to have\n# a flags.py that lives in diseaseTools and which contains literally\n# all fathom-defined t2t flags, and then strip all of them out of t2t.\ntry:\n flags.DEFINE_string(\"gcs_subpath\", None, \"Subpath to the model\")\nexcept absl.flags._exceptions.DuplicateFlagError:\n pass\n\nflags.DEFINE_string(\"data_dir\", \"\", \"Data directory.\")\nflags.DEFINE_string(\"tmp_dir\", \"/tmp/t2t_datagen\",\n \"Temporary storage directory.\")\nflags.DEFINE_string(\"problem\", \"\",\n \"The name of the problem to generate data for.\")\nflags.DEFINE_string(\"exclude_problems\", \"\",\n \"Comma-separates list of problems to exclude.\")\nflags.DEFINE_integer(\"num_shards\", 0, \"How many shards to use. 
Ignored for \"\n \"registered Problems.\")\nflags.DEFINE_integer(\"max_cases\", 0,\n \"Maximum number of cases to generate (unbounded if 0).\")\nflags.DEFINE_bool(\"only_list\", False,\n \"If true, we only list the problems that will be generated.\")\nflags.DEFINE_integer(\"random_seed\", 429459, \"Random seed to use.\")\nflags.DEFINE_integer(\"task_id\", -1, \"For distributed data generation.\")\nflags.DEFINE_integer(\"task_id_start\", -1, \"For distributed data generation.\")\nflags.DEFINE_integer(\"task_id_end\", -1, \"For distributed data generation.\")\nflags.DEFINE_integer(\n \"num_concurrent_processes\", None,\n \"Applies only to problems for which multiprocess_generate=True.\")\nflags.DEFINE_string(\"t2t_usr_dir\", \"\",\n \"Path to a Python module that will be imported. The \"\n \"__init__.py file should include the necessary imports. \"\n \"The imported files should contain registrations, \"\n \"e.g. @registry.register_problem calls, that will then be \"\n \"available to t2t-datagen.\")\n\n\n# Mapping from problems that we can generate data for to their generators.\n# pylint: disable=g-long-lambda\n_SUPPORTED_PROBLEM_GENERATORS = {\n \"algorithmic_algebra_inverse\": (\n lambda: algorithmic_math.algebra_inverse(26, 0, 2, 100000),\n lambda: algorithmic_math.algebra_inverse(26, 3, 3, 10000),\n lambda: None), # test set\n \"parsing_english_ptb8k\": (\n lambda: wsj_parsing.parsing_token_generator(\n FLAGS.data_dir, FLAGS.tmp_dir, True, 2**13, 2**9),\n lambda: wsj_parsing.parsing_token_generator(\n FLAGS.data_dir, FLAGS.tmp_dir, False, 2**13, 2**9),\n lambda: None), # test set\n \"parsing_english_ptb16k\": (\n lambda: wsj_parsing.parsing_token_generator(\n FLAGS.data_dir, FLAGS.tmp_dir, True, 2**14, 2**9),\n lambda: wsj_parsing.parsing_token_generator(\n FLAGS.data_dir, FLAGS.tmp_dir, False, 2**14, 2**9),\n lambda: None), # test set\n \"inference_snli32k\": (\n lambda: snli.snli_token_generator(FLAGS.tmp_dir, True, 2**15),\n lambda: snli.snli_token_generator(FLAGS.tmp_dir, False, 2**15),\n lambda: None), # test set\n \"audio_timit_characters_test\": (\n lambda: audio.timit_generator(\n FLAGS.data_dir, FLAGS.tmp_dir, True, 1718),\n lambda: audio.timit_generator(\n FLAGS.data_dir, FLAGS.tmp_dir, False, 626),\n lambda: None), # test set\n \"audio_timit_tokens_8k_test\": (\n lambda: audio.timit_generator(\n FLAGS.data_dir, FLAGS.tmp_dir, True, 1718,\n vocab_filename=\"vocab.endefr.%d\" % 2**13, vocab_size=2**13),\n lambda: audio.timit_generator(\n FLAGS.data_dir, FLAGS.tmp_dir, False, 626,\n vocab_filename=\"vocab.endefr.%d\" % 2**13, vocab_size=2**13),\n lambda: None), # test set\n \"audio_timit_tokens_32k_test\": (\n lambda: audio.timit_generator(\n FLAGS.data_dir, FLAGS.tmp_dir, True, 1718,\n vocab_filename=\"vocab.endefr.%d\" % 2**15, vocab_size=2**15),\n lambda: audio.timit_generator(\n FLAGS.data_dir, FLAGS.tmp_dir, False, 626,\n vocab_filename=\"vocab.endefr.%d\" % 2**15, vocab_size=2**15),\n lambda: None), # test set\n}\n\n# pylint: enable=g-long-lambda\n\n\ndef set_random_seed():\n \"\"\"Set the random seed from flag everywhere.\"\"\"\n tf.set_random_seed(FLAGS.random_seed)\n random.seed(FLAGS.random_seed)\n np.random.seed(FLAGS.random_seed)\n\n\ndef main(_):\n # Fathom\n\n tf.logging.set_verbosity(tf.logging.INFO)\n usr_dir.import_usr_dir(FLAGS.t2t_usr_dir)\n\n # Fathom\n fathom_t2t_model_setup()\n\n # Calculate the list of problems to generate.\n problems = sorted(\n list(_SUPPORTED_PROBLEM_GENERATORS) + registry.list_problems())\n for exclude in 
FLAGS.exclude_problems.split(\",\"):\n if exclude:\n problems = [p for p in problems if exclude not in p]\n if FLAGS.problem and FLAGS.problem[-1] == \"*\":\n problems = [p for p in problems if p.startswith(FLAGS.problem[:-1])]\n elif FLAGS.problem and \",\" in FLAGS.problem:\n problems = [p for p in problems if p in FLAGS.problem.split(\",\")]\n elif FLAGS.problem:\n problems = [p for p in problems if p == FLAGS.problem]\n else:\n problems = []\n\n # Remove TIMIT if paths are not given.\n if getattr(FLAGS, \"timit_paths\", None):\n problems = [p for p in problems if \"timit\" not in p]\n # Remove parsing if paths are not given.\n if getattr(FLAGS, \"parsing_path\", None):\n problems = [p for p in problems if \"parsing_english_ptb\" not in p]\n\n if not problems:\n problems_str = \"\\n * \".join(\n sorted(list(_SUPPORTED_PROBLEM_GENERATORS) + registry.list_problems()))\n error_msg = (\"You must specify one of the supported problems to \"\n \"generate data for:\\n * \" + problems_str + \"\\n\")\n error_msg += (\"TIMIT and parsing need data_sets specified with \"\n \"--timit_paths and --parsing_path.\")\n raise ValueError(error_msg)\n\n if not FLAGS.data_dir:\n FLAGS.data_dir = tempfile.gettempdir()\n tf.logging.warning(\"It is strongly recommended to specify --data_dir. \"\n \"Data will be written to default data_dir=%s.\",\n FLAGS.data_dir)\n\n tf.logging.info(\"Generating problems:\\n%s\"\n % registry.display_list_by_prefix(problems,\n starting_spaces=4))\n if FLAGS.only_list:\n return\n for problem in problems:\n set_random_seed()\n\n if problem in _SUPPORTED_PROBLEM_GENERATORS:\n generate_data_for_problem(problem)\n else:\n generate_data_for_registered_problem(problem)\n\n # Fathom\n xcom.echo_yaml_for_xcom_ingest({'t2t_data_dir': FLAGS.data_dir})\n\n\ndef generate_data_for_problem(problem):\n \"\"\"Generate data for a problem in _SUPPORTED_PROBLEM_GENERATORS.\"\"\"\n training_gen, dev_gen, test_gen = _SUPPORTED_PROBLEM_GENERATORS[problem]\n\n num_train_shards = FLAGS.num_shards or 10\n tf.logging.info(\"Generating training data for %s.\", problem)\n train_output_files = generator_utils.train_data_filenames(\n problem + generator_utils.UNSHUFFLED_SUFFIX, FLAGS.data_dir,\n num_train_shards)\n generator_utils.generate_files(training_gen(), train_output_files,\n FLAGS.max_cases)\n num_dev_shards = int(num_train_shards * 0.1)\n tf.logging.info(\"Generating development data for %s.\", problem)\n dev_output_files = generator_utils.dev_data_filenames(\n problem + generator_utils.UNSHUFFLED_SUFFIX, FLAGS.data_dir,\n num_dev_shards)\n generator_utils.generate_files(dev_gen(), dev_output_files)\n num_test_shards = int(num_train_shards * 0.1)\n test_output_files = []\n test_gen_data = test_gen()\n if test_gen_data is not None:\n tf.logging.info(\"Generating test data for %s.\", problem)\n test_output_files = generator_utils.test_data_filenames(\n problem + generator_utils.UNSHUFFLED_SUFFIX, FLAGS.data_dir,\n num_test_shards)\n generator_utils.generate_files(test_gen_data, test_output_files)\n all_output_files = train_output_files + dev_output_files + test_output_files\n generator_utils.shuffle_dataset(all_output_files)\n\n\ndef generate_data_in_process(arg):\n problem_name, data_dir, tmp_dir, task_id = arg\n problem = registry.problem(problem_name)\n problem.generate_data(data_dir, tmp_dir, task_id)\n\n\ndef generate_data_for_registered_problem(problem_name):\n \"\"\"Generate data for a registered problem.\"\"\"\n tf.logging.info(\"Generating data for %s.\", problem_name)\n if FLAGS.num_shards:\n 
raise ValueError(\"--num_shards should not be set for registered Problem.\")\n problem = registry.problem(problem_name)\n task_id = None if FLAGS.task_id < 0 else FLAGS.task_id\n data_dir = os.path.expanduser(FLAGS.data_dir)\n tmp_dir = os.path.expanduser(FLAGS.tmp_dir)\n if task_id is None and problem.multiprocess_generate:\n if FLAGS.task_id_start != -1:\n assert FLAGS.task_id_end != -1\n task_id_start = FLAGS.task_id_start\n task_id_end = FLAGS.task_id_end\n else:\n task_id_start = 0\n task_id_end = problem.num_generate_tasks\n pool = multiprocessing.Pool(processes=FLAGS.num_concurrent_processes)\n problem.prepare_to_generate(data_dir, tmp_dir)\n args = [(problem_name, data_dir, tmp_dir, task_id)\n for task_id in range(task_id_start, task_id_end)]\n pool.map(generate_data_in_process, args)\n else:\n problem.generate_data(data_dir, tmp_dir, task_id)\n\nif __name__ == \"__main__\":\n tf.logging.set_verbosity(tf.logging.INFO)\n tf.app.run()\n"
] | [
[
"tensorflow.logging.info"
],
[
"tensorflow.logging.warning",
"numpy.random.seed",
"tensorflow.logging.info",
"tensorflow.logging.set_verbosity",
"tensorflow.set_random_seed",
"tensorflow.app.run"
]
] |
dmitryduev/broker | [
"7b9582fae6cd37bbd334bca228ef429d96e0e498",
"7b9582fae6cd37bbd334bca228ef429d96e0e498"
] | [
"kowalski/alert_watcher_zuds.py",
"kowalski/dev/ingest_known_lenses_20180901.py"
] | [
"import argparse\nimport os\nimport sys\nimport io\nimport time\nimport json\nfrom bson.json_util import dumps\nimport traceback\n\nimport confluent_kafka\nfrom ast import literal_eval\nimport avro.schema\nimport fastavro\nimport subprocess\nimport datetime\nimport multiprocessing\n# import threading\n\nimport pymongo\nimport pytz\nfrom numba import jit\nimport numpy as np\n\nfrom tensorflow.keras.models import load_model\nimport gzip\nimport io\nfrom astropy.io import fits\nfrom copy import deepcopy\n\n\n''' load config and secrets '''\nwith open('/app/config.json') as cjson:\n config = json.load(cjson)\n\nwith open('/app/secrets.json') as sjson:\n secrets = json.load(sjson)\n\nfor k in secrets:\n config[k].update(secrets.get(k, {}))\n\n\ndef utc_now():\n return datetime.datetime.now(pytz.utc)\n\n\ndef time_stamps():\n \"\"\"\n\n :return: local time, UTC time\n \"\"\"\n return datetime.datetime.now().strftime('%Y%m%d_%H:%M:%S'), \\\n datetime.datetime.utcnow().strftime('%Y%m%d_%H:%M:%S')\n\n\n@jit\ndef deg2hms(x):\n \"\"\"Transform degrees to *hours:minutes:seconds* strings.\n\n Parameters\n ----------\n x : float\n The degree value c [0, 360) to be written as a sexagesimal string.\n\n Returns\n -------\n out : str\n The input angle written as a sexagesimal string, in the\n form, hours:minutes:seconds.\n\n \"\"\"\n assert 0.0 <= x < 360.0, 'Bad RA value in degrees'\n # ac = Angle(x, unit='degree')\n # hms = str(ac.to_string(unit='hour', sep=':', pad=True))\n # print(str(hms))\n _h = np.floor(x * 12.0 / 180.)\n _m = np.floor((x * 12.0 / 180. - _h) * 60.0)\n _s = ((x * 12.0 / 180. - _h) * 60.0 - _m) * 60.0\n hms = '{:02.0f}:{:02.0f}:{:07.4f}'.format(_h, _m, _s)\n # print(hms)\n return hms\n\n\n@jit\ndef deg2dms(x):\n \"\"\"Transform degrees to *degrees:arcminutes:arcseconds* strings.\n\n Parameters\n ----------\n x : float\n The degree value c [-90, 90] to be converted.\n\n Returns\n -------\n out : str\n The input angle as a string, written as degrees:minutes:seconds.\n\n \"\"\"\n assert -90.0 <= x <= 90.0, 'Bad Dec value in degrees'\n # ac = Angle(x, unit='degree')\n # dms = str(ac.to_string(unit='degree', sep=':', pad=True))\n # print(dms)\n _d = np.floor(abs(x)) * np.sign(x)\n _m = np.floor(np.abs(x - _d) * 60.0)\n _s = np.abs(np.abs(x - _d) * 60.0 - _m) * 60.0\n dms = '{:02.0f}:{:02.0f}:{:06.3f}'.format(_d, _m, _s)\n # print(dms)\n return dms\n\n\n@jit\ndef great_circle_distance(ra1_deg, dec1_deg, ra2_deg, dec2_deg):\n \"\"\"\n Distance between two points on the sphere\n :param ra1_deg:\n :param dec1_deg:\n :param ra2_deg:\n :param dec2_deg:\n :return: distance in degrees\n \"\"\"\n # this is orders of magnitude faster than astropy.coordinates.Skycoord.separation\n DEGRA = np.pi / 180.0\n ra1, dec1, ra2, dec2 = ra1_deg * DEGRA, dec1_deg * DEGRA, ra2_deg * DEGRA, dec2_deg * DEGRA\n delta_ra = np.abs(ra2 - ra1)\n distance = np.arctan2(np.sqrt((np.cos(dec2) * np.sin(delta_ra)) ** 2\n + (np.cos(dec1) * np.sin(dec2) - np.sin(dec1) * np.cos(dec2) * np.cos(\n delta_ra)) ** 2),\n np.sin(dec1) * np.sin(dec2) + np.cos(dec1) * np.cos(dec2) * np.cos(delta_ra))\n\n return distance * 180.0 / np.pi\n\n\n@jit\ndef in_ellipse(alpha, delta0, alpha1, delta01, d0, axis_ratio, PA0):\n \"\"\"\n Check if a given point (alpha, delta0)\n is within an ellipse specified by\n center (alpha1, delta01), maj_ax (d0), axis ratio and positional angle\n All angles are in decimal degrees\n Adapted from q3c: https://github.com/segasai/q3c/blob/master/q3cube.c\n :param alpha:\n :param delta0:\n :param alpha1:\n :param 
delta01:\n :param d0:\n :param axis_ratio:\n :param PA0:\n :return:\n \"\"\"\n DEGRA = np.pi / 180.0\n\n # convert degrees to radians\n d_alpha = (alpha1 - alpha) * DEGRA\n delta1 = delta01 * DEGRA\n delta = delta0 * DEGRA\n PA = PA0 * DEGRA\n d = d0 * DEGRA\n e = np.sqrt(1.0 - axis_ratio * axis_ratio)\n\n t1 = np.cos(d_alpha)\n t22 = np.sin(d_alpha)\n t3 = np.cos(delta1)\n t32 = np.sin(delta1)\n t6 = np.cos(delta)\n t26 = np.sin(delta)\n t9 = np.cos(d)\n t55 = np.sin(d)\n\n if (t3 * t6 * t1 + t32 * t26) < 0:\n return False\n\n t2 = t1 * t1\n\n t4 = t3 * t3\n t5 = t2 * t4\n\n t7 = t6 * t6\n t8 = t5 * t7\n\n t10 = t9 * t9\n t11 = t7 * t10\n t13 = np.cos(PA)\n t14 = t13 * t13\n t15 = t14 * t10\n t18 = t7 * t14\n t19 = t18 * t10\n\n t24 = np.sin(PA)\n\n t31 = t1 * t3\n\n t36 = 2.0 * t31 * t32 * t26 * t6\n t37 = t31 * t32\n t38 = t26 * t6\n t45 = t4 * t10\n\n t56 = t55 * t55\n t57 = t4 * t7\n t60 = -t8 + t5 * t11 + 2.0 * t5 * t15 - t5 * t19 - \\\n 2.0 * t1 * t4 * t22 * t10 * t24 * t13 * t26 - t36 + \\\n 2.0 * t37 * t38 * t10 - 2.0 * t37 * t38 * t15 - t45 * t14 - t45 * t2 + \\\n 2.0 * t22 * t3 * t32 * t6 * t24 * t10 * t13 - t56 + t7 - t11 + t4 - t57 + t57 * t10 + t19 - t18 * t45\n t61 = e * e\n t63 = t60 * t61 + t8 + t57 - t4 - t7 + t56 + t36\n\n return t63 > 0\n\n\n\"\"\"Utilities for manipulating Avro data and schemas.\n\"\"\"\n\n\ndef _loadSingleAvsc(file_path, names):\n \"\"\"Load a single avsc file.\n \"\"\"\n with open(file_path) as file_text:\n json_data = json.load(file_text)\n schema = avro.schema.SchemaFromJSONData(json_data, names)\n return schema\n\n\ndef combineSchemas(schema_files):\n \"\"\"Combine multiple nested schemas into a single schema.\n\n Parameters\n ----------\n schema_files : `list`\n List of files containing schemas.\n If nested, most internal schema must be first.\n\n Returns\n -------\n `dict`\n Avro schema\n \"\"\"\n known_schemas = avro.schema.Names()\n\n for s in schema_files:\n schema = _loadSingleAvsc(s, known_schemas)\n return schema.to_json()\n\n\ndef writeAvroData(json_data, json_schema):\n \"\"\"Encode json into Avro format given a schema.\n\n Parameters\n ----------\n json_data : `dict`\n The JSON data containing message content.\n json_schema : `dict`\n The writer Avro schema for encoding data.\n\n Returns\n -------\n `_io.BytesIO`\n Encoded data.\n \"\"\"\n bytes_io = io.BytesIO()\n fastavro.schemaless_writer(bytes_io, json_schema, json_data)\n return bytes_io\n\n\ndef readAvroData(bytes_io, json_schema):\n \"\"\"Read data and decode with a given Avro schema.\n\n Parameters\n ----------\n bytes_io : `_io.BytesIO`\n Data to be decoded.\n json_schema : `dict`\n The reader Avro schema for decoding data.\n\n Returns\n -------\n `dict`\n Decoded data.\n \"\"\"\n bytes_io.seek(0)\n message = fastavro.schemaless_reader(bytes_io, json_schema)\n return message\n\n\ndef readSchemaData(bytes_io):\n \"\"\"Read data that already has an Avro schema.\n\n Parameters\n ----------\n bytes_io : `_io.BytesIO`\n Data to be decoded.\n\n Returns\n -------\n `dict`\n Decoded data.\n \"\"\"\n bytes_io.seek(0)\n message = fastavro.reader(bytes_io)\n return message\n\n\nclass AlertError(Exception):\n \"\"\"Base class for exceptions in this module.\n \"\"\"\n pass\n\n\nclass EopError(AlertError):\n \"\"\"Exception raised when reaching end of partition.\n\n Parameters\n ----------\n msg : Kafka message\n The Kafka message result from consumer.poll().\n \"\"\"\n def __init__(self, msg):\n message = 'topic:%s, partition:%d, status:end, ' \\\n 'offset:%d, key:%s, time:%.3f\\n' \\\n % 
(msg.topic(), msg.partition(),\n msg.offset(), str(msg.key()), time.time())\n self.message = message\n\n def __str__(self):\n return self.message\n\n\nclass AlertConsumer(object):\n \"\"\"Creates an alert stream Kafka consumer for a given topic.\n\n Parameters\n ----------\n topic : `str`\n Name of the topic to subscribe to.\n schema_files : Avro schema files\n The reader Avro schema files for decoding data. Optional.\n **kwargs\n Keyword arguments for configuring confluent_kafka.Consumer().\n \"\"\"\n\n def __init__(self, topic, schema_files=None, **kwargs):\n\n # keep track of disconnected partitions\n self.num_disconnected_partitions = 0\n self.topic = topic\n\n def error_cb(err, _self=self):\n print(*time_stamps(), 'error_cb -------->', err)\n # print(err.code())\n if err.code() == -195:\n _self.num_disconnected_partitions += 1\n if _self.num_disconnected_partitions == _self.num_partitions:\n print(*time_stamps(), 'all partitions got disconnected, killing thread')\n sys.exit()\n else:\n print(*time_stamps(), '{:s}: disconnected from partition.'.format(_self.topic),\n 'total:', self.num_disconnected_partitions)\n\n # 'error_cb': error_cb\n kwargs['error_cb'] = error_cb\n\n self.consumer = confluent_kafka.Consumer(**kwargs)\n self.num_partitions = 0\n\n def on_assign(consumer, partitions, _self=self):\n # force-reset offsets when subscribing to a topic:\n for part in partitions:\n # -2 stands for beginning and -1 for end\n part.offset = -2\n # keep number of partitions. when reaching end of last partition, kill thread and start from beginning\n _self.num_partitions += 1\n print(consumer.get_watermark_offsets(part))\n\n self.consumer.subscribe([topic], on_assign=on_assign)\n # self.consumer.subscribe([topic])\n\n # fixme?\n # if schema_files is not None:\n # self.alert_schema = combineSchemas(schema_files)\n\n # MongoDB:\n self.config = config\n self.collection_alerts = 'ZUDS_alerts'\n self.collection_alerts_aux = 'ZUDS_alerts_aux'\n self.db = None\n self.connect_to_db()\n\n # indexes\n self.db['db'][self.collection_alerts].create_index([('coordinates.radec_geojson', '2dsphere'),\n ('candid', pymongo.DESCENDING)], background=True)\n self.db['db'][self.collection_alerts].create_index([('coordinates.radec_geojson', '2dsphere'),\n ('objectId', pymongo.DESCENDING)], background=True)\n self.db['db'][self.collection_alerts].create_index([('objectId', pymongo.ASCENDING)], background=True)\n self.db['db'][self.collection_alerts].create_index([('candid', pymongo.ASCENDING)], background=True)\n self.db['db'][self.collection_alerts].create_index([('candidate.ztfname', pymongo.ASCENDING)], background=True)\n self.db['db'][self.collection_alerts].create_index([('candidate.jdstartstack', pymongo.DESCENDING),\n ('candidate.jdendstack', pymongo.ASCENDING)],\n background=True, sparse=True)\n self.db['db'][self.collection_alerts].create_index([('candidate.jd', pymongo.DESCENDING),\n ('candidate.drb', pymongo.DESCENDING),\n ('candid', pymongo.DESCENDING)],\n background=True, sparse=True)\n self.db['db'][self.collection_alerts].create_index([('candidate.jd', 1),\n ('candidate.drb', 1),\n ('candidate.isdiffpos', 1),\n ('candidate.ndethist', 1)],\n name='jd__braai__magpsf__isdiffpos__ndethist',\n background=True, sparse=True)\n\n # ML models:\n self.ml_models = dict()\n for m in config['ml_models']:\n try:\n m_v = config[\"ml_models\"][m][\"version\"]\n self.ml_models[m] = {'model': load_model(f'/app/models/{m}_{m_v}.h5'),\n 'version': m_v}\n except Exception as e:\n print(*time_stamps(), f'Error loading ML 
model {m}')\n traceback.print_exc()\n print(e)\n continue\n\n def connect_to_db(self):\n \"\"\"\n Connect to mongo\n :return:\n \"\"\"\n\n _config = self.config\n\n try:\n # there's only one instance of DB, it's too big to be replicated\n _client = pymongo.MongoClient(host=_config['database']['host'],\n port=_config['database']['port'], connect=False)\n # grab main database:\n _db = _client[_config['database']['db']]\n except Exception as _e:\n raise ConnectionRefusedError\n try:\n # authenticate\n _db.authenticate(_config['database']['user'], _config['database']['pwd'])\n except Exception as _e:\n raise ConnectionRefusedError\n\n self.db = dict()\n self.db['client'] = _client\n self.db['db'] = _db\n\n def insert_db_entry(self, _collection=None, _db_entry=None):\n \"\"\"\n Insert a document _doc to collection _collection in DB.\n It is monitored for timeout in case DB connection hangs for some reason\n :param _collection:\n :param _db_entry:\n :return:\n \"\"\"\n assert _collection is not None, 'Must specify collection'\n assert _db_entry is not None, 'Must specify document'\n try:\n self.db['db'][_collection].insert_one(_db_entry)\n except Exception as _e:\n print(*time_stamps(), 'Error inserting {:s} into {:s}'.format(str(_db_entry['_id']), _collection))\n traceback.print_exc()\n print(_e)\n\n def insert_multiple_db_entries(self, _collection=None, _db_entries=None):\n \"\"\"\n Insert a document _doc to collection _collection in DB.\n It is monitored for timeout in case DB connection hangs for some reason\n :param _db:\n :param _collection:\n :param _db_entries:\n :return:\n \"\"\"\n assert _collection is not None, 'Must specify collection'\n assert _db_entries is not None, 'Must specify documents'\n try:\n # ordered=False ensures that every insert operation will be attempted\n # so that if, e.g., a document already exists, it will be simply skipped\n self.db['db'][_collection].insert_many(_db_entries, ordered=False)\n except pymongo.errors.BulkWriteError as bwe:\n print(*time_stamps(), bwe.details)\n except Exception as _e:\n traceback.print_exc()\n print(_e)\n\n def replace_db_entry(self, _collection=None, _filter=None, _db_entry=None):\n \"\"\"\n Insert a document _doc to collection _collection in DB.\n It is monitored for timeout in case DB connection hangs for some reason\n :param _collection:\n :param _filter:\n :param _db_entry:\n :return:\n \"\"\"\n assert _collection is not None, 'Must specify collection'\n assert _db_entry is not None, 'Must specify document'\n try:\n self.db['db'][_collection].replace_one(_filter, _db_entry, upsert=True)\n except Exception as _e:\n print(*time_stamps(), 'Error replacing {:s} in {:s}'.format(str(_db_entry['_id']), _collection))\n traceback.print_exc()\n print(_e)\n\n @staticmethod\n def alert_mongify(alert):\n\n doc = dict(alert)\n\n # let mongo create a unique id\n # candid+objectId is a unique combination:\n # doc['_id'] = f\"{alert['candid']}_{alert['objectId']}\"\n\n # placeholders for cross-matches and classifications\n # doc['cross_matches'] = dict()\n doc['classifications'] = dict()\n\n # GeoJSON for 2D indexing\n doc['coordinates'] = {}\n _ra = doc['candidate']['ra']\n _dec = doc['candidate']['dec']\n _radec = [_ra, _dec]\n # string format: H:M:S, D:M:S\n # tic = time.time()\n _radec_str = [deg2hms(_ra), deg2dms(_dec)]\n # print(time.time() - tic)\n # print(_radec_str)\n doc['coordinates']['radec_str'] = _radec_str\n # for GeoJSON, must be lon:[-180, 180], lat:[-90, 90] (i.e. 
in deg)\n _radec_geojson = [_ra - 180.0, _dec]\n doc['coordinates']['radec_geojson'] = {'type': 'Point',\n 'coordinates': _radec_geojson}\n # radians and degrees:\n # doc['coordinates']['radec_rad'] = [_ra * np.pi / 180.0, _dec * np.pi / 180.0]\n # doc['coordinates']['radec_deg'] = [_ra, _dec]\n\n light_curve = deepcopy(doc['light_curve'])\n doc.pop('light_curve', None)\n if light_curve is None:\n light_curve = []\n for lc in light_curve:\n if lc['flux'] > 0:\n lc['mag'] = -2.5 * np.log10(lc['flux']) + lc['zp']\n\n return doc, light_curve\n\n def poll(self, path_alerts=None, path_tess=None, datestr=None, save_packets=True):\n \"\"\"\n Polls Kafka broker to consume topic.\n :param path_alerts:\n :param path_tess:\n :param datestr:\n :return:\n \"\"\"\n # msg = self.consumer.poll(timeout=timeout)\n msg = self.consumer.poll()\n\n if msg is None:\n print(*time_stamps(), 'Caught error: msg is None')\n\n if msg.error():\n print('Caught error:', msg.error())\n # if msg.value() is not None:\n # print(*time_stamps(), msg.value())\n raise EopError(msg)\n\n elif msg is not None:\n # decode avro packet\n msg_decoded = self.decodeMessage(msg)\n for record in msg_decoded:\n\n candid = record['candid']\n objectId = record['objectId']\n\n print(*time_stamps(), self.topic, objectId, candid)\n\n # check that candid not in collection_alerts\n if self.db['db'][self.collection_alerts].count_documents({'candid': candid}, limit=1) == 0:\n # candid not in db, ingest\n\n if save_packets:\n # save avro packet to disk\n path_alert_dir = os.path.join(path_alerts, datestr)\n # mkdir if does not exist\n if not os.path.exists(path_alert_dir):\n os.makedirs(path_alert_dir)\n path_avro = os.path.join(path_alert_dir, f'{candid}.avro')\n print(*time_stamps(), f'saving {candid} to disk')\n with open(path_avro, 'wb') as f:\n f.write(msg.value())\n\n # ingest decoded avro packet into db\n alert, light_curve = self.alert_mongify(record)\n\n # alert filters:\n\n # ML models:\n scores = alert_filter__ml(record, ml_models=self.ml_models)\n alert['classifications'] = scores\n\n print(*time_stamps(), f'ingesting {alert[\"candid\"]} into db')\n self.insert_db_entry(_collection=self.collection_alerts, _db_entry=alert)\n\n # light_curve: pop nulls - save space\n light_curve = [{kk: vv for kk, vv in lc.items() if vv is not None} for lc in light_curve]\n\n # cross-match with external catalogs if objectId not in collection_alerts_aux:\n if self.db['db'][self.collection_alerts_aux].count_documents({'_id': objectId}, limit=1) == 0:\n # tic = time.time()\n xmatches = alert_filter__xmatch(self.db['db'], alert)\n # CLU cross-match:\n xmatches = {**xmatches, **alert_filter__xmatch_clu(self.db['db'], alert)}\n # alert['cross_matches'] = xmatches\n # toc = time.time()\n # print(f'xmatch for {alert[\"candid\"]} took {toc-tic:.2f} s')\n\n alert_aux = {'_id': objectId,\n 'cross_matches': xmatches,\n 'light_curve': light_curve}\n\n self.insert_db_entry(_collection=self.collection_alerts_aux, _db_entry=alert_aux)\n\n else:\n self.db['db'][self.collection_alerts_aux].update_one({'_id': objectId},\n {'$addToSet':\n {'light_curve':\n {'$each': light_curve}}},\n upsert=True)\n\n # dump packet as json to disk if in a public TESS sector\n if 'TESS' in alert['candidate']['programpi']:\n # put light_curve back\n alert['light_curve'] = light_curve\n\n # get cross-matches\n # xmatches = self.db['db'][self.collection_alerts_aux].find_one({'_id': objectId})\n xmatches = self.db['db'][self.collection_alerts_aux].find({'_id': objectId},\n {'cross_matches': 1},\n 
limit=1)\n xmatches = list(xmatches)[0]\n alert['cross_matches'] = xmatches['cross_matches']\n\n if save_packets:\n path_tess_dir = os.path.join(path_tess, datestr)\n # mkdir if does not exist\n if not os.path.exists(path_tess_dir):\n os.makedirs(path_tess_dir)\n\n print(*time_stamps(), f'saving {alert[\"candid\"]} to disk')\n try:\n with open(os.path.join(path_tess_dir, f\"{alert['candid']}.json\"), 'w') as f:\n f.write(dumps(alert))\n except Exception as e:\n print(time_stamps(), str(e))\n _err = traceback.format_exc()\n print(*time_stamps(), str(_err))\n\n def decodeMessage(self, msg):\n \"\"\"Decode Avro message according to a schema.\n\n Parameters\n ----------\n msg : Kafka message\n The Kafka message result from consumer.poll().\n\n Returns\n -------\n `dict`\n Decoded message.\n \"\"\"\n # print(msg.topic(), msg.offset(), msg.error(), msg.key(), msg.value())\n message = msg.value()\n # print(message)\n try:\n bytes_io = io.BytesIO(message)\n decoded_msg = readSchemaData(bytes_io)\n # print(decoded_msg)\n # decoded_msg = readAvroData(bytes_io, self.alert_schema)\n # print(decoded_msg)\n except AssertionError:\n # FIXME this exception is raised but not sure if it matters yet\n bytes_io = io.BytesIO(message)\n decoded_msg = None\n except IndexError:\n literal_msg = literal_eval(str(message, encoding='utf-8')) # works to give bytes\n bytes_io = io.BytesIO(literal_msg) # works to give <class '_io.BytesIO'>\n decoded_msg = readSchemaData(bytes_io) # yields reader\n except Exception:\n decoded_msg = message\n finally:\n return decoded_msg\n\n\ndef msg_text(message):\n \"\"\"Remove postage stamp cutouts from an alert message.\n \"\"\"\n message_text = {k: message[k] for k in message\n if k not in ['cutoutDifference', 'cutoutTemplate', 'cutoutScience']}\n return message_text\n\n\ndef write_stamp_file(stamp_dict, output_dir):\n \"\"\"Given a stamp dict that follows the cutout schema,\n write data to a file in a given directory.\n \"\"\"\n try:\n filename = stamp_dict['fileName']\n try:\n os.makedirs(output_dir)\n except OSError:\n pass\n out_path = os.path.join(output_dir, filename)\n with open(out_path, 'wb') as f:\n f.write(stamp_dict['stampData'])\n except TypeError:\n sys.stderr.write('%% Cannot get stamp\\n')\n return\n\n\ndef alert_filter(alert, stampdir=None):\n \"\"\"Filter to apply to each alert.\n See schemas: https://github.com/ZwickyTransientFacility/ztf-avro-alert\n \"\"\"\n data = msg_text(alert)\n if data: # Write your condition statement here\n print(data) # Print all main alert data to screen\n if stampdir is not None: # Collect all postage stamps\n write_stamp_file(\n alert.get('cutoutDifference'), stampdir)\n write_stamp_file(\n alert.get('cutoutTemplate'), stampdir)\n write_stamp_file(\n alert.get('cutoutScience'), stampdir)\n return\n\n\ndef make_triplet(alert, to_tpu: bool = False):\n \"\"\"\n Feed in alert packet\n \"\"\"\n cutout_dict = dict()\n\n for cutout in ('science', 'template', 'difference'):\n # cutout_data = loads(dumps([alert[f'cutout{cutout.capitalize()}']['stampData']]))[0]\n # cutout_data = alert[f'cutout{cutout.capitalize()}']['stampData']\n cutout_data = alert[f'cutout{cutout.capitalize()}']\n\n # unzip\n with gzip.open(io.BytesIO(cutout_data), 'rb') as f:\n with fits.open(io.BytesIO(f.read())) as hdu:\n data = hdu[0].data\n # replace nans with zeros\n cutout_dict[cutout] = np.nan_to_num(data)\n # L2-normalize\n cutout_dict[cutout] /= np.linalg.norm(cutout_dict[cutout])\n\n # pad to 63x63 if smaller\n shape = cutout_dict[cutout].shape\n if shape != (63, 
63):\n # print(f'Shape of {candid}/{cutout}: {shape}, padding to (63, 63)')\n cutout_dict[cutout] = np.pad(cutout_dict[cutout], [(0, 63 - shape[0]), (0, 63 - shape[1])],\n mode='constant', constant_values=1e-9)\n\n triplet = np.zeros((63, 63, 3))\n triplet[:, :, 0] = cutout_dict['science']\n triplet[:, :, 1] = cutout_dict['template']\n triplet[:, :, 2] = cutout_dict['difference']\n\n if to_tpu:\n # Edge TPUs require additional processing\n triplet = np.rint(triplet * 128 + 128).astype(np.uint8).flatten()\n\n return triplet\n\n\ndef alert_filter__ml(alert, ml_models: dict = None):\n \"\"\"Filter to apply to each alert.\n \"\"\"\n\n scores = dict()\n\n try:\n ''' braai '''\n triplet = make_triplet(alert)\n triplets = np.expand_dims(triplet, axis=0)\n braai = ml_models['braai']['model'].predict(x=triplets)[0]\n # braai = 1.0\n scores['braai'] = float(braai)\n scores['braai_version'] = ml_models['braai']['version']\n except Exception as e:\n print(*time_stamps(), str(e))\n\n return scores\n\n\n# cone search radius:\ncone_search_radius = float(config['xmatch']['cone_search_radius'])\n# convert to rad:\nif config['xmatch']['cone_search_unit'] == 'arcsec':\n cone_search_radius *= np.pi / 180.0 / 3600.\nelif config['xmatch']['cone_search_unit'] == 'arcmin':\n cone_search_radius *= np.pi / 180.0 / 60.\nelif config['xmatch']['cone_search_unit'] == 'deg':\n cone_search_radius *= np.pi / 180.0\nelif config['xmatch']['cone_search_unit'] == 'rad':\n cone_search_radius *= 1\nelse:\n raise Exception('Unknown cone search unit. Must be in [deg, rad, arcsec, arcmin]')\n\n\ndef alert_filter__xmatch(db, alert):\n \"\"\"\n Filter to apply to each alert.\n \"\"\"\n\n xmatches = dict()\n\n try:\n ra_geojson = float(alert['candidate']['ra'])\n # geojson-friendly ra:\n ra_geojson -= 180.0\n dec_geojson = float(alert['candidate']['dec'])\n\n ''' catalogs '''\n for catalog in config['xmatch']['catalogs']:\n catalog_filter = config['xmatch']['catalogs'][catalog]['filter']\n catalog_projection = config['xmatch']['catalogs'][catalog]['projection']\n\n object_position_query = dict()\n object_position_query['coordinates.radec_geojson'] = {\n '$geoWithin': {'$centerSphere': [[ra_geojson, dec_geojson], cone_search_radius]}}\n s = db[catalog].find({**object_position_query, **catalog_filter},\n {**catalog_projection})\n xmatches[catalog] = list(s)\n\n except Exception as e:\n print(*time_stamps(), str(e))\n\n return xmatches\n\n\n# cone search radius in deg:\ncone_search_radius_clu = 3.0\n# convert deg to rad:\ncone_search_radius_clu *= np.pi / 180.0\n\n\ndef alert_filter__xmatch_clu(database, alert, size_margin=3, clu_version='CLU_20190625'):\n \"\"\"\n Filter to apply to each alert.\n :param size_margin: multiply galaxy size by this much before looking for a match\n :param clu_version: CLU catalog version\n \"\"\"\n\n xmatches = dict()\n\n try:\n ra = float(alert['candidate']['ra'])\n dec = float(alert['candidate']['dec'])\n\n # geojson-friendly ra:\n ra_geojson = float(alert['candidate']['ra']) - 180.0\n dec_geojson = dec\n\n catalog_filter = {}\n catalog_projection = {\"_id\": 1, \"name\": 1, \"ra\": 1, \"dec\": 1,\n \"a\": 1, \"b2a\": 1, \"pa\": 1, \"z\": 1,\n \"sfr_fuv\": 1, \"mstar\": 1, \"sfr_ha\": 1,\n \"coordinates.radec_str\": 1}\n\n # first do a coarse search of everything that is around\n object_position_query = dict()\n object_position_query['coordinates.radec_geojson'] = {\n '$geoWithin': {'$centerSphere': [[ra_geojson, dec_geojson], cone_search_radius_clu]}}\n s = 
database[clu_version].find({**object_position_query, **catalog_filter},\n {**catalog_projection})\n galaxies = list(s)\n\n # these guys are very big, so check them separately\n M31 = {'_id': 596900, 'name': 'PGC2557',\n 'ra': 10.6847, 'dec': 41.26901, 'a': 6.35156, 'b2a': 0.32, 'pa': 35.0,\n 'sfr_fuv': None, 'mstar': 253816876.412914, 'sfr_ha': 0,\n 'coordinates': {'radec_geojson': [\"00:42:44.3503\", \"41:16:08.634\"]}\n }\n M33 = {'_id': 597543, 'name': 'PGC5818',\n 'ra': 23.46204, 'dec': 30.66022, 'a': 2.35983, 'b2a': 0.59, 'pa': 23.0,\n 'sfr_fuv': None, 'mstar': 4502777.420493, 'sfr_ha': 0,\n 'coordinates': {'radec_geojson': [\"01:33:50.8900\", \"30:39:36.800\"]}\n }\n\n # do elliptical matches\n matches = []\n\n for galaxy in galaxies + [M31, M33]:\n alpha1, delta01 = galaxy['ra'], galaxy['dec']\n d0, axis_ratio, PA0 = galaxy['a'], galaxy['b2a'], galaxy['pa']\n\n # no shape info for galaxy? replace with median values\n if d0 < -990:\n d0 = 0.0265889\n if axis_ratio < -990:\n axis_ratio = 0.61\n if PA0 < -990:\n PA0 = 86.0\n\n in_galaxy = in_ellipse(ra, dec, alpha1, delta01, size_margin * d0, axis_ratio, PA0)\n\n if in_galaxy:\n match = galaxy\n distance_arcsec = round(great_circle_distance(ra, dec, alpha1, delta01) * 3600, 2)\n match['coordinates']['distance_arcsec'] = distance_arcsec\n matches.append(match)\n\n xmatches[clu_version] = matches\n\n except Exception as e:\n print(*time_stamps(), str(e))\n\n return xmatches\n\n\ndef listener(topic, bootstrap_servers='', offset_reset='earliest',\n group=None, path_alerts=None, path_tess=None, save_packets=True):\n \"\"\"\n Listen to a topic\n :param topic:\n :param bootstrap_servers:\n :param offset_reset:\n :param group:\n :param path_alerts:\n :return:\n \"\"\"\n\n # def error_cb(err):\n # print(*time_stamps(), 'error_cb -------->', err)\n # # print(err.code())\n # if err.code() == -195:\n # print(*time_stamps(), 'got disconnected, killing thread')\n # sys.exit()\n\n # Configure consumer connection to Kafka broker\n conf = {'bootstrap.servers': bootstrap_servers,\n # 'error_cb': error_cb,\n 'default.topic.config': {'auto.offset.reset': offset_reset}}\n if group is not None:\n conf['group.id'] = group\n else:\n conf['group.id'] = os.environ['HOSTNAME'] if 'HOSTNAME' in os.environ else 'kowalski.caltech.edu'\n\n # make it unique:\n conf['group.id'] = '{:s}_{:s}'.format(conf['group.id'], datetime.datetime.utcnow().strftime('%Y-%m-%d_%H:%M:%S.%f'))\n\n # Configure Avro reader schema\n schema_files = [\"ztf-avro-alert/schema/candidate.avsc\",\n \"ztf-avro-alert/schema/cutout.avsc\",\n \"ztf-avro-alert/schema/light_curve.avsc\",\n \"ztf-avro-alert/schema/alert.avsc\"]\n\n # date string:\n datestr = topic.split('_')[1]\n\n # Start alert stream consumer\n stream_reader = AlertConsumer(topic, schema_files, **conf)\n\n # todo: Subscribe alert filters to stream_readers\n # todo: they will be notified when an alert arrived/got x-matched\n\n while True:\n try:\n # poll!\n # print(*time_stamps(), 'Polling')\n stream_reader.poll(path_alerts=path_alerts, path_tess=path_tess,\n datestr=datestr, save_packets=save_packets)\n\n except EopError as e:\n # Write when reaching end of partition\n # sys.stderr.write(e.message)\n print(*time_stamps(), e.message)\n except IndexError:\n # sys.stderr.write('%% Data cannot be decoded\\n')\n print(*time_stamps(), '%% Data cannot be decoded\\n')\n except UnicodeDecodeError:\n # sys.stderr.write('%% Unexpected data format received\\n')\n print(*time_stamps(), '%% Unexpected data format received\\n')\n except 
KeyboardInterrupt:\n # sys.stderr.write('%% Aborted by user\\n')\n print(*time_stamps(), '%% Aborted by user\\n')\n sys.exit()\n except Exception as e:\n print(*time_stamps(), str(e))\n _err = traceback.format_exc()\n print(*time_stamps(), str(_err))\n sys.exit()\n\n\ndef main(_obs_date=None, _save_packets=True):\n\n topics_on_watch = dict()\n\n while True:\n\n try:\n if True:\n # get kafka topic names with kafka-topics command\n kafka_cmd = [config['kafka-topics']['cmd'],\n '--zookeeper', config['kafka-topics']['zookeeper'], '-list']\n # print(kafka_cmd)\n\n topics = subprocess.run(kafka_cmd, stdout=subprocess.PIPE).stdout.decode('utf-8').split('\\n')[:-1]\n # print(topics)\n\n if _obs_date is None:\n datestr = datetime.datetime.utcnow().strftime('%Y%m%d')\n else:\n datestr = _obs_date\n # as of 20180403 naming convention is ztf_%Y%m%d_programidN\n # topics_tonight = [t for t in topics if (datestr in t) and ('programid' in t)]\n # ZUDS only\n topics_tonight = [t for t in topics if (datestr in t) and ('programid' in t) and ('zuds' in t)]\n print(*time_stamps(), topics_tonight)\n\n if False:\n # for testing\n topics_tonight = ['ztf_20180604_programid3']\n\n for t in topics_tonight:\n if t not in topics_on_watch:\n print(*time_stamps(), f'starting listener thread for {t}')\n offset_reset = config['kafka']['default.topic.config']['auto.offset.reset']\n bootstrap_servers = config['kafka']['bootstrap.servers']\n group = '{:s}'.format(config['kafka']['group'])\n # print(group)\n path_alerts = config['path']['path_alerts']\n path_tess = config['path']['path_tess']\n save_packets = _save_packets\n # topics_on_watch[t] = threading.Thread(target=listener,\n # args=(t, bootstrap_servers,\n # offset_reset, group, path_alerts))\n topics_on_watch[t] = multiprocessing.Process(target=listener,\n args=(t, bootstrap_servers,\n offset_reset, group,\n path_alerts, path_tess,\n save_packets))\n topics_on_watch[t].daemon = True\n topics_on_watch[t].start()\n\n else:\n print(*time_stamps(), f'performing thread health check for {t}')\n try:\n # if not topics_on_watch[t].isAlive():\n if not topics_on_watch[t].is_alive():\n print(*time_stamps(), f'{t} died, removing')\n # topics_on_watch[t].terminate()\n topics_on_watch.pop(t, None)\n else:\n print(*time_stamps(), f'{t} appears normal')\n except Exception as _e:\n print(*time_stamps(), 'Failed to perform health check', str(_e))\n pass\n\n except Exception as e:\n print(*time_stamps(), str(e))\n _err = traceback.format_exc()\n print(*time_stamps(), str(_err))\n\n if _obs_date is None:\n time.sleep(300)\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description='Fetch AVRO packets from Kafka streams and ingest them into DB')\n parser.add_argument('--obsdate', help='observing date')\n parser.add_argument('--noio', help='reduce i/o - do not save packets', action='store_true')\n\n args = parser.parse_args()\n obs_date = args.obsdate\n save = False if args.noio else True\n # print(obs_date)\n\n main(_obs_date=obs_date, _save_packets=save)\n",
"import csv\nimport os\nimport glob\nimport time\n# from astropy.coordinates import Angle\nimport numpy as np\nimport pandas as pd\nimport pymongo\nimport inspect\nimport json\nimport argparse\n# import timeout_decorator\nimport signal\nimport traceback\nimport datetime\nimport pytz\nfrom numba import jit\n# import fastavro as avro\nfrom concurrent.futures import ThreadPoolExecutor\nfrom concurrent.futures import ProcessPoolExecutor\nimport time\n\n\n''' load config and secrets '''\nwith open('/app/config.json') as cjson:\n config = json.load(cjson)\n\nwith open('/app/secrets.json') as sjson:\n secrets = json.load(sjson)\n\nfor k in secrets:\n config[k].update(secrets.get(k, {}))\n\n\ndef utc_now():\n return datetime.datetime.now(pytz.utc)\n\n\ndef connect_to_db():\n \"\"\" Connect to the mongodb database\n\n :return:\n \"\"\"\n try:\n # there's only one instance of DB, it's too big to be replicated\n _client = pymongo.MongoClient(host=config['database']['host'],\n port=config['database']['port'])\n # grab main database:\n _db = _client[config['database']['db']]\n except Exception as _e:\n raise ConnectionRefusedError\n try:\n # authenticate\n _db.authenticate(config['database']['user'], config['database']['pwd'])\n except Exception as _e:\n raise ConnectionRefusedError\n\n return _client, _db\n\n\ndef insert_db_entry(_db, _collection=None, _db_entry=None):\n \"\"\"\n Insert a document _doc to collection _collection in DB.\n It is monitored for timeout in case DB connection hangs for some reason\n :param _collection:\n :param _db_entry:\n :return:\n \"\"\"\n assert _collection is not None, 'Must specify collection'\n assert _db_entry is not None, 'Must specify document'\n try:\n _db[_collection].insert_one(_db_entry)\n except Exception as _e:\n print('Error inserting {:s} into {:s}'.format(str(_db_entry['_id']), _collection))\n traceback.print_exc()\n print(_e)\n\n\ndef insert_multiple_db_entries(_db, _collection=None, _db_entries=None):\n \"\"\"\n Insert a document _doc to collection _collection in DB.\n It is monitored for timeout in case DB connection hangs for some reason\n :param _db:\n :param _collection:\n :param _db_entries:\n :return:\n \"\"\"\n assert _collection is not None, 'Must specify collection'\n assert _db_entries is not None, 'Must specify documents'\n try:\n _db[_collection].insert_many(_db_entries, ordered=False)\n except pymongo.errors.BulkWriteError as bwe:\n print(bwe.details)\n except Exception as _e:\n traceback.print_exc()\n print(_e)\n\n\n@jit\ndef deg2hms(x):\n \"\"\"Transform degrees to *hours:minutes:seconds* strings.\n\n Parameters\n ----------\n x : float\n The degree value c [0, 360) to be written as a sexagesimal string.\n\n Returns\n -------\n out : str\n The input angle written as a sexagesimal string, in the\n form, hours:minutes:seconds.\n\n \"\"\"\n assert 0.0 <= x < 360.0, 'Bad RA value in degrees'\n # ac = Angle(x, unit='degree')\n # hms = str(ac.to_string(unit='hour', sep=':', pad=True))\n # print(str(hms))\n _h = np.floor(x * 12.0 / 180.)\n _m = np.floor((x * 12.0 / 180. - _h) * 60.0)\n _s = ((x * 12.0 / 180. 
- _h) * 60.0 - _m) * 60.0\n hms = '{:02.0f}:{:02.0f}:{:07.4f}'.format(_h, _m, _s)\n # print(hms)\n return hms\n\n\n@jit\ndef deg2dms(x):\n \"\"\"Transform degrees to *degrees:arcminutes:arcseconds* strings.\n\n Parameters\n ----------\n x : float\n The degree value c [-90, 90] to be converted.\n\n Returns\n -------\n out : str\n The input angle as a string, written as degrees:minutes:seconds.\n\n \"\"\"\n assert -90.0 <= x <= 90.0, 'Bad Dec value in degrees'\n # ac = Angle(x, unit='degree')\n # dms = str(ac.to_string(unit='degree', sep=':', pad=True))\n # print(dms)\n _d = np.floor(abs(x)) * np.sign(x)\n _m = np.floor(np.abs(x - _d) * 60.0)\n _s = np.abs(np.abs(x - _d) * 60.0 - _m) * 60.0\n dms = '{:02.0f}:{:02.0f}:{:06.3f}'.format(_d, _m, _s)\n # print(dms)\n return dms\n\n\ndef process_file(_file, _collection, _batch_size=2048, verbose=False, _dry_run=False):\n\n # connect to MongoDB:\n if verbose:\n print('Connecting to DB')\n _client, _db = connect_to_db()\n if verbose:\n print('Successfully connected')\n\n print(f'processing {_file}')\n documents = []\n batch_num = 1\n\n try:\n df = pd.read_csv(_file)\n\n for index, row in df.iterrows():\n try:\n # nan -> None\n tmp = row.where((pd.notnull(row)), None)\n # convert to dict:\n doc = tmp.to_dict()\n\n doc['_id'] = doc['system_name']\n\n doc['coordinates'] = {}\n doc['coordinates']['epoch'] = 2015.5\n _ra = doc['ra']\n _dec = doc['dec']\n _radec = [_ra, _dec]\n # string format: H:M:S, D:M:S\n # tic = time.time()\n _radec_str = [deg2hms(_ra), deg2dms(_dec)]\n # print(time.time() - tic)\n # print(_radec_str)\n doc['coordinates']['radec_str'] = _radec_str\n # for GeoJSON, must be lon:[-180, 180], lat:[-90, 90] (i.e. in deg)\n _radec_geojson = [_ra - 180.0, _dec]\n doc['coordinates']['radec_geojson'] = {'type': 'Point',\n 'coordinates': _radec_geojson}\n # radians:\n doc['coordinates']['radec'] = [_ra * np.pi / 180.0, _dec * np.pi / 180.0]\n\n # print(doc['coordinates'])\n\n documents.append(doc)\n\n # time.sleep(1)\n\n # insert batch, then flush\n if len(documents) == _batch_size:\n print(f'inserting batch #{batch_num}')\n if not _dry_run:\n insert_multiple_db_entries(_db, _collection=_collection, _db_entries=documents)\n # flush:\n documents = []\n batch_num += 1\n\n except Exception as e:\n traceback.print_exc()\n print(e)\n continue\n\n except Exception as e:\n traceback.print_exc()\n print(e)\n\n # stuff left from the last file?\n if len(documents) > 0:\n print(f'inserting batch #{batch_num}')\n if not _dry_run:\n insert_multiple_db_entries(_db, _collection=_collection, _db_entries=documents)\n\n # disconnect from db:\n try:\n _client.close()\n finally:\n if verbose:\n print('Successfully disconnected from db')\n\n\nif __name__ == '__main__':\n ''' Create command line argument parser '''\n parser = argparse.ArgumentParser(formatter_class=argparse.RawDescriptionHelpFormatter,\n description='')\n\n parser.add_argument('--dryrun', action='store_true', help='enforce execution')\n\n args = parser.parse_args()\n\n dry_run = args.dryrun\n\n # connect to MongoDB:\n print('Connecting to DB')\n client, db = connect_to_db()\n print('Successfully connected')\n\n collection = 'Known_lenses_20180901'\n\n # create 2d index:\n print('Creating 2d index')\n if not dry_run:\n db[collection].create_index([('coordinates.radec_geojson', '2dsphere'),\n ('_id', pymongo.ASCENDING)], background=True)\n\n # number of records to insert\n batch_size = 4096\n\n _location = '/_tmp/'\n\n files = glob.glob(os.path.join(_location, 'known_lenses_wradii.csv'))\n\n 
print(f'# files to process: {len(files)}')\n\n # init threaded operations\n # pool = ThreadPoolExecutor(2)\n # pool = ProcessPoolExecutor(20)\n pool = ProcessPoolExecutor(1)\n\n # for ff in files[::-1]:\n for ff in sorted(files):\n pool.submit(process_file, _file=ff, _collection=collection, _batch_size=batch_size,\n verbose=True, _dry_run=dry_run)\n # process_file(_file=ff, _collection=collection, _batch_size=batch_size,\n # verbose=True, _dry_run=dry_run)\n\n # wait for everything to finish\n pool.shutdown(wait=True)\n\n print('All done')\n"
] | [
[
"tensorflow.keras.models.load_model",
"numpy.expand_dims",
"numpy.sqrt",
"numpy.abs",
"numpy.pad",
"numpy.rint",
"numpy.cos",
"numpy.nan_to_num",
"numpy.sin",
"numpy.sign",
"numpy.linalg.norm",
"numpy.log10",
"numpy.floor",
"numpy.zeros"
],
[
"pandas.notnull",
"pandas.read_csv",
"numpy.abs",
"numpy.sign",
"numpy.floor"
]
] |
jshede/Cirq | [
"5db0f6aa8c009735a9ce0b0b7909ffe2532c396d",
"5db0f6aa8c009735a9ce0b0b7909ffe2532c396d",
"5db0f6aa8c009735a9ce0b0b7909ffe2532c396d"
] | [
"cirq/google/api/v1/programs.py",
"cirq/linalg/decompositions_test.py",
"cirq/ops/parallel_gate_operation.py"
] | [
"# Copyright 2018 The Cirq Developers\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport json\nfrom typing import (Any, cast, Dict, Iterable, Optional, Sequence, Tuple,\n TYPE_CHECKING)\nimport numpy as np\nimport sympy\n\nfrom cirq import devices, ops, protocols, value\nfrom cirq.schedules import Schedule, ScheduledOperation\nfrom cirq.value import Timestamp\n\nif TYPE_CHECKING:\n import cirq\n from cirq.google import xmon_device\n\n\ndef _load_json_bool(b: Any):\n \"\"\"Converts a json field to bool. If already a bool, pass through.\"\"\"\n if isinstance(b, bool):\n return b\n return json.loads(b)\n\n\ndef gate_to_proto_dict(gate: 'cirq.Gate',\n qubits: Tuple['cirq.Qid', ...]) -> Dict:\n if isinstance(gate, ops.MeasurementGate):\n return _measure_to_proto_dict(gate, qubits)\n\n if isinstance(gate, ops.XPowGate):\n if len(qubits) != 1:\n # coverage: ignore\n raise ValueError('Wrong number of qubits.')\n return _x_to_proto_dict(gate, qubits[0])\n\n if isinstance(gate, ops.YPowGate):\n if len(qubits) != 1:\n # coverage: ignore\n raise ValueError('Wrong number of qubits.')\n return _y_to_proto_dict(gate, qubits[0])\n\n if isinstance(gate, ops.PhasedXPowGate):\n if len(qubits) != 1:\n # coverage: ignore\n raise ValueError('Wrong number of qubits.')\n return _phased_x_to_proto_dict(gate, qubits[0])\n\n if isinstance(gate, ops.ZPowGate):\n if len(qubits) != 1:\n # coverage: ignore\n raise ValueError('Wrong number of qubits.')\n return _z_to_proto_dict(gate, qubits[0])\n\n if isinstance(gate, ops.CZPowGate):\n if len(qubits) != 2:\n # coverage: ignore\n raise ValueError('Wrong number of qubits.')\n return _cz_to_proto_dict(gate, *qubits)\n\n raise ValueError(\"Don't know how to serialize this gate: {!r}\".format(gate))\n\n\ndef _x_to_proto_dict(gate: 'cirq.XPowGate', q: 'cirq.Qid') -> Dict:\n exp_w = {\n 'target': cast(devices.GridQubit, q).to_proto_dict(),\n 'axis_half_turns': _parameterized_value_to_proto_dict(0),\n 'half_turns': _parameterized_value_to_proto_dict(gate.exponent)\n }\n return {'exp_w': exp_w}\n\n\ndef _y_to_proto_dict(gate: 'cirq.YPowGate', q: 'cirq.Qid') -> Dict:\n exp_w = {\n 'target': cast(devices.GridQubit, q).to_proto_dict(),\n 'axis_half_turns': _parameterized_value_to_proto_dict(0.5),\n 'half_turns': _parameterized_value_to_proto_dict(gate.exponent)\n }\n return {'exp_w': exp_w}\n\n\ndef _phased_x_to_proto_dict(gate: 'cirq.PhasedXPowGate', q: 'cirq.Qid') -> Dict:\n exp_w = {\n 'target': cast(devices.GridQubit, q).to_proto_dict(),\n 'axis_half_turns':\n _parameterized_value_to_proto_dict(gate.phase_exponent),\n 'half_turns': _parameterized_value_to_proto_dict(gate.exponent)\n }\n return {'exp_w': exp_w}\n\n\ndef _z_to_proto_dict(gate: 'cirq.ZPowGate', q: 'cirq.Qid') -> Dict:\n exp_z = {\n 'target': cast(devices.GridQubit, q).to_proto_dict(),\n 'half_turns': _parameterized_value_to_proto_dict(gate.exponent),\n }\n return {'exp_z': exp_z}\n\n\ndef _cz_to_proto_dict(gate: 'cirq.CZPowGate', p: 'cirq.Qid',\n q: 'cirq.Qid') -> Dict:\n exp_11 = {\n 'target1': 
cast(devices.GridQubit, p).to_proto_dict(),\n 'target2': cast(devices.GridQubit, q).to_proto_dict(),\n 'half_turns': _parameterized_value_to_proto_dict(gate.exponent)\n }\n return {'exp_11': exp_11}\n\n\ndef _measure_to_proto_dict(gate: 'cirq.MeasurementGate',\n qubits: Sequence['cirq.Qid']):\n if len(qubits) == 0:\n raise ValueError('Measurement gate on no qubits.')\n\n invert_mask = None\n if gate.invert_mask:\n invert_mask = gate.invert_mask + (False,) * (gate.num_qubits() -\n len(gate.invert_mask))\n\n if invert_mask and len(invert_mask) != len(qubits):\n raise ValueError('Measurement gate had invert mask of length '\n 'different than number of qubits it acts on.')\n measurement = {\n 'targets': [cast(devices.GridQubit, q).to_proto_dict() for q in qubits],\n 'key': protocols.measurement_key(gate),\n }\n if invert_mask:\n measurement['invert_mask'] = [json.dumps(x) for x in invert_mask]\n return {'measurement': measurement}\n\n\ndef schedule_to_proto_dicts(schedule: Schedule) -> Iterable[Dict]:\n \"\"\"Convert a schedule into an iterable of proto dictionaries.\n\n Args:\n schedule: The schedule to convert to a proto dict. Must contain only\n gates that can be cast to xmon gates.\n\n Yields:\n A proto dictionary corresponding to an Operation proto.\n \"\"\"\n last_time_picos: Optional[int] = None\n for so in schedule.scheduled_operations:\n op = gate_to_proto_dict(\n cast(ops.GateOperation, so.operation).gate, so.operation.qubits)\n time_picos = so.time.raw_picos()\n if last_time_picos is None:\n op['incremental_delay_picoseconds'] = time_picos\n else:\n op['incremental_delay_picoseconds'] = time_picos - last_time_picos\n last_time_picos = time_picos\n yield op\n\n\ndef schedule_from_proto_dicts(\n device: 'xmon_device.XmonDevice',\n ops: Iterable[Dict],\n) -> Schedule:\n \"\"\"Convert proto dictionaries into a Schedule for the given device.\"\"\"\n scheduled_ops = []\n last_time_picos = 0\n for op in ops:\n delay_picos = 0\n if 'incremental_delay_picoseconds' in op:\n delay_picos = op['incremental_delay_picoseconds']\n time_picos = last_time_picos + delay_picos\n last_time_picos = time_picos\n xmon_op = xmon_op_from_proto_dict(op)\n scheduled_ops.append(\n ScheduledOperation.op_at_on(\n operation=xmon_op,\n time=Timestamp(picos=time_picos),\n device=device,\n ))\n return Schedule(device, scheduled_ops)\n\n\ndef pack_results(measurements: Sequence[Tuple[str, np.ndarray]]) -> bytes:\n \"\"\"Pack measurement results into a byte string.\n\n Args:\n measurements: A sequence of tuples, one for each measurement, consisting\n of a string key and an array of boolean data. The data should be\n a 2-D array indexed by (repetition, qubit_index). 
All data for all\n measurements must have the same number of repetitions.\n\n Returns:\n Packed bytes, as described in the unpack_results docstring below.\n\n Raises:\n ValueError if the measurement data do not have the compatible shapes.\n \"\"\"\n if not measurements:\n return b''\n\n shapes = [(key, np.shape(data)) for key, data in measurements]\n if not all(len(shape) == 2 for _, shape in shapes):\n raise ValueError(\"Expected 2-D data: shapes={}\".format(shapes))\n\n reps = shapes[0][1][0]\n if not all(shape[0] == reps for _, shape in shapes):\n raise ValueError(\n \"Expected same reps for all keys: shapes={}\".format(shapes))\n\n bits = np.hstack([np.asarray(data, dtype=bool) for _, data in measurements])\n bits = bits.reshape(-1)\n\n # Pad length to multiple of 8 if needed.\n remainder = len(bits) % 8\n if remainder:\n bits = np.pad(bits, (0, 8 - remainder), 'constant')\n\n # Pack in little-endian bit order.\n bits = bits.reshape((-1, 8))[:, ::-1]\n byte_arr = np.packbits(bits, axis=1).reshape(-1)\n\n return byte_arr.tobytes()\n\n\ndef unpack_results(data: bytes, repetitions: int,\n key_sizes: Sequence[Tuple[str, int]]\n ) -> Dict[str, np.ndarray]:\n \"\"\"Unpack data from a bitstring into individual measurement results.\n\n Args:\n data: Packed measurement results, in the form <rep0><rep1>...\n where each repetition is <key0_0>..<key0_{size0-1}><key1_0>...\n with bits packed in little-endian order in each byte.\n repetitions: number of repetitions.\n key_sizes: Keys and sizes of the measurements in the data.\n\n Returns:\n Dict mapping measurement key to a 2D array of boolean results. Each\n array has shape (repetitions, size) with size for that measurement.\n \"\"\"\n bits_per_rep = sum(size for _, size in key_sizes)\n total_bits = repetitions * bits_per_rep\n\n byte_arr = np.frombuffer(data, dtype='uint8').reshape((len(data), 1))\n bits = np.unpackbits(byte_arr, axis=1)[:, ::-1].reshape(-1).astype(bool)\n bits = bits[:total_bits].reshape((repetitions, bits_per_rep))\n\n results = {}\n ofs = 0\n for key, size in key_sizes:\n results[key] = bits[:, ofs:ofs + size]\n ofs += size\n\n return results\n\n\ndef is_native_xmon_op(op: 'cirq.Operation') -> bool:\n \"\"\"Check if the gate corresponding to an operation is a native xmon gate.\n\n Args:\n op: Input operation.\n\n Returns:\n True if the operation is native to the xmon, false otherwise.\n \"\"\"\n return (isinstance(op, ops.GateOperation) and is_native_xmon_gate(op.gate))\n\n\ndef is_native_xmon_gate(gate: 'cirq.Gate') -> bool:\n \"\"\"Check if a gate is a native xmon gate.\n\n Args:\n gate: Input gate.\n\n Returns:\n True if the gate is native to the xmon, false otherwise.\n \"\"\"\n return isinstance(gate,\n (ops.CZPowGate, ops.MeasurementGate, ops.PhasedXPowGate,\n ops.XPowGate, ops.YPowGate, ops.ZPowGate))\n\n\ndef xmon_op_from_proto_dict(proto_dict: Dict) -> 'cirq.Operation':\n \"\"\"Convert the proto dictionary to the corresponding operation.\n\n See protos in api/google/v1 for specification of the protos.\n\n Args:\n proto_dict: Dictionary representing the proto. 
Keys are always\n strings, but values may be types correspond to a raw proto type\n or another dictionary (for messages).\n\n Returns:\n The operation.\n\n Raises:\n ValueError if the dictionary does not contain required values\n corresponding to the proto.\n \"\"\"\n\n def raise_missing_fields(gate_name: str):\n raise ValueError('{} missing required fields: {}'.format(\n gate_name, proto_dict))\n\n param = _parameterized_value_from_proto_dict\n qubit = devices.GridQubit.from_proto_dict\n if 'exp_w' in proto_dict:\n exp_w = proto_dict['exp_w']\n if ('half_turns' not in exp_w or 'axis_half_turns' not in exp_w or\n 'target' not in exp_w):\n raise_missing_fields('ExpW')\n return ops.PhasedXPowGate(\n exponent=param(exp_w['half_turns']),\n phase_exponent=param(exp_w['axis_half_turns']),\n ).on(qubit(exp_w['target']))\n if 'exp_z' in proto_dict:\n exp_z = proto_dict['exp_z']\n if 'half_turns' not in exp_z or 'target' not in exp_z:\n raise_missing_fields('ExpZ')\n return ops.Z(qubit(exp_z['target']))**param(exp_z['half_turns'])\n if 'exp_11' in proto_dict:\n exp_11 = proto_dict['exp_11']\n if ('half_turns' not in exp_11 or 'target1' not in exp_11 or\n 'target2' not in exp_11):\n raise_missing_fields('Exp11')\n return ops.CZ(qubit(exp_11['target1']),\n qubit(exp_11['target2']))**param(exp_11['half_turns'])\n if 'measurement' in proto_dict:\n meas = proto_dict['measurement']\n invert_mask = cast(Tuple[Any, ...], ())\n if 'invert_mask' in meas:\n invert_mask = tuple(_load_json_bool(x) for x in meas['invert_mask'])\n if 'key' not in meas or 'targets' not in meas:\n raise_missing_fields('Measurement')\n return ops.MeasurementGate(\n num_qubits=len(meas['targets']),\n key=meas['key'],\n invert_mask=invert_mask).on(*[qubit(q) for q in meas['targets']])\n\n raise ValueError('invalid operation: {}'.format(proto_dict))\n\n\ndef _parameterized_value_from_proto_dict(message: Dict) -> value.TParamVal:\n parameter_key = message.get('parameter_key', None)\n if parameter_key:\n return sympy.Symbol(parameter_key)\n if 'raw' in message:\n return message['raw']\n raise ValueError('No value specified for parameterized float. '\n 'Expected \"raw\" or \"parameter_key\" to be set. '\n 'message: {!r}'.format(message))\n\n\ndef _parameterized_value_to_proto_dict(param: value.TParamVal) -> Dict:\n out = {} # type: Dict\n if isinstance(param, sympy.Symbol):\n out['parameter_key'] = str(param.free_symbols.pop())\n else:\n out['raw'] = float(param)\n return out\n",
"# Copyright 2018 The Cirq Developers\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport random\n\nimport numpy as np\nimport pytest\n\nimport cirq\n\nX = np.array([[0, 1], [1, 0]])\nY = np.array([[0, -1j], [1j, 0]])\nZ = np.array([[1, 0], [0, -1]])\nH = np.array([[1, 1], [1, -1]]) * np.sqrt(0.5)\nSQRT_X = np.array([[1, 1j], [1j, 1]])\nc = np.exp(1j * np.pi / 4)\nSQRT_SQRT_X = np.array([[1 + c, 1 - c], [1 - c, 1 + c]]) / 2\nSWAP = np.array([[1, 0, 0, 0],\n [0, 0, 1, 0],\n [0, 1, 0, 0],\n [0, 0, 0, 1]])\nCNOT = np.array([[1, 0, 0, 0],\n [0, 1, 0, 0],\n [0, 0, 0, 1],\n [0, 0, 1, 0]])\nCZ = np.diag([1, 1, 1, -1])\n\n\ndef assert_kronecker_factorization_within_tolerance(matrix, g, f1, f2):\n restored = g * cirq.linalg.combinators.kron(f1, f2)\n assert not np.any(np.isnan(restored)), \"NaN in kronecker product.\"\n assert np.allclose(restored, matrix), \"Can't factor kronecker product.\"\n\n\ndef assert_kronecker_factorization_not_within_tolerance(matrix, g, f1, f2):\n restored = g * cirq.linalg.combinators.kron(f1, f2)\n assert (np.any(np.isnan(restored) or\n not np.allclose(restored, matrix)))\n\ndef assert_magic_su2_within_tolerance(mat, a, b):\n M = cirq.linalg.decompositions.MAGIC\n MT = cirq.linalg.decompositions.MAGIC_CONJ_T\n recon = cirq.linalg.combinators.dot(\n MT,\n cirq.linalg.combinators.kron(a, b),\n M)\n assert np.allclose(recon, mat), \"Failed to decompose within tolerance.\"\n\[email protected]('matrix', [\n X,\n cirq.kron(X, X),\n cirq.kron(X, Y),\n cirq.kron(X, np.eye(2))\n])\ndef test_map_eigenvalues_identity(matrix):\n identity_mapped = cirq.map_eigenvalues(matrix, lambda e: e)\n assert np.allclose(matrix, identity_mapped)\n\n\[email protected]('matrix,exponent,desired', [\n [X, 2, np.eye(2)],\n [X, 3, X],\n [Z, 2, np.eye(2)],\n [H, 2, np.eye(2)],\n [Z, 0.5, np.diag([1, 1j])],\n [X, 0.5, np.array([[1j, 1], [1, 1j]]) * (1 - 1j) / 2],\n])\ndef test_map_eigenvalues_raise(matrix, exponent, desired):\n exp_mapped = cirq.map_eigenvalues(matrix, lambda e: complex(e)**exponent)\n assert np.allclose(desired, exp_mapped)\n\n\[email protected]('f1,f2', [\n (H, X),\n (H * 1j, X),\n (H, SQRT_X),\n (H, SQRT_SQRT_X),\n (H, H),\n (SQRT_SQRT_X, H),\n (X, np.eye(2)),\n (1j * X, np.eye(2)),\n (X, 1j * np.eye(2)),\n (-X, 1j * np.eye(2)),\n (X, X),\n] + [\n (cirq.testing.random_unitary(2), cirq.testing.random_unitary(2))\n for _ in range(10)\n])\ndef test_kron_factor(f1, f2):\n p = cirq.kron(f1, f2)\n g, g1, g2 = cirq.kron_factor_4x4_to_2x2s(p)\n assert abs(np.linalg.det(g1) - 1) < 0.00001\n assert abs(np.linalg.det(g2) - 1) < 0.00001\n assert np.allclose(g * cirq.kron(g1, g2), p)\n assert_kronecker_factorization_within_tolerance(\n p, g, g1, g2)\n\n\[email protected]('f1,f2', [\n (cirq.testing.random_special_unitary(2),\n cirq.testing.random_special_unitary(2))\n for _ in range(10)\n])\ndef test_kron_factor_special_unitaries(f1, f2):\n p = cirq.kron(f1, f2)\n g, g1, g2 = cirq.kron_factor_4x4_to_2x2s(p)\n assert np.allclose(cirq.kron(g1, g2), p)\n assert abs(g - 1) < 0.000001\n assert 
cirq.is_special_unitary(g1)\n assert cirq.is_special_unitary(g2)\n assert_kronecker_factorization_within_tolerance(\n p, g, g1, g2)\n\n\ndef test_kron_factor_fail():\n mat = cirq.kron_with_controls(cirq.CONTROL_TAG, X)\n g, f1, f2 = cirq.kron_factor_4x4_to_2x2s(mat)\n with pytest.raises(ValueError):\n assert_kronecker_factorization_not_within_tolerance(\n mat, g, f1, f2)\n mat = cirq.kron_factor_4x4_to_2x2s(np.diag([1, 1, 1, 1j]))\n with pytest.raises(ValueError):\n assert_kronecker_factorization_not_within_tolerance(\n mat, g, f1, f2)\n\n\ndef recompose_so4(a: np.ndarray, b: np.ndarray) -> np.ndarray:\n assert a.shape == (2, 2)\n assert b.shape == (2, 2)\n assert cirq.is_special_unitary(a)\n assert cirq.is_special_unitary(b)\n\n magic = np.array([[1, 0, 0, 1j],\n [0, 1j, 1, 0],\n [0, 1j, -1, 0],\n [1, 0, 0, -1j]]) * np.sqrt(0.5)\n result = np.real(cirq.dot(np.conj(magic.T),\n cirq.kron(a, b),\n magic))\n assert cirq.is_orthogonal(result)\n return result\n\n\[email protected]('m', [\n cirq.testing.random_special_orthogonal(4)\n for _ in range(10)\n])\ndef test_so4_to_magic_su2s(m):\n a, b = cirq.so4_to_magic_su2s(m)\n m2 = recompose_so4(a, b)\n assert_magic_su2_within_tolerance(m2, a, b)\n assert np.allclose(m, m2)\n\n\[email protected]('a,b', [\n (cirq.testing.random_special_unitary(2),\n cirq.testing.random_special_unitary(2))\n for _ in range(10)\n])\ndef test_so4_to_magic_su2s_known_factors(a, b):\n m = recompose_so4(a, b)\n a2, b2 = cirq.so4_to_magic_su2s(m)\n m2 = recompose_so4(a2, b2)\n\n assert np.allclose(m2, m)\n\n # Account for kron(A, B) = kron(-A, -B).\n if np.linalg.norm(a + a2) > np.linalg.norm(a - a2):\n assert np.allclose(a2, a)\n assert np.allclose(b2, b)\n else:\n assert np.allclose(a2, -a)\n assert np.allclose(b2, -b)\n\n\[email protected]('mat', [\n np.diag([0, 1, 1, 1]),\n np.diag([0.5, 2, 1, 1]),\n np.diag([1, 1j, 1, 1]),\n np.diag([1, 1, 1, -1]),\n])\ndef test_so4_to_magic_su2s_fail(mat):\n with pytest.raises(ValueError):\n _ = cirq.so4_to_magic_su2s(mat)\n\n\[email protected]('x,y,z', [\n [(random.random() * 2 - 1) * np.pi * 2 for _ in range(3)]\n for _ in range(10)\n])\ndef test_kak_canonicalize_vector(x, y, z):\n i = np.eye(2)\n m = cirq.unitary(cirq.KakDecomposition(\n global_phase=1,\n single_qubit_operations_after=(i, i),\n interaction_coefficients=(x, y, z),\n single_qubit_operations_before=(i, i)))\n\n kak = cirq.kak_canonicalize_vector(x, y, z, atol=1e-10)\n a1, a0 = kak.single_qubit_operations_after\n x2, y2, z2 = kak.interaction_coefficients\n b1, b0 = kak.single_qubit_operations_before\n m2 = cirq.unitary(kak)\n\n assert 0.0 <= x2 <= np.pi / 4\n assert 0.0 <= y2 <= np.pi / 4\n assert -np.pi / 4 < z2 <= np.pi / 4\n assert abs(x2) >= abs(y2) >= abs(z2)\n assert x2 < np.pi / 4 - 1e-10 or z2 >= 0\n assert cirq.is_special_unitary(a1)\n assert cirq.is_special_unitary(a0)\n assert cirq.is_special_unitary(b1)\n assert cirq.is_special_unitary(b0)\n assert np.allclose(m, m2)\n\n\[email protected]('target', [\n np.eye(4),\n SWAP,\n SWAP * 1j,\n CZ,\n CNOT,\n SWAP @ CZ,\n] + [cirq.testing.random_unitary(4) for _ in range(10)])\ndef test_kak_decomposition(target):\n kak = cirq.kak_decomposition(target)\n np.testing.assert_allclose(cirq.unitary(kak), target, atol=1e-8)\n\n\ndef test_kak_decomposition_unitary_object():\n op = cirq.ISWAP(*cirq.LineQubit.range(2))**0.5\n kak = cirq.kak_decomposition(op)\n np.testing.assert_allclose(cirq.unitary(kak), cirq.unitary(op), atol=1e-8)\n assert cirq.kak_decomposition(kak) is kak\n\n\ndef 
test_kak_decomposition_invalid_object():\n with pytest.raises(TypeError, match='unitary effect'):\n _ = cirq.kak_decomposition('test')\n\n with pytest.raises(ValueError, match='4x4 unitary matrix'):\n _ = cirq.kak_decomposition(np.eye(3))\n\n with pytest.raises(ValueError, match='4x4 unitary matrix'):\n _ = cirq.kak_decomposition(np.eye(8))\n\n with pytest.raises(ValueError, match='4x4 unitary matrix'):\n _ = cirq.kak_decomposition(np.ones((4, 4)))\n\n with pytest.raises(ValueError, match='4x4 unitary matrix'):\n _ = cirq.kak_decomposition(np.zeros((4, 4)))\n\n nil = cirq.kak_decomposition(np.zeros((4, 4)), check_preconditions=False)\n np.testing.assert_allclose(cirq.unitary(nil), np.eye(4), atol=1e-8)\n\n\ndef test_kak_decomposition_eq():\n eq = cirq.testing.EqualsTester()\n\n eq.make_equality_group(lambda: cirq.KakDecomposition(\n global_phase=1,\n single_qubit_operations_before=(cirq.unitary(cirq.X),\n cirq.unitary(cirq.Y)),\n interaction_coefficients=(0.3, 0.2, 0.1),\n single_qubit_operations_after=(np.eye(2), cirq.unitary(cirq.Z)),\n ))\n\n eq.add_equality_group(cirq.KakDecomposition(\n global_phase=-1,\n single_qubit_operations_before=(cirq.unitary(cirq.X),\n cirq.unitary(cirq.Y)),\n interaction_coefficients=(0.3, 0.2, 0.1),\n single_qubit_operations_after=(np.eye(2), cirq.unitary(cirq.Z)),\n ))\n\n eq.make_equality_group(lambda: cirq.KakDecomposition(\n global_phase=1,\n single_qubit_operations_before=(cirq.unitary(cirq.X),\n cirq.unitary(cirq.H)),\n interaction_coefficients=(0.3, 0.2, 0.1),\n single_qubit_operations_after=(np.eye(2), cirq.unitary(cirq.Z)),\n ))\n\n eq.make_equality_group(lambda: cirq.KakDecomposition(\n global_phase=1,\n single_qubit_operations_before=(cirq.unitary(cirq.X),\n cirq.unitary(cirq.Y)),\n interaction_coefficients=(0.5, 0.2, 0.1),\n single_qubit_operations_after=(np.eye(2), cirq.unitary(cirq.Z)),\n ))\n\n\ndef test_kak_repr():\n cirq.testing.assert_equivalent_repr(cirq.KakDecomposition(\n global_phase=1j,\n single_qubit_operations_before=(cirq.unitary(cirq.X),\n cirq.unitary(cirq.Y)),\n interaction_coefficients=(0.3, 0.2, 0.1),\n single_qubit_operations_after=(np.eye(2), cirq.unitary(cirq.Z)),\n ))\n\n assert repr(\n cirq.KakDecomposition(\n global_phase=1,\n single_qubit_operations_before=(cirq.unitary(cirq.X),\n cirq.unitary(cirq.Y)),\n interaction_coefficients=(0.5, 0.25, 0),\n single_qubit_operations_after=(np.eye(2), cirq.unitary(cirq.Z)),\n )) == \"\"\"\ncirq.KakDecomposition(\n interaction_coefficients=(0.5, 0.25, 0),\n single_qubit_operations_before=(\n np.array([[0j, (1+0j)], [(1+0j), 0j]], dtype=np.complex128),\n np.array([[0j, -1j], [1j, 0j]], dtype=np.complex128),\n ),\n single_qubit_operations_after=(\n np.array([[1.0, 0.0], [0.0, 1.0]], dtype=np.float64),\n np.array([[(1+0j), 0j], [0j, (-1+0j)]], dtype=np.complex128),\n ),\n global_phase=1)\n\"\"\".strip()\n\n\ndef test_kak_str():\n v = cirq.KakDecomposition(\n interaction_coefficients=(0.3 * np.pi / 4, 0.2 * np.pi / 4,\n 0.1 * np.pi / 4),\n single_qubit_operations_before=(cirq.unitary(cirq.I),\n cirq.unitary(cirq.X)),\n single_qubit_operations_after=(cirq.unitary(cirq.Y),\n cirq.unitary(cirq.Z)),\n global_phase=1j)\n assert str(v) == \"\"\"KAK {\n xyz*(4/π): 0.3, 0.2, 0.1\n before: (0*π around X) ⊗ (1*π around X)\n after: (1*π around Y) ⊗ (1*π around Z)\n}\"\"\"\n\n\ndef test_axis_angle_decomposition_eq():\n eq = cirq.testing.EqualsTester()\n\n eq.make_equality_group(lambda: cirq.AxisAngleDecomposition(\n angle=1, axis=(0.8, 0.6, 0), global_phase=-1))\n eq.add_equality_group(\n 
cirq.AxisAngleDecomposition(angle=5,\n axis=(0.8, 0.6, 0),\n global_phase=-1))\n eq.add_equality_group(\n cirq.AxisAngleDecomposition(angle=1,\n axis=(0.8, 0, 0.6),\n global_phase=-1))\n eq.add_equality_group(\n cirq.AxisAngleDecomposition(angle=1, axis=(0.8, 0.6, 0),\n global_phase=1))\n\n\ndef test_axis_angle_decomposition_repr():\n cirq.testing.assert_equivalent_repr(\n cirq.AxisAngleDecomposition(angle=1,\n axis=(0, 0.6, 0.8),\n global_phase=-1))\n\n\ndef test_axis_angle_decomposition_str():\n assert str(cirq.axis_angle(cirq.unitary(cirq.X))) == '1*π around X'\n assert str(cirq.axis_angle(cirq.unitary(cirq.Y))) == '1*π around Y'\n assert str(cirq.axis_angle(cirq.unitary(cirq.Z))) == '1*π around Z'\n assert str(cirq.axis_angle(cirq.unitary(\n cirq.H))) == '1*π around 0.707*X+0.707*Z'\n assert str(cirq.axis_angle(cirq.unitary(\n cirq.H**0.5))) == '0.5*π around 0.707*X+0.707*Z'\n assert str(\n cirq.axis_angle(\n cirq.unitary(cirq.X**0.25) @ cirq.unitary(cirq.Y**0.25)\n @ cirq.unitary(cirq.Z**\n 0.25))) == '0.477*π around 0.679*X+0.281*Y+0.679*Z'\n\n\ndef test_axis_angle_decomposition_unitary():\n u = cirq.testing.random_unitary(2)\n u = cirq.unitary(cirq.T)\n a = cirq.axis_angle(u)\n np.testing.assert_allclose(u, cirq.unitary(a), atol=1e-8)\n\n\ndef test_axis_angle():\n assert cirq.approx_eq(cirq.axis_angle(cirq.unitary(cirq.Ry(1e-10))),\n cirq.AxisAngleDecomposition(angle=0,\n axis=(1, 0, 0),\n global_phase=1),\n atol=1e-8)\n assert cirq.approx_eq(cirq.axis_angle(cirq.unitary(cirq.Rx(np.pi))),\n cirq.AxisAngleDecomposition(angle=np.pi,\n axis=(1, 0, 0),\n global_phase=1),\n atol=1e-8)\n assert cirq.approx_eq(cirq.axis_angle(cirq.unitary(cirq.X)),\n cirq.AxisAngleDecomposition(angle=np.pi,\n axis=(1, 0, 0),\n global_phase=1j),\n atol=1e-8)\n assert cirq.approx_eq(cirq.axis_angle(cirq.unitary(cirq.X**0.5)),\n cirq.AxisAngleDecomposition(angle=np.pi / 2,\n axis=(1, 0, 0),\n global_phase=np.exp(\n 1j * np.pi / 4)),\n atol=1e-8)\n assert cirq.approx_eq(\n cirq.axis_angle(cirq.unitary(cirq.X**-0.5)),\n cirq.AxisAngleDecomposition(angle=-np.pi / 2,\n axis=(1, 0, 0),\n global_phase=np.exp(-1j * np.pi / 4)))\n\n assert cirq.approx_eq(cirq.axis_angle(cirq.unitary(cirq.Y)),\n cirq.AxisAngleDecomposition(angle=np.pi,\n axis=(0, 1, 0),\n global_phase=1j),\n atol=1e-8)\n\n assert cirq.approx_eq(cirq.axis_angle(cirq.unitary(cirq.Z)),\n cirq.AxisAngleDecomposition(angle=np.pi,\n axis=(0, 0, 1),\n global_phase=1j),\n atol=1e-8)\n\n assert cirq.approx_eq(cirq.axis_angle(cirq.unitary(cirq.H)),\n cirq.AxisAngleDecomposition(angle=np.pi,\n axis=(np.sqrt(0.5), 0,\n np.sqrt(0.5)),\n global_phase=1j),\n atol=1e-8)\n\n assert cirq.approx_eq(cirq.axis_angle(cirq.unitary(cirq.H**0.5)),\n cirq.AxisAngleDecomposition(\n angle=np.pi / 2,\n axis=(np.sqrt(0.5), 0, np.sqrt(0.5)),\n global_phase=np.exp(1j * np.pi / 4)),\n atol=1e-8)\n\n\ndef test_axis_angle_canonicalize():\n a = cirq.AxisAngleDecomposition(angle=np.pi * 2.3,\n axis=(1, 0, 0),\n global_phase=1j).canonicalize()\n assert a.global_phase == -1j\n assert a.axis == (1, 0, 0)\n np.testing.assert_allclose(a.angle, np.pi * 0.3, atol=1e-8)\n\n a = cirq.AxisAngleDecomposition(angle=np.pi / 2,\n axis=(-1, 0, 0),\n global_phase=1j).canonicalize()\n assert a.global_phase == 1j\n assert a.axis == (1, 0, 0)\n assert a.angle == -np.pi / 2\n\n a = cirq.AxisAngleDecomposition(angle=np.pi + 0.01,\n axis=(1, 0, 0),\n global_phase=1j).canonicalize(atol=0.1)\n assert a.global_phase == 1j\n assert a.axis == (1, 0, 0)\n assert a.angle == np.pi + 0.01\n\n a = 
cirq.AxisAngleDecomposition(angle=np.pi + 0.01,\n axis=(1, 0, 0),\n global_phase=1j).canonicalize(atol=0.001)\n assert a.global_phase == -1j\n assert a.axis == (1, 0, 0)\n assert np.isclose(a.angle, -np.pi + 0.01)\n\n\ndef test_axis_angle_canonicalize_approx_equal():\n a1 = cirq.AxisAngleDecomposition(angle=np.pi,\n axis=(1, 0, 0),\n global_phase=1)\n a2 = cirq.AxisAngleDecomposition(angle=-np.pi,\n axis=(1, 0, 0),\n global_phase=-1)\n b1 = cirq.AxisAngleDecomposition(angle=np.pi,\n axis=(1, 0, 0),\n global_phase=-1)\n assert cirq.approx_eq(a1, a2, atol=1e-8)\n assert not cirq.approx_eq(a1, b1, atol=1e-8)\n\n\ndef test_axis_angle_init():\n a = cirq.AxisAngleDecomposition(angle=1, axis=(0, 1, 0), global_phase=1j)\n assert a.angle == 1\n assert a.axis == (0, 1, 0)\n assert a.global_phase == 1j\n\n with pytest.raises(ValueError, match='normalize'):\n cirq.AxisAngleDecomposition(angle=1, axis=(0, 0.5, 0), global_phase=1)\n\n\ndef test_scatter_plot_normalized_kak_interaction_coefficients():\n a, b = cirq.LineQubit.range(2)\n data = [\n cirq.kak_decomposition(cirq.unitary(cirq.CZ)),\n cirq.unitary(cirq.CZ),\n cirq.CZ,\n cirq.Circuit.from_ops(cirq.H(a), cirq.CNOT(a, b)),\n ]\n ax = cirq.scatter_plot_normalized_kak_interaction_coefficients(data)\n assert ax is not None\n ax2 = cirq.scatter_plot_normalized_kak_interaction_coefficients(\n data, s=1, c='blue', ax=ax, include_frame=False, label=f'test')\n assert ax2 is ax\n",
"# Copyright 2018 The Cirq Developers\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\nfrom typing import Sequence, Tuple, Union, Any, Optional\n\nimport numpy as np\n\nfrom cirq import protocols, value\nfrom cirq.ops import raw_types, op_tree\nfrom cirq.type_workarounds import NotImplementedType\n\n\[email protected]_equality\nclass ParallelGateOperation(raw_types.Operation):\n \"\"\"An application of several copies of a gate to a group of qubits.\"\"\"\n\n def __init__(self,\n gate: raw_types.Gate,\n qubits: Sequence[raw_types.Qid]) -> None:\n \"\"\"\n Args:\n gate: the gate to apply\n qubits: lists of lists of qubits to apply the gate to.\n \"\"\"\n if gate.num_qubits() != 1:\n raise ValueError(\"gate must be a single qubit gate\")\n if len(set(qubits)) != len(qubits):\n raise ValueError(\"repeated qubits are not allowed\")\n for qubit in qubits:\n gate.validate_args([qubit])\n self._gate = gate\n self._qubits = tuple(qubits)\n\n @property\n def gate(self) -> raw_types.Gate:\n \"\"\"The single qubit gate applied by the operation.\"\"\"\n return self._gate\n\n @property\n def qubits(self) -> Tuple[raw_types.Qid, ...]:\n \"\"\"The qubits targeted by the operation.\"\"\"\n return self._qubits\n\n def with_qubits(self,\n *new_qubits: raw_types.Qid) -> 'ParallelGateOperation':\n \"\"\"ParallelGateOperation with same the gate but new qubits\"\"\"\n return ParallelGateOperation(self.gate, new_qubits)\n\n def with_gate(self, new_gate: raw_types.Gate) -> 'ParallelGateOperation':\n \"\"\"ParallelGateOperation with same qubits but a new gate\"\"\"\n return ParallelGateOperation(new_gate, self.qubits)\n\n def __repr__(self):\n return 'cirq.ParallelGateOperation(gate={!r}, qubits={!r})'.format(\n self.gate,\n list(self.qubits))\n\n def __str__(self):\n return '{}({})'.format(self.gate,\n ', '.join(str(e) for e in self.qubits))\n\n def _value_equality_values_(self):\n return self.gate, frozenset(self.qubits)\n\n def _decompose_(self) -> op_tree.OP_TREE:\n \"\"\"List of gate operations that correspond to applying the single qubit\n gate to each of the target qubits individually\n \"\"\"\n return [self.gate.on(qubit) for qubit in self.qubits]\n\n def _apply_unitary_(self, args: 'protocols.ApplyUnitaryArgs'\n ) -> Union[np.ndarray, None, NotImplementedType]:\n \"\"\"Replicates the logic the simulators use to apply the equivalent\n sequence of GateOperations\n \"\"\"\n if not protocols.has_unitary(self.gate):\n return NotImplemented\n return protocols.apply_unitaries((self.gate(q) for q in self.qubits),\n self.qubits, args)\n\n def _has_unitary_(self) -> bool:\n return protocols.has_unitary(self.gate)\n\n def _unitary_(self) -> Union[np.ndarray, NotImplementedType]:\n # Obtain the unitary for the single qubit gate\n single_unitary = protocols.unitary(self.gate, NotImplemented)\n\n # Make sure we actually have a matrix\n if single_unitary is NotImplemented:\n return single_unitary\n\n # Create a unitary which corresponds to applying the single qubit\n # unitary to each qubit. 
This will blow up memory fast.\n unitary = single_unitary\n for _ in range(len(self.qubits) - 1):\n unitary = np.kron(unitary, single_unitary)\n\n return unitary\n\n def _is_parameterized_(self) -> bool:\n return protocols.is_parameterized(self.gate)\n\n def _resolve_parameters_(self, resolver):\n resolved_gate = protocols.resolve_parameters(self.gate, resolver)\n return self.with_gate(resolved_gate)\n\n def _trace_distance_bound_(self) -> Optional[float]:\n angle = (len(self.qubits) *\n np.arcsin(protocols.trace_distance_bound(self._gate)))\n if angle >= np.pi * 0.5:\n return 1.0\n return np.sin(angle)\n\n def _circuit_diagram_info_(self, args: 'protocols.CircuitDiagramInfoArgs'\n ) -> 'protocols.CircuitDiagramInfo':\n diagram_info = protocols.circuit_diagram_info(self.gate,\n args,\n NotImplemented)\n if diagram_info == NotImplemented:\n return diagram_info\n\n # Include symbols for every qubit instead of just one\n symbol = diagram_info.wire_symbols[0]\n wire_symbols = (symbol,) * len(self.qubits)\n\n return protocols.CircuitDiagramInfo(wire_symbols=wire_symbols,\n exponent=diagram_info.exponent,\n connected=False)\n\n def __pow__(self, exponent: Any) -> 'ParallelGateOperation':\n \"\"\"Raise gate to a power, then reapply to the same qubits.\n\n Only works if the gate implements cirq.ExtrapolatableEffect.\n\n For extrapolatable gate G this means the following two are equivalent:\n\n (G ** 1.5)(qubit) or G(qubit) ** 1.5\n\n Args:\n exponent: The amount to scale the gate's effect by.\n\n Returns:\n A new operation on the same qubits with the scaled gate.\n \"\"\"\n new_gate = protocols.pow(self.gate,\n exponent,\n NotImplemented)\n if new_gate is NotImplemented:\n return NotImplemented\n return self.with_gate(new_gate)\n"
] | [
[
"numpy.pad",
"numpy.asarray",
"numpy.frombuffer",
"numpy.packbits",
"numpy.shape",
"numpy.unpackbits"
],
[
"numpy.diag",
"numpy.sqrt",
"numpy.allclose",
"numpy.conj",
"numpy.isnan",
"numpy.eye",
"numpy.linalg.norm",
"numpy.ones",
"numpy.linalg.det",
"numpy.testing.assert_allclose",
"numpy.array",
"numpy.exp",
"numpy.zeros",
"numpy.isclose"
],
[
"numpy.kron",
"numpy.sin"
]
] |
gideont/TensorFlow-Object-Detection-API-Tutorial-Train-Multiple-Objects-Windows-10 | [
"f8b24ccba44e3a55cc20da2ed0ad44d7ad2216bf"
] | [
"detect_single_image.py"
] | [
"######## Image Object Detection Using Tensorflow-trained Classifier #########\n#\n# Author: Evan Juras\n# Date: 1/15/18\n# Description: \n# This program uses a TensorFlow-trained neural network to perform object detection.\n# It loads the classifier and uses it to perform object detection on an image.\n# It draws boxes, scores, and labels around the objects of interest in the image.\n\n## Some of the code is copied from Google's example at\n## https://github.com/tensorflow/models/blob/master/research/object_detection/object_detection_tutorial.ipynb\n\n## and some is copied from Dat Tran's example at\n## https://github.com/datitran/object_detector_app/blob/master/object_detection_app.py\n\n## but I changed it to make it more understandable to me.\n\n# Import packages\nimport os\n#os.environ['CUDA_DEVICE_ORDER'] = 'PCI_BUS_ID'\n#os.environ['CUDA_VISIBLE_DEVICES'] = '1'\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '3' # or any {'0', '1', '2'} to supress warnings\nimport cv2\nimport numpy as np\nimport tensorflow as tf\ntf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR)\nimport logging\nlogging.getLogger('tensorflow').setLevel(logging.FATAL)\nimport sys\nimport time\n\n# This is needed since the notebook is stored in the object_detection folder.\nsys.path.append(\"..\")\n\n# Import utilites\nfrom utils import label_map_util\nfrom utils import visualization_utils as vis_util\n\n# Name of the directory containing the object detection module we're using\nMODEL_NAME = 'inference_graph'\nIMAGE_NAME = 'test_orig.jpg'\nIMAGE_RESULT_NAME = 'test_result.jpg'\n\n# Grab path to current working directory\nCWD_PATH = os.getcwd()\n\n# patch tf1 into `utils.ops`\n#utils_ops.tf = tf.compat.v1\n\n# Patch the location of gfile\ntf.gfile = tf.io.gfile\n\n# Path to frozen detection graph .pb file, which contains the model that is used\n# for object detection.\nPATH_TO_CKPT = os.path.join(CWD_PATH,MODEL_NAME,'frozen_inference_graph.pb')\n\n# Path to label map file\nPATH_TO_LABELS = os.path.join(CWD_PATH,'training','labelmap.pbtxt')\n\n# Path to image\n#PATH_TO_IMAGE = os.path.join(CWD_PATH,IMAGE_NAME)\n\n# Number of classes the object detector can identify\nNUM_CLASSES = 90\n\n# Load the label map.\n# Label maps map indices to category names, so that when our convolution\n# network predicts `5`, we know that this corresponds to `king`.\n# Here we use internal utility functions, but anything that returns a\n# dictionary mapping integers to appropriate string labels would be fine\nlabel_map = label_map_util.load_labelmap(PATH_TO_LABELS)\ncategories = label_map_util.convert_label_map_to_categories(label_map, max_num_classes=NUM_CLASSES, use_display_name=True)\ncategory_index = label_map_util.create_category_index(categories)\n\n# Load the Tensorflow model into memory.\ndetection_graph = tf.Graph()\nwith detection_graph.as_default():\n od_graph_def = tf.compat.v1.GraphDef()\n with tf.io.gfile.GFile(PATH_TO_CKPT, 'rb') as fid:\n serialized_graph = fid.read()\n od_graph_def.ParseFromString(serialized_graph)\n tf.import_graph_def(od_graph_def, name='')\n\n # CPU only\n #sess = tf.compat.v1.Session(graph=detection_graph)\n\n # GPU options to avoid GPU out-of-memory crash\n #gpu_options = tf.compat.v1.GPUOptions(per_process_gpu_memory_fraction=0.5) \n #gpu_options = tf.GPUOptions(allow_growth = True)\n # for tf2\n gpu_options = tf.compat.v1.GPUOptions(allow_growth = True)\n sess = tf.compat.v1.Session(graph=detection_graph,config=tf.compat.v1.ConfigProto(gpu_options=gpu_options, log_device_placement=False))\n\n# 
Define input and output tensors (i.e. data) for the object detection classifier\n\n# Input tensor is the image\nimage_tensor = detection_graph.get_tensor_by_name('image_tensor:0')\n\n# Output tensors are the detection boxes, scores, and classes\n# Each box represents a part of the image where a particular object was detected\ndetection_boxes = detection_graph.get_tensor_by_name('detection_boxes:0')\n\n# Each score represents level of confidence for each of the objects.\n# The score is shown on the result image, together with the class label.\ndetection_scores = detection_graph.get_tensor_by_name('detection_scores:0')\ndetection_classes = detection_graph.get_tensor_by_name('detection_classes:0')\n\n# Number of objects detected\nnum_detections = detection_graph.get_tensor_by_name('num_detections:0')\n\nstart = time.time()\n# Load image using OpenCV and\n# expand image dimensions to have shape: [1, None, None, 3]\n# i.e. a single-column array, where each item in the column has the pixel RGB value\nPATH_TO_IMAGE = os.path.join(CWD_PATH,IMAGE_NAME)\nprint(\"Detecting objects in file:\", PATH_TO_IMAGE)\nimage = cv2.imread(PATH_TO_IMAGE)\nimage_rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\nimage_expanded = np.expand_dims(image_rgb, axis=0)\n\n# Perform the actual detection by running the model with the image as input\n(boxes, scores, classes, num) = sess.run(\n [detection_boxes, detection_scores, detection_classes, num_detections],\n feed_dict={image_tensor: image_expanded})\n\n# Draw the results of the detection (aka 'visulaize the results')\n\nvis_util.visualize_boxes_and_labels_on_image_array(\n image,\n np.squeeze(boxes),\n np.squeeze(classes).astype(np.int32),\n np.squeeze(scores),\n category_index,\n use_normalized_coordinates=True,\n line_thickness=4,\n min_score_thresh=0.50)\n\nend = time.time()\nprint(end - start)\n# All the results have been drawn on image. Now display the image.\n#cv2.imshow('Object detector', image)\ncv2.imwrite(IMAGE_RESULT_NAME, image)\nprint(\"Saving result image to: \", IMAGE_RESULT_NAME)\n\n# Clean up\ncv2.destroyAllWindows()\n"
] | [
[
"tensorflow.Graph",
"numpy.expand_dims",
"tensorflow.import_graph_def",
"tensorflow.compat.v1.GPUOptions",
"tensorflow.compat.v1.ConfigProto",
"tensorflow.io.gfile.GFile",
"numpy.squeeze",
"tensorflow.compat.v1.logging.set_verbosity",
"tensorflow.compat.v1.GraphDef"
]
] |
farzana0/pgm_graph_inference | [
"37f1ea68f191d4f3021e7fdc8dd246d945e37ead",
"37f1ea68f191d4f3021e7fdc8dd246d945e37ead"
] | [
"experiments/saved_exp_res/exp_helpers.py",
"graphical_models/data_structs.py"
] | [
"\"\"\"\n\nExperiment specifications:\nan experiment is defined by train,test dataset pair,\neach dataset is loaded from graphical_models/datasets.\nAuthors: [email protected]\n\n\"\"\"\n\nimport os\nimport numpy as np\n\nfrom graphical_models import BinaryMRF\nfrom inference import get_algorithm\nfrom graphical_models.data_gen import struct_names\nfrom constants import *\n\n\n# Give specs in form structure->size\n# when used for train, the same is model name\ndata_specs = {\n \"debug\": \n {\"star\": [5],\n \"fc\": []},\n \"larger_debug\": \n {\"star\": [10],\n \"fc\": []},\n}\n\n# add simple datasets\ndata_specs.update({struct+\"_small\": {struct: [9]} for struct in struct_names})\nassert \"star_small\" in data_specs\n\n# add compound datasets\ndata_specs.update({struct+\"_medium\": {struct: [15,16,17]} for struct in struct_names})\ndata_specs.update({\"trees_medium\": {\"star\": [15, 16, 17],\n \"path\": [15, 16, 17],\n },\n \"conn_medium\": {\"bipart\": [15, 16, 17],\n # \"tripart\": [15, 16, 17],\n \"fc\": [15, 16, 17],\n },\n })\ndata_specs.update({\"grid_large\":{\"grid\":[49]},\n \"path_large\": {\"path\": [9,10,100]},\n \"fc_large\": {\"fc\": [15,16,17]},\n \"barbell_large\": {\"barbell\": [15,16,17]},\n \"ladder_large\": {\"ladder\": [15,16,17]},\n \"random_tree_large\": {\"random_tree\": [15,16,17]},\n \"wheel_large\": {\"wheel\": [15,16,17,100]},\n })\n\n\n# Add experiments for part 2: Trees+BP\ndata_specs.update({\"trees_approx\": {\"random_tree\": [100]},\n })\n\n# Add experiments for part 2: NonTrees+MCMC\ndata_specs.update({\"nontrees_approx\": \n {\"barbell\": [100],\n \"fc\": [100]},\n \"barbell_approx\": \n {\"barbell\": [100]},\n \"fc_approx\": \n {\"fc\": [100]}\n })\n\n# Data loading ----------------------------------------------------------------\ndef get_dataset_by_name(specs_name, data_dir, mode=None):\n \"\"\"\n Assumes graphs live as\n graphical_models/datasets/{train/val/test} <-- data_dir\n |-- star/\n | |- 9/<file1.npy>, <file2.npy> ...\n | |- 10/\n |- 11/\n ... ...\n Loads all graphs of given size and structure,\n this needs to be updated in the future\n (so that we can train and test on the same structures)\n\n Arguments:\n specs_name - key to the data_specs dictionary\n data_dir - train or test directory\n mode - map or marginal\n \"\"\"\n if specs_name not in data_specs:\n raise ValueError(\"Specification {} not supported\".format(specs_name))\n specs = data_specs[specs_name]\n graphs = []\n for struct in specs:\n size_list = specs[struct]\n for size in size_list:\n # go to specified dir, load and append\n directory = os.path.join(data_dir, struct, str(size))\n\n for filename in os.listdir(directory):\n if filename.endswith(\".npy\"):\n path_to_graph = os.path.join(directory, filename)\n data_dict = np.load(path_to_graph, allow_pickle=True)[()] # funny indexing\n graph = BinaryMRF(data_dict[\"W\"], data_dict[\"b\"])\n graph.set_ground_truth(marginal_est=data_dict[\"marginal\"],\n map_est=data_dict[\"map\"])\n graph.struct = struct\n graphs.append(graph)\n\n if mode is not None:\n graphs = [g for g in graphs if getattr(g, mode) is not None]\n print(\"Loaded {} graphs\".format(len(graphs)))\n return graphs\n\n\n# Some simple checks ----------------------------------------------------------\nif __name__ == \"__main__\":\n train_data = get_dataset_by_name(\"debug\")\n print(train_data[0])\n print(\"W, b:\", train_data[0].W, train_data[0].b)\n print(\"Marginals:\", train_data[0].marginal)\n print(\"MAP:\", train_data[0].map)\n\n",
"\"\"\"\n\nGraphical model class\nAuthors: [email protected]\n\nTODO:\n* MST generation in BinaryMRF\n\"\"\"\n\nimport networkx as nx\nimport numpy as np\nfrom inference import get_algorithm\n\ndflt_algo = {\"marginal\": \"bp\", \"map\": \"bp\"}\n\n\nclass GraphicalModel:\n def __init__(self, n_nodes, params=None, default_algo=dflt_algo):\n \"\"\"Constructor\n\n Arguments:\n n_nodes {int} - number of vertices in graphical model\n params {dictionary<str,np.array> or None} -- parameters of the model\n\n Keyword Arguments:\n default_algo {dict} -- default inference methods to use,\n unless they are overriden in corresponding methods\n (default: {dflt_algo})\n \"\"\"\n self.algo_marginal = default_algo[\"marginal\"]\n self.algo_map = default_algo[\"map\"]\n\n def set_ground_truth(self, marginal_est=None, map_est=None):\n \"\"\" Setting labels:\n To be used when instantiating\n a model from saved parameters\n \"\"\"\n self.marginal = marginal_est\n self.map = map_est\n\n # Running inference with/without Inference object\n def get_marginals(self, algo_obj=None, algo=None):\n if algo_obj is None:\n if algo is None:\n algo = self.algo_marginal\n algo_obj = get_algorithm(algo)\n inf_res = algo_obj.run(self, mode=\"marginal\")\n return inf_res\n\n def get_map(self, algo_obj=None, algo=None):\n if algo_obj is None:\n if algo is None:\n algo = self.algo_map\n algo_obj = get_algorithm(algo)\n inf_res = algo_obj.run(self, mode=\"map\")\n return inf_res\n\n def __repr__(self):\n return \"GraphicalModel:{} on {} nodes\".format(\n self.__class__.__name__, self.n_nodes)\n\n\nclass BinaryMRF(GraphicalModel):\n def __init__(self, W, b, struct=None):\n \"\"\"Constructor of BinaryMRF class.\n\n Arguments:\n W {np.array} -- (N, N) matrix of pairwise parameters\n b {np.array} -- (N,) vector of unary parameters\n \n Keyword Arguments:\n struct {string or None} -- description of graph structure\n (default: {None})\n \"\"\"\n self.W = W\n self.b = b\n self.struct = struct\n self.n_nodes = len(W)\n self.default_algo = {\"marginal\": \"bp\",\n \"map\": \"bp\"}\n # params = {\"W\": W, \"b\": b}\n super(BinaryMRF, self).__init__(\n n_nodes=self.n_nodes,\n default_algo=self.default_algo)\n\n def get_subgraph_on_nodes(self, node_list):\n \"\"\" node_list does not need to be ordered,\n return in the same order\n \"\"\"\n nx_graph = nx.from_numpy_matrix(self.W)\n sg = nx_graph.subgraph(node_list)\n W_sg = np.array(nx.to_numpy_matrix(sg))\n b_sg = self.b[node_list] # in the same order\n return BinaryMRF(W_sg, b_sg)\n\n def get_max_abs_spanning_tree(self):\n nx_graph = nx.from_numpy_matrix(np.abs(self.W))\n tree = nx.minimum_spanning_tree(nx_graph)\n W_abs_tree = np.array(nx.to_numpy_matrix(tree))\n W_mask = np.where(W_abs_tree > 0, 1, 0)\n # zero out unused edges:\n W_tree = W_mask * self.W\n b_tree = self.b\n return BinaryMRF(W_tree, b_tree)\n\n\nif __name__ == \"__main__\":\n print(get_algorithm(\"bp\"))\n"
] | [
[
"numpy.load"
],
[
"numpy.where",
"numpy.abs"
]
] |
jasperroebroek/sklearn-quantile | [
"d357240527f32b04b0fec3dcd308bb23de517209"
] | [
"tests/test_weighted_quantile.py"
] | [
"import numpy as np\nimport pytest\n\nfrom sklearn_quantile.utils import weighted_quantile\n\nfrom numpy.testing import assert_equal\nfrom numpy.testing import assert_array_almost_equal\nfrom numpy.testing import assert_almost_equal\nfrom numpy.testing import assert_raises\n\n\ndef test_quantile_equal_weights():\n rng = np.random.RandomState(0)\n x = rng.randn(10)\n weights = 0.1 * np.ones(10)\n\n # since weights are equal, quantiles lie in the midpoint.\n sorted_x = np.sort(x)\n expected = 0.5 * (sorted_x[1:] + sorted_x[:-1])\n actual = np.asarray([weighted_quantile(x, q, weights) for q in np.arange(0.1, 1.0, 0.1)])\n\n assert_array_almost_equal(expected, actual)\n\n # check quantiles at (0.05, 0.95) at intervals of 0.1\n actual = np.asarray([weighted_quantile(x, q, weights) for q in np.arange(0.05, 1.05, 0.1)])\n assert_array_almost_equal(sorted_x, actual)\n\n # it should be the same the calculated all quantiles at the same time instead of looping over them\n assert_array_almost_equal(actual, weighted_quantile(x, weights=weights, q=np.arange(0.05, 1.05, 0.1)))\n\n\ndef test_quantile_toy_data():\n x = [1, 2, 3]\n weights = [1, 4, 5]\n\n assert_equal(weighted_quantile(x, 0.0, weights), 1)\n assert_equal(weighted_quantile(x, 1.0, weights), 3)\n\n assert_equal(weighted_quantile(x, 0.05, weights), 1)\n assert_almost_equal(weighted_quantile(x, 0.30, weights), 2)\n assert_equal(weighted_quantile(x, 0.75, weights), 3)\n assert_almost_equal(weighted_quantile(x, 0.50, weights), 2.44, 2)\n\n\[email protected]('q', [0, 0.1, 0.5, 0.9, 1])\ndef test_zero_weights(q):\n x = [1, 2, 3, 4, 5]\n w = [0, 0, 0, 0.1, 0.1]\n\n assert_equal(\n weighted_quantile(x, q, w),\n weighted_quantile([4, 5], q, [0.1, 0.1])\n )\n\n\[email protected](\"keepdims\", [True, False])\ndef test_return_shapes(keepdims):\n rng = np.random.RandomState(0)\n x = rng.randn(100, 10, 20)\n weights = 0.01 * np.ones_like(x)\n\n # shape should be the same as the output of np.quantile. 
Without weights it is actually the same calculation\n assert (\n weighted_quantile(x, 0.5, weights, axis=0, keepdims=keepdims).shape ==\n np.quantile(x, 0.5, axis=0, keepdims=keepdims).shape\n )\n assert (\n weighted_quantile(x, 0.5, weights, axis=1, keepdims=keepdims).shape ==\n np.quantile(x, 0.5, axis=1, keepdims=keepdims).shape\n )\n assert (\n weighted_quantile(x, 0.5, weights, axis=2, keepdims=keepdims).shape ==\n np.quantile(x, 0.5, axis=2, keepdims=keepdims).shape\n )\n assert (\n weighted_quantile(x, (0.5, 0.8), weights, axis=0, keepdims=keepdims).shape ==\n np.quantile(x, (0.5, 0.8), axis=0, keepdims=keepdims).shape\n )\n if keepdims:\n assert (\n weighted_quantile(x, 0.5, weights, axis=None, keepdims=keepdims).shape ==\n np.quantile(x, 0.5, axis=None, keepdims=keepdims).shape\n )\n else:\n assert isinstance(weighted_quantile(x, 0.5, weights, axis=None, keepdims=keepdims), (np.float32, float))\n\n\[email protected](\"keepdims\", [True, False])\ndef test_return_shapes_empty_dims(keepdims):\n rng = np.random.RandomState(0)\n x = rng.randn(1, 100, 1)\n weights = 0.01 * np.ones_like(x)\n\n assert (\n weighted_quantile(x, 0.5, weights, axis=1, keepdims=keepdims).shape ==\n np.quantile(x, 0.5, axis=1, keepdims=keepdims).shape\n )\n assert (\n weighted_quantile(x, 0.5, weights=None, axis=1, keepdims=keepdims).shape ==\n np.quantile(x, 0.5, axis=1, keepdims=keepdims).shape\n )\n\n if keepdims:\n assert (\n weighted_quantile(x, 0.5, weights, keepdims=keepdims).shape ==\n np.quantile(x, 0.5, keepdims=keepdims).shape\n )\n\n\ndef test_errors():\n rng = np.random.RandomState(0)\n x = rng.randn(100, 10, 20)\n weights = 0.01 * np.ones_like(x)\n\n # axis should be integer\n assert_raises(NotImplementedError, weighted_quantile, x, 0.5, weights, axis=(1, 2))\n"
] | [
[
"numpy.ones_like",
"numpy.arange",
"numpy.quantile",
"numpy.sort",
"numpy.ones",
"numpy.testing.assert_raises",
"numpy.random.RandomState",
"numpy.testing.assert_array_almost_equal"
]
] |
ghanashyamchalla/cis_interface | [
"7b59439276eacb66f1f6ea4177d3a85cc061eed5"
] | [
"yggdrasil/drivers/CModelDriver.py"
] | [
"import os\nimport re\nimport warnings\nimport copy\nimport shutil\nimport subprocess\nimport numpy as np\nimport sysconfig\nfrom collections import OrderedDict\nfrom yggdrasil import platform, tools\nfrom yggdrasil.drivers.CompiledModelDriver import (\n CompiledModelDriver, CompilerBase, ArchiverBase)\nfrom yggdrasil.metaschema.properties.ScalarMetaschemaProperties import (\n _valid_types)\nfrom yggdrasil.languages import get_language_dir\nfrom yggdrasil.config import ygg_cfg\nfrom numpy import distutils as numpy_distutils\n\n\n_default_internal_libtype = 'object'\n# if platform._is_win: # pragma: windows\n# _default_internal_libtype = 'static'\n\n\ndef get_OSX_SYSROOT():\n r\"\"\"Determin the path to the OSX SDK.\n\n Returns:\n str: Full path to the SDK directory if one is located. None\n otherwise.\n\n \"\"\"\n fname = None\n if platform._is_mac:\n try:\n xcode_dir = subprocess.check_output(\n 'echo \"$(xcode-select -p)\"', shell=True).decode(\"utf-8\").strip()\n except BaseException: # pragma: debug\n xcode_dir = None\n fname_try = []\n cfg_sdkroot = ygg_cfg.get('c', 'macos_sdkroot', None)\n if cfg_sdkroot:\n fname_try.append(cfg_sdkroot)\n if xcode_dir is not None:\n fname_base = os.path.join(xcode_dir, 'Platforms',\n 'MacOSX.platform', 'Developer',\n 'SDKs', 'MacOSX%s.sdk')\n fname_try += [\n fname_base % os.environ.get('MACOSX_DEPLOYMENT_TARGET', ''),\n fname_base % '',\n os.path.join(xcode_dir, 'SDKs', 'MacOSX.sdk')]\n if os.environ.get('SDKROOT', False):\n fname_try.insert(0, os.environ['SDKROOT'])\n for fcheck in fname_try:\n if os.path.isdir(fcheck):\n fname = fcheck\n break\n return fname\n\n\n_osx_sysroot = get_OSX_SYSROOT()\n\n\nclass CCompilerBase(CompilerBase):\n r\"\"\"Base class for C compilers.\"\"\"\n languages = ['c']\n default_executable_env = 'CC'\n default_flags_env = 'CFLAGS'\n default_flags = ['-g', '-Wall']\n # GCC & CLANG have similar call patterns\n linker_attributes = {'default_flags_env': 'LDFLAGS',\n 'search_path_envvar': ['LIBRARY_PATH', 'LD_LIBRARY_PATH']}\n search_path_envvar = ['C_INCLUDE_PATH']\n search_path_flags = ['-E', '-v', '-xc', '/dev/null']\n search_regex_begin = '#include \"...\" search starts here:'\n search_regex_end = 'End of search list.'\n search_regex = [r'(?:#include <...> search starts here:)|'\n r'(?: ([^\\n]+?)(?: \\(framework directory\\))?)\\n']\n\n @staticmethod\n def before_registration(cls):\n r\"\"\"Operations that should be performed to modify class attributes prior\n to registration including things like platform dependent properties and\n checking environment variables for default settings.\n \"\"\"\n if platform._is_mac:\n cls.linker_attributes = dict(cls.linker_attributes,\n search_path_flags=['-Xlinker', '-v'],\n search_regex=[r'\\t([^\\t\\n]+)\\n'],\n search_regex_begin='Library search paths:')\n elif platform._is_linux:\n cls.linker_attributes = dict(cls.linker_attributes,\n search_path_flags=['-Xlinker', '--verbose'],\n search_regex=[r'SEARCH_DIR\\(\"=([^\"]+)\"\\);'])\n CompilerBase.before_registration(cls)\n\n @classmethod\n def set_env(cls, *args, **kwargs):\n r\"\"\"Set environment variables required for compilation.\n\n Args:\n *args: Arguments are passed to the parent class's method.\n **kwargs: Keyword arguments are passed to the parent class's\n method.\n\n Returns:\n dict: Environment variables for the model process.\n\n \"\"\"\n out = super(CCompilerBase, cls).set_env(*args, **kwargs)\n if _osx_sysroot is not None:\n out['CONDA_BUILD_SYSROOT'] = _osx_sysroot\n out['SDKROOT'] = _osx_sysroot\n grp = 
re.search(r'MacOSX(?P<target>[0-9]+\\.[0-9]+)?',\n _osx_sysroot).groupdict()\n # This is only utilized on local installs where a\n # non-default SDK is installed in addition to the default\n if grp['target']: # pragma: debug\n out['MACOSX_DEPLOYMENT_TARGET'] = grp['target']\n return out\n \n @classmethod\n def call(cls, args, **kwargs):\n r\"\"\"Call the compiler with the provided arguments. For |yggdrasil| C\n models will always be linked using the C++ linker since some parts of\n the interface library are written in C++.\"\"\"\n if not kwargs.get('dont_link', False):\n kwargs.setdefault('linker_language', 'c++')\n return super(CCompilerBase, cls).call(args, **kwargs)\n \n\nclass GCCCompiler(CCompilerBase):\n r\"\"\"Interface class for gcc compiler/linker.\"\"\"\n toolname = 'gcc'\n platforms = ['MacOS', 'Linux', 'Windows']\n default_archiver = 'ar'\n\n\nclass ClangCompiler(CCompilerBase):\n r\"\"\"clang compiler on Apple Mac OS.\"\"\"\n toolname = 'clang'\n platforms = ['MacOS']\n default_archiver = 'libtool'\n flag_options = OrderedDict(list(CCompilerBase.flag_options.items())\n + [('sysroot', '--sysroot'),\n ('isysroot', {'key': '-isysroot',\n 'prepend': True}),\n ('mmacosx-version-min',\n '-mmacosx-version-min=%s')])\n\n\nclass MSVCCompiler(CCompilerBase):\n r\"\"\"Microsoft Visual Studio C Compiler.\"\"\"\n toolname = 'cl'\n languages = ['c', 'c++']\n platforms = ['Windows']\n default_flags_env = ['CFLAGS', 'CXXFLAGS']\n # TODO: Currently everything compiled as C++ on windows to allow use\n # of complex types. Use '/TC' instead of '/TP' for strictly C\n default_flags = ['/W4', # Display all errors\n '/Zi', # Symbolic debug in .pdb (implies debug)\n # '/MTd', # Use LIBCMTD.lib to create multithreaded .exe\n # '/Z7', # Symbolic debug in .obj (implies debug)\n \"/EHsc\", # Catch C++ exceptions only (C don't throw C++)\n '/TP', # Treat all files as C++\n \"/nologo\", # Suppress startup banner\n # Don't show errors from using scanf, strcpy, etc.\n \"-D_CRT_SECURE_NO_WARNINGS\"]\n output_key = '/Fo%s'\n output_first = True\n default_linker = 'LINK'\n default_archiver = 'LIB'\n linker_switch = '/link'\n search_path_envvar = 'INCLUDE'\n search_path_flags = None\n version_flags = []\n product_exts = ['.dir', '.ilk', '.pdb', '.sln', '.vcxproj', '.vcxproj.filters']\n combine_with_linker = True # Must be explicit; linker is separate .exe\n linker_attributes = dict(GCCCompiler.linker_attributes,\n default_executable=None,\n default_executable_env=None,\n default_flags_env=None,\n output_key='/OUT:%s',\n output_first=True,\n output_first_library=False,\n flag_options=OrderedDict(\n [('library_libs', ''),\n ('library_dirs', '/LIBPATH:%s')]),\n shared_library_flag='/DLL',\n search_path_envvar='LIB',\n search_path_flags=None)\n \n @classmethod\n def language_version(cls, **kwargs): # pragma: windows\n r\"\"\"Determine the version of this language.\n\n Args:\n **kwargs: Keyword arguments are passed to cls.call.\n\n Returns:\n str: Version of compiler/interpreter for this language.\n\n \"\"\"\n out = cls.call(cls.version_flags, skip_flags=True,\n allow_error=True, **kwargs)\n if 'Copyright' not in out: # pragma: debug\n raise RuntimeError(\"Version call failed: %s\" % out)\n return out.split('Copyright')[0]\n\n \n# C Archivers\nclass ARArchiver(ArchiverBase):\n r\"\"\"Archiver class for ar tool.\"\"\"\n toolname = 'ar'\n languages = ['c', 'c++']\n default_executable_env = 'AR'\n default_flags_env = None\n static_library_flag = 'rcs'\n output_key = ''\n output_first_library = True\n\n\nclass 
LibtoolArchiver(ArchiverBase):\n r\"\"\"Archiver class for libtool tool.\"\"\"\n toolname = 'libtool'\n languages = ['c', 'c++']\n default_executable_env = 'LIBTOOL'\n static_library_flag = '-static' # This is the default\n \n\nclass MSVCArchiver(ArchiverBase):\n r\"\"\"Microsoft Visual Studio C Archiver.\"\"\"\n toolname = 'LIB'\n languages = ['c', 'c++']\n platforms = ['Windows']\n static_library_flag = None\n output_key = '/OUT:%s'\n \n\n_top_lang_dir = get_language_dir('c')\n_incl_interface = _top_lang_dir\n_incl_seri = os.path.join(_top_lang_dir, 'serialize')\n_incl_comm = os.path.join(_top_lang_dir, 'communication')\n_python_inc = ygg_cfg.get('c', 'python_include', None)\nif (_python_inc is None) or (not os.path.isfile(_python_inc)): # pragma: no cover\n _python_inc = sysconfig.get_paths()['include']\nelse:\n _python_inc = os.path.dirname(_python_inc)\ntry:\n _python_lib = ygg_cfg.get('c', 'python_shared',\n ygg_cfg.get('c', 'python_static', None))\n if (_python_lib is None) or (not os.path.isfile(_python_lib)): # pragma: no cover\n _python_lib = tools.get_python_c_library(allow_failure=False)\nexcept BaseException: # pragma: debug\n warnings.warn(\"ERROR LOCATING PYTHON LIBRARY\")\n _python_lib = None\n_numpy_inc = numpy_distutils.misc_util.get_numpy_include_dirs()\n_numpy_lib = None\n\n\nclass CModelDriver(CompiledModelDriver):\n r\"\"\"Class for running C models.\"\"\"\n\n _schema_subtype_description = ('Model is written in C.')\n language = 'c'\n language_ext = ['.c', '.h']\n interface_library = 'ygg'\n supported_comms = ['ipc', 'zmq']\n supported_comm_options = {\n 'ipc': {'platforms': ['MacOS', 'Linux']},\n 'zmq': {'libraries': ['zmq', 'czmq']}}\n interface_dependencies = ['rapidjson']\n interface_directories = [_incl_interface]\n external_libraries = {\n 'rapidjson': {'include': os.path.join(os.path.dirname(tools.__file__),\n 'rapidjson', 'include',\n 'rapidjson', 'rapidjson.h'),\n 'libtype': 'header_only',\n 'language': 'c'},\n 'zmq': {'include': 'zmq.h',\n 'libtype': 'shared',\n 'language': 'c'},\n 'czmq': {'include': 'czmq.h',\n 'libtype': 'shared',\n 'language': 'c'},\n 'numpy': {'include': os.path.join(_numpy_inc[0], 'numpy',\n 'arrayobject.h'),\n 'libtype': 'header_only',\n 'language': 'c'},\n 'python': {'include': os.path.join(_python_inc, 'Python.h'),\n 'language': 'c'}}\n internal_libraries = {\n 'ygg': {'source': os.path.join(_incl_interface, 'YggInterface.c'),\n 'linker_language': 'c++', # Some dependencies are C++\n 'internal_dependencies': ['regex', 'datatypes'],\n 'external_dependencies': ['rapidjson',\n 'python', 'numpy'],\n 'include_dirs': [_incl_comm, _incl_seri],\n 'compiler_flags': []},\n 'regex_win32': {'source': 'regex_win32.cpp',\n 'directory': os.path.join(_top_lang_dir, 'regex'),\n 'language': 'c++',\n 'libtype': _default_internal_libtype,\n 'internal_dependencies': [],\n 'external_dependencies': []},\n 'regex_posix': {'source': 'regex_posix.h',\n 'directory': os.path.join(_top_lang_dir, 'regex'),\n 'language': 'c',\n 'libtype': 'header_only',\n 'internal_dependencies': [],\n 'external_dependencies': []},\n 'datatypes': {'directory': os.path.join(_top_lang_dir, 'datatypes'),\n 'language': 'c++',\n 'libtype': _default_internal_libtype,\n 'internal_dependencies': ['regex'],\n 'external_dependencies': ['rapidjson',\n 'python', 'numpy'],\n 'include_dirs': []}}\n type_map = {\n 'int': 'intX_t',\n 'float': 'double',\n 'string': 'string_t',\n 'array': 'json_array_t',\n 'object': 'json_object_t',\n 'boolean': 'bool',\n 'null': 'void*',\n 'uint': 'uintX_t',\n 
'complex': 'complex_X',\n 'bytes': 'char*',\n 'unicode': 'unicode_t',\n '1darray': '*',\n 'ndarray': '*',\n 'ply': 'ply_t',\n 'obj': 'obj_t',\n 'schema': 'schema_t',\n 'flag': 'int',\n 'class': 'python_class_t',\n 'function': 'python_function_t',\n 'instance': 'python_instance_t',\n 'any': 'generic_t'}\n function_param = {\n 'import': '#include \\\"{filename}\\\"',\n 'index': '{variable}[{index}]',\n 'interface': '#include \\\"{interface_library}\\\"',\n 'input': ('yggInput_t {channel} = yggInputType('\n '\\\"{channel_name}\\\", {channel_type});'),\n 'output': ('yggOutput_t {channel} = yggOutputType('\n '\\\"{channel_name}\\\", {channel_type});'),\n 'recv_heap': 'yggRecvRealloc',\n 'recv_stack': 'yggRecv',\n 'recv_function': 'yggRecvRealloc',\n 'send_function': 'yggSend',\n 'not_flag_cond': '{flag_var} < 0',\n 'flag_cond': '{flag_var} >= 0',\n 'declare': '{type_name} {variable};',\n 'init_array': 'init_json_array()',\n 'init_object': 'init_json_object()',\n 'init_schema': 'init_schema()',\n 'init_ply': 'init_ply()',\n 'init_obj': 'init_obj()',\n 'init_class': 'init_python()',\n 'init_function': 'init_python()',\n 'init_instance': 'init_generic()',\n 'init_any': 'init_generic()',\n 'copy_array': '{name} = copy_json_array({value});',\n 'copy_object': '{name} = copy_json_object({value});',\n 'copy_schema': '{name} = copy_schema({value});',\n 'copy_ply': '{name} = copy_ply({value});',\n 'copy_obj': '{name} = copy_obj({value});',\n 'copy_class': '{name} = copy_python({value});',\n 'copy_function': '{name} = copy_python({value});',\n 'copy_instance': '{name} = copy_generic({value});',\n 'copy_any': '{name} = copy_generic({value});',\n 'free_array': 'free_json_array({variable});',\n 'free_object': 'free_json_object({variable});',\n 'free_schema': 'free_schema({variable});',\n 'free_ply': 'free_ply({variable});',\n 'free_obj': 'free_obj({variable});',\n 'free_class': 'destroy_python({variable});',\n 'free_function': 'destroy_python({variable});',\n 'free_instance': 'free_generic({variable});',\n 'free_any': 'free_generic({variable});',\n 'print_float': 'printf(\"%f\\\\n\", {object});',\n 'print_int': 'printf(\"%i\\\\n\", {object});',\n 'print_uint': 'printf(\"%u\\\\n\", {object});',\n 'print_string': 'printf(\"%s\\\\n\", {object});',\n 'print_unicode': 'printf(\"%s\\\\n\", {object});',\n 'print_bytes': 'printf(\"%s\\\\n\", {object});',\n 'print_complex': 'print_complex({object});',\n 'print_array': 'display_json_array({object});',\n 'print_object': 'display_json_object({object});',\n 'print_schema': 'display_schema({object});',\n 'print_ply': 'display_ply({object});',\n 'print_obj': 'display_obj({object});',\n 'print_class': 'display_python({object});',\n 'print_function': 'display_python({object});',\n 'print_instance': 'display_generic({object});',\n 'print_any': 'display_generic({object});',\n 'assign': '{name} = {value};',\n 'assign_copy': 'memcpy({name}, {value}, {N}*sizeof({native_type}));',\n 'comment': '//',\n 'true': '1',\n 'false': '0',\n 'not': '!',\n 'and': '&&',\n 'indent': 2 * ' ',\n 'quote': '\\\"',\n 'print': 'printf(\\\"{message}\\\\n\\\");',\n 'fprintf': 'printf(\\\"{message}\\\\n\\\", {variables});',\n 'error': 'printf(\\\"{error_msg}\\\\n\\\"); return -1;',\n 'block_end': '}',\n 'line_end': ';',\n 'if_begin': 'if ({cond}) {{',\n 'if_elif': '}} else if ({cond}) {{',\n 'if_else': '}} else {{',\n 'for_begin': ('for ({iter_var} = {iter_begin}; {iter_var} < {iter_end}; '\n '{iter_var}++) {{'),\n 'while_begin': 'while ({cond}) {{',\n 'break': 'break;',\n 'exec_begin': 'int main() 
{',\n 'exec_end': ' return 0;\\n}',\n 'exec_prefix': '#include <stdbool.h>',\n 'free': 'if ({variable} != NULL) {{ free({variable}); {variable} = NULL; }}',\n 'function_def_begin': '{output_type} {function_name}({input_var}) {{',\n 'return': 'return {output_var};',\n 'function_def_regex': (\n r'(?P<flag_type>.+?)\\s*{function_name}\\s*'\n r'\\((?P<inputs>(?:[^{{])*?)\\)\\s*\\{{'\n r'(?P<body>(?:.*?\\n?)*?)'\n r'(?:(?:return *(?P<flag_var>.+?)?;(?:.*?\\n?)*?\\}})'\n r'|(?:\\}}))'),\n 'inputs_def_regex': (\n r'\\s*(?P<native_type>(?:[^\\s\\*])+(\\s+)?'\n r'(?P<ptr>\\*+)?)(?(ptr)(?(1)(?:\\s*)|(?:\\s+)))'\n r'(\\((?P<name_ptr>\\*+)?)?(?P<name>.+?)(?(4)(?:\\)))'\n r'(?P<shape>(?:\\[.+?\\])+)?\\s*(?:,|$)(?:\\n)?'),\n 'outputs_def_regex': (\n r'\\s*(?P<native_type>(?:[^\\s\\*])+(\\s+)?'\n r'(?P<ptr>\\*+)?)(?(ptr)(?(1)(?:\\s*)|(?:\\s+)))'\n r'(?P<name>.+?)(?P<shape>(?:\\[.+?\\])+)?\\s*(?:,|$)(?:\\n)?')}\n outputs_in_inputs = True\n include_channel_obj = True\n is_typed = True\n brackets = (r'{', r'}')\n\n @staticmethod\n def after_registration(cls, **kwargs):\n r\"\"\"Operations that should be performed to modify class attributes after\n registration.\"\"\"\n if cls.default_compiler is None:\n if platform._is_linux:\n cls.default_compiler = 'gcc'\n elif platform._is_mac:\n cls.default_compiler = 'clang'\n elif platform._is_win: # pragma: windows\n cls.default_compiler = 'cl'\n CompiledModelDriver.after_registration(cls, **kwargs)\n if kwargs.get('second_pass', False):\n return\n if _python_lib:\n if _python_lib.endswith(('.lib', '.a')):\n cls.external_libraries['python']['libtype'] = 'static'\n cls.external_libraries['python']['static'] = _python_lib\n else:\n cls.external_libraries['python']['libtype'] = 'shared'\n cls.external_libraries['python']['shared'] = _python_lib\n for x in ['zmq', 'czmq']:\n if x in cls.external_libraries:\n if platform._is_win: # pragma: windows\n cls.external_libraries[x]['libtype'] = 'static'\n # Platform specific regex internal library\n if platform._is_win: # pragma: windows\n regex_lib = cls.internal_libraries['regex_win32']\n else:\n regex_lib = cls.internal_libraries['regex_posix']\n cls.internal_libraries['regex'] = regex_lib\n # Platform specific internal library options\n cls.internal_libraries['ygg']['include_dirs'] += [_top_lang_dir]\n if platform._is_win: # pragma: windows\n stdint_win = os.path.join(_top_lang_dir, 'windows_stdint.h')\n assert(os.path.isfile(stdint_win))\n shutil.copy(stdint_win, os.path.join(_top_lang_dir, 'stdint.h'))\n cls.internal_libraries['datatypes']['include_dirs'] += [_top_lang_dir]\n if platform._is_linux:\n for x in ['ygg', 'datatypes']:\n if 'compiler_flags' not in cls.internal_libraries[x]:\n cls.internal_libraries[x]['compiler_flags'] = []\n if '-fPIC' not in cls.internal_libraries[x]['compiler_flags']:\n cls.internal_libraries[x]['compiler_flags'].append('-fPIC')\n \n @classmethod\n def configure(cls, cfg, macos_sdkroot=None):\n r\"\"\"Add configuration options for this language. This includes locating\n any required external libraries and setting option defaults.\n\n Args:\n cfg (YggConfigParser): Config class that options should be set for.\n macos_sdkroot (str, optional): Full path to the root directory for\n the MacOS SDK that should be used. 
Defaults to None and is\n ignored.\n\n Returns:\n list: Section, option, description tuples for options that could not\n be set.\n\n \"\"\"\n # Call __func__ to avoid direct invoking of class which dosn't exist\n # in after_registration where this is called\n out = CompiledModelDriver.configure.__func__(cls, cfg)\n # Change configuration to be directory containing include files\n rjlib = cfg.get(cls._language, 'rapidjson_include', None)\n if (rjlib is not None) and os.path.isfile(rjlib):\n cfg.set(cls._language, 'rapidjson_include',\n os.path.dirname(os.path.dirname(rjlib)))\n nplib = cfg.get(cls._language, 'numpy_include', None)\n if (nplib is not None) and os.path.isfile(nplib):\n cfg.set(cls._language, 'numpy_include',\n os.path.dirname(os.path.dirname(nplib)))\n if macos_sdkroot is None:\n macos_sdkroot = _osx_sysroot\n if macos_sdkroot is not None:\n if not os.path.isdir(macos_sdkroot): # pragma: debug\n raise ValueError(\"Path to MacOS SDK root directory \"\n \"does not exist: %s.\" % macos_sdkroot)\n cfg.set(cls._language, 'macos_sdkroot', macos_sdkroot)\n return out\n\n @classmethod\n def call_linker(cls, obj, language=None, **kwargs):\n r\"\"\"Link several object files to create an executable or library (shared\n or static), checking for errors.\n\n Args:\n obj (list): Object files that should be linked.\n language (str, optional): Language that should be used to link\n the files. Defaults to None and the language of the current\n driver is used.\n **kwargs: Additional keyword arguments are passed to run_executable.\n\n Returns:\n str: Full path to compiled source.\n\n \"\"\"\n if (((cls.language == 'c') and (language is None)\n and kwargs.get('for_model', False)\n and (not kwargs.get('skip_interface_flags', False)))):\n language = 'c++'\n kwargs.update(cls.update_linker_kwargs(**kwargs))\n kwargs['skip_interface_flags'] = True\n return super(CModelDriver, cls).call_linker(obj, language=language,\n **kwargs)\n \n @classmethod\n def update_ld_library_path(cls, env, paths_to_add=None, add_to_front=False):\n r\"\"\"Update provided dictionary of environment variables so that\n LD_LIBRARY_PATH includes the interface directory containing the interface\n libraries.\n\n Args:\n env (dict): Dictionary of enviroment variables to be updated.\n paths_to_add (list, optional): Paths that should be added. If not\n provided, defaults to [cls.get_language_dir()].\n add_to_front (bool, optional): If True, new paths are added to the\n front, rather than the end. 
Defaults to False.\n\n Returns:\n dict: Updated dictionary of environment variables.\n\n \"\"\"\n if paths_to_add is None:\n paths_to_add = [cls.get_language_dir()]\n if platform._is_linux:\n path_list = []\n prev_path = env.pop('LD_LIBRARY_PATH', '')\n if prev_path:\n path_list.append(prev_path)\n for x in paths_to_add:\n if x not in prev_path:\n if add_to_front:\n path_list.insert(0, x)\n else:\n path_list.append(x)\n if path_list:\n env['LD_LIBRARY_PATH'] = os.pathsep.join(path_list)\n return env\n\n def set_env(self, **kwargs):\n r\"\"\"Get environment variables that should be set for the model process.\n\n Args:\n **kwargs: Additional keyword arguments are passed to the parent\n class's method.\n\n Returns:\n dict: Environment variables for the model process.\n\n \"\"\"\n out = super(CModelDriver, self).set_env(**kwargs)\n out = self.update_ld_library_path(out)\n if platform._is_win: # pragma: windows\n out.setdefault('PYTHONHOME', sysconfig.get_config_var('prefix'))\n out.setdefault('PYTHONPATH', os.pathsep.join([\n sysconfig.get_path('stdlib'), sysconfig.get_path('purelib'),\n os.path.join(sysconfig.get_config_var('prefix'), 'DLLs')]))\n return out\n \n @classmethod\n def parse_var_definition(cls, io, value, **kwargs):\n r\"\"\"Extract information about input/output variables from a\n string definition.\n\n Args:\n io (str): Description of variables contained in the provided\n string. Must be 'inputs' or 'outputs'.\n value (str): String containing one or more variable definitions.\n **kwargs: Additional keyword arguments are passed to the\n parent class's method.\n\n Returns:\n list: List of information about the variables contained in\n the provided string.\n\n Raises:\n AssertionError: If io is not 'inputs' or 'outputs'.\n NotImplementedError: If the def_regex for the specified\n io is not defined.\n\n \"\"\"\n out = super(CModelDriver, cls).parse_var_definition(io, value, **kwargs)\n io_map = {x['name']: x for x in out}\n for i, x in enumerate(out):\n if (x['name'] + '_length') in io_map:\n x['length_var'] = x['name'] + '_length'\n elif ('length_' + x['name']) in io_map:\n x['length_var'] = 'length_' + x['name']\n elif (((x['name'] + '_ndim') in io_map)\n and ((x['name'] + '_shape') in io_map)):\n x['ndim_var'] = x['name'] + '_ndim'\n x['shape_var'] = x['name'] + '_shape'\n x['datatype']['type'] = 'ndarray'\n elif ((('ndim_' + x['name']) in io_map)\n and (('shape_' + x['name']) in io_map)):\n x['ndim_var'] = 'ndim_' + x['name']\n x['shape_var'] = 'shape_' + x['name']\n x['datatype']['type'] = 'ndarray'\n elif 'shape' in x:\n x['datatype']['shape'] = [\n int(float(s.strip('[]')))\n for s in x.pop('shape').split('][')]\n assert(x['datatype']['subtype'] in _valid_types)\n if len(x['datatype']['shape']) == 1:\n x['datatype']['length'] = x['datatype'].pop(\n 'shape')[0]\n x['datatype']['type'] = '1darray'\n else:\n x['datatype']['type'] = 'ndarray'\n return out\n \n @classmethod\n def update_io_from_function(cls, model_file, model_function,\n inputs=[], outputs=[], contents=None,\n outputs_in_inputs=None):\n r\"\"\"Update inputs/outputs from the function definition.\n\n Args:\n model_file (str): Full path to the file containing the model\n function's declaration.\n model_function (str): Name of the model function.\n inputs (list, optional): List of model inputs including types.\n Defaults to [].\n outputs (list, optional): List of model outputs including types.\n Defaults to [].\n contents (str, optional): Contents of file to parse rather than\n re-reading the file. 
Defaults to None and is ignored.\n outputs_in_inputs (bool, optional): If True, the outputs are\n presented in the function definition as inputs. Defaults\n to False.\n\n Returns:\n dict, None: Flag variable used by the model. If None, the\n model does not use a flag variable.\n\n \"\"\"\n flag_var = super(CModelDriver, cls).update_io_from_function(\n model_file, model_function, inputs=inputs,\n outputs=outputs, contents=contents,\n outputs_in_inputs=outputs_in_inputs)\n # Add length_vars if missing for use by yggdrasil\n for x in inputs:\n for v in x['vars']:\n if cls.requires_length_var(v) and (not v.get('length_var', False)):\n v['length_var'] = {'name': v['name'] + '_length',\n 'datatype': {'type': 'uint',\n 'precision': 64},\n 'is_length_var': True,\n 'dependent': True}\n elif cls.requires_shape_var(v):\n if not (v.get('ndim_var', False)\n and v.get('shape_var', False)): # pragma: debug\n raise RuntimeError(\"Uncomment logic that follows.\")\n # if not v.get('ndim_var', False):\n # v['ndim_var'] = {\n # 'name': v['name'] + '_ndim',\n # 'datatype': {'type': 'uint',\n # 'precision': 64},\n # 'is_length_var': True,\n # 'dependent': True}\n # if not v.get('shape_var', False):\n # v['shape_var'] = {\n # 'name': v['name'] + '_ndim',\n # 'datatype': {'type': '1darray',\n # 'subtype': 'uint',\n # 'precision': 64},\n # 'is_length_var': True,\n # 'dependent': True}\n for x in outputs:\n for v in x['vars']:\n if cls.requires_length_var(v) and (not v.get('length_var', False)):\n if v['datatype']['type'] in ['1darray', 'ndarray']: # pragma: debug\n raise RuntimeError(\"Length must be defined for arrays.\")\n elif v['datatype'].get('subtype', v['datatype']['type']) == 'bytes':\n v['length_var'] = 'strlen(%s)' % v['name']\n else:\n v['length_var'] = 'strlen4(%s)' % v['name']\n elif (cls.requires_shape_var(v)\n and not (v.get('ndim_var', False)\n and v.get('shape_var', False))): # pragma: debug\n raise RuntimeError(\"Shape must be defined for ND arrays.\")\n # Flag input variables for reallocation\n for x in inputs:\n allows_realloc = [cls.allows_realloc(v) for v in x['vars']]\n if all(allows_realloc):\n for v in x['vars']:\n if (((v['native_type'] not in ['char*', 'string_t',\n 'bytes_t', 'unicode_t'])\n and (not v.get('is_length_var', False))\n and (v['datatype']['type'] not in\n ['any', 'object', 'array', 'schema',\n 'instance', '1darray', 'ndarray'])\n and (cls.function_param['recv_function']\n == cls.function_param['recv_heap']))):\n v['allow_realloc'] = True\n for x in inputs + outputs:\n if x['datatype']['type'] == 'array':\n nvars_items = len(x['datatype'].get('items', []))\n nvars = sum([(not ix.get('is_length_var', False))\n for ix in x['vars']])\n if nvars_items == nvars:\n x['use_generic'] = False\n else:\n x['use_generic'] = True\n return flag_var\n \n @classmethod\n def input2output(cls, var):\n r\"\"\"Perform conversion necessary to turn a variable extracted from a\n function definition from an input to an output.\n\n Args:\n var (dict): Variable definition.\n\n Returns:\n dict: Updated variable definition.\n\n \"\"\"\n out = super(CModelDriver, cls).input2output(var)\n if out.get('ptr', ''):\n assert(out['native_type'].endswith('*'))\n out['ptr'] = out['ptr'][:-1]\n out['native_type'] = out['native_type'][:-1]\n out['datatype'] = cls.get_json_type(out['native_type'])\n if (((out['datatype']['type'] == '1darray')\n and var.get('ndim_var', False)\n and var.get('shape_var', False))):\n out['datatype']['type'] = 'ndarray'\n return out\n\n @classmethod\n def output2input(cls, var, 
in_definition=True):\n r\"\"\"Perform conversion necessary to turn an output variable\n into an corresponding input that can be used to format a\n function definition.\n\n Args:\n var (dict): Variable definition.\n in_definition (bool, optional): If True, the returned\n dictionary corresponds to an input variable in a\n function definition. If False, the returned value\n will correspond to an input to a function. Defaults to\n True.\n\n Returns:\n dict: Updated variable definition.\n\n \"\"\"\n out = super(CModelDriver, cls).output2input(var)\n if isinstance(var, dict):\n if in_definition:\n out = dict(out, name='*' + out['name'])\n if ((('shape' in out.get('datatype', {}))\n or ('length' in out.get('datatype', {})))):\n out['name'] = '(%s)' % out['name']\n else:\n out = dict(out, name='&' + out['name'])\n if ('shape' in out.get('datatype', {})) and (not platform._is_win):\n out['name'] += len(out['datatype']['shape']) * '[0]'\n return out\n \n @classmethod\n def allows_realloc(cls, var):\n r\"\"\"Determine if a variable allows the receive call to perform\n realloc.\n\n Args:\n var (dict): Dictionary of variable properties.\n\n Returns:\n bool: True if the variable allows realloc, False otherwise.\n\n \"\"\"\n if isinstance(var, dict):\n datatype = var.get('datatype', var)\n if ('shape' in datatype) or ('length' in datatype):\n return False\n return True\n \n @classmethod\n def requires_length_var(cls, var):\n r\"\"\"Determine if a variable requires a separate length variable.\n\n Args:\n var (dict): Dictionary of variable properties.\n\n Returns:\n bool: True if a length variable is required, False otherwise.\n\n \"\"\"\n if ((isinstance(var, dict)\n and ((cls.get_native_type(**var) in ['char*', 'string_t',\n 'bytes_t', 'unicode_t'])\n or var.get('datatype', {}).get(\n 'type', var.get('type', None)) in ['1darray'])\n and (not var.get('is_length_var', False))\n and ('length' not in var.get('datatype', {})))):\n return True\n return False\n \n @classmethod\n def requires_shape_var(cls, var):\n r\"\"\"Determine if a variable requires a separate shape variable.\n\n Args:\n var (dict): Dictionary of variable properties.\n\n Returns:\n bool: True if a shape variable is required, False otherwise.\n\n \"\"\"\n if ((isinstance(var, dict)\n and (var.get('datatype', {}).get(\n 'type', var.get('type', None)) == 'ndarray')\n and (not var.get('is_length_var', False))\n and ('shape' not in var.get('datatype', {})))):\n return True\n return False\n \n @classmethod\n def get_native_type(cls, **kwargs):\n r\"\"\"Get the native type.\n\n Args:\n type (str, optional): Name of |yggdrasil| extended JSON\n type or JSONSchema dictionary defining a datatype.\n **kwargs: Additional keyword arguments may be used in determining\n the precise declaration that should be used.\n\n Returns:\n str: The native type.\n\n \"\"\"\n out = super(CModelDriver, cls).get_native_type(**kwargs)\n if not ((out == '*') or ('X' in out) or (out == 'double')):\n return out\n from yggdrasil.metaschema.datatypes import get_type_class\n json_type = kwargs.get('datatype', kwargs.get('type', 'bytes'))\n if isinstance(json_type, str):\n json_type = {'type': json_type}\n assert(isinstance(json_type, dict))\n json_type = get_type_class(json_type['type']).normalize_definition(\n json_type)\n if out == '*':\n json_subtype = copy.deepcopy(json_type)\n json_subtype['type'] = json_subtype.pop('subtype')\n out = cls.get_native_type(datatype=json_subtype)\n if ('length' not in json_type) and ('shape' not in json_type):\n out += '*'\n elif 'X' in 
out:\n precision = json_type['precision']\n if json_type['type'] == 'complex':\n precision_map = {64: 'float',\n 128: 'double',\n 256: 'long_double'}\n if precision in precision_map:\n out = out.replace('X', precision_map[precision])\n else: # pragma: debug\n raise ValueError(\"Unsupported precision for complex types: %d\"\n % precision)\n else:\n out = out.replace('X', str(precision))\n elif out == 'double':\n if json_type['precision'] == 32:\n out = 'float'\n return out.replace(' ', '')\n \n @classmethod\n def get_json_type(cls, native_type):\n r\"\"\"Get the JSON type from the native language type.\n\n Args:\n native_type (str): The native language type.\n\n Returns:\n str, dict: The JSON type.\n\n \"\"\"\n out = {}\n regex_var = r'(?P<type>.+?(?P<precision>\\d*)(?:_t)?)\\s*(?P<pointer>\\**)'\n grp = re.fullmatch(regex_var, native_type).groupdict()\n if grp.get('precision', False):\n out['precision'] = int(grp['precision'])\n grp['type'] = grp['type'].replace(grp['precision'], 'X')\n if grp['type'] == 'char':\n out['type'] = 'bytes'\n out['precision'] = 0\n elif grp['type'] == 'void':\n out['type'] = 'null'\n elif grp['type'].startswith('complex'):\n out['type'] = 'complex'\n precision_map = {'long_double': 256,\n 'double': 128,\n 'float': 64}\n prec_str = grp['type'].split('complex_')[-1]\n if prec_str in precision_map:\n out['precision'] = precision_map[prec_str]\n else: # pragma: debug\n raise ValueError(\"Cannot determine precision for complex type '%s'\"\n % grp['type'])\n else:\n if grp['type'] == 'double':\n out['precision'] = 8 * 8\n elif grp['type'] == 'float':\n grp['type'] = 'double'\n out['precision'] = 4 * 8\n elif grp['type'] in ['int', 'uint']:\n grp['type'] += 'X_t'\n out['precision'] = 8 * np.dtype('intc').itemsize\n elif grp['type'] in ['bytes_t', 'string_t', 'unicode_t']:\n out['precision'] = 0\n out['type'] = super(CModelDriver, cls).get_json_type(grp['type'])\n if grp.get('pointer', False):\n nptr = len(grp['pointer'])\n if grp['type'] in ['char', 'void']:\n nptr -= 1\n if nptr > 0:\n out['subtype'] = out['type']\n out['type'] = '1darray'\n if out['type'] in _valid_types:\n out['subtype'] = out['type']\n out['type'] = 'scalar'\n return out\n \n @classmethod\n def format_function_param(cls, key, default=None, **kwargs):\n r\"\"\"Return the formatted version of the specified key.\n\n Args:\n key (str): Key in cls.function_param mapping that should be\n formatted.\n default (str, optional): Format that should be returned if key\n is not in cls.function_param. 
Defaults to None.\n **kwargs: Additional keyword arguments are used in formatting the\n request function parameter.\n\n Returns:\n str: Formatted string.\n\n Raises:\n NotImplementedError: If key is not in cls.function_param and default\n is not set.\n\n \"\"\"\n if (key == 'import') and ('filename' in kwargs):\n kwargs['filename'] = os.path.basename(kwargs['filename'])\n elif (key == 'interface') and ('interface_library' in kwargs):\n kwargs['interface_library'] = os.path.basename(\n kwargs['interface_library']).replace('.c', '.h')\n kwargs['default'] = default\n return super(CModelDriver, cls).format_function_param(key, **kwargs)\n \n @classmethod\n def write_model_function_call(cls, model_function, flag_var,\n inputs, outputs, **kwargs):\n r\"\"\"Write lines necessary to call the model function.\n\n Args:\n model_function (str): Handle of the model function that should be\n called.\n flag_var (str): Name of variable that should be used as a flag.\n inputs (list): List of dictionaries describing inputs to the model.\n outputs (list): List of dictionaries describing outputs from the model.\n **kwargs: Additional keyword arguments are passed to the parent\n class's method.\n\n Returns:\n list: Lines required to carry out a call to a model function in\n this language.\n\n \"\"\"\n new_inputs = copy.deepcopy(inputs)\n for x in new_inputs:\n for v in x['vars']:\n if v.get('allow_realloc', False):\n v['name'] = '*' + v['name']\n return super(CModelDriver, cls).write_model_function_call(\n model_function, flag_var, new_inputs, outputs, **kwargs)\n \n @classmethod\n def write_model_recv(cls, channel, recv_var, **kwargs):\n r\"\"\"Write a model receive call include checking the return flag.\n\n Args:\n channel (str): Name of variable that the channel being received from\n was stored in.\n recv_var (dict, list): Information of one or more variables that\n receieved information should be stored in.\n **kwargs: Additional keyword arguments are passed to the parent\n class's method.\n\n Returns:\n list: Lines required to carry out a receive call in this language.\n\n \"\"\"\n recv_var_str = recv_var\n if not isinstance(recv_var, str):\n recv_var_par = cls.channels2vars(recv_var)\n allows_realloc = [cls.allows_realloc(v)\n for v in recv_var_par]\n if all(allows_realloc):\n kwargs['alt_recv_function'] = cls.function_param['recv_heap']\n else:\n kwargs['alt_recv_function'] = cls.function_param['recv_stack']\n recv_var_str = cls.prepare_output_variables(\n recv_var_par, in_inputs=cls.outputs_in_inputs,\n for_yggdrasil=True)\n return super(CModelDriver, cls).write_model_recv(channel, recv_var_str, **kwargs)\n \n @classmethod\n def write_declaration(cls, var, **kwargs):\n r\"\"\"Return the lines required to declare a variable with a certain\n type.\n\n Args:\n var (dict, str): Name or information dictionary for the variable\n being declared.\n **kwargs: Addition keyword arguments are passed to the parent\n class's method.\n\n Returns:\n list: The lines declaring the variable.\n\n \"\"\"\n if isinstance(var, str): # pragma: no cover\n var = {'name': var}\n type_name = cls.get_native_type(**var)\n if var.get('allow_realloc', False):\n type_name += '*'\n var = dict(var, native_type=type_name)\n if ((type_name.endswith('*')\n or (type_name in ['bytes_t', 'string_t', 'unicode_t']))):\n kwargs.get('requires_freeing', []).append(var)\n kwargs.setdefault('value', 'NULL')\n elif var.get('is_length_var', False):\n kwargs.setdefault('value', '0')\n var = dict(var, name=cls.get_name_declare(var))\n out = 
super(CModelDriver, cls).write_declaration(var, **kwargs)\n for k in ['length', 'ndim', 'shape']:\n if ((isinstance(var.get(k + '_var', None), dict)\n and var[k + '_var'].get('dependent', False))):\n out += cls.write_declaration(var[k + '_var'])\n return out\n\n @classmethod\n def get_name_declare(cls, var):\n r\"\"\"Determine the name that should be used for declaration.\n\n Args:\n var (str, dict): Name of variable or dictionary of information.\n\n Returns:\n str: Modified name for declaration.\n\n \"\"\"\n if isinstance(var, str): # pragma: no cover\n return var\n assert(isinstance(var, dict))\n out = var['name']\n if 'length' in var.get('datatype', {}):\n out += '[%d]' % var['datatype']['length']\n elif 'shape' in var.get('datatype', {}):\n for s in var['datatype']['shape']:\n out += '[%d]' % s\n return out\n \n @classmethod\n def write_free(cls, var, **kwargs):\n r\"\"\"Return the lines required to free a variable with a certain type.\n\n Args:\n var (dict, str): Name or information dictionary for the variable\n being declared.\n **kwargs: Additional keyword arguments are passed to the parent\n class's method.\n\n Returns:\n list: The lines freeing the variable.\n\n \"\"\"\n out = []\n if isinstance(var, str):\n var = {'name': var}\n if ((isinstance(var.get('datatype', False), dict)\n and (('free_%s' % var['datatype']['type'])\n in cls.function_param))):\n if var.get('allow_realloc', False):\n out += super(CModelDriver, cls).write_free(\n var, **kwargs)\n var = {'name': var['name']}\n else:\n var = dict(var, name=('&' + var['name']))\n out += super(CModelDriver, cls).write_free(var, **kwargs)\n return out\n \n @classmethod\n def prepare_variables(cls, vars_list, in_definition=False,\n for_yggdrasil=False):\n r\"\"\"Concatenate a set of input variables such that it can be passed as a\n single string to the function_call parameter.\n\n Args:\n vars_list (list): List of variable dictionaries containing info\n (e.g. names) that should be used to prepare a string representing\n input/output to/from a function call.\n in_definition (bool, optional): If True, the returned sequence\n will be of the format required for specifying variables\n in a function definition. Defaults to False.\n for_yggdrasil (bool, optional): If True, the variables will be\n prepared in the formated expected by calls to yggdarsil\n send/recv methods. 
Defaults to False.\n\n Returns:\n str: Concatentated variables list.\n\n \"\"\"\n if not isinstance(vars_list, list):\n vars_list = [vars_list]\n new_vars_list = []\n for x in vars_list:\n if isinstance(x, str):\n new_vars_list.append(x)\n else:\n assert(isinstance(x, dict))\n if for_yggdrasil and x.get('is_length_var', False):\n continue\n new_vars_list.append(x)\n if for_yggdrasil:\n for k in ['length', 'ndim', 'shape']:\n kvar = k + '_var'\n if x.get(kvar, False):\n if ((x['name'].startswith('*')\n or x['name'].startswith('&'))):\n new_vars_list.append(\n dict(x[kvar],\n name=x['name'][0] + x[kvar]['name']))\n else:\n new_vars_list.append(x[kvar])\n if in_definition:\n new_vars_list2 = []\n for x in new_vars_list:\n if x['name'].startswith('*'):\n name = '%s%s* %s' % tuple(\n [cls.get_native_type(**x)]\n + x['name'].rsplit('*', 1))\n else:\n name = '%s %s' % (cls.get_native_type(**x), x['name'])\n new_var = dict(x, name=name)\n new_var['name'] = cls.get_name_declare(new_var)\n new_vars_list2.append(new_var)\n new_vars_list = new_vars_list2\n return super(CModelDriver, cls).prepare_variables(\n new_vars_list, in_definition=in_definition,\n for_yggdrasil=for_yggdrasil)\n \n @classmethod\n def prepare_output_variables(cls, vars_list, in_definition=False,\n in_inputs=False, for_yggdrasil=False):\n r\"\"\"Concatenate a set of output variables such that it can be passed as\n a single string to the function_call parameter.\n\n Args:\n vars_list (list): List of variable names to concatenate as output\n from a function call.\n in_definition (bool, optional): If True, the returned sequence\n will be of the format required for specifying output\n variables in a function definition. Defaults to False.\n in_inputs (bool, optional): If True, the output variables should\n be formated to be included as input variables. Defaults to\n False.\n for_yggdrasil (bool, optional): If True, the variables will be\n prepared in the formated expected by calls to yggdarsil\n send/recv methods. Defaults to False.\n\n Returns:\n str: Concatentated variables list.\n\n \"\"\"\n if not in_inputs:\n # If the output is a True output and not passed as an input\n # parameter, then the output should not include the type\n # information that is added if in_definition is True.\n in_definition = False\n return super(CModelDriver, cls).prepare_output_variables(\n vars_list, in_definition=in_definition, in_inputs=in_inputs,\n for_yggdrasil=for_yggdrasil)\n\n @classmethod\n def write_print_output_var(cls, var, in_inputs=False, **kwargs):\n r\"\"\"Get the lines necessary to print an output variable in this\n language.\n\n Args:\n var (dict): Variable information.\n in_inputs (bool, optional): If True, the output variable\n is passed in as an input variable to be populated.\n Defaults to False.\n **kwargs: Additional keyword arguments are passed to write_print_var.\n\n Returns:\n list: Lines printing the specified variable.\n\n \"\"\"\n if in_inputs and (cls.language != 'c++'):\n if isinstance(var, dict):\n var = dict(var, name='%s[0]' % var['name'])\n else:\n var = '%s[0]' % var\n return super(CModelDriver, cls).write_print_output_var(\n var, in_inputs=in_inputs, **kwargs)\n \n @classmethod\n def write_function_def(cls, function_name, dont_add_lengths=False,\n use_length_prefix=False, **kwargs):\n r\"\"\"Write a function definition.\n\n Args:\n function_name (str): Name fo the function being defined.\n dont_add_lengths (bool, optional): If True, length variables\n are not added for arrays. 
Defaults to False.\n use_length_prefix (bool, optional): If True and length variables\n are added, they will be named using prefixes. Otherwise,\n suffixes will be used. Defaults to False.\n **kwargs: Additional keyword arguments are passed to the\n parent class's method.\n\n Returns:\n list: Lines completing the function call.\n\n Raises:\n ValueError: If outputs_in_inputs is not True and more than\n one output variable is specified.\n\n \"\"\"\n if not dont_add_lengths:\n for io in ['input', 'output']:\n if io + '_var' in kwargs:\n io_var = cls.parse_var_definition(\n io + 's', kwargs.pop(io + '_var'))\n else:\n io_var = kwargs.get(io + 's', [])\n for x in io_var:\n if use_length_prefix:\n v_length = 'length_' + x['name']\n v_ndim = 'ndim_' + x['name']\n v_shape = 'shape_' + x['name']\n else:\n v_length = x['name'] + '_length'\n v_ndim = x['name'] + '_ndim'\n v_shape = x['name'] + '_shape'\n if x.get('is_length_var', False):\n continue\n if cls.requires_length_var(x):\n if not x.get('length_var', False):\n x['length_var'] = {\n 'name': v_length,\n 'datatype': {'type': 'uint',\n 'precision': 64},\n 'is_length_var': True}\n io_var.append(x['length_var'])\n elif cls.requires_shape_var(x):\n if not x.get('ndim_var', False):\n x['ndim_var'] = {\n 'name': v_ndim,\n 'datatype': {'type': 'uint',\n 'precision': 64},\n 'is_length_var': True}\n io_var.append(x['ndim_var'])\n if not x.get('shape_var', False):\n x['shape_var'] = {\n 'name': v_shape,\n 'datatype': {'type': '1darray',\n 'subtype': 'uint',\n 'precision': 64},\n 'is_length_var': True}\n io_var.append(x['shape_var'])\n length_var = {\n 'name': v_length,\n 'datatype': {'type': 'uint',\n 'precision': 64},\n 'is_length_var': True}\n kwargs['function_contents'] = (\n cls.write_declaration(length_var)\n + kwargs.get('function_contents', []))\n kwargs[io + 's'] = io_var\n output_type = None\n if kwargs.get('outputs_in_inputs', False):\n output_type = cls.get_native_type(datatype='flag')\n else:\n if 'output_var' in kwargs:\n kwargs['outputs'] = cls.parse_var_definition(\n 'outputs', kwargs.pop('output_var'))\n outputs = kwargs.get('outputs', [])\n nout = len(outputs)\n if nout == 0:\n output_type = 'void'\n elif nout == 1:\n output_type = cls.get_native_type(**(outputs[0]))\n else: # pragma: debug\n raise ValueError(\"C does not support more than one \"\n \"output variable.\")\n kwargs['output_type'] = output_type\n return super(CModelDriver, cls).write_function_def(\n function_name, **kwargs)\n \n @classmethod\n def write_native_type_definition(cls, name, datatype, name_base=None,\n requires_freeing=None, no_decl=False,\n use_generic=False):\n r\"\"\"Get lines declaring the data type within the language.\n\n Args:\n name (str): Name of variable that definition should be stored in.\n datatype (dict): Type definition.\n requires_freeing (list, optional): List that variables requiring\n freeing should be appended to. Defaults to None.\n no_decl (bool, optional): If True, the variable is defined without\n declaring it (assumes that variable has already been declared).\n Defaults to False.\n use_generic (bool, optional): If True variables serialized\n and/or deserialized by the type will be assumed to be\n generic objects. 
Defaults to False.\n\n Returns:\n list: Lines required to define a type definition.\n\n \"\"\"\n out = []\n fmt = None\n keys = {}\n if use_generic:\n keys['use_generic'] = 'true'\n else:\n keys['use_generic'] = 'false'\n typename = datatype['type']\n if name_base is None:\n name_base = name\n if datatype['type'] == 'array':\n if 'items' in datatype:\n assert(isinstance(datatype['items'], list))\n keys['nitems'] = len(datatype['items'])\n keys['items'] = '%s_items' % name_base\n fmt = ('create_dtype_json_array({nitems}, {items}, '\n '{use_generic})')\n out += [('dtype_t** %s = '\n '(dtype_t**)malloc(%d*sizeof(dtype_t*));')\n % (keys['items'], keys['nitems'])]\n for i, x in enumerate(datatype['items']):\n # Prevent recusion\n x_copy = copy.deepcopy(x)\n x_copy.pop('items', None)\n x_copy.pop('properties', None)\n out += cls.write_native_type_definition(\n '%s[%d]' % (keys['items'], i), x_copy,\n name_base=('%s_item%d' % (name_base, i)),\n requires_freeing=requires_freeing, no_decl=True,\n use_generic=use_generic)\n assert(isinstance(requires_freeing, list))\n requires_freeing += [keys['items']]\n else:\n keys['use_generic'] = 'true'\n fmt = ('create_dtype_json_array(0, NULL, '\n '{use_generic})')\n elif datatype['type'] == 'object':\n keys['use_generic'] = 'true'\n if 'properties' in datatype:\n assert(isinstance(datatype['properties'], dict))\n keys['nitems'] = len(datatype['properties'])\n keys['keys'] = '%s_keys' % name_base\n keys['values'] = '%s_vals' % name_base\n fmt = ('create_dtype_json_object({nitems}, {keys}, '\n '{values}, {use_generic})')\n out += [('dtype_t** %s = '\n '(dtype_t**)malloc(%d*sizeof(dtype_t*));')\n % (keys['values'], keys['nitems']),\n ('char** %s = (char**)malloc(%d*sizeof(char*));')\n % (keys['keys'], keys['nitems'])]\n for i, (k, v) in enumerate(datatype['properties'].items()):\n # Prevent recusion\n v_copy = copy.deepcopy(v)\n v_copy.pop('items', None)\n v_copy.pop('properties', None)\n out += ['%s[%d] = \\\"%s\\\";' % (keys['keys'], i, k)]\n out += cls.write_native_type_definition(\n '%s[%d]' % (keys['values'], i), v_copy,\n name_base=('%s_prop%d' % (name_base, i)),\n requires_freeing=requires_freeing, no_decl=True,\n use_generic=use_generic)\n assert(isinstance(requires_freeing, list))\n requires_freeing += [keys['values'], keys['keys']]\n else:\n fmt = ('create_dtype_json_object(0, NULL, NULL, '\n '{use_generic})')\n elif datatype['type'] in ['ply', 'obj']:\n fmt = 'create_dtype_%s({use_generic})' % datatype['type']\n elif datatype['type'] == '1darray':\n fmt = ('create_dtype_1darray(\\\"{subtype}\\\", {precision}, {length}, '\n '\\\"{units}\\\", {use_generic})')\n for k in ['subtype', 'precision']:\n keys[k] = datatype[k]\n keys['length'] = datatype.get('length', '0')\n keys['units'] = datatype.get('units', '')\n elif datatype['type'] == 'ndarray':\n fmt = ('create_dtype_ndarray(\\\"{subtype}\\\", {precision},'\n ' {ndim}, {shape}, \\\"{units}\\\", {use_generic})')\n for k in ['subtype', 'precision']:\n keys[k] = datatype[k]\n if 'shape' in datatype:\n shape_var = '%s_shape' % name_base\n out += ['size_t %s[%d] = {%s};' % (\n shape_var, len(datatype['shape']),\n ', '.join([str(s) for s in datatype['shape']]))]\n keys['ndim'] = len(datatype['shape'])\n keys['shape'] = shape_var\n fmt = fmt.replace('create_dtype_ndarray',\n 'create_dtype_ndarray_arr')\n else:\n keys['ndim'] = 0\n keys['shape'] = 'NULL'\n keys['units'] = datatype.get('units', '')\n elif (typename == 'scalar') or (typename in _valid_types):\n fmt = ('create_dtype_scalar(\\\"{subtype}\\\", 
{precision}, '\n '\\\"{units}\\\", {use_generic})')\n keys['subtype'] = datatype.get('subtype', datatype['type'])\n keys['units'] = datatype.get('units', '')\n if keys['subtype'] in ['bytes', 'string', 'unicode']:\n keys['precision'] = datatype.get('precision', 0)\n else:\n keys['precision'] = datatype['precision']\n typename = 'scalar'\n elif datatype['type'] in ['boolean', 'null', 'number',\n 'integer', 'string']:\n fmt = 'create_dtype_default(\\\"{type}\\\", {use_generic})'\n keys['type'] = datatype['type']\n elif (typename in ['class', 'function']):\n fmt = 'create_dtype_pyobj(\\\"{type}\\\", {use_generic})'\n keys['type'] = typename\n elif typename == 'instance':\n keys['use_generic'] = 'true'\n # fmt = 'create_dtype_pyinst(NULL, NULL)'\n fmt = 'create_dtype_empty({use_generic})'\n elif typename == 'schema':\n keys['use_generic'] = 'true'\n fmt = 'create_dtype_schema({use_generic})'\n elif typename == 'any':\n keys['use_generic'] = 'true'\n fmt = 'create_dtype_empty({use_generic})'\n else: # pragma: debug\n raise ValueError(\"Cannot create C version of type '%s'\"\n % typename)\n def_line = '%s = %s;' % (name, fmt.format(**keys))\n if not no_decl:\n def_line = 'dtype_t* ' + def_line\n out.append(def_line)\n return out\n\n @classmethod\n def write_channel_def(cls, key, datatype=None, requires_freeing=None,\n use_generic=False, **kwargs):\n r\"\"\"Write an channel declaration/definition.\n\n Args:\n key (str): Entry in cls.function_param that should be used.\n datatype (dict, optional): Data type associated with the channel.\n Defaults to None and is ignored.\n requires_freeing (list, optional): List that variables requiring\n freeing should be appended to. Defaults to None.\n use_generic (bool, optional): If True variables serialized\n and/or deserialized by the channel will be assumed to be\n generic objects. Defaults to False.\n **kwargs: Additional keyword arguments are passed as parameters\n to format_function_param.\n\n Returns:\n list: Lines required to declare and define an output channel.\n\n \"\"\"\n out = []\n if (datatype is not None) and ('{channel_type}' in cls.function_param[key]):\n kwargs['channel_type'] = '%s_type' % kwargs['channel']\n out += cls.write_native_type_definition(\n kwargs['channel_type'], datatype,\n requires_freeing=requires_freeing,\n use_generic=use_generic)\n out += super(CModelDriver, cls).write_channel_def(key, datatype=datatype,\n **kwargs)\n return out\n\n @classmethod\n def write_assign_to_output(cls, dst_var, src_var,\n outputs_in_inputs=False,\n dont_add_lengths=False,\n use_length_prefix=False, **kwargs):\n r\"\"\"Write lines assigning a value to an output variable.\n\n Args:\n dst_var (str, dict): Name or information dictionary for\n variable being assigned to.\n src_var (str, dict): Name or information dictionary for\n value being assigned to dst_var.\n outputs_in_inputs (bool, optional): If True, outputs are passed\n as input parameters. In some languages, this means that a\n pointer or reference is passed (e.g. C) and so the assignment\n should be to the memory indicated rather than the variable.\n Defaults to False.\n dont_add_lengths (bool, optional): If True, length variables\n are not added for arrays. Defaults to False.\n use_length_prefix (bool, optional): If True and length variables\n are added, they will be named using prefixes. Otherwise,\n suffixes will be used. 
Defaults to False.\n **kwargs: Additional keyword arguments are passed to the parent\n class's method.\n\n Returns:\n list: Lines achieving assignment.\n\n \"\"\"\n out = []\n if cls.requires_length_var(dst_var):\n src_var_length = None\n dst_var_length = None\n if isinstance(src_var, dict):\n src_var_length = src_var.get('length_var', None)\n if isinstance(dst_var, dict):\n dst_var_length = dst_var.get('length_var', None)\n if not dont_add_lengths:\n if src_var_length is None:\n if use_length_prefix:\n src_var_length = 'length_' + src_var['name']\n else:\n src_var_length = src_var['name'] + '_length'\n if dst_var_length is None:\n if use_length_prefix:\n dst_var_length = 'length_' + dst_var['name']\n else:\n dst_var_length = dst_var['name'] + '_length'\n out += cls.write_assign_to_output(\n dst_var_length, src_var_length,\n outputs_in_inputs=outputs_in_inputs)\n elif src_var_length is None:\n if ((dst_var['datatype']['type']\n in ['1darray', 'ndarray'])): # pragma: debug\n raise RuntimeError(\"Length must be set in order \"\n \"to write array assignments.\")\n elif (dst_var['datatype'].get('subtype', dst_var['datatype']['type'])\n in ['bytes']):\n src_var_length = '(strlen(%s)+1)' % src_var['name']\n else:\n src_var_length = '(strlen4(%s)+1)' % src_var['name']\n src_var_dtype = cls.get_native_type(**src_var)\n if src_var_dtype in ['bytes_t', 'unicode_t', 'string_t']:\n src_var_dtype = 'char*'\n src_var_dtype = src_var_dtype.rsplit('*', 1)[0]\n out += cls.write_assign_to_output(\n dst_var['name'], 'value',\n outputs_in_inputs=outputs_in_inputs,\n replacement=('{name} = ({native_type}*)realloc({name}, '\n '{N}*sizeof({native_type}));'),\n native_type=src_var_dtype, N=src_var_length)\n kwargs.update(copy=True, native_type=src_var_dtype,\n N=src_var_length)\n elif cls.requires_shape_var(dst_var):\n if dont_add_lengths: # pragma: debug\n raise RuntimeError(\"Shape must be set in order \"\n \"to write ND array assignments.\")\n # Dimensions\n src_var_ndim = None\n dst_var_ndim = None\n if isinstance(src_var, dict):\n src_var_ndim = src_var.get('ndim_var', None)\n if isinstance(dst_var, dict):\n dst_var_ndim = dst_var.get('ndim_var', None)\n if src_var_ndim is None:\n if use_length_prefix:\n src_var_ndim = 'ndim_' + src_var['name']\n else:\n src_var_ndim = src_var['name'] + '_ndim'\n if dst_var_ndim is None:\n if use_length_prefix:\n dst_var_ndim = 'ndim_' + dst_var['name']\n else:\n dst_var_ndim = dst_var['name'] + '_ndim'\n if isinstance(src_var_ndim, str):\n src_var_ndim = {'name': src_var_ndim,\n 'datatype': {'type': 'uint',\n 'precision': 64}}\n if isinstance(dst_var_ndim, str):\n dst_var_ndim = {'name': dst_var_ndim,\n 'datatype': {'type': 'uint',\n 'precision': 64}}\n\n out += cls.write_assign_to_output(\n dst_var_ndim, src_var_ndim,\n outputs_in_inputs=outputs_in_inputs)\n # Shape\n src_var_shape = None\n dst_var_shape = None\n if isinstance(src_var, dict):\n src_var_shape = src_var.get('shape_var', None)\n if isinstance(dst_var, dict):\n dst_var_shape = dst_var.get('shape_var', None)\n if src_var_shape is None:\n if use_length_prefix:\n src_var_shape = 'shape_' + src_var['name']\n else:\n src_var_shape = src_var['name'] + '_shape'\n if dst_var_shape is None:\n if use_length_prefix:\n dst_var_shape = 'shape_' + dst_var['name']\n else:\n dst_var_shape = dst_var['name'] + '_shape'\n if isinstance(src_var_shape, str):\n src_var_shape = {'name': src_var_shape,\n 'datatype': {'type': '1darray',\n 'subtype': 'uint',\n 'precision': 64},\n 'length_var': src_var_ndim['name']}\n if 
isinstance(dst_var_shape, str):\n dst_var_shape = {'name': dst_var_shape,\n 'datatype': {'type': '1darray',\n 'subtype': 'uint',\n 'precision': 64},\n 'length_var': dst_var_ndim['name']}\n out += cls.write_assign_to_output(\n dst_var_shape, src_var_shape,\n outputs_in_inputs=outputs_in_inputs,\n dont_add_lengths=True)\n src_var_dtype = cls.get_native_type(**src_var).rsplit('*', 1)[0]\n if use_length_prefix:\n src_var_length = 'length_' + src_var['name']\n else:\n src_var_length = src_var['name'] + '_length'\n out += (('{length} = 1;\\n'\n 'size_t cdim;\\n'\n 'for (cdim = 0; cdim < {ndim}; cdim++) {{\\n'\n ' {length} = {length}*{shape}[cdim];\\n'\n '}}\\n').format(length=src_var_length,\n ndim=src_var_ndim['name'],\n shape=src_var_shape['name'])).splitlines()\n out += cls.write_assign_to_output(\n dst_var['name'], 'value',\n outputs_in_inputs=outputs_in_inputs,\n replacement=('{name} = ({native_type}*)realloc({name}, '\n '{N}*sizeof({native_type}));'),\n native_type=src_var_dtype, N=src_var_length)\n kwargs.update(copy=True, native_type=src_var_dtype,\n N=src_var_length)\n elif isinstance(dst_var, dict):\n if 'shape' in dst_var.get('datatype', {}):\n nele = 1\n for s in dst_var['datatype']['shape']:\n nele *= s\n kwargs.update(copy=True, N=nele,\n native_type=dst_var['datatype']['subtype'])\n elif 'length' in dst_var.get('datatype', {}):\n kwargs.update(copy=True, N=dst_var['datatype']['length'],\n native_type=dst_var['datatype']['subtype'])\n if outputs_in_inputs and (cls.language != 'c++'):\n if isinstance(dst_var, dict):\n dst_var = dict(dst_var,\n name='%s[0]' % dst_var['name'])\n else:\n dst_var = '%s[0]' % dst_var\n if ((outputs_in_inputs and isinstance(dst_var, dict)\n and isinstance(dst_var['datatype'], dict)\n and ('copy_' + dst_var['datatype']['type']\n in cls.function_param))):\n kwargs['copy'] = True\n out += super(CModelDriver, cls).write_assign_to_output(\n dst_var, src_var, outputs_in_inputs=outputs_in_inputs,\n **kwargs)\n return out\n"
] | [
[
"numpy.distutils.misc_util.get_numpy_include_dirs",
"numpy.dtype"
]
] |
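The `apis` list above captures the two NumPy entry points exercised by this yggdrasil `CModelDriver` source: `numpy.distutils.misc_util.get_numpy_include_dirs`, used to locate `numpy/arrayobject.h` for the header-only `numpy` external library, and `numpy.dtype`, used in `get_json_type` to derive the bit precision of a native C `int`. The snippet below is a minimal standalone sketch of those same two calls, not part of the record itself; it assumes a NumPy release that still bundles `numpy.distutils` (the module is deprecated and absent from newer versions), and the printed paths and values will vary by installation.

import os
import numpy as np
from numpy.distutils import misc_util  # assumption: numpy.distutils is still importable here

# Locate the directory containing numpy/arrayobject.h, mirroring how the
# record's code builds the include path for its 'numpy' external library.
numpy_inc = misc_util.get_numpy_include_dirs()
arrayobject_h = os.path.join(numpy_inc[0], 'numpy', 'arrayobject.h')
print(arrayobject_h, os.path.isfile(arrayobject_h))

# Derive the bit precision of a C 'int' the same way get_json_type does
# when mapping native 'int'/'uint' types back to JSON type metadata.
print(8 * np.dtype('intc').itemsize)  # typically 32 on common platforms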
marcoabrate/transformers | [
"3f77c26d74e1282955fefa8dfff2451e44f6d4a9"
] | [
"src/transformers/trainer.py"
] | [
"# coding=utf-8\n# Copyright 2020-present the HuggingFace Inc. team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"\nThe Trainer class, to easily train a 🤗 Transformers from scratch or finetune it on a new task.\n\"\"\"\n\nimport collections\nimport inspect\nimport math\nimport os\nimport re\nimport shutil\nimport time\nimport warnings\nfrom pathlib import Path\nfrom typing import TYPE_CHECKING, Any, Callable, Dict, List, Optional, Tuple, Union\n\n\n# Integrations must be imported before ML frameworks:\nfrom .integrations import ( # isort: split\n default_hp_search_backend,\n get_reporting_integration_callbacks,\n hp_params,\n is_fairscale_available,\n is_optuna_available,\n is_ray_tune_available,\n run_hp_search_optuna,\n run_hp_search_ray,\n init_deepspeed,\n)\n\nimport numpy as np\nimport torch\nfrom packaging import version\nfrom torch import nn\nfrom torch.utils.data.dataloader import DataLoader\nfrom torch.utils.data.dataset import Dataset\nfrom torch.utils.data.distributed import DistributedSampler\nfrom torch.utils.data.sampler import RandomSampler, SequentialSampler\n\nfrom .data.data_collator import DataCollator, DataCollatorWithPadding, default_data_collator\nfrom .file_utils import (\n WEIGHTS_NAME,\n is_apex_available,\n is_datasets_available,\n is_in_notebook,\n is_sagemaker_distributed_available,\n is_torch_tpu_available,\n)\nfrom .modeling_utils import PreTrainedModel\nfrom .models.auto.modeling_auto import MODEL_FOR_QUESTION_ANSWERING_MAPPING\nfrom .optimization import Adafactor, AdamW, get_scheduler\nfrom .tokenization_utils_base import PreTrainedTokenizerBase\nfrom .trainer_callback import (\n CallbackHandler,\n DefaultFlowCallback,\n PrinterCallback,\n ProgressCallback,\n TrainerCallback,\n TrainerControl,\n TrainerState,\n)\nfrom .trainer_pt_utils import (\n DistributedLengthGroupedSampler,\n DistributedTensorGatherer,\n LabelSmoother,\n LengthGroupedSampler,\n SequentialDistributedSampler,\n distributed_broadcast_scalars,\n distributed_concat,\n nested_concat,\n nested_detach,\n nested_numpify,\n nested_xla_mesh_reduce,\n reissue_pt_warnings,\n)\nfrom .trainer_utils import (\n PREFIX_CHECKPOINT_DIR,\n BestRun,\n EvalPrediction,\n HPSearchBackend,\n PredictionOutput,\n TrainOutput,\n default_compute_objective,\n default_hp_space,\n set_seed,\n speed_metrics,\n)\nfrom .training_args import ParallelMode, TrainingArguments\nfrom .utils import logging\n\n\n_is_native_amp_available = False\n\nDEFAULT_CALLBACKS = [DefaultFlowCallback]\nDEFAULT_PROGRESS_CALLBACK = ProgressCallback\n\nif is_in_notebook():\n from .utils.notebook import NotebookProgressCallback\n\n DEFAULT_PROGRESS_CALLBACK = NotebookProgressCallback\n\nif is_apex_available():\n from apex import amp\n\nif version.parse(torch.__version__) >= version.parse(\"1.6\"):\n _is_native_amp_available = True\n from torch.cuda.amp import autocast\n\nif is_datasets_available():\n import datasets\n\nif is_torch_tpu_available():\n import torch_xla.core.xla_model as xm\n import torch_xla.debug.metrics as met\n import 
torch_xla.distributed.parallel_loader as pl\n\nif is_fairscale_available():\n from fairscale.nn.data_parallel import ShardedDataParallel as ShardedDDP\n from fairscale.optim import OSS\n from fairscale.optim.grad_scaler import ShardedGradScaler\n\nif is_sagemaker_distributed_available():\n import smdistributed.dataparallel.torch.distributed as dist\n from smdistributed.dataparallel.torch.parallel.distributed import DistributedDataParallel as DDP\nelse:\n import torch.distributed as dist\n\nif TYPE_CHECKING:\n import optuna\n\nlogger = logging.get_logger(__name__)\n\n\ndef _model_unwrap(model: nn.Module) -> nn.Module:\n # since there could be multiple levels of wrapping, unwrap recursively\n if hasattr(model, \"module\"):\n return _model_unwrap(model.module)\n else:\n return model\n\n\nclass Trainer:\n \"\"\"\n Trainer is a simple but feature-complete training and eval loop for PyTorch, optimized for 🤗 Transformers.\n\n Args:\n model (:class:`~transformers.PreTrainedModel` or :obj:`torch.nn.Module`, `optional`):\n The model to train, evaluate or use for predictions. If not provided, a ``model_init`` must be passed.\n\n .. note::\n\n :class:`~transformers.Trainer` is optimized to work with the :class:`~transformers.PreTrainedModel`\n provided by the library. You can still use your own models defined as :obj:`torch.nn.Module` as long as\n they work the same way as the 🤗 Transformers models.\n args (:class:`~transformers.TrainingArguments`, `optional`):\n The arguments to tweak for training. Will default to a basic instance of\n :class:`~transformers.TrainingArguments` with the ``output_dir`` set to a directory named `tmp_trainer` in\n the current directory if not provided.\n data_collator (:obj:`DataCollator`, `optional`):\n The function to use to form a batch from a list of elements of :obj:`train_dataset` or :obj:`eval_dataset`.\n Will default to :func:`~transformers.default_data_collator` if no ``tokenizer`` is provided, an instance of\n :func:`~transformers.DataCollatorWithPadding` otherwise.\n train_dataset (:obj:`torch.utils.data.dataset.Dataset`, `optional`):\n The dataset to use for training. If it is an :obj:`datasets.Dataset`, columns not accepted by the\n ``model.forward()`` method are automatically removed.\n eval_dataset (:obj:`torch.utils.data.dataset.Dataset`, `optional`):\n The dataset to use for evaluation. If it is an :obj:`datasets.Dataset`, columns not accepted by the\n ``model.forward()`` method are automatically removed.\n tokenizer (:class:`PreTrainedTokenizerBase`, `optional`):\n The tokenizer used to preprocess the data. If provided, will be used to automatically pad the inputs the\n maximum length when batching inputs, and it will be saved along the model to make it easier to rerun an\n interrupted training or reuse the fine-tuned model.\n model_init (:obj:`Callable[[], PreTrainedModel]`, `optional`):\n A function that instantiates the model to be used. If provided, each call to\n :meth:`~transformers.Trainer.train` will start from a new instance of the model as given by this function.\n\n The function may have zero argument, or a single one containing the optuna/Ray Tune trial object, to be\n able to choose different architectures according to hyper parameters (such as layer count, sizes of inner\n layers, dropout probabilities etc).\n compute_metrics (:obj:`Callable[[EvalPrediction], Dict]`, `optional`):\n The function that will be used to compute metrics at evaluation. 
Must take a\n :class:`~transformers.EvalPrediction` and return a dictionary string to metric values.\n callbacks (List of :obj:`~transformers.TrainerCallback`, `optional`):\n A list of callbacks to customize the training loop. Will add those to the list of default callbacks\n detailed in :doc:`here <callback>`.\n\n If you want to remove one of the default callbacks used, use the :meth:`Trainer.remove_callback` method.\n optimizers (:obj:`Tuple[torch.optim.Optimizer, torch.optim.lr_scheduler.LambdaLR`, `optional`): A tuple\n containing the optimizer and the scheduler to use. Will default to an instance of\n :class:`~transformers.AdamW` on your model and a scheduler given by\n :func:`~transformers.get_linear_schedule_with_warmup` controlled by :obj:`args`.\n\n Important attributes:\n\n - **model** -- Always points to the core model. If using a transformers model, it will be a\n :class:`~transformers.PreTrainedModel` subclass.\n - **model_wrapped** -- Always points to the most external model in case one or more other modules wrap the\n original model. This is the model that should be used for the forward pass. For example, under ``DeepSpeed``,\n the inner model is wrapped in ``DeepSpeed`` and then again in ``torch.nn.DistributedDataParallel``. If the\n inner model hasn't been wrapped, then ``self.model_wrapped`` is the same as ``self.model``.\n - **is_model_parallel** -- Whether or not a model has been switched to a model parallel mode (different from\n data parallelism, this means some of the model layers are split on different GPUs).\n \"\"\"\n\n def __init__(\n self,\n model: Union[PreTrainedModel, torch.nn.Module] = None,\n args: TrainingArguments = None,\n data_collator: Optional[DataCollator] = None,\n train_dataset: Optional[Dataset] = None,\n eval_dataset: Optional[Dataset] = None,\n tokenizer: Optional[\"PreTrainedTokenizerBase\"] = None,\n model_init: Callable[[], PreTrainedModel] = None,\n compute_metrics: Optional[Callable[[EvalPrediction], Dict]] = None,\n callbacks: Optional[List[TrainerCallback]] = None,\n optimizers: Tuple[torch.optim.Optimizer, torch.optim.lr_scheduler.LambdaLR] = (None, None),\n ):\n if args is None:\n output_dir = \"tmp_trainer\"\n logger.info(f\"No `TrainingArguments` passed, using `output_dir={output_dir}`.\")\n args = TrainingArguments(output_dir=output_dir)\n self.args = args\n # Seed must be set before instantiating the model when using model\n set_seed(self.args.seed)\n self.hp_name = None\n self.deepspeed = None\n\n if model is None:\n if model_init is not None:\n self.model_init = model_init\n model = self.call_model_init()\n else:\n raise RuntimeError(\"`Trainer` requires either a `model` or `model_init` argument\")\n else:\n if model_init is not None:\n warnings.warn(\n \"`Trainer` requires either a `model` or `model_init` argument, but not both. \"\n \"`model_init` will overwrite your model when calling the `train` method. 
This will become a fatal error in the next release.\",\n FutureWarning,\n )\n self.model_init = model_init\n\n if hasattr(model, \"is_parallelizable\") and model.is_parallelizable and model.model_parallel:\n self.is_model_parallel = True\n else:\n self.is_model_parallel = False\n\n default_collator = default_data_collator if tokenizer is None else DataCollatorWithPadding(tokenizer)\n self.data_collator = data_collator if data_collator is not None else default_collator\n self.train_dataset = train_dataset\n self.eval_dataset = eval_dataset\n self.tokenizer = tokenizer\n\n # Model parallel\n if not self.is_model_parallel:\n model = model.to(args.device)\n else:\n # Force n_gpu to 1 to avoid DataParallel.\n self.args._n_gpu = 1\n\n # later use `self.model is self.model_wrapped` to check if it's wrapped or not\n self.model_wrapped = model\n self.model = model\n\n self.compute_metrics = compute_metrics\n self.optimizer, self.lr_scheduler = optimizers\n if model_init is not None and (self.optimizer is not None or self.lr_scheduler is not None):\n raise RuntimeError(\n \"Passing a `model_init` is incompatible with providing the `optimizers` argument.\"\n \"You should subclass `Trainer` and override the `create_optimizer_and_scheduler` method.\"\n )\n default_callbacks = DEFAULT_CALLBACKS + get_reporting_integration_callbacks(self.args.report_to)\n callbacks = default_callbacks if callbacks is None else default_callbacks + callbacks\n self.callback_handler = CallbackHandler(\n callbacks, self.model, self.tokenizer, self.optimizer, self.lr_scheduler\n )\n self.add_callback(PrinterCallback if self.args.disable_tqdm else DEFAULT_PROGRESS_CALLBACK)\n\n # Will be set to True by `self._setup_loggers()` on first call to `self.log()`.\n self._loggers_initialized = False\n\n # Create output directory if needed\n if self.is_world_process_zero():\n os.makedirs(self.args.output_dir, exist_ok=True)\n if is_torch_tpu_available() and isinstance(self.model, PreTrainedModel):\n # Set an xla_device flag on the model's config.\n # We'll find a more elegant and not need to do this in the future.\n self.model.config.xla_device = True\n if not callable(self.data_collator) and callable(getattr(self.data_collator, \"collate_batch\", None)):\n raise ValueError(\"The `data_collator` should be a simple callable (function, class with `__call__`).\")\n\n if args.max_steps > 0:\n logger.info(\"max_steps is given, it will override any value given in num_train_epochs\")\n\n # Enforce rules on using datasets with no __len__\n if train_dataset is not None and not isinstance(train_dataset, collections.abc.Sized) and args.max_steps <= 0:\n raise ValueError(\"train_dataset does not implement __len__, max_steps has to be specified\")\n if eval_dataset is not None and not isinstance(eval_dataset, collections.abc.Sized):\n raise ValueError(\"eval_dataset must implement __len__\")\n\n if is_datasets_available():\n if isinstance(train_dataset, datasets.Dataset):\n self._remove_unused_columns(self.train_dataset, description=\"training\")\n if isinstance(eval_dataset, datasets.Dataset):\n self._remove_unused_columns(self.eval_dataset, description=\"evaluation\")\n\n # Setup Sharded DDP training\n self.sharded_dpp = False\n if args.sharded_ddp:\n if args.deepspeed:\n raise ValueError(\n \"Using --sharded_ddp together with --deepspeed is not possible, deactivate one of those flags.\"\n )\n\n if args.local_rank == -1:\n raise ValueError(\"Using sharded DDP only works in distributed training.\")\n elif not is_fairscale_available():\n raise 
ImportError(\"Sharded DDP training requires fairscale: `pip install fairscale`.\")\n else:\n self.sharded_dpp = True\n\n # Mixed precision setup\n self.use_apex = False\n self.use_amp = False\n self.fp16_backend = None\n\n if args.fp16:\n if args.fp16_backend == \"auto\":\n self.fp16_backend = \"amp\" if _is_native_amp_available else \"apex\"\n else:\n self.fp16_backend = args.fp16_backend\n logger.info(f\"Using {self.fp16_backend} fp16 backend\")\n\n if args.fp16 and not args.deepspeed: # deepspeed manages its own fp16\n if self.fp16_backend == \"amp\":\n self.use_amp = True\n self.scaler = ShardedGradScaler() if self.sharded_dpp else torch.cuda.amp.GradScaler()\n else:\n if not is_apex_available():\n raise ImportError(\n \"Using FP16 with APEX but APEX is not installed, please refer to https://www.github.com/nvidia/apex.\"\n )\n self.use_apex = True\n\n # Label smoothing\n if self.args.label_smoothing_factor != 0:\n self.label_smoother = LabelSmoother(epsilon=self.args.label_smoothing_factor)\n else:\n self.label_smoother = None\n\n self.state = TrainerState()\n self.control = TrainerControl()\n # Internal variable for total_flos used to count as tensors (for distributed + TPU), will be sent in the\n # state at each call to self.log.\n self._total_flos = None\n self.hp_search_backend = None\n self.use_tune_checkpoints = False\n default_label_names = (\n [\"start_positions\", \"end_positions\"]\n if type(self.model) in MODEL_FOR_QUESTION_ANSWERING_MAPPING.values()\n else [\"labels\"]\n )\n self.label_names = default_label_names if self.args.label_names is None else self.args.label_names\n self.control = self.callback_handler.on_init_end(self.args, self.state, self.control)\n\n def add_callback(self, callback):\n \"\"\"\n Add a callback to the current list of :class:`~transformer.TrainerCallback`.\n\n Args:\n callback (:obj:`type` or :class:`~transformer.TrainerCallback`):\n A :class:`~transformer.TrainerCallback` class or an instance of a :class:`~transformer.TrainerCallback`.\n In the first case, will instantiate a member of that class.\n \"\"\"\n self.callback_handler.add_callback(callback)\n\n def pop_callback(self, callback):\n \"\"\"\n Remove a callback from the current list of :class:`~transformer.TrainerCallback` and returns it.\n\n If the callback is not found, returns :obj:`None` (and no error is raised).\n\n Args:\n callback (:obj:`type` or :class:`~transformer.TrainerCallback`):\n A :class:`~transformer.TrainerCallback` class or an instance of a :class:`~transformer.TrainerCallback`.\n In the first case, will pop the first member of that class found in the list of callbacks.\n\n Returns:\n :class:`~transformer.TrainerCallback`: The callback removed, if found.\n \"\"\"\n return self.callback_handler.pop_callback(callback)\n\n def remove_callback(self, callback):\n \"\"\"\n Remove a callback from the current list of :class:`~transformer.TrainerCallback`.\n\n Args:\n callback (:obj:`type` or :class:`~transformer.TrainerCallback`):\n A :class:`~transformer.TrainerCallback` class or an instance of a :class:`~transformer.TrainerCallback`.\n In the first case, will remove the first member of that class found in the list of callbacks.\n \"\"\"\n self.callback_handler.remove_callback(callback)\n\n def _remove_unused_columns(self, dataset: \"datasets.Dataset\", description: Optional[str] = None):\n if not self.args.remove_unused_columns:\n return\n # Inspect model forward signature to keep only the arguments it accepts.\n signature = inspect.signature(self.model.forward)\n 
signature_columns = list(signature.parameters.keys())\n # Labels may be named label or label_ids, the default data collator handles that.\n signature_columns += [\"label\", \"label_ids\"]\n columns = [k for k in signature_columns if k in dataset.column_names]\n ignored_columns = list(set(dataset.column_names) - set(signature_columns))\n dset_description = \"\" if description is None else f\"in the {description} set \"\n logger.info(\n f\"The following columns {dset_description}don't have a corresponding argument in `{self.model.__class__.__name__}.forward` and have been ignored: {', '.join(ignored_columns)}.\"\n )\n dataset.set_format(type=dataset.format[\"type\"], columns=columns)\n\n def _get_train_sampler(self) -> Optional[torch.utils.data.sampler.Sampler]:\n if isinstance(self.train_dataset, torch.utils.data.IterableDataset) or not isinstance(\n self.train_dataset, collections.abc.Sized\n ):\n return None\n\n # Gather the number of processes and this process index.\n if self.args.parallel_mode == ParallelMode.TPU:\n num_processes = xm.xrt_world_size()\n process_index = xm.get_ordinal()\n elif (\n self.args.parallel_mode == ParallelMode.DISTRIBUTED\n or self.args.parallel_mode == ParallelMode.SAGEMAKER_DISTRIBUTED\n ):\n num_processes = dist.get_world_size()\n process_index = dist.get_rank()\n else:\n num_processes = 1\n process_index = 0\n\n # Build the sampler.\n if self.args.group_by_length:\n if num_processes <= 1:\n return LengthGroupedSampler(self.train_dataset, self.args.train_batch_size)\n else:\n return DistributedLengthGroupedSampler(\n self.train_dataset, self.args.train_batch_size, num_replicas=num_processes, rank=process_index\n )\n\n else:\n if num_processes <= 1:\n return RandomSampler(self.train_dataset)\n else:\n return DistributedSampler(self.train_dataset, num_replicas=num_processes, rank=process_index)\n\n def get_train_dataloader(self) -> DataLoader:\n \"\"\"\n Returns the training :class:`~torch.utils.data.DataLoader`.\n\n Will use no sampler if :obj:`self.train_dataset` does not implement :obj:`__len__`, a random sampler (adapted\n to distributed training if necessary) otherwise.\n\n Subclass and override this method if you want to inject some custom behavior.\n \"\"\"\n if self.train_dataset is None:\n raise ValueError(\"Trainer: training requires a train_dataset.\")\n train_sampler = self._get_train_sampler()\n\n return DataLoader(\n self.train_dataset,\n batch_size=self.args.train_batch_size,\n sampler=train_sampler,\n collate_fn=self.data_collator,\n drop_last=self.args.dataloader_drop_last,\n num_workers=self.args.dataloader_num_workers,\n pin_memory=self.args.dataloader_pin_memory,\n )\n\n def _get_eval_sampler(self, eval_dataset: Dataset) -> Optional[torch.utils.data.sampler.Sampler]:\n if is_torch_tpu_available():\n return SequentialDistributedSampler(eval_dataset, num_replicas=xm.xrt_world_size(), rank=xm.get_ordinal())\n elif self.args.local_rank != -1:\n return SequentialDistributedSampler(eval_dataset)\n else:\n return SequentialSampler(eval_dataset)\n\n def get_eval_dataloader(self, eval_dataset: Optional[Dataset] = None) -> DataLoader:\n \"\"\"\n Returns the evaluation :class:`~torch.utils.data.DataLoader`.\n\n Subclass and override this method if you want to inject some custom behavior.\n\n Args:\n eval_dataset (:obj:`torch.utils.data.dataset.Dataset`, `optional`):\n If provided, will override :obj:`self.eval_dataset`. If it is an :obj:`datasets.Dataset`, columns not\n accepted by the ``model.forward()`` method are automatically removed. 
It must implement :obj:`__len__`.\n \"\"\"\n if eval_dataset is None and self.eval_dataset is None:\n raise ValueError(\"Trainer: evaluation requires an eval_dataset.\")\n elif eval_dataset is not None and not isinstance(eval_dataset, collections.abc.Sized):\n raise ValueError(\"eval_dataset must implement __len__\")\n elif is_datasets_available() and isinstance(eval_dataset, datasets.Dataset):\n self._remove_unused_columns(eval_dataset, description=\"evaluation\")\n eval_dataset = eval_dataset if eval_dataset is not None else self.eval_dataset\n eval_sampler = self._get_eval_sampler(eval_dataset)\n\n return DataLoader(\n eval_dataset,\n sampler=eval_sampler,\n batch_size=self.args.eval_batch_size,\n collate_fn=self.data_collator,\n drop_last=self.args.dataloader_drop_last,\n num_workers=self.args.dataloader_num_workers,\n pin_memory=self.args.dataloader_pin_memory,\n )\n\n def get_test_dataloader(self, test_dataset: Dataset) -> DataLoader:\n \"\"\"\n Returns the test :class:`~torch.utils.data.DataLoader`.\n\n Subclass and override this method if you want to inject some custom behavior.\n\n Args:\n test_dataset (:obj:`torch.utils.data.dataset.Dataset`, `optional`):\n The test dataset to use. If it is an :obj:`datasets.Dataset`, columns not accepted by the\n ``model.forward()`` method are automatically removed. It must implement :obj:`__len__`.\n \"\"\"\n if not isinstance(test_dataset, collections.abc.Sized):\n raise ValueError(\"test_dataset must implement __len__\")\n elif is_datasets_available() and isinstance(test_dataset, datasets.Dataset):\n self._remove_unused_columns(test_dataset, description=\"test\")\n test_sampler = self._get_eval_sampler(test_dataset)\n\n # We use the same batch_size as for eval.\n return DataLoader(\n test_dataset,\n sampler=test_sampler,\n batch_size=self.args.eval_batch_size,\n collate_fn=self.data_collator,\n drop_last=self.args.dataloader_drop_last,\n pin_memory=self.args.dataloader_pin_memory,\n )\n\n def create_optimizer_and_scheduler(self, num_training_steps: int):\n \"\"\"\n Setup the optimizer and the learning rate scheduler.\n\n We provide a reasonable default that works well. 
If you want to use something else, you can pass a tuple in the\n Trainer's init through :obj:`optimizers`, or subclass and override this method in a subclass.\n \"\"\"\n if self.optimizer is None:\n no_decay = [\"bias\", \"LayerNorm.weight\"]\n optimizer_grouped_parameters = [\n {\n \"params\": [p for n, p in self.model.named_parameters() if not any(nd in n for nd in no_decay)],\n \"weight_decay\": self.args.weight_decay,\n },\n {\n \"params\": [p for n, p in self.model.named_parameters() if any(nd in n for nd in no_decay)],\n \"weight_decay\": 0.0,\n },\n ]\n optimizer_cls = Adafactor if self.args.adafactor else AdamW\n if self.args.adafactor:\n optimizer_cls = Adafactor\n optimizer_kwargs = {\"scale_parameter\": False, \"relative_step\": False}\n else:\n optimizer_cls = AdamW\n optimizer_kwargs = {\n \"betas\": (self.args.adam_beta1, self.args.adam_beta2),\n \"eps\": self.args.adam_epsilon,\n }\n optimizer_kwargs[\"lr\"] = self.args.learning_rate\n if self.sharded_dpp:\n self.optimizer = OSS(\n params=optimizer_grouped_parameters,\n optim=optimizer_cls,\n **optimizer_kwargs,\n )\n else:\n self.optimizer = optimizer_cls(optimizer_grouped_parameters, **optimizer_kwargs)\n\n if self.lr_scheduler is None:\n self.lr_scheduler = get_scheduler(\n self.args.lr_scheduler_type,\n self.optimizer,\n num_warmup_steps=self.args.warmup_steps,\n num_training_steps=num_training_steps,\n )\n\n def num_examples(self, dataloader: DataLoader) -> int:\n \"\"\"\n Helper to get number of samples in a :class:`~torch.utils.data.DataLoader` by accessing its dataset.\n\n Will raise an exception if the underlying dataset does not implement the method :obj:`__len__`\n \"\"\"\n return len(dataloader.dataset)\n\n def _hp_search_setup(self, trial: Union[\"optuna.Trial\", Dict[str, Any]]):\n \"\"\" HP search setup code \"\"\"\n self._trial = trial\n\n if self.hp_search_backend is None or trial is None:\n return\n\n params = self.hp_space(trial) if self.hp_search_backend == HPSearchBackend.OPTUNA else trial\n for key, value in params.items():\n if not hasattr(self.args, key):\n raise AttributeError(\n f\"Trying to set {key} in the hyperparameter search but there is no corresponding field in `TrainingArguments`.\"\n )\n old_attr = getattr(self.args, key, None)\n # Casting value to the proper type\n if old_attr is not None:\n value = type(old_attr)(value)\n setattr(self.args, key, value)\n if self.hp_search_backend == HPSearchBackend.OPTUNA:\n logger.info(\"Trial: %s\", trial.params)\n\n def _report_to_hp_search(\n self, trial: Union[\"optuna.Trial\", Dict[str, Any]], epoch: int, metrics: Dict[str, float]\n ):\n if self.hp_search_backend is None or trial is None:\n return\n self.objective = self.compute_objective(metrics.copy())\n if self.hp_search_backend == HPSearchBackend.OPTUNA:\n import optuna\n\n trial.report(self.objective, epoch)\n if trial.should_prune():\n raise optuna.TrialPruned()\n elif self.hp_search_backend == HPSearchBackend.RAY:\n from ray import tune\n\n if self.state.global_step % self.args.save_steps == 0:\n self._tune_save_checkpoint()\n tune.report(objective=self.objective, **metrics)\n\n def _tune_save_checkpoint(self):\n from ray import tune\n\n if not self.use_tune_checkpoints:\n return\n with tune.checkpoint_dir(step=self.state.global_step) as checkpoint_dir:\n self.args.output_dir = checkpoint_dir\n output_dir = os.path.join(self.args.output_dir, f\"{PREFIX_CHECKPOINT_DIR}-{self.state.global_step}\")\n self.save_model(output_dir)\n if self.is_world_process_zero():\n 
self.state.save_to_json(os.path.join(output_dir, \"trainer_state.json\"))\n torch.save(self.optimizer.state_dict(), os.path.join(output_dir, \"optimizer.pt\"))\n torch.save(self.lr_scheduler.state_dict(), os.path.join(output_dir, \"scheduler.pt\"))\n\n def call_model_init(self, trial=None):\n model_init_argcount = len(inspect.signature(self.model_init).parameters)\n if model_init_argcount == 0:\n model = self.model_init()\n elif model_init_argcount == 1:\n model = self.model_init(trial)\n else:\n raise RuntimeError(\"model_init should have 0 or 1 argument.\")\n\n if model is None:\n raise RuntimeError(\"model_init should not return None.\")\n\n return model\n\n def train(\n self,\n resume_from_checkpoint: Optional[str] = None,\n trial: Union[\"optuna.Trial\", Dict[str, Any]] = None,\n **kwargs,\n ):\n \"\"\"\n Main training entry point.\n\n Args:\n resume_from_checkpoint (:obj:`str`, `optional`):\n Local path to a saved checkpoint as saved by a previous instance of :class:`~transformers.Trainer`. If\n present, training will resume from the model/optimizer/scheduler states loaded here.\n trial (:obj:`optuna.Trial` or :obj:`Dict[str, Any]`, `optional`):\n The trial run or the hyperparameter dictionary for hyperparameter search.\n kwargs:\n Additional keyword arguments used to hide deprecated arguments\n \"\"\"\n if \"model_path\" in kwargs:\n resume_from_checkpoint = kwargs.pop(\"model_path\")\n warnings.warn(\n \"`model_path` is deprecated and will be removed in a future version. Use `resume_from_checkpoint` \"\n \"instead.\",\n FutureWarning,\n )\n if len(kwargs) > 0:\n raise TypeError(f\"train() received unexpected keyword arguments: {', '.join(list(kwargs.keys()))}.\")\n # This might change the seed so needs to run first.\n self._hp_search_setup(trial)\n\n # Model re-init\n model_reloaded = False\n if self.model_init is not None:\n # Seed must be set before instantiating the model when using model_init.\n set_seed(self.args.seed)\n self.model = self.call_model_init(trial)\n model_reloaded = True\n # Reinitializes optimizer and scheduler\n self.optimizer, self.lr_scheduler = None, None\n\n # Load potential model checkpoint\n if resume_from_checkpoint is not None and os.path.isfile(os.path.join(resume_from_checkpoint, WEIGHTS_NAME)):\n logger.info(f\"Loading model from {resume_from_checkpoint}.\")\n if isinstance(self.model, PreTrainedModel):\n self.model = self.model.from_pretrained(resume_from_checkpoint)\n model_reloaded = True\n else:\n state_dict = torch.load(os.path.join(resume_from_checkpoint, WEIGHTS_NAME))\n self.model.load_state_dict(state_dict)\n\n # If model was re-initialized, put it on the right device and update self.model_wrapped\n if model_reloaded:\n if not self.is_model_parallel:\n self.model = self.model.to(self.args.device)\n self.model_wrapped = self.model\n\n # Keeping track of whether we can call len() on the dataset or not\n train_dataset_is_sized = isinstance(self.train_dataset, collections.abc.Sized)\n\n # Data loader and number of training steps\n train_dataloader = self.get_train_dataloader()\n\n # Setting up training control variables:\n # number of training epochs: num_train_epochs\n # number of training steps per epoch: num_update_steps_per_epoch\n # total number of training steps to execute: max_steps\n if train_dataset_is_sized:\n num_update_steps_per_epoch = len(train_dataloader) // self.args.gradient_accumulation_steps\n num_update_steps_per_epoch = max(num_update_steps_per_epoch, 1)\n if self.args.max_steps > 0:\n max_steps = self.args.max_steps\n 
num_train_epochs = self.args.max_steps // num_update_steps_per_epoch + int(\n self.args.max_steps % num_update_steps_per_epoch > 0\n )\n else:\n max_steps = math.ceil(self.args.num_train_epochs * num_update_steps_per_epoch)\n num_train_epochs = math.ceil(self.args.num_train_epochs)\n else:\n # see __init__. max_steps is set when the dataset has no __len__\n max_steps = self.args.max_steps\n num_train_epochs = 1\n num_update_steps_per_epoch = max_steps\n\n if self.args.deepspeed:\n model, optimizer, lr_scheduler = init_deepspeed(self, num_training_steps=max_steps)\n self.model = model.module\n self.model_wrapped = model # will get further wrapped in DDP\n self.deepspeed = model # DeepSpeedEngine object\n self.optimizer = optimizer\n self.lr_scheduler = lr_scheduler\n else:\n self.create_optimizer_and_scheduler(num_training_steps=max_steps)\n\n self.state = TrainerState()\n self.state.is_hyper_param_search = trial is not None\n\n # Check if saved optimizer or scheduler states exist\n self._load_optimizer_and_scheduler(resume_from_checkpoint)\n\n model = self.model_wrapped\n\n # Mixed precision training with apex (torch < 1.6)\n if self.use_apex:\n model, self.optimizer = amp.initialize(model, self.optimizer, opt_level=self.args.fp16_opt_level)\n\n # Multi-gpu training (should be after apex fp16 initialization)\n if self.args.n_gpu > 1:\n model = torch.nn.DataParallel(model)\n\n # Distributed training (should be after apex fp16 initialization)\n if self.sharded_dpp:\n model = ShardedDDP(model, self.optimizer)\n elif is_sagemaker_distributed_available():\n model = DDP(model, device_ids=[dist.get_local_rank()], broadcast_buffers=False)\n elif self.args.local_rank != -1:\n if self.args.ddp_find_unused_parameters is not None:\n find_unused_parameters = self.args.ddp_find_unused_parameters\n elif isinstance(model, PreTrainedModel):\n # find_unused_parameters breaks checkpointing as per\n # https://github.com/huggingface/transformers/pull/4659#issuecomment-643356021\n find_unused_parameters = not getattr(model.config, \"gradient_checkpointing\", False)\n else:\n find_unused_parameters = True\n model = torch.nn.parallel.DistributedDataParallel(\n model,\n device_ids=[self.args.local_rank],\n output_device=self.args.local_rank,\n find_unused_parameters=find_unused_parameters,\n )\n\n # for the rest of this function `model` is the outside model, whether it was wrapped or not\n if model is not self.model:\n self.model_wrapped = model\n\n # important: at this point:\n # self.model is the Transformers Model\n # self.model_wrapped is DDP(Transformers Model), DDP(Deepspeed(Transformers Model)), etc.\n\n # Train!\n if is_torch_tpu_available():\n world_size = xm.xrt_world_size()\n elif self.args.local_rank != -1:\n world_size = dist.get_world_size()\n else:\n world_size = 1\n\n total_train_batch_size = self.args.train_batch_size * self.args.gradient_accumulation_steps * world_size\n num_examples = (\n self.num_examples(train_dataloader)\n if train_dataset_is_sized\n else total_train_batch_size * self.args.max_steps\n )\n\n logger.info(\"***** Running training *****\")\n logger.info(f\" Num examples = {num_examples}\")\n logger.info(f\" Num Epochs = {num_train_epochs}\")\n logger.info(f\" Instantaneous batch size per device = {self.args.per_device_train_batch_size}\")\n logger.info(f\" Total train batch size (w. 
parallel, distributed & accumulation) = {total_train_batch_size}\")\n logger.info(f\" Gradient Accumulation steps = {self.args.gradient_accumulation_steps}\")\n logger.info(f\" Total optimization steps = {max_steps}\")\n\n self.state.epoch = 0\n start_time = time.time()\n epochs_trained = 0\n steps_trained_in_current_epoch = 0\n\n # Check if continuing training from a checkpoint\n if resume_from_checkpoint is not None and os.path.isfile(\n os.path.join(resume_from_checkpoint, \"trainer_state.json\")\n ):\n self.state = TrainerState.load_from_json(os.path.join(resume_from_checkpoint, \"trainer_state.json\"))\n epochs_trained = self.state.global_step // num_update_steps_per_epoch\n if not self.args.ignore_data_skip:\n steps_trained_in_current_epoch = self.state.global_step % (num_update_steps_per_epoch)\n steps_trained_in_current_epoch *= self.args.gradient_accumulation_steps\n else:\n steps_trained_in_current_epoch = 0\n\n logger.info(\" Continuing training from checkpoint, will skip to saved global_step\")\n logger.info(f\" Continuing training from epoch {epochs_trained}\")\n logger.info(f\" Continuing training from global step {self.state.global_step}\")\n if not self.args.ignore_data_skip:\n logger.info(\n f\" Will skip the first {epochs_trained} epochs then the first {steps_trained_in_current_epoch} \"\n \"batches in the first epoch.\"\n )\n\n # Update the references\n self.callback_handler.model = self.model\n self.callback_handler.optimizer = self.optimizer\n self.callback_handler.lr_scheduler = self.lr_scheduler\n self.callback_handler.train_dataloader = train_dataloader\n self.state.trial_name = self.hp_name(trial) if self.hp_name is not None else None\n self.state.trial_params = hp_params(trial) if trial is not None else None\n # This should be the same if the state has been saved but in case the training arguments changed, it's safer\n # to set this after the load.\n self.state.max_steps = max_steps\n self.state.num_train_epochs = num_train_epochs\n self.state.is_local_process_zero = self.is_local_process_zero()\n self.state.is_world_process_zero = self.is_world_process_zero()\n\n # tr_loss is a tensor to avoid synchronization of TPUs through .item()\n tr_loss = torch.tensor(0.0).to(self.args.device)\n # _total_loss_scalar is updated everytime .item() has to be called on tr_loss and stores the sum of all losses\n self._total_loss_scalar = 0.0\n self._globalstep_last_logged = self.state.global_step\n self._total_flos = self.state.total_flos\n model.zero_grad()\n\n self.control = self.callback_handler.on_train_begin(self.args, self.state, self.control)\n\n # Skip the first epochs_trained epochs to get the random state of the dataloader at the right point.\n if not self.args.ignore_data_skip:\n for epoch in range(epochs_trained):\n # We just need to begin an iteration to create the randomization of the sampler.\n for _ in train_dataloader:\n break\n\n for epoch in range(epochs_trained, num_train_epochs):\n if isinstance(train_dataloader, DataLoader) and isinstance(train_dataloader.sampler, DistributedSampler):\n train_dataloader.sampler.set_epoch(epoch)\n\n if is_torch_tpu_available():\n parallel_loader = pl.ParallelLoader(train_dataloader, [self.args.device]).per_device_loader(\n self.args.device\n )\n epoch_iterator = parallel_loader\n else:\n epoch_iterator = train_dataloader\n\n # Reset the past mems state at the beginning of each epoch if necessary.\n if self.args.past_index >= 0:\n self._past = None\n\n steps_in_epoch = len(epoch_iterator) if train_dataset_is_sized else 
self.args.max_steps\n self.control = self.callback_handler.on_epoch_begin(self.args, self.state, self.control)\n\n for step, inputs in enumerate(epoch_iterator):\n\n # Skip past any already trained steps if resuming training\n if steps_trained_in_current_epoch > 0:\n steps_trained_in_current_epoch -= 1\n continue\n\n if (step + 1) % self.args.gradient_accumulation_steps == 0:\n self.control = self.callback_handler.on_step_begin(self.args, self.state, self.control)\n\n if ((step + 1) % self.args.gradient_accumulation_steps != 0) and self.args.local_rank != -1:\n # Avoid unnecessary DDP synchronization since there will be no backward pass on this example.\n with model.no_sync():\n tr_loss += self.training_step(model, inputs)\n else:\n tr_loss += self.training_step(model, inputs)\n self._total_flos += self.floating_point_ops(inputs)\n\n if (step + 1) % self.args.gradient_accumulation_steps == 0 or (\n # last step in epoch but step is always smaller than gradient_accumulation_steps\n steps_in_epoch <= self.args.gradient_accumulation_steps\n and (step + 1) == steps_in_epoch\n ):\n # Gradient clipping\n if self.args.max_grad_norm is not None and self.args.max_grad_norm > 0 and not self.deepspeed:\n # deepspeed does its own clipping\n\n if self.use_amp:\n # AMP: gradients need unscaling\n self.scaler.unscale_(self.optimizer)\n\n if hasattr(self.optimizer, \"clip_grad_norm\"):\n # Some optimizers (like the sharded optimizer) have a specific way to do gradient clipping\n self.optimizer.clip_grad_norm(self.args.max_grad_norm)\n else:\n # Revert to normal clipping otherwise, handling Apex or full precision\n torch.nn.utils.clip_grad_norm_(\n amp.master_params(self.optimizer) if self.use_apex else model.parameters(),\n self.args.max_grad_norm,\n )\n\n # Optimizer step\n if self.deepspeed:\n self.deepspeed.step()\n elif is_torch_tpu_available():\n xm.optimizer_step(self.optimizer)\n elif self.use_amp:\n self.scaler.step(self.optimizer)\n self.scaler.update()\n else:\n self.optimizer.step()\n\n self.lr_scheduler.step()\n model.zero_grad()\n self.state.global_step += 1\n self.state.epoch = epoch + (step + 1) / steps_in_epoch\n self.control = self.callback_handler.on_step_end(self.args, self.state, self.control)\n\n self._maybe_log_save_evaluate(tr_loss, model, trial, epoch)\n\n if self.control.should_epoch_stop or self.control.should_training_stop:\n break\n\n self.control = self.callback_handler.on_epoch_end(self.args, self.state, self.control)\n self._maybe_log_save_evaluate(tr_loss, model, trial, epoch)\n\n if self.args.tpu_metrics_debug or self.args.debug:\n if is_torch_tpu_available():\n # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)\n xm.master_print(met.metrics_report())\n else:\n logger.warning(\n \"You enabled PyTorch/XLA debug metrics but you don't have a TPU \"\n \"configured. Check your training configuration if this is unexpected.\"\n )\n if self.control.should_training_stop:\n break\n\n if self.args.past_index and hasattr(self, \"_past\"):\n # Clean the state at the end of training\n delattr(self, \"_past\")\n\n logger.info(\"\\n\\nTraining completed. 
Do not forget to share your model on huggingface.co/models =)\\n\\n\")\n if self.args.load_best_model_at_end and self.state.best_model_checkpoint is not None:\n logger.info(\n f\"Loading best model from {self.state.best_model_checkpoint} (score: {self.state.best_metric}).\"\n )\n if isinstance(self.model, PreTrainedModel):\n self.model = self.model.from_pretrained(self.state.best_model_checkpoint)\n if not self.is_model_parallel:\n self.model = self.model.to(self.args.device)\n else:\n state_dict = torch.load(os.path.join(self.state.best_model_checkpoint, WEIGHTS_NAME))\n self.model.load_state_dict(state_dict)\n\n if self.deepspeed:\n self.deepspeed.load_checkpoint(\n self.state.best_model_checkpoint, load_optimizer_states=False, load_lr_scheduler_states=False\n )\n\n metrics = speed_metrics(\"train\", start_time, self.state.max_steps)\n if self._total_flos is not None:\n self.store_flos()\n metrics[\"total_flos\"] = self.state.total_flos\n self.log(metrics)\n\n self.control = self.callback_handler.on_train_end(self.args, self.state, self.control)\n # add remaining tr_loss\n self._total_loss_scalar += tr_loss.item()\n\n return TrainOutput(self.state.global_step, self._total_loss_scalar / self.state.global_step, metrics)\n\n def _maybe_log_save_evaluate(self, tr_loss, model, trial, epoch):\n if self.control.should_log:\n logs: Dict[str, float] = {}\n tr_loss_scalar = tr_loss.item()\n # reset tr_loss to zero\n tr_loss -= tr_loss\n\n logs[\"loss\"] = round(tr_loss_scalar / (self.state.global_step - self._globalstep_last_logged), 4)\n # backward compatibility for pytorch schedulers\n logs[\"learning_rate\"] = (\n self.lr_scheduler.get_last_lr()[0]\n if version.parse(torch.__version__) >= version.parse(\"1.4\")\n else self.lr_scheduler.get_lr()[0]\n )\n self._total_loss_scalar += tr_loss_scalar\n self._globalstep_last_logged = self.state.global_step\n\n self.log(logs)\n\n metrics = None\n if self.control.should_evaluate:\n metrics = self.evaluate()\n self._report_to_hp_search(trial, epoch, metrics)\n\n if self.control.should_save:\n self._save_checkpoint(model, trial, metrics=metrics)\n self.control = self.callback_handler.on_save(self.args, self.state, self.control)\n\n def _save_checkpoint(self, model, trial, metrics=None):\n # In all cases, including ddp/dp/deepspeed, self.model is always a reference to the model we\n # want to save.\n assert _model_unwrap(model) is self.model, \"internal model should be a reference to self.model\"\n\n # Save model checkpoint\n checkpoint_folder = f\"{PREFIX_CHECKPOINT_DIR}-{self.state.global_step}\"\n\n if self.hp_search_backend is not None and trial is not None:\n if self.hp_search_backend == HPSearchBackend.OPTUNA:\n run_id = trial.number\n else:\n from ray import tune\n\n run_id = tune.get_trial_id()\n run_name = self.hp_name(trial) if self.hp_name is not None else f\"run-{run_id}\"\n output_dir = os.path.join(self.args.output_dir, run_name, checkpoint_folder)\n else:\n output_dir = os.path.join(self.args.output_dir, checkpoint_folder)\n\n self.store_flos()\n\n self.save_model(output_dir)\n if self.deepspeed:\n self.deepspeed.save_checkpoint(output_dir)\n\n # Save optimizer and scheduler\n if self.sharded_dpp:\n self.optimizer.consolidate_state_dict()\n\n if is_torch_tpu_available():\n xm.rendezvous(\"saving_optimizer_states\")\n xm.save(self.optimizer.state_dict(), os.path.join(output_dir, \"optimizer.pt\"))\n with warnings.catch_warnings(record=True) as caught_warnings:\n xm.save(self.lr_scheduler.state_dict(), os.path.join(output_dir, 
\"scheduler.pt\"))\n reissue_pt_warnings(caught_warnings)\n elif self.is_world_process_zero() and not self.deepspeed:\n # deepspeed.save_checkpoint above saves model/optim/sched\n torch.save(self.optimizer.state_dict(), os.path.join(output_dir, \"optimizer.pt\"))\n with warnings.catch_warnings(record=True) as caught_warnings:\n torch.save(self.lr_scheduler.state_dict(), os.path.join(output_dir, \"scheduler.pt\"))\n reissue_pt_warnings(caught_warnings)\n\n # Determine the new best metric / best model checkpoint\n if metrics is not None and self.args.metric_for_best_model is not None:\n metric_to_check = self.args.metric_for_best_model\n if not metric_to_check.startswith(\"eval_\"):\n metric_to_check = f\"eval_{metric_to_check}\"\n metric_value = metrics[metric_to_check]\n\n operator = np.greater if self.args.greater_is_better else np.less\n if (\n self.state.best_metric is None\n or self.state.best_model_checkpoint is None\n or operator(metric_value, self.state.best_metric)\n ):\n self.state.best_metric = metric_value\n self.state.best_model_checkpoint = output_dir\n\n # Save the Trainer state\n if self.is_world_process_zero():\n self.state.save_to_json(os.path.join(output_dir, \"trainer_state.json\"))\n\n # Maybe delete some older checkpoints.\n if self.is_world_process_zero():\n self._rotate_checkpoints(use_mtime=True)\n\n def _load_optimizer_and_scheduler(self, checkpoint):\n \"\"\"If optimizer and scheduler states exist, load them.\"\"\"\n if checkpoint is None:\n return\n\n if os.path.isfile(os.path.join(checkpoint, \"optimizer.pt\")) and os.path.isfile(\n os.path.join(checkpoint, \"scheduler.pt\")\n ):\n # Load in optimizer and scheduler states\n if is_torch_tpu_available():\n # On TPU we have to take some extra precautions to properly load the states on the right device.\n optimizer_state = torch.load(os.path.join(checkpoint, \"optimizer.pt\"), map_location=\"cpu\")\n with warnings.catch_warnings(record=True) as caught_warnings:\n lr_scheduler_state = torch.load(os.path.join(checkpoint, \"scheduler.pt\"), map_location=\"cpu\")\n reissue_pt_warnings(caught_warnings)\n\n xm.send_cpu_data_to_device(optimizer_state, self.args.device)\n xm.send_cpu_data_to_device(lr_scheduler_state, self.args.device)\n\n self.optimizer.load_state_dict(optimizer_state)\n self.lr_scheduler.load_state_dict(lr_scheduler_state)\n else:\n self.optimizer.load_state_dict(\n torch.load(os.path.join(checkpoint, \"optimizer.pt\"), map_location=self.args.device)\n )\n with warnings.catch_warnings(record=True) as caught_warnings:\n self.lr_scheduler.load_state_dict(torch.load(os.path.join(checkpoint, \"scheduler.pt\")))\n reissue_pt_warnings(caught_warnings)\n\n if self.deepspeed:\n # Not sure how to check if there is a saved deepspeed checkpoint, but since it just return None if it fails to find a deepspeed checkpoint this is sort of a check-n-load function\n self.deepspeed.load_checkpoint(checkpoint, load_optimizer_states=True, load_lr_scheduler_states=True)\n\n def hyperparameter_search(\n self,\n hp_space: Optional[Callable[[\"optuna.Trial\"], Dict[str, float]]] = None,\n compute_objective: Optional[Callable[[Dict[str, float]], float]] = None,\n n_trials: int = 20,\n direction: str = \"minimize\",\n backend: Optional[Union[\"str\", HPSearchBackend]] = None,\n hp_name: Optional[Callable[[\"optuna.Trial\"], str]] = None,\n **kwargs,\n ) -> BestRun:\n \"\"\"\n Launch an hyperparameter search using ``optuna`` or ``Ray Tune``. 
The optimized quantity is determined by\n :obj:`compute_objective`, which defaults to a function returning the evaluation loss when no metric is\n provided, the sum of all metrics otherwise.\n\n .. warning::\n\n To use this method, you need to have provided a ``model_init`` when initializing your\n :class:`~transformers.Trainer`: we need to reinitialize the model at each new run. This is incompatible\n with the ``optimizers`` argument, so you need to subclass :class:`~transformers.Trainer` and override the\n method :meth:`~transformers.Trainer.create_optimizer_and_scheduler` for custom optimizer/scheduler.\n\n Args:\n hp_space (:obj:`Callable[[\"optuna.Trial\"], Dict[str, float]]`, `optional`):\n A function that defines the hyperparameter search space. Will default to\n :func:`~transformers.trainer_utils.default_hp_space_optuna` or\n :func:`~transformers.trainer_utils.default_hp_space_ray` depending on your backend.\n compute_objective (:obj:`Callable[[Dict[str, float]], float]`, `optional`):\n A function computing the objective to minimize or maximize from the metrics returned by the\n :obj:`evaluate` method. Will default to :func:`~transformers.trainer_utils.default_compute_objective`.\n n_trials (:obj:`int`, `optional`, defaults to 100):\n The number of trial runs to test.\n direction(:obj:`str`, `optional`, defaults to :obj:`\"minimize\"`):\n Whether to optimize greater or lower objects. Can be :obj:`\"minimize\"` or :obj:`\"maximize\"`, you should\n pick :obj:`\"minimize\"` when optimizing the validation loss, :obj:`\"maximize\"` when optimizing one or\n several metrics.\n backend(:obj:`str` or :class:`~transformers.training_utils.HPSearchBackend`, `optional`):\n The backend to use for hyperparameter search. Will default to optuna or Ray Tune, depending on which\n one is installed. If both are installed, will default to optuna.\n kwargs:\n Additional keyword arguments passed along to :obj:`optuna.create_study` or :obj:`ray.tune.run`. For\n more information see:\n\n - the documentation of `optuna.create_study\n <https://optuna.readthedocs.io/en/stable/reference/alias_generated/optuna.create_study.html#optuna.create_study>`__\n - the documentation of `tune.run\n <https://docs.ray.io/en/latest/tune/api_docs/execution.html#tune-run>`__\n\n Returns:\n :class:`transformers.trainer_utils.BestRun`: All the information about the best run.\n \"\"\"\n if backend is None:\n backend = default_hp_search_backend()\n if backend is None:\n raise RuntimeError(\n \"At least one of optuna or ray should be installed. \"\n \"To install optuna run `pip install optuna`.\"\n \"To install ray run `pip install ray[tune]`.\"\n )\n backend = HPSearchBackend(backend)\n if backend == HPSearchBackend.OPTUNA and not is_optuna_available():\n raise RuntimeError(\"You picked the optuna backend, but it is not installed. Use `pip install optuna`.\")\n if backend == HPSearchBackend.RAY and not is_ray_tune_available():\n raise RuntimeError(\n \"You picked the Ray Tune backend, but it is not installed. 
Use `pip install 'ray[tune]'`.\"\n )\n self.hp_search_backend = backend\n if self.model_init is None:\n raise RuntimeError(\n \"To use hyperparameter search, you need to pass your model through a model_init function.\"\n )\n\n self.hp_space = default_hp_space[backend] if hp_space is None else hp_space\n self.hp_name = hp_name\n self.compute_objective = default_compute_objective if compute_objective is None else compute_objective\n\n run_hp_search = run_hp_search_optuna if backend == HPSearchBackend.OPTUNA else run_hp_search_ray\n best_run = run_hp_search(self, n_trials, direction, **kwargs)\n\n self.hp_search_backend = None\n return best_run\n\n def log(self, logs: Dict[str, float]) -> None:\n \"\"\"\n Log :obj:`logs` on the various objects watching training.\n\n Subclass and override this method to inject custom behavior.\n\n Args:\n logs (:obj:`Dict[str, float]`):\n The values to log.\n \"\"\"\n if self.state.epoch is not None:\n logs[\"epoch\"] = round(self.state.epoch, 2)\n\n output = {**logs, **{\"step\": self.state.global_step}}\n self.state.log_history.append(output)\n self.control = self.callback_handler.on_log(self.args, self.state, self.control, logs)\n\n def _prepare_inputs(self, inputs: Dict[str, Union[torch.Tensor, Any]]) -> Dict[str, Union[torch.Tensor, Any]]:\n \"\"\"\n Prepare :obj:`inputs` before feeding them to the model, converting them to tensors if they are not already and\n handling potential state.\n \"\"\"\n for k, v in inputs.items():\n if isinstance(v, torch.Tensor):\n inputs[k] = v.to(self.args.device)\n\n if self.args.past_index >= 0 and self._past is not None:\n inputs[\"mems\"] = self._past\n\n return inputs\n\n def training_step(self, model: nn.Module, inputs: Dict[str, Union[torch.Tensor, Any]]) -> torch.Tensor:\n \"\"\"\n Perform a training step on a batch of inputs.\n\n Subclass and override to inject custom behavior.\n\n Args:\n model (:obj:`nn.Module`):\n The model to train.\n inputs (:obj:`Dict[str, Union[torch.Tensor, Any]]`):\n The inputs and targets of the model.\n\n The dictionary will be unpacked before being fed to the model. Most models expect the targets under the\n argument :obj:`labels`. Check your model's documentation for all accepted arguments.\n\n Return:\n :obj:`torch.Tensor`: The tensor with training loss on this batch.\n \"\"\"\n\n model.train()\n inputs = self._prepare_inputs(inputs)\n\n if self.use_amp:\n with autocast():\n loss = self.compute_loss(model, inputs)\n else:\n loss = self.compute_loss(model, inputs)\n\n if self.args.n_gpu > 1:\n loss = loss.mean() # mean() to average on multi-gpu parallel training\n\n if self.args.gradient_accumulation_steps > 1:\n loss = loss / self.args.gradient_accumulation_steps\n\n if self.use_amp:\n self.scaler.scale(loss).backward()\n elif self.use_apex:\n with amp.scale_loss(loss, self.optimizer) as scaled_loss:\n scaled_loss.backward()\n elif self.deepspeed:\n self.deepspeed.backward(loss)\n else:\n loss.backward()\n\n return loss.detach()\n\n def compute_loss(self, model, inputs, return_outputs=False):\n \"\"\"\n How the loss is computed by Trainer. 
By default, all models return the loss in the first element.\n\n Subclass and override for custom behavior.\n \"\"\"\n if self.label_smoother is not None and \"labels\" in inputs:\n labels = inputs.pop(\"labels\")\n else:\n labels = None\n outputs = model(**inputs)\n # Save past state if it exists\n # TODO: this needs to be fixed and made cleaner later.\n if self.args.past_index >= 0:\n self._past = outputs[self.args.past_index]\n\n if labels is not None:\n loss = self.label_smoother(outputs, labels)\n else:\n # We don't use .loss here since the model may return tuples instead of ModelOutput.\n loss = outputs[\"loss\"] if isinstance(outputs, dict) else outputs[0]\n\n return (loss, outputs) if return_outputs else loss\n\n def is_local_process_zero(self) -> bool:\n \"\"\"\n Whether or not this process is the local (e.g., on one machine if training in a distributed fashion on several\n machines) main process.\n \"\"\"\n if is_torch_tpu_available():\n return xm.is_master_ordinal(local=True)\n else:\n return self.args.local_rank in [-1, 0]\n\n def is_world_process_zero(self) -> bool:\n \"\"\"\n Whether or not this process is the global main process (when training in a distributed fashion on several\n machines, this is only going to be :obj:`True` for one process).\n \"\"\"\n if is_torch_tpu_available():\n return xm.is_master_ordinal(local=False)\n else:\n return self.args.local_rank == -1 or dist.get_rank() == 0\n\n def save_model(self, output_dir: Optional[str] = None):\n \"\"\"\n Will save the model, so you can reload it using :obj:`from_pretrained()`.\n\n Will only save from the world_master process (unless in TPUs).\n \"\"\"\n\n if is_torch_tpu_available():\n self._save_tpu(output_dir)\n elif self.is_world_process_zero():\n self._save(output_dir)\n\n # If on sagemaker and we are saving the main model (not a checkpoint so output_dir=None), save a copy to\n # SM_MODEL_DIR for easy deployment.\n if output_dir is None and os.getenv(\"SM_MODEL_DIR\") is not None:\n self.save_model(output_dir=os.getenv(\"SM_MODEL_DIR\"))\n\n def _save_tpu(self, output_dir: Optional[str] = None):\n output_dir = output_dir if output_dir is not None else self.args.output_dir\n logger.info(\"Saving model checkpoint to %s\", output_dir)\n\n if xm.is_master_ordinal():\n os.makedirs(output_dir, exist_ok=True)\n torch.save(self.args, os.path.join(output_dir, \"training_args.bin\"))\n\n # Save a trained model and configuration using `save_pretrained()`.\n # They can then be reloaded using `from_pretrained()`\n xm.rendezvous(\"saving_checkpoint\")\n if not isinstance(self.model, PreTrainedModel):\n logger.info(\"Trainer.model is not a `PreTrainedModel`, only saving its state dict.\")\n state_dict = self.model.state_dict()\n xm.save(state_dict, os.path.join(output_dir, WEIGHTS_NAME))\n else:\n self.model.save_pretrained(output_dir)\n if self.tokenizer is not None and self.is_world_process_zero():\n self.tokenizer.save_pretrained(output_dir)\n\n def _save(self, output_dir: Optional[str] = None):\n output_dir = output_dir if output_dir is not None else self.args.output_dir\n os.makedirs(output_dir, exist_ok=True)\n logger.info(\"Saving model checkpoint to %s\", output_dir)\n # Save a trained model and configuration using `save_pretrained()`.\n # They can then be reloaded using `from_pretrained()`\n if not isinstance(self.model, PreTrainedModel):\n logger.info(\"Trainer.model is not a `PreTrainedModel`, only saving its state dict.\")\n state_dict = self.model.state_dict()\n torch.save(state_dict, os.path.join(output_dir, 
WEIGHTS_NAME))\n else:\n self.model.save_pretrained(output_dir)\n if self.tokenizer is not None and self.is_world_process_zero():\n self.tokenizer.save_pretrained(output_dir)\n\n # Good practice: save your training arguments together with the trained model\n torch.save(self.args, os.path.join(output_dir, \"training_args.bin\"))\n\n def store_flos(self):\n # Storing the number of floating-point operations that went into the model\n if self._total_flos is not None:\n if self.args.local_rank != -1:\n self.state.total_flos = distributed_broadcast_scalars([self._total_flos]).sum().item()\n else:\n self.state.total_flos = self._total_flos\n\n def _sorted_checkpoints(self, checkpoint_prefix=PREFIX_CHECKPOINT_DIR, use_mtime=False) -> List[str]:\n ordering_and_checkpoint_path = []\n\n glob_checkpoints = [str(x) for x in Path(self.args.output_dir).glob(f\"{checkpoint_prefix}-*\")]\n\n for path in glob_checkpoints:\n if use_mtime:\n ordering_and_checkpoint_path.append((os.path.getmtime(path), path))\n else:\n regex_match = re.match(f\".*{checkpoint_prefix}-([0-9]+)\", path)\n if regex_match and regex_match.groups():\n ordering_and_checkpoint_path.append((int(regex_match.groups()[0]), path))\n\n checkpoints_sorted = sorted(ordering_and_checkpoint_path)\n checkpoints_sorted = [checkpoint[1] for checkpoint in checkpoints_sorted]\n # Make sure we don't delete the best model.\n if self.state.best_model_checkpoint is not None:\n best_model_index = checkpoints_sorted.index(str(Path(self.state.best_model_checkpoint)))\n checkpoints_sorted[best_model_index], checkpoints_sorted[-1] = (\n checkpoints_sorted[-1],\n checkpoints_sorted[best_model_index],\n )\n return checkpoints_sorted\n\n def _rotate_checkpoints(self, use_mtime=False) -> None:\n if self.args.save_total_limit is None or self.args.save_total_limit <= 0:\n return\n\n # Check if we should delete older checkpoint(s)\n checkpoints_sorted = self._sorted_checkpoints(use_mtime=use_mtime)\n if len(checkpoints_sorted) <= self.args.save_total_limit:\n return\n\n number_of_checkpoints_to_delete = max(0, len(checkpoints_sorted) - self.args.save_total_limit)\n checkpoints_to_be_deleted = checkpoints_sorted[:number_of_checkpoints_to_delete]\n for checkpoint in checkpoints_to_be_deleted:\n logger.info(\"Deleting older checkpoint [{}] due to args.save_total_limit\".format(checkpoint))\n shutil.rmtree(checkpoint)\n\n def evaluate(\n self,\n eval_dataset: Optional[Dataset] = None,\n ignore_keys: Optional[List[str]] = None,\n metric_key_prefix: str = \"eval\",\n ) -> Dict[str, float]:\n \"\"\"\n Run evaluation and returns metrics.\n\n The calling script will be responsible for providing a method to compute metrics, as they are task-dependent\n (pass it to the init :obj:`compute_metrics` argument).\n\n You can also subclass and override this method to inject custom behavior.\n\n Args:\n eval_dataset (:obj:`Dataset`, `optional`):\n Pass a dataset if you wish to override :obj:`self.eval_dataset`. If it is an :obj:`datasets.Dataset`,\n columns not accepted by the ``model.forward()`` method are automatically removed. It must implement the\n :obj:`__len__` method.\n ignore_keys (:obj:`Lst[str]`, `optional`):\n A list of keys in the output of your model (if it is a dictionary) that should be ignored when\n gathering predictions.\n metric_key_prefix (:obj:`str`, `optional`, defaults to :obj:`\"eval\"`):\n An optional prefix to be used as the metrics key prefix. 
For example the metrics \"bleu\" will be named\n \"eval_bleu\" if the prefix is \"eval\" (default)\n\n Returns:\n A dictionary containing the evaluation loss and the potential metrics computed from the predictions. The\n dictionary also contains the epoch number which comes from the training state.\n \"\"\"\n if eval_dataset is not None and not isinstance(eval_dataset, collections.abc.Sized):\n raise ValueError(\"eval_dataset must implement __len__\")\n\n eval_dataloader = self.get_eval_dataloader(eval_dataset)\n start_time = time.time()\n\n output = self.prediction_loop(\n eval_dataloader,\n description=\"Evaluation\",\n # No point gathering the predictions if there are no metrics, otherwise we defer to\n # self.args.prediction_loss_only\n prediction_loss_only=True if self.compute_metrics is None else None,\n ignore_keys=ignore_keys,\n metric_key_prefix=metric_key_prefix,\n )\n\n n_samples = len(eval_dataset if eval_dataset is not None else self.eval_dataset)\n output.metrics.update(speed_metrics(metric_key_prefix, start_time, n_samples))\n self.log(output.metrics)\n\n if self.args.tpu_metrics_debug or self.args.debug:\n # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)\n xm.master_print(met.metrics_report())\n\n self.control = self.callback_handler.on_evaluate(self.args, self.state, self.control, output.metrics)\n return output.metrics\n\n def predict(\n self, test_dataset: Dataset, ignore_keys: Optional[List[str]] = None, metric_key_prefix: str = \"eval\"\n ) -> PredictionOutput:\n \"\"\"\n Run prediction and returns predictions and potential metrics.\n\n Depending on the dataset and your use case, your test dataset may contain labels. In that case, this method\n will also return metrics, like in :obj:`evaluate()`.\n\n Args:\n test_dataset (:obj:`Dataset`):\n Dataset to run the predictions on. If it is an :obj:`datasets.Dataset`, columns not accepted by the\n ``model.forward()`` method are automatically removed. Has to implement the method :obj:`__len__`\n ignore_keys (:obj:`Lst[str]`, `optional`):\n A list of keys in the output of your model (if it is a dictionary) that should be ignored when\n gathering predictions.\n metric_key_prefix (:obj:`str`, `optional`, defaults to :obj:`\"eval\"`):\n An optional prefix to be used as the metrics key prefix. For example the metrics \"bleu\" will be named\n \"eval_bleu\" if the prefix is \"eval\" (default)\n\n .. note::\n\n If your predictions or labels have different sequence length (for instance because you're doing dynamic\n padding in a token classification task) the predictions will be padded (on the right) to allow for\n concatenation into one array. 
The padding index is -100.\n\n Returns: `NamedTuple` A namedtuple with the following keys:\n\n - predictions (:obj:`np.ndarray`): The predictions on :obj:`test_dataset`.\n - label_ids (:obj:`np.ndarray`, `optional`): The labels (if the dataset contained some).\n - metrics (:obj:`Dict[str, float]`, `optional`): The potential dictionary of metrics (if the dataset\n contained labels).\n \"\"\"\n if test_dataset is not None and not isinstance(test_dataset, collections.abc.Sized):\n raise ValueError(\"test_dataset must implement __len__\")\n\n test_dataloader = self.get_test_dataloader(test_dataset)\n start_time = time.time()\n\n output = self.prediction_loop(\n test_dataloader, description=\"Prediction\", ignore_keys=ignore_keys, metric_key_prefix=metric_key_prefix\n )\n output.metrics.update(speed_metrics(metric_key_prefix, start_time, len(test_dataset)))\n return output\n\n def prediction_loop(\n self,\n dataloader: DataLoader,\n description: str,\n prediction_loss_only: Optional[bool] = None,\n ignore_keys: Optional[List[str]] = None,\n metric_key_prefix: str = \"eval\",\n ) -> PredictionOutput:\n \"\"\"\n Prediction/evaluation loop, shared by :obj:`Trainer.evaluate()` and :obj:`Trainer.predict()`.\n\n Works both with or without labels.\n \"\"\"\n if not isinstance(dataloader.dataset, collections.abc.Sized):\n raise ValueError(\"dataset must implement __len__\")\n prediction_loss_only = (\n prediction_loss_only if prediction_loss_only is not None else self.args.prediction_loss_only\n )\n\n model = self.model\n # multi-gpu eval\n if self.args.n_gpu > 1:\n model = torch.nn.DataParallel(model)\n # Note: in torch.distributed mode, there's no point in wrapping the model\n # inside a DistributedDataParallel as we'll be under `no_grad` anyways.\n\n batch_size = dataloader.batch_size\n num_examples = self.num_examples(dataloader)\n logger.info(\"***** Running %s *****\", description)\n logger.info(\" Num examples = %d\", num_examples)\n logger.info(\" Batch size = %d\", batch_size)\n losses_host: torch.Tensor = None\n preds_host: Union[torch.Tensor, List[torch.Tensor]] = None\n labels_host: Union[torch.Tensor, List[torch.Tensor]] = None\n\n world_size = 1\n if is_torch_tpu_available():\n world_size = xm.xrt_world_size()\n elif self.args.local_rank != -1:\n world_size = dist.get_world_size()\n world_size = max(1, world_size)\n\n eval_losses_gatherer = DistributedTensorGatherer(world_size, num_examples, make_multiple_of=batch_size)\n if not prediction_loss_only:\n preds_gatherer = DistributedTensorGatherer(world_size, num_examples)\n labels_gatherer = DistributedTensorGatherer(world_size, num_examples)\n\n model.eval()\n\n if is_torch_tpu_available():\n dataloader = pl.ParallelLoader(dataloader, [self.args.device]).per_device_loader(self.args.device)\n\n if self.args.past_index >= 0:\n self._past = None\n\n self.callback_handler.eval_dataloader = dataloader\n\n for step, inputs in enumerate(dataloader):\n loss, logits, labels = self.prediction_step(model, inputs, prediction_loss_only, ignore_keys=ignore_keys)\n if loss is not None:\n losses = loss.repeat(batch_size)\n losses_host = losses if losses_host is None else torch.cat((losses_host, losses), dim=0)\n if logits is not None:\n preds_host = logits if preds_host is None else nested_concat(preds_host, logits, padding_index=-100)\n if labels is not None:\n labels_host = labels if labels_host is None else nested_concat(labels_host, labels, padding_index=-100)\n self.control = self.callback_handler.on_prediction_step(self.args, self.state, 
self.control)\n\n # Gather all tensors and put them back on the CPU if we have done enough accumulation steps.\n if self.args.eval_accumulation_steps is not None and (step + 1) % self.args.eval_accumulation_steps == 0:\n eval_losses_gatherer.add_arrays(self._gather_and_numpify(losses_host, \"eval_losses\"))\n if not prediction_loss_only:\n preds_gatherer.add_arrays(self._gather_and_numpify(preds_host, \"eval_preds\"))\n labels_gatherer.add_arrays(self._gather_and_numpify(labels_host, \"eval_label_ids\"))\n\n # Set back to None to begin a new accumulation\n losses_host, preds_host, labels_host = None, None, None\n\n if self.args.past_index and hasattr(self, \"_past\"):\n # Clean the state at the end of the evaluation loop\n delattr(self, \"_past\")\n\n # Gather all remaining tensors and put them back on the CPU\n eval_losses_gatherer.add_arrays(self._gather_and_numpify(losses_host, \"eval_losses\"))\n if not prediction_loss_only:\n preds_gatherer.add_arrays(self._gather_and_numpify(preds_host, \"eval_preds\"))\n labels_gatherer.add_arrays(self._gather_and_numpify(labels_host, \"eval_label_ids\"))\n\n eval_loss = eval_losses_gatherer.finalize()\n preds = preds_gatherer.finalize() if not prediction_loss_only else None\n label_ids = labels_gatherer.finalize() if not prediction_loss_only else None\n\n if self.compute_metrics is not None and preds is not None and label_ids is not None:\n metrics = self.compute_metrics(EvalPrediction(predictions=preds, label_ids=label_ids))\n else:\n metrics = {}\n\n if eval_loss is not None:\n metrics[f\"{metric_key_prefix}_loss\"] = eval_loss.mean().item()\n\n # Prefix all keys with metric_key_prefix + '_'\n for key in list(metrics.keys()):\n if not key.startswith(f\"{metric_key_prefix}_\"):\n metrics[f\"{metric_key_prefix}_{key}\"] = metrics.pop(key)\n\n return PredictionOutput(predictions=preds, label_ids=label_ids, metrics=metrics)\n\n def _gather_and_numpify(self, tensors, name):\n \"\"\"\n Gather value of `tensors` (tensor or list/tuple of nested tensors) and convert them to numpy before\n concatenating them to `gathered`\n \"\"\"\n if tensors is None:\n return\n if is_torch_tpu_available():\n tensors = nested_xla_mesh_reduce(tensors, name)\n elif self.args.local_rank != -1:\n tensors = distributed_concat(tensors)\n\n return nested_numpify(tensors)\n\n def prediction_step(\n self,\n model: nn.Module,\n inputs: Dict[str, Union[torch.Tensor, Any]],\n prediction_loss_only: bool,\n ignore_keys: Optional[List[str]] = None,\n ) -> Tuple[Optional[float], Optional[torch.Tensor], Optional[torch.Tensor]]:\n \"\"\"\n Perform an evaluation step on :obj:`model` using obj:`inputs`.\n\n Subclass and override to inject custom behavior.\n\n Args:\n model (:obj:`nn.Module`):\n The model to evaluate.\n inputs (:obj:`Dict[str, Union[torch.Tensor, Any]]`):\n The inputs and targets of the model.\n\n The dictionary will be unpacked before being fed to the model. Most models expect the targets under the\n argument :obj:`labels`. 
Check your model's documentation for all accepted arguments.\n prediction_loss_only (:obj:`bool`):\n Whether or not to return the loss only.\n ignore_keys (:obj:`Lst[str]`, `optional`):\n A list of keys in the output of your model (if it is a dictionary) that should be ignored when\n gathering predictions.\n\n Return:\n Tuple[Optional[float], Optional[torch.Tensor], Optional[torch.Tensor]]: A tuple with the loss, logits and\n labels (each being optional).\n \"\"\"\n has_labels = all(inputs.get(k) is not None for k in self.label_names)\n inputs = self._prepare_inputs(inputs)\n if ignore_keys is None:\n if hasattr(self.model, \"config\"):\n ignore_keys = getattr(self.model.config, \"keys_to_ignore_at_inference\", [])\n else:\n ignore_keys = []\n\n with torch.no_grad():\n if has_labels:\n loss, outputs = self.compute_loss(model, inputs, return_outputs=True)\n loss = loss.mean().detach()\n if isinstance(outputs, dict):\n logits = tuple(v for k, v in outputs.items() if k not in ignore_keys + [\"loss\"])\n else:\n logits = outputs[1:]\n else:\n loss = None\n if self.use_amp:\n with autocast():\n outputs = model(**inputs)\n else:\n outputs = model(**inputs)\n if isinstance(outputs, dict):\n logits = tuple(v for k, v in outputs.items() if k not in ignore_keys)\n else:\n logits = outputs\n # TODO: this needs to be fixed and made cleaner later.\n if self.args.past_index >= 0:\n self._past = outputs[self.args.past_index - 1]\n\n if prediction_loss_only:\n return (loss, None, None)\n\n logits = nested_detach(logits)\n if len(logits) == 1:\n logits = logits[0]\n\n if has_labels:\n labels = nested_detach(tuple(inputs.get(name) for name in self.label_names))\n if len(labels) == 1:\n labels = labels[0]\n else:\n labels = None\n\n return (loss, logits, labels)\n\n def floating_point_ops(self, inputs: Dict[str, Union[torch.Tensor, Any]]):\n \"\"\"\n For models that inherit from :class:`~transformers.PreTrainedModel`, uses that method to compute the number of\n floating point operations for every backward + forward pass. If using another model, either implement such a\n method in the model or subclass and override this method.\n\n Args:\n inputs (:obj:`Dict[str, Union[torch.Tensor, Any]]`):\n The inputs and targets of the model.\n\n Returns:\n :obj:`int`: The number of floating-point operations.\n \"\"\"\n if hasattr(self.model, \"floating_point_ops\"):\n return self.model.floating_point_ops(inputs)\n else:\n return 0\n"
] | [
[
"torch.utils.data.distributed.DistributedSampler",
"torch.cat",
"torch.utils.data.sampler.SequentialSampler",
"torch.tensor",
"torch.cuda.amp.autocast",
"torch.nn.DataParallel",
"torch.cuda.amp.GradScaler",
"torch.no_grad",
"torch.distributed.get_rank",
"torch.utils.data.dataloader.DataLoader",
"torch.utils.data.sampler.RandomSampler",
"torch.distributed.get_local_rank",
"torch.distributed.get_world_size",
"torch.nn.parallel.DistributedDataParallel"
]
] |
arpitdm/nifty | [
"763792d2ddc72f2af8c6d1372c5ed8d04c741ae1"
] | [
"models/fairgnn.py"
] | [
"import torch.nn as nn\nfrom models import *\nimport torch\nimport gc\n\ndef get_model(nfeat, args):\n if args.model == \"gcn\":\n model = GCN_Body(nfeat,args.num_hidden,args.dropout)\n elif args.model == \"gat\":\n heads = ([args.num_heads] * args.num_layers) + [args.num_out_heads]\n model = GAT_body(args.num_layers,nfeat,args.num_hidden,heads,args.dropout,args.attn_drop,args.negative_slope,args.residual)\n else:\n print(\"Model not implement\")\n return\n\n return model\n\nclass FairGNN(nn.Module):\n\n def __init__(self, nfeat, args):\n super(FairGNN,self).__init__()\n\n nhid = args.num_hidden\n dropout = args.dropout\n self.estimator = GCN(nfeat,args.hidden,1,dropout)\n self.GNN = get_model(nfeat,args)\n self.classifier = nn.Linear(nhid,1)\n self.adv = nn.Linear(nhid,1)\n\n # G_params = list(self.GNN.parameters()) + list(self.classifier.parameters()) + list(self.estimator.parameters())\n # self.optimizer_G = torch.optim.Adam(G_params, lr = args.lr, weight_decay = args.weight_decay)\n # self.optimizer_A = torch.optim.Adam(self.adv.parameters(), lr = args.lr, weight_decay = args.weight_decay)\n\n self.args = args\n # self.criterion = nn.BCEWithLogitsLoss()\n\n self.G_loss = 0\n self.A_loss = 0\n\n def forward(self, x, edge_index):\n s = self.estimator(x, edge_index)\n z = self.GNN(x, edge_index)\n y = self.classifier(z)\n return y, s, z\n \n def optimize(self,g,x,labels,idx_train,sens,idx_sens_train):\n self.train()\n\n ### update E, G\n self.adv.requires_grad_(False)\n self.optimizer_G.zero_grad()\n\n s = self.estimator(g,x)\n h = self.GNN(g,x)\n y = self.classifier(h)\n\n s_g = self.adv(h)\n\n s_score = torch.sigmoid(s.detach())\n # s_score = (s_score > 0.5).float()\n s_score[idx_sens_train]=sens[idx_sens_train].unsqueeze(1).float()\n y_score = torch.sigmoid(y)\n self.cov = torch.abs(torch.mean((s_score - torch.mean(s_score)) * (y_score - torch.mean(y_score))))\n \n self.cls_loss = self.criterion(y[idx_train],labels[idx_train].unsqueeze(1).float())\n self.adv_loss = self.criterion(s_g,s_score)\n \n self.G_loss = self.cls_loss + self.args.alpha * self.cov - self.args.beta * self.adv_loss\n self.G_loss.backward()\n self.optimizer_G.step()\n\n ## update Adv\n self.adv.requires_grad_(True)\n self.optimizer_A.zero_grad()\n s_g = self.adv(h.detach())\n self.A_loss = self.criterion(s_g,s_score)\n self.A_loss.backward()\n self.optimizer_A.step()\n\n\n"
] | [
[
"torch.nn.Linear",
"torch.sigmoid",
"torch.mean"
]
] |
buchgr/tensorflow | [
"2938772a08ed02ced4663ca38168ab3f82e8f81b"
] | [
"tensorflow/python/keras/saving/model_architectures.py"
] | [
"# Copyright 2020 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for saving/loading function for keras Model.\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport collections\n\nfrom tensorflow.python import keras\n\n# Declaring namedtuple()\nModelFn = collections.namedtuple('ModelFn',\n ['model', 'input_shape', 'target_shape'])\n\n\ndef basic_sequential():\n \"\"\"Basic sequential model.\"\"\"\n model = keras.Sequential([\n keras.layers.Dense(3, activation='relu', input_shape=(3,)),\n keras.layers.Dense(2, activation='softmax'),\n ])\n return ModelFn(model, (None, 3), (None, 2))\n\n\ndef basic_sequential_deferred():\n \"\"\"Sequential model with deferred input shape.\"\"\"\n model = keras.Sequential([\n keras.layers.Dense(3, activation='relu'),\n keras.layers.Dense(2, activation='softmax'),\n ])\n return ModelFn(model, (None, 3), (None, 2))\n\n\ndef stacked_rnn():\n \"\"\"Stacked RNN model.\"\"\"\n inputs = keras.Input((None, 3))\n layer = keras.layers.RNN([keras.layers.LSTMCell(2) for _ in range(3)])\n x = layer(inputs)\n outputs = keras.layers.Dense(2)(x)\n model = keras.Model(inputs, outputs)\n return ModelFn(model, (None, 4, 3), (None, 2))\n\n\ndef lstm():\n \"\"\"LSTM model.\"\"\"\n inputs = keras.Input((None, 3))\n x = keras.layers.LSTM(4, return_sequences=True)(inputs)\n x = keras.layers.LSTM(3, return_sequences=True)(x)\n x = keras.layers.LSTM(2, return_sequences=False)(x)\n outputs = keras.layers.Dense(2)(x)\n model = keras.Model(inputs, outputs)\n return ModelFn(model, (None, 4, 3), (None, 2))\n\n\ndef multi_input_multi_output():\n \"\"\"Multi-input Multi-ouput model.\"\"\"\n body_input = keras.Input(shape=(None,), name='body')\n tags_input = keras.Input(shape=(2,), name='tags')\n\n x = keras.layers.Embedding(10, 4)(body_input)\n body_features = keras.layers.LSTM(5)(x)\n x = keras.layers.concatenate([body_features, tags_input])\n\n pred_1 = keras.layers.Dense(2, activation='sigmoid', name='priority')(x)\n pred_2 = keras.layers.Dense(3, activation='softmax', name='department')(x)\n\n model = keras.Model(\n inputs=[body_input, tags_input], outputs=[pred_1, pred_2])\n return ModelFn(model, [(None, 1), (None, 2)], [(None, 2), (None, 3)])\n\n\ndef nested_sequential_in_functional():\n \"\"\"A sequential model nested in a functional model.\"\"\"\n inner_model = keras.Sequential([\n keras.layers.Dense(3, activation='relu', input_shape=(3,)),\n keras.layers.Dense(2, activation='relu'),\n ])\n\n inputs = keras.Input(shape=(3,))\n x = inner_model(inputs)\n outputs = keras.layers.Dense(2, activation='softmax')(x)\n model = keras.Model(inputs, outputs)\n return ModelFn(model, (None, 3), (None, 2))\n\n\ndef seq_to_seq():\n \"\"\"Sequence to sequence model.\"\"\"\n num_encoder_tokens = 3\n num_decoder_tokens = 3\n latent_dim = 2\n encoder_inputs = keras.Input(shape=(None, 
num_encoder_tokens))\n encoder = keras.layers.LSTM(latent_dim, return_state=True)\n _, state_h, state_c = encoder(encoder_inputs)\n encoder_states = [state_h, state_c]\n decoder_inputs = keras.Input(shape=(None, num_decoder_tokens))\n decoder_lstm = keras.layers.LSTM(\n latent_dim, return_sequences=True, return_state=True)\n decoder_outputs, _, _ = decoder_lstm(\n decoder_inputs, initial_state=encoder_states)\n decoder_dense = keras.layers.Dense(num_decoder_tokens, activation='softmax')\n decoder_outputs = decoder_dense(decoder_outputs)\n model = keras.Model([encoder_inputs, decoder_inputs], decoder_outputs)\n return ModelFn(\n model, [(None, 2, num_encoder_tokens), (None, 2, num_decoder_tokens)],\n (None, 2, num_decoder_tokens))\n\n\ndef shared_layer_functional():\n \"\"\"Shared layer in a functional model.\"\"\"\n main_input = keras.Input(shape=(10,), dtype='int32', name='main_input')\n x = keras.layers.Embedding(\n output_dim=5, input_dim=4, input_length=10)(main_input)\n lstm_out = keras.layers.LSTM(3)(x)\n auxiliary_output = keras.layers.Dense(\n 1, activation='sigmoid', name='aux_output')(lstm_out)\n auxiliary_input = keras.Input(shape=(5,), name='aux_input')\n x = keras.layers.concatenate([lstm_out, auxiliary_input])\n x = keras.layers.Dense(2, activation='relu')(x)\n main_output = keras.layers.Dense(\n 1, activation='sigmoid', name='main_output')(x)\n model = keras.Model(\n inputs=[main_input, auxiliary_input],\n outputs=[main_output, auxiliary_output])\n return ModelFn(model, [(None, 10), (None, 5)], [(None, 1), (None, 1)])\n\n\ndef shared_sequential():\n \"\"\"Shared sequential model in a functional model.\"\"\"\n inner_model = keras.Sequential([\n keras.layers.Conv2D(2, 3, activation='relu'),\n keras.layers.Conv2D(2, 3, activation='relu'),\n ])\n inputs_1 = keras.Input((5, 5, 3))\n inputs_2 = keras.Input((5, 5, 3))\n x1 = inner_model(inputs_1)\n x2 = inner_model(inputs_2)\n x = keras.layers.concatenate([x1, x2])\n outputs = keras.layers.GlobalAveragePooling2D()(x)\n model = keras.Model([inputs_1, inputs_2], outputs)\n return ModelFn(model, [(None, 5, 5, 3), (None, 5, 5, 3)], (None, 4))\n\n\nclass _MySubclassModel(keras.Model):\n \"\"\"A subclass model.\"\"\"\n\n def __init__(self):\n super(_MySubclassModel, self).__init__(name='my_subclass_model')\n self.dense1 = keras.layers.Dense(8, activation='relu')\n self.dense2 = keras.layers.Dense(2, activation='softmax')\n self.bn = keras.layers.BatchNormalization()\n self.dp = keras.layers.Dropout(0.5)\n\n def call(self, inputs, **kwargs):\n x = self.dense1(inputs)\n x = self.dp(x)\n x = self.bn(x)\n return self.dense2(x)\n\n\ndef nested_subclassed_model():\n \"\"\"A subclass model nested in another subclass model.\"\"\"\n\n class NestedSubclassModel(keras.Model):\n \"\"\"A nested subclass model.\"\"\"\n\n def __init__(self):\n super(NestedSubclassModel, self).__init__()\n self.dense1 = keras.layers.Dense(4, activation='relu')\n self.dense2 = keras.layers.Dense(2, activation='relu')\n self.bn = keras.layers.BatchNormalization()\n self.inner_subclass_model = _MySubclassModel()\n\n def call(self, inputs):\n x = self.dense1(inputs)\n x = self.bn(x)\n x = self.inner_subclass_model(x)\n return self.dense2(x)\n\n return ModelFn(NestedSubclassModel(), (None, 3), (None, 2))\n\n\ndef nested_subclassed_in_functional_model():\n \"\"\"A subclass model nested in a functional model.\"\"\"\n inner_subclass_model = _MySubclassModel()\n inputs = keras.Input(shape=(3,))\n x = inner_subclass_model(inputs)\n x = keras.layers.BatchNormalization()(x)\n 
outputs = keras.layers.Dense(2, activation='softmax')(x)\n model = keras.Model(inputs, outputs)\n return ModelFn(model, (None, 3), (None, 2))\n\n\ndef nested_functional_in_subclassed_model():\n \"\"\"A functional model nested in a subclass model.\"\"\"\n def get_functional_model():\n inputs = keras.Input(shape=(4,))\n x = keras.layers.Dense(4, activation='relu')(inputs)\n x = keras.layers.BatchNormalization()(x)\n outputs = keras.layers.Dense(2)(x)\n return keras.Model(inputs, outputs)\n\n class NestedFunctionalInSubclassModel(keras.Model):\n \"\"\"A functional nested in subclass model.\"\"\"\n\n def __init__(self):\n super(NestedFunctionalInSubclassModel, self).__init__(\n name='nested_functional_in_subclassed_model')\n self.dense1 = keras.layers.Dense(4, activation='relu')\n self.dense2 = keras.layers.Dense(2, activation='relu')\n self.inner_functional_model = get_functional_model()\n\n def call(self, inputs):\n x = self.dense1(inputs)\n x = self.inner_functional_model(x)\n return self.dense2(x)\n return ModelFn(NestedFunctionalInSubclassModel(), (None, 3), (None, 2))\n\n\ndef shared_layer_subclassed_model():\n \"\"\"Shared layer in a subclass model.\"\"\"\n\n class SharedLayerSubclassModel(keras.Model):\n \"\"\"A subclass model with shared layers.\"\"\"\n\n def __init__(self):\n super(SharedLayerSubclassModel, self).__init__(\n name='shared_layer_subclass_model')\n self.dense = keras.layers.Dense(3, activation='relu')\n self.dp = keras.layers.Dropout(0.5)\n self.bn = keras.layers.BatchNormalization()\n\n def call(self, inputs):\n x = self.dense(inputs)\n x = self.dp(x)\n x = self.bn(x)\n return self.dense(x)\n return ModelFn(SharedLayerSubclassModel(), (None, 3), (None, 3))\n\n\ndef functional_with_keyword_args():\n \"\"\"A functional model with keyword args.\"\"\"\n inputs = keras.Input(shape=(3,))\n x = keras.layers.Dense(4)(inputs)\n x = keras.layers.BatchNormalization()(x)\n outputs = keras.layers.Dense(2)(x)\n\n model = keras.Model(inputs, outputs, name='m', trainable=False)\n return ModelFn(model, (None, 3), (None, 2))\n\n\nALL_MODELS = [\n ('basic_sequential', basic_sequential),\n ('basic_sequential_deferred', basic_sequential_deferred),\n ('stacked_rnn', stacked_rnn),\n ('lstm', lstm),\n ('multi_input_multi_output', multi_input_multi_output),\n ('nested_sequential_in_functional', nested_sequential_in_functional),\n ('seq_to_seq', seq_to_seq),\n ('shared_layer_functional', shared_layer_functional),\n ('shared_sequential', shared_sequential),\n ('nested_subclassed_model', nested_subclassed_model),\n ('nested_subclassed_in_functional_model',\n nested_subclassed_in_functional_model),\n ('nested_functional_in_subclassed_model',\n nested_functional_in_subclassed_model),\n ('shared_layer_subclassed_model', shared_layer_subclassed_model),\n ('functional_with_keyword_args', functional_with_keyword_args)\n]\n\n\ndef get_models(exclude_models=None):\n \"\"\"Get all models excluding the specificed ones.\"\"\"\n models = [model for model in ALL_MODELS\n if model[0] not in exclude_models]\n return models\n"
] | [
[
"tensorflow.python.keras.layers.BatchNormalization",
"tensorflow.python.keras.layers.LSTM",
"tensorflow.python.keras.Input",
"tensorflow.python.keras.layers.Embedding",
"tensorflow.python.keras.layers.Dense",
"tensorflow.python.keras.layers.concatenate",
"tensorflow.python.keras.Model",
"tensorflow.python.keras.layers.LSTMCell",
"tensorflow.python.keras.layers.Dropout",
"tensorflow.python.keras.layers.Conv2D",
"tensorflow.python.keras.layers.GlobalAveragePooling2D"
]
] |
gvashishtha/azureml-examples | [
"dc7ee4c01410757beeaa52a4f696882ca38e0be7"
] | [
"code/deployment/triton/bidaf_utils.py"
] | [
"\"\"\"score_bidaf.py\n\nScoring script for use with the Bi-directional Attention Flow model from the ONNX model zoo.\nhttps://github.com/onnx/models/tree/master/text/machine_comprehension/bidirectional_attention_flow\n\"\"\"\n\nimport json\nimport nltk\nimport numpy as np\nimport os\n\nfrom nltk import word_tokenize\nfrom utils import get_model_info, parse_model_http, triton_init, triton_infer\nfrom tritonclientutils import triton_to_np_dtype\n\n\ndef preprocess(text, dtype):\n \"\"\"Tokenizes text for use in the bidirectional attention flow model\n\n Parameters\n ---------\n text : str\n Text to be tokenized\n\n dtype : numpy datatype\n Datatype of the resulting array\n\n Returns\n ---------\n (np.array(), np.array())\n Tuple containing two numpy arrays with the tokenized\n words and chars, respectively.\n \n From: https://github.com/onnx/models/tree/master/text/machine_comprehension/bidirectional_attention_flow # noqa\n \"\"\"\n nltk.download(\"punkt\")\n tokens = word_tokenize(text)\n # split into lower-case word tokens, in numpy array with shape of (seq, 1)\n words = np.array([w.lower() for w in tokens], dtype=dtype).reshape(-1, 1)\n # split words into chars, in numpy array with shape of (seq, 1, 1, 16)\n chars = [[c for c in t][:16] for t in tokens]\n chars = [cs + [\"\"] * (16 - len(cs)) for cs in chars]\n chars = np.array(chars, dtype=dtype).reshape(-1, 1, 1, 16)\n return words, chars\n\n\ndef postprocess(context_words, answer):\n \"\"\"Post-process results to show the chosen result\n\n Parameters\n --------\n context_words : str\n Original context\n\n answer : InferResult\n Triton inference result containing start and\n end positions of desired answer\n\n Returns\n --------\n Numpy array containing the words from the context that\n answer the given query.\n \"\"\"\n\n start = answer.as_numpy(\"start_pos\")[0]\n end = answer.as_numpy(\"end_pos\")[0]\n print(f\"start is {start}, end is {end}\")\n return [w.encode() for w in context_words[start : end + 1].reshape(-1)]\n"
] | [
[
"numpy.array"
]
] |
wi1k1n/nrf-accelerations | [
"3075d63177e8ac04ee91784d5b0c56379335740f",
"3075d63177e8ac04ee91784d5b0c56379335740f"
] | [
"util/visualize_light_samples.py",
"util/prepare_flower_dataset.py"
] | [
"import argparse, sys, os, os.path as op, json, subprocess\nimport numpy as np\nimport open3d as o3d\nimport plotly.graph_objects as go\n\nPATH = 'D:\\\\edu\\\\UniBonn\\\\Study\\\\thesis\\\\codes\\\\NSVF\\\\'\n# PATH2MODEL = 'D:\\\\edu\\\\UniBonn\\\\Study\\\\thesis\\\\codes\\\\blender\\\\projects\\\\brdf_sphere\\\\brdf_sphere.ply'\n\nplotData = []\n\n\nlight_start = np.load(op.join(PATH, 'light_start.npy'))\nlight_dirs = np.load(op.join(PATH, 'light_dirs.npy'))\nhits = np.load(op.join(PATH, 'hits.npy'))\n\n# sample_xyz = ray_start + ray_dir * sampled_depth\n# sample_xyz = sample_xyz[np.tile(sample_mask, sample_mask + (3,))].reshape(sample_xyz.shape)\n\n# light_start = light_start[39, :25, :]\n# light_dirs = light_dirs[39, :25, :]\n\nlight_start = light_start[:5, ...]\nlight_dirs = light_dirs[:5, ...]\nhits = hits[:5, ...]\n\nfor i, ls in enumerate(light_start):\n\tcv = ls[hits[i] > 0]\n\tplotData.append(go.Scatter3d(x=cv[:, 0], y=cv[:, 1], z=cv[:, 2],\n\t\t\t\t\t\t\t\tname='v{}'.format(i),\n\t\t\t\t\t\t\t\tmarker=dict(size=1, color=\"blue\"),\n\t\t\t\t\t\t\t\tmode='markers')\n\t\t\t\t\t)\nfor i, d in enumerate(light_dirs):\n\tcd = d[hits[i] > 0]\n\tcd /= np.linalg.norm(cd, axis=0)\n\tcv = light_start[i][hits[i] > 0]\n\tcvt = cv + cd\n\tfor j, cp in enumerate(cv):\n\t\tplotData.append(go.Scatter3d(\n\t\t\t\t\t\t\tx=[cp[0], cvt[j, 0]],\n\t\t\t\t\t\t\ty=[cp[1], cvt[j, 1]],\n\t\t\t\t\t\t\tz=[cp[2], cvt[j, 2]],\n\t\t\t\t\t\t\tname='pts',\n\t\t\t\t\t\t\tmarker=dict(size=1, color=\"red\"),\n\t\t\t\t\t\t\tmode='lines')\n\t\t\t\t\t\t)\n\nfig = go.Figure(data=plotData)\nprint('Saving to {0}'.format(os.path.abspath('visualize_light_samples.html')))\nfig.write_html('visualize_light_samples.html', auto_open=True)",
"import os, os.path as op, re, json, random\nimport xml.etree.ElementTree as ET\nimport numpy as np\nfrom scipy.spatial.transform import Rotation as Rot\nimport cv2 as cv\n\nVOXEL_NUMS = 64\nINTREXTR_FROM_MEASXML = True\n\nXML_MEAS_PATH = '../realdata/flower_dome/meas.xml'\n# RAW_DATA_FOLDER = '../realdata/flower_dome/meas/tv000_045_cl133'\n# OUTPUT_FOLDER = '../realdata/flower_dome/dataset/'\n\nEXTR_MEAS_PATH = '../realdata/flower_dome/meas/tv000_045_cl133_masked/extrinsics.json'\nINTR_MEAS_PATH = '../realdata/flower_dome/meas/tv000_045_cl133_masked/intrinsics.json'\nRAW_DATA_FOLDER = '../realdata/flower_dome/meas/tv000_045_cl133_masked'\nOUTPUT_FOLDER = '../realdata/flower_dome/dataset_png/'\n\nNAME_POSTFIX = '_masked'\nEXTENSION = 'png' # 'jpg'\nPROCESS_IMAGES = False\nSHUFFLE = True\n# BBOX = [-0.75, -0.75, -1.5, 0.75, 0.75, -0.5]\n# BBOX = [-50, -50, -10, 50, 50, 80]\nBBOX = [-30, -30, -10, 30, 30, 70]\n# ROTATE = [0, -1, -1]\nUNDO_CV1 = True\nINVERT_TRANSLATION = False\n\nrandom.seed(1)\nnp.random.seed(1)\nprint('Processing measurements at path:')\nprint(RAW_DATA_FOLDER)\nprint('Out folder:', OUTPUT_FOLDER)\n\n\nRAW_DATA_FOLDER = op.abspath(RAW_DATA_FOLDER)\nOUTPUT_FOLDER = op.abspath(OUTPUT_FOLDER)\n\nos.makedirs(OUTPUT_FOLDER, exist_ok=True)\nassert op.isdir(RAW_DATA_FOLDER), 'Folder with measurements is not found'\n\n# dict of zoom levels\n# --- each zoom level is a dict of cameras\nzoomLevels = dict()\n\n##### Read extrinsics/intrinsics from meta files\n# If getting intrinsics/extrinsics from meas.xml\nif INTREXTR_FROM_MEASXML:\n\tXML_MEAS_PATH = op.abspath(XML_MEAS_PATH)\n\tassert op.isfile(XML_MEAS_PATH), 'File meas.xml is not found!'\n\n\tprint('Processing XML measurement file:')\n\tprint(XML_MEAS_PATH)\n\n\t# Parse meas.xml file and get zoom levels\n\ttree = ET.parse(op.abspath(XML_MEAS_PATH))\n\troot = tree.getroot()\n\tcamConfigs = root.find('cameras')\n\n\t# iterate over cameras\n\tfor camConf in camConfigs:\n\t\tidx, phi, theta = [camConf.attrib[k] for k in camConf.attrib.keys()]\n\t\tcalib = camConf.find('calib3d')\n\t\tfor zoom in calib:\n\t\t\tzmLevel = zoom.attrib.get('zoom')\n\t\t\tcalibData = [l.strip() for l in zoom.text.split('OpenCV ')[1].splitlines() if l.strip()]\n\t\t\tintrinsic = np.fromstring(' '.join(calibData[:3]), sep=' ').reshape((3, 3))\n\t\t\tdistort = np.fromstring(calibData[3], sep=' ')\n\t\t\ttranslation = -np.fromstring(calibData[4], sep=' ')\n\t\t\trotation = np.fromstring(calibData[5], sep=' ')\n\n\t\t\tif not zmLevel in zoomLevels:\n\t\t\t\tzoomLevels[zmLevel] = dict()\n\t\t\tif not idx in zoomLevels[zmLevel]:\n\t\t\t\tzoomLevels[zmLevel][idx] = dict()\n\t\t\tzoomLevels[zmLevel][idx] = {\n\t\t\t\t'cam_idx': idx,\n\t\t\t\t'phi': phi,\n\t\t\t\t'theta': theta,\n\t\t\t\t'intrinsic': intrinsic,\n\t\t\t\t'distort': distort,\n\t\t\t\t'translation': translation,\n\t\t\t\t'rotation': rotation\n\t\t\t}\n# if getting extrinsics/intrinsics from *.json files (meshroom + raytracing)\nelse:\n\tEXTR_MEAS_PATH = op.abspath(EXTR_MEAS_PATH)\n\tINTR_MEAS_PATH = op.abspath(INTR_MEAS_PATH)\n\tassert op.isfile(EXTR_MEAS_PATH) and op.isfile(INTR_MEAS_PATH), '*.json files are not found!'\n\n\tzoomLevels['-1'] = dict()\n\n\t# name_postfix = '_masked'\n\n\t# get intrinsics\n\twith open(INTR_MEAS_PATH) as fh:\n\t\tintrinsics = np.array(json.load(fh))\n\n\t# get extrinsics\n\twith open(EXTR_MEAS_PATH) as fh:\n\t\textr = json.load(fh)\n\tfor k, v in extr.items():\n\t\tidx = int(k[2:])\n\t\tif not k in zoomLevels['-1']:\n\t\t\tzoomLevels['-1'][str(idx)] = 
dict()\n\t\tzoomLevels['-1'][str(idx)] = {\n\t\t\t'cam_idx': idx,\n\t\t\t'phi': '\\d{3}',\n\t\t\t'theta': '\\d{3}',\n\t\t\t'intrinsic': intrinsics,\n\t\t\t'distort': None,\n\t\t\t'translation': None,\n\t\t\t'rotation': None,\n\t\t\t'extrinsics': np.array(v)\n\t\t}\n\n\t# still need distort vector\n\ttree = ET.parse(op.abspath(XML_MEAS_PATH))\n\troot = tree.getroot()\n\tcamConfigs = root.find('cameras')\n\n\t# iterate over cameras\n\tfor camConf in camConfigs:\n\t\tidx, phi, theta = [camConf.attrib[k] for k in camConf.attrib.keys()]\n\t\tidx = str(int(idx))\n\t\tif not idx in zoomLevels['-1']:\n\t\t\tcontinue\n\t\tcalib = camConf.find('calib3d')\n\t\tfor zoom in calib:\n\t\t\tzmLevel = zoom.attrib.get('zoom')\n\t\t\tcalibData = [l.strip() for l in zoom.text.split('OpenCV ')[1].splitlines() if l.strip()]\n\t\t\tdistort = np.fromstring(calibData[3], sep=' ')\n\t\t\tzoomLevels['-1'][idx].update({\n\t\t\t\t'distort': distort\n\t\t\t})\n\t\t\tbreak\n\nprint('Zoom levels: ', list(zoomLevels.keys()))\nzoomLevel = list(zoomLevels.keys())[0]\nprint('Cameras #{}: [{}] ... [{}]'.format(len(zoomLevels[zoomLevel].keys()), list(zoomLevels[zoomLevel].keys())[0], list(zoomLevels[zoomLevel].keys())[-1]))\n\n\n\n\n\n\n##### Create dataset\n# Iterate zoom levels\nmeasurementFNames = os.listdir(RAW_DATA_FOLDER)\nfor zoomIdx, cameras in zoomLevels.items():\n\tprint('>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')\n\tprint('>> Processing zoom level: ', zoomIdx)\n\tprint('>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')\n\n\tcurFolder = op.abspath(op.join(OUTPUT_FOLDER, 'zoom_' + str(zoomIdx)))\n\tos.makedirs(curFolder, exist_ok=True)\n\tprint('Outdir: ', curFolder)\n\n\tpathRGB = op.join(curFolder, 'rgb')\n\tpathPose = op.join(curFolder, 'pose')\n\tpathPosePL = op.join(curFolder, 'pose_pl')\n\tos.makedirs(pathRGB, exist_ok=True)\n\tos.makedirs(pathPose, exist_ok=True)\n\tos.makedirs(pathPosePL, exist_ok=True)\n\n\tadjust_ext = np.eye(4)\n\tif 'UNDO_CV1' in locals() and UNDO_CV1:\n\t\tcam1 = cameras['1']\n\t\tif 'extrinsics' in cam1 and cam1['extrinsics'] is not None:\n\t\t\text1 = cam1['extrinsics']\n\t\telse:\n\t\t\text1 = np.concatenate((\n\t\t\t\tnp.concatenate((np.array(Rot.from_rotvec(cam1['rotation']).as_matrix()),\n\t\t\t\t\t\t\t\tcam1['translation'][:, None],), axis=1),\n\t\t\t\tnp.r_[0, 0, 0, 1][None]), axis=0)\n\t\tadjust_ext = ext1\n\n\tjson_data = {'frames': []}\n\tcamPoints = []\n\tmeasImgPaths = {} # paths to real existing images\n\n\t# Create list of available images\n\tfor camIdx, cam in cameras.items():\n\t\t# Searching for measurement file in folder\n\t\tlightIdx = 133\n\t\tlightPhi = 270\n\t\tlightTheta = 75\n\t\tregText = '^cv0{0,2}' + str(camIdx) +'_tv0{0,2}' + str(cam['theta']) +'(\\.)?(\\d{0,2})?_pv0{0,2}' + str(cam['phi']) +'(\\.)?(\\d{0,2})?_cl0{0,2}' + str(lightIdx) +'_tl0{0,2}' + str(lightTheta) +'(\\.)?(\\d{0,2})?_pl0{0,2}' + str(lightPhi) +'(\\.)?(\\d{0,2})?_ISO400_FQ0_IDX1' + NAME_POSTFIX + '\\.' + EXTENSION + '$'\n\t\tregex = re.compile(regText)\n\t\tmeasFile = [fn for i, fn in enumerate(measurementFNames) if regex.match(fn)]\n\t\t# assert len(measFile) == 1, 'Either measurement file is not found or found more than one corresponding files. 
Regex: ' + regText\n\t\tif len(measFile) != 1:\n\t\t\tprint('Measurement cv{}_tv{}_pv{}_cl{}_tl{}_pl{} not found!'.format(camIdx, cam['theta'], cam['phi'], lightIdx, lightTheta, lightPhi))\n\t\t\tcontinue\n\t\tmeasPath = op.abspath(op.join(RAW_DATA_FOLDER, measFile[0]))\n\t\tmeasImgPaths[camIdx] = measPath\n\n\tsaveIdcs = [i for i in range(len(measImgPaths.keys()))]\n\n\tif 'SHUFFLE' in locals() and SHUFFLE:\n\t\trandom.shuffle(saveIdcs)\n\n\t# Loop for processing images\n\tfor camIdx, cam in cameras.items():\n\t\tprint('Cam #{}/{}{}{}'.format(camIdx,\n\t\t\t\t\t\t\t\t\t len(cameras.keys()),\n\t\t\t\t\t\t\t\t\t ' --> {}'.format(saveIdcs[0]) if len(saveIdcs) else '',\n\t\t\t\t\t\t\t\t\t ('' if camIdx in measImgPaths else ' skipped!')))\n\t\tif not camIdx in measImgPaths:\n\t\t\tcontinue\n\n\n\t\tmeasPath = measImgPaths[camIdx]\n\n\t\tif 'extrinsics' in cam and cam['extrinsics'] is not None:\n\t\t\textrinsics = cam['extrinsics']\n\t\telse:\n\t\t\t# Prepare extrinsics retrieved from meas.xml file\n\t\t\textrinsics = np.concatenate((\n\t\t\t\tnp.concatenate((np.array(Rot.from_rotvec(cam['rotation']).as_matrix()),\n\t\t\t\t\t\t\t\tcam['translation'][:, None],), axis=1),\n\t\t\t\tnp.r_[0, 0, 0, 1][None]), axis=0)\n\n\t\t# Invert camera directions\n\t\t# rm = np.eye(3)\n\t\t# rm[2, 2] = -1\n\n\t\textrinsics = adjust_ext @ extrinsics\n\n\t\tif 'INVERT_TRANSLATION' in locals() and INVERT_TRANSLATION:\n\t\t\textrinsics[:3, 3] *= -1\n\n\n\n\t\t# if 'ROTATE' in locals():\n\t\t# \trotM = np.concatenate((Rot.from_rotvec(ROTATE).as_matrix(), np.r_[0, 0, 1][None]), axis=0)\n\t\t# \trotM = np.concatenate((rotM, np.zeros(4)[:, None]), axis=1)\n\t\t# \textrinsics = rotM @ extrinsics\n\n\n\t\tcamPoint = (extrinsics @ np.r_[0, 0, 0, 1])[:3]\n\t\t# if 'ROTATE' in locals():\n\t\t# \tcamPoint = Rot.from_rotvec(ROTATE).as_matrix() @ camPoint\n\t\tcamPoints.append(camPoint)\n\n\n\t\t# Add data for transforms.json file (used for light/cam visualization)\n\t\tframe_data = {\n\t\t\t'file_path': measPath,\n\t\t\t'transform_matrix': np.linalg.inv(extrinsics).tolist(),\n\t\t\t'pl_transform_matrix': np.concatenate((np.zeros((3, 4)), np.r_[0, 0, 0, 1][None]), axis=0).tolist()\n\t\t}\n\t\tjson_data['frames'].append(frame_data)\n\n\n\n\t\t# Load image, undistort, (resize?) 
and save to dataset\n\t\tif 'PROCESS_IMAGES' in locals() and PROCESS_IMAGES:\n\t\t\timg = cv.imread(measPath, cv.IMREAD_UNCHANGED)\n\t\t\tundistortedImg = cv.undistort(img, cam['intrinsic'], cam['distort'])\n\t\t\t# cv.imwrite(op.join(pathRGB, '{:04d}.{}'.format(int(camIdx), EXTENSION)), undistortedImg)\n\t\t\tcv.imwrite(op.join(pathRGB, '{:04d}.{}'.format(saveIdcs[0], EXTENSION)), undistortedImg)\n\n\t\t# cv.imwrite(op.join(pathRGB, '{:04d}.jpg'.format(int(camIdx))), img)\n\n\t\twith open(op.join(pathPose, '{:04d}.txt'.format(saveIdcs[0])), 'w') as fo:\n\t\t\tfor ii, pose in enumerate(frame_data['transform_matrix']):\n\t\t\t\tprint(\" \".join([str(-p) if (((j == 2) | (j == 1)) and (ii < 3)) else str(p)\n\t\t\t\t\t\t\t\tfor j, p in enumerate(pose)]), file=fo)\n\n\t\twith open(op.join(pathPosePL, '{:04d}.txt'.format(saveIdcs[0])), 'w') as fo:\n\t\t\tfor ii, pose in enumerate(frame_data['pl_transform_matrix']):\n\t\t\t\tprint(\" \".join([str(-p) if (((j == 2) | (j == 1)) and (ii < 3)) else str(p)\n\t\t\t\t\t\t\t\tfor j, p in enumerate(pose)]), file=fo)\n\n\t\tsaveIdcs.pop(0)\n\n\tassert len(camPoints), 'No files have been processed'\n\n\t# Writing intrinsics from the last camera (since it is the same in all cameras for one zoom level)\n\tnp.savetxt(op.join(curFolder, 'intrinsics.txt'), cam['intrinsic'])\n\n\tif 'BBOX' in locals():\n\t\tbbox = BBOX\n\telse:\n\t\t# Estimate bbox simply by taking min/max coordinates of camera positions\n\t\tcamPoints = np.array(camPoints)\n\t\tbbox = camPoints.min(axis=0).tolist() + camPoints.max(axis=0).tolist()\n\t\t# large bbox: 1.4/-0.5/0.66\n\t\tshrinkRate = 0.6\n\t\tbbox = [b * (1 if i % 3 == 2 else shrinkRate) for i, b in enumerate(bbox)]\n\t\tbbox[2] = -bbox[5] * 0.1\n\t\tbbox[5] = bbox[5] * 0.36\n\tif 'VOXEL_SIZE' in locals():\n\t\tvoxel_size = VOXEL_SIZE\n\telse:\n\t\tvoxel_size = ((bbox[3]-bbox[0]) * (bbox[4]-bbox[1]) * (bbox[5]-bbox[2]) / VOXEL_NUMS) ** (1/3)\n\n\twith open(op.join(curFolder, 'bbox.txt'), 'w') as out_file:\n\t\tprint(\" \".join(['{:.5f}'.format(f) for f in bbox + [voxel_size]]), file=out_file)\n\n\twith open(op.join(curFolder, 'transforms.json'), 'w') as out_file:\n\t\tjson.dump(json_data, out_file, indent=4)"
] | [
[
"numpy.linalg.norm"
],
[
"scipy.spatial.transform.Rotation.from_rotvec",
"numpy.random.seed",
"numpy.linalg.inv",
"numpy.eye",
"numpy.fromstring",
"numpy.array",
"numpy.zeros"
]
] |
evenrus/myeloma_SNV | [
"b8faa365babcc5583bc7b8431e4c5053acb35cb9"
] | [
"myeloma_snv/commands.py"
] | [
"\"\"\"variants_process main command.\"\"\"\n\nimport timeit\nimport re\nfrom datetime import datetime\nfrom os.path import join\nimport pandas as pd\nimport numpy as np\nimport pybedtools as pyb\n\nSTART = timeit.default_timer()\n\n## IMPORT VARIANTS FILE\ndef import_variants(path):\n \"\"\"\n Determine filetype and import, returns pandas dataFrame\n \"\"\"\n if re.search('.csv$', path):\n try:\n variants = pd.read_csv(\n filepath_or_buffer=path,\n comment='#',\n low_memory=False)\n except NameError:\n raise Exception(f'Error when importing file {path}')\n\n print(f'Loaded file containing {variants.shape[0]} '\n f'variant calls. Processing...')\n return(variants)\n elif re.search('.tsv.gz$', path):\n try:\n variants = pd.read_csv(\n filepath_or_buffer=path,\n compression='gzip',\n sep='\\t',\n comment='#',\n low_memory=False)\n except NameError:\n raise Exception(f'Error when importing file {path}')\n print(f'Loaded file containing {variants.shape[0]} '\n f'variant calls. Processing...')\n return(variants)\n else:\n raise Exception(f'Input file {path} has unsupported '\n f'extension: try .csv or .tsv.gz')\n\n## ANNOTATION FUNCTIONS\ndef annotate_cosmic(variants):\n \"\"\"\n Generate columns:\n HEME_EXACT: Number of exact matches for hematopoietic and\n lymphoid tissue in cosmic.\n ANY_EXACT_POS: YES/NO for any EXACT or POS match in cosmic.\n \"\"\"\n heme_exact = []\n cosmic = variants['COSMIC'].tolist()\n search_1 = 'HAEMATOPOIETIC_AND_LYMPHOID_TISSUE'\n search_2 = r'(?<=HAEMATOPOIETIC_AND_LYMPHOID_TISSUE=)\\w+'\n for entry in cosmic:\n if pd.isnull(entry):\n heme_exact.append(None)\n else:\n first = entry.split('|')[0]\n if re.search('^GENOMIC_EXACT', first):\n if re.search(search_1, first):\n count = re.search(search_2, first)[0]\n heme_exact.append(count)\n else:\n heme_exact.append(None)\n else:\n heme_exact.append(None)\n variants['HEME_EXACT'] = heme_exact\n any_exact_pos = []\n for entry in cosmic:\n if pd.isnull(entry):\n any_exact_pos.append(0)\n elif re.search(\n 'GENOMIC_EXACT', entry) or re.search(\n 'GENOMIC_POS', entry):\n any_exact_pos.append(1)\n else:\n any_exact_pos.append(0)\n variants['ANY_EXACT_POS'] = any_exact_pos\n return(variants)\n\ndef annotate_genefreq(variants, genes):\n \"\"\"\n Generate column:\n MAX_MUTFREQ: Maximal mutation frequency in gene\n as previously published in large MM studies.\n \"\"\"\n freqlist = pd.read_excel(io=genes)\n freqlist['MAX_MUTFREQ'] = round(\n freqlist.filter(regex='freq').max(axis=1), 1)\n freqlist = freqlist[['GENE', 'MAX_MUTFREQ']]\n variants = pd.merge(variants, freqlist, how='left')\n return(variants)\n\ndef annotate_maf(variants):\n \"\"\"\n Generate column:\n MAX_MAF: Maximal MAF of variant in any normal database\n \"\"\"\n variants['MAX_MAF'] = 0 # Sets variable to 0 if frequency is not reported\n variants['MAX_MAF'] = variants.filter(regex='MAF').max(axis=1)\n return(variants)\n\ndef annotate_normals(variants, path_normals):\n \"\"\"\n Annotates variants with internal normal controls:\n Class: Close (chr, start within 10 bp),\n Pos (chr, start),\n Exact (chr, start, ref, alt)\n Frequency: Number of matches\n VAF: Median VAF\n Q25: 25th VAF-quartile\n Q75: 75th VAF-quartile\n Positions: START position\n Change: REF > ALT\n \"\"\"\n normals = pd.read_csv(\n filepath_or_buffer=path_normals)\n\n normals = normals.set_index(['CHR','START'])\n \n def annot_row(row, data):\n thres = 10\n chrom = str(row['CHR'])\n start = row['START']\n po = (chrom, start) in data.index\n close = data.ix[(chrom, start-thres):(chrom, 
start+thres)]\n if po:\n pos = data.loc[(chrom, start)]\n exact = pos[(pos['REF'] == row['REF']) & (pos['ALT'] == row['ALT'])]\n if len(exact) > 0:\n ex_out = ['genomic_exact',\n exact['count'].iloc[0],\n exact['MEDIAN_VAF'].iloc[0],\n exact['VAF_Q25'].iloc[0],\n exact['VAF_Q75'].iloc[0],\n start,\n exact['REF'].iloc[0] + '>' + exact['ALT'].iloc[0]\n ]\n return pd.Series(ex_out)\n else:\n pos_out = ['genomic_pos',\n ', '.join(pos['count'].astype(str)),\n ', '.join(pos['MEDIAN_VAF'].astype(str)),\n ', '.join(pos['VAF_Q25'].astype(str)),\n ', '.join(pos['VAF_Q75'].astype(str)),\n ', '.join([str(i) for i in pos.index.\\\n get_level_values('START').tolist()]),\n ', '.join([str(a) + '>' + str(b) for a, b in \\\n zip(pos['REF'], pos['ALT'])])\n ]\n return pd.Series(pos_out)\n elif close.shape[0] > 0:\n cl_out = ['genomic_close',\n ', '.join(close['count'].astype(str).tolist()),\n ', '.join(close['MEDIAN_VAF'].astype(str).tolist()),\n ', '.join(close['VAF_Q25'].astype(str).tolist()),\n ', '.join(close['VAF_Q75'].astype(str).tolist()),\n ', '.join([str(i) for i in close.index.\\\n get_level_values('START').tolist()]),\n ', '.join(set([str(a) + '>' + str(b) for a, b in \\\n zip(close['REF'].tolist(), close['ALT'].tolist())]))\n ]\n return pd.Series(cl_out)\n else:\n return pd.Series([None]*7)\n\n out_names = [\"_Class\", \"_Frequency\", \"_VAF\", \"_Q25\", \"_Q75\",\n \"_Position\", \"_Change\"]\n out_names = ['Normals' + s for s in out_names]\n\n variants[out_names] = variants.apply(lambda row: annot_row(row, normals),\n axis=1)\n return(variants)\n\ndef annotate_mmrf(variants, path_mmrf):\n \"\"\"\n Annotates variants with MMRF data:\n Class: Close (chr, start within 10 bp),\n Pos (chr, start),\n Exact (chr, start, ref, alt)\n Frequency: Number of matches\n VAF: Median VAF\n Q25: 25th VAF-quartile\n Q75: 75th VAF-quartile\n Positions: START position\n Change: REF > ALT\n \"\"\"\n mmrf = pd.read_csv(filepath_or_buffer=path_mmrf, sep='\\t')\n mmrf = mmrf[[\"#CHROM\", \"POS\", \"REF\", \"ALT\", \"GEN[1].AR\"]]\n mmrf = mmrf.drop_duplicates() ## What are these duplicates?\n mmrf.columns = [\"CHR\", \"START\", \"REF\", \"ALT\", \"TARGET_VAF\"]\n\n def annot_row(row, data):\n thres = 10\n subdat = data[data['CHR'].astype(str) == str(row['CHR'])]\n po = row['START'] in subdat['START'].as_matrix().astype(int)\n close = (abs(subdat['START'].as_matrix() \\\n .astype(int) - row['START']) < thres)\n if po:\n pos = subdat[subdat['START'] == row['START']]\n exact = pos[(pos['REF'] == row['REF']) & (pos['ALT'] == row['ALT'])]\n if len(exact) > 0:\n ex_out = ['genomic_exact',\n exact['REF'].count(),\n exact['TARGET_VAF'].median(),\n exact['TARGET_VAF'].quantile(q=0.25),\n exact['TARGET_VAF'].quantile(q=0.75),\n ', '.join(set(exact['START'].astype(str))),\n ', '.join(set([str(a) + '>' + str(b) for a, b in \\\n zip(exact['REF'].tolist(), exact['ALT'].tolist())]))\n ]\n return pd.Series(ex_out)\n else:\n pos_out = ['genomic_pos',\n pos['REF'].count(),\n pos['TARGET_VAF'].median(),\n pos['TARGET_VAF'].quantile(q=0.25),\n pos['TARGET_VAF'].quantile(q=0.75),\n ', '.join(set(pos['START'].astype(str))),\n ', '.join(set([str(a) + '>' + str(b) for a, b in \\\n zip(pos['REF'].tolist(), pos['ALT'].tolist())]))\n ]\n return pd.Series(pos_out)\n elif close.any():\n close = subdat[close]\n cl_out = ['genomic_close',\n ', '.join(close.groupby(['ALT', 'REF']).size() \\\n .astype(str).tolist()),\n ', '.join(close.groupby(['ALT', 'REF'])['TARGET_VAF'] \\\n .median().astype(str).tolist()),\n ', '.join(close.groupby(['ALT', 
'REF'])['TARGET_VAF'] \\\n .quantile(q=0.25).astype(str).tolist()),\n ', '.join(close.groupby(['ALT', 'REF'])['TARGET_VAF'] \\\n .quantile(q=0.75).astype(str).tolist()),\n ', '.join(set(close['START'].astype(str))),\n ', '.join(set([str(a) + '>' + str(b) for a, b in \\\n zip(close['REF'].tolist(), close['ALT'].tolist())]))\n ]\n return pd.Series(cl_out)\n else:\n return pd.Series([None]*7)\n\n out_names = [\"_Class\", \"_Frequency\", \"_VAF\", \"_Q25\", \"_Q75\",\n \"_Position\", \"_Change\"]\n out_names = ['MMRF' + s for s in out_names]\n\n variants[out_names] = variants.apply(lambda row: annot_row(row, mmrf),\n axis=1)\n return(variants)\n\ndef annotate_bolli(variants, path_bolli):\n \"\"\"\n Annotates variants with Bolli data:\n Class: Close (chr, start within 10 bp),\n Pos (chr, start),\n Exact (chr, start, ref, alt)\n Frequency: Number of matches\n Positions: START position\n Change: REF > ALT\n Annotation: Manual annotation category.\n \"\"\"\n bolli = pd.read_csv(filepath_or_buffer=path_bolli, sep='\\t')\n bolli = bolli[[\"CHR\", \"START\", \"WT\", \"MT\", \"Variant_class\"]]\n bolli.columns = [\"CHR\", \"START\", \"REF\", \"ALT\", \"ANNOTATION\"]\n def annot_row(row, data):\n thres = 10\n subdat = data[data['CHR'].astype(str) == str(row['CHR'])]\n po = row['START'] in subdat['START'].as_matrix().astype(int)\n close = (abs(subdat['START'].as_matrix() \\\n .astype(int) - row['START']) < thres)\n if po:\n pos = subdat[subdat['START'] == row['START']]\n exact = pos[(pos['REF'] == row['REF']) & (pos['ALT'] == row['ALT'])]\n if len(exact) > 0:\n ex_out = ['genomic_exact',\n exact['REF'].count(),\n ', '.join(set(exact['START'].astype(str))),\n ', '.join(set([str(a) + '>' + str(b) for a, b in \\\n zip(exact['REF'].tolist(), exact['ALT'].tolist())])),\n ', '.join(set(exact['ANNOTATION']))\n ]\n return pd.Series(ex_out)\n else:\n pos_out = ['genomic_pos',\n pos['REF'].count(),\n ', '.join(set(pos['START'].astype(str))),\n ', '.join(set([str(a) + '>' + str(b) for a, b in \\\n zip(pos['REF'].tolist(), pos['ALT'].tolist())])),\n ', '.join(set(pos['ANNOTATION']))\n ]\n return pd.Series(pos_out)\n elif close.any():\n close = subdat[close]\n cl_out = ['genomic_close',\n ', '.join(close.groupby(['ALT', 'REF']).size() \\\n .astype(str).tolist()),\n ', '.join(set(close['START'].astype(str))),\n ', '.join(set([str(a) + '>' + str(b) for a, b in \\\n zip(close['REF'].tolist(), close['ALT'].tolist())])),\n ', '.join(set(close['ANNOTATION']))\n ]\n return pd.Series(cl_out)\n else:\n return pd.Series([None]*5)\n\n out_names = [\"_Class\", \"_Frequency\",\n \"_Position\", \"_Change\", \"_Annotation\"]\n out_names = ['Bolli' + s for s in out_names]\n\n variants[out_names] = variants.apply(lambda row: annot_row(row, bolli),\n axis=1)\n return(variants)\n\ndef annotate_lohr(variants, lohr_path):\n \"\"\"\n Annotates variants with lohr data:\n Class: Close (chr, start within 10 bp),\n Pos (chr, start),\n Exact (chr, start, ref, alt)\n Frequency: Number of matches\n Positions: START position\n Change: REF > ALT\n \"\"\"\n lohr = pd.read_csv(filepath_or_buffer=lohr_path, sep='\\t')\n lohr = lohr[[\"Chromosome\", \"Start_Position\", \"Reference_Allele\",\n \"Tumor_Seq_Allele2\"]]\n lohr.columns = [\"CHR\", \"START\", \"REF\", \"ALT\"]\n\n def annot_row(row, data):\n thres = 10\n subdat = data[data['CHR'].astype(str) == str(row['CHR'])]\n po = row['START'] in subdat['START'].as_matrix().astype(int)\n close = (abs(subdat['START'].as_matrix() \\\n .astype(int) - row['START']) < thres)\n if po:\n pos = 
subdat[subdat['START'] == row['START']]\n exact = pos[(pos['REF'] == row['REF']) & (pos['ALT'] == row['ALT'])]\n if len(exact) > 0:\n ex_out = ['genomic_exact',\n exact['REF'].count(),\n ', '.join(set(exact['START'].astype(str))),\n ', '.join(set([str(a) + '>' + str(b) for a, b in \\\n zip(exact['REF'].tolist(), exact['ALT'].tolist())]))\n ]\n return pd.Series(ex_out)\n else:\n pos_out = ['genomic_pos',\n pos['REF'].count(),\n ', '.join(set(pos['START'].astype(str))),\n ', '.join(set([str(a) + '>' + str(b) for a, b in \\\n zip(pos['REF'].tolist(), pos['ALT'].tolist())]))\n ]\n return pd.Series(pos_out)\n elif close.any():\n close = subdat[close]\n cl_out = ['genomic_close',\n ', '.join(close.groupby(['ALT', 'REF']).size() \\\n .astype(str).tolist()),\n ', '.join(set(close['START'].astype(str))),\n ', '.join(set([str(a) + '>' + str(b) for a, b in \\\n zip(close['REF'].tolist(), close['ALT'].tolist())]))\n ]\n return pd.Series(cl_out)\n else:\n return pd.Series([None]*4)\n\n out_names = [\"_Class\", \"_Frequency\", \n \"_Position\", \"_Change\"]\n out_names = ['Lohr' + s for s in out_names]\n\n variants[out_names] = variants.apply(lambda row: annot_row(row, lohr),\n axis=1)\n return(variants)\n\ndef annotate_mytype(variants, path_mytype):\n \"\"\"\n Annotates variants with previous myTYPE data:\n Class: Close (chr, start within 10 bp),\n Pos (chr, start),\n Exact (chr, start, ref, alt)\n Frequency: Number of matches\n VAF: Median VAF\n Q25: 25th VAF-quartile\n Q75: 75th VAF-quartile\n Positions: START position\n Change: REF > ALT\n Annotation: Manual annotation category.\n \"\"\"\n mytype = pd.read_csv(filepath_or_buffer=path_mytype, sep=',')\n mytype = mytype[[\"CHR\", \"START\", \"REF\", \"ALT\",\n \"MANUAL_ANNOTATION\", \"TARGET_VAF\"]]\n mytype.columns = [\"CHR\", \"START\", \"REF\", \"ALT\",\n \"ANNOTATION\", \"TARGET_VAF\"]\n\n def annot_row(row, data):\n thres = 10\n subdat = data[data['CHR'].astype(str) == str(row['CHR'])]\n po = row['START'] in subdat['START'].as_matrix().astype(int)\n close = (abs(subdat['START'].as_matrix() \\\n .astype(int) - row['START']) < thres)\n if po:\n pos = subdat[subdat['START'] == row['START']]\n exact = pos[(pos['REF'] == row['REF']) & (pos['ALT'] == row['ALT'])]\n if len(exact) > 0:\n ex_out = ['genomic_exact',\n exact['REF'].count(),\n exact['TARGET_VAF'].median(),\n exact['TARGET_VAF'].quantile(q=0.25),\n exact['TARGET_VAF'].quantile(q=0.75),\n ', '.join(set(exact['START'].astype(str))),\n ', '.join(set([str(a) + '>' + str(b) for a, b in \\\n zip(exact['REF'].tolist(), exact['ALT'].tolist())])),\n ', '.join(set(exact['ANNOTATION']))\n ]\n return pd.Series(ex_out)\n else:\n pos_out = ['genomic_pos',\n pos['REF'].count(),\n pos['TARGET_VAF'].median(),\n pos['TARGET_VAF'].quantile(q=0.25),\n pos['TARGET_VAF'].quantile(q=0.75),\n ', '.join(set(pos['START'].astype(str))),\n ', '.join(set([str(a) + '>' + str(b) for a, b in \\\n zip(pos['REF'].tolist(), pos['ALT'].tolist())])),\n ', '.join(set(pos['ANNOTATION']))\n ]\n return pd.Series(pos_out)\n elif close.any():\n close = subdat[close]\n cl_out = ['genomic_close',\n ', '.join(close.groupby(['ALT', 'REF']).size() \\\n .astype(str).tolist()),\n ', '.join(close.groupby(['ALT', 'REF'])['TARGET_VAF'] \\\n .median().astype(str).tolist()),\n ', '.join(close.groupby(['ALT', 'REF'])['TARGET_VAF'] \\\n .quantile(q=0.25).astype(str).tolist()),\n ', '.join(close.groupby(['ALT', 'REF'])['TARGET_VAF'] \\\n .quantile(q=0.75).astype(str).tolist()),\n ', '.join(set(close['START'].astype(str))),\n ', '.join(set([str(a) + 
'>' + str(b) for a, b in \\\n zip(close['REF'].tolist(), close['ALT'].tolist())])),\n ', '.join(set(close['ANNOTATION']))\n ]\n return pd.Series(cl_out)\n else:\n return pd.Series([None]*8)\n\n out_names = [\"_Class\", \"_Frequency\", \"_VAF\", \"_Q25\", \"_Q75\",\n \"_Position\", \"_Change\", \"_Annotation\"]\n out_names = ['myTYPE' + s for s in out_names]\n\n variants[out_names] = variants.apply(lambda row: annot_row(row, mytype),\n axis=1)\n return(variants)\n\ndef annotate_known(variants, mytype):\n \"\"\"\n Generate columns:\n KNOWN_MM = 1 if previously found in MM. Includes any match in MMRF,\n Bolli and Lohr, and UNKNOWN/LIKELY/ONCOGENIC by mytype\n \"\"\"\n\n # Only run function if data is passed to the optional variable \"mytype\"\n if mytype:\n mytype_annot = variants['myTYPE_Annotation'].tolist()\n myTYPE_somatic = []\n for entry in mytype_annot:\n if pd.isnull(entry):\n myTYPE_somatic.append(0)\n else:\n search_1 = re.search('ONCOGENIC', entry)\n search_2 = re.search('LIKELY', entry)\n search_3 = re.search('UNKNOWN', entry)\n if search_1 or search_2 or search_3:\n myTYPE_somatic.append(1)\n else:\n myTYPE_somatic.append(0)\n variants['myTYPE_somatic'] = myTYPE_somatic\n else:\n variants['myTYPE_somatic'] = 0\n\n # Define column KNOWN_MM based on annotation data\n variants['KNOWN_MM'] = np.where((variants['myTYPE_somatic'] == 1) |\n (variants['MMRF_Class'].notnull()) |\n (variants['Bolli_Class'].notnull()) |\n (variants['Lohr_Class'].notnull()), 1, 0)\n\n variants = variants.drop('myTYPE_somatic', axis=1)\n return(variants)\n\n## APPLY FLAGS FOR FILTERING\ndef filter_panel(variants, genes_bed):\n \"\"\"\n Filter MFLAG_PANEL: 1 if variant is not in BED file of regions to keep\n \"\"\"\n variants_bed = variants[[\"CHR\", \"START\", \"END\", \"ID_VARIANT\"]]\n # Turning variants file into bed format\n variants_bed = pyb.BedTool.from_dataframe(variants_bed)\n # Import list of genes in panel as bed format\n genes = pyb.BedTool(genes_bed)\n # Bed file with intersection of panel and input file\n variants_inter = variants_bed.intersect(genes, u=True)\n # Empty list for names of variants in intersection bed file\n flaglist = []\n\n # If bed file is not empty\n if not variants_inter.head(n=1, as_string=True) == '':\n # Convert intersect bed file to data frame; subset col with variant ID\n flaglist = pyb.BedTool.to_dataframe(variants_inter)['name']\n # Flag variant if ID is not in overlap list\n variants['MFLAG_PANEL'] = np.where(variants.ID_VARIANT.isin(flaglist), 0, 1)\n return(variants)\n\ndef filter_drop(variants, genes_drop):\n \"\"\"\n Filter MFLAG_DROP: 1 if variant is in list of genes to drop.\n \"\"\"\n drop = pd.read_excel(io=genes_drop)['GENE']\n variants['MFLAG_DROP'] = np.where(variants.GENE.isin(drop), 1, 0)\n return(variants)\n\ndef filter_igh(variants, igh_path):\n \"\"\"\n Filter MFLAG_IGH: 1 if variant in IGH locus\n \"\"\"\n variants_bed = variants[[\"CHR\", \"START\", \"END\", \"ID_VARIANT\"]]\n variants_bed = pyb.BedTool.from_dataframe(variants_bed)\n igh = pyb.BedTool(igh_path)\n variants_inter = variants_bed.intersect(igh, u=True)\n flaglist = []\n if not variants_inter.head(n=1, as_string=True) == '':\n flaglist = pyb.BedTool.to_dataframe(variants_inter)['name']\n variants['MFLAG_IGH'] = np.where(variants.ID_VARIANT.isin(flaglist), 1, 0)\n return(variants)\n\ndef filter_maf(variants):\n \"\"\"\n Filter MFLAG_MAF: 1 if variant MAF > 3 % in exac/1000genomes\n \"\"\"\n variants['MFLAG_MAF'] = np.where(variants['MAX_MAF'] > 0.03, 1, 0)\n return(variants)\n\ndef 
filter_maf_cosmic(variants, mode):\n \"\"\"\n Filter MFLAG_MAFCOS: 1 if variant has >0.1 % MAF and not in COSMIC\n For SNVs: Only counts exact and pos as in cosmic\n For Indels: Counts all COSMIC.\n \"\"\"\n if mode == 'snv':\n variants['MFLAG_MAFCOS'] = np.where(\n (variants['MAX_MAF'] > 0.001) &\n (variants['ANY_EXACT_POS'] == 0), 1, 0)\n if mode == 'indel':\n variants['MFLAG_MAFCOS'] = np.where(\n (variants['MAX_MAF'] > 0.001) &\n (variants['COSMIC'].isnull()), 1, 0)\n return(variants)\n\ndef filter_nonpass(variants, mode):\n \"\"\"\n Filter MFLAG_MAF: 1 if NON-PASS AND not in cosmic or previously known in MM\n Counts SNVs and Indels as \"in cosmic\" like for MAFCOS flag.\n For SNV: Only removes missense mutations with this flag\n \"\"\"\n if mode == 'snv':\n drop = ['non_synonymous_codon']\n variants['MFLAG_NONPASS'] = np.where(\n (variants['FILTER'] != \"PASS\") &\n (variants['EFFECT'].isin(drop)) &\n (variants['ANY_EXACT_POS'] == 0) &\n (variants['KNOWN_MM'] == 0), 1, 0)\n return(variants)\n variants['MFLAG_NONPASS'] = np.where(\n (variants['FILTER'] != \"PASS\") &\n (variants['COSMIC'].isnull()) &\n (variants['KNOWN_MM'] == 0), 1, 0)\n return(variants)\n\ndef filter_normals(variants):\n \"\"\"\n Filter MFLAG_NORM: 1 if variant has genomic exact or pos in normals\n \"\"\"\n match = ['genomic_exact', 'genomic_pos']\n variants['MFLAG_NORM'] = np.where(variants['Normals_Class'] \\\n .isin(match), 1, 0)\n return(variants)\n\ndef filter_vaf(variants):\n \"\"\"\n Filter MFLAG_VAF: 1 if target VAF < 1 %\n \"\"\"\n variants['MFLAG_VAF'] = np.where(\n (variants['TARGET_VAF'] < 0.01) & (variants['FILTER'] != 'PASS'), 1, 0)\n return(variants)\n\ndef filter_bidir(variants):\n \"\"\"\n Filter MFLAG_BIDIR: 1 if BIDIR = 0\n \"\"\"\n variants['MFLAG_BIDIR'] = np.where(variants['BIDIR'] == 0, 1, 0)\n return(variants)\n\n## FILTER AND EXPORT\ndef namecore(infile):\n \"\"\"\n Returns the \"core\" of the input file name, for use in output files.\n \"\"\"\n name = infile.split('/')[-1]\n if re.search('.csv$', name):\n return(re.sub('.csv$', '', name))\n return(re.sub('.tsv.gz$', '', name))\n\ndef filter_export(variants, outdir, name, mode):\n \"\"\"\n Function properties:\n 1. Filters variants into \"good\" or \"bad\" based on flags.\n 2. Writes files with good and bad variants.\n 3. 
Creates processing summary report.\n \"\"\"\n # Filtering\n good = variants[variants.filter(regex='MFLAG').sum(axis=1) == 0]\n bad = variants[variants.filter(regex='MFLAG').sum(axis=1) > 0]\n\n # Define output names\n date = str(datetime.today()).split()[0].split(\"-\")\n name = '_'.join([name, '_'.join(date)])\n goodname = join(outdir, name + '_goodcalls.csv')\n badname = join(outdir, name + '_badcalls.csv')\n textname = join(outdir, name + '_report.txt')\n\n # Export files\n good.to_csv(\n path_or_buf=goodname,\n index=False)\n bad.to_csv(\n path_or_buf=badname,\n index=False)\n\n # Summary report\n stop = timeit.default_timer()\n\n with open(textname, 'w') as f:\n # Call the \"Version\" file for version info?\n f.write(\n f'Somatic variant processing for myTYPE\\nv.1.0\\n '\n f'Completed time: {str(datetime.today()).split(\".\")[0]}\\n')\n f.write(f'Run time: {round(stop-START, 3)}\\n')\n f.write(f'####\\nMode: {mode}\\n')\n f.write(f'Imported calls: {variants.shape[0]}\\n')\n f.write('Flagging variants for filtering:\\n')\n f.write(f'MFLAG_PANEL: Variant not in BED file of '\n f'regions to keep: {variants[\"MFLAG_PANEL\"].sum()}\\n')\n f.write(f'MFLAG_DROP: Variant in excluded gene: '\n f'{variants[\"MFLAG_DROP\"].sum()}\\n')\n f.write(f'MFLAG_IGH: In IGH locus: {variants[\"MFLAG_IGH\"].sum()}\\n')\n f.write(f'MFLAG_MAF: MAF > 3 % in exac/1000genomes: '\n f'{variants[\"MFLAG_MAF\"].sum()}\\n')\n f.write(f'MFLAG_MAFCOS: MAF > 0.1 % and not in COSMIC '\n f'(exact/pos): {variants[\"MFLAG_MAFCOS\"].sum()}\\n')\n f.write(f'MFLAG_NONPASS: NON-PASS IF not in cosmic, previously '\n f'known in MM, not stopgain, splicesite..: '\n f'{variants[\"MFLAG_NONPASS\"].sum()}\\n')\n f.write(f'MFLAG_NORM: Variant exact or pos in >0 good normals: '\n f'{variants[\"MFLAG_NORM\"].sum()}\\n')\n f.write(f'MFLAG_VAF: Remove NON-PASS calls with target '\n f'VAF < 1 %: {variants[\"MFLAG_VAF\"].sum()}\\n')\n f.write(f'MFLAG_BIDIR: Remove variants BIDIR = 0 (only reads '\n f'on one strand): {variants[\"MFLAG_BIDIR\"].sum(0)}\\n')\n f.write(f'Removing calls with >= 1 MFLAG: {bad.shape[0]}\\n')\n f.write(f'Calls passed filters: {good.shape[0]}\\n')\n return()\n\n# Main Function\ndef process(\n mode,\n infile,\n outdir,\n genes,\n genes_drop,\n genes_bed,\n igh,\n mmrf,\n bolli,\n lohr,\n normals,\n mytype):\n \"\"\"Main function to process myTYPE SNV and indel output\"\"\"\n ## IMPORTING DATA\n variants = import_variants(infile)\n\n ## ANNOTATIONS\n variants = annotate_cosmic(variants)\n if genes:\n # Only runs if a path was passed to optional argument \"gene\"\n variants = annotate_genefreq(variants, genes)\n # Replace this with mutation frequency from MMRF? 
(and other raw data?)\n variants = annotate_maf(variants)\n variants = annotate_normals(variants, normals)\n variants = annotate_mmrf(variants, mmrf)\n variants = annotate_bolli(variants, bolli)\n variants = annotate_lohr(variants, lohr)\n if mytype:\n # Only runs if a path was passed to optional argument \"mytype\"\n variants = annotate_mytype(variants, mytype)\n variants = annotate_known(variants, mytype)\n\n ## FILTERS\n variants = filter_panel(variants, genes_bed)\n if genes_drop:\n variants = filter_drop(variants, genes_drop)\n variants = filter_igh(variants, igh)\n variants = filter_maf(variants)\n variants = filter_maf_cosmic(variants, mode)\n variants = filter_nonpass(variants, mode)\n variants = filter_normals(variants)\n variants = filter_vaf(variants)\n variants = filter_bidir(variants)\n\n ## OUTPUT\n name = namecore(infile)\n filter_export(variants, outdir, name, mode)\n print('Variant processing complete')\n return(variants) # Added this here - may be necessary for test?\n"
] | [
[
"pandas.merge",
"pandas.read_excel",
"pandas.read_csv",
"pandas.Series",
"pandas.isnull",
"numpy.where"
]
] |
rubyvanrooyen/katdal | [
"e90bca3c2cd6305492d03ddc9aa48e67c1800428"
] | [
"katdal/h5datav2.py"
] | [
"################################################################################\n# Copyright (c) 2011-2021, National Research Foundation (SARAO)\n#\n# Licensed under the BSD 3-Clause License (the \"License\"); you may not use\n# this file except in compliance with the License. You may obtain a copy\n# of the License at\n#\n# https://opensource.org/licenses/BSD-3-Clause\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n################################################################################\n\n\"\"\"Data accessor class for HDF5 files produced by KAT-7 correlator.\"\"\"\n\nimport logging\nimport pathlib\nimport secrets\n\nimport h5py\nimport katpoint\nimport numpy as np\n\nfrom .categorical import CategoricalData, sensor_to_categorical\nfrom .dataset import (DEFAULT_SENSOR_PROPS, DEFAULT_VIRTUAL_SENSORS,\n BrokenFile, DataSet, Subarray, WrongVersion,\n _robust_target, _selection_to_list)\nfrom .flags import DESCRIPTIONS as FLAG_DESCRIPTIONS\nfrom .flags import NAMES as FLAG_NAMES\nfrom .lazy_indexer import LazyIndexer, LazyTransform\nfrom .sensordata import RecordSensorGetter, SensorCache, to_str\nfrom .spectral_window import SpectralWindow\n\nlogger = logging.getLogger(__name__)\n\n# Simplify the scan activities to derive the basic state of the antenna (slewing, scanning, tracking, stopped)\nSIMPLIFY_STATE = {'scan_ready': 'slew', 'scan': 'scan', 'scan_complete': 'scan', 'track': 'track', 'slew': 'slew'}\n\nSENSOR_PROPS = dict(DEFAULT_SENSOR_PROPS)\nSENSOR_PROPS.update({\n '*activity': {'greedy_values': ('slew', 'stop'), 'initial_value': 'slew',\n 'transform': lambda act: SIMPLIFY_STATE.get(act, 'stop')},\n '*target': {'initial_value': '', 'transform': _robust_target},\n # These float sensors are actually categorical by nature as they represent user settings\n 'RFE/center-frequency-hz': {'categorical': True},\n 'RFE/rfe7.lo1.frequency': {'categorical': True},\n '*attenuation': {'categorical': True},\n '*attenuator.horizontal': {'categorical': True},\n '*attenuator.vertical': {'categorical': True},\n})\n\nSENSOR_ALIASES = {\n 'nd_coupler': 'rfe3.rfe15.noise.coupler.on',\n 'nd_pin': 'rfe3.rfe15.noise.pin.on',\n}\n\n\ndef _calc_azel(cache, name, ant):\n \"\"\"Calculate virtual (az, el) sensors from actual ones in sensor cache.\"\"\"\n base_name = 'pos.actual-scan-azim' if name.endswith('az') else 'pos.actual-scan-elev'\n real_sensor = f'Antennas/{ant}/{base_name}'\n cache[name] = sensor_data = katpoint.deg2rad(cache.get(real_sensor))\n return sensor_data\n\n\nVIRTUAL_SENSORS = dict(DEFAULT_VIRTUAL_SENSORS)\nVIRTUAL_SENSORS.update({'Antennas/{ant}/az': _calc_azel, 'Antennas/{ant}/el': _calc_azel})\n\nWEIGHT_NAMES = ('precision',)\nWEIGHT_DESCRIPTIONS = ('visibility precision (inverse variance, i.e. 
1 / sigma^2)',)\n\n# -------------------------------------------------------------------------------------------------\n# -- Utility functions\n# -------------------------------------------------------------------------------------------------\n\n\ndef get_single_value(group, name):\n \"\"\"Return single value from attribute or dataset with given name in group.\n\n If `name` is an attribute of the HDF5 group `group`, it is returned,\n otherwise it is interpreted as an HDF5 dataset of `group` and the last value\n of `name` is returned. This is meant to retrieve static configuration values\n that potentially get set more than once during capture initialisation, but\n then does not change during actual capturing.\n\n Parameters\n ----------\n group : :class:`h5py.Group` object\n HDF5 group to query\n name : string\n Name of HDF5 attribute or dataset to query\n\n Returns\n -------\n value : object\n Attribute or last value of dataset\n\n \"\"\"\n attrs = group.attrs\n value = attrs[name] if name in attrs else group[name][-1]\n return to_str(value)\n\n\ndef dummy_dataset(name, shape, dtype, value):\n \"\"\"Dummy HDF5 dataset containing a single value.\n\n This creates a dummy HDF5 dataset in memory containing a single value. It\n can have virtually unlimited size as the dataset is highly compressed.\n\n Parameters\n ----------\n name : string\n Name of dataset\n shape : sequence of int\n Shape of dataset\n dtype : :class:`numpy.dtype` object or equivalent\n Type of data stored in dataset\n value : object\n All elements in the dataset will equal this value\n\n Returns\n -------\n dataset : :class:`h5py.Dataset` object\n Dummy HDF5 dataset\n\n \"\"\"\n # It is important to randomise the filename as h5py does not allow two writable file objects with the same name\n # Without this randomness katdal can only open one file requiring a dummy dataset\n dummy_file = h5py.File(f'{name}_{secrets.token_hex(8)}.h5', 'x', driver='core', backing_store=False)\n return dummy_file.create_dataset(name, shape=shape, maxshape=shape,\n dtype=dtype, fillvalue=value, compression='gzip')\n\n# -------------------------------------------------------------------------------------------------\n# -- CLASS : H5DataV2\n# -------------------------------------------------------------------------------------------------\n\n\nclass H5DataV2(DataSet):\n \"\"\"Load HDF5 format version 2 file produced by KAT-7 correlator.\n\n For more information on attributes, see the :class:`DataSet` docstring.\n\n Parameters\n ----------\n filename : string\n Name of HDF5 file\n ref_ant : string, optional\n Name of reference antenna, used to partition data set into scans\n (default is first antenna in use)\n time_offset : float, optional\n Offset to add to all correlator timestamps, in seconds\n mode : string, optional\n HDF5 file opening mode (e.g. 
'r+' to open file in write mode)\n quicklook : {False, True}\n True if synthesised timestamps should be used to partition data set even\n if real timestamps are irregular, thereby avoiding the slow loading of\n real timestamps at the cost of slightly inaccurate label borders\n keepdims : {False, True}, optional\n Force vis / weights / flags to be 3-dimensional, regardless of selection\n kwargs : dict, optional\n Extra keyword arguments, typically meant for other formats and ignored\n\n Attributes\n ----------\n file : :class:`h5py.File` object\n Underlying HDF5 file, exposed via :mod:`h5py` interface\n\n \"\"\"\n\n def __init__(self, filename, ref_ant='', time_offset=0.0, mode='r',\n quicklook=False, keepdims=False, **kwargs):\n # The closest thing to a capture block ID is the Unix timestamp in the original filename\n # There is only one (unnamed) output stream, so leave off the stream name\n cbid = pathlib.Path(filename).stem\n DataSet.__init__(self, cbid, ref_ant, time_offset, url=filename)\n\n # Load file\n self.file, self.version = H5DataV2._open(filename, mode)\n f = self.file\n\n # Load main HDF5 groups\n data_group, sensors_group, config_group = f['Data'], f['MetaData/Sensors'], f['MetaData/Configuration']\n markup_group = f['Markup']\n # Get observation script parameters, with defaults\n for k, v in config_group['Observation'].items():\n # For KAT-7 (v2.1) data, strip the 'script_' prefix from most parameters\n k = k if self.version > '2.1' or k in ('script_name', 'script_arguments') else k[7:]\n self.obs_params[str(k)] = to_str(v)\n self.observer = self.obs_params.get('observer', '')\n self.description = self.obs_params.get('description', '')\n self.experiment_id = self.obs_params.get('experiment_id', '')\n # Get script log from History group\n self.obs_script_log = f['History/script_log']['log'].tolist()\n\n # ------ Extract timestamps ------\n\n self.dump_period = get_single_value(config_group['Correlator'], 'int_time')\n # Obtain visibility data and timestamps\n self._vis = data_group['correlator_data']\n self._timestamps = data_group['timestamps']\n num_dumps = len(self._timestamps)\n if num_dumps != self._vis.shape[0]:\n raise BrokenFile(f'Number of timestamps received from k7_capture ({num_dumps}) '\n f'differs from number of dumps in data ({self._vis.shape[0]})')\n # Discard the last sample if the timestamp is a duplicate (caused by stop packet in k7_capture)\n num_dumps = (num_dumps - 1) if num_dumps > 1 and (self._timestamps[-1] == self._timestamps[-2]) else num_dumps\n # Do quick test for uniform spacing of timestamps (necessary but not sufficient)\n expected_dumps = (self._timestamps[num_dumps - 1] - self._timestamps[0]) / self.dump_period + 1\n # The expected_dumps should always be an integer (like num_dumps), unless the timestamps and/or dump period\n # are messed up in the file, so the threshold of this test is a bit arbitrary (e.g. 
could use > 0.5)\n irregular = abs(expected_dumps - num_dumps) >= 0.01\n if irregular:\n # Warn the user, as this is anomalous\n logger.warning(\"Irregular timestamps detected in file '%s': expected %.3f dumps \"\n \"based on dump period and start/end times, got %d instead\",\n filename, expected_dumps, num_dumps)\n if quicklook:\n logger.warning(\"Quicklook option selected - partitioning data based on synthesised timestamps instead\")\n if not irregular or quicklook:\n # Estimate timestamps by assuming they are uniformly spaced (much quicker than loading them from file).\n # This is useful for the purpose of segmenting data set, where accurate timestamps are not that crucial.\n # The real timestamps are still loaded when the user explicitly asks for them.\n data_timestamps = self._timestamps[0] + self.dump_period * np.arange(num_dumps)\n else:\n # Load the real timestamps instead (could take several seconds on a large data set)\n data_timestamps = self._timestamps[:num_dumps]\n # Move timestamps from start of each dump to the middle of the dump\n data_timestamps += 0.5 * self.dump_period + self.time_offset\n if data_timestamps[0] < 1e9:\n logger.warning(\"File '%s' has invalid first correlator timestamp (%f)\", filename, data_timestamps[0])\n self._time_keep = np.ones(num_dumps, dtype=np.bool)\n self.start_time = katpoint.Timestamp(data_timestamps[0] - 0.5 * self.dump_period)\n self.end_time = katpoint.Timestamp(data_timestamps[-1] + 0.5 * self.dump_period)\n self._keepdims = keepdims\n\n # ------ Extract flags ------\n\n # Check if flag group is present, else use dummy flag data\n self._flags = markup_group['flags'] if 'flags' in markup_group else \\\n dummy_dataset('dummy_flags', shape=self._vis.shape[:-1], dtype=np.uint8, value=0)\n # Obtain flag descriptions from file or recreate default flag description table\n self._flags_description = to_str(markup_group['flags_description'][:]) \\\n if 'flags_description' in markup_group else np.array(list(zip(FLAG_NAMES, FLAG_DESCRIPTIONS)))\n self._flags_select = np.array([0], dtype=np.uint8)\n self._flags_keep = 'all'\n\n # ------ Extract weights ------\n\n # Check if weight group present, else use dummy weight data\n self._weights = markup_group['weights'] if 'weights' in markup_group else \\\n dummy_dataset('dummy_weights', shape=self._vis.shape[:-1], dtype=np.float32, value=1.0)\n # Obtain weight descriptions from file or recreate default weight description table\n self._weights_description = to_str(markup_group['weights_description'][:]) \\\n if 'weights_description' in markup_group else np.array(list(zip(WEIGHT_NAMES, WEIGHT_DESCRIPTIONS)))\n self._weights_select = []\n self._weights_keep = 'all'\n\n # ------ Extract sensors ------\n\n # Populate sensor cache with all HDF5 datasets below sensor group that fit the description of a sensor\n cache = {}\n\n def register_sensor(name, obj):\n \"\"\"A sensor is defined as a non-empty dataset with expected dtype.\"\"\"\n if isinstance(obj, h5py.Dataset) and obj.shape != () and \\\n obj.dtype.names == ('timestamp', 'value', 'status'):\n # Rename pedestal sensors from the old regime to become sensors of the corresponding antenna\n name = ('Antennas/ant' + name[13:]) if name.startswith('Pedestals/ped') else name\n cache[name] = RecordSensorGetter(obj, name)\n sensors_group.visititems(register_sensor)\n # Use estimated data timestamps for now, to speed up data segmentation\n self.sensor = SensorCache(cache, data_timestamps, self.dump_period, keep=self._time_keep,\n props=SENSOR_PROPS, 
virtual=VIRTUAL_SENSORS, aliases=SENSOR_ALIASES)\n\n # ------ Extract subarrays ------\n\n # By default, only pick antennas that were in use by the script\n script_ants = to_str(config_group['Observation'].attrs['script_ants']).split(',')\n self.ref_ant = script_ants[0] if not ref_ant else ref_ant\n # Original list of correlation products as pairs of input labels\n corrprods = get_single_value(config_group['Correlator'], 'bls_ordering')\n if len(corrprods) != self._vis.shape[2]:\n # Apply k7_capture baseline mask after the fact, in the hope that it fixes correlation product mislabelling\n corrprods = np.array([cp for cp in corrprods if cp[0][:-1] in script_ants and cp[1][:-1] in script_ants])\n # If there is still a mismatch between labels and data shape, file is considered broken (maybe bad labels?)\n if len(corrprods) != self._vis.shape[2]:\n raise BrokenFile('Number of baseline labels (containing expected antenna names) '\n 'received from correlator (%d) differs from number of baselines in data (%d)' %\n (len(corrprods), self._vis.shape[2]))\n else:\n logger.warning('Reapplied k7_capture baseline mask to fix unexpected number of baseline labels')\n # All antennas in configuration as katpoint Antenna objects\n ants = [katpoint.Antenna(to_str(config_group['Antennas'][name].attrs['description']))\n for name in config_group['Antennas']]\n self.subarrays = [Subarray(ants, corrprods)]\n self.sensor['Observation/subarray'] = CategoricalData(self.subarrays, [0, len(data_timestamps)])\n self.sensor['Observation/subarray_index'] = CategoricalData([0], [0, len(data_timestamps)])\n # Store antenna objects in sensor cache too, for use in virtual sensor calculations\n for ant in ants:\n self.sensor[f'Antennas/{ant.name}/antenna'] = CategoricalData([ant], [0, len(data_timestamps)])\n # Extract array reference from first antenna (first 5 fields of description)\n array_ant_fields = ['array'] + ants[0].description.split(',')[1:5]\n array_ant = katpoint.Antenna(','.join(array_ant_fields))\n self.sensor['Antennas/array/antenna'] = CategoricalData([array_ant], [0, len(data_timestamps)])\n\n # ------ Extract spectral windows / frequencies ------\n\n # Ideally we would like to use calculated center-frequency-hz sensor produced by k7_capture (better for nband)\n if self.version >= '2.1':\n centre_freq = self.sensor.get('RFE/center-frequency-hz')\n else:\n # Fall back to basic RFE7 LO frequency, as this supported multiple spectral windows before k7_capture did\n # This assumes WBC mode, though (NBC modes only fully supported since HDF5 v2.1)\n centre_freq = self.sensor.get('RFE/rfe7.lo1.frequency')\n centre_freq.unique_values = [freq - 4200e6 for freq in centre_freq.unique_values]\n num_chans = get_single_value(config_group['Correlator'], 'n_chans')\n if num_chans != self._vis.shape[1]:\n raise BrokenFile(f'Number of channels received from correlator ({num_chans}) '\n f'differs from number of channels in data ({self._vis.shape[1]})')\n bandwidth = get_single_value(config_group['Correlator'], 'bandwidth')\n channel_width = bandwidth / num_chans\n try:\n mode = self.sensor.get('DBE/dbe.mode').unique_values[0]\n except (KeyError, IndexError):\n # Guess the mode for version 2.0 files that haven't been re-augmented\n mode = 'wbc' if num_chans <= 1024 else 'wbc8k' if bandwidth > 200e6 else 'nbc'\n self.spectral_windows = [SpectralWindow(spw_centre, channel_width, num_chans, mode)\n for spw_centre in centre_freq.unique_values]\n self.sensor['Observation/spw'] = CategoricalData([self.spectral_windows[idx] for idx in 
centre_freq.indices],\n centre_freq.events)\n self.sensor['Observation/spw_index'] = CategoricalData(centre_freq.indices, centre_freq.events)\n\n # ------ Extract scans / compound scans / targets ------\n\n # Use the activity sensor of reference antenna to partition the data set into scans (and to set their states)\n scan = self.sensor.get(f'Antennas/{self.ref_ant}/activity')\n # If the antenna starts slewing on the second dump, incorporate the first dump into the slew too.\n # This scenario typically occurs when the first target is only set after the first dump is received.\n # The workaround avoids putting the first dump in a scan by itself, typically with an irrelevant target.\n if len(scan) > 1 and scan.events[1] == 1 and scan[1] == 'slew':\n scan.events, scan.indices = scan.events[1:], scan.indices[1:]\n scan.events[0] = 0\n # Use labels to partition the data set into compound scans\n label = sensor_to_categorical(markup_group['labels']['timestamp'], to_str(markup_group['labels']['label'][:]),\n data_timestamps, self.dump_period, **SENSOR_PROPS['Observation/label'])\n # Discard empty labels (typically found in raster scans, where first scan has proper label and rest are empty)\n # However, if all labels are empty, keep them, otherwise whole data set will be one pathological compscan...\n if len(label.unique_values) > 1:\n label.remove('')\n # Create duplicate scan events where labels are set during a scan (i.e. not at start of scan)\n # ASSUMPTION: Number of scans >= number of labels (i.e. each label should introduce a new scan)\n scan.add_unmatched(label.events)\n self.sensor['Observation/scan_state'] = scan\n self.sensor['Observation/scan_index'] = CategoricalData(list(range(len(scan))), scan.events)\n # Move proper label events onto the nearest scan start\n # ASSUMPTION: Number of labels <= number of scans (i.e. only a single label allowed per scan)\n label.align(scan.events)\n # If one or more scans at start of data set have no corresponding label, add a default label for them\n if label.events[0] > 0:\n label.add(0, '')\n self.sensor['Observation/label'] = label\n self.sensor['Observation/compscan_index'] = CategoricalData(list(range(len(label))), label.events)\n # Use the target sensor of reference antenna to set the target for each scan\n target = self.sensor.get(f'Antennas/{self.ref_ant}/target')\n # Move target events onto the nearest scan start\n # ASSUMPTION: Number of targets <= number of scans (i.e. only a single target allowed per scan)\n target.align(scan.events)\n self.sensor['Observation/target'] = target\n self.sensor['Observation/target_index'] = CategoricalData(target.indices, target.events)\n # Set up catalogue containing all targets in file, with reference antenna as default antenna\n self.catalogue.add(target.unique_values)\n self.catalogue.antenna = self.sensor[f'Antennas/{self.ref_ant}/antenna'][0]\n # Ensure that each target flux model spans all frequencies in data set if possible\n self._fix_flux_freq_range()\n\n # Avoid storing reference to self in transform closure below, as this hinders garbage collection\n dump_period, time_offset = self.dump_period, self.time_offset\n # Restore original (slow) timestamps so that subsequent sensors (e.g. 
pointing) will have accurate values\n extract_time = LazyTransform('extract_time', lambda t, keep: t + 0.5 * dump_period + time_offset)\n self.sensor.timestamps = LazyIndexer(self._timestamps, keep=slice(num_dumps), transforms=[extract_time])\n # Apply default selection and initialise all members that depend on selection in the process\n self.select(spw=0, subarray=0, ants=script_ants)\n\n @staticmethod\n def _open(filename, mode='r'):\n \"\"\"Open file and do basic version and augmentation sanity check.\"\"\"\n f = h5py.File(filename, mode)\n version = to_str(f.attrs.get('version', '1.x'))\n if not version.startswith('2.'):\n raise WrongVersion(f\"Attempting to load version '{version}' file with version 2 loader\")\n if 'augment_ts' not in f.attrs:\n raise BrokenFile('HDF5 file not augmented - please run '\n 'k7_augment.py (provided by katcapture package)')\n return f, version\n\n @staticmethod\n def _get_ants(filename):\n \"\"\"Quick look function to get the list of antennas in a data file.\n\n This is intended to be called without createing a full katdal object.\n\n Parameters\n ----------\n filename : string\n Data file name\n\n Returns\n -------\n antennas : list of :class:'katpoint.Antenna' objects\n\n \"\"\"\n f, version = H5DataV2._open(filename)\n config_group = f['MetaData/Configuration']\n all_ants = [ant for ant in config_group['Antennas']]\n script_ants = to_str(config_group['Observation'].attrs.get('script_ants'))\n script_ants = script_ants.split(',') if script_ants else all_ants\n return [katpoint.Antenna(to_str(config_group['Antennas'][ant].attrs['description']))\n for ant in script_ants if ant in all_ants]\n\n @staticmethod\n def _get_targets(filename):\n \"\"\"Quick look function to get the list of targets in a data file.\n\n This is intended to be called without createing a full katdal object.\n\n Parameters\n ----------\n filename : string\n Data file name\n\n Returns\n -------\n targets : :class:'katpoint.Catalogue' object\n All targets in file\n\n \"\"\"\n f, version = H5DataV2._open(filename)\n # Use the delay-tracking centre as the one and only target\n # Try two different sensors for the DBE target\n try:\n target_list = f['MetaData/Sensors/DBE/target']\n except Exception:\n # Since h5py errors have varied over the years, we need Exception\n target_list = f['MetaData/Sensors/Beams/Beam0/target']\n all_target_strings = [to_str(target_data[1]) for target_data in target_list]\n return katpoint.Catalogue(np.unique(all_target_strings))\n\n def __str__(self):\n \"\"\"Verbose human-friendly string representation of data set.\"\"\"\n descr = [super().__str__()]\n # append the process_log, if it exists, for non-concatenated h5 files\n if 'process_log' in self.file['History']:\n descr.append('-------------------------------------------------------------------------------')\n descr.append('Process log:')\n for proc in self.file['History']['process_log']:\n # proc has a structured dtype and to_str doesn't work on it, so\n # we have to to_str each element.\n param_list = f'{to_str(proc[0]):>15}:'\n for param in to_str(proc[1]).split(','):\n param_list += f' {param}'\n descr.append(param_list)\n return '\\n'.join(descr)\n\n @property\n def _weights_keep(self):\n known_weights = [row[0] for row in getattr(self, '_weights_description', [])]\n return [known_weights[ind] for ind in self._weights_select]\n\n @_weights_keep.setter\n def _weights_keep(self, names):\n known_weights = [row[0] for row in getattr(self, '_weights_description', [])]\n # Ensure a sequence of weight names\n 
names = _selection_to_list(names, all=known_weights)\n # Create index list for desired weights\n selection = []\n for name in names:\n try:\n selection.append(known_weights.index(name))\n except ValueError:\n logger.warning(\"%r is not a legitimate weight type for this file, \"\n \"supported ones are %s\", name, known_weights)\n if known_weights and not selection:\n logger.warning('No valid weights were selected - setting all weights to 1.0 by default')\n self._weights_select = selection\n\n @property\n def _flags_keep(self):\n if not hasattr(self, '_flags_description'):\n return []\n known_flags = [row[0] for row in self._flags_description]\n # The KAT-7 flagger uses the np.packbits convention (bit 0 = MSB) so don't flip\n selection = np.unpackbits(self._flags_select)\n assert len(known_flags) == len(selection), \\\n f'Expected {len(selection)} flag types in file, got {self._flags_description}'\n return [name for name, bit in zip(known_flags, selection) if bit]\n\n @_flags_keep.setter\n def _flags_keep(self, names):\n if not hasattr(self, '_flags_description'):\n self._flags_select = np.array([0], dtype=np.uint8)\n return\n known_flags = [row[0] for row in self._flags_description]\n # Ensure `names` is a sequence of valid flag names (or an empty list)\n names = _selection_to_list(names, all=known_flags)\n # Create boolean list for desired flags\n selection = np.zeros(8, dtype=np.uint8)\n assert len(known_flags) == len(selection), \\\n f'Expected {len(selection)} flag types in file, got {self._flags_description}'\n for name in names:\n try:\n selection[known_flags.index(name)] = 1\n except ValueError:\n logger.warning(\"%r is not a legitimate flag type for this file, \"\n \"supported ones are %s\", name, known_flags)\n # Pack index list into bit mask\n # The KAT-7 flagger uses the np.packbits convention (bit 0 = MSB) so don't flip\n flagmask = np.packbits(selection)\n if known_flags and not flagmask:\n logger.warning('No valid flags were selected - setting all flags to False by default')\n self._flags_select = flagmask\n\n @property\n def timestamps(self):\n \"\"\"Visibility timestamps in UTC seconds since Unix epoch.\n\n The timestamps are returned as an array indexer of float64, shape (*T*,),\n with one timestamp per integration aligned with the integration\n *midpoint*. To get the data array itself from the indexer `x`, do `x[:]`\n or perform any other form of indexing on it.\n\n \"\"\"\n # Avoid storing reference to self in transform closure below, as this hinders garbage collection\n dump_period, time_offset = self.dump_period, self.time_offset\n extract_time = LazyTransform('extract_time', lambda t, keep: t + 0.5 * dump_period + time_offset)\n return LazyIndexer(self._timestamps, keep=self._time_keep, transforms=[extract_time])\n\n def _vislike_indexer(self, dataset, extractor):\n \"\"\"Lazy indexer for vis-like datasets (vis / weights / flags).\n\n This operates on datasets with shape (*T*, *F*, *B*) and potentially\n different dtypes. 
The data type conversions are all left to the provided\n extractor transform, while this method takes care of the common\n selection issues, such as preserving singleton dimensions and dealing\n with duplicate final dumps.\n\n Parameters\n ----------\n dataset : :class:`h5py.Dataset` object or equivalent\n Underlying vis-like dataset on which lazy indexing will be done\n extractor : function, signature ``data = f(data, keep)``\n Transform to apply to data (`keep` is user-provided 2nd-stage index)\n\n Returns\n -------\n indexer : :class:`LazyIndexer` object\n Lazy indexer with appropriate selectors and transforms included\n\n \"\"\"\n # Create first-stage index from dataset selectors\n time_keep = self._time_keep\n # If there is a duplicate final dump, these lengths don't match -> ignore last dump in file\n if len(time_keep) == len(dataset) - 1:\n time_keep = np.zeros(len(dataset), dtype=np.bool)\n time_keep[:len(self._time_keep)] = self._time_keep\n stage1 = (time_keep, self._freq_keep, self._corrprod_keep)\n\n def _force_3dim(data, keep):\n \"\"\"Keep singleton dimensions in stage 2 (i.e. final) indexing.\"\"\"\n # Ensure that keep tuple has length of 3 (truncate or pad with blanket slices as necessary)\n keep = keep[:3] + (slice(None),) * (3 - len(keep))\n # Final indexing ensures that returned data are always 3-dimensional (i.e. keep singleton dimensions)\n keep_singles = [(np.newaxis if np.isscalar(dim_keep) else slice(None))\n for dim_keep in keep]\n return data[tuple(keep_singles)]\n force_3dim = LazyTransform('force_3dim', _force_3dim)\n transforms = [extractor, force_3dim] if self._keepdims else [extractor]\n return LazyIndexer(dataset, stage1, transforms)\n\n @property\n def vis(self):\n r\"\"\"Complex visibility data as a function of time, frequency and baseline.\n\n The visibility data are returned as an array indexer of complex64, shape\n (*T*, *F*, *B*), with time along the first dimension, frequency along the\n second dimension and correlation product (\"baseline\") index along the\n third dimension. The returned array always has all three dimensions,\n even for scalar (single) values. The number of integrations *T* matches\n the length of :meth:`timestamps`, the number of frequency channels *F*\n matches the length of :meth:`freqs` and the number of correlation\n products *B* matches the length of :meth:`corr_products`. To get the\n data array itself from the indexer `x`, do `x[:]` or perform any other\n form of indexing on it. Only then will data be loaded into memory.\n\n The sign convention of the imaginary part is consistent with an\n electric field of :math:`e^{i(\\omega t - jz)}` i.e. phase that\n increases with time.\n \"\"\"\n extract = LazyTransform('extract_vis',\n # Discard the 4th / last dimension as this is subsumed in complex view\n # The visibilities are conjugated due to using the lower sideband\n lambda vis, keep: vis.view(np.complex64)[..., 0].conjugate(),\n lambda shape: shape[:-1], np.complex64)\n return self._vislike_indexer(self._vis, extract)\n\n @property\n def weights(self):\n \"\"\"Visibility weights as a function of time, frequency and baseline.\n\n The weights data are returned as an array indexer of float32, shape\n (*T*, *F*, *B*), with time along the first dimension, frequency along the\n second dimension and correlation product (\"baseline\") index along the\n third dimension. 
The number of integrations *T* matches the length of\n :meth:`timestamps`, the number of frequency channels *F* matches the\n length of :meth:`freqs` and the number of correlation products *B*\n matches the length of :meth:`corr_products`. To get the data array\n itself from the indexer `x`, do `x[:]` or perform any other form of\n indexing on it. Only then will data be loaded into memory.\n\n \"\"\"\n # We currently only cater for a single weight type (i.e. either select it or fall back to 1.0)\n def transform(weights, keep):\n return weights.astype(np.float32) if self._weights_select else \\\n np.ones_like(weights, dtype=np.float32)\n extract = LazyTransform('extract_weights', transform, dtype=np.float32)\n return self._vislike_indexer(self._weights, extract)\n\n @property\n def flags(self):\n \"\"\"Flags as a function of time, frequency and baseline.\n\n The flags data are returned as an array indexer of bool, shape\n (*T*, *F*, *B*), with time along the first dimension, frequency along the\n second dimension and correlation product (\"baseline\") index along the\n third dimension. The number of integrations *T* matches the length of\n :meth:`timestamps`, the number of frequency channels *F* matches the\n length of :meth:`freqs` and the number of correlation products *B*\n matches the length of :meth:`corr_products`. To get the data array\n itself from the indexer `x`, do `x[:]` or perform any other form of\n indexing on it. Only then will data be loaded into memory.\n\n \"\"\"\n def transform(flags, keep):\n \"\"\"Use flagmask to blank out the flags we don't want.\"\"\"\n # Then convert uint8 to bool -> if any flag bits set, flag is set\n return np.bool_(np.bitwise_and(self._flags_select, flags))\n extract = LazyTransform('extract_flags', transform, dtype=np.bool)\n return self._vislike_indexer(self._flags, extract)\n\n @property\n def temperature(self):\n \"\"\"Air temperature in degrees Celsius.\"\"\"\n return self.sensor['Enviro/asc.air.temperature']\n\n @property\n def pressure(self):\n \"\"\"Barometric pressure in millibars.\"\"\"\n return self.sensor['Enviro/asc.air.pressure']\n\n @property\n def humidity(self):\n \"\"\"Relative humidity as a percentage.\"\"\"\n return self.sensor['Enviro/asc.air.relative-humidity']\n\n @property\n def wind_speed(self):\n \"\"\"Wind speed in metres per second.\"\"\"\n return self.sensor['Enviro/asc.wind.speed']\n\n @property\n def wind_direction(self):\n \"\"\"Wind direction as an azimuth angle in degrees.\"\"\"\n return self.sensor['Enviro/asc.wind.direction']\n"
] | [
[
"numpy.ones_like",
"numpy.unique",
"numpy.arange",
"numpy.ones",
"numpy.bitwise_and",
"numpy.packbits",
"numpy.isscalar",
"numpy.array",
"numpy.zeros",
"numpy.unpackbits"
]
] |
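
The katdal row above documents, in the _flags_keep property and the flags extractor, how a named selection of flag types is packed into a single uint8 mask with np.packbits (noting the KAT-7 convention that bit 0 is the MSB) and applied to raw flag data with np.bitwise_and. A minimal, self-contained sketch of that round trip follows; the flag names are placeholders rather than katdal's FLAG_NAMES, and the raw flag values are arbitrary.

# Sketch of the flag-mask round trip; flag names and raw values are placeholders.
import numpy as np

known_flags = ['flag0', 'flag1', 'flag2', 'flag3',
               'flag4', 'flag5', 'flag6', 'flag7']
wanted = {'flag1', 'flag6'}

# 0/1 selection vector in flag order (first element maps to the MSB when packed).
selection = np.array([1 if name in wanted else 0 for name in known_flags],
                     dtype=np.uint8)
flagmask = np.packbits(selection)    # array([66], dtype=uint8): 64 (flag1) + 2 (flag6)
recovered = np.unpackbits(flagmask)  # back to the eight-element 0/1 vector
assert recovered.tolist() == selection.tolist()

# Applying the mask to raw uint8 flag data, as the flags extractor does:
# a sample counts as flagged if any selected bit survives the bitwise AND.
raw_flags = np.array([0, 64, 1, 66], dtype=np.uint8)
flagged = np.bitwise_and(flagmask, raw_flags).astype(bool)  # [False, True, False, True]
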
Statfactory/ColdBrew | [
"ee16eee73e8dc89646abd6ee3e19858e49c6ffb7"
] | [
"cortado/cutcovfactor.py"
] | [
"from cortado.abstractfactor import AbstractFactor\nimport numpy as np\nfrom cortado.seq import Seq\nfrom cortado.funcslicer import FuncSlicer\nfrom cortado.consts import HEADLENGTH, SLICELEN, MISSINGLEVEL\nfrom numba import jit\nfrom numba.typed import Dict\nfrom numba import types\n\n@jit(nopython=True, cache=False)\ndef g_leftclosed(slice, buf, cuts, k):\n def f(x):\n if np.isnan(x):\n return 0\n if x == np.PINF:\n return k\n else:\n i = np.searchsorted(cuts, x, side='right') \n return i\n\n for i in range(len(slice)):\n buf[i] = f(slice[i])\n if len(buf) == len(slice):\n return buf\n else:\n return buf[:len(slice)]\n\n@jit(nopython=True, cache=False)\ndef g_rightclosed(slice, buf, cuts):\n def f(x):\n if np.isnan(x):\n return 0\n if x == np.NINF:\n return 1\n else:\n i = np.searchsorted(cuts, x, side='left') \n return i\n\n for i in range(len(slice)):\n buf[i] = f(slice[i])\n if len(buf) == len(slice):\n return buf\n else:\n return buf[:len(slice)]\n\nclass CutCovFactor(AbstractFactor):\n\n def __init__(self, covariate, cuts, rightclosed = False):\n self.covariate = covariate\n self.cuts = cuts\n \n assert cuts[0] == np.NINF and cuts[-1] == np.PINF\n levelcount = len(cuts) - 1\n if rightclosed:\n levels = [MISSINGLEVEL] + [\"{z}{x},{y}]\".format(x=str(cuts[i]), y=str(cuts[i + 1]), z=\"[\" if i == 0 else \"(\") for i in range(levelcount)]\n else:\n levels = [MISSINGLEVEL] + [\"[{x},{y}{z}\".format(x=str(cuts[i]), y=str(cuts[i + 1]), z=\"]\" if i == (levelcount - 1) else \")\") for i in range(levelcount)]\n dtype = np.uint8 if levelcount <= 256 else np.uint16\n\n \n def slicer(start, length, slicelen):\n length = min(len(self) - start, length)\n slicelen = min(length, slicelen)\n buf = np.empty(slicelen, dtype = dtype)\n if rightclosed:\n return Seq.map((lambda s: g_rightclosed(s, buf, cuts)), covariate.slicer(start, length, slicelen))\n else:\n return Seq.map((lambda s: g_leftclosed(s, buf, cuts, levelcount - 1)), covariate.slicer(start, length, slicelen))\n\n self._levels = levels\n self._slicer = FuncSlicer(slicer, dtype)\n\n @property\n def name(self):\n return self.covariate.name\n\n def __len__(self):\n return len(self.covariate)\n\n @property\n def isordinal(self):\n return True\n\n @property\n def levels(self):\n return self._levels\n\n @property\n def slicer(self):\n return self._slicer"
] | [
[
"numpy.isnan",
"numpy.empty",
"numpy.searchsorted"
]
] |
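
The CutCovFactor row above bins a continuous covariate into factor levels: g_leftclosed maps NaN to level 0 (the missing level) and otherwise uses np.searchsorted(cuts, x, side='right') to obtain a 1-based level index for left-closed intervals (with +inf special-cased in the source). A standalone illustration of that mapping follows; the cut points and input values are arbitrary examples, not taken from the repository.

# Illustration of left-closed binning via searchsorted; cuts and values are examples.
import numpy as np

cuts = np.array([-np.inf, 0.0, 10.0, np.inf])
values = np.array([-3.0, 0.0, 7.5, 10.0, np.nan])

levels = np.where(np.isnan(values), 0,
                  np.searchsorted(cuts, values, side='right'))
# Expected: [1, 2, 2, 3, 0], i.e. "[-inf,0)", "[0,10)", "[0,10)", "[10,inf]", missing
print(levels)
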
primasanjaya/muat-github | [
"4603c6c960188643fb38d8dba82e0dcc1ba00b40",
"4603c6c960188643fb38d8dba82e0dcc1ba00b40"
] | [
"main_old.py",
"dataset/pcawgtcga_dataloader.py"
] | [
"# make deterministic\r\nfrom mingpt.utils import set_seed\r\nset_seed(42)\r\n#frompc\r\n\r\nimport numpy as np\r\nimport torch\r\nimport torch.nn as nn\r\nfrom torch.nn import functional as F\r\nimport math\r\nfrom torch.utils.data import Dataset\r\n\r\nfrom mingpt.model import *\r\n\r\nfrom mingpt.trainer import Trainer, TrainerConfig\r\nfrom mingpt.utils import sample\r\n\r\nimport logging\r\nlogging.basicConfig(\r\n format=\"%(asctime)s - %(levelname)s - %(name)s - %(message)s\",\r\n datefmt=\"%m/%d/%Y %H:%M:%S\",\r\n level=logging.INFO,\r\n)\r\nimport pdb\r\n\r\nfrom dataset.tcga_dataset import TCGA\r\nfrom dataset.tcga_conv_dataset import TCGAConv\r\nfrom dataset.pcawg_conv_dataset import *\r\nfrom dataset.pcawg_dataset import PCAWG\r\nfrom dataset.pcawg_emb_dataset import PCAWGEmb\r\nfrom dataset.pcawg_sepdataset import PCAWGSep\r\nfrom dataset.pcawg_2stream import PCAWG2Stream\r\nfrom dataset.tcgadisttabletoemb_dataset import TCGADist\r\nfrom dataset.tcgamutdist_dataset import TCGAMutDist\r\nfrom dataset.tcgamutdistasone_dataset import TCGAMutDistasOne\r\nfrom dataset.tcgapcawg_dataset import TCGAPCAWG\r\nfrom dataset.newtcgapcawg_dataset import NewTCGAPCAWG\r\nfrom dataset.finaltcgapcawg_dataset import FinalTCGAPCAWG\r\n\r\nfrom mingpt.bert import *\r\nfrom preprocessing.dmm.dmm import *\r\nfrom preprocessing.fromvcffiles import *\r\n\r\nimport argparse\r\nimport os\r\nimport pandas as pd\r\n\r\ndef translate_args(args):\r\n\r\n cwd = os.getcwd()\r\n args.cwd = cwd\r\n\r\n args.mutation_coding = cwd + '/preprocessing/dmm/data/mutation_codes_sv.tsv'\r\n args.input = args.data_dir\r\n\r\n args.output = cwd + '/data/raw/out/00b9d0e6-69dc-4345-bffd-ce32880c8eef.consensus.20160830.somatic.snv_mnv.tsv.gz' \r\n\r\n args.reference = '/csc/epitkane/data/ref_genomes/hs37d5_1000GP/hs37d5_1000GP.fa'\r\n args.context = 8\r\n\r\n args.sample_id = 'submitted_sample_id'\r\n\r\n args.tmp = cwd + '/data/raw/tmp/'\r\n args.verbose = 1\r\n args.generate_negatives = 1\r\n args.report_interval = 100000\r\n\r\n return args\r\n\r\n\r\ndef get_args():\r\n parser = argparse.ArgumentParser(description='TCGA / PEACOCK experiment')\r\n\r\n # DATASET\r\n parser.add_argument('--cwd', type=str,help='project dir')\r\n\r\n parser.add_argument('--dataset', type=str, default='pcawg',\r\n help='dataset')\r\n # MODEL\r\n parser.add_argument('--arch', type=str, default=None,\r\n help='architecture')\r\n # DIRECTORY\r\n parser.add_argument('--data-dir', type=str, default=None,\r\n help='data directory')\r\n parser.add_argument('--crossdata-dir', type=str, default=None,\r\n help='data directory')\r\n parser.add_argument('--adddata-dir', type=str, default=None,\r\n help='data directory')\r\n\r\n parser.add_argument('--n-class', type=int, default=None,\r\n help='number of class')\r\n\r\n parser.add_argument('--batch-size', type=int, default=1,\r\n help='batch size')\r\n\r\n parser.add_argument('--block-size', type=int, default=1000,\r\n help='block of sequence')\r\n\r\n parser.add_argument('--context-length', type=int, default=256,\r\n help='length of sequence')\r\n parser.add_argument('--n-layer', type=int, default=1,\r\n help='attention layer')\r\n parser.add_argument('--n-head', type=int, default=8,\r\n help='attention head')\r\n parser.add_argument('--n-emb', type=int, default=128,\r\n help='embedding dimension')\r\n parser.add_argument('--n-vocab-type', type=int, default=1,\r\n help='embedding dimension')\r\n\r\n parser.add_argument('--tag', type=str, default='myexperiment',\r\n help='dataset')\r\n \r\n 
parser.add_argument('--train', action='store_true', default=False)\r\n parser.add_argument('--predict', action='store_true', default=False)\r\n parser.add_argument('--trainbp', action='store_true', default=False)\r\n parser.add_argument('--vis-weight', action='store_true', default=False)\r\n parser.add_argument('--top-weight', action='store_true', default=False)\r\n\r\n parser.add_argument('--visval', action='store_true', default=False)\r\n\r\n\r\n parser.add_argument('--single-predict', action='store_true', default=False)\r\n parser.add_argument('--create-dataset', action='store_true', default=False)\r\n parser.add_argument('--two-streams', action='store_true', default=False)\r\n parser.add_argument('--three-streams', action='store_true', default=False)\r\n\r\n parser.add_argument('--filter', action='store_true', default=False)\r\n\r\n parser.add_argument('--bert', action='store_true', default=False)\r\n parser.add_argument('--withclass', action='store_true', default=False)\r\n parser.add_argument('--default', action='store_true', default=False)\r\n parser.add_argument('--addposition', action='store_true', default=False)\r\n parser.add_argument('--oneDhot', action='store_true', default=False)\r\n parser.add_argument('--addorder', action='store_true', default=False)\r\n parser.add_argument('--addtoken', action='store_true', default=False)\r\n parser.add_argument('--addtriplet', action='store_true', default=False)\r\n parser.add_argument('--addtriplettoken', action='store_true', default=False)\r\n parser.add_argument('--addgestoken', action='store_true', default=False)\r\n parser.add_argument('--addrt', action='store_true', default=False)\r\n parser.add_argument('--addlongcontext', action='store_true', default=False)\r\n parser.add_argument('--tokenizedlongcontext', action='store_true', default=False)\r\n parser.add_argument('--ohlongcontext', action='store_true', default=False)\r\n parser.add_argument('--flattenohlongcontext', action='store_true', default=False)\r\n parser.add_argument('--addpostoken', action='store_true', default=False)\r\n parser.add_argument('--addrttoken', action='store_true', default=False)\r\n parser.add_argument('--balance', action='store_true', default=False)\r\n\r\n parser.add_argument('--l1', action='store_true', default=False)\r\n\r\n parser.add_argument('--fold', type=int, default=1, \r\n help='number of mutation')\r\n\r\n parser.add_argument('--output-mode', type=str, default='token',help='dataset')\r\n\r\n parser.add_argument('--rbm', action='store_true', default=False)\r\n\r\n parser.add_argument('--newtraining', action='store_true', default=False)\r\n parser.add_argument('--newpredict', action='store_true', default=False)\r\n parser.add_argument('--newpredict2', action='store_true', default=False)\r\n parser.add_argument('--normal', action='store_true', default=False)\r\n\r\n parser.add_argument('--freezeemb', action='store_true', default=False)\r\n\r\n parser.add_argument('--predictvis', action='store_true', default=False)\r\n\r\n parser.add_argument('--crossdata', action='store_true', default=False)\r\n\r\n parser.add_argument('--nummut', type=int, default=0,\r\n help='number of mutation')\r\n parser.add_argument('--frac', type=float, default=0,\r\n help='frac')\r\n\r\n parser.add_argument('--mutratio', type=str, default='',\r\n help='mutation ratio')\r\n\r\n parser.add_argument('--spectral', action='store_true', default=False)\r\n parser.add_argument('--finalpredict', action='store_true', default=False)\r\n\r\n 
parser.add_argument('--finalpredictnewdata', action='store_true', default=False)\r\n parser.add_argument('--single-pred-vcf', action='store_true', default=False)\r\n\r\n\r\n parser.add_argument('--vis-attention', action='store_true', default=False)\r\n\r\n\r\n #dmm_parser\r\n parser.add_argument('-v', '--verbose', type=int, help='Try to be more verbose')\r\n parser.add_argument('--mutation-coding', help='Mutation coding table (\"ref alt code\"/line) [{}]'.format(\\\r\n defaults['mutation_coding']), metavar='fn')\r\n parser.add_argument('--config', help='Read parameters from a JSON file')\r\n parser.add_argument('--data-config',\r\n help='Column specification for --input, --validation and --aux-data [{}]'.format(\\\r\n defaults['data_config']))\r\n parser.add_argument('--random-seed', default=None, type=int, metavar='seed')\r\n parser.add_argument('--tmp')\r\n \r\n parser.add_argument('-i', '--input', action='append', metavar='dir(s)',\r\n help='Either a directory with vcf/maf[.gz] files or a vcf/maf[.gz] file (-i may be given more than once)')\r\n parser.add_argument('-o', '--output', metavar='fn', help='Preprocessed mutation data')\r\n parser.add_argument('-r', '--reference', metavar='ref', help='Reference genome (fasta) [{}]'.format(\\\r\n defaults['reference']))\r\n parser.add_argument('-k', '--context', help='Sequence context length (power of 2) [{}]'.format(\\\r\n defaults['context']), metavar='bp', type=int,default=8)\r\n parser.add_argument('-e', '--errors', metavar='fn',\r\n help='File where to log errors [{}]'.format(defaults['errors']))\r\n parser.add_argument('--no-ref-preload', help='Use samtools to read reference on demand (slow but fast startup) [false]',\r\n action='store_true')\r\n parser.add_argument('--no-filter', help='Process all variants [default=only PASS/. variants]',\r\n action='store_true')\r\n parser.add_argument('--sample-id', help='Sample identifier column name in MAF file')\r\n parser.add_argument('-n', '--generate_negatives', help='Ratio of negative to positive examples [{}]. Two passes on data are required for n>0.'.format(\\\r\n defaults['negative_ratio']), type=float)\r\n parser.add_argument('--median-variant-type-negatives', action='store_true',\r\n help='Generate median number of each variant type as negative examples for each sample')\r\n parser.add_argument('--median-variant-type-file', help='Load median variant numbers from a file')\r\n parser.add_argument('--negative-generation-mode', help='[generate] output in one go (default), [augment] input files or [process] augmented files', default='generate')\r\n parser.add_argument('--info-column', help='Input column name to write toutputo output (MAF input only). 
May be specified more than once.', action='append')\r\n parser.add_argument('--report-interval', help='Interval to report number of variants processed',\r\n type=int)\r\n parser.add_argument('--array-jobs', help='How many array jobs in total', type=int)\r\n parser.add_argument('--array-index', help='Index of this job', type=int)\r\n parser.add_argument('--nope', help='Only one variant per output sequence', action='store_true')\r\n parser.add_argument('--no-overwrite', help='Do not overwrite if output exists', action='store_true')\r\n\r\n\r\n args = parser.parse_args()\r\n return args\r\n\r\ndef get_dataloader(args,train_val,load):\r\n\r\n if args.dataset == 'finalpcawg' or args.dataset == 'wgspcawg':\r\n if train_val=='training':\r\n dataloader_class = FinalTCGAPCAWG(dataset_name = args.dataset, \r\n data_dir=args.data_dir, \r\n mode='training', \r\n curr_fold=args.fold, \r\n block_size=args.block_size, \r\n load=False,\r\n mutratio = args.mutratio,\r\n addtriplettoken=args.addtriplettoken,\r\n addpostoken=args.addpostoken,\r\n addgestoken=args.addgestoken,\r\n addrt=args.addrt,\r\n nummut = args.nummut,\r\n frac = args.frac,\r\n crossdata = args.crossdata,\r\n crossdatadir = args.crossdata_dir,\r\n adddatadir = args.adddata_dir\r\n )\r\n\r\n elif train_val=='validation':\r\n dataloader_class = FinalTCGAPCAWG(dataset_name = args.dataset, \r\n data_dir=args.data_dir, \r\n mode='validation', \r\n curr_fold=args.fold, \r\n block_size=args.block_size, \r\n load=False,\r\n mutratio = args.mutratio,\r\n addtriplettoken=args.addtriplettoken,\r\n addpostoken=args.addpostoken,\r\n addgestoken=args.addgestoken,\r\n addrt=args.addrt,\r\n nummut = args.nummut,\r\n frac = args.frac,\r\n crossdata = args.crossdata,\r\n crossdatadir = args.crossdata_dir,\r\n adddatadir = args.adddata_dir)\r\n\r\n elif args.dataset == 'finaltcga' or args.dataset == 'westcga':\r\n if train_val=='training':\r\n dataloader_class = FinalTCGAPCAWG(dataset_name = args.dataset, \r\n data_dir=args.data_dir, \r\n mode='training', \r\n curr_fold=args.fold, \r\n block_size=args.block_size, \r\n load=False,\r\n mutratio = args.mutratio,\r\n addtriplettoken=args.addtriplettoken,\r\n addpostoken=args.addpostoken,\r\n addgestoken=args.addgestoken,\r\n addrt=args.addrt,\r\n nummut = args.nummut,\r\n frac = args.frac,\r\n crossdata = args.crossdata,\r\n crossdatadir = args.crossdata_dir,\r\n adddatadir = args.adddata_dir)\r\n\r\n elif train_val=='validation':\r\n dataloader_class = FinalTCGAPCAWG(dataset_name = args.dataset, \r\n data_dir=args.data_dir, \r\n mode='validation', \r\n curr_fold=args.fold, \r\n block_size=args.block_size, \r\n load=False,\r\n mutratio = args.mutratio,\r\n addtriplettoken=args.addtriplettoken,\r\n addpostoken=args.addpostoken,\r\n addgestoken=args.addgestoken,\r\n addrt=args.addrt,\r\n nummut = args.nummut,\r\n frac = args.frac,\r\n crossdata = args.crossdata,\r\n crossdatadir = args.crossdata_dir,\r\n adddatadir = args.adddata_dir)\r\n \r\n return dataloader_class\r\n\r\ndef get_model(args,mconf):\r\n if args.arch == 'GPTConv':\r\n model = GPTConv(mconf)\r\n elif args.arch == 'GPTConvDeeper':\r\n model = GPTConvDeeper(mconf)\r\n elif args.arch == 'GPTNonPosition':\r\n model = GPTNonPosition(mconf)\r\n elif args.arch == 'CTransformer':\r\n model = CTransformer(mconf)\r\n elif args.arch == 'ConvTransformer':\r\n model = ConvTransformer(mconf)\r\n elif args.arch == 'Conv2DTransformer':\r\n model = Conv2DTransform\r\n elif args.arch == 'Transformer2Stream':\r\n model = Transformer2Stream(mconf)\r\n elif args.arch 
== 'CTransformerDNN':\r\n model = CTransformerDNN(mconf)\r\n elif args.arch == 'CTransformerMutDist':\r\n model = CTransformerMutDist(mconf)\r\n elif args.arch == 'SimpleAttention':\r\n model = SimpleAttention(mconf)\r\n elif args.arch == 'BertForSequenceClassification':\r\n model = BertForSequenceClassification(mconf)\r\n elif args.arch == 'BertwithPosition':\r\n model = BertwithPosition(mconf)\r\n elif args.arch == 'CTransformerWithPaddingIDX':\r\n model = CTransformerWithPaddingIDX(mconf)\r\n elif args.arch == 'Conv2DTransformerOnehot':\r\n model = Conv2DTransformerOnehot(mconf)\r\n elif args.arch == 'CTransformerWithPaddingIDXandfirstvec':\r\n model = CTransformerWithPaddingIDXandfirstvec(mconf)\r\n elif args.arch == 'Conv2DTransformerOnehotDeeper':\r\n model = Conv2DTransformerOnehotDeeper(mconf)\r\n elif args.arch == 'DNNTransformerOnehotDeeper':\r\n model = DNNTransformerOnehotDeeper(mconf)\r\n elif args.arch == 'CTransformerWithPosition':\r\n model = CTransformerWithPosition(mconf)\r\n elif args.arch == 'CTransformerWithPositionConcate':\r\n model = CTransformerWithPositionConcate(mconf)\r\n elif args.arch == 'DNNTransformerOnehotDeeperwithPosition':\r\n model = DNNTransformerOnehotDeeperwithPosition(mconf)\r\n elif args.arch == 'DNNTransformerOnehotDeeperwithPositionwithOrder':\r\n model = DNNTransformerOnehotDeeperwithPositionwithOrder(mconf)\r\n elif args.arch == 'CTransformerDNNWithPositionConcateToken':\r\n model = CTransformerDNNWithPositionConcateToken(mconf)\r\n elif args.arch == 'CTransformerDNNWithPositionConcateTokenSep':\r\n model = CTransformerDNNWithPositionConcateTokenSep(mconf)\r\n elif args.arch == 'CTransformerRBMWithPositionConcate':\r\n model = CTransformerRBMWithPositionConcate(mconf)\r\n elif args.arch == 'TripletPositionTokenandOnehot':\r\n model = TripletPositionTokenandOnehot(mconf) \r\n elif args.arch == 'PositionToken':\r\n model = PositionToken(mconf) \r\n elif args.arch == 'TripletPositionTokenandOnehotConcAfter':\r\n model = TripletPositionTokenandOnehotConcAfter(mconf)\r\n elif args.arch == 'TripletPositionRTToken':\r\n model = TripletPositionRTToken(mconf)\r\n elif args.arch == 'FullConvTransformer':\r\n model = FullConvTransformer(mconf)\r\n elif args.arch == 'TripletPositionTokenBest':\r\n model = TripletPositionTokenBest(mconf)\r\n elif args.arch == 'TripletPositionTokenRT':\r\n model = TripletPositionTokenRT(mconf)\r\n elif args.arch == 'EmbFC':\r\n model = EmbFC(mconf) \r\n elif args.arch == 'TripletPositionTokenOldBest':\r\n model = TripletPositionTokenOldBest(mconf)\r\n elif args.arch == 'CTransformerPCAWGtoTCGA_TPGES':\r\n model = CTransformerPCAWGtoTCGA_TPGES(mconf)\r\n elif args.arch == 'CTransformerPCAWGtoTCGA_T':\r\n model = CTransformerPCAWGtoTCGA_T(mconf)\r\n elif args.arch == 'TripletPosition':\r\n model = TripletPosition(mconf) \r\n elif args.arch == 'TripletPositionGES':\r\n model = TripletPositionGES(mconf)\r\n elif args.arch == 'TripletPositionGESRT':\r\n model = TripletPositionGESRT (mconf) \r\n elif args.arch == 'TripletPositionF':\r\n model = TripletPositionF(mconf) \r\n elif args.arch == 'TripletPositionGESF':\r\n model = TripletPositionGESF(mconf)\r\n elif args.arch == 'CTransformerF':\r\n model = CTransformerF(mconf)\r\n elif args.arch == 'EmbFCPos':\r\n model = EmbFCPos(mconf)\r\n elif args.arch == 'EmbFCPosGES':\r\n model = EmbFCPosGES(mconf)\r\n\r\n return model\r\n\r\ndef fold_split(args):\r\n\r\n num_class = os.listdir(args.data_dir)\r\n class_name = [i for i in num_class if len(i.split('.'))==1]\r\n class_name = 
sorted(class_name)\r\n\r\n num_samples = []\r\n\r\n for i in class_name:\r\n ns = len(os.listdir(args.data_dir+i))\r\n num_samples.append(ns)\r\n\r\n d = {'class_name':class_name,'n_samples':num_samples}\r\n pd_class_info = pd.DataFrame(d)\r\n \r\n folds=10\r\n\r\n class_used = pd_class_info.loc[pd_class_info['n_samples']>=folds]\r\n class_used = class_used.rename_axis('class_index').reset_index()\r\n class_used.to_csv(args.data_dir + 'sample_info_' + args.dataset + '.csv', index=False)\r\n\r\n num_class=len(class_used)\r\n\r\n tuple_list = []\r\n\r\n for nm_class in class_used['class_name']:\r\n num_sample = class_used.loc[class_used['class_name']==nm_class]['n_samples'].values[0]\r\n class_idx = class_used.loc[class_used['class_name']==nm_class]['class_index'].values[0]\r\n samples = os.listdir(args.data_dir+nm_class)\r\n count_split = 0\r\n\r\n for i in range(0,num_sample):\r\n count_split = count_split+1\r\n if count_split > folds:\r\n count_split = 1\r\n\r\n tuple_onerow = tuple([nm_class,class_idx,samples[i],count_split])\r\n tuple_list.append(tuple_onerow)\r\n \r\n all_split = pd.DataFrame(tuple_list,columns = ['class_name','class_index','name_samples','split'])\r\n\r\n test_split = pd.DataFrame(columns = all_split.columns)\r\n train_split = pd.DataFrame(columns = all_split.columns)\r\n validation_split = pd.DataFrame(columns = all_split.columns)\r\n\r\n for i in range(1,folds):\r\n test = all_split.loc[all_split['split']==i]\r\n train = all_split.loc[all_split['split']!=i]\r\n split_min = i + 1\r\n if split_min >= folds:\r\n split_min = 1\r\n validation = train.loc[train['split']==split_min]\r\n train = train.loc[train['split']!=split_min]\r\n train['split'] = i\r\n validation['split'] = i\r\n\r\n test_split = test_split.append(test)\r\n validation_split = validation_split.append(validation)\r\n train_split = train_split.append(train)\r\n\r\n train_split.to_csv(args.data_dir + 'train_split.csv', index=False)\r\n validation_split.to_csv(args.data_dir + 'validation_split.csv', index=False)\r\n test_split.to_csv(args.data_dir + 'test_split.csv', index=False)\r\n\r\n\r\nif __name__ == '__main__':\r\n\r\n best_accuracy=0\r\n\r\n args = get_args()\r\n\r\n if args.train:\r\n\r\n #class_info = fold_split(args)\r\n\r\n block_size = args.block_size # spatial extent of the model for its context\r\n train_dataset = get_dataloader(args=args,train_val='training',load= not args.create_dataset)\r\n validation_dataset = get_dataloader(args=args,train_val='validation',load= not args.create_dataset)\r\n\r\n if args.bert:\r\n if args.default:\r\n mconf = BertConfig(vocab_size_or_config_json_file = train_dataset.vocab_size,num_class=args.n_class)\r\n else:\r\n if args.addposition:\r\n mconf = BertConfig(vocab_size_or_config_json_file = train_dataset.vocab_size,num_class=args.n_class,num_hidden_layers=args.n_layer,hidden_size=args.n_emb,num_attention_heads=args.n_head,type_vocab_size=args.n_vocab_type,position_size=train_dataset.position_size)\r\n else: \r\n mconf = BertConfig(vocab_size_or_config_json_file = train_dataset.vocab_size,num_class=args.n_class,num_hidden_layers=args.n_layer,hidden_size=args.n_emb,num_attention_heads=args.n_head,type_vocab_size=args.n_vocab_type)\r\n\r\n model = get_model(args,mconf)\r\n\r\n string_logs = f\"{args.tag}_{args.arch}_bs{args.block_size:.0f}_nl{args.n_layer:.0f}_nh{args.n_head:.0f}_ne{args.n_emb:.0f}_cl{args.context_length:.0f}/\"\r\n tconf = TrainerConfig(max_epochs=150, batch_size=1, learning_rate=0.001,\r\n lr_decay=True, warmup_tokens=1*150, 
final_tokens=150*len(train_dataset)*block_size,\r\n num_workers=1,string_logs=string_logs, args=args)\r\n trainer = Trainer(model, train_dataset, validation_dataset, tconf)\r\n trainer.bert_train()\r\n\r\n if args.rbm:\r\n num_class=args.n_class\r\n mconf = GPTConfig(vocab_size=train_dataset.vocab_size, block_size=block_size,num_class=num_class,\r\n n_layer=args.n_layer,n_head=args.n_head, n_embd=args.n_emb,context_length=args.context_length,conv_filter=256)\r\n\r\n if args.addposition:\r\n mconf = GPTConfig(vocab_size=train_dataset.vocab_size, block_size=block_size,num_class=num_class,\r\n n_layer=args.n_layer,n_head=args.n_head, n_embd=args.n_emb,context_length=args.context_length,conv_filter=256,position_size=train_dataset.position_size)\r\n\r\n model = get_model(args,mconf)\r\n\r\n string_logs = f\"{args.tag}_{args.arch}_bs{mconf.block_size:.0f}_nl{mconf.n_layer:.0f}_nh{mconf.n_head:.0f}_ne{mconf.n_embd:.0f}_cl{mconf.context_length:.0f}/\"\r\n\r\n tconf = TrainerConfig(max_epochs=150, batch_size=1, learning_rate=6e-4,\r\n lr_decay=True, warmup_tokens=1*150, final_tokens=150*len(train_dataset)*block_size,\r\n num_workers=1,string_logs=string_logs, args=args)\r\n trainer = Trainer(model, train_dataset, validation_dataset, tconf)\r\n\r\n output_mode = args.output_mode.split(\"_\")\r\n\r\n if len(output_mode)>1:\r\n trainer.multi_stream_rbm(len(output_mode))\r\n else:\r\n trainer.basic_train()\r\n else:\r\n num_class=args.n_class\r\n mconf = GPTConfig(vocab_size=train_dataset.vocab_size, block_size=block_size,num_class=num_class,\r\n n_layer=args.n_layer,n_head=args.n_head, n_embd=args.n_emb,context_length=args.context_length,conv_filter=256)\r\n\r\n if args.addposition:\r\n mconf = GPTConfig(vocab_size=train_dataset.vocab_size, block_size=block_size,num_class=num_class,\r\n n_layer=args.n_layer,n_head=args.n_head, n_embd=args.n_emb,context_length=args.context_length,conv_filter=256,position_size=train_dataset.position_size)\r\n\r\n model = get_model(args,mconf)\r\n\r\n string_logs = f\"{args.tag}_{args.arch}_bs{mconf.block_size:.0f}_nl{mconf.n_layer:.0f}_nh{mconf.n_head:.0f}_ne{mconf.n_embd:.0f}_cl{mconf.context_length:.0f}/\"\r\n\r\n tconf = TrainerConfig(max_epochs=150, batch_size=1, learning_rate=6e-4,\r\n lr_decay=True, warmup_tokens=1*150, final_tokens=150*len(train_dataset)*block_size,\r\n num_workers=1,string_logs=string_logs, args=args)\r\n trainer = Trainer(model, train_dataset, validation_dataset, tconf)\r\n\r\n output_mode = args.output_mode.split(\"_\")\r\n\r\n if len(output_mode)>1:\r\n trainer.multi_stream(len(output_mode))\r\n else:\r\n trainer.basic_train()\r\n\r\n if args.newtraining:\r\n block_size = args.block_size # spatial extent of the model for its context\r\n train_dataset = get_dataloader(args=args,train_val='training',load= not args.create_dataset)\r\n validation_dataset = get_dataloader(args=args,train_val='validation',load= not args.create_dataset)\r\n\r\n mconf = GPTConfig(vocab_size=train_dataset.vocab_size, block_size=block_size,num_class=args.n_class,\r\n n_layer=args.n_layer,n_head=args.n_head, n_embd=args.n_emb,context_length=args.context_length,conv_filter=256)\r\n \r\n if args.addpostoken:\r\n mconf = GPTConfig(vocab_size=train_dataset.vocab_size, block_size=block_size,num_class=args.n_class,\r\n n_layer=args.n_layer,n_head=args.n_head, n_embd=args.n_emb,context_length=args.context_length,conv_filter=256,position_size=train_dataset.position_size,rt_size = train_dataset.rt_size)\r\n\r\n if args.addgestoken:\r\n mconf = 
GPTConfig(vocab_size=train_dataset.vocab_size, block_size=block_size,num_class=args.n_class,\r\n n_layer=args.n_layer,n_head=args.n_head, n_embd=args.n_emb,context_length=args.context_length,conv_filter=256,position_size=train_dataset.position_size, ges_size = train_dataset.ges_size,rt_size = train_dataset.rt_size)\r\n\r\n model = get_model(args,mconf)\r\n\r\n string_logs = f\"{args.tag}_{args.arch}_bs{mconf.block_size:.0f}_nl{mconf.n_layer:.0f}_nh{mconf.n_head:.0f}_ne{mconf.n_embd:.0f}_cl{mconf.context_length:.0f}/\"\r\n\r\n tconf = TrainerConfig(max_epochs=150, batch_size=1, learning_rate=6e-4,\r\n lr_decay=True, warmup_tokens=1*150, final_tokens=150*len(train_dataset)*block_size,\r\n num_workers=1,string_logs=string_logs, args=args)\r\n trainer = Trainer(model, train_dataset, validation_dataset, tconf)\r\n\r\n output_mode = args.output_mode.split(\"_\")\r\n\r\n trainer.dynamic_stream()\r\n\r\n \r\n if args.predict:\r\n\r\n class_info = fold_split(args)\r\n block_size = args.block_size # spatial extent of the model for its context\r\n\r\n training_dataset = get_dataloader(args=args,train_val='training',load=True)\r\n validation_dataset = get_dataloader(args=args,train_val='validation',load=True)\r\n test_dataset = get_dataloader(args=args,train_val='testing',load=True)\r\n\r\n num_class=args.n_class\r\n\r\n mconf = GPTConfig(vocab_size=validation_dataset.vocab_size, block_size=block_size,num_class=num_class,\r\n n_layer=args.n_layer,n_head=args.n_head, n_embd=args.n_emb,context_length=args.context_length,conv_filter=256)\r\n\r\n if args.addposition:\r\n mconf = GPTConfig(vocab_size=validation_dataset.vocab_size, block_size=block_size,num_class=num_class,\r\n n_layer=args.n_layer,n_head=args.n_head, n_embd=args.n_emb,context_length=args.context_length,conv_filter=256,position_size=validation_dataset.position_size)\r\n\r\n model = get_model(args,mconf)\r\n\r\n string_logs = f\"{args.tag}_{args.arch}_bs{mconf.block_size:.0f}_nl{mconf.n_layer:.0f}_nh{mconf.n_head:.0f}_ne{mconf.n_embd:.0f}_cl{mconf.context_length:.0f}/\"\r\n\r\n tconf = TrainerConfig(max_epochs=200, batch_size=1, learning_rate=6e-3,\r\n lr_decay=True, warmup_tokens=1*200, final_tokens=200*len(validation_dataset)*block_size,\r\n num_workers=20,string_logs=string_logs, args=args)\r\n\r\n trainer = Trainer(model, None,[validation_dataset], tconf)\r\n\r\n output_mode = args.output_mode.split(\"_\")\r\n\r\n if len(output_mode)>1:\r\n trainer.predict_multi_stream(len(output_mode))\r\n else:\r\n trainer.predict()\r\n\r\n if args.newpredict:\r\n\r\n class_info = fold_split(args)\r\n block_size = args.block_size # spatial extent of the model for its context\r\n\r\n training_dataset = get_dataloader(args=args,train_val='training',load=True)\r\n validation_dataset = get_dataloader(args=args,train_val='validation',load=True)\r\n test_dataset = get_dataloader(args=args,train_val='testing',load=True)\r\n\r\n num_class=args.n_class\r\n\r\n mconf = GPTConfig(vocab_size=validation_dataset.vocab_size, block_size=block_size,num_class=num_class,\r\n n_layer=args.n_layer,n_head=args.n_head, n_embd=args.n_emb,context_length=args.context_length,conv_filter=256,position_size=validation_dataset.position_size, rt_size = validation_dataset.rt_size)\r\n\r\n model = get_model(args,mconf)\r\n\r\n string_logs = f\"{args.tag}_{args.arch}_bs{mconf.block_size:.0f}_nl{mconf.n_layer:.0f}_nh{mconf.n_head:.0f}_ne{mconf.n_embd:.0f}_cl{mconf.context_length:.0f}/\"\r\n\r\n tconf = TrainerConfig(max_epochs=200, batch_size=1, learning_rate=6e-3,\r\n lr_decay=True, 
warmup_tokens=1*200, final_tokens=200*len(validation_dataset)*block_size,\r\n num_workers=20,string_logs=string_logs, args=args)\r\n\r\n trainer = Trainer(model, None,[validation_dataset], tconf)\r\n\r\n if args.visval:\r\n trainer.vis_embed()\r\n\r\n if args.crossdata:\r\n trainer.newpredict_dynamic_streamc(args.predictvis)\r\n else:\r\n trainer.newpredict_dynamic_stream(args.predictvis)\r\n\r\n if args.finalpredict:\r\n\r\n class_info = fold_split(args)\r\n block_size = args.block_size # spatial extent of the model for its context\r\n\r\n validation_dataset = get_dataloader(args=args,train_val='validation',load=True)\r\n train_dataset = get_dataloader(args=args,train_val='training',load=True)\r\n\r\n num_class=args.n_class\r\n \r\n mconf = GPTConfig(vocab_size=validation_dataset.vocab_size, block_size=block_size,num_class=args.n_class, n_layer=args.n_layer,n_head=args.n_head, n_embd=args.n_emb,context_length=args.context_length,conv_filter=256,\r\n position_size=validation_dataset.position_size, ges_size = validation_dataset.ges_size,rt_size = validation_dataset.rt_size)\r\n\r\n model = get_model(args,mconf)\r\n\r\n string_logs = f\"{args.tag}_{args.arch}_bs{mconf.block_size:.0f}_nl{mconf.n_layer:.0f}_nh{mconf.n_head:.0f}_ne{mconf.n_embd:.0f}_cl{mconf.context_length:.0f}/\"\r\n\r\n tconf = TrainerConfig(max_epochs=200, batch_size=1, learning_rate=6e-3,\r\n lr_decay=True, warmup_tokens=1*200, final_tokens=200*len(validation_dataset)*block_size,\r\n num_workers=20,string_logs=string_logs, args=args)\r\n\r\n trainer = Trainer(model, None,[validation_dataset], tconf)\r\n\r\n if args.vis_attention:\r\n trainer = Trainer(model, None,[train_dataset, validation_dataset], tconf) \r\n\r\n trainer.visualize_attention(args.vis_attention)\r\n\r\n else: \r\n if args.visval: \r\n trainer.vis_embed()\r\n \r\n if args.predictvis:\r\n trainer = Trainer(model, None,[train_dataset,validation_dataset], tconf) \r\n\r\n trainer.finalpredict_dynamic_stream(args.predictvis,args.adddata_dir)\r\n\r\n if args.finalpredictnewdata:\r\n\r\n class_info = fold_split(args)\r\n block_size = args.block_size # spatial extent of the model for its context\r\n\r\n validation_dataset = get_dataloader(args=args,train_val='validation',load=True)\r\n \r\n mconf = GPTConfig(vocab_size=validation_dataset.vocab_size, block_size=block_size,num_class=args.n_class, n_layer=args.n_layer,n_head=args.n_head, n_embd=args.n_emb,context_length=args.context_length,conv_filter=256,\r\n position_size=validation_dataset.position_size, ges_size = validation_dataset.ges_size,rt_size = validation_dataset.rt_size)\r\n\r\n model = get_model(args,mconf)\r\n\r\n string_logs = f\"{args.tag}_{args.arch}_bs{mconf.block_size:.0f}_nl{mconf.n_layer:.0f}_nh{mconf.n_head:.0f}_ne{mconf.n_embd:.0f}_cl{mconf.context_length:.0f}/\"\r\n\r\n tconf = TrainerConfig(max_epochs=200, batch_size=1, learning_rate=6e-3,\r\n lr_decay=True, warmup_tokens=1*200, final_tokens=200*len(validation_dataset)*block_size,\r\n num_workers=20,string_logs=string_logs, args=args)\r\n\r\n trainer = Trainer(model, None,[validation_dataset], tconf)\r\n\r\n if args.vis_attention:\r\n trainer = Trainer(model, None,[validation_dataset], tconf) \r\n trainer.visualize_attention(args.vis_attention)\r\n\r\n else: \r\n if args.visval: \r\n trainer.vis_embed()\r\n \r\n if args.predictvis:\r\n trainer = Trainer(model, None,[validation_dataset], tconf) \r\n\r\n trainer.finalpredict_newdata(args.predictvis,args.adddata_dir)\r\n\r\n if args.newpredict2:\r\n\r\n class_info = fold_split(args)\r\n block_size 
= args.block_size # spatial extent of the model for its context\r\n\r\n training_dataset = get_dataloader(args=args,train_val='training',load=True)\r\n validation_dataset = get_dataloader(args=args,train_val='validation',load=True)\r\n\r\n num_class=args.n_class\r\n\r\n mconf = GPTConfig(vocab_size=validation_dataset.vocab_size, block_size=block_size,num_class=num_class,\r\n n_layer=args.n_layer,n_head=args.n_head, n_embd=args.n_emb,context_length=args.context_length,conv_filter=256,position_size=validation_dataset.position_size, ges_size = validation_dataset.ges_size, rt_size = validation_dataset.rt_size)\r\n\r\n model = get_model(args,mconf)\r\n\r\n string_logs = f\"{args.tag}_{args.arch}_bs{mconf.block_size:.0f}_nl{mconf.n_layer:.0f}_nh{mconf.n_head:.0f}_ne{mconf.n_embd:.0f}_cl{mconf.context_length:.0f}/\"\r\n\r\n tconf = TrainerConfig(max_epochs=200, batch_size=1, learning_rate=6e-3,\r\n lr_decay=True, warmup_tokens=1*200, final_tokens=200*len(validation_dataset)*block_size,\r\n num_workers=20,string_logs=string_logs, args=args)\r\n\r\n trainer = Trainer(model, None,[validation_dataset], tconf)\r\n\r\n if args.visval:\r\n trainer.vis_embed2()\r\n\r\n trainer.newpredict_dynamic_stream(args.predictvis)\r\n\r\n if args.single_predict:\r\n\r\n block_size = args.block_size\r\n num_class=args.n_class\r\n\r\n validation_dataset = get_dataloader(args=args,train_val='validation',load=True)\r\n\r\n test_dataset = SinglePrediction(data_dir = args.data_dir)\r\n\r\n mconf = GPTConfig(vocab_size=validation_dataset.vocab_size, block_size=block_size,num_class=num_class,\r\n n_layer=args.n_layer,n_head=args.n_head, n_embd=args.n_emb,context_length=args.context_length,conv_filter=256)\r\n\r\n model = get_model(args,mconf)\r\n string_logs = f\"{args.tag}_{args.arch}_bs{mconf.block_size:.0f}_nl{mconf.n_layer:.0f}_nh{mconf.n_head:.0f}_ne{mconf.n_embd:.0f}_cl{mconf.context_length:.0f}/\"\r\n\r\n tconf = TrainerConfig(max_epochs=200, batch_size=1, learning_rate=6e-4,\r\n lr_decay=True, warmup_tokens=1*200, final_tokens=200*len(validation_dataset)*block_size,\r\n num_workers=20,string_logs=string_logs, args=args)\r\n\r\n trainer = Trainer(model, None,[test_dataset], tconf)\r\n\r\n trainer.single_predict()\r\n\r\n if args.vis_weight:\r\n class_info = fold_split(args)\r\n block_size = args.block_size # spatial extent of the model for its context\r\n\r\n training_dataset = get_dataloader(args=args,train_val='training',load=True)\r\n validation_dataset = get_dataloader(args=args,train_val='validation',load=True)\r\n test_dataset = get_dataloader(args=args,train_val='testing',load=True)\r\n\r\n num_class=args.n_class\r\n\r\n mconf = GPTConfig(vocab_size=validation_dataset.vocab_size, block_size=block_size,num_class=num_class,\r\n n_layer=args.n_layer,n_head=args.n_head, n_embd=args.n_emb,context_length=args.context_length,conv_filter=256)\r\n\r\n if args.addposition:\r\n mconf = GPTConfig(vocab_size=validation_dataset.vocab_size, block_size=block_size,num_class=num_class,\r\n n_layer=args.n_layer,n_head=args.n_head, n_embd=args.n_emb,context_length=args.context_length,conv_filter=256,position_size=validation_dataset.position_size)\r\n\r\n model = get_model(args,mconf)\r\n\r\n string_logs = f\"{args.tag}_{args.arch}_bs{mconf.block_size:.0f}_nl{mconf.n_layer:.0f}_nh{mconf.n_head:.0f}_ne{mconf.n_embd:.0f}_cl{mconf.context_length:.0f}/\"\r\n\r\n tconf = TrainerConfig(max_epochs=200, batch_size=1, learning_rate=6e-3,\r\n lr_decay=True, warmup_tokens=1*200, final_tokens=200*len(validation_dataset)*block_size,\r\n 
num_workers=20,string_logs=string_logs, args=args)\r\n\r\n trainer = Trainer(model, None,[validation_dataset,test_dataset], tconf)\r\n\r\n output_mode = args.output_mode.split(\"_\")\r\n\r\n if len(output_mode)>1:\r\n trainer.predict_vis(len(output_mode))\r\n else:\r\n trainer.predict()\r\n\r\n if args.top_weight:\r\n class_info = fold_split(args)\r\n block_size = args.block_size # spatial extent of the model for its context\r\n\r\n training_dataset = get_dataloader(args=args,train_val='training',load=True)\r\n validation_dataset = get_dataloader(args=args,train_val='validation',load=True)\r\n test_dataset = get_dataloader(args=args,train_val='testing',load=True)\r\n\r\n num_class=args.n_class\r\n\r\n mconf = GPTConfig(vocab_size=validation_dataset.vocab_size, block_size=block_size,num_class=num_class,\r\n n_layer=args.n_layer,n_head=args.n_head, n_embd=args.n_emb,context_length=args.context_length,conv_filter=256)\r\n\r\n if args.addposition:\r\n mconf = GPTConfig(vocab_size=validation_dataset.vocab_size, block_size=block_size,num_class=num_class,\r\n n_layer=args.n_layer,n_head=args.n_head, n_embd=args.n_emb,context_length=args.context_length,conv_filter=256,position_size=validation_dataset.position_size)\r\n\r\n model = get_model(args,mconf)\r\n\r\n string_logs = f\"{args.tag}_{args.arch}_bs{mconf.block_size:.0f}_nl{mconf.n_layer:.0f}_nh{mconf.n_head:.0f}_ne{mconf.n_embd:.0f}_cl{mconf.context_length:.0f}/\"\r\n\r\n tconf = TrainerConfig(max_epochs=200, batch_size=1, learning_rate=6e-3,\r\n lr_decay=True, warmup_tokens=1*200, final_tokens=200*len(validation_dataset)*block_size,\r\n num_workers=20,string_logs=string_logs, args=args)\r\n\r\n trainer = Trainer(model, None,[training_dataset,validation_dataset,test_dataset], tconf)\r\n\r\n output_mode = args.output_mode.split(\"_\")\r\n\r\n if len(output_mode)>1:\r\n trainer.topweight_vis(len(output_mode))\r\n else:\r\n trainer.predict()\r\n\r\n if args.single_pred_vcf:\r\n\r\n args = translate_args(args)\r\n\r\n #cmd_preprocess(args)\r\n preprocessing_fromdmm(args)\r\n\r\n pdb.set_trace()\r\n\r\n class_info = fold_split(args)\r\n block_size = args.block_size # spatial extent of the model for its context\r\n\r\n training_dataset = get_dataloader(args=args,train_val='training',load=True)\r\n validation_dataset = get_dataloader(args=args,train_val='validation',load=True)\r\n test_dataset = get_dataloader(args=args,train_val='testing',load=True)\r\n\r\n num_class=args.n_class\r\n\r\n mconf = GPTConfig(vocab_size=validation_dataset.vocab_size, block_size=block_size,num_class=num_class,\r\n n_layer=args.n_layer,n_head=args.n_head, n_embd=args.n_emb,context_length=args.context_length,conv_filter=256)\r\n\r\n if args.addposition:\r\n mconf = GPTConfig(vocab_size=validation_dataset.vocab_size, block_size=block_size,num_class=num_class,\r\n n_layer=args.n_layer,n_head=args.n_head, n_embd=args.n_emb,context_length=args.context_length,conv_filter=256,position_size=validation_dataset.position_size)\r\n\r\n model = get_model(args,mconf)\r\n\r\n string_logs = f\"{args.tag}_{args.arch}_bs{mconf.block_size:.0f}_nl{mconf.n_layer:.0f}_nh{mconf.n_head:.0f}_ne{mconf.n_embd:.0f}_cl{mconf.context_length:.0f}/\"\r\n\r\n tconf = TrainerConfig(max_epochs=200, batch_size=1, learning_rate=6e-3,\r\n lr_decay=True, warmup_tokens=1*200, final_tokens=200*len(validation_dataset)*block_size,\r\n num_workers=20,string_logs=string_logs, args=args)\r\n\r\n trainer = Trainer(model, None,[training_dataset,validation_dataset,test_dataset], tconf)\r\n\r\n output_mode = 
args.output_mode.split(\"_\")\r\n\r\n if len(output_mode)>1:\r\n trainer.topweight_vis(len(output_mode))\r\n else:\r\n trainer.predict()\r\n\r\n\r\n\r\n\r\n\r\n ",
"\r\nimport torch\r\nimport torch.nn as nn\r\nfrom torch.nn import functional as F\r\nimport math\r\nfrom torch.utils.data import Dataset\r\nimport os\r\nimport pandas as pd\r\nimport pdb\r\nimport numpy as np\r\nimport math\r\nimport pickle\r\nimport random\r\nfrom sklearn.utils import shuffle\r\n\r\nclass TCGAPCAWG_Dataloader(Dataset):\r\n\r\n def __init__(self, dataset_name = None, \r\n data_dir=None, \r\n mode='training', \r\n curr_fold=1, \r\n block_size=5000, \r\n load=False,\r\n addtriplettoken=False,\r\n addpostoken=False,\r\n addgestoken=False,\r\n addrt=False,\r\n nummut = 0,\r\n frac = 0,\r\n crossdata=False,\r\n crossdatadir=None,\r\n pcawg2tgca_class=False,\r\n tcga2pcawg_class=False,\r\n mutratio = '1-0-0-0-0',\r\n adddatadir = None,\r\n input_filename=None,\r\n args = None,\r\n gx_dir=None,\r\n addepigen=False):\r\n\r\n self.dataset_name = dataset_name\r\n self.data_dir=data_dir\r\n self.mode=mode\r\n self.curr_fold=int(curr_fold)\r\n self.block_size=block_size\r\n self.load=load\r\n self.addtriplettoken=addtriplettoken\r\n self.addpostoken=addpostoken\r\n self.addrt=addrt\r\n self.nummut = nummut\r\n self.frac = frac\r\n self.addgestoken = addgestoken\r\n self.crossdata= crossdata\r\n self.crossdatadir = crossdatadir\r\n self.adddatadir = adddatadir\r\n self.args = args\r\n self.gx_dir = gx_dir\r\n self.mutratio = mutratio\r\n\r\n self.pcawg2tgca_class=pcawg2tgca_class\r\n self.tcga2pcawg_class=tcga2pcawg_class\r\n\r\n self.newformat = True\r\n self.newformat = False\r\n\r\n self.NiSi = False\r\n self.SNV = False\r\n self.indel = False\r\n self.SVMEI = False\r\n self.Normal = False\r\n\r\n self.dnn_input = 1\r\n\r\n if self.args == None:\r\n self.single_pred_vcf = False\r\n self.cwd = str(os.path.abspath('..')) + '/'\r\n else:\r\n self.single_pred_vcf = self.args.single_pred_vcf\r\n self.cwd = self.args.cwd\r\n\r\n\r\n self.input_filename = input_filename\r\n\r\n if self.nummut > 0 :\r\n self.block_size = self.nummut\r\n\r\n if self.dataset_name == 'pcawg':\r\n if self.args.multi_pred_vcf:\r\n fulltuple = []\r\n for idx in range(len(input_filename)):\r\n va = input_filename[idx]\r\n onetup = (va[:-4],'',1,1)\r\n #print(onetup)\r\n fulltuple.append(onetup)\r\n self.validation_fold = pd.DataFrame(fulltuple,columns =['samples', 'nm_class', 'slices','fold'])\r\n self.test_fold = self.validation_fold\r\n self.newformat = True\r\n \r\n if self.single_pred_vcf:\r\n self.onlyfilename = self.args.input_filename[:-4]\r\n onetup = [(self.onlyfilename,'',1,1)]\r\n self.validation_fold = pd.DataFrame(onetup,columns =['samples', 'nm_class', 'slices','fold'])\r\n self.test_fold = self.validation_fold\r\n \r\n self.newformat = True\r\n '''\r\n else:\r\n if self.newformat:\r\n self.training_fold = pd.read_csv('./dataset_utils/pcawg_train.csv',index_col=0)\r\n self.training_fold = self.training_fold.loc[self.training_fold['fold']==self.curr_fold]\r\n self.validation_fold = pd.read_csv('./dataset_utils/pcawg_val.csv',index_col=0)\r\n self.validation_fold = self.validation_fold.loc[self.validation_fold['fold']==self.curr_fold]\r\n else:\r\n self.training_fold = pd.read_csv('./oldformat/pcawg_trainfold' + str(self.curr_fold) + '.csv',index_col=0)\r\n self.validation_fold = pd.read_csv('./oldformat/pcawg_valfold' + str(self.curr_fold) + '.csv',index_col=0)\r\n '''\r\n elif self.dataset_name == 'tcga':\r\n self.training_fold = pd.read_csv('./dataset_utils/tcga_trainfold' + str(self.curr_fold) + '.csv',index_col=0)\r\n self.validation_fold = pd.read_csv('./dataset_utils/tcga_valfold' + 
str(self.curr_fold) + '.csv',index_col=0)\r\n elif self.dataset_name == 'westcga':\r\n self.training_fold = pd.read_csv('./dataset_utils/tcgawes_trainfold' + str(self.curr_fold) + '.csv',index_col=0)\r\n self.validation_fold = pd.read_csv('./dataset_utils/tcgawes_valfold' + str(self.curr_fold) + '.csv',index_col=0)\r\n elif self.dataset_name == 'wgspcawg':\r\n self.training_fold = pd.read_csv('./dataset_utils/pcawgwgs_trainfold' + str(self.curr_fold) + '.csv',index_col=0)\r\n self.validation_fold = pd.read_csv('./dataset_utils/pcawgwgs_valfold' + str(self.curr_fold) + '.csv',index_col=0)\r\n\r\n if self.dataset_name == 'wgsgx':\r\n self.gx = pd.read_csv(self.gx_dir + 'PCAWG_gene_expression.tsv',sep='\\t',index_col=0)\r\n #self.gx = self.gx.iloc[:,-100:]\r\n\r\n self.training_fold = pd.read_csv(self.cwd + 'dataset_utils/wgsgx_train.csv',index_col=0)\r\n self.training_fold = self.training_fold.loc[self.training_fold['fold'] == self.curr_fold]\r\n\r\n self.validation_fold = pd.read_csv(self.cwd + 'dataset_utils/wgsgx_val.csv',index_col=0)\r\n self.validation_fold = self.validation_fold.loc[self.validation_fold['fold'] == self.curr_fold] \r\n self.newformat = True\r\n\r\n self.dnn_input = len(self.gx.iloc[0,:-2])\r\n #pdb.set_trace()\r\n\r\n if self.adddatadir is not None:\r\n adddata = pd.DataFrame(columns=self.validation_fold.columns)\r\n adddata.columns = self.validation_fold.columns\r\n\r\n folder = os.listdir(self.adddatadir)\r\n\r\n for i in folder:\r\n\r\n samples = os.listdir(self.adddatadir + i )\r\n for j in samples:\r\n if j[0:3] == 'new':\r\n counter = pd.read_csv(self.adddatadir + i + '/count_new_' + j[4:],index_col=0)\r\n\r\n listall = [i,j[4:]] + counter['0'].values.tolist() + [1]\r\n\r\n pds = pd.DataFrame(listall)\r\n pds = pds.T\r\n pds.columns=self.validation_fold.columns\r\n\r\n adddata = adddata.append(pds)\r\n\r\n adddata = adddata.reset_index(drop=True)\r\n\r\n self.adddata = adddata\r\n\r\n #self.validation_fold = self.validation_fold.append(self.adddata)\r\n self.validation_fold = self.adddata\r\n self.data_dir = self.adddatadir\r\n\r\n if self.single_pred_vcf:\r\n samples_names = input_filename[:-4]\r\n pd_count = pd.read_csv(args.tmp_dir + 'count_' + input_filename[:-4] + '.csv', index_col=0)['0'].to_list()\r\n onerow = ['',samples_names] + pd_count + [1]\r\n pd_data = pd.DataFrame(onerow).T\r\n pd_data.columns = ['nm_class','samples','NiSi','SNV','indel','SVMEI','Normal','fold']\r\n self.validation_fold = pd_data\r\n self.test_fold = self.validation_fold\r\n\r\n self.load_classinfo()\r\n\r\n self.vocab_mutation = pd.read_csv(self.cwd + 'extfile/dictMutation.csv',index_col=0)\r\n self.allSNV_index = 0\r\n\r\n if self.mutratio is not None:\r\n self.mutratio = mutratio.split('-')\r\n self.mutratio = [float(i) for i in self.mutratio]\r\n \r\n if self.mutratio[0]>0:\r\n self.NiSi = True \r\n if self.mutratio[1]>0:\r\n self.SNV = True\r\n if self.mutratio[2]>0:\r\n self.indel = True\r\n if self.mutratio[3]>0:\r\n self.SVMEI = True\r\n if self.mutratio[4]>0:\r\n self.Normal = True\r\n\r\n vocabsize = 0\r\n if self.NiSi:\r\n vocabsize = len(self.vocab_mutation.loc[self.vocab_mutation['typ']=='NiSi'])\r\n if self.SNV:\r\n vocabsize = vocabsize + len(self.vocab_mutation.loc[self.vocab_mutation['typ']=='SNV'])\r\n if self.indel:\r\n vocabsize = vocabsize + len(self.vocab_mutation.loc[self.vocab_mutation['typ']=='indel']) \r\n if self.SVMEI:\r\n vocabsize = vocabsize + len(self.vocab_mutation.loc[self.vocab_mutation['typ'].isin(['MEI','SV'])])\r\n if self.Normal:\r\n vocabsize 
= vocabsize + len(self.vocab_mutation.loc[self.vocab_mutation['typ']=='Normal'])\r\n\r\n self.vocab_size = vocabsize + 1\r\n #print(self.vocab_size)\r\n\r\n #pdb.set_trace()\r\n\r\n self.pd_position_vocab = pd.read_csv(self.cwd + 'extfile/dictChpos.csv',index_col=0)\r\n self.pd_ges_vocab = pd.read_csv(self.cwd + 'extfile/dictGES.csv',index_col=0)\r\n\r\n self.position_size = len(self.pd_position_vocab) + 1\r\n self.ges_size = len(self.pd_ges_vocab) + 1\r\n \r\n self.rt_size = 1\r\n\r\n self.midstring = '.' + self.dataset_name + str(mutratio) + str(int(self.addtriplettoken)) + str(int(self.addpostoken)) + str(int(self.addgestoken)) + str(int(self.addrt)) + '/' \r\n \r\n if self.mode == 'validation' or self.mode == 'testing':\r\n if self.crossdata:\r\n os.makedirs(self.crossdatadir + self.midstring, exist_ok=True)\r\n self.data_dir = self.crossdatadir\r\n #pdb.set_trace()\r\n \r\n else:\r\n os.makedirs(self.data_dir + self.midstring, exist_ok=True)\r\n\r\n def load_classinfo(self):\r\n if self.dataset_name == 'pcawg':\r\n pd_data = pd.read_csv(self.cwd + 'dataset_utils/classinfo_pcawg.csv',index_col = 0)\r\n self.pd_class_info = pd.DataFrame(pd_data)\r\n elif self.dataset_name == 'wgsgx':\r\n pd_data = pd.read_csv(self.cwd + 'dataset_utils/classinfo_wgsgx.csv',index_col = 0)\r\n self.pd_class_info = pd.DataFrame(pd_data)\r\n else:\r\n num_class = os.listdir(self.data_dir)\r\n name_class = [i for i in num_class if len(i.split('.'))==1]\r\n name_class = sorted(name_class)\r\n n_samples = []\r\n for idx,nm_class in enumerate(name_class):\r\n samples = os.listdir(self.data_dir+nm_class)\r\n samples = [x for x in samples if x[:10]=='count_new_']\r\n n_samples.append(len(samples))\r\n data = list(zip(name_class, np.arange(len(name_class)),n_samples)) \r\n self.pd_class_info = pd.DataFrame(data,columns=['class_name','class_index','n_samples'])\r\n\r\n def get_data(self,idx):\r\n \r\n if self.mode=='training':\r\n instances=self.training_fold.iloc[idx] \r\n elif self.mode=='validation':\r\n instances=self.validation_fold.iloc[idx]\r\n elif self.mode == 'testing':\r\n instances=self.test_fold.iloc[idx]\r\n\r\n if self.newformat:\r\n samples = instances['samples'] + '.csv'\r\n target_name = instances['nm_class']\r\n\r\n if self.single_pred_vcf:\r\n pd_row = pd.read_csv(self.data_dir +'/count_' + samples,index_col=0).T\r\n row_count = pd_row.values[0]\r\n else:\r\n pd_row = pd.read_csv(self.data_dir + target_name +'/count_' + samples,index_col=0).T\r\n row_count = pd_row.values[0]\r\n \r\n else:\r\n target_name = instances['nm_class']\r\n samples = instances[1]\r\n row_count = instances[['NiSi','SNV','indel','SVMEI','Normal']].to_numpy()\r\n\r\n if self.mutratio is not None:\r\n avail_count = np.asarray(self.mutratio) * self.block_size \r\n \r\n diff = avail_count - row_count\r\n pos = diff>0\r\n avail_count1 = row_count * pos\r\n diff = row_count > avail_count\r\n\r\n avail_count2 = avail_count * diff\r\n avail_count3 = avail_count1 + avail_count2\r\n shadowavail_count3 = avail_count3\r\n shadowavail_count3[0] = row_count[0]\r\n\r\n if sum(shadowavail_count3) > self.block_size:\r\n diff = self.block_size - sum(avail_count3) \r\n shadowavail_count3[0] = diff + avail_count3[0]\r\n \r\n avail_count2 = shadowavail_count3.astype(int)\r\n\r\n if avail_count2[0]<0:\r\n \r\n secondmax = avail_count2[np.argmax(avail_count2)]\r\n avail_count2 = avail_count2 * 0.7\r\n\r\n avail_count = avail_count2\r\n\r\n diff = avail_count - row_count\r\n pos = diff>0\r\n avail_count1 = row_count * pos\r\n diff = row_count > 
avail_count\r\n\r\n avail_count2 = avail_count * diff\r\n avail_count3 = avail_count1 + avail_count2\r\n shadowavail_count3 = avail_count3\r\n shadowavail_count3[0] = row_count[0]\r\n\r\n if sum(shadowavail_count3) > self.block_size:\r\n diff = self.block_size - sum(avail_count3) \r\n shadowavail_count3[0] = diff + avail_count3[0]\r\n \r\n avail_count2 = shadowavail_count3.astype(int)\r\n\r\n avail_count = avail_count2\r\n\r\n \r\n def grab(pd_input,grabcol):\r\n return pd_input[grabcol]\r\n\r\n def allgrab(grabcol):\r\n \r\n\r\n if self.NiSi:\r\n #pdb.set_trace()\r\n if self.newformat:\r\n pd_nisi = pd.read_csv(self.data_dir + target_name + '/' + 'SNV_' + samples,index_col=0)\r\n else:\r\n pd_nisi = pd.read_csv(self.data_dir + target_name + '/' + 'NiSi_new_' + samples,index_col=0)\r\n pd_nisi = pd_nisi.sample(n = avail_count[0], replace = False)\r\n pd_nisi = grab(pd_nisi,grabcol)\r\n\r\n if self.SNV:\r\n if self.newformat:\r\n pd_SNV = pd.read_csv(self.data_dir + target_name + '/' + 'MNV_' + samples,index_col=0)\r\n else:\r\n pd_SNV = pd.read_csv(self.data_dir + target_name + '/' + 'SNV_new_' + samples,index_col=0)\r\n pd_SNV = pd_SNV.sample(n = avail_count[1], replace = False)\r\n pd_SNV = grab(pd_SNV,grabcol)\r\n pd_nisi = pd_nisi.append(pd_SNV)\r\n\r\n if self.indel:\r\n pd_indel = pd.read_csv(self.data_dir + target_name + '/' + 'indel_' + samples,index_col=0)\r\n pd_indel = pd_indel.sample(n = avail_count[2], replace = False)\r\n pd_indel = grab(pd_indel,grabcol)\r\n pd_nisi = pd_nisi.append(pd_indel)\r\n \r\n if self.SVMEI:\r\n if self.newformat:\r\n pd_meisv = pd.read_csv(self.data_dir + target_name + '/' + 'MEISV_' + samples,index_col=0)\r\n else:\r\n pd_meisv = pd.read_csv(self.data_dir + target_name + '/' + 'MEISV_new_' + samples,index_col=0)\r\n pd_meisv = pd_meisv.sample(n = avail_count[3], replace = False)\r\n pd_meisv = grab(pd_meisv,grabcol)\r\n pd_nisi = pd_nisi.append(pd_meisv)\r\n\r\n if self.Normal:\r\n if self.newformat:\r\n pd_normal = pd.read_csv(self.data_dir + target_name + '/' + 'Neg_' + samples,index_col=0)\r\n else:\r\n pd_normal = pd.read_csv(self.data_dir + target_name + '/' + 'Normal_new_' + samples,index_col=0)\r\n pd_normal = pd_normal.sample(n = avail_count[4], replace = False)\r\n pd_normal = grab(pd_normal,grabcol)\r\n pd_nisi = pd_nisi.append(pd_normal) \r\n\r\n pd_nisi = pd_nisi.fillna(0)\r\n return pd_nisi\r\n\r\n pd_nisi = pd.DataFrame()\r\n if self.addtriplettoken:\r\n if self.mode=='training' :\r\n pd_nisi = allgrab(['triplettoken'])\r\n else:\r\n filename = self.data_dir + self.midstring + 'val_' + samples\r\n if os.path.isfile(filename):\r\n try:\r\n pd_nisi = pd.read_csv(filename,index_col=0)\r\n except:\r\n pd_nisi = allgrab(['triplettoken'])\r\n pd_nisi = pd_nisi.dropna()\r\n pd_nisi.to_csv(filename) \r\n \r\n else:\r\n pd_nisi = allgrab(['triplettoken'])\r\n pd_nisi.to_csv(filename)\r\n\r\n if self.addpostoken:\r\n if self.mode=='training' :\r\n pd_nisi = allgrab(['triplettoken','postoken'])\r\n else:\r\n #pdb.set_trace()\r\n filename = self.data_dir + self.midstring + 'val_' + samples\r\n if os.path.isfile(filename):\r\n try:\r\n pd_nisi = pd.read_csv(filename,index_col=0)\r\n except:\r\n pd_nisi = allgrab(['triplettoken','postoken'])\r\n pdb.set_trace()\r\n pd_nisi.to_csv(filename)\r\n else:\r\n pd_nisi = allgrab(['triplettoken','postoken'])\r\n pd_nisi.to_csv(filename)\r\n \r\n if self.addgestoken:\r\n if self.mode=='training' :\r\n pd_nisi = allgrab(['triplettoken','postoken','gestoken'])\r\n else:\r\n filename = self.data_dir + 
self.midstring + 'val_' + samples\r\n if os.path.isfile(filename):\r\n try:\r\n pd_nisi = pd.read_csv(filename,index_col=0)\r\n except:\r\n pd_nisi = allgrab(['triplettoken','postoken','gestoken'])\r\n pd_nisi.to_csv(filename)\r\n\r\n else:\r\n pd_nisi = allgrab(['triplettoken','postoken','gestoken'])\r\n pd_nisi.to_csv(filename)\r\n\r\n if self.addrt:\r\n if self.mode=='training' :\r\n pd_nisi = allgrab(['triplettoken','postoken','gestoken','rt'])\r\n else:\r\n filename = self.data_dir + self.midstring + 'val_' + samples\r\n if os.path.isfile(filename):\r\n try:\r\n pd_nisi = pd.read_csv(filename,index_col=0)\r\n except:\r\n pd_nisi = allgrab(['triplettoken','postoken','gestoken','rt'])\r\n pd_nisi.to_csv(filename)\r\n\r\n else:\r\n pd_nisi = allgrab(['triplettoken','postoken','gestoken','rt'])\r\n pd_nisi.to_csv(filename)\r\n\r\n #pdb.set_trace()\r\n pd_nisi = pd_nisi.dropna()\r\n \r\n if self.nummut > 0:\r\n if self.nummut < len(pd_nisi):\r\n pd_nisi = pd_nisi.sample(n = self.nummut, replace = False)\r\n else:\r\n pd_nisi = pd_nisi.sample(n = len(pd_nisi), replace = False)\r\n \r\n #pdb.set_trace()\r\n\r\n if self.frac > 0:\r\n pd_nisi = pd_nisi.sample(frac = self.frac)\r\n\r\n if self.mode =='training':\r\n pd_nisi = pd_nisi.sample(frac = 1)\r\n\r\n #pdb.set_trace()\r\n\r\n np_triplettoken = pd_nisi.to_numpy() \r\n\r\n is_padding = False\r\n if len(pd_nisi) < self.block_size:\r\n mins = self.block_size - len(np_triplettoken)\r\n is_padding = True\r\n \r\n datanumeric = []\r\n #pdb.set_trace()\r\n for i in pd_nisi.columns:\r\n np_data = pd_nisi[i].to_numpy() \r\n if is_padding:\r\n np_data = np.copy(np.pad(np_data, ((0, mins)), mode='constant', constant_values=0))\r\n \r\n if i == 'rt':\r\n tensordata = torch.tensor(np.round(np_data, 1), dtype=torch.half)\r\n #tensordata = np.round(np_data, 3)\r\n\r\n if len(np_data) > self.block_size:\r\n np_data = np.asarray(np_data[:self.block_size],dtype=int)\r\n tensordata = torch.tensor(np_data, dtype=torch.long)\r\n else:\r\n np_data = np.asarray(np_data,dtype=int)\r\n tensordata = torch.tensor(np_data, dtype=torch.long)\r\n datanumeric.append(tensordata)\r\n \r\n datastring = samples\r\n\r\n if self.dataset_name=='wgsgx':\r\n #pdb.set_trace()\r\n gx_data = self.gx.loc[self.gx['samples']==samples[:-4]]\r\n gx_data = gx_data.iloc[:,:-2].values\r\n tensorgx_data = torch.tensor(gx_data, dtype=torch.float)\r\n\r\n datanumeric.append(tensorgx_data)\r\n\r\n #print(datanumeric)\r\n data=[datastring,datanumeric]\r\n #pdb.set_trace()\r\n\r\n if target_name != '':\r\n if self.crossdata:\r\n #pdb.set_trace()\r\n target = self.pd_class_infoto.loc[self.pd_class_infoto['class_name']==target_name].class_index.values[0]\r\n else:\r\n target = self.pd_class_info.loc[self.pd_class_info['class_name']==target_name].class_index.values[0]\r\n target = target.astype(np.int16)\r\n target = torch.tensor(target, dtype=torch.long)\r\n else:\r\n target = ''\r\n\r\n\r\n if self.adddatadir is not None:\r\n return data,[target,target_name]\r\n else: \r\n return data,target\r\n\r\n def __len__(self):\r\n\r\n if self.mode=='training':\r\n return len(self.training_fold)\r\n elif self.mode=='validation':\r\n return len(self.validation_fold)\r\n elif self.mode=='testing':\r\n return len(self.test_fold)\r\n\r\n def __getitem__(self, idx): \r\n\r\n data,target = self.get_data(idx)\r\n\r\n return data, target\r\n\r\n\r\nif __name__ == '__main__':\r\n\r\n #dataloader = PCAWG(dataset_name = 'PCAWG', data_dir='/csc/epitkane/projects/PCAWG/shuffled_samples/', mode='training',portion = 
[8,1,1], folds=10, curr_fold=1,load=True,load_token=True)\r\n\r\n #dataloader = PCAWG(dataset_name = 'pcawg_mut3_comb0', data_dir='/csc/epitkane/projects/PCAWG20191001/data/modified_data/train/all24classes/', mode='training',portion = [8,1,1], folds=10, curr_fold=1,load=True,load_token=True,ncontext=3,addposition=False,filter=False,topk=5000)\r\n #dataloaderVal = PCAWG(dataset_name = 'pcawg_mut3_comb0', data_dir='/csc/epitkane/projects/PCAWG20191001/data/modified_data/train/all24classes/', mode='validation',portion = [8,1,1], folds=10, curr_fold=1,load=True,load_token=True,ncontext=3,addposition=False,filter=False,topk=5000)\r\n #/csc/epitkane/projects/tcga/new23classes/\r\n #/csc/epitkane/projects/PCAWG20191001/data/modified_data/train/new24classes/\r\n\r\n #G:/experiment/data/new24classes/\r\n '''\r\n dataloaderVal = FinalTCGAPCAWG(dataset_name = 'finalpcawg', \r\n data_dir='G:/experiment/data/new24classes/', \r\n mode='validation', \r\n curr_fold=1, \r\n block_size=5000, \r\n load=False,\r\n mutratio = '0.3-0.3-0.3-0-0',\r\n addtriplettoken=False,\r\n addpostoken=False,\r\n addgestoken=True,\r\n addrt=False,\r\n nummut = 0,\r\n frac = 0,\r\n adddatadir='G:/experiment/data/icgc/')\r\n\r\n #pdb.set_trace()\r\n data,target = dataloaderVal.__getitem__(0)\r\n pdb.set_trace()\r\n\r\n for k in range(0,len(dataloaderVal)):\r\n print(k)\r\n data,target = dataloaderVal.__getitem__(k)\r\n '''\r\n\r\n\r\n\r\n '''\r\n WGS GX\r\n '''\r\n\r\n #/scratch/project_2001668/data/pcawg\r\n\r\n dataloaderVal = TCGAPCAWG_Dataloader(dataset_name = 'wgsgx', \r\n data_dir='/scratch/project_2001668/data/pcawg/allclasses/newformat/', \r\n mode='training', \r\n curr_fold=1, \r\n block_size=5000, \r\n load=False,\r\n addtriplettoken=True,\r\n addpostoken=False,\r\n addgestoken=False,\r\n addrt=False,\r\n nummut = 0,\r\n frac = 0,\r\n mutratio = '1-0-0-0-0',\r\n adddatadir = None,\r\n input_filename=None,\r\n args = None,\r\n gx_dir = '/scratch/project_2001668/data/pcawg/PCAWG_geneexp/')\r\n \r\n data,target = dataloaderVal.__getitem__(0)\r\n pdb.set_trace()\r\n\r\n '''\r\n fold = [1,2,3,4,5,6,7,8,9,10]\r\n mutratios = ['1-0-0-0-0','0.5-0.5-0-0-0','0.4-0.3-0.3-0-0','0.3-0.3-0.20-0.20-0','0.25-0.25-0.25-0.15-0.1']\r\n\r\n retrieve = ['addtriplettoken','addpostoken','addgestoken','addrt']\r\n\r\n for fo in fold:\r\n for i in retrieve:\r\n if i == 'addtriplettoken':\r\n addtriplettoken = True\r\n else:\r\n addtriplettoken = False\r\n \r\n if i == 'addpostoken':\r\n addpostoken = True\r\n else:\r\n addpostoken = False\r\n\r\n if i == 'addgestoken':\r\n addgestoken = True\r\n else:\r\n addgestoken = False\r\n\r\n if i == 'addrt':\r\n addrt = True\r\n else:\r\n addrt = False\r\n\r\n for j in mutratios:\r\n dataloaderVal = FinalTCGAPCAWG(dataset_name = 'finalpcawg', \r\n data_dir='G:/experiment/data/new24classes/', \r\n mode='validation', \r\n curr_fold=1, \r\n block_size=5000, \r\n load=False,\r\n mutratio = j,\r\n addtriplettoken=addtriplettoken,\r\n addpostoken=addpostoken,\r\n addgestoken=addgestoken,\r\n addrt=addrt,\r\n nummut = 0,\r\n frac = 0)\r\n for k in range(0,len(dataloaderVal)):\r\n print(str(fo) + ' ' + str(k) + ' ' + i + ' ' + j + ' ' + str(addtriplettoken) + str(addpostoken) + str(addgestoken) + str(addrt))\r\n data,target = dataloaderVal.__getitem__(k)\r\n pdb.set_trace()\r\n\r\n dataloaderVal = TCGA(dataset_name = 'tcga_emb', data_dir='/csc/epitkane/projects/tcga/all23classes/', mode='validation',portion = [8,1,1], folds=10, 
curr_fold=1,load=True,load_token=True,ncontext=64,addposition=True,filter=True,block_size=300,withclass=True,twostream=False)\r\n\r\n for i in range(len(dataloaderVal)):\r\n data,target = dataloaderVal.__getitem__(i)\r\n\r\n dataloaderVal = TCGA(dataset_name = 'tcga_emb', data_dir='/csc/epitkane/projects/tcga/all23classes/', mode='testing',portion = [8,1,1], folds=10, curr_fold=1,load=True,load_token=True,ncontext=64,addposition=True,filter=True,block_size=300,loaddist=False,withclass=True,twostream=False)\r\n\r\n for i in range(len(dataloaderVal)):\r\n data,target = dataloaderVal.__getitem__(i)\r\n \r\n pdb.set_trace()\r\n '''\r\n\r\n "
] | [
[
"pandas.DataFrame"
],
[
"pandas.read_csv",
"numpy.pad",
"numpy.asarray",
"pandas.DataFrame",
"torch.tensor",
"numpy.round",
"numpy.argmax"
]
] |
Bartosz-D3V/ml-dataset-analysis | [
"cb2458dcb7cecba01f52be5b12e816ca00ce7da4"
] | [
"bike-sharing-demand/one_hot_encoder_transformer.py"
] | [
"import pandas as pd\nfrom sklearn.base import BaseEstimator, TransformerMixin\n\n\nclass OneHotEncoderTransformer(BaseEstimator, TransformerMixin):\n\n def __init__(self, columns) -> None:\n self.columns = columns\n\n def fit(self, X, y=None):\n return self\n\n def transform(self, X, y=None):\n X = pd.get_dummies(X, columns=self.columns)\n return X\n"
] | [
[
"pandas.get_dummies"
]
] |
payoto/graphcore_examples | [
"46d2b7687b829778369fc6328170a7b14761e5c6",
"46d2b7687b829778369fc6328170a7b14761e5c6",
"46d2b7687b829778369fc6328170a7b14761e5c6",
"46d2b7687b829778369fc6328170a7b14761e5c6",
"46d2b7687b829778369fc6328170a7b14761e5c6",
"46d2b7687b829778369fc6328170a7b14761e5c6",
"46d2b7687b829778369fc6328170a7b14761e5c6",
"46d2b7687b829778369fc6328170a7b14761e5c6",
"46d2b7687b829778369fc6328170a7b14761e5c6",
"46d2b7687b829778369fc6328170a7b14761e5c6",
"46d2b7687b829778369fc6328170a7b14761e5c6",
"46d2b7687b829778369fc6328170a7b14761e5c6",
"46d2b7687b829778369fc6328170a7b14761e5c6",
"46d2b7687b829778369fc6328170a7b14761e5c6",
"46d2b7687b829778369fc6328170a7b14761e5c6"
] | [
"applications/tensorflow/detection/yolov3/log.py",
"applications/pytorch/miniDALL-E/train.py",
"applications/popart/transformer_transducer/training/common/data/dali/pipeline.py",
"applications/tensorflow/tgn/model.py",
"applications/tensorflow/dynamic_sparsity/ipu_sparse_ops/tools/sparse_transformer_encoder_layer.py",
"code_examples/tensorflow/block_sparse/utils.py",
"applications/popart/transformer_transducer/custom_ops/rnnt_loss/torch_reference/transducer.py",
"code_examples/popart/block_sparse/examples/mnist/bs_mnist.py",
"applications/tensorflow/bert/run_pretraining.py",
"applications/popart/bert/tests/unit/pytorch/weight_decay_test.py",
"applications/tensorflow/bert/bert_data/squad.py",
"applications/popart/faster-rcnn/utils/bbox.py",
"applications/popart/resnext_inference/dataloader.py",
"applications/popart/faster-rcnn/check_tput.py",
"applications/pytorch/conformer/src/utils/score.py"
] | [
"# Copyright (c) 2021 Graphcore Ltd. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"\nLogging utilities.\n\"\"\"\n\nimport csv\nimport datetime\nimport json\nimport logging\nimport os\nimport random\nimport subprocess\n\nimport numpy as np\nimport tensorflow as tf\nfrom tensorflow import pywrap_tensorflow\n\n# Set Python logger\n# Match TensorFlow's default logging format.\nlogFormatter = logging.Formatter(\n '%(asctime)s.%(msecs)06d: %(levelname)-1.1s %(message)s', datefmt='%Y-%m-%d %H:%M:%S')\nlogger = logging.getLogger()\nlogger.setLevel(logging.INFO)\nconsoleHandler = logging.StreamHandler()\nconsoleHandler.setFormatter(logFormatter)\nlogger.addHandler(consoleHandler)\n\n\ndef get_logger():\n return logger\n\n\ndef set_log_file_path(log_file_path):\n global logger\n fileHandler = logging.FileHandler(log_file_path)\n fileHandler.setFormatter(logFormatter)\n logger.addHandler(fileHandler)\n\n\ndef add_arguments(parser):\n group = parser.add_argument_group('Logging')\n group.add_argument('--log-dir', type=str, default=\"./logs/\",\n help=\"Log and weights save directory\")\n group.add_argument('--name-suffix', type=str,\n help=\"Suffix added to name string\")\n group.add_argument('--steps-per-logs', type=int, default=1,\n help=\"Logs per epoch (if number of epochs specified)\")\n group.add_argument('--steps-per-tensorboard', type=int, default=0,\n help='Number of steps between saving statistics to TensorBoard. 
0 to disable.')\n return parser\n\n\ndef set_defaults(opts):\n name = opts['name']\n\n if opts[\"name_suffix\"]:\n name = name + \"_\" + opts[\"name_suffix\"]\n\n if opts.get(\"poplar_version\"):\n v = opts['poplar_version']\n # name += \"_v\" + v[v.find(\"version \") + 8: v.rfind(' ')]\n name += \"_v\" + v[v.find(\"version \") + 8: v.find(' (')]\n\n # We want this to be random even if random seeds have been set so that we don't overwrite\n # when re-running with the same seed\n random_state = random.getstate()\n random.seed()\n random.setstate(random_state)\n\n # System time with milliseconds\n time = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S.%f')[:-3]\n name += \"_{}\".format(time)\n\n if not os.path.isdir(opts[\"save_path\"]):\n os.makedirs(opts[\"save_path\"], exist_ok=True)\n\n opts[\"logs_path\"] = os.path.join(opts[\"save_path\"], name)\n opts[\"checkpoint_path\"] = os.path.join(opts[\"save_path\"], name, 'ckpt')\n\n if not os.path.isdir(opts[\"logs_path\"]):\n os.makedirs(opts[\"logs_path\"], exist_ok=True)\n\n set_log_file_path(os.path.join(opts['logs_path'], 'log.txt'))\n\n with open(os.path.join(opts[\"logs_path\"], 'arguments.json'), 'w') as fp:\n json.dump(opts, fp, sort_keys=True, indent=4, separators=(',', ': '))\n return opts\n\n\ndef write_to_csv(d, write_header, training, logs_path):\n if logs_path:\n filename = 'training.csv' if training else 'validation.csv'\n with open(os.path.join(logs_path, filename), 'a+') as f:\n w = csv.DictWriter(f, d.keys())\n if write_header:\n w.writeheader()\n w.writerow(d)\n\n\ndef print_trainable_variables(logs_path):\n logger.info('Trainable Variables:')\n total_parameters = 0\n for variable in tf.trainable_variables():\n logger.info(variable)\n variable_parameters = 1\n for DIM in variable.get_shape():\n variable_parameters *= DIM.value\n total_parameters += variable_parameters\n logger.info('Total Parameters:' + str(total_parameters) + '\\n')\n\n\ndef make_histogram(values, bins=512):\n # From https://gist.github.com/gyglim/1f8dfb1b5c82627ae3efcfbbadb9f514\n # License: BSD License 2.0\n # Author Michael Gygli\n\n # Logs the histogram of a list/vector of values.\n # Convert to a numpy array\n values = np.array(values)\n\n # Create histogram using numpy\n counts, bin_edges = np.histogram(values, bins=bins)\n\n # Fill fields of histogram proto\n hist = tf.HistogramProto()\n hist.min = float(np.min(values))\n hist.max = float(np.max(values))\n hist.num = int(np.prod(values.shape))\n hist.sum = float(np.sum(values))\n hist.sum_squares = float(np.sum(values**2))\n\n # Requires equal number as bins, where the first goes from -DBL_MAX to bin_edges[1]\n # See https://github.com/tensorflow/tensorflow/blob/r2.6/tensorflow/core/framework/summary.proto#L30\n # Thus, we drop the start of the first bin\n bin_edges = bin_edges[1:]\n\n # Add bin edges and counts\n for edge in bin_edges:\n hist.bucket_limit.append(edge)\n for c in counts:\n hist.bucket.append(c)\n\n # Create and write Summary\n return hist\n # return tf.Summary.Value(tag=tag, histo=hist)\n\n\ndef save_model_statistics(checkpoint_path, summary_writer, step=0):\n initializers = load_initializers_from_checkpoint(checkpoint_path)\n summary = tf.Summary()\n for name, np_weight in initializers.items():\n name = name.replace(\":\", \"_\")\n tensor = np_weight.astype(np.float32)\n if not np.any(np.isnan(tensor)):\n summary.value.add(tag=name, histo=make_histogram(tensor))\n summary.value.add(tag=f\"L2/{name}\", simple_value=np.linalg.norm(tensor))\n\n summary_writer.add_summary(summary, 
step)\n summary_writer.flush()\n\n\ndef load_initializers_from_checkpoint(checkpoint_path):\n initializers = {}\n reader = pywrap_tensorflow.NewCheckpointReader(checkpoint_path)\n var_to_map = reader.get_variable_to_shape_map()\n for key, dim in var_to_map.items():\n if key == 'global_step':\n continue\n # if reader.get_tensor(key).dtype.name == 'float16':\n # int_data = np.asarray(reader.get_tensor(key), np.int32)\n # np_weight = int_data.view(dtype=np.float16).reshape(dim)\n # else:\n np_weight = reader.get_tensor(key)\n initializers[key] = np_weight\n return initializers\n\n\ndef get_git_revision():\n return subprocess.check_output([\"git\", \"describe\", \"--always\", \"--dirty\"]).strip().decode()\n",
"# Copyright (c) 2021 Graphcore Ltd. All rights reserved.\n# Copyright (c) 2021 lucidrains\n\n# This file has been modified by Graphcore\n\n\nimport argparse\nfrom pathlib import Path\nimport datetime\nimport time\nfrom glob import glob\nimport os\nimport shutil\nfrom log import Logger\nimport torch\nimport poptorch\nimport popart\nimport wandb # Quit early if user doesn't have wandb installed.\nfrom poptorch.optim import Adam, AdamW\nfrom torch.optim.lr_scheduler import ReduceLROnPlateau\n\nfrom models.dalle import default\nfrom models import VQGanVAE, WrappedDALLE\nfrom models.loader import TextImageDataset\nfrom models.tokenizer import SimpleTokenizer, YttmTokenizer\nfrom args import parse_args\n\n\n# helpers\n\n\ndef exists(val):\n return val is not None\n\n\ndef get_trainable_params(model, weight_decay=0):\n # Do not apply weight_decay for one-dimensional parameters\n regularized_params = []\n non_regularized_params = []\n for param in model.parameters():\n if param.requires_grad:\n if len(param.shape) == 1:\n non_regularized_params.append(param)\n else:\n regularized_params.append(param)\n\n params = [\n {\"params\": regularized_params, \"weight_decay\": weight_decay},\n {\"params\": non_regularized_params, \"weight_decay\": 0}\n ]\n\n return params\n\n\ndef cp_path_to_dir(cp_path, tag):\n \"\"\"Convert a checkpoint path to a directory with `tag` inserted.\n If `cp_path` is already a directory, return it unchanged.\n \"\"\"\n if not isinstance(cp_path, Path):\n cp_path = Path(cp_path)\n if cp_path.is_dir():\n return cp_path\n path_sans_extension = cp_path.parent / cp_path.stem\n cp_dir = Path(f'{path_sans_extension}-{tag}-cp')\n return cp_dir\n\n\ndef main(args):\n if not args.synthetic_data:\n assert Path(args.input_folder).exists(), f'The path {args.input_folder} was not found.'\n\n abs_pathd = os.path.abspath(args.checkpoint_output_dir)\n os.makedirs(abs_pathd, exist_ok=True)\n log = Logger(abs_pathd+\"/\"+datetime.datetime.now().strftime('%Y.%m.%d-%H:%M:%S')+'.log',\n level='INFO')\n\n # tokenizer\n\n if exists(args.bpe_path):\n klass = YttmTokenizer\n tokenizer = klass(args.bpe_path)\n else:\n tokenizer = SimpleTokenizer()\n\n # reconstitute vae\n if exists(args.pretrained_checkpoint):\n dalle_path = Path(args.pretrained_checkpoint)\n\n assert dalle_path.exists(), 'DALL-E model file does not exist'\n loaded_obj = torch.load(str(dalle_path), map_location='cpu')\n\n dalle_params, vae_params, weights = loaded_obj['hparams'], loaded_obj['vae_params'], loaded_obj['weights']\n opt_state = loaded_obj.get('opt_state')\n scheduler_state = loaded_obj.get('scheduler_state')\n\n vae = VQGanVAE(args.vqgan_model_path, args.vqgan_config_path)\n\n dalle_params = dict(\n **dalle_params\n )\n resume_epoch = loaded_obj.get('epoch', 0)\n else:\n print('using pretrained VAE for encoding images to tokens')\n vae_params = None\n\n vae = VQGanVAE(args.vqgan_model_path, args.vqgan_config_path)\n\n dalle_params = dict(\n num_text_tokens=tokenizer.vocab_size,\n text_seq_len=args.text_seq_len,\n dim=args.hidden_size,\n depth=args.num_hidden_layers,\n heads=args.num_attention_heads,\n dim_head=args.dim_head,\n loss_img_weight=args.loss_img_weight,\n attn_types=tuple(args.attn_types.split(',')),\n ff_dropout=args.ff_dropout,\n attn_dropout=args.attn_dropout,\n sandwich_norm=args.sandwich_norm,\n embedding_ipu_id=args.embedding_ipu_id,\n embedding_serialization_factor=args.embedding_serialization_factor,\n layers_per_ipu=args.layers_per_ipu,\n cls_ipu_id=args.cls_ipu_id,\n fp16=args.fp16\n )\n resume_epoch = 
0\n\n\n # create dataset and dataloader\n\n ds = TextImageDataset(\n args.input_folder,\n text_len=args.text_seq_len,\n image_size=vae.image_size,\n resize_ratio=1.0,\n truncate_captions=args.truncate_captions,\n tokenizer=tokenizer,\n shuffle=True,\n synthetic=args.synthetic_data,\n fp16=args.fp16\n )\n\n assert len(ds) > 0, 'dataset is empty'\n print(f'{len(ds)} image-text pairs found for training')\n\n\n opts = poptorch.Options()\n opts.autoRoundNumIPUs(True)\n opts.deviceIterations(args.batches_per_step)\n opts.replicationFactor(args.replication_factor)\n opts.Training.gradientAccumulation(args.gradient_accumulation)\n opts.Training.accumulationAndReplicationReductionType(poptorch.ReductionType.Mean)\n opts.Precision.enableStochasticRounding(args.stochastic_rounding)\n opts.anchorMode(poptorch.AnchorMode.Final)\n opts.TensorLocations.setOptimizerLocation(\n poptorch.TensorLocationSettings().useOnChipStorage(True))\n\n if args.enable_rts:\n opts.TensorLocations.setOptimizerLocation(\n poptorch.TensorLocationSettings().useReplicatedTensorSharding(True).minElementsForReplicatedTensorSharding(args.replication_factor))\n\n opts.randomSeed(args.random_seed)\n opts.setExecutionStrategy(\n poptorch.PipelinedExecution(poptorch.AutoStage.AutoIncrement))\n\n mem_prop = {\n f'IPU{i}': args.matmul_proportion[i]\n for i in range(args.ipus_per_replica)\n }\n opts.setAvailableMemoryProportion(mem_prop)\n\n # PopART options\n opts._Popart.set(\"disableGradAccumulationTensorStreams\", True)\n opts._Popart.set(\"outlineThreshold\", 10.0)\n\n if args.enable_half_partials:\n opts.Precision.setPartialsType(torch.float16)\n else:\n opts.Precision.setPartialsType(torch.float32)\n\n dl = poptorch.DataLoader(options=opts, dataset=ds, batch_size=args.batch_size, num_workers=args.dataloader_workers,\n persistent_workers=True, shuffle=True, drop_last=True, sampler=None)\n steps_per_epoch = len(dl)\n\n # initialize DALL-E\n\n dalle = WrappedDALLE(vae=vae, **dalle_params)\n\n # if using fp16:\n if args.fp16:\n dalle = dalle.half()\n\n if exists(args.pretrained_checkpoint):\n dalle.load_state_dict(weights)\n\n # optimizer\n first_order_type = torch.float16 if args.enable_half_first_order_momentum else torch.float32\n accum_type = torch.float16 if args.fp16 else torch.float32\n if args.optimizer == \"Adam\":\n opt = Adam(get_trainable_params(dalle, args.weight_decay), lr=args.learning_rate, eps=1e-6, loss_scaling=args.loss_scaling,\n accum_type=accum_type, first_order_momentum_accum_type=first_order_type, second_order_momentum_accum_type=torch.float32)\n elif args.optimizer == \"AdamW\":\n opt = AdamW(get_trainable_params(dalle, args.weight_decay), lr=args.learning_rate, eps=1e-6, loss_scaling=args.loss_scaling,\n accum_type=accum_type, first_order_momentum_accum_type=first_order_type, second_order_momentum_accum_type=torch.float32)\n else:\n raise ValueError(\"Unknown Optimizer:\", args.optimizer)\n if exists(args.pretrained_checkpoint) and opt_state:\n opt.load_state_dict(opt_state)\n poptorch_dalle = poptorch.trainingModel(dalle,\n options=opts,\n optimizer=opt)\n if args.lr_decay:\n scheduler = ReduceLROnPlateau(\n opt,\n mode=\"min\",\n factor=0.5,\n patience=10,\n cooldown=10,\n min_lr=1e-6,\n verbose=True,\n )\n if exists(args.pretrained_checkpoint) and scheduler_state:\n scheduler.load_state_dict(scheduler_state)\n else:\n scheduler = None\n\n # experiment tracker\n\n model_config = dict(\n depth=args.num_hidden_layers,\n heads=args.num_attention_heads,\n dim_head=args.dim_head\n )\n\n if args.wandb:\n run = 
wandb.init(\n project=args.wandb_project_name,\n entity=None,\n resume=False,\n config=model_config,\n settings=wandb.Settings(console='off')\n )\n\n\n def save_model(path, epoch=0):\n if not path:\n return\n\n save_obj = {\n 'hparams': dalle_params,\n 'vae_params': vae_params,\n 'epoch': epoch,\n }\n\n save_obj = {\n **save_obj,\n 'weights': dalle.state_dict(),\n 'opt_state': opt.state_dict(),\n }\n save_obj['scheduler_state'] = (scheduler.state_dict() if scheduler else None)\n filename = f\"dalle_{epoch}.pt\"\n save_path = os.path.join(path, filename)\n torch.save(save_obj, save_path)\n\n # Compile model\n log.logger.info(\"---------- Compilation Started ---------\")\n start_compile = time.perf_counter()\n text, images = next(iter(dl))\n poptorch_dalle.compile(text, images)\n duration_compilation = time.perf_counter() - start_compile\n log.logger.info(f\"Compiled model in {duration_compilation} secs\")\n log.logger.info(\"---------------------------------------\")\n\n # Training loop\n log.logger.info(\"---------- Training Started -----------\")\n\n save_model(args.checkpoint_output_dir, epoch=resume_epoch)\n global_batch_size = args.batch_size * args.gradient_accumulation * args.replication_factor\n samples_per_step = global_batch_size * args.batches_per_step\n training_steps = args.epochs * steps_per_epoch\n start_train = time.perf_counter()\n start_step = time.perf_counter()\n for epoch in range(resume_epoch, args.epochs):\n for i, (text, images) in enumerate(dl):\n current_step = i + epoch * steps_per_epoch\n loss = poptorch_dalle(text, images)\n # Average loss across replicas\n if args.replication_factor == 1:\n mean_loss = loss\n else:\n mean_loss = loss.mean()\n step_length = time.perf_counter() - start_step\n step_throughput = samples_per_step / step_length\n msg = (\"Epoch: {:.2f}/{} \"\n \"Step: {}/{} \"\n \"Lr: {:.6f} \"\n \"Loss: {:.3f} \"\n \"Throughput: {:.2f} samples/sec\"\n ).format(epoch, args.epochs,\n current_step, training_steps,\n opt.param_groups[0]['lr'],\n mean_loss.item(),\n step_throughput)\n log.logger.info(msg)\n if args.wandb:\n wandb.log({\"LR\": opt.param_groups[0]['lr'],\n \"Throughput\": step_throughput,\n \"Loss\": mean_loss.item()})\n\n start_step = time.perf_counter()\n if i % args.checkpoint_save_steps == 0:\n save_model(args.checkpoint_output_dir, epoch=epoch)\n\n if args.lr_decay:\n scheduler.step(mean_loss)\n\n save_model(args.checkpoint_output_dir, epoch=epoch)\n\n if args.wandb:\n wandb.finish()\n\n stop_train = time.perf_counter()\n log.logger.info(\"---------------------------------------\")\n\n log.logger.info(\"---------- Training Metrics -----------\")\n log.logger.info(f\"global_batch_size: {global_batch_size}\")\n log.logger.info(f\"batches_per_step: {args.batches_per_step}\")\n log.logger.info(f\"training_steps: {training_steps}\")\n duration_run = stop_train - start_train\n num_samples = samples_per_step * training_steps\n log.logger.info(f\"Training time: {duration_run:.3f} secs\")\n log.logger.info(\"Throughput: {:5f} samples/sec.\".format(num_samples / duration_run))\n log.logger.info(\"---------------------------------------\")\n\nif __name__ == \"__main__\":\n # argument parsing\n args = parse_args()\n\n torch.manual_seed(args.random_seed)\n main(args)\n",
"# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport nvidia.dali\nimport nvidia.dali.ops as ops\nimport nvidia.dali.types as types\nimport multiprocessing\nimport numpy as np\nimport torch\nimport math\n\nclass PipelineParams:\n def __init__(\n self,\n sample_rate=16000,\n max_duration=float(\"inf\"),\n normalize_transcripts=True,\n trim_silence=False,\n speed_perturbation=None\n ):\n pass\n\nclass SpeedPerturbationParams:\n def __init__(\n self,\n min_rate=0.85,\n max_rate=1.15,\n p=1.0,\n ):\n pass\n\nclass DaliPipeline(nvidia.dali.pipeline.Pipeline):\n def __init__(self, *,\n pipeline_type,\n device_id,\n num_threads,\n batch_size,\n file_root: str,\n sampler,\n sample_rate,\n resample_range: list,\n window_size,\n window_stride,\n nfeatures,\n nfft,\n dither_coeff,\n silence_threshold,\n preemph_coeff,\n max_duration,\n preprocessing_device=\"gpu\"):\n super().__init__(batch_size, num_threads, device_id)\n\n self._dali_init_log(locals())\n\n if torch.distributed.is_initialized():\n shard_id = torch.distributed.get_rank()\n n_shards = torch.distributed.get_world_size()\n else:\n shard_id = 0\n n_shards = 1\n\n self.preprocessing_device = preprocessing_device.lower()\n assert self.preprocessing_device == \"cpu\" or self.preprocessing_device == \"gpu\", \\\n \"Incorrect preprocessing device. 
Please choose either 'cpu' or 'gpu'\"\n\n self.resample_range = resample_range\n\n train_pipeline = pipeline_type == 'train'\n self.train = train_pipeline\n self.sample_rate = sample_rate\n self.dither_coeff = dither_coeff\n self.nfeatures = nfeatures\n self.max_duration = max_duration\n self.do_remove_silence = True if silence_threshold is not None else False\n\n shuffle = train_pipeline and not sampler.is_sampler_random()\n self.read = ops.FileReader(name=\"Reader\", pad_last_batch=(pipeline_type == 'val'), device=\"cpu\", file_root=file_root, file_list=sampler.get_file_list_path(), shard_id=shard_id,\n num_shards=n_shards, shuffle_after_epoch=shuffle)\n\n # TODO change ExternalSource to Uniform for new DALI release\n if resample_range is not None:\n self.speed_perturbation_coeffs = ops.Uniform(device=\"cpu\", range=resample_range)\n else:\n self.speed_perturbation_coeffs = None\n\n self.decode = ops.AudioDecoder(device=\"cpu\", sample_rate=self.sample_rate if resample_range is None else None,\n dtype=types.FLOAT, downmix=True)\n\n self.normal_distribution = ops.NormalDistribution(device=preprocessing_device)\n\n self.preemph = ops.PreemphasisFilter(device=preprocessing_device, preemph_coeff=preemph_coeff)\n\n self.spectrogram = ops.Spectrogram(device=preprocessing_device, nfft=nfft,\n window_length=window_size * sample_rate,\n window_step=window_stride * sample_rate)\n\n self.mel_fbank = ops.MelFilterBank(device=preprocessing_device, sample_rate=sample_rate, nfilter=self.nfeatures,\n normalize=True)\n\n self.log_features = ops.ToDecibels(device=preprocessing_device, multiplier=np.log(10), reference=1.0,\n cutoff_db=math.log(1e-20))\n\n self.get_shape = ops.Shapes(device=preprocessing_device)\n\n self.normalize = ops.Normalize(device=preprocessing_device, axes=[1])\n\n self.pad = ops.Pad(device=preprocessing_device, fill_value=0)\n\n # Silence trimming\n self.get_nonsilent_region = ops.NonsilentRegion(device=\"cpu\", cutoff_db=silence_threshold)\n self.trim_silence = ops.Slice(device=\"cpu\", normalized_anchor=False, normalized_shape=False, axes=[0])\n self.to_float = ops.Cast(device=\"cpu\", dtype=types.FLOAT)\n\n @classmethod\n def from_config(cls, pipeline_type, device_id, batch_size, file_root: str, sampler, config_data: dict,\n config_features: dict, device_type: str = \"gpu\", do_resampling: bool = True,\n num_cpu_threads=multiprocessing.cpu_count()):\n\n max_duration = config_data['max_duration']\n sample_rate = config_data['sample_rate']\n silence_threshold = -60 if config_data['trim_silence'] else None\n\n # TODO Take into account resampling probablity\n # TODO config_features['speed_perturbation']['p']\n\n if do_resampling and config_data['speed_perturbation'] is not None:\n resample_range = [config_data['speed_perturbation']['min_rate'],\n config_data['speed_perturbation']['max_rate']]\n else:\n resample_range = None\n\n window_size = config_features['window_size']\n window_stride = config_features['window_stride']\n nfeatures = config_features['n_filt']\n nfft = config_features['n_fft']\n dither_coeff = config_features['dither']\n preemph_coeff = .97\n\n return cls(pipeline_type=pipeline_type,\n device_id=device_id,\n preprocessing_device=device_type,\n num_threads=num_cpu_threads,\n batch_size=batch_size,\n file_root=file_root,\n sampler=sampler,\n sample_rate=sample_rate,\n resample_range=resample_range,\n window_size=window_size,\n window_stride=window_stride,\n nfeatures=nfeatures,\n nfft=nfft,\n dither_coeff=dither_coeff,\n silence_threshold=silence_threshold,\n 
preemph_coeff=preemph_coeff,\n max_duration=max_duration)\n\n @staticmethod\n def _dali_init_log(args: dict):\n if (not torch.distributed.is_initialized() or (\n torch.distributed.is_initialized() and torch.distributed.get_rank() == 0)): # print once\n max_len = max([len(ii) for ii in args.keys()])\n fmt_string = '\\t%' + str(max_len) + 's : %s'\n print('Initializing DALI with parameters:')\n for keyPair in sorted(args.items()):\n print(fmt_string % keyPair)\n\n def _remove_silence(self, inp):\n begin, length = self.get_nonsilent_region(inp)\n out = self.trim_silence(inp, self.to_float(begin), self.to_float(length))\n return out\n\n def define_graph(self):\n audio, label = self.read()\n if not self.train or self.speed_perturbation_coeffs is None:\n audio, sr = self.decode(audio)\n else:\n resample_coeffs = self.speed_perturbation_coeffs() * self.sample_rate\n audio, sr = self.decode(audio, sample_rate=resample_coeffs)\n\n if self.do_remove_silence:\n audio = self._remove_silence(audio)\n\n # Max duration drop is performed at DataLayer stage\n\n if self.preprocessing_device == \"gpu\":\n audio = audio.gpu()\n\n if self.dither_coeff != 0.:\n audio = audio + self.normal_distribution(audio) * self.dither_coeff\n\n audio = self.preemph(audio)\n\n audio = self.spectrogram(audio)\n audio = self.mel_fbank(audio)\n audio = self.log_features(audio)\n\n audio_len = self.get_shape(audio)\n\n audio = self.normalize(audio)\n audio = self.pad(audio)\n\n # When modifying DALI pipeline returns, make sure you update `output_map` in DALIGenericIterator invocation\n return audio, label, audio_len\n\n",
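For reference, a minimal NumPy-only sketch of the feature chain that the DALI pipeline above assembles: pre-emphasis, power spectrogram, mel filterbank, log, then per-feature normalisation. Everything here (window and hop lengths, filter count, the helper name log_mel_features) is an assumption chosen for the example rather than a value read from the pipeline configuration; only the 0.97 pre-emphasis coefficient mirrors the default set in from_config.

```python
# Illustrative NumPy-only stand-in for the DALI feature chain above; parameters are assumed.
import numpy as np

def log_mel_features(audio, sample_rate=16000, nfft=512, win=0.02, hop=0.01, nfilt=64):
    # Pre-emphasis (0.97 matches the default hard-coded in from_config above)
    audio = np.append(audio[0], audio[1:] - 0.97 * audio[:-1])
    wlen, hlen = int(win * sample_rate), int(hop * sample_rate)
    frames = np.stack([audio[i:i + wlen]
                       for i in range(0, len(audio) - wlen + 1, hlen)])
    frames = frames * np.hanning(wlen)                      # window each frame
    power = np.abs(np.fft.rfft(frames, nfft)) ** 2          # power spectrogram
    # Triangular mel filterbank
    mel = lambda f: 2595.0 * np.log10(1.0 + f / 700.0)
    inv_mel = lambda m: 700.0 * (10.0 ** (m / 2595.0) - 1.0)
    pts = inv_mel(np.linspace(0.0, mel(sample_rate / 2.0), nfilt + 2))
    bins = np.floor((nfft + 1) * pts / sample_rate).astype(int)
    fbank = np.zeros((nfilt, nfft // 2 + 1))
    for i in range(1, nfilt + 1):
        fbank[i - 1, bins[i - 1]:bins[i]] = np.linspace(0.0, 1.0, bins[i] - bins[i - 1], endpoint=False)
        fbank[i - 1, bins[i]:bins[i + 1]] = np.linspace(1.0, 0.0, bins[i + 1] - bins[i], endpoint=False)
    feats = np.log(power @ fbank.T + 1e-20)                 # log-mel features
    return (feats - feats.mean(0)) / (feats.std(0) + 1e-5)  # normalise over time

print(log_mel_features(np.random.randn(16000)).shape)       # (num_frames, nfilt)
```

The DALI pipeline additionally applies dither and optional silence trimming on device; those steps are omitted from this sketch.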
"# Copyright (c) 2021 Graphcore Ltd. All rights reserved.\n\"\"\"Defines a Temporal Graph Network (https://arxiv.org/abs/2006.10637) for IPU.\"\"\"\n\nimport functools\nfrom dataclasses import dataclass\nfrom typing import Any, Callable, Dict, Optional, Tuple, TypeVar\n\nimport numpy as np\nimport tensorflow.compat.v1 as tf\n\nimport optimiser\nimport utils\n\n###############################################################################\n# Generic helpers\n\nU = TypeVar(\"U\")\n\n\ndef assert_shape(tensor: tf.Tensor, expected: Tuple[Optional[int],\n ...]) -> Tuple[int, ...]:\n \"\"\"Check tensor shape against expected, ignoring None, returning `tensor.shape`.\"\"\"\n actual = tensor.shape\n match = len(actual) == len(expected) and all(\n y is None or x == y for x, y in zip(actual, expected))\n assert match, f\"wrong shape, expected {expected}, actual {actual}\"\n return actual\n\n\ndef scoped_fn(fn: Callable[..., U]) -> Callable[..., U]:\n \"\"\"Wrap a function with a variable scope, named with the function name.\"\"\"\n @functools.wraps(fn)\n def wrapper(*args: Any, **kwargs: Any) -> Any:\n with tf.variable_scope(fn.__name__):\n return fn(*args, **kwargs)\n\n return wrapper\n\n\n@scoped_fn\ndef index_softmax(values: tf.Tensor, indices: tf.Tensor,\n n_indices: int) -> tf.Tensor:\n \"\"\"Compute multiple softmax() in groups defined by indices.\n\n E.g.\n index_softmax([0, 0, ln(2), 2], [0, 0, 0, 1], 2)\n computes softmax([0, 0, ln(2)]) and softmax([2])\n => [0.25, 0.25, 0.5, 1.0]\n\n Acts over axis=0 of values.\n \"\"\"\n # Run everything in float32, for stability\n dtype = values.dtype\n values = tf.cast(values, tf.float32)\n\n max_values = tf.reduce_max(values, axis=0, keepdims=True)\n exp_values = tf.exp(values - max_values)\n # Max(*, 1e-6) prevents a DIV0 error, caused by underflow of the sum-exp.\n sum_exp_values = tf.maximum(\n tf.unsorted_segment_sum(exp_values, indices, n_indices), 1e-6)\n return tf.cast(exp_values / tf.gather(sum_exp_values, indices), dtype)\n\n\n@scoped_fn\ndef linear(input: tf.Tensor,\n n_output: int,\n use_bias: bool = True) -> tf.Tensor:\n \"\"\"A standard linear layer `W x + b`.\"\"\"\n weight = tf.get_variable(\n \"weight\",\n dtype=input.dtype,\n shape=(input.shape[-1], n_output),\n initializer=tf.glorot_normal_initializer(),\n )\n output = input @ weight\n if use_bias:\n bias = tf.get_variable(\n \"bias\",\n dtype=input.dtype,\n shape=(n_output, ),\n initializer=tf.zeros_initializer(),\n )\n output += bias\n return output\n\n\n@scoped_fn\ndef cos_fp16(x: tf.Tensor) -> tf.Tensor:\n \"\"\"Run cos(x) in FP16, first running mod(x, 2*pi) for range safety.\"\"\"\n if x.dtype == tf.float16:\n return tf.cos(x)\n x_16 = tf.cast(tf.mod(x, 2 * np.pi), tf.float16)\n return tf.cos(x_16)\n\n\n@scoped_fn\ndef gru_cell(prev_hidden: tf.Tensor, input: tf.Tensor) -> tf.Tensor:\n \"\"\"Compute a single step of a GRU (following the PyTorch parameterization).\n\n See PyTorch GRUCell, https://pytorch.org/docs/stable/generated/torch.nn.GRUCell.html\n for the definition of this operation & trainable variables.\n\n Arguments:\n\n prev_hidden -- shape (batch_size x hidden_size), the previous GRU state,\n e.g. 
returned by gru_cell\n\n input -- shape (batch_size x input_size)\n\n Returns:\n\n tensor of shape (batch_size x hidden_size), a new GRU state\n \"\"\"\n batch_size, hidden_size = assert_shape(prev_hidden, (None, None))\n _, input_size = assert_shape(input, (batch_size, None))\n dtype = prev_hidden.dtype\n\n weight_i = tf.get_variable(\n \"weight_i\",\n (3, input_size, hidden_size),\n dtype=dtype,\n initializer=tf.glorot_normal_initializer(),\n )\n bias_i = tf.get_variable(\"bias_i\", (3, hidden_size),\n dtype=dtype,\n initializer=tf.zeros_initializer())\n weight_h = tf.get_variable(\n \"weight_h\",\n (3, hidden_size, hidden_size),\n dtype=dtype,\n initializer=tf.glorot_normal_initializer(),\n )\n bias_h = tf.get_variable(\"bias_h\", (3, hidden_size),\n dtype=dtype,\n initializer=tf.zeros_initializer())\n\n reset_i, update_i, candidate_i = tf.unstack(input @ weight_i +\n tf.expand_dims(bias_i, 1))\n reset_h, update_h, candidate_h = tf.unstack(prev_hidden @ weight_h +\n tf.expand_dims(bias_h, 1))\n\n reset = tf.sigmoid(reset_i + reset_h)\n update = tf.sigmoid(update_i + update_h)\n candidate = tf.tanh(candidate_i + reset * candidate_h)\n return (1 - update) * candidate + update * prev_hidden\n\n\n@scoped_fn\ndef transformer_conv(\n n_output: int,\n n_heads: int,\n dropout: float,\n nodes: tf.Tensor,\n edge_idx: tf.Tensor,\n edges: tf.Tensor,\n) -> tf.Tensor:\n \"\"\"Implementation of Graph Transformer, https://arxiv.org/abs/2009.03509.\n\n Matches the specification of TransformerConv in PyTorch Geometric, always using\n a \"skip\" projection from inputs and shared key/value projections for edges.\n\n Arguments:\n\n n_output -- output feature size\n\n n_heads -- number of attention heads (note: head size is given by n_output/n_heads)\n\n dropout -- rate parameter for attention mask (post-softmax) dropout\n\n nodes -- shape (n_nodes, node_feature_size), input features for each node\n\n edge_idx -- shape (2, n_edges), (0 <= edge_idx < n_nodes), the source and\n destination of each edge, indexing into nodes\n\n edges -- shape (n_edges, edge_feature_size), input features for each edge\n\n Returns:\n\n tensor of shape (n_nodes, n_output), node features after applying a graph\n transformer (attention) layer\n \"\"\"\n assert n_output % n_heads == 0, \\\n \"graph transformer output size should be divisible by the number of heads\"\n head_size = n_output // n_heads\n n_nodes, _ = assert_shape(nodes, (None, None))\n _, n_edges = assert_shape(edge_idx, (2, None))\n assert_shape(edges, (n_edges, None))\n\n with tf.variable_scope(\"skip\"):\n skip = linear(nodes, n_output)\n\n with tf.variable_scope(\"edge_shared_kv\"):\n edge_kv = linear(edges, n_output, use_bias=False)\n\n with tf.variable_scope(\"node_qkv\"):\n node_qkv = linear(nodes, 3 * n_output)\n\n with tf.variable_scope(\"attention\"):\n q = tf.gather(node_qkv[:, :n_output], edge_idx[1])\n kv = tf.reshape(\n tf.gather(node_qkv[:, n_output:], edge_idx[0]),\n (n_edges, 2, n_output),\n )\n k, v = tf.unstack(kv + edge_kv[:, tf.newaxis, :], axis=1)\n a = tf.reduce_sum(tf.reshape(q * k, (n_edges, n_heads, head_size)),\n -1) / (head_size**0.5)\n a = index_softmax(a, edge_idx[1], n_nodes)\n if dropout:\n a = tf.nn.dropout(a, rate=dropout)\n attention = tf.unsorted_segment_sum(\n tf.repeat(a, head_size, axis=1) * v, edge_idx[1], n_nodes)\n\n return skip + attention\n\n\n@scoped_fn\ndef time_encoder(dt: tf.Tensor, size: int, dtype: tf.DType) -> tf.Tensor:\n \"\"\"Create TGN time encoder cos(dt @ weight + bias).\"\"\"\n weight = tf.get_variable(\n 
\"weight\",\n (size, ),\n dtype=dt.dtype,\n initializer=tf.random_normal_initializer(stddev=0.1),\n )\n bias = tf.get_variable(\"bias\", (size, ),\n dtype=dt.dtype,\n initializer=tf.zeros_initializer())\n cos = cos_fp16 if dtype == tf.float16 else tf.cos\n return cos(dt[..., tf.newaxis] * weight + bias)\n\n\n@dataclass\nclass TgnMemory:\n \"\"\"Outputs from tgn_memory().\"\"\"\n\n output: tf.Tensor\n last_update: tf.Tensor\n updates: Tuple[tf.Tensor, ...]\n\n\nTGN_MEMORY_VARIABLES_KEY = \"tgn_memory_variables\"\n\n\n@scoped_fn\ndef tgn_memory(\n n_nodes: int,\n memory_size: int,\n time_embedding_size: int,\n node_ids: tf.Tensor,\n write_idx: tf.Tensor,\n write_mask: tf.Tensor,\n write_features: tf.Tensor,\n write_times: tf.Tensor,\n) -> TgnMemory:\n \"\"\"Create TGN memory read & update operations.\n\n A trainable memory for nodes in an temporal interaction graph. The memory\n state is computed using the latest interaction event that touched a node.\n The update is a GRU cell, taking as input the previous memory of both source\n and desination nodes for that edge, the edge feature vector and time difference\n from interaction to current time.\n\n Note that the GRU cell is computed lazily when the memory is read, rather than\n when it is stored, to support a single step of truncated backpropagation through\n time and obtain a gradient for GRU variables.\n\n Please see \"Temporal Graph Network\" (https://arxiv.org/abs/2006.10637) for full\n details.\n\n Arguments:\n\n n_nodes -- total number of slots in the memory\n\n memory_size -- size of stored state in the memory / GRU cell output size\n\n time_embedding_size -- size of the time encoding activation provided to the\n GRU cell\n\n node_ids -- shape (n_read), (-1 <= ID < n_nodes), the memory locations to be read\n\n write_idx -- shape (2, n_write), (0 <= idx < n_read), the (src, dst) indices of\n edges, selecting nodes that should be written with their updated\n memory state\n\n write_mask -- shape (2, n_write), boolean tensor for elements in write_idx that\n should be written (true) or skipped (false), such that each memory\n location is written at most once\n\n write_features -- shape (n_write, feature_size), input features to be stored and\n used to compute the memory when it is next accessed\n\n write_times -- shape (n_write), edge event times to be stored and used to compute\n the memory when it next accessed\n\n Returns:\n\n TgnMemory(\n output -- tensor of shape (n_read, memory_size), current memory for node_ids\n last_update -- tensor of shape (n_read), last update of output\n updates -- tuple of operations to run to update the memory\n )\n \"\"\"\n assert_shape(node_ids, (None, ))\n _, n_write = assert_shape(write_idx, (2, None))\n assert_shape(write_mask, (2, n_write))\n _, feature_size = assert_shape(write_features, (n_write, None))\n assert_shape(write_times, (n_write, ))\n dtype = write_features.dtype\n\n # Declare memory\n # As an optimisation, we concatenate the 6 fields required by the memory\n # into 2 tensors, one consisting of ints, the other of floats.\n # This requires some extra code to slice and concat, but means we can use\n # 2 (dynamic) gather operations instead of 6.\n\n # Each row: [last_update, dt, neighbour]\n v_ints = tf.get_variable(\n \"ints\",\n shape=(1 + n_nodes, 3),\n dtype=tf.int32,\n trainable=False,\n initializer=tf.zeros_initializer(),\n collections=[tf.GraphKeys.GLOBAL_VARIABLES, TGN_MEMORY_VARIABLES_KEY],\n )\n # Each row: [memory, features, direction]\n v_floats = tf.get_variable(\n \"floats\",\n 
shape=(1 + n_nodes, memory_size + feature_size + 2),\n dtype=dtype,\n trainable=False,\n initializer=tf.zeros_initializer(),\n collections=[tf.GraphKeys.GLOBAL_VARIABLES, TGN_MEMORY_VARIABLES_KEY],\n )\n\n # Memory[0] is used for padding (node_ids == -1)\n safe_node_ids = 1 + node_ids\n\n # Read memory for node_ids\n node_ints = tf.gather(v_ints, safe_node_ids)\n node_last_update, node_dt, node_neighbour_idx = tf.unstack(node_ints,\n axis=1)\n node_neighbour = tf.gather(v_floats[:, :memory_size], node_neighbour_idx)\n node_time_encoding = time_encoder(tf.cast(node_dt, tf.float32),\n time_embedding_size, dtype)\n\n node_floats = tf.gather(v_floats, safe_node_ids)\n node_self = node_floats[:, :memory_size]\n node_features = node_floats[:, memory_size:memory_size + feature_size]\n node_direction = node_floats[:, memory_size + feature_size:]\n\n node_memory = gru_cell(\n node_self,\n tf.concat(\n [\n node_direction[:, 0, tf.newaxis] * node_self +\n node_direction[:, 1, tf.newaxis] * node_neighbour,\n node_direction[:, 1, tf.newaxis] * node_self +\n node_direction[:, 0, tf.newaxis] * node_neighbour,\n node_features,\n node_time_encoding,\n ],\n axis=1,\n ),\n )\n\n # Write memory according to (write_idx, write_mask)\n flat_write_idx = tf.reshape(write_idx, (-1, ))\n indices = tf.gather(safe_node_ids, flat_write_idx)\n masked_indices = indices * tf.cast(tf.reshape(write_mask,\n (-1, )), indices.dtype)\n p_last_update = tf.reshape(tf.tile(write_times[tf.newaxis], (2, 1)),\n (-1, ))\n p_dt = p_last_update - tf.gather(node_last_update, flat_write_idx)\n # Swap src and dst indices to get the neighbour index for each node\n p_neighbour = tf.roll(indices, n_write, 0)\n p_memory = tf.gather(node_memory, flat_write_idx)\n p_features = tf.tile(write_features, (2, 1))\n p_direction = tf.repeat(tf.eye(2, dtype=dtype), n_write,\n 0) # src=[1, 0], dst=[0, 1]\n\n # There is already a data dependency, but just to be sure...\n with tf.control_dependencies([node_last_update, node_memory]):\n update_ints = v_ints.scatter_update(\n tf.IndexedSlices(\n tf.stack([p_last_update, p_dt, p_neighbour], axis=1),\n masked_indices))\n update_floats = v_floats.scatter_update(\n tf.IndexedSlices(\n tf.concat([p_memory, p_features, p_direction], axis=1),\n masked_indices))\n\n return TgnMemory(\n output=node_memory,\n last_update=node_last_update,\n updates=(update_ints, update_floats),\n )\n\n\n@scoped_fn\ndef tgn_gnn(\n time_embedding_size: int,\n dropout: float,\n input: tf.Tensor,\n last_update: tf.Tensor,\n edge_idx: tf.Tensor,\n edge_times: tf.Tensor,\n edge_features: tf.Tensor,\n) -> tf.Tensor:\n \"\"\"The 'core' GNN from TGN, with time encoder & graph transformer.\n\n Computes transformed representations for a set of nodes, based on a set of\n interactions (edges) involving those nodes.\n\n Arguments:\n\n time_embedding_size -- number of features to use for the time encoding,\n which is concatenated to edge_features for the GNN\n step\n\n dropout -- rate parameter for transformer_conv\n\n input -- shape (n_nodes, memory_size), input node features (from memory)\n\n last_update -- shape (n_nodes), timestamps for the last memory update that\n produced the input\n\n edge_idx -- shape (2, n_edges), indexing into input and last_update\n\n edge_times -- shape (n_edges), timestamps for current set of edges\n\n edge_features -- shape (n_edges, feature_size), input features for current\n set of edges\n\n Returns:\n\n tensor of shape (n_nodes, memory_size) -- node output features\n \"\"\"\n n_nodes, n_features = 
assert_shape(input, (None, None))\n assert_shape(last_update, (n_nodes, ))\n _, n_edges = assert_shape(edge_idx, (2, None))\n assert_shape(edge_times, (n_edges, ))\n assert_shape(edge_features, (n_edges, None))\n\n dt = tf.gather(last_update, edge_idx[0]) - edge_times\n time_encoding = time_encoder(tf.cast(dt, tf.float32), time_embedding_size,\n input.dtype)\n return transformer_conv(\n int(n_features),\n n_heads=2,\n dropout=dropout,\n nodes=input,\n edge_idx=edge_idx,\n edges=tf.concat([edge_features, time_encoding], axis=1),\n )\n\n\n@scoped_fn\ndef tgn_link_predictor(src: tf.Tensor, dst: tf.Tensor) -> tf.Tensor:\n \"\"\"Predict the logit for a link between src & dst.\n\n Implemented as a ReLU MLP with 1 hidden layer and 1 output.\n\n Arguments:\n\n src -- shape (* x feature_size), source node features\n\n dst -- shape (* x feature_size), destination node features\n\n Returns:\n\n tensor of shape (*), scores for each paired src and dst\n \"\"\"\n assert src.shape == dst.shape\n feature_size = int(src.shape[-1])\n\n with tf.variable_scope(\"hidden\"):\n hidden = tf.nn.relu(\n linear(tf.concat([src, dst], axis=-1), feature_size))\n with tf.variable_scope(\"output\"):\n return linear(hidden, 1)[..., 0]\n\n\n@scoped_fn\ndef tgn(\n # Settings\n n_nodes: int,\n memory_size: int,\n time_embedding_size: int,\n dropout: float,\n learning_rate: float,\n target: utils.Target,\n is_training: bool,\n # Inputs\n node_ids: tf.Tensor,\n batch_idx: tf.Tensor,\n batch_times: tf.Tensor,\n batch_features: tf.Tensor,\n batch_most_recent: tf.Tensor,\n edge_idx: tf.Tensor,\n edge_times: tf.Tensor,\n edge_features: tf.Tensor,\n) -> Dict[str, tf.Tensor]:\n \"\"\"Complete TGN including memory read/update, GNN and optional optimisation.\n\n Processes a batch of intearction events, pairs of (src, pos_dst), which update\n the node memory and predicts the probability of an event between (src, pos_dst)\n and between (src, neg_dst) to give a contrastive loss.\n\n See the component functions tgn_memory(), tgn_gnn(), tgn_link_predictor() for a\n functional description.\n\n Please see \"Temporal Graph Network\" (https://arxiv.org/abs/2006.10637) for full\n details.\n\n Arguments:\n\n n_nodes -- total number of slots in the memory\n\n memory_size -- size of stored state in the memory / GRU cell output size\n\n time_embedding_size -- size of the time encoding activation provided to the\n GRU cell\n\n dropout -- rate parameter for transformer_conv() via tgn_gnn()\n\n learning_rate -- for Adam (training only)\n\n target -- device to execute on, note: this influences optimal mixed precision\n design\n\n is_training -- boolean flag enabling training: optimiser step and dropout\n\n node_ids -- shape (n_read), nodes to be read in this step\n\n batch_idx -- shape (3, batch_size), indices [src, pos_dst, neg_dst], indexing\n into node_ids for each interaction event, paired with a negative\n sample\n\n batch_times -- shape (batch_size), timestamps for each event\n\n batch_features -- shape (batch_size, feature_size), input features for each\n event\n\n batch_most_recent -- shape (2, batch_size), boolean mask for the [src, pos_dst]\n values in batch_idx that are most recent within the batch,\n used to prevent write hazards in the memory\n\n edge_idx -- shape (2, edges_size), indices [src, dst] into node_ids, a history\n of a few recent edges to add additional context separate from the\n memory\n\n edge_times -- shape (edges_size), timestamps for the edge history\n\n edge_features -- shape (edges_size, feature_size), features for the 
edge history\n\n Returns:\n\n {\"loss\": tensor of shape (), mean loss over non-masked (src, dst) pairs\n \"count\": tensor of shape (), number of non-masked pairs\n \"probs\": tensor of shape (2, batch_size), unless is_training, probability\n of consecutive link per pair, for calculating validation statistics\n }\n \"\"\"\n\n memory = tgn_memory(\n n_nodes=n_nodes,\n memory_size=memory_size,\n time_embedding_size=time_embedding_size,\n node_ids=node_ids,\n write_idx=batch_idx[:2],\n write_mask=batch_most_recent,\n write_features=batch_features,\n write_times=batch_times,\n )\n\n hidden = tgn_gnn(\n time_embedding_size=time_embedding_size,\n dropout=is_training * dropout,\n input=memory.output,\n last_update=memory.last_update,\n edge_idx=edge_idx,\n edge_times=edge_times,\n edge_features=edge_features,\n )\n\n logits = tgn_link_predictor(\n tf.gather(hidden, tf.tile(batch_idx[0][tf.newaxis], (2, 1))),\n tf.gather(hidden, batch_idx[1:]),\n )\n\n # Masks any batch padding\n batch_mask = tf.not_equal(batch_idx[0], node_ids.shape[0] - 1)\n count = tf.reduce_sum(tf.cast(batch_mask, tf.int32))\n labels = tf.tile(tf.constant([[1], [0]], dtype=logits.dtype),\n (1, logits.shape[1]))\n # *2 because the reference uses mean(pos_loss) + mean(neg_loss)\n loss = 2 * tf.reduce_mean(\n tf.cast(batch_mask, logits.dtype) *\n tf.nn.sigmoid_cross_entropy_with_logits(labels=labels, logits=logits))\n\n if is_training:\n if target is utils.Target.IPU:\n step = optimiser.Adam(\n learning_rate=learning_rate).minimize_with_global_step(loss)\n else:\n # Allows AMP with TF_ENABLE_AUTO_MIXED_PRECISION=1\n step = tf.train.AdamOptimizer(\n learning_rate=learning_rate).minimize(loss)\n with tf.control_dependencies(memory.updates + (step, )):\n return dict(loss=tf.identity(loss), count=count)\n else:\n with tf.control_dependencies(memory.updates):\n return dict(loss=tf.identity(loss),\n count=count,\n probs=tf.nn.sigmoid(logits))\n",
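As a sanity check on the grouped softmax above, here is a NumPy-only sketch of what index_softmax() computes with tf.unsorted_segment_sum: a softmax taken independently within each group of positions that share an index. The example values reproduce the worked example from the docstring; the function name and everything else is purely illustrative.

```python
# NumPy sketch of index_softmax(): softmax within groups defined by `indices`.
import numpy as np

def index_softmax_np(values, indices, n_indices):
    values = values - values.max(axis=0, keepdims=True)      # stabilise, as in the TF version
    exp = np.exp(values)
    sums = np.zeros((n_indices,) + values.shape[1:])
    np.add.at(sums, indices, exp)                            # segment sum over axis 0
    return exp / np.maximum(sums[indices], 1e-6)             # guard against underflow, as above

vals = np.array([0.0, 0.0, np.log(2.0), 2.0])
idx = np.array([0, 0, 0, 1])
print(index_softmax_np(vals, idx, 2))                        # -> [0.25, 0.25, 0.5, 1.0]
```

The TF implementation additionally casts to float32 before the exponential and back to the input dtype afterwards, for stability when the model runs in half precision.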
"# Copyright (c) 2020 Graphcore Ltd. All rights reserved.\nimport tempfile\nfrom tensorflow.python.ipu.config import IPUConfig\nimport numpy as np\nfrom functools import partial\nimport tensorflow.compat.v1 as tf\nfrom tensorflow.python import ipu\nfrom ipu_sparse_ops import sparse, optimizers\nimport os\nimport logging\n\nos.sys.path.append(\"../../\") # dynamic_sparsity\nfrom ipu_sparse_ops.model_baseclass import SparseModelOptions # noqa: E402\nfrom ipu_sparse_ops.transformer.transformer_baseclass import TransformerOptions # noqa: E402\nfrom ipu_sparse_ops.transformer.transformer_dense import DenseTransformer # noqa: E402\nfrom ipu_sparse_ops.transformer.transformer_dynsparse import DynsparseTransformer # noqa: E402\n\n# disable TF 2.0\ntf.disable_eager_execution()\ntf.disable_v2_behavior()\n\n\ndef get_program_arguments():\n transformer_parser = TransformerOptions()\n SparseModelOptions.add_all_arguments(transformer_parser)\n transformer_parser.add_argument(\"--profile\", action=\"store_true\",\n help=\"Enable profiling for mem profile\")\n default_settings = dict(\n dtype=tf.float32,\n source_sequence_length=12,\n hidden_length=16,\n ff_length=64,\n attention_heads=1,\n qkv_length=16,\n sparsity=0.9,\n batch_size=1,\n random_seed=11,\n pooling_type='NONE',\n dropout_keep_prob=1\n )\n transformer_parser.set_defaults(**default_settings)\n return transformer_parser.parse_args()\n\n\ndef stream_dense_grads_from_device(transformer, loss, ops=None):\n # This will create tensorflow ops which have to be\n # run in a session to retrieve the result\n ops = {} if ops is None else ops\n for name, sparse_layer in transformer.sparse_layers.items():\n with tf.variable_scope(name, reuse=True):\n dense_grad_w = sparse_layer.get_dense_grad_w(loss)\n ops[name + '_grad_w'] = tf.convert_to_tensor(dense_grad_w)\n return ops\n\n\ndef sparse_transformer_fwd_and_grad(transformer, input_activation):\n transformer.compute_dense_grad = True\n output_activation = transformer.encoder_layer(input_activation, mask=None, compute_dense_grad=True, debug_name=\"layer_0\")\n loss = tf.reduce_sum(output_activation)\n\n # Wrap the optimizer (this would help manage the slot variables)\n optimizer = optimizers.SparseOptimizer(tf.train.AdamOptimizer)\n optimizer = optimizer(learning_rate=1e-3, sparse_layers=transformer.sparse_layers.values())\n\n grads = optimizer.compute_gradients(loss)\n input_grad = tf.gradients(loss, input_activation)[0]\n with tf.control_dependencies([input_grad]):\n train_op = optimizer.apply_gradients(grads)\n\n with tf.control_dependencies([train_op]):\n streamOps = {\"output_activation\": output_activation}\n streamOps[\"input_grad\"] = input_grad\n # Sparse grads\n for grad, var in grads:\n streamOps[var.op.name + \"_grad\"] = grad\n # Dense grads\n stream_dense_grads_from_device(transformer, loss, streamOps)\n return streamOps\n\n\ndef dense_transformer_fwd_and_grad(transformer, input_activation):\n output_activation = transformer.encoder_layer(input_activation, mask=None, debug_name=\"layer_0\")\n loss = tf.reduce_sum(output_activation)\n optimizer = tf.train.AdamOptimizer(learning_rate=1e-3)\n grads = optimizer.compute_gradients(loss)\n input_grad = tf.gradients(loss, input_activation)[0]\n with tf.control_dependencies([input_grad]):\n train_op = optimizer.apply_gradients(grads)\n\n with tf.control_dependencies([train_op]):\n streamOps = {\"output_activation\": output_activation}\n streamOps[\"input_grad\"] = input_grad\n for grad, var in grads:\n streamOps[var.op.name + \"_grad\"] = grad\n 
return streamOps\n\n\ndef main(args):\n tf.logging.set_verbosity(tf.logging.ERROR)\n np.set_printoptions(linewidth=200)\n random_seed = args.random_seed\n checkpoint_path = os.path.join(tempfile.mkdtemp(), \"model.ckpt\")\n\n # Input activations for the attention layer\n random_gen = np.random.default_rng(seed=random_seed)\n activations_np = random_gen.uniform(-0.1, 0.1, size=(args.batch_size, args.source_sequence_length, args.hidden_length))\n\n # Configure the IPU\n cfg = IPUConfig()\n cfg.auto_select_ipus = 1\n cfg.configure_ipu_system()\n\n # Build IPU graphs\n sparse_decoder_graph = tf.Graph()\n sparse_transformer = DynsparseTransformer(args)\n with sparse_decoder_graph.as_default():\n with tf.device(\"cpu\"):\n # placeholder for activations\n # weight placeholders are created inside sparse_transfomer\n inputs_ph = tf.placeholder(args.dtype, activations_np.shape)\n with ipu.scopes.ipu_scope(\"/device:IPU:0\"):\n sparse_decoder = partial(sparse_transformer_fwd_and_grad, sparse_transformer)\n sparse_decoder_fetches = ipu.ipu_compiler.compile(sparse_decoder, [inputs_ph])\n ipu.utils.move_variable_initialization_to_cpu()\n\n # sparse-decoder\n with tf.Session(graph=sparse_decoder_graph) as sess:\n # initialize weights\n sess.run(tf.global_variables_initializer())\n\n # Save the sparse weights to checkpoint as dense\n sparse_transformer.checkpointAsDense(checkpoint_path)\n\n # run sparse decoder\n sparse_result = sess.run(sparse_decoder_fetches, feed_dict={inputs_ph: activations_np})\n\n # Create a dense transformer and initialize the weights to the values that\n # the sparse model was initialzed with originally\n dense_decoder_graph = tf.Graph()\n dense_transformer = DenseTransformer(args)\n with dense_decoder_graph.as_default():\n with tf.device(\"cpu\"):\n # placeholder for activations\n # weights will get streamed from checkpoint\n inputs_ph = tf.placeholder(args.dtype, activations_np.shape)\n\n with ipu.scopes.ipu_scope(\"/device:IPU:0\"):\n dense_decoder_fetches = partial(dense_transformer_fwd_and_grad, dense_transformer)\n dense_graph = ipu.ipu_compiler.compile(dense_decoder_fetches, [inputs_ph])\n ipu.utils.move_variable_initialization_to_cpu()\n\n with tf.device(\"cpu\"):\n # We will only load the trainable variables, not momentum etc.\n loader = tf.train.Saver(tf.trainable_variables())\n\n # dense-decoder\n with tf.Session(graph=dense_decoder_graph) as sess:\n # Initialized momentums which are not part of the checkpoint\n sess.run(tf.global_variables_initializer())\n # Restore saved trainable variables\n loader.restore(sess, checkpoint_path)\n dense_result = sess.run(dense_graph, feed_dict={inputs_ph: activations_np})\n\n # TEST\n rtol = 1e-05\n atol = 1e-05\n if args.dtype == tf.float16:\n rtol = 1e-04\n atol = 1e-02\n # Compare model output activations (actual vs. desired) -> (sparse vs. dense)\n np.testing.assert_allclose(sparse_result[\"output_activation\"], dense_result[\"output_activation\"],\n atol=atol, rtol=rtol, err_msg=\"Output activations do not match.\")\n\n # Compate gradient of output wrt. input\n np.testing.assert_allclose(sparse_result[\"input_grad\"], dense_result[\"input_grad\"],\n atol=atol, rtol=rtol, err_msg=\"Grads wrt. 
inputs do not match\")\n\n # Compare the dense_w and sparse grads of every sparse layer\n for name, sparse_layer in sparse_transformer.sparse_layers.items():\n # Compate the dense grads\n dense_grad = dense_result[name + \"/weight\" + \"_grad\"]\n sparse_grad_w = sparse_result[name + \"_grad_w\"]\n np.testing.assert_allclose(sparse_grad_w, dense_grad, atol=atol, rtol=rtol,\n err_msg=f\"Dense grads for layer {name} do not match\")\n\n # Compare the sparse grads\n sparse_grad_padded = sparse_result[name + \"/sparse_layer/nz_values_grad\"]\n sparse_grad_data = sparse.SparseRepresentation(sparse_layer.weights.get_metainfo(), sparse_grad_padded)\n i, j, sparse_grad = sparse.triplets_from_representation(sparse_layer.weights.spec, sparse_grad_data, sparse_layer.weights.matmul_options)\n\n # Convert dense grads to blocks\n block_size, _ = sparse_layer.get_nonzero_blocks_shape()\n nx, ny = dense_grad.shape[0] // block_size, dense_grad.shape[1] // block_size\n strides = np.array(dense_grad.strides) # strides are in bytes\n strides = tuple(strides * block_size) + tuple(strides)\n blocked_dense_grad = np.lib.stride_tricks.as_strided(dense_grad, (nx, ny, block_size, block_size), strides)\n if block_size == 1:\n blocked_dense_grad = np.squeeze(np.copy(blocked_dense_grad), axis=(-2, -1))\n np.testing.assert_allclose(sparse_grad, blocked_dense_grad[i, j], atol=atol, rtol=rtol,\n err_msg=f\"Sparse grads for layer {name} do not match\")\n\n print(\"All results match.\")\n return sparse_result, dense_result\n\n\nif __name__ == \"__main__\":\n logging.basicConfig(\n level=logging.getLevelName(\"DEBUG\"),\n format='%(asctime)s %(name)s %(levelname)s %(message)s',\n datefmt='%Y-%m-%d %H:%M:%S')\n\n args = get_program_arguments()\n a, b = main(args)\n",
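A hypothetical NumPy illustration of the block comparison performed at the end of main() above: the dense gradient is viewed as a grid of block_size x block_size blocks so that individual blocks can be picked out with the sparse layer's (row, column) triplets. The matrix, block size and triplet lists below are invented for the example, and reshape/transpose is used instead of as_strided purely for clarity.

```python
# Viewing a dense matrix as a grid of blocks and selecting blocks by (row, col) triplets.
import numpy as np

block_size = 2
dense = np.arange(36, dtype=np.float32).reshape(6, 6)
nx, ny = dense.shape[0] // block_size, dense.shape[1] // block_size

# Equivalent to the as_strided() view used above, without raw byte strides.
blocked = dense.reshape(nx, block_size, ny, block_size).transpose(0, 2, 1, 3)

i, j = [0, 2], [1, 2]          # pretend triplet rows/cols of the non-zero blocks
print(blocked[i, j])           # the 2x2 blocks that would be compared to the sparse grads
```

The test above builds the same (nx, ny, block, block) view with np.lib.stride_tricks.as_strided and byte strides scaled by the block size, which avoids copying the gradient tensor.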
"# Copyright (c) 2020 Graphcore Ltd. All rights reserved.\n\nimport os\nimport numpy as np\nfrom functools import reduce\nfrom operator import mul\nfrom operator import add\nfrom subprocess import run\n\ng_random_sparse_mask = np.random.RandomState()\ng_random_data = np.random.RandomState()\ng_random_labels = np.random.RandomState()\n\ng_high_value = 10.0\ng_low_value = -10.0\n\n\ndef build_custom_ops(so_path):\n \"\"\"\n Build custom ops library.\n\n inputs:\n custom ops library path\n \"\"\"\n build_path = os.path.dirname(so_path)\n run(['make', '-j'], cwd=build_path)\n\n\ndef set_seed(seed):\n \"\"\"\n Set seeds of random generatorts.\n\n inputs:\n seed: seed value\n \"\"\"\n g_random_sparse_mask.seed(seed)\n g_random_data.seed(seed)\n g_random_labels.seed(seed)\n\n\ndef create_random_sparse_mask(sparsity, rows, cols):\n \"\"\"\n Create a random sparse mask.\n\n inputs:\n sparsity: sparsity level (0.4 means 40% of blocks are empty)\n rows: number of rows\n cols: number of columns\n\n returns: mask as 2D array of [0|1] values\n \"\"\"\n\n assert(sparsity >= 0.0)\n assert(sparsity < 1.0)\n proportion = [sparsity, 1 - sparsity]\n mask = g_random_sparse_mask.choice([0, 1], size=(rows, cols), p=proportion)\n\n # don't want mask to be all zeros\n while np.all(mask == 0):\n mask = g_random_sparse_mask.choice([0, 1], size=(rows, cols), p=proportion)\n return mask\n\n\ndef create_block_sparse_matrix(dims, block_size, sparsity_mask, initial_value=0):\n \"\"\"\n Create sparse matrix in dense and block-sparse form\n\n inputs:\n dims: dimensions of the sparse matrix\n block_size: size of a block (8x8, 16x16 etc)\n sparsity_mask: block-sparsity mask as 2D array (1 for non-zero and 0 for zero block)\n initial_value: the value of masked elements\n\n returns:\n block_sparse_matrix: np.array of num_blocks * block_sz\n dense_matrix: np.array of size dims with the dense representation of the matrix.\n i.e. explcit zeros for a zero block. 
Used to perform\n dense MM's for a reference output\n\n e.g for a sparse matrix of size (6, 6) with block size 2x2\n Matrix contains 9 blocks of size 2x2, some sparse and some dense\n If the 6x6 matrices has 2 non zero blocks, then ..\n\n Inputs:\n dims = [6, 6]\n block_size = [2,2]\n sparsity_mask = [[1, 1, 0],[1, 0, 0],[0, 1, 1]] (say)\n\n Outputs:\n block_sparse_matrix = 5 x 4 array\n dense_matrix = 6x6 array\n \"\"\"\n\n assert(len(dims) == 2)\n assert(len(block_size) == 2)\n assert(dims[0] % block_size[0] == 0)\n assert(dims[1] % block_size[1] == 0)\n\n rows = dims[0]\n cols = dims[1]\n\n block_size_row = block_size[0]\n block_size_col = block_size[1]\n\n num_block_rows = rows // block_size_row\n num_block_cols = cols // block_size_col\n\n if initial_value == 0:\n dense_matrix = np.zeros((rows, cols))\n else:\n dense_matrix = np.empty((rows, cols))\n dense_matrix.fill(initial_value)\n\n block_sparse_matrix = []\n for block_row in range(num_block_rows):\n for block_col in range(num_block_cols):\n if sparsity_mask[block_row][block_col]:\n block_data = g_random_data.randint(low=g_low_value,\n high=g_high_value,\n size=block_size_row * block_size_col).astype(\"float32\")\n block_sparse_matrix.append(block_data)\n dense_matrix[block_row * block_size_row: (block_row+1) * block_size_row,\n block_col * block_size_col: (block_col+1) * block_size_col] = block_data.reshape(block_size_row, block_size_col)\n\n # At this point mask is a 2D array, flatten it into 1D list and return, bsr_rhs is already a list (so convert to array)\n return np.array(block_sparse_matrix), dense_matrix\n\n\ndef create_dense_tensor(dims, data_type=\"float32\"):\n \"\"\"\n Create a tensor with random elements\n\n inputs:\n dims: list of tensor dimensions\n\n returns: created tensor\n \"\"\"\n return g_random_data.randint(low=g_low_value, high=g_high_value, size=dims).astype(data_type)\n\n\ndef create_block_sparse_tensor(nominal_shape, block_size, sparsity_or_mask, data_type=\"float32\", initial_value=0):\n \"\"\"\n Create sparse tensor in dense and block-sparse form\n\n inputs:\n nominal_shape: list of dimensions of the sparse tensor e.g (2, 3, 4, 4)\n block_size : size of each block (e.g. 
[8, 8])\n sparsity_or_mask : block sparsity level (0.4 means, 40% of blocks are zeros)\n or existing block sparsity mask as a flattened 2D array\n (1 for non-zero and 0 for zero block)\n\n returns:\n sparse_matrix : sparse representation of matrix (nnz_blocks * block size)\n dense_matrix: dense representation of the matrix (for ref calc)\n sparsity_mask : generated or provided block sparsity mask\n \"\"\"\n\n assert(len(nominal_shape) >= 2)\n assert(len(block_size) == 2)\n\n # skip last two dimensions\n # last 2 dims enter the MM, others form the group\n num_grouped_dims = reduce(mul, nominal_shape[:-2], 1)\n rows = nominal_shape[-2]\n cols = nominal_shape[-1]\n\n assert(rows % block_size[0] == 0)\n assert(cols % block_size[1] == 0)\n block_size_row = block_size[0]\n block_size_col = block_size[1]\n\n num_block_rows = rows // block_size_row\n num_block_rows_total = num_block_rows * num_grouped_dims\n num_block_cols = cols // block_size_col\n\n if not isinstance(sparsity_or_mask, list):\n sparsity = sparsity_or_mask\n generate_mask = True\n sparsity_mask_1d = []\n else:\n generate_mask = False\n sparsity_mask_1d = sparsity_or_mask\n assert(len(sparsity_mask_1d) == num_block_rows_total * num_block_cols)\n sparsity_mask = np.reshape(sparsity_mask_1d, (num_block_rows_total, num_block_cols))\n\n # Create dense matrix of nominal dims\n if initial_value == 0:\n dense_matrix = np.zeros(nominal_shape).astype(data_type)\n else:\n dense_matrix = np.empty(nominal_shape).astype(data_type)\n dense_matrix.fill(initial_value)\n\n dense_matrix = dense_matrix.reshape((num_grouped_dims, rows, cols))\n\n dims = [rows, cols]\n\n sparse_matrix = []\n for g in range(num_grouped_dims):\n if not generate_mask:\n sparsity_mask_1g = sparsity_mask[num_block_rows * g: num_block_rows * (g + 1)]\n else:\n sparsity_mask_1g = create_random_sparse_mask(sparsity, num_block_rows, num_block_cols)\n sparsity_mask_1d.extend(sparsity_mask_1g.flatten())\n assert(reduce(add, sparsity_mask_1g.flatten(), 0) > 0)\n _bsr, dense_matrix[g] = create_block_sparse_matrix(dims, block_size, sparsity_mask_1g, initial_value)\n # _bsr comes as array\n\n sparse_matrix.extend(_bsr)\n\n dense_matrix = dense_matrix.reshape(nominal_shape)\n\n # all parameters are returned as numpy arrays\n return np.array(sparse_matrix, dtype=data_type), dense_matrix, sparsity_mask_1d\n\n\ndef create_random_labels(dims):\n \"\"\"\n Create random labels tensor.\n Every row contains 1 at random place, the rest elements are 0\n\n inputs:\n dims: dimensions for the tensor\n\n returns: generated labels\n \"\"\"\n\n assert(len(dims) >= 2)\n\n lbs = np.zeros(dims, np.int)\n cols = dims[-1]\n lbs = np.reshape(lbs, (-1, cols))\n for r in range(0, lbs.shape[0]):\n idx = g_random_data.randint(0, cols)\n lbs[r][idx] = 1\n lbs = np.reshape(lbs, dims)\n return lbs\n\n\ndef create_random_sparse_labels(dims, sparsity_mask, block_size):\n \"\"\"\n Create random sparse labels tensor\n Every row contains 1 at a random, but non-masked place, the rest elements are 0\n\n inputs:\n dims: dimensions for the tensor\n sparsity_mask: block-sparsity mask\n block_size : size of each block\n\n returns: generated labels\n \"\"\"\n\n assert(len(dims) >= 2)\n assert(len(block_size) == 2)\n assert(dims[-1] % block_size[-1] == 0)\n\n b_cols = dims[-1] // block_size[-1]\n sparsity_mask = np.reshape(sparsity_mask, (-1, b_cols))\n\n b_rows = sparsity_mask.shape[0]\n b_lbs = np.zeros(sparsity_mask.shape, np.int)\n for b_r in range(0, b_rows):\n nzr = np.sum(sparsity_mask[b_r])\n if nzr > 0:\n b_nzc = 
g_random_data.randint(1, nzr + 1)\n nzc = 0\n for b_c in range(0, b_cols):\n nzc = nzc + sparsity_mask[b_r][b_c]\n if nzc == b_nzc:\n b_lbs[b_r][b_c] = 1\n break\n lbs_1b = np.zeros(block_size, np.int)\n b_row, b_col = (block_size[0], block_size[1])\n for r_b in range(0, b_row):\n idx_b = g_random_data.randint(0, b_col)\n lbs_1b[r_b][idx_b] = 1\n lbs = np.kron(b_lbs, lbs_1b)\n lbs = np.reshape(lbs, dims)\n return lbs\n\n\ndef create_empty_rows_mask(dims, sparsity_mask, block_size, extra_mask=None):\n \"\"\"\n Create mask of empty rows\n Every row contains all 0 elements if all elements in this row are masked,\n otherwise all elements are 1\n\n inputs:\n dims: dimensions for the tensor\n sparsity_mask: block-sparsity mask\n block_size : size of each block\n extra_mask: extra mask in elementwise form\n\n return: Tensor with masked rows\n\n Example:\n dims = [6, 4]\n sparsity_mask = [[1, 1],[0, 0],[1, 0]]\n block_size = [2, 2]\n extra_mask = [[1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1], [0, 0, 1, 1], [1, 1, 1, 1]]\n\n Output = [[1, 1, 1, 1], [1, 1, 1, 1], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [1, 1, 1, 1]]\n\n Explanation:\n Block-sparsity mask in elemetwise form:\n [[1, 1, 1, 1], [1, 1, 1, 1], [0, 0, 0, 0], [0, 0, 0, 0], [1, 1, 0, 0], [1, 1, 0, 0]]\n Extra mask:\n [[1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1], [0, 0, 1, 1], [1, 1, 1, 1]]\n Combined as AND mask:\n [[1, 1, 1, 1], [1, 1, 1, 1], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [1, 1, 0, 0]]\n Empty rows mask:\n [[1, 1, 1, 1], [1, 1, 1, 1], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [1, 1, 1, 1]]\n \"\"\"\n\n assert(len(dims) >= 2)\n assert(len(block_size) == 2)\n assert(dims[-1] % block_size[-1] == 0)\n\n b_cols = dims[-1] // block_size[-1]\n sparsity_mask = np.reshape(sparsity_mask, (-1, b_cols))\n\n cols = dims[-1]\n er_msk = np.ones(dims)\n er_msk = np.reshape(er_msk, (-1, cols))\n rows = er_msk.shape[0]\n msk_1b = np.ones(block_size)\n msk = np.kron(sparsity_mask, msk_1b)\n if extra_mask is not None:\n extra_mask = np.reshape(extra_mask, (-1, cols))\n assert(extra_mask.shape == msk.shape)\n msk = msk * extra_mask\n\n for r in range(0, rows):\n nzr = np.sum(msk[r])\n if nzr == 0:\n er_msk[r] = np.zeros(cols)\n er_msk = np.reshape(er_msk, dims)\n return er_msk\n\n\ndef create_diagonal_mask(dims, mask_types):\n \"\"\"\n Create a tensor, containing diagional triangular mask\n\n inputs:\n dims: dimensions for the tensor\n mask_types: list of mask types for each 2D slice\n 0 = no mask\n 1 = zero upper triangle\n 2 = zero lower triangle\n\n returns: created tensor\n\n Example:\n dims = [2, 3, 3]\n mask_types = [1, 2]\n\n Output:\n [[[1, 0, 0], [1, 1, 0], [1, 1, 1]]], [[1, 1, 1], [0, 1, 1], [0, 0, 1]]]]\n \"\"\"\n\n assert(len(dims) >= 2)\n\n rows = dims[-2]\n cols = dims[-1]\n num_grouped_dims = reduce(mul, dims[:-2], 1)\n assert(num_grouped_dims == len(mask_types))\n dims2d = (num_grouped_dims, rows, cols)\n mask = np.ones(dims2d)\n for g in range(0, num_grouped_dims):\n if mask_types[g] == 1:\n for r in range(0, rows):\n for c in range(r + 1, cols):\n mask[g][r][c] = 0\n elif mask_types[g] == 2:\n for r in range(0, rows):\n for c in range(0, min(r, cols)):\n mask[g][r][c] = 0\n mask = np.reshape(mask, dims)\n return mask\n\n\ndef to_block_sparse(dense_tensor, block_size, sparsity_mask, data_type=\"float32\"):\n \"\"\"\n Convert a dense tensor into blok-sparse format.\n\n inputs:\n dense_tensor: input tensor as numpy array, can have 2 dimensions or more\n block_size: block size as a tuple (block row length, block 
column length)\n sparsity_mask: sparsity matrix as a flattened 2D array.\n If dense tensor has more that 2 dimensions, the sparsity mask must cover all 2D slices\n data_type: data type for the output tensor\n returns: tensor in block-sparse format: [total number of non-zero blocks, block row * block col]\n \"\"\"\n\n nominal_shape = dense_tensor.shape\n assert(len(nominal_shape) >= 2)\n num_grouped_dims = reduce(mul, nominal_shape[:-2], 1)\n\n rows = nominal_shape[-2]\n cols = nominal_shape[-1]\n block_size_row = block_size[0]\n block_size_col = block_size[1]\n assert(rows % block_size_row == 0)\n assert(cols % block_size_col == 0)\n num_block_rows = rows // block_size_row\n num_block_cols = cols // block_size_col\n\n dense_tensor = dense_tensor.reshape((num_grouped_dims, rows, cols))\n block_sparse_matrix = []\n idx_sparse = 0\n for g in range(num_grouped_dims):\n r = 0\n for br in range(num_block_rows):\n r1 = r + block_size_row\n c = 0\n for bc in range(num_block_cols):\n c1 = c + block_size_col\n if sparsity_mask[idx_sparse] == 1:\n block_sparse_matrix.append(dense_tensor[g, r:r1, c:c1].flatten())\n c = c1\n idx_sparse = idx_sparse + 1\n r = r1\n return np.array(block_sparse_matrix, dtype=data_type)\n\n\ndef get_lib_path(lib_name):\n \"\"\"\n Get full library path,\n assuming library file is located in the same directory as this file\n\n inputs: library name (without lib prefix)\n returns: full path of a library\n \"\"\"\n\n base_path = os.path.realpath(os.path.dirname(__file__))\n return os.path.join(base_path, \"lib\" + lib_name + \".so\")\n",
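A small self-contained round trip showing the representation these helpers produce: a dense matrix becomes a flat array of non-zero blocks plus a block-level 0/1 mask, and the two together are enough to rebuild the dense form. The helper names and sizes here are made up for illustration and are not part of the utility module above.

```python
# Round trip between a dense matrix and a (blocks, mask) block-sparse representation.
import numpy as np

def dense_to_blocks(dense, bs):
    rows, cols = dense.shape
    mask, blocks = [], []
    for r in range(0, rows, bs):                 # row-major over block rows, then block cols
        for c in range(0, cols, bs):
            block = dense[r:r + bs, c:c + bs]
            nz = int(np.any(block))
            mask.append(nz)
            if nz:
                blocks.append(block.flatten())   # only non-zero blocks are stored
    return np.array(blocks), mask

def blocks_to_dense(blocks, mask, shape, bs):
    dense = np.zeros(shape, dtype=blocks.dtype)
    it, k = iter(blocks), 0
    for r in range(0, shape[0], bs):
        for c in range(0, shape[1], bs):
            if mask[k]:
                dense[r:r + bs, c:c + bs] = next(it).reshape(bs, bs)
            k += 1
    return dense

dense = np.kron(np.array([[1, 0, 0], [0, 1, 0], [0, 1, 1]]), np.ones((2, 2)))
blocks, mask = dense_to_blocks(dense, 2)
assert np.array_equal(blocks_to_dense(blocks, mask, dense.shape, 2), dense)
print(mask)                                      # -> [1, 0, 0, 0, 1, 0, 0, 1, 1]
```

create_block_sparse_matrix and to_block_sparse above follow the same row-major block ordering, but fill block values from the module's seeded random generators rather than from an existing dense matrix.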
"from __future__ import division\n\nimport torch\nfrom torch.autograd import Function\nimport transducer_cpp\n\n\nclass Transducer(Function):\n def __init__(self, blank_label=None):\n \"\"\"\n Constructor for Transducer cost.\n\n Arguments:\n blank_label (optional) (Int): Integer representing the index\n of the blank, defaults to `alphabet_size - 1`.\n \"\"\"\n super(Transducer, self).__init__()\n self.blank_label = blank_label\n\n @staticmethod\n def forward(ctx, log_probs, labels, lengths, label_lengths):\n \"\"\"\n Computes the Transducer cost for a minibatch of examples.\n\n Arguments:\n log_probs (FloatTensor): The log probabilities should\n be of shape\n (minibatch, input len, output len, vocab size).\n labels (IntTensor): 1D tensor of labels for each example\n consecutively.\n lengths (IntTensor): 1D tensor of number actviation time-steps\n for each example.\n label_lengths (IntTensor): 1D tensor of label lengths for\n each example.\n\n Returns:\n costs (FloatTensor): .\n \"\"\"\n is_cuda = log_probs.is_cuda\n\n certify_inputs(log_probs, labels, lengths, label_lengths)\n\n log_probs = log_probs.cpu()\n costs = torch.zeros(log_probs.shape[0])\n grads = log_probs.new(log_probs.shape).zero_()\n\n blank_label = 0 # self.blank_label\n if blank_label is None:\n blank_label = log_probs.shape[-1] - 1\n\n transducer_cpp.transduce(\n log_probs, labels, lengths, label_lengths, costs, grads, blank_label\n )\n if is_cuda:\n costs = costs.cuda()\n grads = grads.cuda()\n ctx.save_for_backward(grads)\n\n return costs\n\n @staticmethod\n def backward(ctx, cost):\n return ctx.saved_tensors[0], None, None, None\n\n\nclass TransducerLoss(Transducer):\n def __init__(self, size_average=True, blank_label=None):\n super(TransducerLoss, self).__init__(blank_label)\n self.size_average = size_average\n\n def forward(self, *args):\n parent = super(TransducerLoss, self)\n costs = parent.forward(*args)\n cost = torch.sum(costs)\n if self.size_average:\n cost = cost / costs.shape[0]\n return costs.new((cost,))\n\n def backward(self, *args):\n parent = super(TransducerLoss, self)\n grads = parent.backward(*args)[0]\n if self.size_average:\n grads = grads / grads.shape[0]\n return grads, None, None, None\n\n\ndef check_type(var, t, name):\n if var.dtype is not t:\n raise TypeError(\"{} must be {}\".format(name, t))\n\n\ndef check_contiguous(var, name):\n if not var.is_contiguous():\n raise ValueError(\"{} must be contiguous\".format(name))\n\n\ndef check_dim(var, dim, name):\n if len(var.shape) != dim:\n raise ValueError(\"{} must be {}D\".format(name, dim))\n\n\ndef certify_inputs(log_probs, labels, lengths, label_lengths):\n check_type(log_probs, torch.float32, \"log_probs\")\n check_type(labels, torch.int32, \"labels\")\n check_type(label_lengths, torch.int32, \"label_lengths\")\n check_type(lengths, torch.int32, \"lengths\")\n check_contiguous(labels, \"labels\")\n check_contiguous(label_lengths, \"label_lengths\")\n check_contiguous(lengths, \"lengths\")\n\n if lengths.shape[0] != log_probs.shape[0]:\n raise ValueError(\"must have a length per example.\")\n if label_lengths.shape[0] != log_probs.shape[0]:\n raise ValueError(\"must have a label length per example.\")\n\n check_dim(log_probs, 4, \"log_probs\")\n check_dim(labels, 1, \"labels\")\n check_dim(lengths, 1, \"lenghts\")\n check_dim(label_lengths, 1, \"label_lenghts\")\n",
"#!/usr/bin/env python3\n# Copyright (c) 2020 Graphcore Ltd. All rights reserved.\n\"\"\"\nA simple program that uses the PopART library ONNX builder to create\na fully connected layer.\n\"\"\"\nimport argparse\nimport struct\nfrom collections import namedtuple\nimport numpy as np\nimport popart\nimport os\nimport ctypes\n\n\nso_path = os.path.join(os.path.dirname(os.path.realpath(__file__)),\n \"../../custom_ops.so\")\nctypes.cdll.LoadLibrary(so_path)\n\nSession = namedtuple('Session', ['session', 'anchors'])\nROWS = 28\nCOLS = 28\nOUTPUT_SIZE = 10\n\ng_sparseMatMulTypeLookup = {\n 'DENSE_LHS_SPARSE_RHS_DENSE_OUT': 0,\n 'DENSE_LHS_DENSE_RHS_SPARSE_OUT': 1,\n 'SPARSE_LHS_SPARSE_RHS_SPARSE_OUT': 2\n}\n\n\nclass MNIST_model(object):\n\n def __init__(self, hidden_size=512):\n self.builder = popart.Builder()\n self.dtype = np.float32\n self.hidden = hidden_size\n self.block_size = [8, 8, 8]\n self.data_type = 'float32'\n\n def weights_initializer(self, shape_list):\n result = None\n if len(shape_list) == 2:\n result = np.random.normal(0, 1, shape_list) * np.square(2. / shape_list[0])\n if len(shape_list) == 1:\n result = np.zeros(shape_list)\n return result.astype(self.dtype)\n\n def create_rhs(self, dims, block_size, sparsity_level):\n block_size_row = block_size[0]\n block_size_col = block_size[1]\n num_block_rows = dims[0] // block_size_row\n num_block_cols = dims[1] // block_size_col\n proportion = [sparsity_level, 1 - sparsity_level]\n mask = np.random.choice([0, 1], size=(num_block_rows, num_block_cols), p=proportion)\n while np.sum(mask) == 0:\n mask = np.random.choice([0, 1], size=(num_block_rows, num_block_cols), p=proportion)\n\n sparse_tensor_shape = [np.sum(mask), block_size[1] * block_size[2]]\n rhs = self.weights_initializer(sparse_tensor_shape)\n nnz_per_2D_slice = [np.sum(mask)]\n # At this point mask is a 2D array, convert it into 1D and return\n return np.array(rhs), nnz_per_2D_slice, mask.flatten()\n\n def create_proto(self, batch_size):\n input_size = ROWS * COLS\n input_shape = popart.TensorInfo(\"FLOAT\", [batch_size, input_size])\n input_x = self.builder.addInputTensor(input_shape)\n x = input_x\n sparse_mm_type = g_sparseMatMulTypeLookup[\"DENSE_LHS_SPARSE_RHS_DENSE_OUT\"]\n # First matmul\n with self.builder.nameScope('Dense1'):\n b_value = self.weights_initializer([self.hidden])\n b = self.builder.addInitializedInputTensor(b_value)\n rhs_dims_1 = [input_size, self.hidden]\n w_bsr, nnz, sparsity_mask = self.create_rhs(rhs_dims_1, self.block_size, opts.sparsity_level)\n w = self.builder.addInitializedInputTensor(w_bsr)\n transpose_rhs = 0\n x = self.builder.customOp(opName = \"BSMatMul\",\n opVersion=1,\n domain = \"ai.graphcore\",\n inputs = [x, w],\n attributes = {\n \"bsr_rhs_lengths_per_2d_plane\": nnz,\n \"matrix_dims\": [batch_size, input_size, self.hidden],\n \"block_size\": self.block_size,\n \"sparsity_mask\": sparsity_mask.tolist(),\n \"bsmatmul_type\": sparse_mm_type,\n \"transpose_rhs\": transpose_rhs,\n \"memory_cycle_ratio\": 0.2,\n \"in_type\": self.data_type,\n \"out_type\": self.data_type,\n \"pp_type\": self.data_type\n })[0]\n x = self.builder.aiOnnx.add([x, b])\n # Nonlinearity\n x = self.builder.aiOnnx.relu([x])\n # Second matmul\n with self.builder.nameScope('Dense2'):\n output_size = OUTPUT_SIZE\n output_size_padding = (self.block_size[2] - OUTPUT_SIZE % self.block_size[2]) % self.block_size[2]\n if output_size_padding > 0:\n # We might need to pad weight matrix in 2nd dimension to make it divisible by block size\n output_size = output_size + 
output_size_padding\n lhs_dims_2 = [batch_size, self.hidden]\n rhs_dims_2 = [self.hidden, output_size]\n # We use sparse matmul in the outer layer also,\n # to demonstrate how to use block-sparse API\n # on tensors with sizes not divisible by block size.\n # However, we use sparsity 0 here to get good accuracy numbers\n w2_bsr, w2_nnz, sparsity_mask2 = self.create_rhs(rhs_dims_2, self.block_size, 0.0)\n\n w2 = self.builder.addInitializedInputTensor(w2_bsr)\n transpose_rhs2 = 0\n x = self.builder.customOp(opName = \"BSMatMul\",\n opVersion = 1,\n domain = \"ai.graphcore\",\n inputs = [x, w2],\n attributes = {\n \"bsr_rhs_lengths_per_2d_plane\": w2_nnz,\n \"matrix_dims\": [batch_size, self.hidden, output_size],\n \"block_size\": self.block_size,\n \"sparsity_mask\": sparsity_mask2.tolist(),\n \"bsmatmul_type\": sparse_mm_type,\n \"transpose_rhs\": transpose_rhs2,\n \"memory_cycle_ratio\": 0.2,\n \"in_type\": self.data_type,\n \"out_type\": self.data_type,\n \"pp_type\": self.data_type\n })[0]\n if output_size_padding > 0:\n # Throw away padding\n axes_value = np.array([0, 1]).astype(np.int32)\n axes = self.builder.aiOnnx.constant(axes_value, \"axes\")\n start = self.builder.aiOnnx.constant(np.array([0, 0]).astype(np.int32), \"start\")\n end = self.builder.aiOnnx.constant(np.array([batch_size, OUTPUT_SIZE]).astype(np.int32), \"end\")\n x = self.builder.aiOnnx.slice([x, start, end, axes])\n b2_value = self.weights_initializer([OUTPUT_SIZE])\n b2 = self.builder.addInitializedInputTensor(b2_value)\n output = self.builder.aiOnnx.add([x, b2])\n # Losses\n self.builder.addOutputTensor(output)\n prob = self.builder.aiOnnx.softmax([output])\n label_shape = popart.TensorInfo(\"INT32\", [batch_size])\n label = self.builder.addInputTensor(label_shape)\n loss = self.builder.aiGraphcore.nllloss([prob, label], debugContext = \"nllLossVal\")\n proto = self.builder.getModelProto()\n return proto, input_x, label, output, loss\n\n\ndef load_mnist(data_dir):\n def _readfile(path):\n with open(path, \"rb\") as f:\n magic_number, num_items = struct.unpack('>II', f.read(8))\n if magic_number == 2051:\n rows, cols = struct.unpack('>II', f.read(8))\n data = np.fromstring(f.read(), dtype=np.uint8)\n data = data.reshape([num_items, rows * cols])\n data = data.astype(dtype=np.float32)\n data = data / 255.0\n else:\n data = np.fromstring(f.read(), dtype=np.uint8)\n data = data.astype(dtype=np.int32)\n return data\n train_data = _readfile(f\"{data_dir}/train-images-idx3-ubyte\")\n train_labels = _readfile(f\"{data_dir}/train-labels-idx1-ubyte\")\n test_data = _readfile(f\"{data_dir}/t10k-images-idx3-ubyte\")\n test_labels = _readfile(f\"{data_dir}/t10k-labels-idx1-ubyte\")\n return train_data, train_labels, test_data, test_labels\n\n\nclass DataSet:\n def __init__(self, batch_size, batches_per_step, data, labels):\n self.data = data\n self.labels = labels\n self.num_examples = len(data)\n self.batch_size = batch_size\n self.batches_per_step = min(batches_per_step, self.num_examples // self.batch_size)\n self.inputs_per_step = self.batch_size * self.batches_per_step\n self.steps_per_epoch = self.num_examples // self.inputs_per_step\n\n def __getitem__(self, key):\n input_begin = key * self.inputs_per_step\n input_end = input_begin + self.inputs_per_step\n data = self.data[input_begin:input_end]\n data = data.reshape([self.batches_per_step, self.batch_size, -1])\n labels = self.labels[input_begin:input_end]\n labels = labels.reshape([self.batches_per_step, self.batch_size])\n return data, labels\n\n def __iter__(self):\n 
return (self[j] for j in range(self.steps_per_epoch))\n\n def __len__(self):\n return self.steps_per_epoch\n\n\ndef get_device(num_ipus, sim=True):\n # Select a device\n deviceManager = popart.DeviceManager()\n if sim:\n options = {\n \"compileIPUCode\": True,\n \"numIPUs\": num_ipus,\n \"tilesPerIPU\": 1216\n }\n device = deviceManager.createIpuModelDevice(options)\n else:\n device = deviceManager.acquireAvailableDevice(num_ipus)\n if device is None:\n print(\"Failed to acquire IPU. Exiting.\")\n quit()\n return device\n\n\ndef init_session(proto, loss, dataFlow, userOpts, device, training=True):\n # Create a session to compile and execute the graph\n if training:\n session = popart.TrainingSession(fnModel=proto,\n loss=loss,\n deviceInfo=device,\n optimizer=popart.ConstSGD(0.001),\n dataFlow=dataFlow,\n userOptions=userOpts)\n else:\n session = popart.InferenceSession(fnModel=proto,\n deviceInfo=device,\n dataFlow=dataFlow,\n userOptions=userOpts)\n print(\"Compiling the {} graph.\".format(\"training\" if training else \"validation\"))\n session.prepareDevice()\n session.setRandomSeed(1)\n\n # Create buffers to receive results from the execution\n anchors = session.initAnchorArrays()\n return Session(session, anchors)\n\n\ndef train(opts):\n if opts.fix_seed:\n print('Fixing the seed for result reproducibility')\n np.random.seed(0)\n train_data, train_labels, test_data, test_labels = load_mnist(opts.data_folder)\n # Limit batches_per_step so the test set isn't evaluated more than once.\n max_value = len(test_data) // opts.batch_size\n if max_value < opts.batches_per_step:\n print(\"(batches-per-step * batch-size) is larger than test set!\\n\"\n \" Reduced batches-per-step to: {}\\n\".format(max_value))\n opts.batches_per_step = max_value\n training_set = DataSet(opts.batch_size, opts.batches_per_step, train_data, train_labels)\n test_set = DataSet(opts.batch_size, opts.batches_per_step, test_data, test_labels)\n print(\"Creating ONNX model.\")\n model = MNIST_model(hidden_size=opts.hidden_size)\n proto, data_in, labels_in, output, loss = model.create_proto(opts.batch_size)\n # Describe how to run the model\n anchor_desc = {output: popart.AnchorReturnType(\"ALL\"),\n loss: popart.AnchorReturnType(\"ALL\")}\n dataFlow = popart.DataFlow(opts.batches_per_step, anchor_desc)\n # Options\n userOpts = popart.SessionOptions()\n # The validation graph by default will be optimized to change all variables to constants\n # This prevents that, which allows for checkpoints to be loaded into the model without recompiling\n userOpts.constantWeights = False\n # Enable auto-sharding\n if opts.num_ipus > 1:\n userOpts.virtualGraphMode = popart.VirtualGraphMode.Auto\n # Enable pipelining\n if opts.pipeline:\n userOpts.enablePipelining = True\n userOpts.separateCallOpPdfs = False\n device = get_device(opts.num_ipus, opts.simulation)\n training = init_session(proto, loss, dataFlow, userOpts, device, training=True)\n validation = init_session(proto, loss, dataFlow, userOpts, device, training=False)\n print(\"Running training loop.\")\n for i in range(opts.epochs):\n # Training\n training.session.weightsFromHost()\n for step, (data, labels) in enumerate(training_set):\n stepio = popart.PyStepIO({data_in: data, labels_in: labels}, training.anchors)\n training.session.run(stepio, 'Epoch ' + str(i) + ' training step' + str(step))\n aggregated_loss = 0\n aggregated_accuracy = 0\n training.session.modelToHost('ckpt.onnx')\n validation.session.resetHostWeights('ckpt.onnx')\n validation.session.weightsFromHost()\n # 
Evaluation\n for step, (data, labels) in enumerate(test_set):\n stepio = popart.PyStepIO({data_in: data, labels_in: labels}, validation.anchors)\n validation.session.run(stepio, 'Epoch ' + str(i) + ' evaluation step ' + str(step))\n # Loss\n aggregated_loss += np.mean(validation.anchors[loss])\n # Accuracy\n results = np.argmax(validation.anchors[output].reshape([test_set.inputs_per_step, 10]), 1)\n num_correct = np.sum(results == labels.reshape([test_set.inputs_per_step]))\n aggregated_accuracy += num_correct / test_set.inputs_per_step\n # Log statistics\n aggregated_loss /= len(test_set)\n aggregated_accuracy /= len(test_set)\n print(\"Epoch #{}\".format(i + 1))\n print(\" Loss={0:.4f}\".format(aggregated_loss))\n print(\" Accuracy={0:.2f}%\".format(aggregated_accuracy * 100))\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(\n description = 'Simple MNIST example to test serialized matrix matrix multiplication in PopART',\n formatter_class = argparse.ArgumentDefaultsHelpFormatter)\n parser.add_argument('--batch-size', type = int, default = 32,\n help = \"Set the Batch size\")\n parser.add_argument('--batches-per-step', type=int, default=20,\n help = \"Number of minibatches to perform on the Device before returning to the Host.\"\n \" This will be capped so the Device returns each epoch.\")\n parser.add_argument('--epochs', type = int, default = 10,\n help = \"Number of epochs to train for.\")\n parser.add_argument('--num-ipus', type = int, default = 1,\n help = \"Number of IPU's\")\n parser.add_argument('--pipeline', action = \"store_true\", default = False,\n help = \"Pipeline the model over IPUs\")\n parser.add_argument('--fix-seed', action = \"store_true\", default = False,\n help = \"Fix the seeds\")\n parser.add_argument('--simulation', action = 'store_true',\n help = \"Run the example with an IPU_MODEL device.\")\n parser.add_argument('--log-graph-trace', action = 'store_true',\n help = \"Turn on ir logging to display the graph's ops.\")\n parser.add_argument('--hidden-size', type = int, default = 400,\n help = 'The number of neurons in the hidden layer')\n parser.add_argument('--sparsity-level', type = float, default = 0.2,\n help = 'The level of sparsity (0 = fully dense, 1 = fully sparse)')\n parser.add_argument('data_folder', type = str,\n help = 'Path to mnist data')\n opts = parser.parse_args()\n train(opts)\n",
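The Dense2 scope above pads the output dimension up to a multiple of the block size before the block-sparse matmul and slices the padding away afterwards; the snippet below is a plain NumPy stand-in for that arithmetic only. The block size and tensor sizes are arbitrary example values, not the ones used by the model.

```python
# Padding a matmul output dimension to a block-size multiple, then discarding the padding.
import numpy as np

OUTPUT_SIZE, block = 10, 16
padding = (block - OUTPUT_SIZE % block) % block          # -> 6
padded_size = OUTPUT_SIZE + padding                      # -> 16

batch, hidden = 4, 32
x = np.random.randn(batch, hidden)
w_padded = np.random.randn(hidden, padded_size)          # weights created at the padded width
y = x @ w_padded
y = y[:, :OUTPUT_SIZE]                                   # throw away the padding, as the slice op does
print(padding, y.shape)                                  # 6 (4, 10)
```

In the model above the second layer uses sparsity 0.0, so the padding exists only to satisfy the block-size divisibility requirement of the BSMatMul custom op, not to drop any weights.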
"#!/usr/bin/env python3\n# Copyright (c) 2020 Graphcore Ltd. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport argparse\nimport datetime\nimport json\nimport logging\nimport math\nimport os\nimport random\nimport re\nimport sys\nimport time\nfrom collections import Counter, OrderedDict, namedtuple\nfrom contextlib import ExitStack\nfrom functools import partial\nfrom shutil import copytree\nfrom socket import gethostname\n\nimport numpy as np\nimport popdist\nimport popdist.tensorflow\nimport tensorflow.compat.v1 as tf\nfrom tensorflow.python import ipu\nfrom tensorflow.python.ipu import horovod as hvd\nfrom tensorflow.python.ipu import pipelining_ops\nfrom tensorflow.python.ipu.config import DeviceConnectionType\n\nimport ipu_utils\nimport log\nimport modeling as bert_ipu\nfrom bert_data import data_loader\nfrom ipu_optimizer import get_optimizer\nfrom log import logger\nfrom loss_scaling_schedule import LossScalingScheduler\nfrom lr_schedules import make_lr_schedule\nfrom multi_stage_wrapper import (MultiStageEmbedding,\n get_split_embedding_stages,\n get_split_matmul_stages)\nfrom options import make_global_options\nfrom poplar_options import set_poplar_engine_options\n\nimport popdist\nimport popdist.tensorflow\n\nGraphOps = namedtuple('graphOps',\n ['graph', 'session', 'init', 'ops', 'placeholders',\n 'iterator', 'outfeed', 'saver', 'restore', 'tvars'])\n\n\ndef create_popdist_strategy():\n \"\"\"\n Creates a distribution strategy for use with popdist. We use the\n Horovod-based IPUMultiReplicaStrategy. 
Horovod is used for the initial\n broadcast of the weights and when reductions are requested on the host.\n Imports are placed here so they are only done when required, as Horovod\n might not always be available.\n \"\"\"\n\n from tensorflow.python.ipu.horovod import popdist_strategy\n\n hvd.init()\n\n # We add the IPU cross replica reductions explicitly in the IPUOptimizer,\n # so disable them in the IPUMultiReplicaStrategy.\n return popdist_strategy.IPUMultiReplicaStrategy(\n add_ipu_cross_replica_reductions=False)\n\n\ndef build_pretrain_pipeline_stages(model, bert_config, opts):\n \"\"\"\n build pipeline stages according to \"pipeline_stages\" in config file\n \"\"\"\n\n # flatten stages config into list of layers\n flattened_layers = []\n for stage in opts['pipeline_stages']:\n flattened_layers.extend(stage)\n layer_counter = Counter(flattened_layers)\n assert layer_counter['hid'] == opts['num_hidden_layers']\n assert layer_counter['emb'] == layer_counter['mlm']\n # gradient_accumulation_count needs to be a multiple of stage_number*2\n # this is constrained by sdk\n assert opts['gradient_accumulation_count'] % (len(opts['pipeline_stages'])*2) == 0\n\n computational_stages = []\n if layer_counter['emb'] > 1:\n # support distribute embedding to multiple IPUs\n embedding = MultiStageEmbedding(embedding_size=bert_config.hidden_size,\n vocab_size=bert_config.vocab_size,\n initializer_range=bert_config.initializer_range,\n n_stages=layer_counter['emb'],\n matmul_serialize_factor=opts[\"matmul_serialize_factor\"],\n dtype=bert_config.dtype)\n embedding_stages = get_split_embedding_stages(\n embedding=embedding, split_count=layer_counter['emb'], bert_config=bert_config, micro_batch_size=opts[\"micro_batch_size\"], seq_length=opts['seq_length'])\n # masked lm better be on same ipu with embedding layer for saving storage\n masked_lm_output_post_stages = get_split_matmul_stages(\n embedding=embedding, split_count=layer_counter['emb'], bert_config=bert_config)\n else:\n embedding_stages = [model.embedding_lookup_layer]\n masked_lm_output_post_stages = [model.mlm_head]\n\n layers = {\n 'emb': embedding_stages,\n 'pos': model.embedding_postprocessor_layer,\n 'hid': model.encoder,\n 'mlm': masked_lm_output_post_stages,\n 'nsp': model.get_next_sentence_output_layer\n }\n stage_layer_list = []\n for stage in opts['pipeline_stages']:\n func_list = []\n for layer in stage:\n # embedding layer and mlm layer can be splited to mutliple IPUs, so need to be dealt with separately\n if layer == 'emb':\n func_list.append(embedding_stages[0])\n embedding_stages = embedding_stages[1:]\n elif layer == 'mlm':\n func_list.append(masked_lm_output_post_stages[0])\n masked_lm_output_post_stages = masked_lm_output_post_stages[1:]\n else:\n func_list.append(layers[layer])\n stage_layer_list.append(func_list)\n computational_stages = ipu_utils.stages_constructor(\n stage_layer_list, ['learning_rate', 'loss_scaling'],\n ['learning_rate', 'loss_scaling', 'mlm_loss', 'nsp_loss', 'mlm_acc', 'nsp_acc'])\n\n return computational_stages\n\n\ndef build_network(infeed,\n outfeed,\n bert_config=None,\n opts=None,\n learning_rate=None,\n loss_scaling=None,\n is_training=True):\n\n # build model\n if opts[\"groupbert\"]:\n logger.info(f\"************* Using GroupBERT model architecture *************\")\n pipeline_model = bert_ipu.GroupBertModel(bert_config, is_training=is_training)\n else:\n pipeline_model = bert_ipu.BertModel(bert_config, is_training=is_training)\n\n # build stages & device mapping\n computational_stages = 
build_pretrain_pipeline_stages(\n pipeline_model, bert_config, opts,)\n device_mapping = opts['device_mapping']\n\n logger.info(\n f\"************* computational stages: *************\\n{computational_stages}\")\n logger.info(\n f\"************* device mapping: *************\\n{device_mapping}\")\n\n # define optimizer\n def optimizer_function(learning_rate, loss_scaling, mlm_loss, nsp_loss, mlm_acc, nsp_acc):\n total_loss = mlm_loss + nsp_loss\n optimizer = get_optimizer(learning_rate, loss_scaling, opts['total_replicas'], opts)\n fp32_loss = tf.cast(total_loss, tf.float32) * loss_scaling\n return ipu.ops.pipelining_ops.OptimizerFunctionOutput(optimizer, fp32_loss)\n\n # Set IPU-specific available memory proportion\n if isinstance(opts['available_memory_proportion'], float):\n available_memory_proportion_list = [\n str(opts['available_memory_proportion'])\n ] * len(device_mapping)\n else:\n available_memory_proportion_list = [\n str(opts['available_memory_proportion'][device]) for device in device_mapping\n ]\n\n if len(available_memory_proportion_list) != len(device_mapping):\n raise ValueError(\n \"The available_memory_proportion list must be the same length as the number of stages in the pipeline.\"\n )\n\n options = [ipu.pipelining_ops.PipelineStageOptions(\n matmul_options={\n \"availableMemoryProportion\": amp,\n \"partialsType\": opts[\"partials_type\"]\n }) for amp in available_memory_proportion_list\n ]\n\n # define pipeline schedule\n pipeline_schedule = pipelining_ops.PipelineSchedule.Grouped\n # TODO (nicolasc): I don't think this is supported for BERT as we have multiple stages on the same IPU\n if opts[\"pipeline_schedule\"] == \"Interleaved\":\n pipeline_schedule = pipelining_ops.PipelineSchedule.Interleaved\n\n if is_training:\n pipeline_ops = ipu.ops.pipelining_ops.pipeline(computational_stages=computational_stages,\n gradient_accumulation_count=int(\n opts['gradient_accumulation_count']),\n repeat_count=opts['batches_per_step'],\n inputs=[learning_rate, loss_scaling],\n infeed_queue=infeed,\n outfeed_queue=outfeed,\n optimizer_function=optimizer_function,\n device_mapping=device_mapping,\n forward_propagation_stages_poplar_options=options,\n backward_propagation_stages_poplar_options=options,\n offload_weight_update_variables=opts[\"variable_offloading\"],\n pipeline_schedule=pipeline_schedule,\n recomputation_mode=ipu.ops.pipelining_ops.RecomputationMode[\n opts['recomputation_mode']],\n accumulate_outfeed=True,\n replicated_optimizer_state_sharding=opts['replicated_tensor_sharding'],\n name=\"Pipeline\")\n else:\n pipeline_ops = ipu.ops.pipelining_ops.pipeline(computational_stages=computational_stages,\n gradient_accumulation_count=int(\n opts['gradient_accumulation_count']),\n repeat_count=opts['batches_per_step'],\n inputs=[learning_rate, loss_scaling],\n infeed_queue=infeed,\n outfeed_queue=outfeed,\n device_mapping=device_mapping,\n forward_propagation_stages_poplar_options=options,\n backward_propagation_stages_poplar_options=options,\n offload_weight_update_variables=opts[\"variable_offloading\"],\n pipeline_schedule=pipeline_schedule,\n recomputation_mode=ipu.ops.pipelining_ops.RecomputationMode[\n opts['recomputation_mode']],\n replicated_optimizer_state_sharding=opts['replicated_tensor_sharding'],\n name=\"Pipeline\")\n\n return pipeline_ops\n\n\ndef distributed_per_replica(function):\n \"\"\"Run the function with the distribution strategy (if any) in a per-replica context.\"\"\"\n def wrapper(*arguments):\n if tf.distribute.has_strategy():\n strategy = 
tf.distribute.get_strategy()\n return strategy.experimental_run_v2(function, args=arguments)\n else:\n return function(*arguments)\n\n return wrapper\n\n\n@distributed_per_replica\ndef training_step_with_infeeds_and_outfeeds(train_iterator, outfeed_queue, bert_config, opts, learning_rate, loss_scaling, is_training):\n \"\"\"\n Training step that uses an infeed loop with outfeeds. This runs 'iterations_per_step' steps per session call. This leads to\n significant speed ups on IPU. Not compatible with running on CPU or GPU.\n \"\"\"\n\n if opts['gradient_accumulation_count'] > 1:\n training_step = partial(build_network,\n infeed=train_iterator,\n outfeed=outfeed_queue,\n bert_config=bert_config,\n opts=opts,\n learning_rate=learning_rate,\n loss_scaling=loss_scaling,\n is_training=is_training)\n\n return ipu.ipu_compiler.compile(training_step, [])\n\n\ndef build_graph(opts, is_training=True):\n train_graph = tf.Graph()\n strategy = None\n\n if opts['use_popdist']:\n strategy = create_popdist_strategy()\n\n with train_graph.as_default(), ExitStack() as stack:\n if strategy:\n stack.enter_context(strategy.scope())\n\n if opts[\"groupbert\"]:\n bert_config = bert_ipu.BertConfig.from_dict(\n opts, config=bert_ipu.GroupBertConfig(vocab_size=None))\n else:\n bert_config = bert_ipu.BertConfig.from_dict(\n opts, config=bert_ipu.BertConfig(vocab_size=None))\n\n bert_config.dtype = tf.float32 if opts[\"precision\"] == '32' else tf.float16\n\n # define placeholders\n placeholders = {\n 'learning_rate': tf.placeholder(tf.float32, shape=[]),\n 'loss_scaling': tf.placeholder(tf.float32, shape=[])\n }\n learning_rate = placeholders['learning_rate']\n loss_scaling = placeholders['loss_scaling']\n\n # define input, datasets must be defined outside the ipu device scope.\n train_iterator = ipu.ipu_infeed_queue.IPUInfeedQueue(data_loader.load(opts, is_training=is_training))\n # define output\n outfeed_queue = ipu.ipu_outfeed_queue.IPUOutfeedQueue()\n\n # building networks with pipeline\n def bert_net():\n return build_network(train_iterator,\n outfeed_queue,\n bert_config,\n opts,\n learning_rate,\n loss_scaling,\n is_training)\n\n with ipu.scopes.ipu_scope('/device:IPU:0'):\n train = training_step_with_infeeds_and_outfeeds(train_iterator,\n outfeed_queue,\n bert_config,\n opts,\n learning_rate,\n loss_scaling,\n is_training)\n\n # get result from outfeed queue\n outfeed = outfeed_queue.dequeue()\n\n if strategy:\n # Take the mean of all the outputs across the distributed workers\n outfeed = [strategy.reduce(tf.distribute.ReduceOp.MEAN, v) for v in outfeed]\n\n if opts['distributed_worker_index'] == 0 or opts['log_all_workers']:\n log.print_trainable_variables(opts)\n\n model_and_optimiser_variables = tf.global_variables()\n model_variables = tf.trainable_variables() + tf.get_collection(tf.GraphKeys.TRAINABLE_RESOURCE_VARIABLES)\n restore = tf.train.Saver(\n var_list=model_and_optimiser_variables\n if opts['restore_optimiser_from_checkpoint'] else model_variables)\n\n train_saver = tf.train.Saver(\n var_list=model_and_optimiser_variables\n if opts['save_optimiser_to_checkpoint'] else model_variables,\n max_to_keep=5)\n\n ipu.utils.move_variable_initialization_to_cpu()\n train_init = tf.global_variables_initializer()\n tvars = tf.trainable_variables()\n\n # calculate the number of required IPU\n num_ipus = (max(opts['device_mapping']) + 1) * opts['replicas']\n num_ipus = ipu_utils.next_power_of_two(num_ipus)\n\n ipu_config = ipu_utils.get_config(\n fp_exceptions=opts[\"fp_exceptions\"],\n 
enable_recomputation=opts[\"enable_recomputation\"],\n disable_graph_outlining=False,\n num_required_ipus=num_ipus,\n enable_stochastic_rounding=opts['stochastic_rounding'],\n minimum_remote_tensor_size=opts['min_remote_tensor_size'],\n max_cross_replica_sum_buffer_size=opts['max_cross_replica_sum_buffer_size'],\n max_reduce_scatter_buffer_size=opts['max_reduce_scatter_buffer_size'],\n scheduler_selection=opts['scheduler'],\n compile_only=opts['compile_only'],\n ipu_id = opts['select_ipu'])\n\n if opts['use_popdist']:\n ipu_config = popdist.tensorflow.set_ipu_config(ipu_config, opts['shards'], configure_device=False)\n\n # Do not acquire a device, compile only.\n if opts[\"compile_only\"]:\n ipu_config.device_connection.version = \"ipu2\"\n ipu_config.device_connection.enable_remote_buffers = True\n # PRE_COMPILE allows for runing execuatables on graph without being online\n ipu_config.device_connection.type = DeviceConnectionType.PRE_COMPILE\n\n # Enforce using a exe cache dir, defaulting if not given\n if (\"TF_POPLAR_FLAGS\" in os.environ):\n if (\"--executable_cache_path\" not in os.environ[\"TF_POPLAR_FLAGS\"]):\n print(\"Warning: --executable_cache_path in TF_POPLAR_FLAGS \" +\n \"(for 'poprun --mpi_local_args') not set. Setting to default \" +\n \"path: ./tmp/tf_cache/\")\n os.environ[\"TF_POPLAR_FLAGS\"] = \"--executable_cache_path=/tmp/tf_cache\"\n\n # Sometimes TF_POPLAR_FLAGS might not even exist\n else:\n print(\"Warning: TF_POPLAR_FLAGS environment variable (for 'poprun \" +\n \"--mpi_local_args') not set. --executable_cache_path must be \" +\n \"defined when using --compile-only. Setting to default path: \" +\n \"./tmp/tf_cache/\")\n os.environ[\"TF_POPLAR_FLAGS\"] = \"--executable_cache_path=/tmp/tf_cache\"\n\n ipu_config.configure_ipu_system()\n\n train_sess = tf.Session(graph=train_graph)\n\n return GraphOps(train_graph, train_sess, train_init, [train], placeholders, train_iterator, outfeed, train_saver, restore, tvars)\n\n\ndef training_step(train, learning_rate, loss_scaling):\n start = time.time()\n _ = train.session.run(train.ops, feed_dict={\n train.placeholders['learning_rate']: learning_rate,\n train.placeholders['loss_scaling']: loss_scaling})\n batch_time = (time.time() - start)\n if not os.environ.get('TF_POPLAR_FLAGS') or '--use_synthetic_data' not in os.environ.get('TF_POPLAR_FLAGS'):\n _learning_rate, _loss_scaling_, _mlm_loss, _nsp_loss, _mlm_acc, _nsp_acc = train.session.run(train.outfeed)\n # We need to divide explicitly by the accumulated gradient since it gets accumulated implicitly inside the pipeline.\n mlm_loss = np.mean(_mlm_loss)/opts['gradient_accumulation_count']\n nsp_loss = np.mean(_nsp_loss)/opts['gradient_accumulation_count']\n mlm_acc = np.mean(_mlm_acc)/opts['gradient_accumulation_count']\n nsp_acc = np.mean(_nsp_acc)/opts['gradient_accumulation_count']\n if mlm_acc == -1 and nsp_acc == - 1:\n # If they are both disabled then it is worth to put Nan instead\n mlm_acc = np.nan\n nsp_acc = np.nan\n else:\n mlm_loss, nsp_loss = 0, 0\n mlm_acc, nsp_acc = 0, 0\n return batch_time, mlm_loss, nsp_loss, mlm_acc, nsp_acc\n\n\ndef train(opts):\n # --------------- OPTIONS ---------------------\n total_samples = data_loader.get_dataset_files_count(opts, is_training=True)\n opts[\"dataset_repeat\"] = math.ceil(\n (opts[\"num_train_steps\"]*opts[\"global_batch_size\"])/total_samples)\n\n total_samples_per_epoch = total_samples/opts[\"duplicate_factor\"]\n logger.info(f\"Total samples for each epoch {total_samples_per_epoch}\")\n logger.info(f\"Global batch 
size {opts['global_batch_size']}\")\n steps_per_epoch = total_samples_per_epoch//opts[\"global_batch_size\"]\n logger.info(f\"Total steps for each epoch {steps_per_epoch}\")\n\n steps_per_logs = math.ceil(\n opts[\"steps_per_logs\"] / opts['batches_per_step'])*opts['batches_per_step']\n steps_per_tensorboard = math.ceil(\n opts[\"steps_per_tensorboard\"] / opts['batches_per_step'])*opts['batches_per_step']\n steps_per_ckpts = math.ceil(\n opts[\"steps_per_ckpts\"] / opts['batches_per_step'])*opts['batches_per_step']\n logger.info(f\"Checkpoint will be saved every {steps_per_ckpts} steps.\")\n\n total_steps = (opts[\"num_train_steps\"] //\n opts['batches_per_step'])*opts['batches_per_step']\n logger.info(f\"{opts['batches_per_step']} steps will be run for ipu to host synchronization once, it should be divided by num_train_steps, so num_train_steps will limit to {total_steps}.\", opts)\n\n # learning rate strategy\n lr_schedule_name = opts['lr_schedule']\n logger.info(f\"Using learning rate schedule {lr_schedule_name}\")\n learning_rate_schedule = make_lr_schedule(lr_schedule_name, opts, total_steps)\n\n # variable loss scaling\n loss_scaling_schedule = LossScalingScheduler(opts['loss_scaling'], opts['loss_scaling_by_step'])\n\n # -------------- BUILD TRAINING GRAPH ----------------\n train = build_graph(opts,\n is_training=True)\n train.session.run(train.init)\n train.session.run(train.iterator.initializer)\n\n is_main_worker = opts['distributed_worker_index'] == 0\n\n step = 0\n # -------------- SAVE AND RESTORE --------------\n if opts[\"restore_dir\"]:\n restore_path = opts['restore_dir']\n if os.path.isfile(restore_path):\n latest_checkpoint = os.path.splitext(restore_path)[0]\n else:\n latest_checkpoint = tf.train.latest_checkpoint(restore_path)\n logger.info(\n f\"Restoring training from latest checkpoint: {latest_checkpoint}\")\n step_pattern = re.compile(\".*ckpt-([0-9]+)$\")\n step = int(step_pattern.match(latest_checkpoint).groups()[0])\n train.saver.restore(train.session, latest_checkpoint)\n epoch = step / steps_per_epoch\n\n # restore event files\n source_path = os.path.join(opts[\"restore_dir\"], '/event')\n target_path = os.path.join(opts[\"save_path\"], '/event')\n if os.path.isdir(source_path):\n copytree(source_path, target_path)\n else:\n if opts[\"init_checkpoint\"]:\n train.saver.restore(train.session, opts[\"init_checkpoint\"])\n logger.info(\n f'Init Model from checkpoint {opts[\"init_checkpoint\"]}')\n\n if opts['save_path']:\n file_path = train.saver.save(train.session, opts[\"checkpoint_path\"], global_step=0)\n logger.info(f\"Saved checkpoint to {file_path}\")\n\n\n # Initialise Weights & Biases if available\n if opts['wandb'] and is_main_worker:\n import wandb\n wandb.init(project=\"tf-bert\", sync_tensorboard=True, name=opts['wandb_name'])\n wandb.config.update(opts)\n\n # Tensorboard logs path\n log_path = os.path.join(opts[\"logs_path\"], 'event')\n logger.info(\"Tensorboard event file path {}\".format(log_path))\n summary_writer = tf.summary.FileWriter(\n log_path, train.graph, session=train.session)\n\n # End to avoid any training if compile only mode\n if opts['compile_only']:\n\n # single warm up step without weight update or training\n # Graph gets compiled in here\n compilation_time, _, _, _, _ = training_step(train, 0, 0)\n\n print(\"Training graph successfully compiled. 
\" +\n \"Exiting as --compile-only was passed.\")\n\n # Copying these from below, adding compile time to summary\n poplar_summary = tf.Summary()\n poplar_summary.value.add(\n tag='poplar/compile_time',\n simple_value=compilation_time)\n summary_writer.add_summary(poplar_summary)\n summary_writer.flush()\n\n logger.info(\"Compile time: {}\".format(compilation_time))\n\n sys.exit(0)\n\n # ------------- TRAINING LOOP ----------------\n print_format = (\n \"step: {step:6d}, epoch: {epoch:6.2f}, lr: {lr:6.7f}, mlm_loss: {mlm_loss:6.3f}, nsp_loss: {nsp_loss:6.3f},\\\n mlm_acc: {mlm_acc:6.5f}, nsp_acc: {nsp_acc:6.5f}, samples/sec: {samples_per_sec:6.2f}, time: {iter_time:8.6f}, total_time: {total_time:8.1f}\"\n )\n learning_rate = mlm_loss = nsp_loss = 0\n start_all = time.time()\n\n try:\n while step < total_steps:\n learning_rate = learning_rate_schedule.get_at_step(step)\n loss_scaling = loss_scaling_schedule.get_at_step(step)\n try:\n batch_time, mlm_loss, nsp_loss, mlm_acc, nsp_acc = training_step(\n train, learning_rate, loss_scaling)\n except tf.errors.OpError as e:\n raise tf.errors.ResourceExhaustedError(\n e.node_def, e.op, e.message)\n\n batch_time /= opts['batches_per_step']\n\n is_log_step = (step % steps_per_logs == 0)\n is_save_tensorboard_step = (steps_per_tensorboard > 0 and (step % steps_per_tensorboard == 0))\n is_save_ckpt_step = (step and (\n step % steps_per_ckpts == 0 or step == total_steps - opts['batches_per_step']))\n\n if (step == 1 and (is_main_worker or opts['log_all_workers'])):\n poplar_compile_time = time.time() - start_all\n logger.info(f\"Poplar compile time: {poplar_compile_time:.2f}s\")\n poplar_summary = tf.Summary()\n poplar_summary.value.add(\n tag='poplar/compile_time', simple_value=poplar_compile_time)\n summary_writer.add_summary(poplar_summary)\n\n if is_log_step:\n total_time = time.time() - start_all\n epoch = step / steps_per_epoch\n stats = OrderedDict([\n ('step', step),\n ('epoch', epoch),\n ('lr', learning_rate),\n ('loss_scaling', loss_scaling),\n ('mlm_loss', mlm_loss),\n ('nsp_loss', nsp_loss),\n ('mlm_acc', mlm_acc),\n ('nsp_acc', nsp_acc),\n ('iter_time', batch_time),\n ('samples_per_sec', opts['global_batch_size']/batch_time),\n ('total_time', total_time),\n ])\n\n logger.info(print_format.format(**stats))\n\n # Log training statistics\n train_summary = tf.Summary()\n train_summary.value.add(tag='epoch', simple_value=epoch)\n train_summary.value.add(tag='loss/MLM', simple_value=mlm_loss)\n train_summary.value.add(tag='loss/NSP', simple_value=nsp_loss)\n train_summary.value.add(tag='accuracy/MLM', simple_value=mlm_acc)\n train_summary.value.add(tag='accuracy/NSP', simple_value=nsp_acc)\n train_summary.value.add(\n tag='learning_rate', simple_value=learning_rate)\n train_summary.value.add(\n tag='loss_scaling', simple_value=loss_scaling)\n train_summary.value.add(\n tag='samples_per_sec', simple_value=opts['global_batch_size']/batch_time)\n train_summary.value.add(\n tag='samples', simple_value=step*opts['batches_per_step']*opts['global_batch_size'])\n summary_writer.add_summary(train_summary, step)\n summary_writer.flush()\n\n if is_save_ckpt_step or is_save_tensorboard_step:\n if is_main_worker:\n file_path = train.saver.save(train.session, opts[\"checkpoint_path\"], global_step=step)\n logger.info(f\"Saved checkpoint to {file_path}\")\n\n if is_save_tensorboard_step:\n log.save_model_statistics(file_path, summary_writer, step)\n\n if opts['use_popdist']:\n ipu_utils.barrier()\n\n step += opts['batches_per_step']\n finally:\n 
train.session.close()\n\n\ndef set_distribution_defaults(opts):\n if opts['use_popdist']:\n opts['distributed_worker_count'] = popdist.getNumInstances()\n opts['distributed_worker_index'] = popdist.getInstanceIndex()\n else:\n opts['distributed_worker_count'] = 1\n opts['distributed_worker_index'] = 0\n\n if opts['distributed_worker_index'] != 0 and not opts['log_all_workers']:\n logger.setLevel(logging.ERROR)\n\n\ndef set_training_defaults(opts):\n # Automatic pipeline depth counter\n if opts[\"global_batch_size\"]:\n gradients_to_accumulate = opts[\"global_batch_size\"] // (opts[\"total_replicas\"] * opts['micro_batch_size'])\n divisor = len(opts['pipeline_stages']) * 2\n # We need then to fix the gradient_to_accumulate according to the pipeline\n gradients_to_accumulate = divisor * (1 + gradients_to_accumulate // divisor)\n if opts['gradient_accumulation_count'] and opts['gradient_accumulation_count'] != gradients_to_accumulate:\n logger.error(\"Passed a gradient to accumulate and a global batch size. Disable one of them to run.\")\n sys.exit(os.EX_OK)\n opts['gradient_accumulation_count'] = gradients_to_accumulate\n # We update the global_batch_size\n proposed_global_batch_size = opts['gradient_accumulation_count'] * opts[\"total_replicas\"] * opts[\"micro_batch_size\"]\n if proposed_global_batch_size != opts['global_batch_size']:\n logger.info(\"Changing the global batch size to match the pipeline requirements.\")\n opts['global_batch_size'] = proposed_global_batch_size\n else:\n opts['global_batch_size'] = opts['micro_batch_size'] * opts['gradient_accumulation_count']*opts['total_replicas']\n\n opts['compute_acc'] = not opts['disable_acc']\n if opts['disable_acc']:\n logger.info(\"Disabling computation of the accuracies. Just the losses will be reported.\")\n\n\ndef set_ipu_defaults(opts):\n poplar_version = os.popen('popc --version').read()\n opts['poplar_version'] = poplar_version\n logger.info(f\"Running on host: {gethostname()}\")\n logger.info(f\"Current date/time: {str(datetime.datetime.now())}\")\n commit_hash = log.get_git_revision()\n logger.info(f\"Code revision: {commit_hash}\")\n\n if opts['seed']:\n # Seed the various random sources\n seed = opts['seed']\n logger.info(f\"Pseudo-random number generator seed specified: f{seed}\")\n random.seed(seed)\n # Set other seeds to different values for extra safety\n tf.set_random_seed(random.randint(0, 2**32 - 1))\n np.random.seed(random.randint(0, 2**32 - 1))\n ipu.utils.reset_ipu_seed(random.randint(-2**16, 2**16 - 1))\n\n\ndef set_defaults(opts):\n data_loader.set_defaults(opts)\n set_distribution_defaults(opts)\n set_training_defaults(opts)\n set_ipu_defaults(opts)\n log.set_defaults(opts)\n\n\ndef add_pretraining_options(parser: argparse.ArgumentParser):\n group = parser.add_argument_group(\"Pretraining options\")\n # Add pretraining-specific command line options here.\n return parser\n\n\nif __name__ == '__main__':\n tf.logging.set_verbosity(tf.logging.ERROR)\n\n opts = make_global_options([add_pretraining_options])\n\n opts['shards'] = ipu_utils.next_power_of_two(max(opts[\"device_mapping\"]) + 1)\n\n if popdist.isPopdistEnvSet():\n opts['use_popdist'] = True\n opts['replicas'] = popdist.getNumLocalReplicas()\n opts['total_replicas'] = popdist.getNumTotalReplicas()\n if opts['compile_only']:\n opts['select_ipu'] = None\n else:\n opts['select_ipu'] = popdist.getDeviceId()\n else:\n opts['use_popdist'] = False\n opts['total_replicas'] = opts['replicas']\n opts['select_ipu'] = None\n\n set_defaults(opts)\n\n 
set_poplar_engine_options(execution_profile=opts['execution_profile'],\n memory_profile=opts['memory_profile'],\n profile_dir=str(opts['profile_dir']),\n sync_replicas_independently=opts['replicas'] > 1 and opts['sync_replicas_independently'],\n synthetic_data=opts['synthetic_data'],\n tensorflow_progress_bar=opts['progress_bar'])\n\n poplar_options = os.getenv('POPLAR_ENGINE_OPTIONS', 'unset')\n logger.info(f\"Poplar options: {poplar_options}\")\n logger.info(\"Command line: \" + ' '.join(sys.argv))\n if opts['use_popdist'] and opts['log_all_workers']:\n option_string = f\"Option flags for worker {opts['distributed_worker_index']}:\\n\"\n else:\n option_string = f\"Option flags:\\n\"\n logger.info(option_string + json.dumps(OrderedDict(sorted(opts.items())), indent=1))\n\n # Start training\n train(opts)\n",
"# Copyright (c) 2019 Graphcore Ltd. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\nWeight decay Unit test.\nRun backward pass with different weight_decay values.\nCheck tensors values for popart vs. pytorch simplified models.\n\"\"\"\n\nimport numpy as np\nimport pytest\nimport torch\nfrom torch import nn\n\nimport popart\nimport onnx\n\nfrom bert_model import Bert, BertConfig\nfrom bert_optimizer import BaseOptimizerFactory\nfrom tests.torch_bert import BertConfig as TorchBertConfig\nfrom tests.torch_bert import BertIntermediate, BertOutput\nfrom tests.utils import (check_model, check_tensors, copy_weights_to_torch,\n run_fwd_model, run_py)\n\n\nclass MockIteration:\n def __init__(self):\n self.learning_rate = 0\n\n\nclass MockArgs:\n def __init__(self, optimizer, learning_rate, weight_decay):\n self.optimizer = optimizer\n self.learning_rate = learning_rate\n self.momentum = 0\n self.dampening = 0\n self.velocity_scaling = 1.0\n self.loss_scaling = 1.0\n self.task = \"PRETRAINING\"\n self.weight_decay = weight_decay\n self.momentum_scaling = 0\n self.pipeline_momentum_scaling = 0\n self.pipeline = False\n self.squad_lr_scale = None\n self.continuous_pipeline_optimizer_scaling = False\n self.use_half_optimizer_state = False\n\n\nclass BertFCN(nn.Module):\n def __init__(self, config):\n super(BertFCN, self).__init__()\n self.intermediate = BertIntermediate(config)\n self.output = BertOutput(config)\n\n def forward(self, input_x):\n intermediate_output = self.intermediate(input_x)\n layer_output = self.output(intermediate_output, input_x)\n outputs = (layer_output, )\n return outputs\n\n\nTORCH_TO_ONNX = {\n \"intermediate.dense.weight\": \"1/W\",\n \"intermediate.dense.bias\": \"1/B\",\n \"output.dense.weight\": \"2/W\",\n \"output.dense.bias\": \"2/B\",\n \"output.LayerNorm.weight\": \"Gamma\",\n \"output.LayerNorm.bias\": \"Beta\"\n}\n\nTRANSPOSE_WEIGHTS = {\n \"intermediate.dense.weight\": np.transpose,\n \"output.dense.weight\": np.transpose\n}\n\n\[email protected]('weight_decay', [0.5, 0.1])\ndef test_weight_decay(weight_decay):\n\n lr = 0.01\n l1_lambda = 0.1\n\n # ------------------- PopART -------------------------\n config = BertConfig(vocab_length=128,\n micro_batch_size=1,\n hidden_size=768,\n sequence_length=128,\n popart_dtype=\"FLOAT\",\n no_dropout=True,\n activation_type='Gelu')\n\n data, outputs, proto, post_proto = popart_result_and_model(\n config, weight_decay=weight_decay, lr=lr, l1_lambda=l1_lambda)\n\n # ------------------- PyTorch -------------------------\n torch_config = TorchBertConfig(config.vocab_length,\n config.hidden_size,\n config.num_layers,\n config.attention_heads,\n layer_norm_eps=config.layer_norm_eps,\n hidden_dropout_prob=0.,\n hidden_act=nn.functional.gelu)\n\n inputs = [\n data.reshape(config.micro_batch_size, config.sequence_length,\n config.hidden_size)\n ]\n\n torch_output, torch_model = pytorch_result_and_model(\n torch_config,\n inputs,\n proto,\n weight_decay=weight_decay,\n lr=lr,\n l1_lambda=l1_lambda)\n\n 
# ------------------- Check outputs -------------------------\n check_tensors(torch_output, outputs)\n\n check_model(torch_model,\n post_proto,\n TORCH_TO_ONNX,\n transform=TRANSPOSE_WEIGHTS)\n\n\ndef popart_result_and_model(popart_config, weight_decay=0.0, lr=0.0, l1_lambda=0.0):\n popart_model = Bert(popart_config)\n builder = popart_model.builder\n\n input_info = popart.TensorInfo(popart_config.popart_dtype, [\n popart_config.micro_batch_size * popart_config.sequence_length,\n popart_config.hidden_size\n ])\n input_tensor = builder.addInputTensor(input_info)\n\n data = {\n input_tensor:\n np.random.normal(0, 0.02,\n input_info.shape()).astype(popart_config.dtype)\n }\n\n output = popart_model.feed_forward(input_tensor)\n\n l1 = builder.aiGraphcore.l1loss([output], l1_lambda, debugContext=\"l1LossVal\", reduction=popart.ReductionType.Sum)\n proto = builder.getModelProto()\n\n iteration = MockIteration()\n args = MockArgs(\"SGD\", lr, weight_decay)\n optimizer_factory = BaseOptimizerFactory(args, iteration,\n popart_model.tensors)\n optimizer = optimizer_factory.create()\n\n outputs, post_proto = run_py(proto,\n data, (output, l1),\n loss=l1,\n optimizer=optimizer)\n\n return data[input_tensor], outputs, proto, post_proto\n\n\ndef pytorch_result_and_model(torch_config,\n inputs,\n popart_proto,\n weight_decay=0.0,\n lr=0.0,\n l1_lambda=0.0):\n\n proto = onnx.load_model_from_string(popart_proto)\n torch_model = BertFCN(torch_config)\n torch_model.eval() # Turn off dropout\n copy_weights_to_torch(torch_model,\n proto,\n TORCH_TO_ONNX,\n transform=TRANSPOSE_WEIGHTS)\n run_fwd_model(inputs, torch_model)\n\n decay = []\n no_decay = []\n for name, param in torch_model.named_parameters():\n if \"bias\" in name or \"LayerNorm\" in name:\n no_decay.append(param)\n else:\n decay.append(param)\n\n params = [{\n 'params': no_decay,\n 'weight_decay': 0.\n }, {\n 'params': decay,\n 'weight_decay': weight_decay\n }]\n\n optim = torch.optim.SGD(params, lr, momentum=0.0)\n\n result = torch_model(*[torch.from_numpy(t).float() for t in inputs])[0]\n torch_loss = l1_lambda * torch.norm(result, 1)\n torch_loss.backward()\n optim.step()\n result = result.detach().numpy()\n\n return result, torch_model\n",
"# coding=utf-8\n# Copyright (c) 2021 Graphcore Ltd. All Rights Reserved.\n# Copyright 2018 The Google AI Language Team Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n# This file has been modified by Graphcore Ltd.\n\nimport collections\nimport json\nimport os\n\nimport six\nimport numpy as np\nimport tensorflow.compat.v1 as tf\nfrom tensorflow.contrib.data import map_and_batch\n\nfrom . import tokenization\nfrom .pretraining import _decode_record\n\n\nclass InputFeatures(object):\n \"\"\"A single set of features of data.\"\"\"\n\n def __init__(self,\n unique_id,\n example_index,\n doc_span_index,\n tokens,\n token_to_orig_map,\n token_is_max_context,\n input_ids,\n input_mask,\n segment_ids,\n start_position=None,\n end_position=None,\n is_impossible=None):\n self.unique_id = unique_id\n self.example_index = example_index\n self.doc_span_index = doc_span_index\n self.tokens = tokens\n self.token_to_orig_map = token_to_orig_map\n self.token_is_max_context = token_is_max_context\n self.input_ids = input_ids\n self.input_mask = input_mask\n self.segment_ids = segment_ids\n self.start_position = start_position\n self.end_position = end_position\n self.is_impossible = is_impossible\n\n\nclass FeatureWriter(object):\n \"\"\"Writes InputFeature to TF example file.\"\"\"\n\n def __init__(self, filename, is_training):\n self.filename = filename\n self.is_training = is_training\n self.num_features = 0\n self._writer = tf.python_io.TFRecordWriter(filename)\n\n def process_feature(self, feature):\n \"\"\"Write a InputFeature to the TFRecordWriter as a tf.train.Example.\"\"\"\n self.num_features += 1\n\n def create_int_feature(values):\n feature = tf.train.Feature(int64_list=tf.train.Int64List(\n value=list(values)))\n return feature\n\n features = collections.OrderedDict()\n features[\"unique_ids\"] = create_int_feature([feature.unique_id])\n features[\"input_ids\"] = create_int_feature(feature.input_ids)\n features[\"input_mask\"] = create_int_feature(feature.input_mask)\n features[\"segment_ids\"] = create_int_feature(feature.segment_ids)\n\n if self.is_training:\n features[\"start_positions\"] = create_int_feature(\n [feature.start_position])\n features[\"end_positions\"] = create_int_feature(\n [feature.end_position])\n impossible = 0\n if feature.is_impossible:\n impossible = 1\n features[\"is_impossible\"] = create_int_feature([impossible])\n\n tf_example = tf.train.Example(features=tf.train.Features(\n feature=features))\n self._writer.write(tf_example.SerializeToString())\n\n def close(self):\n self._writer.close()\n\n\ndef convert_examples_to_features(examples, tokenizer, max_seq_length,\n doc_stride, max_query_length, is_training,\n output_fn, padding_to=1):\n \"\"\"Loads a data file into a list of `InputBatch`s.\"\"\"\n\n unique_id = 1000000000\n num_features = 0\n\n for (example_index, example) in enumerate(examples):\n query_tokens = tokenizer.tokenize(example.question_text)\n\n if len(query_tokens) > max_query_length:\n query_tokens = query_tokens[0:max_query_length]\n\n 
tok_to_orig_index = []\n orig_to_tok_index = []\n all_doc_tokens = []\n for (i, token) in enumerate(example.doc_tokens):\n orig_to_tok_index.append(len(all_doc_tokens))\n sub_tokens = tokenizer.tokenize(token)\n for sub_token in sub_tokens:\n tok_to_orig_index.append(i)\n all_doc_tokens.append(sub_token)\n\n tok_start_position = None\n tok_end_position = None\n if is_training and example.is_impossible:\n tok_start_position = -1\n tok_end_position = -1\n if is_training and not example.is_impossible:\n tok_start_position = orig_to_tok_index[example.start_position]\n if example.end_position < len(example.doc_tokens) - 1:\n tok_end_position = orig_to_tok_index[example.end_position +\n 1] - 1\n else:\n tok_end_position = len(all_doc_tokens) - 1\n (tok_start_position, tok_end_position) = _improve_answer_span(\n all_doc_tokens, tok_start_position, tok_end_position,\n tokenizer, example.orig_answer_text)\n\n # The -3 accounts for [CLS], [SEP] and [SEP]\n max_tokens_for_doc = max_seq_length - len(query_tokens) - 3\n\n # We can have documents that are longer than the maximum sequence length.\n # To deal with this we do a sliding window approach, where we take chunks\n # of the up to our max length with a stride of `doc_stride`.\n _DocSpan = collections.namedtuple( # pylint: disable=invalid-name\n \"DocSpan\", [\"start\", \"length\"])\n doc_spans = []\n start_offset = 0\n while start_offset < len(all_doc_tokens):\n length = len(all_doc_tokens) - start_offset\n if length > max_tokens_for_doc:\n length = max_tokens_for_doc\n doc_spans.append(_DocSpan(start=start_offset, length=length))\n if start_offset + length == len(all_doc_tokens):\n break\n start_offset += min(length, doc_stride)\n\n for (doc_span_index, doc_span) in enumerate(doc_spans):\n tokens = []\n token_to_orig_map = {}\n token_is_max_context = {}\n segment_ids = []\n tokens.append(\"[CLS]\")\n segment_ids.append(0)\n for token in query_tokens:\n tokens.append(token)\n segment_ids.append(0)\n tokens.append(\"[SEP]\")\n segment_ids.append(0)\n\n for i in range(doc_span.length):\n split_token_index = doc_span.start + i\n token_to_orig_map[len(\n tokens)] = tok_to_orig_index[split_token_index]\n\n is_max_context = _check_is_max_context(doc_spans,\n doc_span_index,\n split_token_index)\n token_is_max_context[len(tokens)] = is_max_context\n tokens.append(all_doc_tokens[split_token_index])\n segment_ids.append(1)\n tokens.append(\"[SEP]\")\n segment_ids.append(1)\n\n input_ids = tokenizer.convert_tokens_to_ids(tokens)\n\n # The mask has 1 for real tokens and 0 for padding tokens. 
Only real\n # tokens are attended to.\n input_mask = [1] * len(input_ids)\n\n # Zero-pad up to the sequence length.\n while len(input_ids) < max_seq_length:\n input_ids.append(0)\n input_mask.append(0)\n segment_ids.append(0)\n\n assert len(input_ids) == max_seq_length\n assert len(input_mask) == max_seq_length\n assert len(segment_ids) == max_seq_length\n\n start_position = None\n end_position = None\n if is_training and not example.is_impossible:\n # For training, if our document chunk does not contain an annotation\n # we throw it out, since there is nothing to predict.\n doc_start = doc_span.start\n doc_end = doc_span.start + doc_span.length - 1\n out_of_span = False\n if not (tok_start_position >= doc_start and\n tok_end_position <= doc_end):\n out_of_span = True\n if out_of_span:\n start_position = 0\n end_position = 0\n else:\n doc_offset = len(query_tokens) + 2\n start_position = tok_start_position - doc_start + doc_offset\n end_position = tok_end_position - doc_start + doc_offset\n\n if is_training and example.is_impossible:\n start_position = 0\n end_position = 0\n\n if example_index < 20:\n tf.logging.info(\"*** Example ***\")\n tf.logging.info(\"unique_id: %s\" % (unique_id))\n tf.logging.info(\"example_index: %s\" % (example_index))\n tf.logging.info(\"doc_span_index: %s\" % (doc_span_index))\n tf.logging.info(\n \"tokens: %s\" %\n \" \".join([f\"{k}:\" + tokenization.printable_text(x) for k, x in enumerate(tokens)]).encode('utf-8'))\n tf.logging.info(\"token_to_orig_map: %s\" % \" \".join([\n \"%d:%d\" % (x, y)\n for (x, y) in six.iteritems(token_to_orig_map)\n ]))\n tf.logging.info(\"token_is_max_context: %s\" % \" \".join([\n \"%d:%s\" % (x, y)\n for (x, y) in six.iteritems(token_is_max_context)\n ]))\n tf.logging.info(\"input_ids: %s\" %\n \" \".join([str(x) for x in input_ids]))\n tf.logging.info(\"input_mask: %s\" %\n \" \".join([str(x) for x in input_mask]))\n tf.logging.info(\"segment_ids: %s\" %\n \" \".join([str(x) for x in segment_ids]))\n if is_training and example.is_impossible:\n tf.logging.info(\"impossible example\")\n if is_training and not example.is_impossible:\n answer_text = \" \".join(\n tokens[start_position:(end_position + 1)])\n tf.logging.info(\"start_position: %d\" % (start_position))\n tf.logging.info(\"end_position: %d\" % (end_position))\n tf.logging.info(\"answer: %s\" %\n (tokenization.printable_text(answer_text)))\n\n feature = InputFeatures(unique_id=unique_id,\n example_index=example_index,\n doc_span_index=doc_span_index,\n tokens=tokens,\n token_to_orig_map=token_to_orig_map,\n token_is_max_context=token_is_max_context,\n input_ids=input_ids,\n input_mask=input_mask,\n segment_ids=segment_ids,\n start_position=start_position,\n end_position=end_position,\n is_impossible=example.is_impossible)\n\n # Run callback\n output_fn(feature)\n\n unique_id += 1\n num_features += 1\n\n num_padding = (padding_to - num_features) % padding_to\n for k in range(num_padding):\n dummy_feature = InputFeatures(\n unique_id=-1,\n example_index=0,\n doc_span_index=0,\n tokens=[0] * max_seq_length,\n token_to_orig_map=[0],\n token_is_max_context=False,\n input_ids=[0] * max_seq_length,\n input_mask=[0] * max_seq_length,\n segment_ids=[0] * max_seq_length,\n start_position=-1,\n end_position=-1,\n is_impossible=False)\n\n output_fn(dummy_feature)\n\n return num_features\n\n\ndef _check_is_max_context(doc_spans, cur_span_index, position):\n \"\"\"Check if this is the 'max context' doc span for the token.\"\"\"\n\n # Because of the sliding window approach taken to 
scoring documents, a single\n # token can appear in multiple documents. E.g.\n # Doc: the man went to the store and bought a gallon of milk\n # Span A: the man went to the\n # Span B: to the store and bought\n # Span C: and bought a gallon of\n # ...\n #\n # Now the word 'bought' will have two scores from spans B and C. We only\n # want to consider the score with \"maximum context\", which we define as\n # the *minimum* of its left and right context (the *sum* of left and\n # right context will always be the same, of course).\n #\n # In the example the maximum context for 'bought' would be span C since\n # it has 1 left context and 3 right context, while span B has 4 left context\n # and 0 right context.\n best_score = None\n best_span_index = None\n for (span_index, doc_span) in enumerate(doc_spans):\n end = doc_span.start + doc_span.length - 1\n if position < doc_span.start:\n continue\n if position > end:\n continue\n num_left_context = position - doc_span.start\n num_right_context = end - position\n score = min(num_left_context, num_right_context) + \\\n 0.01 * doc_span.length\n if best_score is None or score > best_score:\n best_score = score\n best_span_index = span_index\n\n return cur_span_index == best_span_index\n\n\nclass SquadExample(object):\n \"\"\"A single training/test example for simple sequence classification.\n\n For examples without an answer, the start and end position are -1.\n \"\"\"\n\n def __init__(self,\n qas_id,\n question_text,\n doc_tokens,\n orig_answer_text=None,\n start_position=None,\n end_position=None,\n is_impossible=False):\n self.qas_id = qas_id\n self.question_text = question_text\n self.doc_tokens = doc_tokens\n self.orig_answer_text = orig_answer_text\n self.start_position = start_position\n self.end_position = end_position\n self.is_impossible = is_impossible\n\n def __str__(self):\n return self.__repr__()\n\n def __repr__(self):\n s = \"\"\n s += \"qas_id: %s\" % (tokenization.printable_text(self.qas_id))\n s += \", question_text: %s\" % (tokenization.printable_text(\n self.question_text).encode('utf-8'))\n s += \", doc_tokens: [%s]\" % (\" \".join(self.doc_tokens))\n if self.start_position:\n s += \", start_position: %d\" % (self.start_position)\n if self.start_position:\n s += \", end_position: %d\" % (self.end_position)\n if self.start_position:\n s += \", is_impossible: %r\" % (self.is_impossible)\n return s\n\n\ndef read_squad_examples(input_file, opts, is_training):\n \"\"\"Read a SQuAD json file into a list of SquadExample.\"\"\"\n\n examples = []\n\n if opts['generated_data']:\n for k in range(opts['num_synthetic_dataset_samples']):\n example = SquadExample(qas_id=f\"{100000000 + k}\",\n question_text=\"some question text\",\n doc_tokens=[\"a token\", \"another token\", \"a third token\"],\n orig_answer_text=\"some original answer text\",\n start_position=0,\n end_position=1,\n is_impossible=False)\n examples.append(example)\n return examples\n\n\n with tf.gfile.Open(input_file, \"r\") as reader:\n input_data = json.load(reader)[\"data\"]\n\n def is_whitespace(c):\n if c == \" \" or c == \"\\t\" or c == \"\\r\" or c == \"\\n\" or ord(c) == 0x202F:\n return True\n return False\n\n for entry in input_data:\n for paragraph in entry[\"paragraphs\"]:\n paragraph_text = paragraph[\"context\"]\n doc_tokens = []\n char_to_word_offset = []\n prev_is_whitespace = True\n for c in paragraph_text:\n if is_whitespace(c):\n prev_is_whitespace = True\n else:\n if prev_is_whitespace:\n doc_tokens.append(c)\n else:\n doc_tokens[-1] += c\n prev_is_whitespace 
= False\n char_to_word_offset.append(len(doc_tokens) - 1)\n\n for qa in paragraph[\"qas\"]:\n qas_id = qa[\"id\"]\n question_text = qa[\"question\"]\n start_position = None\n end_position = None\n orig_answer_text = None\n is_impossible = False\n if is_training:\n if opts[\"version_2_with_negative\"]:\n is_impossible = qa[\"is_impossible\"]\n if (len(qa[\"answers\"]) != 1) and (not is_impossible):\n raise ValueError(\n \"For training, each question should have exactly 1 answer.\"\n )\n if not is_impossible:\n answer = qa[\"answers\"][0]\n orig_answer_text = answer[\"text\"]\n answer_offset = answer[\"answer_start\"]\n answer_length = len(orig_answer_text)\n start_position = char_to_word_offset[answer_offset]\n end_position = char_to_word_offset[answer_offset +\n answer_length - 1]\n # Only add answers where the text can be exactly recovered from the\n # document. If this CAN'T happen it's likely due to weird Unicode\n # stuff so we will just skip the example.\n #\n # Note that this means for training mode, every example is NOT\n # guaranteed to be preserved.\n actual_text = \" \".join(\n doc_tokens[start_position:(end_position + 1)])\n cleaned_answer_text = \" \".join(\n tokenization.whitespace_tokenize(orig_answer_text))\n if actual_text.find(cleaned_answer_text) == -1:\n tf.logging.warning(\n \"Could not find answer: '%s' vs. '%s'\",\n actual_text, cleaned_answer_text)\n continue\n else:\n start_position = -1\n end_position = -1\n orig_answer_text = \"\"\n\n example = SquadExample(qas_id=qas_id,\n question_text=question_text,\n doc_tokens=doc_tokens,\n orig_answer_text=orig_answer_text,\n start_position=start_position,\n end_position=end_position,\n is_impossible=is_impossible)\n examples.append(example)\n return examples\n\n\ndef _improve_answer_span(doc_tokens, input_start, input_end, tokenizer,\n orig_answer_text):\n \"\"\"Returns tokenized answer spans that better match the annotated answer.\"\"\"\n\n # The SQuAD annotations are character based. We first project them to\n # whitespace-tokenized words. But then after WordPiece tokenization, we can\n # often find a \"better match\". For example:\n #\n # Question: What year was John Smith born?\n # Context: The leader was John Smith (1895-1943).\n # Answer: 1895\n #\n # The original whitespace-tokenized answer will be \"(1895-1943).\". However\n # after tokenization, our tokens will be \"( 1895 - 1943 ) .\". So we can match\n # the exact answer, 1895.\n #\n # However, this is not always possible. Consider the following:\n #\n # Question: What country is the top exporter of electornics?\n # Context: The Japanese electronics industry is the lagest in the world.\n # Answer: Japan\n #\n # In this case, the annotator chose \"Japan\" as a character sub-span of\n # the word \"Japanese\". Since our WordPiece tokenizer does not split\n # \"Japanese\", we just use \"Japanese\" as the annotation. 
This is fairly rare\n # in SQuAD, but does happen.\n tok_answer_text = \" \".join(tokenizer.tokenize(orig_answer_text))\n\n for new_start in range(input_start, input_end + 1):\n for new_end in range(input_end, new_start - 1, -1):\n text_span = \" \".join(doc_tokens[new_start:(new_end + 1)])\n if text_span == tok_answer_text:\n return (new_start, new_end)\n\n return (input_start, input_end)\n\n\ndef get_squad_dataset(opts, is_training):\n seq_length = opts['seq_length']\n name_to_features = {\n \"unique_ids\": tf.FixedLenFeature([], tf.int64),\n \"input_ids\": tf.FixedLenFeature([seq_length], tf.int64),\n \"input_mask\": tf.FixedLenFeature([seq_length], tf.int64),\n \"segment_ids\": tf.FixedLenFeature([seq_length], tf.int64),\n }\n if is_training:\n name_to_features[\"start_positions\"] = tf.FixedLenFeature([], tf.int64)\n name_to_features[\"end_positions\"] = tf.FixedLenFeature([], tf.int64)\n\n micro_batch_size = opts['micro_batch_size']\n tfrecord_dir = opts['tfrecord_dir']\n opts['global_batch_size'] = opts['replicas'] * opts['micro_batch_size'] * opts['gradient_accumulation_count']\n if opts[\"version_2_with_negative\"]:\n base_name_train = f\"{opts['seq_length']}_{opts['doc_stride']}_{opts['max_query_length']}_SQuAD20\"\n base_name_eval = f\"{opts['seq_length']}_{opts['doc_stride']}_{opts['max_query_length']}_{opts['global_batch_size']}_SQuAD20\"\n else:\n base_name_train = f\"{opts['seq_length']}_{opts['doc_stride']}_{opts['max_query_length']}_SQuAD11\"\n base_name_eval = f\"{opts['seq_length']}_{opts['doc_stride']}_{opts['max_query_length']}_{opts['global_batch_size']}_SQuAD11\"\n if is_training:\n filename = os.path.join(tfrecord_dir, base_name_train + \"_train.tfrecord\")\n input_file = opts['train_file']\n else:\n filename = os.path.join(tfrecord_dir, base_name_eval + \"_eval.tfrecord\")\n input_file = opts['predict_file']\n\n if not os.path.exists(filename):\n tf.logging.info(f'Preprocessing SQuAD input file: {filename}')\n\n cache_path = os.path.dirname(filename)\n if not os.path.exists(cache_path):\n tf.logging.info(f'Creating SQuAD cache in {cache_path}')\n os.makedirs(cache_path)\n\n examples = read_squad_examples(input_file=input_file,\n opts=opts,\n is_training=is_training)\n\n writer = FeatureWriter(filename=filename, is_training=is_training)\n features = []\n tokenizer = tokenization.FullTokenizer(\n vocab_file=opts['vocab_file'], do_lower_case=opts['do_lower_case'])\n\n def append_feature(feature):\n features.append(feature)\n writer.process_feature(feature)\n\n if is_training:\n # Don't need to pad for repeated dataset\n padding_to = 1\n else:\n # Padding for pipeline depth\n padding_to = opts['replicas'] * opts['micro_batch_size'] * opts['gradient_accumulation_count']\n\n num_of_features = convert_examples_to_features(examples=examples,\n tokenizer=tokenizer,\n max_seq_length=opts['seq_length'],\n doc_stride=opts['doc_stride'],\n max_query_length=opts['max_query_length'],\n is_training=is_training,\n output_fn=append_feature,\n padding_to=padding_to)\n\n if is_training:\n metadatafile = os.path.join(tfrecord_dir, \"train_\"+base_name_train+\".metadata\")\n if not os.path.exists(metadatafile):\n tf.logging.info(f'Logging converted no. of SQuAD train features in {metadatafile}')\n with open(metadatafile, 'w') as f:\n f.write(str(num_of_features) + '\\n')\n else:\n metadatafile = os.path.join(tfrecord_dir, \"eval_\"+base_name_eval+\".metadata\")\n if not os.path.exists(metadatafile):\n tf.logging.info(f'Logging converted no. 
of SQuAD eval features in {metadatafile}')\n with open(metadatafile, 'w') as f:\n f.write(str(num_of_features) + '\\n')\n writer.close()\n\n d = tf.data.TFRecordDataset(filename)\n\n if is_training:\n if opts['distributed_worker_count'] > 1:\n d = d.shard(num_shards=opts['distributed_worker_count'], index=opts['distributed_worker_index'])\n d = d.shuffle(buffer_size=100000, reshuffle_each_iteration=False)\n d = d.repeat()\n\n if opts['generated_data']:\n d = d.repeat()\n\n d = d.apply(\n map_and_batch(\n lambda record: _decode_record(record, name_to_features),\n batch_size=micro_batch_size,\n drop_remainder=True))\n\n return d\n",
"# Copyright (c) 2021 Graphcore Ltd. All rights reserved.\nimport numpy as np\nimport pdb\nimport torch\n\n\ndef bbox_overlaps(boxes, query_boxes):\n \"\"\"Compute the IOU.\n args:\n boxes:(N,4) ndarray of float\n query_boxes:(K,4) ndarray of float\n return:\n overlaps: (N,K) the IOU of boxes query_boxes\n \"\"\"\n N = boxes.shape[0]\n K = query_boxes.shape[0]\n overlaps = np.zeros((N, K))\n for k in range(K):\n box_area = ((query_boxes[k, 2] - query_boxes[k, 0] + 1) *\n (query_boxes[k, 3] - query_boxes[k, 1] + 1))\n for n in range(N):\n iw = (min(boxes[n, 2], query_boxes[k, 2]) -\n max(boxes[n, 0], query_boxes[k, 0]) + 1)\n if iw > 0:\n ih = (min(boxes[n, 3], query_boxes[k, 3]) -\n max(boxes[n, 1], query_boxes[k, 1]) + 1)\n if ih > 0:\n ua = float((boxes[n, 2] - boxes[n, 0] + 1) *\n (boxes[n, 3] - boxes[n, 1] + 1) + box_area -\n iw * ih)\n overlaps[n, k] = iw * ih / ua\n return overlaps\n\n\ndef bbox_transform_npy(ex_rois, gt_rois):\n \"\"\"Compute bounding-box regression targets for an image.\"\"\"\n\n ex_widths = ex_rois[:, 2] - ex_rois[:, 0] + 1.0\n ex_heights = ex_rois[:, 3] - ex_rois[:, 1] + 1.0\n ex_ctr_x = ex_rois[:, 0] + 0.5 * ex_widths\n ex_ctr_y = ex_rois[:, 1] + 0.5 * ex_heights\n\n gt_widths = gt_rois[:, 2] - gt_rois[:, 0] + 1.0\n gt_heights = gt_rois[:, 3] - gt_rois[:, 1] + 1.0\n gt_ctr_x = gt_rois[:, 0] + 0.5 * gt_widths\n gt_ctr_y = gt_rois[:, 1] + 0.5 * gt_heights\n\n targets_dx = (gt_ctr_x - ex_ctr_x) / ex_widths\n targets_dy = (gt_ctr_y - ex_ctr_y) / ex_heights\n targets_dw = np.log(gt_widths / ex_widths)\n targets_dh = np.log(gt_heights / ex_heights)\n\n targets = np.vstack(\n (targets_dx, targets_dy, targets_dw, targets_dh)).transpose()\n return targets\n\n\ndef bbox_transform(ex_rois, gt_rois):\n ex_widths = ex_rois[:, 2] - ex_rois[:, 0] + 1.0\n ex_heights = ex_rois[:, 3] - ex_rois[:, 1] + 1.0\n ex_ctr_x = ex_rois[:, 0] + 0.5 * ex_widths\n ex_ctr_y = ex_rois[:, 1] + 0.5 * ex_heights\n\n gt_widths = gt_rois[:, 2] - gt_rois[:, 0] + 1.0\n gt_heights = gt_rois[:, 3] - gt_rois[:, 1] + 1.0\n gt_ctr_x = gt_rois[:, 0] + 0.5 * gt_widths\n gt_ctr_y = gt_rois[:, 1] + 0.5 * gt_heights\n\n targets_dx = (gt_ctr_x - ex_ctr_x) / ex_widths\n targets_dy = (gt_ctr_y - ex_ctr_y) / ex_heights\n targets_dw = torch.log(gt_widths / ex_widths)\n targets_dh = torch.log(gt_heights / ex_heights)\n\n targets = torch.stack((targets_dx, targets_dy, targets_dw, targets_dh), 1)\n return targets\n",
"# Copyright (c) 2019 Graphcore Ltd. All rights reserved.\n\nimport random\nimport torch\nimport signal\nimport functools\nimport re\nimport sys\nimport threading\nimport traceback\nimport os\nimport time\nimport atexit\nfrom functools import partial\nimport popart\n\nIS_WINDOWS = sys.platform == \"win32\"\nif IS_WINDOWS:\n import ctypes\n from ctypes.wintypes import DWORD, BOOL, HANDLE\n\nif sys.version_info[0] == 2:\n import Queue as queue\nelse:\n import queue\n\n# NOTE [ Python Traceback Reference Cycle Problem ]\n#\n# When using sys.exc_info(), it is important to **not** store the exc_info[2],\n# which is the traceback, because otherwise you will run into the traceback\n# reference cycle problem, i.e., the traceback holding reference to the frame,\n# and the frame (which holds reference to all the object in its temporary\n# scope) holding reference the traceback.\n\n\nclass ExceptionWrapper(object):\n r\"\"\"Wraps an exception plus traceback to communicate across threads\"\"\"\n\n def __init__(self, exc_info):\n # It is important that we don't store exc_info, see\n # NOTE [ Python Traceback Reference Cycle Problem ]\n self.exc_type = exc_info[0]\n self.exc_msg = \"\".join(traceback.format_exception(*exc_info))\n\n\n_use_shared_memory = False\nr\"\"\"Whether to use shared memory in default_collate\"\"\"\n\nMP_STATUS_CHECK_INTERVAL = 5.0\nr\"\"\"Interval (in seconds) to check status of processes to avoid hanging in\n multiprocessing data loading. This is mainly used in getting data from\n another process, in which case we need to periodically check whether the\n sender is alive to prevent hanging.\"\"\"\n\nif IS_WINDOWS:\n # On Windows, the parent ID of the worker process remains unchanged when\n # the manager process is gone, and the only way to check it through OS is\n # to let the worker have a process handle of the manager and ask if the\n # process status has changed.\n class ManagerWatchdog(object):\n def __init__(self):\n self.manager_pid = os.getppid()\n\n self.kernel32 = ctypes.WinDLL('kernel32', use_last_error=True)\n self.kernel32.OpenProcess.argtypes = (DWORD, BOOL, DWORD)\n self.kernel32.OpenProcess.restype = HANDLE\n self.kernel32.WaitForSingleObject.argtypes = (HANDLE, DWORD)\n self.kernel32.WaitForSingleObject.restype = DWORD\n\n # Value obtained from\n # https://msdn.microsoft.com/en-us/library/ms684880.aspx\n SYNCHRONIZE = 0x00100000\n self.manager_handle = self.kernel32.OpenProcess(\n SYNCHRONIZE, 0, self.manager_pid)\n\n if not self.manager_handle:\n raise ctypes.WinError(ctypes.get_last_error())\n\n self.manager_dead = False\n\n def is_alive(self):\n if not self.manager_dead:\n # Value obtained from\n # https://msdn.microsoft.com/en-us/library/windows/desktop/ms687032.aspx\n self.manager_dead = self.kernel32.WaitForSingleObject(\n self.manager_handle, 0) == 0\n return not self.manager_dead\nelse:\n\n class ManagerWatchdog(object):\n def __init__(self):\n self.manager_pid = os.getppid()\n self.manager_dead = False\n\n def is_alive(self):\n if not self.manager_dead:\n self.manager_dead = os.getppid() != self.manager_pid\n return not self.manager_dead\n\n\ndef _worker_loop(dataset, index_queue, data_queue, done_event, collate_fn,\n seed, init_fn, worker_id, log_statisics):\n # See NOTE [ Data Loader Multiprocessing Shutdown Logic ] for details on\n # the logic of this function.\n\n popart.getLogger().debug(\n \"Starting dataloaderiterator worker process {} (stats:{})\".format(\n worker_id, log_statisics))\n\n try:\n global _use_shared_memory\n _use_shared_memory = 
True\n\n # Intialize C side signal handlers for SIGBUS and SIGSEGV. Python\n # signal module's handlers are executed after Python returns from C\n # low-level handlers, likely when the same fatal signal happened again\n # already. https://docs.python.org/3/library/signal.html Sec. 18.8.1.1\n torch._C._set_worker_signal_handlers()\n\n # Reduce the priority of the work thread, so that the main thread\n # runs first\n os.nice(5)\n\n torch.set_num_threads(1)\n\n random.seed(seed)\n torch.manual_seed(seed)\n\n data_queue.cancel_join_thread()\n\n if init_fn is not None:\n init_fn(worker_id)\n\n watchdog = ManagerWatchdog()\n\n processing_times = []\n waiting_times = []\n\n waiting_time_start = time.time()\n while watchdog.is_alive():\n\n try:\n r = index_queue.get(timeout=MP_STATUS_CHECK_INTERVAL)\n except queue.Empty:\n continue\n\n waiting_times.append(time.time() - waiting_time_start)\n\n if r is None:\n # Received the final signal\n assert done_event.is_set()\n return\n elif done_event.is_set():\n # Done event is set. But I haven't received the final signal\n # (None) yet. I will keep continuing until get it, and skip the\n # processing steps.\n continue\n\n idx, batch_indices = r\n try:\n if log_statisics:\n processing_time_start = time.time()\n samples = collate_fn([dataset[i] for i in batch_indices])\n processing_times.append(time.time() -\n processing_time_start)\n\n if len(processing_times) > 8:\n popart.getLogger().info(\n \"DataLoader worker:{0} waiting: {1:6.4f} \"\n \"processing:{2:6.4f}\"\n .format(\n worker_id,\n sum(waiting_times) / len(waiting_times),\n sum(processing_times) / len(processing_times)))\n processing_times.clear()\n waiting_times.clear()\n\n else:\n samples = collate_fn([dataset[i] for i in batch_indices])\n except Exception:\n # It is important that we don't store exc_info in a variable,\n # see NOTE [ Python Traceback Reference Cycle Problem ]\n data_queue.put((idx, ExceptionWrapper(sys.exc_info())))\n else:\n data_queue.put((idx, samples))\n del samples\n\n waiting_time_start = time.time()\n except KeyboardInterrupt:\n # Main process will raise KeyboardInterrupt anyways.\n pass\n\n\nnumpy_type_map = {\n 'float64': torch.DoubleTensor,\n 'float32': torch.FloatTensor,\n 'float16': torch.HalfTensor,\n 'int64': torch.LongTensor,\n 'int32': torch.IntTensor,\n 'int16': torch.ShortTensor,\n 'int8': torch.CharTensor,\n 'uint8': torch.ByteTensor,\n}\n\n\ndef default_collate(batch, tensor_type=None):\n r\"\"\"Puts each data field into a tensor with outer dimension batch size\"\"\"\n\n error_msg = \"batch must contain tensors, numbers, dicts or lists; found {}\"\n elem_type = type(batch[0])\n if isinstance(batch[0], torch.Tensor):\n out = None\n if _use_shared_memory:\n # If we're in a background process, concatenate directly into a\n # shared memory tensor to avoid an extra copy\n numel = sum([x.numel() for x in batch])\n storage = batch[0].storage()._new_shared(numel)\n out = batch[0].new(storage)\n\n # Due to ' _th_cat is not implemented for type torch.HalfTensor' we can\n # not add a transform that created batches of HalfTensor's so for not\n # we will use an option\n if tensor_type is not None:\n return torch.stack(\n batch, 0, out=out).type(numpy_type_map[tensor_type])\n else:\n return torch.stack(batch, 0, out=out)\n elif elem_type.__module__ == 'numpy' and elem_type.__name__ != 'str_' \\\n and elem_type.__name__ != 'string_':\n elem = batch[0]\n if elem_type.__name__ == 'ndarray':\n # array of string classes and object\n if re.search('[SaUO]', elem.dtype.str) is not 
None:\n raise TypeError(error_msg.format(elem.dtype))\n\n return torch.stack([torch.from_numpy(b) for b in batch], 0)\n if elem.shape == (): # scalars\n py_type = float if elem.dtype.name.startswith('float') else int\n return numpy_type_map[elem.dtype.name](list(map(py_type, batch)))\n elif isinstance(batch[0], torch._six.int_classes):\n return torch.LongTensor(batch)\n elif isinstance(batch[0], float):\n return torch.DoubleTensor(batch)\n elif isinstance(batch[0], torch._six.string_classes):\n return batch\n elif isinstance(batch[0], torch._six.container_abcs.Mapping):\n return {\n key: default_collate([d[key] for d in batch], tensor_type)\n for key in batch[0]\n }\n elif isinstance(batch[0], torch._six.container_abcs.Sequence):\n transposed = zip(*batch)\n return [\n default_collate(samples, tensor_type) for samples in transposed\n ]\n\n raise TypeError((error_msg.format(type(batch[0]))))\n\n\n_python_exit_status = False\nr\"\"\"Whether Python is shutting down. This flag is guaranteed to be set before\nthe Python core library resources are freed, but Python may already be exiting\nfor some time when this is set.\n\nHook to set this flag is `_set_python_exit_flag`, and is inspired by a similar\nhook in Python 3.7 multiprocessing library:\nhttps://github.com/python/cpython/blob/d4d60134b29290049e28df54f23493de4f1824b6/Lib/multiprocessing/util.py#L277-L327\n\"\"\"\n\n\ndef _set_python_exit_flag():\n global _python_exit_status\n _python_exit_status = True\n\n\natexit.register(_set_python_exit_flag)\n\n\nclass _DataLoaderIter(object):\n r\"\"\"Iterates once over the DataLoader's dataset, as specified by the\n sampler\"\"\"\n\n # NOTE [ Data Loader Multiprocessing Shutdown Logic ]\n #\n # Preliminary:\n #\n # Our data model looks like this\n # (queues are indicated with curly brackets):\n #\n # main process ||\n # | ||\n # {index_queue} ||\n # | ||\n # worker processes || DATA\n # | ||\n # | || FLOW\n # | ||\n # | || DIRECTION\n # | ||\n # {data_queue} ||\n # | ||\n # data output \\/\n #\n #\n # Terminating multiprocessing logic requires very careful design. In\n # particular, we need to make sure that\n #\n # 1. The iterator gracefully exits the workers when its last reference is\n # gone or it is depleted.\n #\n # In this case, the workers should be gracefully exited because the\n # main process may still need to continue to run, and we want cleaning\n # up code in the workers to be executed.\n # Naturally, we implement the shutdown logic in `__del__` of\n # DataLoaderIterator.\n #\n # We delay the discussion on the logic in this case until later.\n #\n # 2. The iterator exits the workers when the loader process and/or worker\n # processes exits normally or with error.\n #\n # We set all workers to have `daemon=True`.\n #\n # You may ask, why can't we make the workers non-daemonic, and\n # gracefully exit using the same logic as we have in `__del__` when\n # the iterator gets deleted (see 1 above)?\n #\n # First of all, `__del__` is **not** guaranteed to be called when\n # interpreter exits. Even if it is called, by the time it executes,\n # many Python core library resources may alreay be freed, and even\n # simple things like acquiring an internal lock of a queue may hang.\n # Therefore, in this case, we actually need to prevent `__del__` from\n # being executed, and rely on the automatic termination of daemonic\n # children. Thus, we register an `atexit` hook that sets a global flag\n # `_python_exit_status`. 
Since `atexit` hooks are executed in reverse\n # order of registration, we are guaranteed that this flag is set\n # before library resources we use are freed. (Hooks freeing those\n # resources are registered at importing the Python core libraries at\n # the top of this file.) So in `__del__`, we check if\n # `_python_exit_status` is set or `None` (freed), and perform no-op\n # if so.\n #\n # Another problem with `__del__` is also related to the library\n # cleanup calls. When a process ends, it shuts the all its daemonic\n # children down with a SIGTERM (instead of joining them without a\n # timeout). Simiarly for threads, but by a different mechanism. This\n # fact, together with a few implementation details of multiprocessing,\n # forcesus to make workers daemonic. All of our problems arise when a\n # DataLoader is used in a subprocess, and are caused by\n # multiprocessing code which looks more or less like this:\n #\n # try:\n # your_function_using_a_dataloader()\n # finally:\n # multiprocessing.util._exit_function()\n #\n # The joining/termination mentioned above happens inside\n # `_exit_function()`. Now, if `your_function_using_a_dataloader()`\n # throws, the stack trace stored in the exception will prevent the\n # frame which uses `DataLoaderIter` to be freed. If the frame has any\n # reference to the `DataLoaderIter` (e.g., in a method of the iter),\n # its `__del__`, which starts the shutdown procedure, will not be\n # called. That, in turn, means that workers aren't notified.\n # Attempting to join in `_exit_function` will then result in a hang.\n #\n # For context, `_exit_function` is also registered as an `atexit`\n # call. So it is unclear to me (@ssnl) why this is needed in a finally\n # block.\n # The code dates back to 2008 and there is no comment on the original\n # PEP 371 or patch https://bugs.python.org/issue3050 (containing both\n # the finally block and the `atexit` registration) that explains this.\n #\n # Another choice is to just shutdown workers with logic in 1 above\n # whenever we see an error in `next`. This isn't ideal because\n # a. It prevents users from using try-catch to resume data loading.\n # b. It doesn't prevent hanging if users have references to the\n # iterator.\n #\n # 3. All processes exit if any of them die unexpectedly by fatal signals.\n #\n # As shown above, the workers are set as daemonic children of the main\n # process. However, automatic cleaning-up of such child processes only\n # happens if the parent process exits gracefully (e.g., not via fatal\n # signals like SIGKILL). So we must ensure that each process will exit\n # even the process that should send/receive data to/from it were\n # killed, i.e.,\n #\n # a. A process won't hang when getting from a queue.\n #\n # Even with carefully designed data dependencies (i.e., a `put()`\n # always corresponding to a `get()`), hanging on `get()` can\n # still happen when data in queue is corrupted (e.g., due to\n # `cancel_join_thread` or unexpected exit).\n #\n # For child exit, we register SIGCHLD handler on main process,\n # which checks if any of the workers fail in the (Python)\n # handler.\n # See DataLoader.cpp.\n #\n # For `.get()` calls where the sender(s) is not the workers, we\n # guard them with timeouts, and check the status of the sender\n # when timeout happens:\n # + in the workers, the `ManagerWatchdog` class checks the main\n # process status.\n #\n # b. 
A process won't hang when putting into a queue;\n #\n # We use `mp.Queue` which has a separate background thread to put\n # objects from an unbounded buffer array. The background thread\n # is daemonic and usually automatically joined when the process\n # exits.\n #\n # However, in case that the receiver has ended abruptly while\n # reading from the pipe, the join will hang forever. Therefore,\n # for both `worker_result_queue` (worker -> main process)\n # and each `index_queue` (main process -> worker), we use\n # `q.cancel_join_thread()` in sender process before any `q.put`\n # to prevent this automatic join.\n #\n # Moreover, having all queues called `cancel_join_thread` makes\n # implementing graceful shutdown logic in `__del__` much easier.\n # It won't need to get from any queue, which would also need to\n # be guarded by periodic status checks.\n #\n # Note that this may leave corrupted data in the queue, but we\n # don't care about the data anyways once we are shutting down.\n #\n #\n # Now let's get back to 1:\n # how we gracefully exit the workers when the last reference to the\n # iteartor is gone.\n #\n # To achieve this, we implement the following logic along with the design\n # choices mentioned above:\n #\n # [worker processes]\n # While loader process is alive:\n # Get from index_queue.\n # If got a `None`, exit.\n # If get anything else,\n # Check `done_event`.\n # If set, continue to next iteration\n # i.e., keep getting until see the `None`, then exit.\n # Otherwise, process data.\n # If timed out,\n # No matter `done_event` is set (still need to see `None`) or not,\n # must continue to next iteration .\n #\n #\n # [main process]\n # In the DataLoader Iter's `__del__`\n # a. Set `done_event`\n #\n # Note: from here on, the workers may exit at\n # any time after they receive `None`.\n #\n # c. Exit the workers.\n # i. Put `None` in each worker's `index_queue`.\n # ii. Join the workers.\n #\n # NOTE: This has to be after (b) because it may leave corrupted data\n # in `worker_result_queue`.\n #\n # NB: `done_event`s isn't strictly needed. 
E.g., we can just check for\n # `None` from `index_queue`, but it allows us to skip wasting resources\n # processing indices already in `index_queue` if we are already\n # shutting down.\n\n def __init__(self, loader):\n self.dataset = loader.dataset\n self.collate_fn = loader.collate_fn\n self.batch_sampler = loader.batch_sampler\n self.num_workers = loader.num_workers\n self.timeout = loader.timeout\n self.batches_outstanding = 0\n\n self.log_statisics = loader.log_statisics\n self.processing_times = []\n\n self.sample_iter = iter(self.batch_sampler)\n\n base_seed = torch.LongTensor(1).random_().item()\n\n if self.num_workers > 0:\n self.worker_init_fn = loader.worker_init_fn\n self.worker_queue_idx = 0\n self.worker_result_queue = torch.multiprocessing.Queue()\n\n self.worker_pids_set = False\n self.shutdown = False\n self.send_idx = 0\n self.rcvd_idx = 0\n self.reorder_dict = {}\n self.done_event = torch.multiprocessing.Event()\n\n # Set to try when resetting the iterator and we want to consume any\n # outstanding data from the workers\n self.flush_data_queue = False\n\n self.index_queues = []\n self.workers = []\n for i in range(self.num_workers):\n index_queue = torch.multiprocessing.Queue()\n index_queue.cancel_join_thread()\n w = torch.multiprocessing.Process(\n target=_worker_loop,\n args=(self.dataset, index_queue, self.worker_result_queue,\n self.done_event, self.collate_fn, base_seed + i,\n self.worker_init_fn, i, self.log_statisics))\n w.daemon = True\n # NB: Process.start() actually take some time as it needs to\n # start a process and pass the arguments over via a pipe.\n # Therefore, we only add a worker to self.workers list\n # after it started, so that we do not call .join() if\n # program dies before it starts, and __del__ tries to join\n # but will get:\n # AssertionError: can only join a started process.\n w.start()\n self.index_queues.append(index_queue)\n self.workers.append(w)\n\n self.data_queue = self.worker_result_queue\n\n torch.utils.data._utils.signal_handling._set_worker_pids(\n id(self), tuple(w.pid for w in self.workers))\n torch.utils.data._utils.signal_handling._set_SIGCHLD_handler()\n self.worker_pids_set = True\n\n def reset(self):\n\n popart.getLogger().debug(\"Resetting the dataloaderiterator\")\n\n # Drain the workers\n self.flush_data_queue = True\n while self.batches_outstanding > 0:\n self.__next__()\n self.flush_data_queue = False\n\n # Reset the sample iterator\n self.send_idx = 0\n self.rcvd_idx = 0\n self.sample_iter = iter(self.batch_sampler)\n\n # prime the prefetch loop\n for _ in range(2 * self.num_workers):\n self._put_indices()\n\n def __len__(self):\n return len(self.batch_sampler)\n\n def _get_batch(self):\n # In the non-timeout case, worker exit is covered by SIGCHLD handler.\n if self.timeout > 0:\n try:\n return self.data_queue.get(timeout=self.timeout)\n except queue.Empty:\n raise RuntimeError(\n 'DataLoader timed out after {} seconds'.format(\n self.timeout))\n else:\n return self.data_queue.get()\n\n def __next__(self):\n if self.num_workers == 0: # same-process loading\n indices = next(self.sample_iter) # may raise StopIteration\n if self.log_statisics:\n t = time.time()\n batch = self.collate_fn([self.dataset[i] for i in indices])\n self.processing_times.append(time.time() - t)\n\n if len(self.processing_times) > 8:\n popart.getLogger().info(\n \"DataLoader processing:{0:6.4f}\".format(\n sum(self.processing_times) / len(\n self.processing_times)))\n self.processing_times.clear()\n else:\n batch = 
self.collate_fn([self.dataset[i] for i in indices])\n return [x.numpy() for x in batch]\n\n # check if the next sample has already been generated\n if self.rcvd_idx in self.reorder_dict:\n batch = self.reorder_dict.pop(self.rcvd_idx)\n return self._process_next_batch(batch)\n\n # raise the stop iteration execption when we have recevied all\n # batches in the data set\n if self.batches_outstanding == 0:\n raise StopIteration\n\n while True:\n assert (not self.shutdown and self.batches_outstanding > 0)\n idx, batch = self._get_batch()\n self.batches_outstanding -= 1\n if idx != self.rcvd_idx:\n # store out-of-order samples\n self.reorder_dict[idx] = batch\n continue\n return self._process_next_batch(batch)\n\n next = __next__ # Python 2 compatibility\n\n def __iter__(self):\n return self\n\n def _put_indices(self):\n\n assert self.batches_outstanding < 2 * self.num_workers\n indices = next(self.sample_iter, None)\n if indices is None:\n return\n\n self.index_queues[self.worker_queue_idx].put((self.send_idx, indices))\n self.worker_queue_idx = (self.worker_queue_idx + 1) % self.num_workers\n self.batches_outstanding += 1\n self.send_idx += 1\n\n def _process_next_batch(self, batch):\n self.rcvd_idx += 1\n if not self.flush_data_queue:\n self._put_indices()\n if isinstance(batch, ExceptionWrapper):\n raise batch.exc_type(batch.exc_msg)\n\n # Convert the batch into numpy arrays\n return [x.numpy() for x in batch]\n\n def __getstate__(self):\n # TODO: add limited pickling support for sharing an iterator\n # across multiple threads for HOGWILD.\n # Probably the best way to do this is by moving the sample pushing\n # to a separate thread and then just sharing the data queue\n # but signalling the end is tricky without a non-blocking API\n raise NotImplementedError(\"_DataLoaderIter cannot be pickled\")\n\n def _shutdown_workers(self):\n # See NOTE [ Data Loader Multiprocessing Shutdown Logic ] for details\n # on the logic of this function.\n if _python_exit_status is True or _python_exit_status is None:\n # See (2) of the note. If Python is shutting down, do no-op.\n return\n # Normal exit when last reference is gone / iterator is depleted.\n # See (1) and the second half of the note.\n if not self.shutdown:\n self.shutdown = True\n # Removes pids from the C side data structure first so worker\n # termination afterwards won't trigger false positive error report.\n if self.worker_pids_set:\n torch.utils.data._utils.signal_handling._remove_worker_pids(\n id(self))\n self.worker_pids_set = False\n\n self.done_event.set()\n\n # Exit workers now.\n for q in self.index_queues:\n q.put(None)\n # Indicate that no more data will be put on this queue by the\n # current process.\n q.close()\n for w in self.workers:\n w.join()\n\n def __del__(self):\n if self.num_workers > 0:\n self._shutdown_workers()\n\n\nclass DataLoader(object):\n r\"\"\"\n Data loader. Combines a dataset and a sampler, and provides\n single- or multi-process iterators over the dataset.\n\n This is a customized DataLoader for popart which is inspired by the torch\n DataLoader. The difference being that the __iter__ call reuses the\n _DataLoader when called multiple times, to prevent the work processes being\n stopped and respawned.\n\n The DataLoader can be used with the 'enumerate' call to get an interator to\n the data set. 
The iterator will return numpy arrays.\n\n Additionally the cuda pin_memory option has been removed.\n\n Arguments:\n dataset (Dataset): dataset from which to load the data.\n batch_size (int, optional): how many samples per batch to load\n (default: ``1``).\n shuffle (bool, optional): set to ``True`` to have the data reshuffled\n at every epoch (default: ``False``).\n sampler (Sampler, optional): defines the strategy to draw samples from\n the dataset. If specified, ``shuffle`` must be False.\n batch_sampler (Sampler, optional): like sampler, but returns a batch of\n indices at a time. Mutually exclusive with :attr:`batch_size`,\n :attr:`shuffle`, :attr:`sampler`, and :attr:`drop_last`.\n num_workers (int, optional): how many subprocesses to use for data\n loading. 0 means that the data will be loaded in the main process.\n (default: ``0``)\n collate_fn (callable, optional): merges a list of samples to form a\n mini-batch.\n drop_last (bool, optional): set to ``True`` to drop the last incomplete\n batch, if the dataset size is not divisible by the batch size.\n If ``False`` and the size of dataset is not divisible by the batch\n size, then the last batch will be smaller. (default: ``False``)\n tensor_type : The type of tensor to be returned. By default the type will\n be float32. This value can be set to 'float32','float16' to return\n the desired type\n timeout (numeric, optional): if positive, the timeout value for\n collecting a batch from workers. Should always be non-negative.\n (default: ``0``)\n worker_init_fn (callable, optional): If not ``None``, this will be called\n on each worker subprocess with the worker id (an int in\n ``[0, num_workers - 1]``) as input, after seeding and before data\n loading. (default: ``None``)\n\n .. note:: By default, each worker will have its PyTorch seed set to\n ``base_seed + worker_id``, where ``base_seed`` is a long generated\n by main process using its RNG. However, seeds for other libraies\n may be duplicated upon initializing workers (w.g., NumPy), causing\n each worker to return identical random numbers. (See\n :ref:`dataloader-workers-random-seed` section in FAQ.) You may\n use :func:`torch.initial_seed()` to access the PyTorch seed for\n each worker in :attr:`worker_init_fn`, and use it to set other\n seeds before data loading.\n\n .. 
warning:: If ``spawn`` start method is used, :attr:`worker_init_fn` cannot\n be an unpicklable object, e.g., a lambda function.\n \"\"\"\n\n def __init__(self,\n dataset,\n batch_size=1,\n shuffle=False,\n sampler=None,\n batch_sampler=None,\n num_workers=0,\n collate_fn=default_collate,\n drop_last=False,\n tensor_type=None,\n timeout=0,\n worker_init_fn=None,\n log_statisics=False):\n self.dataset = dataset\n self.batch_size = batch_size\n self.num_workers = num_workers\n self.collate_fn = partial(collate_fn, tensor_type=tensor_type)\n self.drop_last = drop_last\n self.timeout = timeout\n self.worker_init_fn = worker_init_fn\n self.log_statisics = log_statisics\n\n popart.getLogger().info(\n \"DataLoader created batchsize:{} num_workers:{} \"\n \"shuffle:{} tensor_type:{}\"\n .format(batch_size, num_workers, shuffle, tensor_type))\n\n if timeout < 0:\n raise ValueError('timeout option should be non-negative')\n\n if batch_sampler is not None:\n if batch_size > 1 or shuffle or sampler is not None or drop_last:\n raise ValueError('batch_sampler option is mutually exclusive '\n 'with batch_size, shuffle, sampler, and '\n 'drop_last')\n self.batch_size = None\n self.drop_last = None\n\n if sampler is not None and shuffle:\n raise ValueError('sampler option is mutually exclusive with '\n 'shuffle')\n\n if self.num_workers < 0:\n raise ValueError('num_workers option cannot be negative; '\n 'use num_workers=0 to disable multiprocessing.')\n\n if batch_sampler is None:\n if sampler is None:\n if shuffle:\n sampler = torch.utils.data.RandomSampler(dataset)\n else:\n sampler = torch.utils.data.SequentialSampler(dataset)\n batch_sampler = torch.utils.data.BatchSampler(\n sampler, batch_size, drop_last)\n\n self.sampler = sampler\n self.batch_sampler = batch_sampler\n\n self.dataloaderiter = None\n\n def __iter__(self):\n\n # Only create the iterator once, as will fork process when created\n # which causes problems if the current process already has\n # synchronisation objects\n if self.dataloaderiter is None:\n self.dataloaderiter = _DataLoaderIter(self)\n\n return self.dataloaderiter\n\n def __len__(self):\n return len(self.batch_sampler)\n",
"# Copyright (c) 2021 Graphcore Ltd. All rights reserved.\n# Written by Hu Di\nimport argparse\nimport torch\nimport time\nimport numpy as np\nimport os\nimport sys\nsys.path.append('./IPU')\nimport shutil\nimport cv2\nimport random\nfrom models.get_model import make_model\nfrom gc_session import Session\nfrom ipu_tensor import gcop\nfrom datasets.data_loader import get_data_loader\nfrom utils import logger\nfrom yaml_parser import change_cfg_by_yaml_file, save_yaml\nfrom config import cfg\nfrom utils.utils import load_from_pth_with_mappin, load_onnx, checkNaN_np\n\n\ndef parse_args():\n \"\"\"\n Parse input arguments\n \"\"\"\n parser = argparse.ArgumentParser(description='Train a Fast R-CNN network')\n parser.add_argument('yaml', type=str, help='path of yaml')\n args = parser.parse_args()\n return args\n\n\n# change the cfg inplace by yaml config\nargs = parse_args()\nyaml_file_path = args.yaml\nchange_cfg_by_yaml_file(yaml_file_path)\n\nthreads = 0\nos.environ[\"OMP_NUM_THREADS\"] = str(threads)\nos.environ[\"OPENBLAS_NUM_THREADS\"] = str(threads)\nos.environ[\"MKL_NUM_THREADS\"] = str(threads)\nos.environ[\"VECLIB_MAXIMUM_THREADS\"] = str(threads)\nos.environ[\"NUMEXPR_NUM_THREADS\"] = str(threads)\ncv2.setNumThreads(threads)\n\n\ndef setup_seed(seed):\n torch.manual_seed(seed)\n torch.cuda.manual_seed_all(seed)\n np.random.seed(seed)\n random.seed(seed)\n torch.backends.cudnn.deterministic = True\n\n\nsetup_seed(cfg.TRAIN.SEED)\n\n# init results folder\noutput_dir = cfg.output_dir\nif not os.path.exists(output_dir):\n os.makedirs(output_dir)\nsave_yaml(os.path.join(output_dir, 'config.yaml'))\nshutil.copy(yaml_file_path, os.path.join(output_dir, 'neat_config.yaml'))\n\n# set IPU\ngcop.safe_mode_on()\ngcop.set_options(cfg.SESSION)\ngcop.set_seed(cfg.TRAIN.SEED)\ngcop.set_memory_proportion(cfg.TRAIN.AVAILABLE_MEMORY_PROPORTION)\nif cfg.MODEL.LOAD_STRICT:\n gcop.set_load_strict()\n\n# init log\nlogger.init_log(output_dir,\n log_name=cfg.task_name,\n resume=cfg.TRAIN.RESUME,\n tb_on=cfg.TRAIN.TB_ON,\n wandb_on=cfg.TRAIN.WANDB_ON)\nlogger.log_str('output dir:', output_dir)\n\n# set data\ntrain_dataloader = get_data_loader(cfg)\niters_per_epoch = len(train_dataloader)\ntrain_dataloader_iter = iter(train_dataloader)\ntrain_size = iters_per_epoch * cfg.TRAIN.BATCH_SIZE\nIM_WIDTH, IM_HEIGHT = cfg.INPUT_SIZE\ninput_im_shape = [1, 3, IM_HEIGHT, IM_WIDTH]\n\n# load initializers\ninit_weights_path = cfg.INIT_WEIGHTS_PATH\nmappin_path = cfg.WEIGHTS_MAPPIN_PAtH\ninitializer = load_from_pth_with_mappin(init_weights_path, mappin_path)\nweights_path = cfg.TRAIN.PRETRAINED_WEIGHTS\nif weights_path is not None:\n logger.log_str('loading weights:', weights_path)\n if weights_path.endswith('.pth'):\n append_initializer = load_from_pth_with_mappin(weights_path,\n mappin_path)\n elif weights_path.endswith('.onnx'):\n append_initializer = load_onnx(weights_path)\n else:\n raise RuntimeError('wrong format: {}'.format(weights_path))\n initializer = {**initializer, **append_initializer}\n gcop.enable_global_initializer(initializer)\n\n# make model\nnet = make_model(\n cfg.MODEL_NAME,\n input_im_shape=input_im_shape,\n input_box_num=cfg.TRAIN.NUM_GT_BOXES,\n fp16_on=cfg.FLOAT16_ON,\n classes=[1] * cfg.NUM_CLASSES,\n training=True,\n)\n\nnet.bulid_graph()\n\nspecific_dic = {}\nif not cfg.TRAIN.BIAS_DECAY:\n trainable_variables = gcop.trainable_variables()\n trainbale_bias = list(\n filter(lambda tensor: 'bias' in tensor.name, trainable_variables))\n for bias in trainbale_bias:\n specific_dic[bias.name] = 
{\"weightDecay\": (0.0, True)}\n\n# optimizer\nstart_lr = cfg.TRAIN.LEARNING_RATE * \\\n cfg.TRAIN.WARMUP_FACTOR if cfg.TRAIN.WARMUP_ITERS > 0 else cfg.TRAIN.LEARNING_RATE\nend_lr = cfg.TRAIN.LEARNING_RATE\ncurrent_lr = start_lr\nnext_momentum = cfg.TRAIN.MOMENTUM\noptimizer = gcop.bF.SGD(learning_rate=current_lr,\n momentum=next_momentum,\n weight_decay=cfg.TRAIN.WEIGHT_DECAY,\n clip_norm=cfg.TRAIN.CLIP_NORM,\n lossScaling=cfg.TRAIN.LOSS_SCALING,\n specific_dic=specific_dic)\n\n# check if resume\nstart_iters = -1\nstate_json = os.path.join(output_dir, 'state.json')\nif cfg.TRAIN.RESUME and os.path.exists(state_json):\n start_iters = net.load_from_snap(output_dir)\n\ncurrentT = time.time()\nsess = Session(net.outputs, optimizer=optimizer, loss=net.loss)\nlogger.log_str('model build time:', (time.time() - currentT) / 60,\n ' miniutes')\n\nlocal_iters = 0\n\nlogger.log_str('task name: ', cfg.task_name)\nstepsize = cfg.TRAIN.STEPSIZE\nmax_iters = cfg.TRAIN.MAX_ITERS\nsave_iters = cfg.TRAIN.SAVE_ITERS\nstart_to_find_smallest_loss_iters = cfg.TRAIN.START_TO_FIND_SMALLEST_LOSS_ITERS if cfg.TRAIN.START_TO_FIND_SMALLEST_LOSS_ITERS > 0 else float(\n 'inf')\nlogger.log_str('stepsize:{}, max iters:{}'.format(stepsize, max_iters))\nsmallest_loss = float('inf')\n\nif start_iters == -1: # no past training is resumed\n sess.save_model(os.path.join(output_dir, 'init_weights.onnx'))\n\n# init data collector\nindices_collector = [] # collect image indices\ncurrentT = time.time()\nwhile 1:\n\n if local_iters <= cfg.TRAIN.WARMUP_ITERS and cfg.TRAIN.WARMUP_ITERS != 0:\n current_lr = (start_lr * (cfg.TRAIN.WARMUP_ITERS - local_iters) +\n end_lr * local_iters) / cfg.TRAIN.WARMUP_ITERS\n optimizer.adj_lr(current_lr, sess.session, specific_dic=specific_dic)\n\n if local_iters in stepsize:\n if isinstance(cfg.TRAIN.GAMMA, list):\n current_lr = cfg.TRAIN.GAMMA.pop(0)\n else:\n current_lr *= cfg.TRAIN.GAMMA\n optimizer.adj_lr(current_lr, sess.session, specific_dic=specific_dic)\n\n if local_iters <= start_iters:\n local_iters += 1\n continue\n\n blobs = next(train_dataloader_iter)\n\n im_data = blobs['img'].numpy().astype(np.float32)\n raw_boxes = blobs['gt_bboxes'].numpy().astype(np.float32)\n raw_labels = blobs['gt_labels'].numpy().astype(np.float32)[\n :, :, np.newaxis]\n assert raw_labels.max() < cfg.NUM_CLASSES\n gt_boxes = np.concatenate([raw_boxes, raw_labels],\n axis=2)\n # TODO -1 will be converted to 4294967295, but fortunately we only sample non-negative labels for training\n rpn_label = blobs['rpn_label'].numpy().astype(np.uint32)\n rpn_keep = blobs['rpn_keep'].numpy().astype(np.uint32)\n rpn_bbox_targets = blobs['rpn_bbox_targets'].numpy().astype(np.float32)\n rpn_bbox_inside_weights = blobs['rpn_bbox_inside_weights'].numpy().astype(\n np.float32)\n rpn_bbox_outside_weights = blobs['rpn_bbox_outside_weights'].numpy().astype(\n np.float32)\n\n local_inputs = [\n im_data, gt_boxes,\n rpn_label, rpn_keep, rpn_bbox_targets, rpn_bbox_inside_weights,\n rpn_bbox_outside_weights\n ]\n\n feed_dict = {net.inputs[k]: n for k, n in zip(net.inputs, local_inputs)}\n\n start_time = time.time()\n for i in range(100):\n results_dic = sess.run(feed_dict=feed_dict)\n time_used = time.time() - start_time\n tput = 100 * cfg.TRAIN.BATCH_SIZE / time_used\n logger.log_str('Faster-RCNN training Tput: {}'.format(tput))\n",
"# Copyright (c) 2021 Graphcore Ltd. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the 'License');\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an 'AS IS' BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport time\nimport numpy\nimport editdistance\nimport torch\nfrom itertools import groupby\n\n\ndef get_kl_acc(y_pred, y_true, ignore_id=0):\n \"\"\"Calculate the accuracy of the conformer model in each validate step\"\"\"\n\n y_pred = y_pred.flatten()\n y_true = y_true.flatten()\n\n mask = y_true != ignore_id\n numerator = torch.sum(y_pred[mask] == y_true[mask], axis = 0)\n denominator = torch.sum(mask, axis = 0)\n\n return float(numerator) / float(denominator)\n\n\ndef get_char_dict(dict_path):\n char_dict = {}\n index = 0\n with open(dict_path, 'r') as fp:\n for item in fp.readlines():\n item = item.strip()\n char_dict[int(index)] = item\n index += 1\n return char_dict\n\n\ndef get_cer(y_pred, y_true, dict_path, blank_id=0):\n y_pred = numpy.reshape(y_pred, [-1, y_pred.shape[-1]])\n y_true = numpy.reshape(y_true, [-1, y_true.shape[-1]])\n char_dict = get_char_dict(dict_path)\n\n cers, char_ref_lens = [], []\n for i, y in enumerate(y_pred):\n y_hat_i = [x[0] for x in groupby(y)]\n y_true_i = y_true[i]\n seq_hat, seq_true = [], []\n for idx in y_hat_i:\n idx = int(idx)\n if idx in char_dict.keys():\n seq_hat.append(char_dict[int(idx)])\n\n for idx in y_true_i:\n idx = int(idx)\n if idx in char_dict.keys():\n seq_true.append(char_dict[int(idx)])\n\n hyp_chars = \"\".join(seq_hat)\n ref_chars = \"\".join(seq_true)\n # Here only compare the ones before <sos/eos> which is the predicted value of the correctly identified part and the label\n hyp_chars = hyp_chars.split('<sos/eos>')[0]\n ref_chars = ref_chars.split('<sos/eos>')[0]\n if len(ref_chars) > 0:\n cers.append(editdistance.eval(hyp_chars, ref_chars))\n char_ref_lens.append(len(ref_chars))\n\n cer = float(sum(cers)) / sum(char_ref_lens) if cers else None\n\n return cer\n"
] | [
[
"tensorflow.pywrap_tensorflow.NewCheckpointReader",
"numpy.sum",
"numpy.min",
"numpy.isnan",
"numpy.linalg.norm",
"numpy.max",
"numpy.prod",
"tensorflow.trainable_variables",
"tensorflow.Summary",
"numpy.array",
"numpy.histogram",
"tensorflow.HistogramProto"
],
[
"torch.manual_seed",
"torch.optim.lr_scheduler.ReduceLROnPlateau",
"torch.save"
],
[
"torch.distributed.get_rank",
"torch.distributed.get_world_size",
"torch.distributed.is_initialized",
"numpy.log"
],
[
"tensorflow.compat.v1.not_equal",
"tensorflow.compat.v1.nn.dropout",
"tensorflow.compat.v1.concat",
"tensorflow.compat.v1.zeros_initializer",
"tensorflow.compat.v1.eye",
"tensorflow.compat.v1.constant",
"tensorflow.compat.v1.identity",
"tensorflow.compat.v1.train.AdamOptimizer",
"tensorflow.compat.v1.nn.sigmoid",
"tensorflow.compat.v1.cos",
"tensorflow.compat.v1.unsorted_segment_sum",
"tensorflow.compat.v1.reshape",
"tensorflow.compat.v1.sigmoid",
"tensorflow.compat.v1.unstack",
"tensorflow.compat.v1.variable_scope",
"tensorflow.compat.v1.exp",
"tensorflow.compat.v1.glorot_normal_initializer",
"tensorflow.compat.v1.tanh",
"tensorflow.compat.v1.tile",
"tensorflow.compat.v1.mod",
"tensorflow.compat.v1.reduce_max",
"tensorflow.compat.v1.cast",
"tensorflow.compat.v1.stack",
"tensorflow.compat.v1.random_normal_initializer",
"tensorflow.compat.v1.expand_dims",
"tensorflow.compat.v1.control_dependencies",
"tensorflow.compat.v1.roll",
"tensorflow.compat.v1.gather",
"tensorflow.compat.v1.repeat",
"tensorflow.compat.v1.nn.sigmoid_cross_entropy_with_logits"
],
[
"tensorflow.compat.v1.gradients",
"numpy.lib.stride_tricks.as_strided",
"tensorflow.python.ipu.scopes.ipu_scope",
"numpy.random.default_rng",
"tensorflow.python.ipu.ipu_compiler.compile",
"tensorflow.compat.v1.train.AdamOptimizer",
"tensorflow.compat.v1.reduce_sum",
"tensorflow.compat.v1.trainable_variables",
"numpy.copy",
"tensorflow.compat.v1.variable_scope",
"tensorflow.python.ipu.utils.move_variable_initialization_to_cpu",
"tensorflow.compat.v1.Graph",
"numpy.testing.assert_allclose",
"tensorflow.compat.v1.disable_eager_execution",
"numpy.array",
"tensorflow.python.ipu.config.IPUConfig",
"tensorflow.compat.v1.device",
"tensorflow.compat.v1.disable_v2_behavior",
"numpy.set_printoptions",
"tensorflow.compat.v1.convert_to_tensor",
"tensorflow.compat.v1.control_dependencies",
"tensorflow.compat.v1.logging.set_verbosity",
"tensorflow.compat.v1.Session",
"tensorflow.compat.v1.global_variables_initializer",
"tensorflow.compat.v1.placeholder"
],
[
"numpy.reshape",
"numpy.kron",
"numpy.ones",
"numpy.all",
"numpy.array",
"numpy.random.RandomState",
"numpy.zeros",
"numpy.sum",
"numpy.empty"
],
[
"torch.sum",
"torch.zeros"
],
[
"numpy.square",
"numpy.random.seed",
"numpy.random.choice",
"numpy.random.normal",
"numpy.mean",
"numpy.array",
"numpy.zeros",
"numpy.sum"
],
[
"tensorflow.python.ipu.ops.pipelining_ops.OptimizerFunctionOutput",
"tensorflow.python.ipu.pipelining_ops.PipelineStageOptions",
"tensorflow.python.ipu.scopes.ipu_scope",
"numpy.mean",
"tensorflow.compat.v1.train.Saver",
"tensorflow.python.ipu.ipu_compiler.compile",
"tensorflow.compat.v1.global_variables",
"tensorflow.python.ipu.horovod.init",
"tensorflow.compat.v1.trainable_variables",
"tensorflow.python.ipu.utils.move_variable_initialization_to_cpu",
"tensorflow.python.ipu.ipu_outfeed_queue.IPUOutfeedQueue",
"tensorflow.compat.v1.errors.ResourceExhaustedError",
"tensorflow.compat.v1.distribute.get_strategy",
"tensorflow.compat.v1.get_collection",
"tensorflow.compat.v1.Graph",
"tensorflow.compat.v1.cast",
"tensorflow.compat.v1.train.latest_checkpoint",
"tensorflow.compat.v1.distribute.has_strategy",
"tensorflow.compat.v1.summary.FileWriter",
"tensorflow.compat.v1.logging.set_verbosity",
"tensorflow.compat.v1.Session",
"tensorflow.compat.v1.global_variables_initializer",
"tensorflow.compat.v1.placeholder",
"tensorflow.python.ipu.horovod.popdist_strategy.IPUMultiReplicaStrategy",
"tensorflow.compat.v1.Summary"
],
[
"torch.norm",
"torch.from_numpy",
"torch.optim.SGD"
],
[
"tensorflow.compat.v1.data.TFRecordDataset",
"tensorflow.compat.v1.gfile.Open",
"tensorflow.compat.v1.logging.warning",
"tensorflow.compat.v1.FixedLenFeature",
"tensorflow.compat.v1.logging.info",
"tensorflow.compat.v1.python_io.TFRecordWriter",
"tensorflow.compat.v1.train.Features"
],
[
"numpy.log",
"torch.log",
"torch.stack",
"numpy.zeros",
"numpy.vstack"
],
[
"torch._C._set_worker_signal_handlers",
"torch.LongTensor",
"torch.multiprocessing.Queue",
"torch.manual_seed",
"torch.utils.data.SequentialSampler",
"torch.utils.data.RandomSampler",
"torch.from_numpy",
"torch.multiprocessing.Event",
"torch.set_num_threads",
"torch.utils.data._utils.signal_handling._set_SIGCHLD_handler",
"torch.multiprocessing.Process",
"torch.stack",
"torch.DoubleTensor",
"torch.utils.data.BatchSampler"
],
[
"numpy.concatenate",
"torch.manual_seed",
"numpy.random.seed",
"torch.cuda.manual_seed_all"
],
[
"numpy.reshape",
"torch.sum"
]
] |
rehohoho/mmsegmentation | [
"a73ae7a421e07741fda62c9d81b335cbc4b7f7d6",
"a73ae7a421e07741fda62c9d81b335cbc4b7f7d6"
] | [
"mmseg/models/decode_heads/knet_head.py",
"mmseg/models/decode_heads/stdc_head.py"
] | [
"# Copyright (c) OpenMMLab. All rights reserved.\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom mmcv.cnn import ConvModule, build_activation_layer, build_norm_layer\nfrom mmcv.cnn.bricks.transformer import (FFN, TRANSFORMER_LAYER,\n MultiheadAttention,\n build_transformer_layer)\n\nfrom mmseg.models.builder import HEADS, build_head\nfrom mmseg.models.decode_heads.decode_head import BaseDecodeHead\nfrom mmseg.utils import get_root_logger\n\n\n@TRANSFORMER_LAYER.register_module()\nclass KernelUpdator(nn.Module):\n \"\"\"Dynamic Kernel Updator in Kernel Update Head.\n\n Args:\n in_channels (int): The number of channels of input feature map.\n Default: 256.\n feat_channels (int): The number of middle-stage channels in\n the kernel updator. Default: 64.\n out_channels (int): The number of output channels.\n gate_sigmoid (bool): Whether use sigmoid function in gate\n mechanism. Default: True.\n gate_norm_act (bool): Whether add normalization and activation\n layer in gate mechanism. Default: False.\n activate_out: Whether add activation after gate mechanism.\n Default: False.\n norm_cfg (dict | None): Config of norm layers.\n Default: dict(type='LN').\n act_cfg (dict): Config of activation layers.\n Default: dict(type='ReLU').\n \"\"\"\n\n def __init__(\n self,\n in_channels=256,\n feat_channels=64,\n out_channels=None,\n gate_sigmoid=True,\n gate_norm_act=False,\n activate_out=False,\n norm_cfg=dict(type='LN'),\n act_cfg=dict(type='ReLU', inplace=True),\n ):\n super(KernelUpdator, self).__init__()\n self.in_channels = in_channels\n self.feat_channels = feat_channels\n self.out_channels_raw = out_channels\n self.gate_sigmoid = gate_sigmoid\n self.gate_norm_act = gate_norm_act\n self.activate_out = activate_out\n self.act_cfg = act_cfg\n self.norm_cfg = norm_cfg\n self.out_channels = out_channels if out_channels else in_channels\n\n self.num_params_in = self.feat_channels\n self.num_params_out = self.feat_channels\n self.dynamic_layer = nn.Linear(\n self.in_channels, self.num_params_in + self.num_params_out)\n self.input_layer = nn.Linear(self.in_channels,\n self.num_params_in + self.num_params_out,\n 1)\n self.input_gate = nn.Linear(self.in_channels, self.feat_channels, 1)\n self.update_gate = nn.Linear(self.in_channels, self.feat_channels, 1)\n if self.gate_norm_act:\n self.gate_norm = build_norm_layer(norm_cfg, self.feat_channels)[1]\n\n self.norm_in = build_norm_layer(norm_cfg, self.feat_channels)[1]\n self.norm_out = build_norm_layer(norm_cfg, self.feat_channels)[1]\n self.input_norm_in = build_norm_layer(norm_cfg, self.feat_channels)[1]\n self.input_norm_out = build_norm_layer(norm_cfg, self.feat_channels)[1]\n\n self.activation = build_activation_layer(act_cfg)\n\n self.fc_layer = nn.Linear(self.feat_channels, self.out_channels, 1)\n self.fc_norm = build_norm_layer(norm_cfg, self.out_channels)[1]\n\n def forward(self, update_feature, input_feature):\n \"\"\"Forward function of KernelUpdator.\n\n Args:\n update_feature (torch.Tensor): Feature map assembled from\n each group. 
It would be reshaped with last dimension\n shape: `self.in_channels`.\n input_feature (torch.Tensor): Intermediate feature\n with shape: (N, num_classes, conv_kernel_size**2, channels).\n Returns:\n Tensor: The output tensor of shape (N*C1/C2, K*K, C2), where N is\n the number of classes, C1 and C2 are the feature map channels of\n KernelUpdateHead and KernelUpdator, respectively.\n \"\"\"\n\n update_feature = update_feature.reshape(-1, self.in_channels)\n num_proposals = update_feature.size(0)\n # dynamic_layer works for\n # phi_1 and psi_3 in Eq.(4) and (5) of K-Net paper\n parameters = self.dynamic_layer(update_feature)\n param_in = parameters[:, :self.num_params_in].view(\n -1, self.feat_channels)\n param_out = parameters[:, -self.num_params_out:].view(\n -1, self.feat_channels)\n\n # input_layer works for\n # phi_2 and psi_4 in Eq.(4) and (5) of K-Net paper\n input_feats = self.input_layer(\n input_feature.reshape(num_proposals, -1, self.feat_channels))\n input_in = input_feats[..., :self.num_params_in]\n input_out = input_feats[..., -self.num_params_out:]\n\n # `gate_feats` is F^G in K-Net paper\n gate_feats = input_in * param_in.unsqueeze(-2)\n if self.gate_norm_act:\n gate_feats = self.activation(self.gate_norm(gate_feats))\n\n input_gate = self.input_norm_in(self.input_gate(gate_feats))\n update_gate = self.norm_in(self.update_gate(gate_feats))\n if self.gate_sigmoid:\n input_gate = input_gate.sigmoid()\n update_gate = update_gate.sigmoid()\n param_out = self.norm_out(param_out)\n input_out = self.input_norm_out(input_out)\n\n if self.activate_out:\n param_out = self.activation(param_out)\n input_out = self.activation(input_out)\n\n # Gate mechanism. Eq.(5) in original paper.\n # param_out has shape (batch_size, feat_channels, out_channels)\n features = update_gate * param_out.unsqueeze(\n -2) + input_gate * input_out\n\n features = self.fc_layer(features)\n features = self.fc_norm(features)\n features = self.activation(features)\n\n return features\n\n\[email protected]_module()\nclass KernelUpdateHead(nn.Module):\n \"\"\"Kernel Update Head in K-Net.\n\n Args:\n num_classes (int): Number of classes. Default: 150.\n num_ffn_fcs (int): The number of fully-connected layers in\n FFNs. Default: 2.\n num_heads (int): The number of parallel attention heads.\n Default: 8.\n num_mask_fcs (int): The number of fully connected layers for\n mask prediction. Default: 3.\n feedforward_channels (int): The hidden dimension of FFNs.\n Defaults: 2048.\n in_channels (int): The number of channels of input feature map.\n Default: 256.\n out_channels (int): The number of output channels.\n Default: 256.\n dropout (float): The Probability of an element to be\n zeroed in MultiheadAttention and FFN. 
Default 0.0.\n act_cfg (dict): Config of activation layers.\n Default: dict(type='ReLU').\n ffn_act_cfg (dict): Config of activation layers in FFN.\n Default: dict(type='ReLU').\n conv_kernel_size (int): The kernel size of convolution in\n Kernel Update Head for dynamic kernel updation.\n Default: 1.\n feat_transform_cfg (dict | None): Config of feature transform.\n Default: None.\n kernel_init (bool): Whether initiate mask kernel in mask head.\n Default: False.\n with_ffn (bool): Whether add FFN in kernel update head.\n Default: True.\n feat_gather_stride (int): Stride of convolution in feature transform.\n Default: 1.\n mask_transform_stride (int): Stride of mask transform.\n Default: 1.\n kernel_updator_cfg (dict): Config of kernel updator.\n Default: dict(\n type='DynamicConv',\n in_channels=256,\n feat_channels=64,\n out_channels=256,\n act_cfg=dict(type='ReLU', inplace=True),\n norm_cfg=dict(type='LN')).\n \"\"\"\n\n def __init__(self,\n num_classes=150,\n num_ffn_fcs=2,\n num_heads=8,\n num_mask_fcs=3,\n feedforward_channels=2048,\n in_channels=256,\n out_channels=256,\n dropout=0.0,\n act_cfg=dict(type='ReLU', inplace=True),\n ffn_act_cfg=dict(type='ReLU', inplace=True),\n conv_kernel_size=1,\n feat_transform_cfg=None,\n kernel_init=False,\n with_ffn=True,\n feat_gather_stride=1,\n mask_transform_stride=1,\n kernel_updator_cfg=dict(\n type='DynamicConv',\n in_channels=256,\n feat_channels=64,\n out_channels=256,\n act_cfg=dict(type='ReLU', inplace=True),\n norm_cfg=dict(type='LN'))):\n super(KernelUpdateHead, self).__init__()\n self.num_classes = num_classes\n self.in_channels = in_channels\n self.out_channels = out_channels\n self.fp16_enabled = False\n self.dropout = dropout\n self.num_heads = num_heads\n self.kernel_init = kernel_init\n self.with_ffn = with_ffn\n self.conv_kernel_size = conv_kernel_size\n self.feat_gather_stride = feat_gather_stride\n self.mask_transform_stride = mask_transform_stride\n\n self.attention = MultiheadAttention(in_channels * conv_kernel_size**2,\n num_heads, dropout)\n self.attention_norm = build_norm_layer(\n dict(type='LN'), in_channels * conv_kernel_size**2)[1]\n self.kernel_update_conv = build_transformer_layer(kernel_updator_cfg)\n\n if feat_transform_cfg is not None:\n kernel_size = feat_transform_cfg.pop('kernel_size', 1)\n transform_channels = in_channels\n self.feat_transform = ConvModule(\n transform_channels,\n in_channels,\n kernel_size,\n stride=feat_gather_stride,\n padding=int(feat_gather_stride // 2),\n **feat_transform_cfg)\n else:\n self.feat_transform = None\n\n if self.with_ffn:\n self.ffn = FFN(\n in_channels,\n feedforward_channels,\n num_ffn_fcs,\n act_cfg=ffn_act_cfg,\n dropout=dropout)\n self.ffn_norm = build_norm_layer(dict(type='LN'), in_channels)[1]\n\n self.mask_fcs = nn.ModuleList()\n for _ in range(num_mask_fcs):\n self.mask_fcs.append(\n nn.Linear(in_channels, in_channels, bias=False))\n self.mask_fcs.append(\n build_norm_layer(dict(type='LN'), in_channels)[1])\n self.mask_fcs.append(build_activation_layer(act_cfg))\n\n self.fc_mask = nn.Linear(in_channels, out_channels)\n\n def init_weights(self):\n \"\"\"Use xavier initialization for all weight parameter and set\n classification head bias as a specific value when use focal loss.\"\"\"\n for p in self.parameters():\n if p.dim() > 1:\n nn.init.xavier_uniform_(p)\n else:\n # adopt the default initialization for\n # the weight and bias of the layer norm\n pass\n if self.kernel_init:\n logger = get_root_logger()\n logger.info(\n 'mask kernel in mask head is normal 
initialized by std 0.01')\n nn.init.normal_(self.fc_mask.weight, mean=0, std=0.01)\n\n def forward(self, x, proposal_feat, mask_preds, mask_shape=None):\n \"\"\"Forward function of Dynamic Instance Interactive Head.\n\n Args:\n x (Tensor): Feature map from FPN with shape\n (batch_size, feature_dimensions, H , W).\n proposal_feat (Tensor): Intermediate feature get from\n diihead in last stage, has shape\n (batch_size, num_proposals, feature_dimensions)\n mask_preds (Tensor): mask prediction from the former stage in shape\n (batch_size, num_proposals, H, W).\n\n Returns:\n Tuple: The first tensor is predicted mask with shape\n (N, num_classes, H, W), the second tensor is dynamic kernel\n with shape (N, num_classes, channels, K, K).\n \"\"\"\n N, num_proposals = proposal_feat.shape[:2]\n if self.feat_transform is not None:\n x = self.feat_transform(x)\n\n C, H, W = x.shape[-3:]\n\n mask_h, mask_w = mask_preds.shape[-2:]\n if mask_h != H or mask_w != W:\n gather_mask = F.interpolate(\n mask_preds, (H, W), align_corners=False, mode='bilinear')\n else:\n gather_mask = mask_preds\n\n sigmoid_masks = gather_mask.softmax(dim=1)\n\n # Group Feature Assembling. Eq.(3) in original paper.\n # einsum is faster than bmm by 30%\n x_feat = torch.einsum('bnhw,bchw->bnc', sigmoid_masks, x)\n\n # obj_feat in shape [B, N, C, K, K] -> [B, N, C, K*K] -> [B, N, K*K, C]\n proposal_feat = proposal_feat.reshape(N, num_proposals,\n self.in_channels,\n -1).permute(0, 1, 3, 2)\n obj_feat = self.kernel_update_conv(x_feat, proposal_feat)\n\n # [B, N, K*K, C] -> [B, N, K*K*C] -> [N, B, K*K*C]\n obj_feat = obj_feat.reshape(N, num_proposals, -1).permute(1, 0, 2)\n obj_feat = self.attention_norm(self.attention(obj_feat))\n # [N, B, K*K*C] -> [B, N, K*K*C]\n obj_feat = obj_feat.permute(1, 0, 2)\n\n # obj_feat in shape [B, N, K*K*C] -> [B, N, K*K, C]\n obj_feat = obj_feat.reshape(N, num_proposals, -1, self.in_channels)\n\n # FFN\n if self.with_ffn:\n obj_feat = self.ffn_norm(self.ffn(obj_feat))\n\n mask_feat = obj_feat\n\n for reg_layer in self.mask_fcs:\n mask_feat = reg_layer(mask_feat)\n\n # [B, N, K*K, C] -> [B, N, C, K*K]\n mask_feat = self.fc_mask(mask_feat).permute(0, 1, 3, 2)\n\n if (self.mask_transform_stride == 2 and self.feat_gather_stride == 1):\n mask_x = F.interpolate(\n x, scale_factor=0.5, mode='bilinear', align_corners=False)\n H, W = mask_x.shape[-2:]\n else:\n mask_x = x\n # group conv is 5x faster than unfold and uses about 1/5 memory\n # Group conv vs. unfold vs. concat batch, 2.9ms :13.5ms :3.8ms\n # Group conv vs. unfold vs. 
concat batch, 278 : 1420 : 369\n # but in real training group conv is slower than concat batch\n # so we keep using concat batch.\n # fold_x = F.unfold(\n # mask_x,\n # self.conv_kernel_size,\n # padding=int(self.conv_kernel_size // 2))\n # mask_feat = mask_feat.reshape(N, num_proposals, -1)\n # new_mask_preds = torch.einsum('bnc,bcl->bnl', mask_feat, fold_x)\n # [B, N, C, K*K] -> [B*N, C, K, K]\n mask_feat = mask_feat.reshape(N, num_proposals, C,\n self.conv_kernel_size,\n self.conv_kernel_size)\n # [B, C, H, W] -> [1, B*C, H, W]\n new_mask_preds = []\n for i in range(N):\n new_mask_preds.append(\n F.conv2d(\n mask_x[i:i + 1],\n mask_feat[i],\n padding=int(self.conv_kernel_size // 2)))\n\n new_mask_preds = torch.cat(new_mask_preds, dim=0)\n new_mask_preds = new_mask_preds.reshape(N, num_proposals, H, W)\n if self.mask_transform_stride == 2:\n new_mask_preds = F.interpolate(\n new_mask_preds,\n scale_factor=2,\n mode='bilinear',\n align_corners=False)\n\n if mask_shape is not None and mask_shape[0] != H:\n new_mask_preds = F.interpolate(\n new_mask_preds,\n mask_shape,\n align_corners=False,\n mode='bilinear')\n\n return new_mask_preds, obj_feat.permute(0, 1, 3, 2).reshape(\n N, num_proposals, self.in_channels, self.conv_kernel_size,\n self.conv_kernel_size)\n\n\[email protected]_module()\nclass IterativeDecodeHead(BaseDecodeHead):\n \"\"\"K-Net: Towards Unified Image Segmentation.\n\n This head is the implementation of\n `K-Net: <https://arxiv.org/abs/2106.14855>`_.\n\n Args:\n num_stages (int): The number of stages (kernel update heads)\n in IterativeDecodeHead. Default: 3.\n kernel_generate_head:(dict): Config of kernel generate head which\n generate mask predictions, dynamic kernels and class predictions\n for next kernel update heads.\n kernel_update_head (dict): Config of kernel update head which refine\n dynamic kernels and class predictions iteratively.\n\n \"\"\"\n\n def __init__(self, num_stages, kernel_generate_head, kernel_update_head,\n **kwargs):\n super(BaseDecodeHead, self).__init__(**kwargs)\n assert num_stages == len(kernel_update_head)\n self.num_stages = num_stages\n self.kernel_generate_head = build_head(kernel_generate_head)\n self.kernel_update_head = nn.ModuleList()\n self.align_corners = self.kernel_generate_head.align_corners\n self.num_classes = self.kernel_generate_head.num_classes\n self.input_transform = self.kernel_generate_head.input_transform\n self.ignore_index = self.kernel_generate_head.ignore_index\n\n for head_cfg in kernel_update_head:\n self.kernel_update_head.append(build_head(head_cfg))\n\n def forward(self, inputs):\n \"\"\"Forward function.\"\"\"\n feats = self.kernel_generate_head._forward_feature(inputs)\n sem_seg = self.kernel_generate_head.cls_seg(feats)\n seg_kernels = self.kernel_generate_head.conv_seg.weight.clone()\n seg_kernels = seg_kernels[None].expand(\n feats.size(0), *seg_kernels.size())\n\n stage_segs = [sem_seg]\n for i in range(self.num_stages):\n sem_seg, seg_kernels = self.kernel_update_head[i](feats,\n seg_kernels,\n sem_seg)\n stage_segs.append(sem_seg)\n if self.training:\n return stage_segs\n # only return the prediction of the last stage during testing\n return stage_segs[-1]\n\n def losses(self, seg_logit, seg_label):\n losses = dict()\n for i, logit in enumerate(seg_logit):\n loss = self.kernel_generate_head.losses(logit, seg_label)\n for k, v in loss.items():\n losses[f'{k}.s{i}'] = v\n\n return losses\n",
"# Copyright (c) OpenMMLab. All rights reserved.\nimport torch\nimport torch.nn.functional as F\n\nfrom ..builder import HEADS\nfrom .fcn_head import FCNHead\n\n\[email protected]_module()\nclass STDCHead(FCNHead):\n \"\"\"This head is the implementation of `Rethinking BiSeNet For Real-time\n Semantic Segmentation <https://arxiv.org/abs/2104.13188>`_.\n\n Args:\n boundary_threshold (float): The threshold of calculating boundary.\n Default: 0.1.\n \"\"\"\n\n def __init__(self, boundary_threshold=0.1, **kwargs):\n super(STDCHead, self).__init__(**kwargs)\n self.boundary_threshold = boundary_threshold\n # Using register buffer to make laplacian kernel on the same\n # device of `seg_label`.\n self.register_buffer(\n 'laplacian_kernel',\n torch.tensor([-1, -1, -1, -1, 8, -1, -1, -1, -1],\n dtype=torch.float32,\n requires_grad=False).reshape((1, 1, 3, 3)))\n self.fusion_kernel = torch.nn.Parameter(\n torch.tensor([[6. / 10], [3. / 10], [1. / 10]],\n dtype=torch.float32).reshape(1, 3, 1, 1),\n requires_grad=False)\n\n def losses(self, seg_logit, seg_label):\n \"\"\"Compute Detail Aggregation Loss.\"\"\"\n # Note: The paper claims `fusion_kernel` is a trainable 1x1 conv\n # parameters. However, it is a constant in original repo and other\n # codebase because it would not be added into computation graph\n # after threshold operation.\n seg_label = seg_label.to(self.laplacian_kernel)\n boundary_targets = F.conv2d(\n seg_label, self.laplacian_kernel, padding=1)\n boundary_targets = boundary_targets.clamp(min=0)\n boundary_targets[boundary_targets > self.boundary_threshold] = 1\n boundary_targets[boundary_targets <= self.boundary_threshold] = 0\n\n boundary_targets_x2 = F.conv2d(\n seg_label, self.laplacian_kernel, stride=2, padding=1)\n boundary_targets_x2 = boundary_targets_x2.clamp(min=0)\n\n boundary_targets_x4 = F.conv2d(\n seg_label, self.laplacian_kernel, stride=4, padding=1)\n boundary_targets_x4 = boundary_targets_x4.clamp(min=0)\n\n boundary_targets_x4_up = F.interpolate(\n boundary_targets_x4, boundary_targets.shape[2:], mode='nearest')\n boundary_targets_x2_up = F.interpolate(\n boundary_targets_x2, boundary_targets.shape[2:], mode='nearest')\n\n boundary_targets_x2_up[\n boundary_targets_x2_up > self.boundary_threshold] = 1\n boundary_targets_x2_up[\n boundary_targets_x2_up <= self.boundary_threshold] = 0\n\n boundary_targets_x4_up[\n boundary_targets_x4_up > self.boundary_threshold] = 1\n boundary_targets_x4_up[\n boundary_targets_x4_up <= self.boundary_threshold] = 0\n\n boudary_targets_pyramids = torch.stack(\n (boundary_targets, boundary_targets_x2_up, boundary_targets_x4_up),\n dim=1)\n\n boudary_targets_pyramids = boudary_targets_pyramids.squeeze(2)\n boudary_targets_pyramid = F.conv2d(boudary_targets_pyramids,\n self.fusion_kernel)\n\n boudary_targets_pyramid[\n boudary_targets_pyramid > self.boundary_threshold] = 1\n boudary_targets_pyramid[\n boudary_targets_pyramid <= self.boundary_threshold] = 0\n\n loss = super(STDCHead, self).losses(seg_logit,\n boudary_targets_pyramid.long())\n return loss\n"
] | [
[
"torch.cat",
"torch.einsum",
"torch.nn.ModuleList",
"torch.nn.Linear",
"torch.nn.init.normal_",
"torch.nn.functional.interpolate",
"torch.nn.init.xavier_uniform_"
],
[
"torch.stack",
"torch.nn.functional.conv2d",
"torch.nn.functional.interpolate",
"torch.tensor"
]
] |
tsutterley/captoolkit | [
"314c4d34f49012c25286478c943b0ab13c893c62",
"314c4d34f49012c25286478c943b0ab13c893c62",
"314c4d34f49012c25286478c943b0ab13c893c62"
] | [
"captoolkit/readgla12.py",
"captoolkit/fittopo.py",
"captoolkit/cubesmb.py"
] | [
"#!/usr/bin/env python\r\n\"\"\"\r\n Reads GLA12 Release 634 HDF5.\r\n \r\n Reads several files in parallel if njobs > 1 is specified.\r\n \r\n Extracts a subset of the data based on a mask.tif file.\r\n \r\n Example:\r\n \r\n python readgla.py /mnt/devon-r0/shared_data/icesat/GLAH12.034/ /mnt/devon-r0/shared_data/icesat/grounded/ /mnt/devon-r0/shared_data/masks/ANT_groundedice_240m.tif 3031 A 600 1\r\n \r\n See full GLA12 parameters at:\r\n \r\n http://nsidc.org/data/docs/daac/glas_altimetry/data-dictionary-glah12.html\r\n \r\n Notes:\r\n \r\n For previous releases the path of some fields have changed!\r\n \r\n Corrections applied by default (i.e. data come corrected):\r\n \r\n instrument corrections - was applied\r\n atmospheric delays (wet/dry tropo) - was applied\r\n tides and load - was applied\r\n GC offset - was applied\r\n \r\n saturation (d_satElevCorr) - was NOT applied [1]\r\n inter-campaign bias - was NOT applied\r\n \r\n [1] If it is invalid, then the elevation should not be used.\r\n The saturation correction flag (sat_corr_flg) is an important\r\n flag to understand the possible quality of the elevation data.\r\n \r\n To REMOVE the tide and load cor, and APPLY saturation cor:\r\n \r\n elev_retide = d_elev + d_ocElv + d_ldElv + d_satElevCorr\r\n \r\n\"\"\"\r\n\r\nimport os\r\nimport sys\r\nimport h5py\r\nimport pyproj\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nfrom joblib import Parallel, delayed\r\nfrom gdalconst import *\r\nfrom osgeo import gdal, osr\r\nfrom scipy.ndimage import map_coordinates\r\n\r\n\r\ndef geotiffread(ifile,metaData):\r\n \"\"\"Read raster from file.\"\"\"\r\n \r\n file = gdal.Open(ifile, GA_ReadOnly)\r\n \r\n projection = file.GetProjection()\r\n src = osr.SpatialReference()\r\n src.ImportFromWkt(projection)\r\n proj = src.ExportToWkt()\r\n \r\n Nx = file.RasterXSize\r\n Ny = file.RasterYSize\r\n \r\n trans = file.GetGeoTransform()\r\n \r\n dx = trans[1]\r\n dy = trans[5]\r\n \r\n if metaData == \"A\":\r\n \r\n xp = np.arange(Nx)\r\n yp = np.arange(Ny)\r\n \r\n (Xp, Yp) = np.meshgrid(xp,yp)\r\n \r\n X = trans[0] + (Xp+0.5)*trans[1] + (Yp+0.5)*trans[2] #FIXME: bottleneck!\r\n Y = trans[3] + (Xp+0.5)*trans[4] + (Yp+0.5)*trans[5]\r\n \r\n if metaData == \"P\":\r\n \r\n xp = np.arange(Nx)\r\n yp = np.arange(Ny)\r\n \r\n (Xp, Yp) = np.meshgrid(xp,yp)\r\n \r\n X = trans[0] + Xp*trans[1] + Yp*trans[2] #FIXME: bottleneck!\r\n Y = trans[3] + Xp*trans[4] + Yp*trans[5]\r\n\r\n band = file.GetRasterBand(1)\r\n\r\n Z = band.ReadAsArray()\r\n \r\n dx = np.abs(dx)\r\n dy = np.abs(dy)\r\n \r\n return X, Y, Z, dx, dy, proj\r\n\r\n\r\ndef bilinear2d(xd,yd,data,xq,yq, **kwargs):\r\n \"\"\"Bilinear interpolation from grid.\"\"\"\r\n \r\n xd = np.flipud(xd)\r\n yd = np.flipud(yd)\r\n data = np.flipud(data)\r\n \r\n xd = xd[0,:]\r\n yd = yd[:,0]\r\n \r\n nx, ny = xd.size, yd.size\r\n (x_step, y_step) = (xd[1]-xd[0]), (yd[1]-yd[0])\r\n \r\n assert (ny, nx) == data.shape\r\n assert (xd[-1] > xd[0]) and (yd[-1] > yd[0])\r\n \r\n if np.size(xq) == 1 and np.size(yq) > 1:\r\n xq = xq*ones(yq.size)\r\n elif np.size(yq) == 1 and np.size(xq) > 1:\r\n yq = yq*ones(xq.size)\r\n \r\n xp = (xq-xd[0])*(nx-1)/(xd[-1]-xd[0])\r\n yp = (yq-yd[0])*(ny-1)/(yd[-1]-yd[0])\r\n\r\n coord = np.vstack([yp,xp])\r\n \r\n zq = map_coordinates(data, coord, **kwargs)\r\n \r\n return zq\r\n\r\n\r\ndef wrap_to_180(lon):\r\n \"\"\"Wrapps longitude to -180 to 180 degrees.\"\"\"\r\n lon[lon>180] -= 360.\r\n return lon\r\n\r\n\r\ndef list_files(path, endswith='.h5'):\r\n \"\"\" List files in 
dir recursively. \"\"\"\r\n return [os.path.join(dpath, f)\r\n for dpath, dnames, fnames in os.walk(path)\r\n for f in fnames if f.endswith(endswith)]\r\n\r\ndef track_type(time, lat):\r\n \"\"\"\r\n Determines ascending and descending tracks.\r\n Defines unique tracks as segments with time breaks > tmax,\r\n and tests whether lat increases or decreases w/time.\r\n \"\"\"\r\n \r\n # Generate track segment\r\n tracks = np.zeros(lat.shape)\r\n \r\n # Set values for segment\r\n tracks[0:np.argmax(np.abs(lat))] = 1\r\n\r\n # Output index array\r\n i_asc = np.zeros(tracks.shape, dtype=bool)\r\n\r\n # Loop trough individual tracks\r\n for track in np.unique(tracks):\r\n \r\n # Get all points from an individual track\r\n i_track, = np.where(track == tracks)\r\n\r\n # Test tracks length\r\n if len(i_track) < 2:\r\n continue\r\n \r\n # Test if lat increases (asc) or decreases (des) w/time\r\n i_min = time[i_track].argmin()\r\n i_max = time[i_track].argmax()\r\n lat_diff = lat[i_track][i_max] - lat[i_track][i_min]\r\n \r\n # Determine track type\r\n if lat_diff > 0:\r\n i_asc[i_track] = True\r\n\r\n # Output index vector's\r\n return i_asc, np.invert(i_asc)\r\n\r\n\r\nindir = sys.argv[1] # input dir\r\noutdir = sys.argv[2] # output dir\r\nfmask = sys.argv[3] # geotiff file with mask\r\nproj = str(sys.argv[4]) # epsg number\r\nmeta = sys.argv[5] # \"A\" or \"P\"\r\nindex = int(sys.argv[6]) # mission index\r\nnjobs = int(sys.argv[7]) # number of parallel jobs\r\n\r\n# Generate file list\r\nfiles = list_files(indir, endswith='.H5')\r\n\r\nprint(('input dir:', indir))\r\nprint(('output dir:', outdir))\r\nprint(('mask file:', fmask))\r\nprint(('epsg num:', proj))\r\nprint(('metadata:', meta))\r\nprint(('njobs:', njobs))\r\nprint(('# files:', len(files)))\r\n\r\n\r\n# Projection - unprojected lat/lon\r\nprojGeo = pyproj.Proj(\"+init=EPSG:4326\")\r\n\r\n# Make pyproj format\r\nprojection = '+init=EPSG:' + proj\r\n\r\n# Projection - prediction grid\r\nprojGrd = pyproj.Proj(projection)\r\n\r\niter = 1\r\nindex = 600\r\n\r\n# Test for mask\r\nif fmask != 'None':\r\n \r\n # Read in masking grid\r\n (Xm, Ym, Zm, dX, dY, Proj) = geotiffread(fmask, meta)\r\n\r\ndef main(fname):\r\n \r\n print(('readg:', fname, '...'))\r\n \r\n global iter\r\n \r\n f = h5py.File(fname)\r\n \r\n d = {} # Dictionary for input fields\r\n \r\n d['t_sec'] = f['Data_40HZ/Time/d_UTCTime_40'] # [secs since 2000-01-01 12:00:00 UTC]\r\n \r\n d['lat'] = f['Data_40HZ/Geolocation/d_lat'] # [deg]\r\n d['lon'] = f['Data_40HZ/Geolocation/d_lon'] # [deg]\r\n\r\n d['num_pk'] = f['Data_40HZ/Waveform/i_numPk'] # Num Peaks found in the Return\r\n d['gain'] = f['Data_40HZ/Waveform/i_gval_rcv'] # counts [unitless]\r\n d['rec_nrg'] = f['Data_40HZ/Reflectivity/d_RecNrgAll'] # [joules]\r\n d['tx_nrg'] = f['Data_40HZ/Transmit_Energy/d_TxNrg'] # [joules]\r\n \r\n d['h_sat'] = f['Data_40HZ/Elevation_Corrections/d_satElevCorr'] # saturation cor [m]\r\n d['h_gc'] = f['Data_40HZ/Elevation_Corrections/d_GmC'] # GC-offset cor [m]\r\n d['h_dry'] = f['Data_40HZ/Elevation_Corrections/d_dTrop'] # dry tropo [m]\r\n d['h_wet'] = f['Data_40HZ/Elevation_Corrections/d_wTrop'] # wet tropo [m]\r\n \r\n d['h_sol'] = f['Data_40HZ/Geophysical/d_erElv'] # solid tide [m]\r\n d['h_geo'] = f['Data_40HZ/Geophysical/d_poTide'] # geoc pole tide [m]\r\n d['h_equi'] = f['Data_40HZ/Geophysical/d_eqElv'] # equilib tide [m]\r\n d['h_ellip'] = f['Data_40HZ/Geophysical/d_deltaEllip'] # h_TP - h_WGS84 [m]\r\n d['h_tide'] = f['Data_40HZ/Geophysical/d_ocElv'] # ocean tide [m]\r\n d['h_load'] = 
f['Data_40HZ/Geophysical/d_ldElv'] # load tide [m]\r\n \r\n d['h_cor'] = f['Data_40HZ/Elevation_Surfaces/d_elev'] # corrected height [m]\r\n d['misfit'] = f['Data_40HZ/Elevation_Surfaces/d_IceSVar'] # gaussian misfit [volts] [2]\r\n \r\n d['rec_ndx'] = f['Data_40HZ/Time/i_rec_ndx'] # record index\r\n d['shot_count'] = f['Data_40HZ/Time/i_shot_count'] # shot index within record\r\n \r\n # Elevation quality flag: 0=valid, 1=not_valid\r\n d['use_flg'] = f['Data_40HZ/Quality/elev_use_flg']\r\n \r\n # Cloud contamination flag: 0=false, 1=true\r\n d['cloud_flg'] = f['Data_40HZ/Elevation_Flags/elv_cloud_flg']\r\n \r\n # Attitude quality flag: 0=good, 50=warning, 100=bad, 127=not_valid\r\n d['att_flg'] = f['Data_40HZ/Quality/sigma_att_flg']\r\n \r\n # Saturation Correction Flag:\r\n # 0=not_saturated, 1=inconsequential, 2=applicable 3=not_computed 4=not_applicable\r\n d['sat_flg'] = f['Data_40HZ/Quality/sat_corr_flg']\r\n \r\n # 1Hz Track\r\n track_01Hz = f['Data_1HZ/Geolocation/i_track'][:]\r\n \r\n # Get unique track numbers\r\n track_id = np.unique(track_01Hz)\r\n \r\n # Initialize vector\r\n track_40Hz = np.empty((0,1), dtype='int')\r\n \r\n # Construct 40 Hz track vector - IMPROVE! SLOW WAY OF DOING IT\r\n for i in range(len(track_01Hz)):\r\n \r\n # Create 40 Hz vector\r\n track_40Hz = np.vstack((track_40Hz, np.ones((40,1),dtype='int') * track_01Hz[i]))\r\n\r\n # Construct cycle vector\r\n #cycle = int(fname[fname.rfind('/') + 1:].split('_')[3]) * np.ones(track_40Hz.shape)\r\n \r\n # Induvidual track identifier\r\n #d['orbit'] = np.char.add(cycle.astype('int').astype('str'), track_40Hz.astype('int').astype('str')).astype('int')\r\n \r\n '''\r\n [2] For postprocessing: The RMS error converged to about 0.25 m after\r\n removing the data with the 5% highest waveform misfits in each campaign, so we\r\n adopted that as a data-editing threshold, retaining 95% of the original data.\r\n Also, filter out cloud-contaminated points using the 'cloud_flg' param.\r\n '''\r\n \r\n # Wrap longitude to -180/180 degrees\r\n d['lon'] = wrap_to_180(d['lon'][:])\r\n \r\n # Reproject coordinates\r\n lon, lat = d['lon'][:], d['lat'][:]\r\n \r\n # Converte to Stereographical coordinates\r\n (x, y) = pyproj.transform(projGeo, projGrd, lon, lat)\r\n \r\n # Test for mask\r\n if fmask != 'None':\r\n \r\n # Interpolation of grid to points for masking\r\n Ii = bilinear2d(Xm, Ym, Zm, x.T, y.T, order=1)\r\n \r\n # Set all NaN's to zero\r\n Ii[np.isnan(Ii)] = 0\r\n \r\n # Convert to boolean\r\n mask = Ii == 1\r\n \r\n else:\r\n \r\n # Select all data\r\n mask = np.ones(lat.shape, dtype='bool')\r\n \r\n # Parameters for selecting valid pts\r\n h_cor = d['h_cor'][:]\r\n h_sat = d['h_sat'][:]\r\n use_flg = d['use_flg'][:]\r\n sat_flg = d['sat_flg'][:]\r\n att_flg = d['att_flg'][:]\r\n num_pk = d['num_pk'][:]\r\n \r\n # Get index of valid pts\r\n idx, = np.where(\r\n (mask == 1) &\r\n (np.abs(h_cor) < 1e10) &\r\n (np.abs(h_sat) < 1e10) &\r\n (np.abs(lat) <= 90) &\r\n (np.abs(lon) <= 180) &\r\n (use_flg == 0) &\r\n (sat_flg <= 2) &\r\n (att_flg == 0) &\r\n (num_pk == 1))\r\n\r\n # Check if no valid pts\r\n if len(idx) == 0:\r\n print(('no valid pts:', fname))\r\n return\r\n\r\n # Keep only valid pts (and load to memory)\r\n for k in list(d.keys()):\r\n \r\n # Edit all the fields\r\n d[k] = d[k][:][idx]\r\n \r\n # Unapply tides (retide)\r\n d['h_cor'] += d['h_tide'] + d['h_load']\r\n \r\n # Apply saturation cor\r\n d['h_cor'] += d['h_sat']\r\n \r\n # Convert ellipsoid: h_TP -> h_WGS84\r\n d['h_cor'] -= d['h_ellip']\r\n \r\n 
#FIXME: THIS IS NOT ORBIT NUMBER (ONE ID FOR EACH TRACK)!!!\r\n # Combine rec_ndx and shot_count to uniquely identify each GLAS laser shot\r\n #d['orbit'] = np.char.add(d['rec_ndx'].astype('str'),\r\n # d['shot_count'].astype('str')).astype('int')\r\n\r\n # Compute correct time - add back 'year 2000 + 12 hours' in secs\r\n d['t_sec'] += (2000 * 365.25 * 24 * 3600.) + (12 * 3600.)\r\n \r\n # Compute time in decimal years\r\n d['t_year'] = d['t_sec'] / (365.25 * 24 * 3600.)\r\n \r\n # Compute time since 1970 - remove year 1970 in secs\r\n d['t_sec'] -= 1970 * 365.25 * 24 * 3600.\r\n \r\n # Change path and/or name of read file\r\n name, ext = os.path.splitext(os.path.basename(fname))\r\n \r\n # Clip track vector\r\n tracks_40Hz = track_40Hz[idx]\r\n \r\n # Compute unique tracks\r\n tracks = np.unique(tracks_40Hz)\r\n \r\n # Create orbit array\r\n d['orbit'] = np.ones(d['lat'][:].shape) * np.nan\r\n \r\n # Select fields to save\r\n out = ['orbit',\r\n 't_sec',\r\n 't_year',\r\n 'lon',\r\n 'lat',\r\n 'h_cor',\r\n 'h_dry',\r\n 'h_ellip',\r\n 'h_equi',\r\n 'h_gc',\r\n 'h_geo',\r\n 'h_sat',\r\n 'h_sol',\r\n 'h_wet',\r\n 'gain',\r\n 'misfit',\r\n 'tx_nrg',\r\n 'rec_nrg',\r\n 'cloud_flg',]\r\n\r\n # Loop through tracks\r\n for i in range(len(tracks)):\r\n \r\n # Get index of induvidual tracks\r\n ind = (tracks_40Hz == tracks[i]).reshape(d['lat'][:].shape)\r\n \r\n # Set track datum identifier for each track\r\n (dec,year)=np.modf(d['t_year'][ind][0])\r\n month = np.round(dec * 12, decimals=0)\r\n day = np.round(dec * 365.25, decimals=0)\r\n \r\n # Datum string\r\n date = str(int(year))+'_'+str(int(month)).zfill(2)+'_'+str(int(day)).zfill(3)\r\n \r\n # Separate tracks\r\n (i_asc, i_des) = track_type(d['t_sec'][ind], d['lat'][ind])\r\n \r\n # Save ascending track\r\n if len(d['lat'][ind][i_asc]) > 0:\r\n \r\n # Psudo orbit number generation\r\n d['orbit'][ind] = np.char.add(str(index), str(iter)).astype('int')\r\n \r\n # Orbit type identifier\r\n str_orb = 'READ_A'\r\n \r\n # Track number string\r\n str_trknum = '_'+str(int(iter)).zfill(6)+'_'\r\n\r\n # Fullname of output file\r\n outfile = os.path.join(outdir, name[0:7] + date + str_trknum + str_orb + ext)\r\n\r\n # Save data\r\n with h5py.File(outfile, 'w') as fout:\r\n [fout.create_dataset(k, data=d[k][ind][i_asc]) for k in out]\r\n\r\n # Update counter\r\n iter += 1\r\n print(('output file:', outfile))\r\n \r\n # Save descending track\r\n if len(d['lat'][ind][i_des]) > 0:\r\n \r\n # Psudo orbit number generation\r\n d['orbit'][ind] = np.char.add(str(index), str(iter)).astype('int')\r\n \r\n # Orbit type identifier\r\n str_orb = 'READ_D'\r\n \r\n # Track number string\r\n str_trknum = '_'+str(int(iter)).zfill(6)+'_'\r\n \r\n # Fullname of output file\r\n outfile = os.path.join(outdir, name[0:7] + date + str_trknum + str_orb + ext)\r\n \r\n # Save data\r\n with h5py.File(outfile, 'w') as fout:\r\n [fout.create_dataset(k, data=d[k][ind][i_des]) for k in out]\r\n \r\n # Update counter\r\n iter += 1\r\n\r\n print(('output file:', outfile))\r\n\r\n f.close()\r\n\r\nif njobs == 1:\r\n print('running sequential code ...')\r\n [main(f) for f in files]\r\n\r\nelse:\r\n print(('running parallel code (%d jobs) ...' % njobs))\r\n from joblib import Parallel, delayed\r\n Parallel(n_jobs=njobs, verbose=5)(delayed(main)(f) for f in files)\r\n",
"#!/usr/bin/env python\r\n# -*- coding: utf-8 -*-\r\n\"\"\"\r\n\r\nSurface topography detrending of satellite and airborne altimetry\r\n\r\nProgram computes surface elevation residuals, containing only the temporal\r\ncomponent, by removing the static topography.\r\n\r\nDepending on the number of observations in each solution one of three models\r\nare used to solve for the topography (1) Bi-quadratic, (2) Bilinear and (3)\r\nthe average.\r\n\r\nUser specifies a grid resolution, search radius and the number of\r\nrelocations that should be used to detrend the observations. Inside each\r\nsearch area the model is centered (relocated) to the centroid of the data,\r\ngiven the provided number of allowed relocations.\r\n\r\nGiven the possible overlap between solutions the solution with the smallest\r\nRMS is used and data of poorer quality overwritten.\r\n\r\nNotes:\r\n For mission in reference track configuration a dx = dy = 250 m and a\r\n search radius of 350 m is appropriate, and less than n=3 relocations is\r\n usually needed to center the data (depends on search radius)\r\n\r\n This program can be run in parallel to processes several files at the same\r\n time (tiles or missions etc).\r\n\r\n Good threshold (\"-m\" option) for switching from biquadratic to bilinear\r\n model is around 10-15 points.\r\n\r\nExample:\r\n\r\n python fittopo.py /path/to/files/*.h5 -v lon lat t_year h_cor \\\r\n -d 1 1 -r 1 -q 3 -i 5 -z 5 -m 15 -k 1 -t 2012 -j 3031 -n 2\r\n\r\nCredits:\r\n captoolkit - JPL Cryosphere Altimetry Processing Toolkit\r\n\r\n Johan Nilsson ([email protected])\r\n Fernando Paolo ([email protected])\r\n Alex Gardner ([email protected])\r\n\r\n Jet Propulsion Laboratory, California Institute of Technology\r\n\r\n\"\"\"\r\n\r\nimport warnings\r\nwarnings.filterwarnings(\"ignore\")\r\nimport os\r\nimport h5py\r\nimport pyproj\r\nimport argparse\r\nimport numpy as np\r\nimport statsmodels.api as sm\r\nfrom datetime import datetime\r\nfrom scipy.spatial import cKDTree\r\nfrom statsmodels.robust.scale import mad\r\n\r\n# Defaul grid spacing in x and y (km)\r\nDXY = [1, 1]\r\n\r\n# Defaul min and max search radius (km)\r\nRADIUS = [1]\r\n\r\n# Default min obs within search radius to compute solution\r\nMINOBS = 10\r\n\r\n# Default number of iterations for solution\r\nNITER = 5\r\n\r\n# Default ref time for solution: 'year' | 'fixed'=full mean t | 'variable'=cap mean t\r\nTREF = 'fixed'\r\n\r\n# Default projection EPSG for solution (AnIS=3031, GrIS=3413)\r\nPROJ = 3031\r\n\r\n# Default data columns (lon,lat,time,height,error,id)\r\nCOLS = ['lon', 'lat', 't_sec', 'h_cor', 'h_rms']\r\n\r\n# Default expression to transform time variable\r\nEXPR = None\r\n\r\n# Default order of the surface fit model\r\nORDER = 2\r\n\r\n# Default numbe rof obs. 
to change to mean solution\r\nMLIM = 10\r\n\r\n# Default njobs for parallel processing of *tiles*\r\nNJOBS = 1\r\n\r\n# Maximum slope allowed from the solution, replaced by SLOPE\r\nSLOPE = 1.0\r\n\r\n# Output description of solution\r\ndescription = ('Compute surface elevation residuals '\r\n 'from satellite/airborne altimetry.')\r\n\r\n# Define command-line arguments\r\nparser = argparse.ArgumentParser(description=description)\r\n\r\nparser.add_argument(\r\n 'files', metavar='file', type=str, nargs='+',\r\n help='file(s) to process (HDF5)')\r\n\r\nparser.add_argument(\r\n '-d', metavar=('dx','dy'), dest='dxy', type=float, nargs=2,\r\n help=('spatial resolution for grid-solution (deg or km)'),\r\n default=DXY,)\r\n\r\nparser.add_argument(\r\n '-r', metavar=('radius'), dest='radius', type=float, nargs=1,\r\n help=('min and max search radius (km)'),\r\n default=RADIUS,)\r\n\r\nparser.add_argument(\r\n '-q', metavar=('n_reloc'), dest='nreloc', type=int, nargs=1,\r\n help=('number of relocations for search radius'),\r\n default=[0],)\r\n\r\nparser.add_argument(\r\n '-i', metavar='n_iter', dest='niter', type=int, nargs=1,\r\n help=('maximum number of iterations for model solution'),\r\n default=[NITER],)\r\n\r\nparser.add_argument(\r\n '-z', metavar='min_obs', dest='minobs', type=int, nargs=1,\r\n help=('minimum obs to compute solution'),\r\n default=[MINOBS],)\r\n\r\nparser.add_argument(\r\n '-m', metavar=('mod_lim'), dest='mlim', type=int, nargs=1,\r\n help=('minimum obs for higher order models'),\r\n default=[MLIM],)\r\n\r\nparser.add_argument(\r\n '-k', metavar=('mod_order'), dest='order', type=int, nargs=1,\r\n help=('order of the surface fit model: 1=lin or 2=quad'),\r\n default=[ORDER],)\r\n\r\nparser.add_argument(\r\n '-t', metavar=('ref_time'), dest='tref', type=str, nargs=1,\r\n help=('time to reference the solution to: year|fixed|variable'),\r\n default=[TREF],)\r\n\r\nparser.add_argument(\r\n '-j', metavar=('epsg_num'), dest='proj', type=str, nargs=1,\r\n help=('projection: EPSG number (AnIS=3031, GrIS=3413)'),\r\n default=[str(PROJ)],)\r\n\r\nparser.add_argument(\r\n '-v', metavar=('x','y','t','h'), dest='vnames', type=str, nargs=4,\r\n help=('name of lon/lat/t/h in the HDF5'),\r\n default=COLS,)\r\n\r\nparser.add_argument(\r\n '-x', metavar=('expr'), dest='expr', type=str, nargs=1,\r\n help=\"expression to apply to time (e.g. 
't + 2000'), optional\",\r\n default=[EXPR],)\r\n\r\nparser.add_argument(\r\n '-n', metavar=('n_jobs'), dest='njobs', type=int, nargs=1,\r\n help=\"for parallel processing of multiple tiles, optional\",\r\n default=[NJOBS],)\r\n\r\nparser.add_argument(\r\n '-s', metavar=('slope_lim'), dest='slplim', type=float, nargs=1,\r\n help=\"slope limit for x/y direction (deg)\",\r\n default=[SLOPE],)\r\n\r\nparser.add_argument(\r\n '-p', dest='pshow', action='store_true',\r\n help=('print diagnostic information to terminal'),\r\n default=False)\r\n\r\nargs = parser.parse_args()\r\n\r\n# Pass arguments\r\nfiles = args.files # input file(s)\r\ndx = args.dxy[0] * 1e3 # grid spacing in x (km -> m)\r\ndy = args.dxy[1] * 1e3 # grid spacing in y (km -> m)\r\ndmax = args.radius[0] * 1e3 # min search radius (km -> m)\r\nnreloc = args.nreloc[0] # number of relocations\r\nnlim = args.minobs[0] # min obs for solution\r\nmlim = args.mlim[0] # minimum value for parametric verusu men model\r\nniter = args.niter[0] # number of iterations for solution\r\ntref_ = args.tref[0] # ref time for solution (d.yr)\r\nproj = args.proj[0] # EPSG number (GrIS=3413, AnIS=3031)\r\nicol = args.vnames[:] # data input cols (x,y,t,h,err,id) [4]\r\nexpr = args.expr[0] # expression to transform time\r\nnjobs = args.njobs[0] # for parallel processing of tiles\r\norder = args.order[0] # max order of the surface fit model\r\nslplim = args.slplim[0] # max allowed surface slope in deg.\r\ndiag = args.pshow # print diagnostics to terminal\r\n\r\nprint('parameters:')\r\nfor p in list(vars(args).items()):\r\n print(p)\r\n\r\ndef make_grid(xmin, xmax, ymin, ymax, dx, dy):\r\n \"\"\"Construct output grid-coordinates.\"\"\"\r\n\r\n # Setup grid dimensions\r\n Nn = int((np.abs(ymax - ymin)) / dy) + 1\r\n Ne = int((np.abs(xmax - xmin)) / dx) + 1\r\n\r\n # Initiate x/y vectors for grid\r\n x_i = np.linspace(xmin, xmax, num=Ne)\r\n y_i = np.linspace(ymin, ymax, num=Nn)\r\n\r\n return np.meshgrid(x_i, y_i)\r\n\r\n\r\ndef transform_coord(proj1, proj2, x, y):\r\n \"\"\"Transform coordinates from proj1 to proj2 (EPSG num).\"\"\"\r\n\r\n # Set full EPSG projection strings\r\n proj1 = pyproj.Proj(\"+init=EPSG:\"+proj1)\r\n proj2 = pyproj.Proj(\"+init=EPSG:\"+proj2)\r\n\r\n # Convert coordinates\r\n return pyproj.transform(proj1, proj2, x, y)\r\n\r\n\r\ndef mad_std(x, axis=None):\r\n \"\"\" Robust standard deviation (using MAD). \"\"\"\r\n return 1.4826 * np.nanmedian(np.abs(x - np.nanmedian(x, axis)), axis)\r\n\r\n\r\ndef get_radius_idx(x, y, x0, y0, r, Tree, n_reloc=0,\r\n min_months=24, max_reloc=3, time=None, height=None):\r\n \"\"\" Get indices of all data points inside radius. 
\"\"\"\r\n\r\n # Query the Tree from the center of cell\r\n idx = Tree.query_ball_point((x0, y0), r)\r\n\r\n #print 'query #: 1 ( first search )'\r\n\r\n if len(idx) < 2:\r\n return idx\r\n\r\n if time is not None:\r\n n_reloc = max_reloc\r\n\r\n if n_reloc < 1:\r\n return idx\r\n\r\n # Relocate center of search radius and query again\r\n for k in range(n_reloc):\r\n\r\n # Compute new search location => relocate initial center\r\n x0_new, y0_new = np.median(x[idx]), np.median(y[idx])\r\n\r\n # Compute relocation distance\r\n reloc_dist = np.hypot(x0_new-x0, y0_new-y0)\r\n\r\n # Do not allow total relocation to be larger than the search radius\r\n if reloc_dist > r:\r\n break\r\n\r\n #print 'query #:', k+2, '( reloc #:', k+1, ')'\r\n #print 'relocation dist:', reloc_dist\r\n\r\n idx = Tree.query_ball_point((x0_new, y0_new), r)\r\n\r\n # If max number of relocations reached, exit\r\n if n_reloc == k+1:\r\n break\r\n\r\n # If time provided, keep relocating until time-coverage is sufficient\r\n if time is not None:\r\n\r\n t_b, x_b = binning(time[idx], height[idx], dx=1/12., window=1/12.)[:2]\r\n\r\n print(('months #:', np.sum(~np.isnan(x_b))))\r\n\r\n # If sufficient coverage, exit\r\n if np.sum(~np.isnan(x_b)) >= min_months:\r\n break\r\n\r\n return idx\r\n\r\n\r\ndef rlsq(x, y, n=1):\r\n \"\"\" Fit a robust polynomial of n:th deg.\"\"\"\r\n\r\n # Test solution\r\n if len(x[~np.isnan(y)]) <= (n + 1):\r\n\r\n if n == 0:\r\n p = np.nan\r\n s = np.nan\r\n else:\r\n p = np.zeros((1, n)) * np.nan\r\n s = np.nan\r\n\r\n return p, s\r\n\r\n # Empty array\r\n A = np.empty((0, len(x)))\r\n\r\n # Create counter\r\n i = 0\r\n\r\n # Determine if we need centering\r\n if n > 1:\r\n # Center x-axis\r\n x -= np.nanmean(x)\r\n\r\n # Special case\r\n if n == 0:\r\n\r\n # Mean offset\r\n A = np.ones(len(x))\r\n\r\n else:\r\n\r\n # Make design matrix\r\n while i <= n:\r\n # Stack coefficients\r\n A = np.vstack((A, x ** i))\r\n\r\n # Update counter\r\n i += 1\r\n\r\n # Test to see if we can solve the system\r\n try:\r\n\r\n # Robust least squares fit\r\n fit = sm.RLM(y, A.T, missing='drop').fit(maxiter=5, tol=0.001)\r\n\r\n # polynomial coefficients\r\n p = fit.params\r\n\r\n # RMS of the residuals\r\n s = mad_std(fit.resid)\r\n\r\n except:\r\n\r\n # Set output to NaN\r\n if n == 0:\r\n p = np.nan\r\n s = np.nan\r\n else:\r\n p = np.zeros((1, n)) * np.nan\r\n s = np.nan\r\n\r\n return p[::-1], s\r\n\r\ndef binning(x, y, xmin=None, xmax=None, dx=1 / 12.,\r\n window=3 / 12., interp=False, median=False):\r\n \"\"\"Time-series binning (w/overlapping windows).\r\n\r\n Args:\r\n x,y: time and value of time series.\r\n xmin,xmax: time span of returned binned series.\r\n dx: time step of binning.\r\n window: size of binning window.\r\n interp: interpolate binned values to original x points.\r\n \"\"\"\r\n if xmin is None:\r\n xmin = np.nanmin(x)\r\n if xmax is None:\r\n xmax = np.nanmax(x)\r\n\r\n steps = np.arange(xmin, xmax, dx) # time steps\r\n bins = [(ti, ti + window) for ti in steps] # bin limits\r\n\r\n N = len(bins)\r\n yb = np.full(N, np.nan)\r\n xb = np.full(N, np.nan)\r\n eb = np.full(N, np.nan)\r\n nb = np.full(N, np.nan)\r\n sb = np.full(N, np.nan)\r\n\r\n for i in range(N):\r\n\r\n t1, t2 = bins[i]\r\n idx, = np.where((x >= t1) & (x <= t2))\r\n\r\n if len(idx) == 0:\r\n xb[i] = 0.5 * (t1 + t2)\r\n continue\r\n\r\n ybv = y[idx]\r\n\r\n if median:\r\n yb[i] = np.nanmedian(ybv)\r\n else:\r\n yb[i] = np.nanmean(ybv)\r\n\r\n xb[i] = 0.5 * (t1 + t2)\r\n eb[i] = mad_std(ybv)\r\n nb[i] = 
np.sum(~np.isnan(ybv))\r\n sb[i] = np.sum(ybv)\r\n\r\n if interp:\r\n try:\r\n yb = np.interp(x, xb, yb)\r\n eb = np.interp(x, xb, eb)\r\n sb = np.interp(x, xb, sb)\r\n xb = x\r\n except:\r\n pass\r\n\r\n return xb, yb, eb, nb, sb\r\n\r\n# Main function for computing parameters\r\ndef main(ifile, n=''):\r\n\r\n # Check for empty file\r\n if os.stat(ifile).st_size == 0:\r\n print('input file is empty!')\r\n return\r\n\r\n # Start timing of script\r\n startTime = datetime.now()\r\n\r\n print('loading data ...')\r\n\r\n # Determine input file type\r\n if not ifile.endswith(('.h5', '.H5', '.hdf', '.hdf5')):\r\n print(\"Input file must be in hdf5-format\")\r\n return\r\n\r\n # Input variables\r\n xvar, yvar, tvar, zvar = icol\r\n\r\n # Load all 1d variables needed\r\n with h5py.File(ifile, 'r') as fi:\r\n\r\n lon = fi[xvar][:]\r\n lat = fi[yvar][:]\r\n time = fi[tvar][:]\r\n height = fi[zvar][:]\r\n\r\n # EPSG number for lon/lat proj\r\n projGeo = '4326'\r\n\r\n # EPSG number for grid proj\r\n projGrd = proj\r\n\r\n print('converting lon/lat to x/y ...')\r\n\r\n # Convert into stereographic coordinates\r\n (x, y) = transform_coord(projGeo, projGrd, lon, lat)\r\n\r\n # Get bbox from data\r\n (xmin, xmax, ymin, ymax) = x.min(), x.max(), y.min(), y.max()\r\n\r\n # Apply transformation to time\r\n if expr: time = eval(expr.replace('t', 'time'))\r\n\r\n # Overall (fixed) mean time\r\n t_mean = np.round(np.nanmean(time), 2)\r\n\r\n # Grid solution - defined by nodes\r\n (Xi, Yi) = make_grid(xmin, xmax, ymin, ymax, dx, dy)\r\n\r\n # Flatten prediction grid\r\n xi = Xi.ravel()\r\n yi = Yi.ravel()\r\n\r\n # Zip data to vector\r\n coord = list(zip(x.ravel(), y.ravel()))\r\n\r\n # Construct cKDTree\r\n print('building the k-d tree ...')\r\n Tree = cKDTree(coord)\r\n\r\n # Create output containers\r\n dh_topo = np.full(height.shape, np.nan)\r\n de_topo = np.full(height.shape, 999999.)\r\n mi_topo = np.full(height.shape, np.nan)\r\n hm_topo = np.full(height.shape, np.nan)\r\n sx_topo = np.full(height.shape, np.nan)\r\n sy_topo = np.full(height.shape, np.nan)\r\n tr_topo = np.full(height.shape, np.nan)\r\n\r\n # Set slope limit\r\n slp_lim = np.tan(np.deg2rad(slplim))\r\n\r\n # Enter prediction loop\r\n print('predicting values ...')\r\n for i in range(len(xi)):\r\n\r\n x0, y0 = xi[i], yi[i]\r\n\r\n # Get indexes of data within search radius or cell bbox\r\n idx = get_radius_idx(\r\n x, y, x0, y0, dmax, Tree, n_reloc=nreloc,\r\n min_months=18, max_reloc=3, time=None, height=None)\r\n\r\n # Length of data in search cap\r\n nobs = len(x[idx])\r\n\r\n # Check data density\r\n if (nobs < nlim): continue\r\n\r\n # Parameters for model-solution\r\n xcap = x[idx]\r\n ycap = y[idx]\r\n tcap = time[idx]\r\n hcap = height[idx]\r\n\r\n # Copy original height vector\r\n h_org = hcap.copy()\r\n\r\n # Centroid node\r\n xc = np.median(xcap)\r\n yc = np.median(ycap)\r\n\r\n # If reference time not given, use fixed or variable mean\r\n if tref_ == 'fixed':\r\n tref = t_mean\r\n elif tref_ == 'variable':\r\n tref = np.nanmean(tcap)\r\n else:\r\n tref = np.float(tref_)\r\n\r\n # Design matrix elements\r\n c0 = np.ones(len(xcap))\r\n c1 = xcap - xc\r\n c2 = ycap - yc\r\n c3 = c1 * c2\r\n c4 = c1 * c1\r\n c5 = c2 * c2\r\n c6 = tcap - tref\r\n\r\n # Length before editing\r\n nb = len(hcap)\r\n\r\n # Determine model order\r\n if order == 2 and nb >= mlim * 2:\r\n\r\n # Biquadratic surface and linear trend\r\n Acap = np.vstack((c0, c1, c2, c3, c4, c5, c6)).T\r\n\r\n # Model identifier\r\n mi = 1\r\n\r\n # Set model order\r\n elif 
nb >= mlim:\r\n\r\n # Bilinear surface and linear trend\r\n Acap = np.vstack((c0, c1, c2, c6)).T\r\n\r\n # Model identifier\r\n mi = 2\r\n\r\n else:\r\n\r\n # Model identifier\r\n mi = 3\r\n\r\n # Modelled topography\r\n if mi == 1:\r\n\r\n # Construct model object\r\n linear_model = sm.RLM(hcap, Acap, M=sm.robust.norms.HuberT(), missing='drop')\r\n\r\n # Fit the model to the data,\r\n linear_model_fit = linear_model.fit(maxiter=niter, tol=0.001)\r\n\r\n # Coefficients\r\n Cm = linear_model_fit.params\r\n\r\n # Biquadratic surface\r\n h_model = np.dot(np.vstack((c0, c1, c2, c3, c4, c5)).T, Cm[[0, 1, 2, 3, 4, 5]])\r\n\r\n # Compute along and across track slope\r\n sx = np.sign(Cm[1]) * slp_lim if np.abs(Cm[1]) > slp_lim else Cm[1]\r\n sy = np.sign(Cm[2]) * slp_lim if np.abs(Cm[2]) > slp_lim else Cm[2]\r\n\r\n # Mean height\r\n h_avg = Cm[0]\r\n\r\n elif mi == 2:\r\n\r\n # Construct model object\r\n linear_model = sm.RLM(hcap, Acap, M=sm.robust.norms.HuberT(), missing='drop')\r\n\r\n # Fit the model to the data,\r\n linear_model_fit = linear_model.fit(maxiter=niter, tol=0.001)\r\n\r\n # Coefficients\r\n Cm = linear_model_fit.params\r\n\r\n # Bilinear surface\r\n h_model = np.dot(np.vstack((c0, c1, c2)).T, Cm[[0, 1, 2]])\r\n\r\n # Compute along and across track slope\r\n sx = np.sign(Cm[1]) * slp_lim if np.abs(Cm[1]) > slp_lim else Cm[1]\r\n sy = np.sign(Cm[2]) * slp_lim if np.abs(Cm[2]) > slp_lim else Cm[2]\r\n\r\n # Mean height\r\n h_avg = Cm[0]\r\n\r\n else:\r\n\r\n # Mean surface from median\r\n h_avg = np.median(hcap)\r\n\r\n # Compute distance estimates from centroid\r\n s_dx = (xcap - xc) + 1e-3\r\n s_dy = (ycap - yc) + 1e-3\r\n\r\n # Center surface height\r\n dh_i = h_org - h_avg\r\n\r\n # Compute along-track slope\r\n px, rms_x = rlsq(s_dx, dh_i, 1)\r\n py, rms_x = rlsq(s_dy, dh_i, 1)\r\n\r\n # Set along-track slope\r\n s_x = 0 if np.isnan(px[0]) else px[0]\r\n\r\n # Set across-track slope to zero\r\n s_y = 0 if np.isnan(py[0]) else py[0]\r\n\r\n # Compute along and across track slope\r\n sx = np.sign(s_x) * slp_lim if np.abs(s_x) > slp_lim else s_x\r\n sy = np.sign(s_y) * slp_lim if np.abs(s_y) > slp_lim else s_y\r\n\r\n # Compute the surface height correction\r\n h_model = h_avg + (sx * s_dx) + (sy * s_dy)\r\n\r\n # Compute full slope\r\n slope = np.arctan(np.sqrt(sx**2 + sy**2)) * (180 / np.pi)\r\n\r\n # Compute residual\r\n dh = h_org - h_model\r\n\r\n # Number of observations\r\n na = len(dh)\r\n\r\n # RMSE of the residuals\r\n RMSE = mad_std(dh)\r\n\r\n # Overwrite errors\r\n iup = RMSE < de_topo[idx]\r\n\r\n # Create temporary variables\r\n dh_cap = dh_topo[idx].copy()\r\n de_cap = de_topo[idx].copy()\r\n hm_cap = hm_topo[idx].copy()\r\n mi_cap = mi_topo[idx].copy()\r\n tr_cap = tr_topo[idx].copy()\r\n\r\n # Update variables\r\n dh_cap[iup] = dh[iup]\r\n de_cap[iup] = RMSE\r\n hm_cap[iup] = h_avg\r\n mi_cap[iup] = mi\r\n tr_cap[iup] = tref\r\n\r\n # Update with current solution\r\n dh_topo[idx] = dh_cap\r\n de_topo[idx] = de_cap\r\n hm_topo[idx] = hm_cap\r\n mi_topo[idx] = mi_cap\r\n tr_topo[idx] = tr_cap\r\n sx_topo[idx] = np.arctan(sx) * (180 / np.pi)\r\n sy_topo[idx] = np.arctan(sy) * (180 / np.pi)\r\n\r\n # Print progress (every N iterations)\r\n if (i % 100) == 0 and diag is True:\r\n\r\n # Print message every i:th solution\r\n print(('%s %i %s %2i %s %i %s %03d %s %.3f %s %.3f' % \\\r\n ('#',i,'/',len(xi),'Model:',mi,'Nobs:',nb,'Slope:',\\\r\n np.around(slope,3),'Residual:',np.around(mad_std(dh),3))))\r\n\r\n # Print percentage of not filled\r\n print(('Total NaNs 
(percent): %.2f' % \\\r\n (100 * float(len(dh_topo[np.isnan(dh_topo)])) / float(len(dh_topo)))))\r\n\r\n # Print percentage of each model\r\n one = np.sum(mi_topo == 1)\r\n two = np.sum(mi_topo == 2)\r\n tre = np.sum(mi_topo == 3)\r\n N = float(len(mi_topo))\r\n\r\n print(('Model types (percent): 1 = %.2f, 2 = %.2f, 3 = %.2f' % \\\r\n (100 * one/N, 100 * two/N, 100 * tre/N)))\r\n\r\n # Append new columns to original file\r\n with h5py.File(ifile, 'a') as fi:\r\n\r\n # Check if we have variables in file\r\n try:\r\n\r\n # Save variables\r\n fi['h_res'] = dh_topo\r\n fi['h_mod'] = hm_topo\r\n fi['e_res'] = de_topo\r\n fi['m_deg'] = mi_topo\r\n fi['t_ref'] = tr_topo\r\n fi['slp_x'] = sx_topo\r\n fi['slp_y'] = sy_topo\r\n\r\n except:\r\n\r\n # Update variables\r\n fi['h_res'][:] = dh_topo\r\n fi['h_mod'][:] = hm_topo\r\n fi['e_res'][:] = de_topo\r\n fi['m_deg'][:] = mi_topo\r\n fi['t_ref'][:] = tr_topo\r\n fi['slp_x'][:] = sx_topo\r\n fi['slp_y'][:] = sy_topo\r\n\r\n # Rename file\r\n if ifile.find('TOPO') < 0:\r\n os.rename(ifile, ifile.replace('.h5', '_TOPO.h5'))\r\n\r\n # Print some statistics\r\n print(('*' * 75))\r\n print(('%s %s %.5f %s %.2f %s %.2f %s %.2f %s %.2f' % \\\r\n ('Statistics',\r\n 'Mean:', np.nanmedian(dh_topo),\r\n 'Std.dev:', mad_std(dh_topo),\r\n 'Min:', np.nanmin(dh_topo),\r\n 'Max:', np.nanmax(dh_topo),\r\n 'RMSE:', np.nanmedian(de_topo[dh_topo!=999999]),)))\r\n print(('*' * 75))\r\n print('')\r\n\r\n # Print execution time of algorithm\r\n print(('Execution time: '+ str(datetime.now()-startTime)))\r\n\r\nif njobs == 1:\r\n print('running sequential code ...')\r\n [main(f, n) for n,f in enumerate(files)]\r\n\r\nelse:\r\n print(('running parallel code (%d jobs) ...' % njobs))\r\n from joblib import Parallel, delayed\r\n Parallel(n_jobs=njobs, verbose=5)(delayed(main)(f, n) for n, f in enumerate(files))\r\n\r\n '''\r\n from dask import compute, delayed\r\n from distributed import Client, LocalCluster\r\n\r\n cluster = LocalCluster(n_workers=8, threads_per_worker=None,\r\n scheduler_port=8002, diagnostics_port=8003)\r\n client = Client(cluster) # connect to cluster\r\n print client\r\n\r\n #values = [delayed(main)(f) for f in files]\r\n #results = compute(*values, get=client.get)\r\n values = [client.submit(main, f) for f in files]\r\n results = client.gather(values)\r\n '''\r\n",
"# -*- coding: utf-8 -*-\n# TODO: Separate code into RACMO and ERA5\n\"\"\"\n1. Convert RACMO SMB to dH/dt_smb, same shape as full_cube.\n2. Convert ERA5 precip+evap+runoff to dH/dt_smb, same shape as full_cube.\n\nUnits:\n RACMO:\n SMB [kg/m2/month] # ignore the Meatadata!!!\n time [days since 1950-01-01 00:00:00.0], daily averaged values\n ERA5:\n Precip/Evap/Runoff [m/m2/day] # ignore the Meatadata!!!\n time [hours since 1900-01-01 00:00:00.0], monthly averaged values\n\nRecipe:\n - Convert time: hours -> days -> JD -> years\n - Convert units: 1/month * 12 -> 1/yr / 1/day * 365.2425 -> 1/yr\n - Convert mass: mass -> thickness [m of ice eq.] (assume solid ice)\n - Smooth w/5-month running mean\n - Regrid height-cube time series\n\nNotes:\n Conversion from Mass to equivalent Height if no Firn Air has been removed\n (i.e. SMB has the volume of snow/firn):\n\n rho_w = 1028. # density of ocean water (kg/m3)\n rho_i = 450. # density of snow/firn (400-550 kg/m3)\n Z1 /= rho_i # mass rate dM/dt (kg/yr) => thickness of snow/firn dh/dt (m/yr)\n Z1 *= (rho_w - rho_i) / rho_w # buoyancy compensation (for floating ice)\n\n\"\"\"\nimport sys\nimport warnings\n\nimport h5py\nimport pyproj\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom astropy.time import Time\nfrom netCDF4 import Dataset\nfrom scipy.interpolate import griddata\nfrom scipy.signal import savgol_filter\n\nwarnings.filterwarnings(\"ignore\")\n\n\n# import pyresample as pr\n\n# === EDIT HERE =============================================\n\nfsmb = \"/Users/paolofer/data/smb/racmo/SMB_ANT27_monthly_RACMO2.3p2_197901_201612.nc\"\nfera5 = \"/Users/paolofer/data/era5/adaptor.mars.internal-1559246138.871853-28867-12-aa84fcda-6a01-4161-9e59-ea415d26c27b.nc\"\nfcube = \"cube_full/FULL_CUBE.h5\"\n\n# Default variable names in the SMB NetCDF file\ntvar = \"time\"\nxvar = \"lon\"\nyvar = \"lat\"\nzvar = \"smb\"\n\ntcube = \"t\"\nxcube = \"x\"\nycube = \"y\"\n\nsaveas = \"smb_era5\"\n\n# Averaging window (months)\nwindow = 5\n\n# Density of solid ice for converting mass -> thickness\nrho_i = 917.0\n\n# Time interval for subsetting\nt1, t2 = 1991.5, 2019.0\n\n# === END EDIT ==============================================\n\n\ndef day2dyr(time, since=\"1950-01-01 00:00:00\"):\n \"\"\" Convert days since epoch to decimal years. 
\"\"\"\n t_ref = Time(since, scale=\"utc\").jd # convert days to jd\n\n return Time(t_ref + time, format=\"jd\", scale=\"utc\").decimalyear\n\n\ndef transform_coord(proj1, proj2, x, y):\n \"\"\"\n Transform coordinates from proj1 to proj2 (EPSG num).\n\n Examples EPSG proj:\n Geodetic (lon/lat): 4326\n Polar Stereo AnIS (x/y): 3031\n Polar Stereo GrIS (x/y): 3413\n \"\"\"\n # Set full EPSG projection strings\n proj1 = pyproj.Proj(\"+init=EPSG:\" + str(proj1))\n proj2 = pyproj.Proj(\"+init=EPSG:\" + str(proj2))\n # proj2 = pyproj.Proj(\"+proj=ob_tran +o_proj=latlon +o_lat_p=-180.0 +lon_0=10.0\") # rotated lon/lat\n\n return pyproj.transform(proj1, proj2, x, y)\n\n\ndef sgolay1d(h, window=3, order=1, deriv=0, dt=1.0, mode=\"nearest\", time=None):\n \"\"\"Savitztky-Golay filter with support for NaNs\n\n If time is given, interpolate NaNs otherwise pad w/zeros.\n\n dt is spacing between samples.\n \"\"\"\n h2 = h.copy()\n (ii,) = np.where(np.isnan(h2))\n (jj,) = np.where(np.isfinite(h2))\n\n if len(ii) > 0 and time is not None:\n h2[ii] = np.interp(time[ii], time[jj], h2[jj])\n elif len(ii) > 0:\n h2[ii] = 0\n else:\n pass\n h2 = savgol_filter(h2, window, order, deriv, delta=dt, mode=mode)\n\n return h2\n\n\ndef running_mean_cube(cube, window=3, axis=0):\n half = int(window / 2.0)\n\n for k in range(half, cube.shape[axis] - half + 1):\n if axis == 0:\n cube[k] = np.nanmean(cube[k - half : k + half + 1], axis=axis)\n elif axis == 2:\n cube[:, :, k] = np.nanmean(cube[:, :, k - half : k + half + 1], axis=axis)\n else:\n print(\"averaging axis must be 0 or 2\")\n\n return cube\n\n\ndef regrid_cube(x1, y1, z1, x2, y2):\n \"\"\"\n Regrid Z1(t,y,x) onto Z2(y,x,t), keeping the original time.\n\n x1/y1/x2/y2 are 2D arrays.\n z1 is a 3D array.\n \"\"\"\n z2 = np.full((x2.shape[0], x2.shape[1], z1.shape[0]), np.nan) # -> (y,x,t)\n\n for k in range(z1.shape[0]):\n print(\"regridding:\", k)\n z2[:, :, k] = griddata(\n (x1.ravel(), y1.ravel()),\n z1[k, :, :].ravel(),\n (x2, y2),\n fill_value=np.nan,\n method=\"linear\",\n )\n\n return z2\n\n\ndef h5read(ifile, vnames):\n with h5py.File(ifile, \"r\") as f:\n return [f[v][:] for v in vnames]\n\n\ndef ncread(ifile, vnames):\n ds = Dataset(ifile, \"r\") # NetCDF4\n d = ds.variables\n\n return [d[v][:] for v in vnames]\n\n\nifile = sys.argv[1] if sys.argv[1:] else fsmb\n\nt_cube, x_cube, y_cube, cube, adv, div = h5read(\n fcube, [tcube, xcube, ycube, \"H_thick\", \"advHv\", \"divHv\"]\n)\n\nif 0:\n print(\"loading RACMO file ...\")\n t_smb, lon_smb, lat_smb, smb = ncread(fsmb, [tvar, xvar, yvar, zvar])\n\n # Reduce SMB dimensions: 4 -> 3\n smb = smb[:, 0, :, :]\n\n # Convert \"days since 1950-01-01 00:00:00\" -> year\n t_smb = day2dyr(t_smb, since=\"1950-01-01 00:00:00\")\n\n # Convert M/mo -> H/yr\n smb *= 12.0 # [kg/m2/mo] -> [kg/m2/yr]\n smb /= rho_i # dM/dt [kg/yr] -> dH/dt [m/yr]\n\nelse:\n print(\"loading ERA5 file ...\")\n t_smb, lon_smb, lat_smb, precip, evap, runoff = ncread(\n fera5, [\"time\", \"longitude\", \"latitude\", \"tp\", \"e\", \"ro\"]\n )\n\n # Subset\n lat_smb, precip, evap, runoff = (\n lat_smb[600:],\n precip[:, 600:, :],\n evap[:, 600:, :],\n runoff[:, 600:, :],\n )\n\n # Get SMB\n smb = precip - evap - runoff # [m.w.eq./mo]\n\n # Convert \"hours since 1900-01-01 00:00:00.0\" -> years\n t_smb = day2dyr(t_smb / 24.0, since=\"1900-01-01 00:00:00\")\n\n # Convert m.water.eq/day -> m.ice.eq/yr\n smb *= 365.2425 # [m/m2/day] -> [m/m2/yr]\n smb *= 1000 / rho_i # (rho_w/rho_i) * H_w = H_i [m.w.e/yr] -> [m.i.e/yr]\n\n\n# Subset SMB in 
time\n(kk,) = np.where((t_smb > t1) & (t_smb < t2))\nt_smb, smb = t_smb[kk], smb[kk, :, :]\n\nif np.ndim(lon_smb) == 1:\n lon_smb, lat_smb = np.meshgrid(lon_smb, lat_smb) # 1d -> 2d\n\n# Transform geodetic -> polar stereo\n# NOTE: Interp on x/y comes better than on lon/lat!\nX_smb, Y_smb = transform_coord(4326, 3031, lon_smb, lat_smb) # 2d\nX_cube, Y_cube = np.meshgrid(x_cube, y_cube) # 2d\n\nif 0:\n print(\"Time averaging ...\")\n smb = running_mean_cube(smb, window, axis=0)\n\n # Regrid in time\n import xarray as xr\n\n da_smb = xr.DataArray(\n smb, [(\"t\", t_smb), (\"y\", range(smb.shape[1])), (\"x\", range(smb.shape[2]))]\n )\n\n print(\"regridding in time ...\")\n smb = da_smb.interp(t=t_cube).values\n\n print(\"regridding in space ...\")\n smb = regrid_cube(X_smb, Y_smb, smb, X_cube, Y_cube)\n\n\n\"\"\"\nprint(smb.shape)\nplt.pcolormesh(X_cube, Y_cube, smb[:,:,2])\n#plt.matshow(smb[:,:,2])\nplt.show()\n\"\"\"\n\n# --- Test ----------------------------------------------\n\n# Subset region for testing (inclusive)\n# Do not load full data into memory!\n\nif 1:\n\n # Load\n with h5py.File(\"SMB_RACMO.h5\", \"r\") as f:\n smb_racmo = f[\"smb\"][:]\n\n with h5py.File(\"SMB_ERA5.h5\", \"r\") as f:\n smb_era5 = f[\"smb\"][:]\n\n # Plot\n import pandas as pd\n import matplotlib.pyplot as plt\n\n t = t_cube\n\n # i, j = 836, 368 # PIG\n # i, j = 366, 147 # Larsen C\n i, j = 510, 1600 # Amery\n\n mask = np.isnan(cube)\n smb_racmo[mask] = np.nan\n smb_era5[mask] = np.nan\n\n p = smb_racmo[i, j, :]\n p2 = smb_era5[i, j, :]\n H = cube[i, j, :]\n advec = adv[i, j, :]\n diver = div[i, j, :]\n\n \"\"\"\n plt.matshow(smb_racmo[:,:,10])\n plt.matshow(smb_era5[:,:,10])\n plt.matshow(cube[:,:,10])\n plt.show()\n sys.exit()\n \"\"\"\n\n p -= np.nanmean(p)\n p2 -= np.nanmean(p2)\n\n dHdt = sgolay1d(H, window=5, order=1, deriv=1, dt=t[1] - t[0], time=None)\n\n # dHdt += advec + diver\n dHdt -= np.nanmean(dHdt)\n\n print(\"SMB mean rate (m/yr):\", np.nanmean(p))\n\n plt.figure(figsize=(14, 5))\n plt.subplot(211)\n plt.plot(t, p, linewidth=2, label=\"RACMO\")\n plt.plot(t, p2, linewidth=2, label=\"ERA5\")\n plt.plot(t, dHdt, linewidth=2, label=\"dH/dt\")\n plt.legend()\n plt.title(\"SMB - Amery\")\n plt.ylabel(\"meters of ice eq / yr\")\n plt.subplot(212)\n plt.plot(\n t,\n dHdt - p,\n linewidth=2,\n label=\"dH/dt-RACMO (std=%.2f)\" % np.nanstd(dHdt[:-5] - p[:-5]),\n )\n plt.plot(\n t,\n dHdt - p2,\n linewidth=2,\n label=\"dH/dt-ERA5 (std=%.2f)\" % np.nanstd(dHdt[:-5] - p2[:-5]),\n )\n plt.legend()\n plt.ylabel(\"meters / yr\")\n\n plt.show()\n sys.exit()\n\n\n# NOTE: All cubes should be saved in the original h file\n\nif 0:\n # Save\n with h5py.File(\"SMB_ERA5.h5\", \"w\") as f:\n f[\"smb\"] = smb\n f[\"x\"] = x_cube\n f[\"y\"] = y_cube\n f[\"t\"] = t_cube\n\nif 0:\n # Save data to orignal cube file\n with h5py.File(fcube, \"a\") as f:\n f[saveas] = smb\n\n print(\"data saved ->\", fcube)\n"
] | [
[
"numpy.abs",
"numpy.invert",
"numpy.unique",
"numpy.isnan",
"numpy.arange",
"numpy.flipud",
"numpy.empty",
"numpy.ones",
"numpy.modf",
"numpy.round",
"numpy.size",
"scipy.ndimage.map_coordinates",
"numpy.meshgrid",
"numpy.zeros",
"numpy.where",
"numpy.vstack"
],
[
"numpy.nanmax",
"numpy.nanmedian",
"numpy.sqrt",
"numpy.linspace",
"numpy.arctan",
"numpy.around",
"numpy.nanmin",
"numpy.nanmean",
"numpy.where",
"scipy.spatial.cKDTree",
"numpy.hypot",
"numpy.arange",
"numpy.full",
"numpy.interp",
"numpy.float",
"numpy.zeros",
"numpy.isnan",
"numpy.median",
"numpy.deg2rad",
"numpy.meshgrid",
"numpy.sum",
"numpy.abs",
"numpy.sign",
"numpy.vstack"
],
[
"matplotlib.pyplot.legend",
"matplotlib.pyplot.title",
"numpy.isfinite",
"numpy.isnan",
"numpy.full",
"numpy.ndim",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.subplot",
"numpy.nanmean",
"numpy.interp",
"numpy.nanstd",
"numpy.meshgrid",
"numpy.where",
"scipy.signal.savgol_filter",
"matplotlib.pyplot.show",
"matplotlib.pyplot.figure"
]
] |
mathyouf/GPT-Games | [
"bf6e558bf6ec92d1fba97770587610da0f3447eb"
] | [
"src/interactive_conditional_samples.py"
] | [
"#!/usr/bin/env python3\n\nimport fire\nimport json\nimport os\nimport re\nimport numpy as np\nimport tensorflow as tf\n\nimport model, sample, encoder\n\ndef modify_raw_text(raw_text, interviewer, interviewee):\n return interviewer+\": \\\"\" + raw_text + \"\\\" \"+ interviewee +\":\\\"\"\n\ndef interact_model(\n model_name='124M',\n seed=None,\n nsamples=1,\n batch_size=1,\n length=None,\n temperature=1,\n top_k=0,\n top_p=1,\n models_dir='models',\n):\n \"\"\"\n Interactively run the model\n :model_name=124M : String, which model to use\n :seed=None : Integer seed for random number generators, fix seed to reproduce\n results\n :nsamples=1 : Number of samples to return total\n :batch_size=1 : Number of batches (only affects speed/memory). Must divide nsamples.\n :length=None : Number of tokens in generated text, if None (default), is\n determined by model hyperparameters\n :temperature=1 : Float value controlling randomness in boltzmann\n distribution. Lower temperature results in less random completions. As the\n temperature approaches zero, the model will become deterministic and\n repetitive. Higher temperature results in more random completions.\n :top_k=0 : Integer value controlling diversity. 1 means only 1 word is\n considered for each step (token), resulting in deterministic completions,\n while 40 means 40 words are considered at each step. 0 (default) is a\n special setting meaning no restrictions. 40 generally is a good value.\n :models_dir : path to parent folder containing model subfolders\n (i.e. contains the <model_name> folder)\n \"\"\"\n models_dir = os.path.expanduser(os.path.expandvars(models_dir))\n if batch_size is None:\n batch_size = 1\n assert nsamples % batch_size == 0\n\n enc = encoder.get_encoder(model_name, models_dir)\n hparams = model.default_hparams()\n with open(os.path.join(models_dir, model_name, 'hparams.json')) as f:\n hparams.override_from_dict(json.load(f))\n\n if length is None:\n length = hparams.n_ctx // 2\n elif length > hparams.n_ctx:\n raise ValueError(\"Can't get samples longer than window size: %s\" % hparams.n_ctx)\n\n with tf.Session(graph=tf.Graph()) as sess:\n context = tf.placeholder(tf.int32, [batch_size, None])\n np.random.seed(seed)\n tf.set_random_seed(seed)\n output = sample.sample_sequence(\n hparams=hparams, length=length,\n context=context,\n batch_size=batch_size,\n temperature=temperature, top_k=top_k, top_p=top_p\n )\n\n saver = tf.train.Saver()\n ckpt = tf.train.latest_checkpoint(os.path.join(models_dir, model_name))\n saver.restore(sess, ckpt)\n interviewer = input(\"What is your name? \")\n interviewee = input(\"Who are you talking to? 
\")\n previous_memory = \"\"\n while True:\n raw_text = input(interviewer+\" >>> \")\n while not raw_text:\n print('Prompt should not be empty!')\n raw_text = input(interviewer+\" >>> \")\n raw_text = modify_raw_text(raw_text, interviewer, interviewee)\n previous_memory += raw_text\n response = re.match(r'(.*?)\"', '495839045')\n while not response:\n context_tokens = enc.encode(previous_memory)\n generated = 0\n for _ in range(nsamples // batch_size):\n out = sess.run(output, feed_dict={\n context: [context_tokens for _ in range(batch_size)]\n })[:, len(context_tokens):]\n for i in range(batch_size):\n generated += 1\n text = enc.decode(out[i])\n response = re.match(r'(.*?)\"\\'', text)\n if response:\n match = re.match(r'(.*?)\"', text).group(0)\n # print(\"=\" * 40 + \" SAMPLE \" + str(generated) + \" \" + \"=\" * 40)\n # print(\"Raw Input:\", previous_memory)\n print(interviewee+\" >>> \",match[:-1])\n # print(\"=\" * 80)\n previous_memory += match + \" \"\n\nif __name__ == '__main__':\n fire.Fire(interact_model)\n\n"
] | [
[
"tensorflow.Graph",
"numpy.random.seed",
"tensorflow.placeholder",
"tensorflow.set_random_seed",
"tensorflow.train.Saver"
]
] |
rparini/cxroots | [
"037247fc47b29781b9cc66857a8395283e8ecc86",
"037247fc47b29781b9cc66857a8395283e8ecc86"
] | [
"cxroots/tests/test_deriv.py",
"cxroots/Paths.py"
] | [
"import pytest\nimport numpy as np\nfrom numpy import cos, sin\n\nfrom cxroots import Circle, Rectangle\nfrom cxroots import CxDerivative\n\[email protected]('C', [\n pytest.param(Circle(0, 2), id='circle'),\n pytest.param(Rectangle([-1.5,1.5],[-2,2]), id='rect'),\n pytest.param(None, id='default')\n])\ndef test_CxDerivative(C):\n f = lambda z: z**10 - 2*z**5 + sin(z)*cos(z/2)\n df = lambda z: 10*(z**9 - z**4) + cos(z)*cos(z/2) - 0.5*sin(z)*sin(z/2)\n\n z = np.array([-1.234, 0.3+1j, 0.1j, -0.9-0.5j])\n\n assert CxDerivative(f, z, n=1, contour=C) == pytest.approx(df(z))\n\n",
"from __future__ import division\n\nimport numpy as np\nimport scipy.integrate\nfrom numpy import exp, pi\n\nclass ComplexPath(object):\n \"\"\"A base class for paths in the complex plane.\"\"\"\n def __init__(self):\n self._integralCache = {}\n self._trapValuesCache = {}\n\n\n def __call__(self, t):\n r\"\"\"\n The parameterization of the path in the varaible :math:`t\\in[0,1]`.\n\n Parameters\n ----------\n t : float\n A real number :math:`0\\leq t \\leq 1`.\n\n Returns\n -------\n complex\n A point on the path in the complex plane.\n \"\"\"\n raise NotImplementedError('__call__ must be implemented in a subclass')\n\n\n def trap_values(self, f, k, useCache=True):\n \"\"\"\n Compute or retrieve (if cached) the values of the functions f\n at :math:`2^k+1` points along the contour which are evenly\n spaced with respect to the parameterisation of the contour.\n\n Parameters\n ----------\n f : function\n A function of a single complex variable.\n k : int\n Defines the number of points along the curve that f is to be\n evaluated at as :math:`2^k+1`.\n useCache : bool, optional\n If True then use, if available, the results of any previous\n calls to this function for the same f and save any new\n results so that they can be reused later.\n\n Returns\n -------\n :class:`numpy.ndarray`\n The values of f at :math:`2^k+1` points along the contour\n which are evenly spaced with respect to the parameterisation\n of the contour.\n \"\"\"\n if f in self._trapValuesCache.keys() and useCache:\n vals = self._trapValuesCache[f]\n vals_k = int(np.log2(len(vals)-1))\n\n if vals_k == k:\n return vals\n elif vals_k > k:\n return vals[::2**(vals_k-k)]\n else:\n t = np.linspace(0, 1, 2**k+1)\n vals = np.empty(2**k+1, dtype=np.complex128)\n vals.fill(np.nan)\n vals[::2**(k-vals_k)] = self._trapValuesCache[f]\n vals[np.isnan(vals)] = f(self(t[np.isnan(vals)]))\n\n # cache values\n self._trapValuesCache[f] = vals\n return vals\n\n else:\n t = np.linspace(0, 1, 2**k+1)\n vals = f(self(t))\n if useCache:\n self._trapValuesCache[f] = vals\n return vals\n\n\n def plot(self, N=100, linecolor='C0', linestyle='-'):\n \"\"\"\n Uses matplotlib to plot, but not show, the path as a 2D plot in\n the Complex plane.\n\n Parameters\n ----------\n N : int, optional\n The number of points to use when plotting the path.\n linecolor : optional\n The colour of the plotted path, passed to the\n :func:`matplotlib.pyplot.plot` function as the keyword\n argument of 'color'. See the matplotlib tutorial on\n `specifying colours <https://matplotlib.org/users/colors.html#>`_.\n linestyle : str, optional\n The line style of the plotted path, passed to the\n :func:`matplotlib.pyplot.plot` function as the keyword\n argument of 'linestyle'. The default corresponds to a solid\n line. 
See :meth:`matplotlib.lines.Line2D.set_linestyle` for\n other acceptable arguments.\n \"\"\"\n import matplotlib.pyplot as plt\n t = np.linspace(0,1,N)\n path = self(t)\n plt.plot(path.real, path.imag, color=linecolor, linestyle=linestyle)\n plt.xlabel('Re[$z$]', size=16)\n plt.ylabel('Im[$z$]', size=16)\n plt.gca().set_aspect(1)\n\n # add arrow to indicate direction of path\n arrow_direction = (self(0.51) - self(0.5))/abs(self(0.51) - self(0.5))\n arrow_extent = 1e-6*arrow_direction\n ymin, ymax = plt.gca().get_ylim()\n xmin, xmax = plt.gca().get_xlim()\n head_length = max(abs(ymax - ymin), abs(xmax - xmin))/40.\n plt.arrow(self(0.5).real, self(0.5).imag,\n arrow_extent.real, arrow_extent.imag,\n head_width=head_length*2/3., head_length=head_length,\n fc=linecolor, ec=linecolor)\n\n def show(self, saveFile=None, **plotKwargs):\n \"\"\"\n Shows the path as a 2D plot in the complex plane. Requires\n Matplotlib.\n\n Parameters\n ----------\n saveFile : str (optional)\n If given then the plot will be saved to disk with name\n 'saveFile'. If saveFile=None the plot is shown on-screen.\n **plotKwargs\n Other key word arguments are passed to :meth:`~cxroots.Paths.ComplexPath.plot`.\n \"\"\"\n import matplotlib.pyplot as plt\n self.plot(**plotKwargs)\n\n if saveFile is not None:\n plt.savefig(saveFile, bbox_inches='tight')\n plt.close()\n else:\n plt.show()\n\n def integrate(self, f, absTol=0, relTol=1e-12, divMax=15, intMethod='quad', verbose=False):\n \"\"\"\n Integrate the function f along the path. The value of the\n integral is cached and will be reused if the method is called\n with same arguments (ignoring verbose).\n\n Parameters\n ----------\n f : function\n A function of a single complex variable.\n absTol : float, optional\n The absolute tolerance for the integration.\n relTol : float, optional\n The realative tolerance for the integration.\n divMax : int, optional\n If the Romberg integration method is used then divMax is the\n maximum number of divisions before the Romberg integration\n routine of a path exits.\n intMethod : {'quad', 'romb'}, optional\n If 'quad' then :func:`scipy.integrate.quad` is used to\n compute the integral. If 'romb' then Romberg integraion,\n using :func:`scipy.integrate.romberg`, is used instead.\n verbose : bool, optional\n Passed ass the `show` argument of :func:`scipy.integrate.romberg`.\n\n Returns\n -------\n complex\n The integral of the function f along the path.\n\n Notes\n -----\n This function is only used when checking the\n multiplicity of roots. 
The bulk of the integration for\n rootfinding is done with :func:`cxroots.CountRoots.prod`.\n \"\"\"\n\n args = (f, absTol, relTol, divMax, intMethod)\n if args in self._integralCache.keys():\n integral = self._integralCache[args]\n\n elif hasattr(self, '_reversePath') and args in self._reversePath._integralCache:\n # if we have already computed the reverse of this path\n integral = -self._reversePath._integralCache[args]\n\n else:\n integrand = lambda t: f(self(t))*self.dzdt(t)\n\n if intMethod == 'romb':\n integral = scipy.integrate.romberg(integrand, 0, 1, tol=absTol, rtol=relTol, divmax=divMax, show=verbose)\n elif intMethod == 'quad':\n integrand_real = lambda t: np.real(integrand(t))\n integrand_imag = lambda t: np.imag(integrand(t))\n\n integral_real, abserr_real = scipy.integrate.quad(integrand_real, 0, 1, epsabs=absTol, epsrel=relTol)\n integral_imag, abserr_imag = scipy.integrate.quad(integrand_imag, 0, 1, epsabs=absTol, epsrel=relTol)\n integral = integral_real + 1j*integral_imag\n else:\n raise ValueError(\"intMethod must be either 'romb' or 'quad'\")\n\n if np.isnan(integral):\n raise RuntimeError('The integral along the segment %s is NaN.\\\n \\nThis is most likely due to a root being on or very close to the path of integration.'%self)\n\n self._integralCache[args] = integral\n\n return integral\n\n\nclass ComplexLine(ComplexPath):\n r\"\"\"\n A straight line :math:`z` in the complex plane from a to b\n parameterised by\n\n ..math::\n\n z(t) = a + (b-a)t, \\quad 0\\leq t \\leq 1\n\n\n Parameters\n ----------\n a : float\n b : float\n \"\"\"\n def __init__(self, a, b):\n self.a, self.b = a, b\n self.dzdt = lambda t: self.b-self.a\n super(ComplexLine, self).__init__()\n\n def __str__(self):\n return 'ComplexLine from %.3f+%.3fi to %.3f+%.3fi' % (self.a.real, self.a.imag, self.b.real, self.b.imag)\n\n def __call__(self, t):\n r\"\"\"\n The function :math:`z(t) = a + (b-a)t`.\n\n Parameters\n ----------\n t : float\n A real number :math:`0\\leq t \\leq 1`.\n\n Returns\n -------\n complex\n A point on the line in the complex plane.\n \"\"\"\n return self.a + t*(self.b-self.a)\n\n def distance(self, z):\n \"\"\"\n Distance from the point z to the closest point on the line.\n\n Parameters\n ----------\n z : complex\n\n Returns\n -------\n float\n The distance from z to the point on the line which is\n closest to z.\n \"\"\"\n # convert complex numbers to vectors\n A = np.array([self.a.real, self.a.imag])\n B = np.array([self.b.real, self.b.imag])\n Z = np.array([z.real, z.imag])\n\n # the projection of the point z onto the line a -> b is where\n # the parameter t is\n t = (Z-A).dot(B-A)/abs((B-A).dot(B-A))\n\n # but the line segment only has 0 <= t <= 1\n t = t.clip(0,1)\n\n # so the point on the line segment closest to z is\n c = self(t)\n return abs(c-z)\n\nclass ComplexArc(ComplexPath):\n r\"\"\"\n A circular arc :math:`z` with center z0, radius R, initial angle t0\n and change of angle dt. 
The arc is parameterised by\n\n ..math::\n\n z(t) = R e^{i(t0 + t dt)} + z0, \\quad 0\\leq t \\leq 1\n\n Parameters\n ----------\n z0 : complex\n R : float\n t0 : float\n dt : float\n \"\"\"\n def __init__(self, z0, R, t0, dt):\n self.z0, self.R, self.t0, self.dt = z0, R, t0, dt\n self.dzdt = lambda t: 1j*self.dt*self.R*exp(1j*(self.t0 + t*self.dt))\n super(ComplexArc, self).__init__()\n\n def __str__(self):\n return 'ComplexArc: z0=%.3f, R=%.3f, t0=%.3f, dt=%.3f' % (self.z0, self.R, self.t0, self.dt)\n\n def __call__(self, t):\n r\"\"\"\n The function :math:`z(t) = R e^{i(t_0 + t dt)} + z_0`.\n\n Parameters\n ----------\n t : float\n A real number :math:`0\\leq t \\leq 1`.\n\n Returns\n -------\n complex\n A point on the arc in the complex plane.\n \"\"\"\n return self.R*exp(1j*(self.t0 + t*self.dt)) + self.z0\n\n def distance(self, z):\n \"\"\"\n Distance from the point z to the closest point on the arc.\n\n Parameters\n ----------\n z : complex\n\n Returns\n -------\n float\n The distance from z to the point on the arc which is closest\n to z.\n \"\"\"\n theta = np.angle(z-self.z0) # np.angle maps to (-pi,pi]\n theta = (theta-self.t0)%(2*pi) + self.t0 # put theta in [t0,t0+2pi)\n\n if ((self.dt > 0 and self.t0 < theta < self.t0+self.dt)\n or (self.dt < 0 and self.t0+self.dt < theta - 2*pi < self.t0)):\n # the closest point to z lies on the arc\n return abs(self.R*exp(1j*theta) + self.z0 - z)\n else:\n # the closest point to z is one of the endpoints\n return min(abs(self(0)-z), abs(self(1)-z))\n\n"
] | [
[
"numpy.array",
"numpy.cos",
"numpy.sin"
],
[
"matplotlib.pyplot.gca",
"numpy.linspace",
"numpy.isnan",
"matplotlib.pyplot.savefig",
"numpy.empty",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.close",
"numpy.exp",
"matplotlib.pyplot.xlabel",
"numpy.angle",
"numpy.array",
"matplotlib.pyplot.show",
"matplotlib.pyplot.ylabel"
]
] |
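
The `integrate` method in the cxroots record above splits the complex integrand into real and imaginary parts and hands each to `scipy.integrate.quad`. A minimal self-contained sketch of that pattern for a straight-line path z(t) = a + (b - a)t; the test function and endpoints are illustrative assumptions, not taken from the record:

```python
import scipy.integrate

def integrate_along_line(f, a, b, abs_tol=0.0, rel_tol=1e-12):
    """Integrate f(z) dz along the straight line from a to b in the complex plane."""
    dzdt = b - a                                  # constant derivative of z(t) = a + (b - a) t
    integrand = lambda t: f(a + t * (b - a)) * dzdt

    # quad only handles real-valued integrands, so integrate Re and Im separately
    re, _ = scipy.integrate.quad(lambda t: integrand(t).real, 0, 1, epsabs=abs_tol, epsrel=rel_tol)
    im, _ = scipy.integrate.quad(lambda t: integrand(t).imag, 0, 1, epsabs=abs_tol, epsrel=rel_tol)
    return re + 1j * im

# sanity check: the integral of z**2 from 0 to 1+1j is (1+1j)**3 / 3 along any path
print(integrate_along_line(lambda z: z**2, 0, 1 + 1j), (1 + 1j)**3 / 3)
```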
kingagla/reviews_classification | [
"9bf9636035bf14fb3ce151d075a6c04f4cdbfde6"
] | [
"scripts/models/01_prepare_and_save_models.py"
] | [
"import os\nimport pickle\nimport pandas as pd\nfrom sklearn.cluster import DBSCAN\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.metrics import classification_report\nfrom sklearn.preprocessing import LabelEncoder\nfrom tensorflow.keras.callbacks import ModelCheckpoint\nfrom tensorflow.keras.layers import Dense, Dropout\nfrom tensorflow.keras.models import Sequential\nfrom scripts.settings import *\nfrom scripts.utils import create_dir\n\n\ndef prepare_for_learning(file_path, model_path, n_samples=5000, use_neutral=False):\n # load data\n rev_vec = pd.read_pickle(file_path)\n # remove neutral if not used\n if not use_neutral:\n rev_vec = rev_vec[rev_vec['Information'] != 'neu']\n # use only part of available data\n rev_vec = rev_vec.sample(n_samples)\n # save indices of training and validation set\n pickle.dump(rev_vec.index, open(learning_index_path, 'wb'))\n\n X, y = rev_vec[[col for col in rev_vec.columns if col.startswith('Vec')]], rev_vec['Information']\n le = LabelEncoder()\n le.fit(y.values.reshape(-1, 1))\n create_dir(os.path.dirname(model_path))\n pickle.dump(le, open(model_path, 'wb'))\n return rev_vec, X, y\n\n\ndef classification_report_to_excel(y_test, y_pred, excel_path):\n cr = classification_report(y_test, y_pred, output_dict=True)\n create_dir(os.path.dirname(excel_path))\n pd.DataFrame(cr).T.to_excel(excel_path)\n\n\ndef neural_network():\n model = Sequential()\n model.add(Dense(256, input_dim=1024, activation='relu', use_bias=True,\n kernel_initializer='random_normal'))\n model.add(Dropout(0.5))\n model.add(Dense(64, activation='relu', use_bias=True, kernel_initializer='random_normal'))\n model.add(Dropout(0.5))\n model.add(Dense(16, activation='relu', use_bias=True, kernel_initializer='random_normal'))\n model.add(Dense(1, activation='sigmoid', use_bias=True, kernel_initializer='random_normal'))\n model.compile(loss='binary_crossentropy', optimizer='adadelta', metrics=['acc'])\n return model\n\n\ndef fit_and_save_model(X_train, y_train, model, model_path, network=False):\n # create directory for model\n create_dir(os.path.dirname(model_path))\n\n if network:\n checkpoint = ModelCheckpoint(model_path, monitor='val_acc', verbose=1, save_best_only=True)\n model.fit(X_train, y_train, epochs=150, batch_size=512, validation_split=0.2, callbacks=[checkpoint])\n else:\n model.fit(X_train, y_train)\n pickle.dump(model, open(model_path, 'wb'))\n\n\ndef main():\n rev_vec, X, y = prepare_for_learning(rev_path,\n os.path.join(model_dir, label_encoder_file),\n n_samples=5000,\n use_neutral=False)\n le_path = os.path.join(model_dir, label_encoder_file)\n le = pickle.load(open(le_path, 'rb'))\n y = le.transform(y)\n # learn random forest\n rf = RandomForestClassifier(n_estimators=100, max_depth=5,\n min_samples_leaf=2,\n class_weight='balanced', criterion='entropy')\n fit_and_save_model(X, y, rf, os.path.join(model_dir, random_forest_file), network=False)\n\n # use DBSCAN to find negative\n dbs = DBSCAN(eps=0.01, min_samples=2)\n pickle.dump(dbs, open(os.path.join(model_dir, dbscan_file), 'wb'))\n\n # use neural network\n network = neural_network()\n fit_and_save_model(X, y, network, os.path.join(model_dir, network_file), network=True)\n\n\nif __name__ == '__main__':\n main()\n"
] | [
[
"tensorflow.keras.callbacks.ModelCheckpoint",
"pandas.read_pickle",
"tensorflow.keras.models.Sequential",
"sklearn.ensemble.RandomForestClassifier",
"tensorflow.keras.layers.Dense",
"sklearn.cluster.DBSCAN",
"pandas.DataFrame",
"tensorflow.keras.layers.Dropout",
"sklearn.preprocessing.LabelEncoder",
"sklearn.metrics.classification_report"
]
] |
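
The script in the record above dumps a scikit-learn classification report to a spreadsheet by way of a DataFrame. A small standalone sketch of that conversion; the labels and predictions are made-up toy values:

```python
import pandas as pd
from sklearn.metrics import classification_report

y_true = [0, 1, 1, 0, 1, 0, 1, 1]    # toy ground truth, for illustration only
y_pred = [0, 1, 0, 0, 1, 1, 1, 1]    # toy predictions

# output_dict=True returns nested dicts that map directly onto a DataFrame
report = classification_report(y_true, y_pred, output_dict=True)
df = pd.DataFrame(report).T          # one row per class plus the accuracy / avg rows
print(df)
# df.to_excel("report.xlsx")         # the script's final step; needs openpyxl installed
```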
Jarino/cgp-optimization | [
"3b50813a591c3535c7846b7e8acf5f5959122d02"
] | [
"tengp_eval/optimizers/sa.py"
] | [
"from configparser import ConfigParser\n\nimport numpy as np\nfrom sklearn.metrics import mean_squared_error, r2_score\nimport pygmo as pg\n\nfrom tengp.individual import IndividualBuilder, NPIndividual\nfrom tengp import Parameters, FunctionSet\nfrom tengp_eval.coevolution import TrainersSet, GaPredictors\n\n\ndef fitness_function(individual, x, y):\n output = individual.transform(x)\n try:\n #return adjusted_r2_score(y, output, len(x), len(individual.genes))\n return mean_squared_error(output, y)\n except ValueError:\n return 10e10\n\nclass cost_function:\n def __init__(self, X, Y, params, bounds):\n self.params = params\n self.bounds = bounds\n self.X = X\n self.Y = Y\n\n def fitness(self, x):\n individual = NPIndividual(list(x), self.bounds, self.params)\n\n fitness = fitness_function(individual, self.X, self.Y)\n\n return [fitness]\n\n\n def get_bounds(self):\n return self.bounds\n\ndef define_cgp_system(n_nodes, n_inputs, n_outputs, funset, max_back):\n \"\"\"\n define CCGP system\n\n Return:\n IndividualBuilder object\n Parameters\n bounds (tuple)\n \"\"\"\n params = Parameters(n_inputs, n_outputs, 1, n_nodes, funset, real_valued=True, max_back=max_back)\n ib = IndividualBuilder(params)\n bounds = ib.create().bounds\n return ib, params, bounds\n\ndef run_benchmark_coevolution(cp, x_train, y_train, funset):\n ib, params, bounds = define_cgp_system(\n cp.getint('CGPPARAMS', 'n_nodes'),\n x_train.shape[1] if len(x_train.shape) > 1 else 1,\n y_train.shape[1] if len(y_train.shape) > 1 else 1,\n funset,\n cp.getint('CGPPARAMS', 'max_back'))\n\n # setup the coevolution elements\n ts = TrainersSet(ib, 16, fitness_function, x_train, y_train)\n predictors = GaPredictors(x_train, y_train, 10, 24)\n predictors.evaluate_fitness(ts)\n x_reduced, y_reduced = predictors.best_predictors_data()\n\n GENS_STEP = 50\n\n cf = cost_function(x_reduced, y_reduced, params, bounds)\n prob = pg.problem(cf)\n algo = pg.algorithm(pg.pso(\n gen=GENS_STEP,\n omega=cp.getfloat('OPTIMPARAMS', 'omega'),\n eta1=cp.getfloat('OPTIMPARAMS', 'eta1'),\n eta2=cp.getfloat('OPTIMPARAMS', 'eta2'),\n memory=True))\n algo.set_verbosity(1)\n pop = pg.population(prob, cp.getint('DEFAULT', 'population_size'))\n n_gens = GENS_STEP\n\n\n while n_gens < 500:\n\n pop = algo.evolve(pop)\n\n # calculate exact fitness of champion and\n # add it to the trainers set\n champion = NPIndividual(pop.champion_x, cf.bounds, cf.params)\n try:\n champion.fitness = fitness_function(champion, x_train, y_train)\n ts.add_trainer(champion)\n except ValueError:\n print('unsuccessful adding of champion')\n\n # update random population\n ts.update_random_population()\n\n predictors.predictors_evolution_step(ts)\n print('changing the subset, best predictor: ', predictors.best_predictor.fitness)\n\n x_reduced, y_reduced = predictors.best_predictors_data()\n pop.problem.extract(object).X = x_reduced\n pop.problem.extract(object).Y = y_reduced\n n_gens += GENS_STEP\n\n uda = algo.extract(pg.pso)\n\n champion = NPIndividual(pop.champion_x, cf.bounds, cf.params)\n champion.fitness = fitness_function(champion, x_train, y_train)\n\n\n fitnesses = [x[2] for x in uda.get_log()]\n fitnesses.append(champion.fitness)\n return fitnesses\n\n\ndef run_benchmark(cp, x_train, y_train, funset):\n ib, params, bounds = define_cgp_system(\n cp.getint('CGPPARAMS', 'n_nodes'),\n x_train.shape[1] if len(x_train.shape) > 1 else 1,\n y_train.shape[1] if len(y_train.shape) > 1 else 1,\n funset,\n cp.getint('CGPPARAMS', 'max_back'))\n cf = cost_function(x_train, y_train, params, 
bounds)\n prob = pg.problem(cf)\n\n algo = pg.algorithm(pg.simulated_annealing(\n Ts=cp.getfloat('OPTIMPARAMS', 'Ts'),\n Tf=cp.getfloat('OPTIMPARAMS', 'Tf'),\n n_T_adj=cp.getint('OPTIMPARAMS', 'n_T_adj'),\n n_range_adj=cp.getint('OPTIMPARAMS', 'n_range_adj'),\n bin_size=cp.getint('OPTIMPARAMS', 'bin_size'),\n start_range=cp.getfloat('OPTIMPARAMS', 'start_range')))\n\n algo.set_verbosity(100)\n pop = pg.population(prob, 1)\n pop = algo.evolve(pop)\n uda = algo.extract(pg.simulated_annealing)\n\n return [x[2] for x in uda.get_log()]\n\nRUNNERS = [run_benchmark]\n"
] | [
[
"sklearn.metrics.mean_squared_error"
]
] |
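
The benchmark in the record above wraps a CGP cost function as a pygmo problem and evolves a single-individual population with simulated annealing. A minimal sketch of the same wrapping on a toy sphere function; the dimension, bounds and temperatures are arbitrary choices standing in for the real cost function:

```python
import pygmo as pg

class Sphere:
    """Toy user-defined problem: minimise sum(x_i^2) over [-5, 5]^3."""
    def fitness(self, x):
        return [sum(xi * xi for xi in x)]     # single objective -> list of length 1
    def get_bounds(self):
        return ([-5.0] * 3, [5.0] * 3)        # (lower bounds, upper bounds)

prob = pg.problem(Sphere())
algo = pg.algorithm(pg.simulated_annealing(Ts=10.0, Tf=0.1))
pop = pg.population(prob, 1)                  # simulated annealing evolves one individual
pop = algo.evolve(pop)
print(pop.champion_x, pop.champion_f)
```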
THU-DA-6D-Pose-Group/Self6D-Diff-Renderer | [
"408330a9c7d7010a5af0a5b0b469f1ef695d18de",
"408330a9c7d7010a5af0a5b0b469f1ef695d18de"
] | [
"core/dr_utils/dib_renderer_x/utils/sphericalcoord.py",
"core/dr_utils/dib_renderer_x/renderer/texrender_batch.py"
] | [
"# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.\n\n# Permission is hereby granted, free of charge, to any person obtaining a\n# copy of this software and associated documentation files (the \"Software\"),\n# to deal in the Software without restriction, including without limitation\n# the rights to use, copy, modify, merge, publish, distribute, sublicense,\n# and/or sell copies of the Software, and to permit persons to whom the\n# Software is furnished to do so, subject to the following conditions:\n\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL\n# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER\n# DEALINGS IN THE SOFTWARE.\n\nimport numpy as np\n\n\n##################################################################\n# symmetric over z axis\ndef get_spherical_coords_z(X):\n # X is N x 3\n rad = np.linalg.norm(X, axis=1)\n # Inclination\n theta = np.arccos(X[:, 2] / rad)\n # Azimuth\n phi = np.arctan2(X[:, 1], X[:, 0])\n\n # Normalize both to be between [-1, 1]\n vv = (theta / np.pi) * 2 - 1\n uu = ((phi + np.pi) / (2 * np.pi)) * 2 - 1\n # Return N x 2\n return np.stack([uu, vv], 1)\n\n\n# symmetric over x axis\ndef get_spherical_coords_x(X):\n # X is N x 3\n rad = np.linalg.norm(X, axis=1)\n # Inclination\n # y == 1\n # cos = 0\n # y == -1\n # cos = pi\n theta = np.arccos(X[:, 0] / rad)\n # Azimuth\n phi = np.arctan2(X[:, 2], X[:, 1])\n\n # Normalize both to be between [-1, 1]\n uu = (theta / np.pi) * 2 - 1\n vv = ((phi + np.pi) / (2 * np.pi)) * 2 - 1\n # Return N x 2\n return np.stack([uu, vv], 1)\n\n\n# symmetric spherical projection\ndef get_symmetric_spherical_tex_coords(vertex_pos, symmetry_axis=1, up_axis=2, front_axis=0):\n # vertex_pos is N x 3\n length = np.linalg.norm(vertex_pos, axis=1)\n # Inclination\n theta = np.arccos(vertex_pos[:, front_axis] / length)\n # Azimuth\n phi = np.abs(np.arctan2(vertex_pos[:, symmetry_axis], vertex_pos[:, up_axis]))\n\n # Normalize both to be between [-1, 1]\n uu = (theta / np.pi) * 2 - 1\n # vv = ((phi + np.pi) / (2 * np.pi)) * 2 - 1\n vv = (phi / np.pi) * 2 - 1\n # Return N x 2\n return np.stack([uu, vv], 1)\n\n\n#########################################################################\nif __name__ == \"__main__\":\n\n from utils.utils_mesh import loadobj, savemeshtes\n import cv2\n\n p, f = loadobj(\"2.obj\")\n uv = get_spherical_coords_x(p)\n uv[:, 0] = -uv[:, 0]\n\n uv[:, 1] = -uv[:, 1]\n uv = (uv + 1) / 2\n savemeshtes(p, uv, f, \"./2_x.obj\")\n\n tex = np.zeros(shape=(256, 512, 3), dtype=np.uint8)\n font = cv2.FONT_HERSHEY_SIMPLEX\n bottomLeftCornerOfText = (10, 200)\n fontScale = 5\n fontColor = (0, 255, 255)\n lineType = 2\n\n cv2.putText(tex, \"Hello World!\", bottomLeftCornerOfText, font, fontScale, fontColor, lineType)\n cv2.imshow(\"\", tex)\n cv2.waitKey()\n cv2.imwrite(\"2_x.png\", np.transpose(tex, [1, 0, 2]))\n",
"from __future__ import print_function\nfrom __future__ import division\n\nfrom ..rasterizer import linear_rasterizer\nfrom ..utils import datanormalize\nfrom .fragment_shaders.frag_tex import fragmentshader\nfrom .vertex_shaders.perpsective import perspective_projection\nimport torch\nimport torch.nn as nn\nimport numpy as np\n\n\n##################################################################\nclass TexRenderBatch(nn.Module):\n def __init__(self, height, width, filtering=\"nearest\"):\n super(TexRenderBatch, self).__init__()\n\n self.height = height\n self.width = width\n self.filtering = filtering\n\n def forward(self, points, cameras, uv_bxpx2, texture_bx3xthxtw, ft_fx3=None):\n \"\"\"\n points: b x [points_1xpx3, faces_fx3]\n cameras: [camera_rot_bx3x3, camera_pos_bx3, camera_proj_3x1]\n uv_bxpx2: b x [1xpx2]\n texture_bx3xthxtw: b x [1x3xthxtw]\n ft_fx3: b x [fx3]\n \"\"\"\n b = len(points)\n assert b > 0, b\n points3d_1xfx9_list = []\n points2d_1xfx6_list = []\n normalz_1xfx1_list = []\n normal1_1xfx3_list = []\n uv_1xfx9_list = []\n\n single_intrinsic = True\n if cameras[2].ndim == 3:\n assert cameras[2].shape[0] == b\n single_intrinsic = False\n\n for i in range(b):\n ##############################################################\n # first, MVP projection in vertexshader\n points_1xpx3, faces_fx3 = points[i]\n if single_intrinsic:\n cam_params = [cameras[0][i : i + 1], cameras[1][i : i + 1], cameras[2]]\n else:\n cam_params = [cameras[0][i : i + 1], cameras[1][i : i + 1], cameras[2][i]]\n # use faces_fx3 as ft_fx3 if not given\n if ft_fx3 is None:\n ft_fx3_single = faces_fx3\n else:\n ft_fx3_single = ft_fx3[i]\n\n points3d_1xfx9, points2d_1xfx6, normal_1xfx3 = perspective_projection(points_1xpx3, faces_fx3, cam_params)\n\n ################################################################\n # normal\n\n # decide which faces are front and which faces are back\n normalz_1xfx1 = normal_1xfx3[:, :, 2:3]\n # normalz_bxfx1 = torch.abs(normalz_bxfx1)\n\n # normalize normal\n normal1_1xfx3 = datanormalize(normal_1xfx3, axis=2)\n\n ############################################################\n # second, rasterization\n uv_1xpx2 = uv_bxpx2[i]\n\n c0 = uv_1xpx2[:, ft_fx3_single[:, 0], :]\n c1 = uv_1xpx2[:, ft_fx3_single[:, 1], :]\n c2 = uv_1xpx2[:, ft_fx3_single[:, 2], :]\n mask = torch.ones_like(c0[:, :, :1])\n uv_1xfx9 = torch.cat((c0, mask, c1, mask, c2, mask), dim=2)\n\n # append data\n points3d_1xfx9_list.append(points3d_1xfx9)\n points2d_1xfx6_list.append(points2d_1xfx6)\n normalz_1xfx1_list.append(normalz_1xfx1)\n normal1_1xfx3_list.append(normal1_1xfx3)\n uv_1xfx9_list.append(uv_1xfx9)\n\n # put the object with larger depth earlier\n\n # imrender = torch.empty((1, self.height, self.width, 3), device=device, dtype=torch.float32)\n # improb_1xhxwx1 = torch.empty((1, self.height, self.width, 1), device=device, dtype=torch.float32)\n # fg_mask = torch.empty((1, self.height, self.width, 1), device=device, dtype=torch.float32)\n ren_ims = []\n ren_masks = []\n ren_probs = []\n for i in range(b):\n imfeat, improb_1xhxwx1_i = linear_rasterizer(\n self.width,\n self.height,\n points3d_1xfx9_list[i],\n points2d_1xfx6_list[i],\n normalz_1xfx1_list[i],\n uv_1xfx9_list[i],\n )\n imtexcoords = imfeat[:, :, :, :2] # (1,H,W,2)\n hardmask = imfeat[:, :, :, 2:3] # (1,H,W,1) mask\n # fragrement shader\n texture_1x3xthxtw = texture_bx3xthxtw[i]\n imrender_i = fragmentshader(imtexcoords, texture_1x3xthxtw, hardmask)\n ren_ims.append(imrender_i) # 1HW3\n ren_probs.append(improb_1xhxwx1_i)\n 
ren_masks.append(hardmask)\n\n imrender = torch.cat(ren_ims, dim=0) # bHW3\n improb_bxhxwx1 = torch.cat(ren_probs, dim=0)\n mask_bxhxwx1 = torch.cat(ren_masks, dim=0)\n # return imrender, improb_1xhxwx1, normal1_1xfx3_list\n return imrender, improb_bxhxwx1, normal1_1xfx3_list, mask_bxhxwx1\n"
] | [
[
"numpy.arccos",
"numpy.linalg.norm",
"numpy.stack",
"numpy.arctan2",
"numpy.transpose",
"numpy.zeros"
],
[
"torch.ones_like",
"torch.cat"
]
] |
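
The first file in the record above maps mesh vertices to spherical texture coordinates with `arccos`/`arctan2` and rescales both angles to [-1, 1]. A compact numpy sketch of the z-axis variant of that mapping; the random point cloud is only for illustration:

```python
import numpy as np

def spherical_uv_z(points):
    """Map N x 3 points to N x 2 UV coordinates in [-1, 1], symmetric about the z axis."""
    rad = np.linalg.norm(points, axis=1)
    theta = np.arccos(points[:, 2] / rad)          # inclination from the +z axis
    phi = np.arctan2(points[:, 1], points[:, 0])   # azimuth in (-pi, pi]
    vv = (theta / np.pi) * 2 - 1                   # inclination rescaled to [-1, 1]
    uu = ((phi + np.pi) / (2 * np.pi)) * 2 - 1     # azimuth rescaled to [-1, 1]
    return np.stack([uu, vv], axis=1)

pts = np.random.randn(8, 3)                        # toy point cloud
print(spherical_uv_z(pts))
```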
gioramponi/LOGEL | [
"e862324816c57dd5d07691ee8583259a6a62116c",
"e862324816c57dd5d07691ee8583259a6a62116c"
] | [
"gridworld/lfl/mdp_utils.py",
"mujoco/observer_logel.py"
] | [
"\"\"\"utils for entropy-regularized discrete MDPs.\"\"\"\n\nfrom __future__ import print_function\nimport numpy as np\n\n\ndef softmax(x, tau=1.):\n e = np.exp(x * tau)\n z = -np.log(sum(e))\n return np.exp(x * tau + z)\n\n\ndef score_policy(pi, r, p, alpha, gamma):\n \"\"\"Returns expected score J(pi) = v_pi(start) using soft policy evaluation.\"\"\"\n n_states, n_actions, _ = p.shape\n q_pi = np.random.rand(n_states, n_actions)\n v_pi = np.zeros(n_states)\n for _ in range(1000):\n v_pi = np.zeros(n_states)\n for state in range(n_states):\n for action_ in range(n_actions):\n v_pi[state] += pi[state, action_] * \\\n (q_pi[state, action_] - alpha * np.log(pi[state, action_]))\n\n q_pi *= 0\n for state in range(n_states):\n for action in range(n_actions):\n q_pi[state, action] = r[state, action]\n for state_ in range(n_states):\n q_pi[state, action] += gamma * p[state, action, state_] * v_pi[state_]\n\n j_pi = v_pi[0]\n return j_pi\n\n\ndef solve_entropy_regularized_mdp(r, p, alpha, gamma):\n \"\"\"Returns optimal (soft) policy pi* and score J(pi*).\"\"\"\n n_states, n_actions, _ = p.shape\n q = np.zeros((n_states, n_actions))\n v = np.log(np.sum(np.exp(q), 1))\n # <<<<<<< HEAD\n print(\"r, p: \", r.shape, p.shape)\n # =======\n #\n # >>>>>>> aed0552fe0dea9129b017edf7ec4b9d4c4dcf9f2\n for _ in range(1000):\n q = r + gamma * np.sum(p * np.tile(v, (n_states, n_actions, 1)), 2)\n v = alpha * np.log(np.sum(np.exp(q / alpha), 1))\n\n pi_star = np.zeros((n_states, n_actions))\n for state in range(n_states):\n pi_star[state, :] = softmax(q[state, :] / alpha)\n\n j_pi_star = v[0]\n return pi_star, j_pi_star\n\n\ndef sample_sa_trajectory(p, pi, length):\n \"\"\"Returns a trajectory sampled from the learner's policy pi.\"\"\"\n n_states, n_actions, _ = p.shape\n trajectory = []\n state = 0\n action = np.random.choice(range(n_actions), p=pi[state, :])\n for _ in range(length):\n new_state = np.random.choice(range(n_states), p=p[state, action, :])\n new_action = np.random.choice(range(n_actions), p=pi[new_state, :])\n trajectory.append((state, action))\n state = new_state\n action = new_action\n return trajectory\n\n\ndef sample_sar_trajectory(p, pi, r, length):\n \"\"\"Returns a trajectory sampled from the learner's policy pi.\"\"\"\n n_states, n_actions, _ = p.shape\n trajectory = []\n state = 0\n action = np.random.choice(range(n_actions), p=pi[state, :])\n for _ in range(length):\n new_state = np.random.choice(range(n_states), p=p[state, action, :])\n new_action = np.random.choice(range(n_actions), p=pi[new_state, :])\n trajectory.append((state, action, r[state, action]))\n state = new_state\n action = new_action\n return trajectory",
"\"\"\"Observer's training.\n\nThe observer is trained via Proximal Policy Optimization (PPO).\nThis code is an adaptation of the PPO implementation taken from\nhttps://github.com/ikostrikov/pytorch-a2c-ppo-acktr-gail.\n\"\"\"\n\nimport collections\nimport glob\nimport os\nfrom arguments import get_args\nfrom envs import make_vec_envs\nfrom model import Policy\nimport numpy as np\nfrom ppo import PPO\nfrom storage import RolloutStorage\nimport torch\nfrom utils import get_vec_normalize\nfrom utils import update_linear_schedule\nfrom rbf import *\nargs = get_args()\n\nnum_updates = int(args.num_env_steps) // args.num_steps // args.num_processes\ntorch.manual_seed(args.seed)\n\nargs.save_dir = args.save_dir\nargs.scores_dir = args.scores_dir\nargs.rewards_dir = args.rewards_dir\nargs.policies_dir = args.policies_dir\n\nif args.env_name == 'Reacher-v2':\n num_feat = 26\n observer_steps = range(10, 20)\nif args.env_name == 'Hopper-v2':\n num_feat = 3\n observer_steps = range(10, 20)\n\ntry:\n os.makedirs(args.log_dir)\nexcept OSError:\n files = glob.glob(os.path.join(args.log_dir, '*.monitor.csv'))\n # for f in files:\n # os.remove(f)\n\neval_log_dir = args.log_dir + '_eval'\n\ntry:\n os.makedirs(eval_log_dir)\nexcept OSError:\n files = glob.glob(os.path.join(eval_log_dir, '*.monitor.csv'))\n # for f in files:\n # os.remove(f)\n\ntry:\n os.makedirs(args.scores_dir)\nexcept OSError:\n files = glob.glob(os.path.join(args.scores_dir, '*' + args.expe + '.npy'))\n # for f in files:\n # os.remove(f)\n\n\ndef main():\n device = 'cpu'\n acc_steps = []\n acc_scores = []\n torch.set_num_threads(1)\n\n envs = make_vec_envs(args.env_name, args.seed, args.num_processes,\n args.gamma, args.log_dir, args.add_timestep,\n device, False)\n\n # get cloned policy and recovered reward function\n policy_reward_dir = args.rewards_dir\n\n reward_file_name = policy_reward_dir + '/reward2_' + args.env_name +'_' + args.expe + '.pth.npy'\n reward = np.load(reward_file_name, allow_pickle=True)[-1]\n weight_reward = torch.from_numpy(reward)\n\n actor_critic = Policy(envs.observation_space.shape, envs.action_space)\n\n\n agent = PPO(actor_critic, args.clip_param, args.ppo_epoch,\n args.num_mini_batch, args.value_loss_coef, args.entropy_coef,\n lr=args.lr, eps=args.eps, max_grad_norm=args.max_grad_norm)\n\n rollouts = RolloutStorage(args.num_steps, args.num_processes,\n envs.observation_space.shape, envs.action_space, 1)\n\n obs = envs.reset()\n rollouts.obs[0].copy_(obs)\n rollouts.to(device)\n rbf1 = build_features_reacher2(.2, 5, 2)\n episode_rewards = collections.deque(maxlen=10)\n num_updates = 30\n for j in range(num_updates):\n\n if args.use_linear_lr_decay:\n # decrease learning rate linearly\n update_linear_schedule(agent.optimizer, j, num_updates, args.lr)\n agent.clip_param = args.clip_param * (1 - j / float(num_updates))\n\n for step in range(args.num_steps):\n # Sample actions\n with torch.no_grad():\n value, action, action_log_prob = actor_critic.act(\n rollouts.obs[step],\n rollouts.masks[step])\n if args.env_name == 'Hopper-v2':\n if args.num_processes > 1:\n pos_before = envs.get_sim_data()\n obs, _, done, infos = envs.step(action)\n feat_rewards = np.zeros((args.num_processes, num_feat))\n if args.env_name == 'Reacher-v2':\n\n if args.num_processes > 1:\n body_data = envs.get_body_data()\n for num_p in range(args.num_processes):\n rbf1_ = rbf1(body_data[num_p][:-1])\n rbf4_ = np.array([np.linalg.norm(action[num_p], ord=2)**2])\n feat_rewards[num_p] = np.concatenate((rbf1_.reshape(-1), rbf4_))\n else:\n 
rbf1_ = rbf1(\n (envs.envs[0].env.env.get_body_com(\"fingertip\") - envs.envs[0].env.env.get_body_com(\"target\"))[\n :-1])\n rbf4_ = np.array([-np.square(action[0]).sum()])\n feat_rewards[0] = np.concatenate((rbf1_.reshape(-1), rbf4_))\n if args.env_name == 'Hopper-v2':\n if args.num_processes > 1:\n pos_after = envs.get_sim_data()\n for num_p in range(args.num_processes):\n feat_1 = pos_after[num_p] - pos_before[num_p]\n feat_2 = 0\n if not done[num_p]:\n feat_2 = 1\n feat_3 = np.array([np.linalg.norm(action[num_p], ord=2)**2]).flatten()\n feat_rewards[num_p] = np.array([feat_1, feat_2, feat_3])\n # use infered reward:\n with torch.no_grad():\n reward = np.zeros((args.num_processes, 1))\n for num_p in range(args.num_processes):\n reward[num_p] = np.dot(feat_rewards[num_p], weight_reward.flatten())\n # print(reward)\n for info in infos:\n if 'episode' in info.keys():\n episode_rewards.append(info['episode']['r'])\n # r = 0\n # for key, val in info.items():\n # if 'reward' in key:\n # r += val\n # episode_rewards.append(r)\n\n # If done then clean the history of observations.\n masks = torch.FloatTensor([[0.0] if done_ else [1.0]\n for done_ in done])\n reward = torch.from_numpy(np.array(reward))\n # print(reward)\n rollouts.insert(obs, action, action_log_prob,\n value, reward, masks,1)\n\n with torch.no_grad():\n next_value = actor_critic.get_value(rollouts.obs[-1],\n rollouts.masks[-1]).detach()\n\n rollouts.compute_returns(next_value, args.gamma, args.tau)\n\n value_loss, action_loss, dist_entropy = agent.update(rollouts)\n\n rollouts.after_update()\n\n # save for every interval-th episode or for the last epoch\n if (j % args.save_interval == 0 or j == num_updates - 1) and args.save_dir:\n save_path = os.path.join(args.save_dir, 'ppo')\n try:\n os.makedirs(save_path)\n except OSError:\n pass\n\n # A really ugly way to save a model to CPU\n save_model = actor_critic\n\n save_model = [save_model,\n getattr(get_vec_normalize(envs), 'ob_rms', None)]\n\n torch.save(save_model, os.path.join(save_path, args.env_name + '.pt'))\n\n total_num_steps = (j + 1) * args.num_processes * args.num_steps\n\n if j % args.log_interval == 0 and len(episode_rewards) > 1:\n print('Updates', j,\n 'num timesteps', len(episode_rewards),\n '\\n Last training episodes: mean/median reward',\n '{:.1f}'.format(np.mean(episode_rewards)),\n '/{:.1f}'.format(np.median(episode_rewards)),\n 'min/max reward',\n '{:.1f}'.format(np.min(episode_rewards)),\n '/{:.1f}'.format(np.max(episode_rewards)),\n 'dist entropy', dist_entropy,\n 'value loss', value_loss,\n 'action loss', action_loss)\n\n if len(episode_rewards) > 1:\n acc_steps.append(total_num_steps)\n acc_scores.append(np.mean(episode_rewards))\n\n if (args.eval_interval is not None\n and len(episode_rewards) > 1\n and j % args.eval_interval == 0):\n eval_envs = make_vec_envs(args.env_name, args.seed + args.num_processes,\n args.num_processes, args.gamma, eval_log_dir,\n args.add_timestep, device, True)\n\n vec_norm = get_vec_normalize(eval_envs)\n if vec_norm is not None:\n vec_norm.eval()\n vec_norm.ob_rms = get_vec_normalize(envs).ob_rms\n\n eval_episode_rewards = []\n\n obs = eval_envs.reset()\n eval_masks = torch.zeros(args.num_processes, 1, device=device)\n\n while len(eval_episode_rewards) < 10:\n with torch.no_grad():\n _, action, _ = actor_critic.act(\n obs, eval_masks, deterministic=True)\n\n # Obser reward and next obs\n obs, reward, done, infos = eval_envs.step(action)\n\n eval_masks = torch.FloatTensor([[0.0] if done_ else [1.0]\n for done_ in done])\n for 
info in infos:\n if 'episode' in info.keys():\n eval_episode_rewards.append(info['episode']['r'])\n\n eval_envs.close()\n\n print('Evaluation using',\n len(eval_episode_rewards),\n 'episodes: mean reward',\n '{:.5f}\\n'.format(np.mean(eval_episode_rewards)))\n\n scores_file_name = args.scores_dir + '/observer_scores_logel_' + args.env_name + '_' + args.expe + '.npy'\n steps_file_name = args.scores_dir + '/observer_steps_loegl_' + args.env_name + '_' + args.expe + '.npy'\n np.save(scores_file_name, np.array(acc_scores))\n np.save(steps_file_name, np.array(acc_steps))\n\n\nif __name__ == '__main__':\n main()\n"
] | [
[
"numpy.log",
"numpy.tile",
"numpy.random.rand",
"numpy.exp",
"numpy.zeros"
],
[
"numpy.square",
"torch.zeros",
"numpy.min",
"torch.manual_seed",
"numpy.median",
"torch.from_numpy",
"numpy.linalg.norm",
"numpy.max",
"torch.set_num_threads",
"torch.FloatTensor",
"torch.no_grad",
"numpy.mean",
"numpy.load",
"numpy.array",
"numpy.zeros"
]
] |
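
The gridworld utilities in the record above build on a temperature-scaled softmax and a soft (entropy-regularised) Bellman backup, q = r + gamma * E[v] and v = alpha * logsumexp(q / alpha). A numpy sketch of both pieces on a toy two-state MDP; the reward and transition numbers are invented:

```python
import numpy as np

def softmax(x, tau=1.0):
    """Softmax with inverse temperature tau, shifted for numerical stability."""
    z = x * tau - np.max(x * tau)
    e = np.exp(z)
    return e / e.sum()

def soft_bellman_backup(r, p, v, alpha=1.0, gamma=0.9):
    """One soft value-iteration step: q = r + gamma * E[v], v = alpha * logsumexp(q / alpha)."""
    q = r + gamma * np.einsum('san,n->sa', p, v)
    m = np.max(q / alpha, axis=1, keepdims=True)                        # stabilise logsumexp
    v_new = alpha * (m[:, 0] + np.log(np.sum(np.exp(q / alpha - m), axis=1)))
    return q, v_new

r = np.array([[0.0, 1.0], [0.5, 0.0]])             # toy rewards, shape (states, actions)
p = np.array([[[0.9, 0.1], [0.2, 0.8]],            # toy transitions, shape (s, a, s')
              [[0.5, 0.5], [1.0, 0.0]]])
v = np.zeros(2)
for _ in range(100):
    q, v = soft_bellman_backup(r, p, v)
print(softmax(q[0]))                               # soft-greedy policy in state 0
```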
b3ttin4/network_simulation_and_analysis | [
"56ec3fd497ad95eee6eec00042d332133495288e"
] | [
"network_model/tools/bn_tools_t.py"
] | [
"import numpy as np\n\n\n# Nonlinearity functions (Numpy implementation)\nnl_linear = lambda x: x\nnl_tanh = lambda x: np.tanh(x)\nnl_sigmoid = lambda x: 1./(1+np.exp(-x)) \nnl_rect = lambda x: np.clip(x, 0, np.inf)\n#nl_rect = lambda x: np.clip(x, -np.inf, np.inf)\nnl_shallow_rect = lambda x: np.clip(0.1*x, 0, np.inf)\nnl_clip = lambda x: np.clip(x, 0, 1)\nnl_softplus = lambda x: np.log(1. + np.exp(x)) #\n#'''\n# Nonlinearity functions (Theano implementation)\nimport numpy, theano\nimport numpy.distutils\nimport numpy.distutils.__config__\nimport theano.tensor as T\nnl_linear_t = lambda x: x\nnl_tanh_t = lambda x: T.tanh(x) \nnl_sigmoid_t = lambda x: T.nnet.sigmoid(x) \nnl_fermi_t = lambda x: T.nnet.sigmoid(x*50)\nnl_clip_t = lambda x: T.clip(x, 0., 1.)\nnl_rect_t = lambda x: T.maximum(x, 0.)\nnl_rect_squared_t = lambda x: T.maximum(x**2, 0.)\nnl_shallow_rect_t = lambda x: T.maximum(0.1*x, 0.)\n#'''\ndef convert_input_const_to_time(inp, num_frames):\n if inp.shape[0] != 1:\n raise Exception(\"First axis of inp has to be 1-dim.\")\n if inp.shape[1] != 1:\n inp = inp[:, 0:1, :]\n print('WARNING (bn_tools): Input has more than one frame. Only first frame will be broadcast.')\n \n inp = np.tile(inp, (1, num_frames, 1))\n return inp\n \ndef check_nonlinearities():\n import matplotlib.pyplot as plt\n x_np=np.arange(-5,5,0.1).astype('float32')\n x=theano.shared(x_np) \n# for fkt in [nl_linear_t,nl_rect_t,nl_clip_t,nl_sigmoid_t, nl_tanh_t]:\n for fkt in [nl_clip_t,nl_sigmoid_t]:\n\n y= fkt(x)\n tf = theano.function([],y)\n plt.plot(x_np, tf())\n plt.show()\n \nif __name__=='__main__':\n check_nonlinearities()\n"
] | [
[
"numpy.clip",
"numpy.arange",
"numpy.tile",
"numpy.tanh",
"numpy.exp",
"matplotlib.pyplot.show"
]
] |
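
The tools module in the record above keeps matching numpy and Theano versions of several nonlinearities. A quick numpy-only check of two of them, with arbitrarily chosen sample points:

```python
import numpy as np

nl_rect = lambda x: np.clip(x, 0, np.inf)           # rectifier (ReLU)
nl_softplus = lambda x: np.log(1.0 + np.exp(x))     # smooth rectifier

x = np.linspace(-5, 5, 11)
print(np.round(nl_rect(x), 3))
print(np.round(nl_softplus(x), 3))
```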
project-k-0-1/project-k | [
"fa5be043a3c82daee992d28db25519e2b1b53289"
] | [
"sa_numeric.py"
] | [
"\"\"\" Numerical functions \"\"\"\nimport math\nimport numpy as np\nimport pymysql.cursors\nfrom sa_db import sa_db_access\nACCESS_OBJ = sa_db_access()\nDB_USR = ACCESS_OBJ.username()\nDB_PWD = ACCESS_OBJ.password()\nDB_NAME = ACCESS_OBJ.db_name()\nDB_SRV = ACCESS_OBJ.db_server()\n\ndef get_pct_change(ini_val, new_val):\n \"\"\" xxx \"\"\"\n if not new_val == 0:\n if new_val < ini_val:\n return_data = ((ini_val - new_val) / ini_val) * (-1)\n else:\n return_data = (new_val - ini_val) / new_val\n else:\n return_data = 0\n\n return return_data\n\n\ndef get_stdev(sql):\n \"\"\" xxx \"\"\"\n return_data = 0\n #sql with just one numerical value to compute standard deviation\n connection = pymysql.connect(host=DB_SRV,\n user=DB_USR,\n password=DB_PWD,\n db=DB_NAME,\n charset='utf8mb4',\n cursorclass=pymysql.cursors.DictCursor)\n cursor = connection.cursor(pymysql.cursors.SSCursor)\n cursor.execute(sql)\n list_data = list(cursor.fetchall())\n return_data = np.std(list_data)\n cursor.close()\n connection.close()\n\n return return_data\n\ndef get_volatility_risk(sql, is_portf, symbol):\n \"\"\" xxx \"\"\"\n return_data = 0\n #sql with one numerical column to compute volatility risk\n connection = pymysql.connect(host=DB_SRV,\n user=DB_USR,\n password=DB_PWD,\n db=DB_NAME,\n charset='utf8mb4',\n cursorclass=pymysql.cursors.DictCursor)\n cursor = connection.cursor(pymysql.cursors.SSCursor)\n\n if is_portf:\n sql_i = \"SELECT account_reference FROM instruments WHERE symbol='\"+ str(symbol) +\"'\"\n cursor.execute(sql_i)\n res = cursor.fetchall()\n for row in res:\n reference = row[0]\n else:\n cursor.execute(sql)\n res = cursor.fetchall()\n for row in res:\n reference = row[0]\n cursor.close()\n connection.close()\n\n stdev = get_stdev(sql)\n ref_price = reference - stdev\n return_data = abs(get_pct_change(reference, ref_price))\n return return_data\n\ndef get_mdd(sql):\n \"\"\" xxx \"\"\"\n return_data = 0\n #sql with just one numerical value to compute maximum drawdown\n connection = pymysql.connect(host=DB_SRV,\n user=DB_USR,\n password=DB_PWD,\n db=DB_NAME,\n charset='utf8mb4',\n cursorclass=pymysql.cursors.DictCursor)\n cursor = connection.cursor(pymysql.cursors.SSCursor)\n cursor.execute(sql)\n res = cursor.fetchall()\n top = 0\n breset = math.pow(10, 100)\n bottom = breset\n pct_dd = 0\n cur_dd = 0\n for row in res:\n val = row[0]\n\n if val > top:\n top = val\n bottom = breset\n\n if val < bottom:\n bottom = val\n\n if bottom < top:\n cur_dd = abs(get_pct_change(bottom, top))\n else:\n cur_dd = 0\n\n if cur_dd > pct_dd:\n pct_dd = cur_dd\n cursor.close()\n connection.close()\n\n return_data = pct_dd\n return return_data\n\ndef get_romad(sql):\n \"\"\" xxx \"\"\"\n return_data = 0\n #sql with one column as numerical value to compute return on maximum drawdown\n #ordered by date ASC\n connection = pymysql.connect(host=DB_SRV,\n user=DB_USR,\n password=DB_PWD,\n db=DB_NAME,\n charset='utf8mb4',\n cursorclass=pymysql.cursors.DictCursor)\n cursor = connection.cursor(pymysql.cursors.SSCursor)\n cursor.execute(sql)\n res = cursor.fetchall()\n i = 0\n first = 0\n last = 0\n for row in res:\n if i == 0:\n first = row[0]\n last = row[0]\n i += 1\n cursor.close()\n connection.close()\n\n instrument_returns = get_pct_change(first, last)\n drawdown = get_mdd(sql)\n\n if drawdown >0:\n return_data = instrument_returns / drawdown\n else:\n return_data = 0\n\n return return_data\n"
] | [
[
"numpy.std"
]
] |
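
`get_mdd` in the record above walks an equity curve once while tracking the running peak and trough. A simplified standalone variant of that maximum-drawdown logic over a plain list of values; the sample series is made up:

```python
def max_drawdown(values):
    """Largest peak-to-trough decline, expressed as a fraction of the peak."""
    peak = float('-inf')
    worst = 0.0
    for v in values:
        if v > peak:
            peak = v                       # a new running peak resets the drawdown window
        if peak > 0:
            worst = max(worst, (peak - v) / peak)
    return worst

prices = [100, 120, 90, 95, 130, 80, 140]  # toy equity curve
print(max_drawdown(prices))                # (130 - 80) / 130 ~= 0.3846
```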
parachutel/garage | [
"e9d4301278f5dd31e3cbd20df1422befa2d0b6c4",
"f4a6271edd0f9c280c306d1f0bbf4bc1591ab85e",
"f4a6271edd0f9c280c306d1f0bbf4bc1591ab85e",
"f4a6271edd0f9c280c306d1f0bbf4bc1591ab85e"
] | [
"tests/benchmarks/test_benchmark_trpo.py",
"tests/fixtures/models/simple_gaussian_mlp_model.py",
"tests/fixtures/fixtures.py",
"tests/garage/torch/policies/test_deterministic_policy.py"
] | [
"'''\nThis script creates a regression test over garage-TRPO and baselines-TRPO.\n\nUnlike garage, baselines doesn't set max_path_length. It keeps steps the action\nuntil it's done. So we introduced tests.wrappers.AutoStopEnv wrapper to set\ndone=True when it reaches max_path_length. We also need to change the\ngarage.tf.samplers.BatchSampler to smooth the reward curve.\n'''\nimport datetime\nimport os.path as osp\nimport random\n\nfrom baselines import logger as baselines_logger\nfrom baselines.bench import benchmarks\nfrom baselines.common.tf_util import _PLACEHOLDER_CACHE\nfrom baselines.ppo1.mlp_policy import MlpPolicy\nfrom baselines.trpo_mpi import trpo_mpi\nimport dowel\nfrom dowel import logger as dowel_logger\nimport gym\nimport pytest\nimport tensorflow as tf\n\nfrom garage.envs import normalize\nfrom garage.experiment import deterministic\nfrom garage.tf.algos import TRPO\nfrom garage.tf.baselines import GaussianMLPBaseline\nfrom garage.tf.envs import TfEnv\nfrom garage.tf.experiment import LocalTFRunner\nfrom garage.tf.policies import GaussianMLPPolicy\nimport tests.helpers as Rh\nfrom tests.wrappers import AutoStopEnv\n\n\nclass TestBenchmarkPPO:\n '''Compare benchmarks between garage and baselines.'''\n\n @pytest.mark.huge\n def test_benchmark_trpo(self):\n '''\n Compare benchmarks between garage and baselines.\n\n :return:\n '''\n mujoco1m = benchmarks.get_benchmark('Mujoco1M')\n\n timestamp = datetime.datetime.now().strftime('%Y-%m-%d-%H-%M-%S-%f')\n benchmark_dir = './data/local/benchmarks/trpo/%s/' % timestamp\n result_json = {}\n for task in mujoco1m['tasks']:\n env_id = task['env_id']\n env = gym.make(env_id)\n baseline_env = AutoStopEnv(env_name=env_id, max_path_length=100)\n\n seeds = random.sample(range(100), task['trials'])\n\n task_dir = osp.join(benchmark_dir, env_id)\n plt_file = osp.join(benchmark_dir,\n '{}_benchmark.png'.format(env_id))\n baselines_csvs = []\n garage_csvs = []\n\n for trial in range(task['trials']):\n _PLACEHOLDER_CACHE.clear()\n seed = seeds[trial]\n\n trial_dir = task_dir + '/trial_%d_seed_%d' % (trial + 1, seed)\n garage_dir = trial_dir + '/garage'\n baselines_dir = trial_dir + '/baselines'\n\n with tf.Graph().as_default():\n # Run garage algorithms\n env.reset()\n garage_csv = run_garage(env, seed, garage_dir)\n\n # Run baseline algorithms\n baseline_env.reset()\n baselines_csv = run_baselines(baseline_env, seed,\n baselines_dir)\n\n garage_csvs.append(garage_csv)\n baselines_csvs.append(baselines_csv)\n\n Rh.plot(\n b_csvs=baselines_csvs,\n g_csvs=garage_csvs,\n g_x='Iteration',\n g_y='AverageReturn',\n b_x='EpThisIter',\n b_y='EpRewMean',\n trials=task['trials'],\n seeds=seeds,\n plt_file=plt_file,\n env_id=env_id,\n x_label='Iteration',\n y_label='AverageReturn')\n\n result_json[env_id] = Rh.create_json(\n b_csvs=baselines_csvs,\n g_csvs=garage_csvs,\n seeds=seeds,\n trails=task['trials'],\n g_x='Iteration',\n g_y='AverageReturn',\n b_x='TimestepsSoFar',\n b_y='EpRewMean',\n factor_g=1024,\n factor_b=1)\n env.close()\n\n Rh.write_file(result_json, 'TRPO')\n\n\ndef run_garage(env, seed, log_dir):\n '''\n Create garage model and training.\n\n Replace the trpo with the algorithm you want to run.\n\n :param env: Environment of the task.\n :param seed: Random seed for the trial.\n :param log_dir: Log dir path.\n :return:import baselines.common.tf_util as U\n '''\n deterministic.set_seed(seed)\n\n with LocalTFRunner() as runner:\n env = TfEnv(normalize(env))\n\n policy = GaussianMLPPolicy(\n env_spec=env.spec,\n hidden_sizes=(32, 32),\n 
hidden_nonlinearity=tf.nn.tanh,\n output_nonlinearity=None,\n )\n\n baseline = GaussianMLPBaseline(\n env_spec=env.spec,\n regressor_args=dict(\n hidden_sizes=(32, 32),\n use_trust_region=True,\n ),\n )\n\n algo = TRPO(\n env_spec=env.spec,\n policy=policy,\n baseline=baseline,\n max_path_length=100,\n discount=0.99,\n gae_lambda=0.98,\n max_kl_step=0.01,\n policy_ent_coeff=0.0,\n )\n\n # Set up logger since we are not using run_experiment\n tabular_log_file = osp.join(log_dir, 'progress.csv')\n dowel_logger.add_output(dowel.CsvOutput(tabular_log_file))\n dowel_logger.add_output(dowel.StdOutput())\n dowel_logger.add_output(dowel.TensorBoardOutput(log_dir))\n\n runner.setup(algo, env)\n runner.train(n_epochs=976, batch_size=1024)\n\n dowel_logger.remove_all()\n\n return tabular_log_file\n\n\ndef run_baselines(env, seed, log_dir):\n '''\n Create baselines model and training.\n\n Replace the trpo and its training with the algorithm you want to run.\n\n :param env: Environment of the task.\n :param seed: Random seed for the trial.\n :param log_dir: Log dir path.\n :return\n '''\n with tf.compat.v1.Session().as_default():\n baselines_logger.configure(log_dir)\n\n def policy_fn(name, ob_space, ac_space):\n return MlpPolicy(\n name=name,\n ob_space=ob_space,\n ac_space=ac_space,\n hid_size=32,\n num_hid_layers=2)\n\n trpo_mpi.learn(\n env,\n policy_fn,\n timesteps_per_batch=1024,\n max_kl=0.01,\n cg_iters=10,\n cg_damping=0.1,\n max_timesteps=int(1e6),\n gamma=0.99,\n lam=0.98,\n vf_iters=5,\n vf_stepsize=1e-3)\n env.close()\n\n return osp.join(log_dir, 'progress.csv')\n",
"import tensorflow as tf\n\nfrom garage.tf.distributions import DiagonalGaussian\nfrom garage.tf.models import Model\n\n\nclass SimpleGaussianMLPModel(Model):\n \"\"\"Simple GaussianMLPModel for testing.\"\"\"\n\n def __init__(self,\n output_dim,\n name='SimpleGaussianMLPModel',\n *args,\n **kwargs):\n super().__init__(name)\n self.output_dim = output_dim\n\n def network_output_spec(self):\n return ['sample', 'mean', 'log_std', 'std_param', 'dist']\n\n def _build(self, obs_input, name=None):\n return_var = tf.compat.v1.get_variable(\n 'return_var', (), initializer=tf.constant_initializer(0.5))\n mean = tf.fill((tf.shape(obs_input)[0], self.output_dim), return_var)\n log_std = tf.fill((tf.shape(obs_input)[0], self.output_dim), 0.5)\n action = mean + log_std * 0.5\n dist = DiagonalGaussian(self.output_dim)\n # action will be 0.5 + 0.5 * 0.5 = 0.75\n return action, mean, log_std, log_std, dist\n",
"import gc\n\nfrom dowel import logger\nimport tensorflow as tf\n\nfrom garage.experiment import deterministic\nfrom tests.fixtures.logger import NullOutput\n\n\nclass TfTestCase:\n def setup_method(self):\n self.sess = tf.compat.v1.Session()\n self.sess.__enter__()\n\n def teardown_method(self):\n self.sess.__exit__(None, None, None)\n self.sess.close()\n del self.sess\n gc.collect()\n\n\nclass TfGraphTestCase:\n def setup_method(self):\n tf.reset_default_graph()\n self.graph = tf.Graph()\n for c in self.graph.collections:\n self.graph.clear_collection(c)\n self.graph_manager = self.graph.as_default()\n self.graph_manager.__enter__()\n self.sess = tf.Session(graph=self.graph)\n self.sess_manager = self.sess.as_default()\n self.sess_manager.__enter__()\n self.sess.__enter__()\n logger.add_output(NullOutput())\n deterministic.set_seed(1)\n\n # initialize global singleton_pool for each test case\n from garage.sampler import singleton_pool\n singleton_pool.initialize(1)\n\n def teardown_method(self):\n logger.remove_all()\n self.sess.__exit__(None, None, None)\n self.sess_manager.__exit__(None, None, None)\n self.graph_manager.__exit__(None, None, None)\n self.sess.close()\n\n # These del are crucial to prevent ENOMEM in the CI\n # b/c TensorFlow does not release memory explicitly\n del self.graph\n del self.sess\n gc.collect()\n",
"import pickle\n\nimport numpy as np\nimport pytest\nimport torch\nfrom torch import nn\n\nfrom garage.tf.envs import TfEnv\nfrom garage.torch.modules import MLPModule\nfrom garage.torch.policies import DeterministicPolicy\nfrom tests.fixtures.envs.dummy import DummyBoxEnv\n\n\nclass TestContinuousNNPolicies:\n # yapf: disable\n @pytest.mark.parametrize('obs_dim, act_dim, hidden_sizes', [\n (1, 1, (1, )),\n (2, 2, (2, )),\n (3, 3, (3, )),\n (4, 4, (1, 1)),\n (5, 5, (2, 2)),\n ])\n # yapf: enable\n def test_get_action(self, obs_dim, act_dim, hidden_sizes):\n env_spec = TfEnv(DummyBoxEnv())\n obs = torch.ones([1, obs_dim], dtype=torch.float32)\n nn_module = MLPModule(\n input_dim=obs_dim,\n output_dim=act_dim,\n hidden_nonlinearity=None,\n hidden_sizes=hidden_sizes,\n hidden_w_init=nn.init.ones_,\n output_w_init=nn.init.ones_)\n\n policy = DeterministicPolicy(env_spec, nn_module)\n expected_output = np.full([1, act_dim],\n fill_value=obs_dim * np.prod(hidden_sizes),\n dtype=np.float32)\n assert np.array_equal(policy.get_action(obs), expected_output)\n\n # yapf: disable\n @pytest.mark.parametrize('obs_dim, act_dim, batch_size, hidden_sizes', [\n (3, 6, 1, (1, )),\n (4, 7, 1, (2, )),\n (5, 8, 2, (3, )),\n (6, 9, 2, (1, 1)),\n (7, 10, 3, (2, 2)),\n ])\n # yapf: enable\n def test_get_actions(self, obs_dim, act_dim, batch_size, hidden_sizes):\n env_spec = TfEnv(DummyBoxEnv())\n obs = torch.ones([batch_size, obs_dim], dtype=torch.float32)\n nn_module = MLPModule(\n input_dim=obs_dim,\n output_dim=act_dim,\n hidden_nonlinearity=None,\n hidden_sizes=hidden_sizes,\n hidden_w_init=nn.init.ones_,\n output_w_init=nn.init.ones_)\n\n policy = DeterministicPolicy(env_spec, nn_module)\n expected_output = np.full([batch_size, act_dim],\n fill_value=obs_dim * np.prod(hidden_sizes),\n dtype=np.float32)\n assert np.array_equal(policy.get_actions(obs), expected_output)\n\n # yapf: disable\n @pytest.mark.parametrize('obs_dim, act_dim, batch_size, hidden_sizes', [\n (3, 6, 1, (1, )),\n (4, 7, 1, (2, )),\n (5, 8, 2, (3, )),\n (6, 9, 2, (1, 1)),\n (7, 10, 3, (2, 2)),\n ])\n # yapf: enable\n def test_is_pickleable(self, obs_dim, act_dim, batch_size, hidden_sizes):\n env_spec = TfEnv(DummyBoxEnv())\n obs = torch.ones([batch_size, obs_dim], dtype=torch.float32)\n nn_module = MLPModule(\n input_dim=obs_dim,\n output_dim=act_dim,\n hidden_nonlinearity=None,\n hidden_sizes=hidden_sizes,\n hidden_w_init=nn.init.ones_,\n output_w_init=nn.init.ones_)\n\n policy = DeterministicPolicy(env_spec, nn_module)\n output1 = policy.get_actions(obs)\n\n p = pickle.dumps(policy)\n policy_pickled = pickle.loads(p)\n output2 = policy_pickled.get_actions(obs)\n assert np.array_equal(output1, output2)\n"
] | [
[
"tensorflow.compat.v1.Session",
"tensorflow.Graph"
],
[
"tensorflow.constant_initializer",
"tensorflow.shape"
],
[
"tensorflow.compat.v1.Session",
"tensorflow.Graph",
"tensorflow.reset_default_graph",
"tensorflow.Session"
],
[
"numpy.prod",
"torch.ones",
"numpy.array_equal"
]
] |
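
The DeterministicPolicy tests in the record above check a closed-form value: with ones-initialised weights, no biases and no hidden nonlinearity, an all-ones observation drives every output unit to obs_dim * prod(hidden_sizes). A small PyTorch-only sketch of why that holds; the dimensions are arbitrary and this bypasses the garage wrappers entirely:

```python
import numpy as np
import torch
from torch import nn

obs_dim, act_dim, hidden_sizes = 4, 3, (2, 2)     # arbitrary sizes for illustration

# Linear layers with all-ones weights and no bias simply sum their inputs, so an
# all-ones input is multiplied by each hidden width in turn.
layers, in_dim = [], obs_dim
for width in hidden_sizes + (act_dim,):
    lin = nn.Linear(in_dim, width, bias=False)
    nn.init.ones_(lin.weight)
    layers.append(lin)
    in_dim = width
mlp = nn.Sequential(*layers)

obs = torch.ones(1, obs_dim)
expected = np.full((1, act_dim), obs_dim * np.prod(hidden_sizes), dtype=np.float32)
print(torch.allclose(mlp(obs), torch.from_numpy(expected)))   # True
```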
zahrag/3DHARSOM | [
"f934d0b5786d2edac29a7a18be31fa74aafcb881"
] | [
"codes/SOM.py"
] | [
"\n\"\"\"\n Author: Zahra Gharaee.\n This code is written for the 3D-Human-Action-Recognition Project, started March 14 2014.\n \"\"\"\n\nimport numpy as np\nfrom numpy import linalg as LA\n\n\nclass SOM:\n\n def __init__(self, learning, outputsize_x, outputsize_y, inputsize, sigma, softmax_exponent, max_epoch):\n\n self.name = 'SOM'\n self.learning = learning\n self.outputsize_x = outputsize_x\n self.outputsize_y = outputsize_y\n self.inputsize = inputsize\n self.sigma = sigma\n self.softmax_exponent = softmax_exponent\n self.max_epoch = max_epoch\n self.metric = 'Euclidean'\n self.normalize_input = False\n self.normalize_weights = False\n self.softmax_normalization = True\n self.neighborhood_decay = 0.9999\n self.neighborhood_min = 1\n self.learningRate = 0.1\n self.learningRate_decay = 0.9999\n self.learningRate_min = 0.01\n self.neighborhood_radius = outputsize_x\n self.node_map = np.zeros((outputsize_x, outputsize_y, 2))\n self.weights = np.random.rand(outputsize_x, outputsize_y, inputsize) # Rows, Columns, Depth\n\n for i in range(outputsize_x):\n for j in range(outputsize_y):\n self.node_map[i, j, 0] = i\n self.node_map[i, j, 1] = j\n\n def normalize(self, state):\n\n if self.normalize_input:\n state /= LA.norm(np.expand_dims(state, axis=0))\n\n return state\n\n def soft_max_normalization(self, state):\n\n m = np.max(state)\n if m != 0:\n state /= m\n\n return state\n\n def set_activity(self, state):\n\n if self.metric == 'Euclidean':\n dist = np.sum((state - self.weights) ** 2, axis=2)\n activity = np.exp(-dist / self.sigma)\n\n else:\n # Scalar Product\n mat_mul = state * self.weights\n activity = mat_mul.sum(axis=2)\n\n if self.softmax_exponent != 1:\n activity = activity ** self.softmax_exponent\n\n if self.softmax_normalization:\n activity = self.soft_max_normalization(activity)\n\n return activity\n\n def find_winning_node(self, activity):\n\n winner_x, winner_y = np.unravel_index(np.argmax(activity, axis=None), activity.shape)\n winning_node = np.array([winner_x, winner_y])\n\n return winning_node\n\n def learn(self, state, winner):\n\n dis = np.sum((self.node_map - winner) ** 2, axis=2)\n gus = np.exp(-dis / (2 * self.neighborhood_radius ** 2))\n err = state - self.weights\n self.weights += self.learningRate * (err.T * gus.T).T\n\n def learning_decay(self):\n\n self.learningRate *= self.learningRate_decay\n if self.learningRate < self.learningRate_min:\n self.learningRate = self.learningRate_min\n\n self.neighborhood_radius *= self.neighborhood_decay\n if self.neighborhood_radius < self.neighborhood_min:\n self.neighborhood_radius = self.neighborhood_min\n\n def run_SOM(self, state):\n\n state = self.normalize(state)\n\n activity = self.set_activity(state)\n\n winner = self.find_winning_node(activity)\n\n if self.learning:\n self.learn(state, winner)\n self.learning_decay()\n\n return activity, winner\n\n\n\n\n"
] | [
[
"numpy.expand_dims",
"numpy.max",
"numpy.argmax",
"numpy.random.rand",
"numpy.exp",
"numpy.array",
"numpy.zeros",
"numpy.sum"
]
] |
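
The SOM class in the record above computes a Gaussian activity map, picks the best-matching node, and pulls the weights toward the input with a neighbourhood kernel. A condensed numpy sketch of one such update step; map size, sigma, radius and learning rate are arbitrary:

```python
import numpy as np

rng = np.random.default_rng(0)
H, W, D = 10, 10, 3                      # map height, map width, input dimension
weights = rng.random((H, W, D))
node_xy = np.stack(np.meshgrid(np.arange(H), np.arange(W), indexing='ij'), axis=2)

def som_step(x, weights, sigma=2.0, radius=3.0, lr=0.1):
    """One SOM update: Gaussian activity, winner lookup, neighbourhood-weighted pull toward x."""
    dist = np.sum((x - weights) ** 2, axis=2)
    activity = np.exp(-dist / sigma)
    winner = np.unravel_index(np.argmax(activity), activity.shape)
    grid_dist = np.sum((node_xy - np.array(winner)) ** 2, axis=2)
    gauss = np.exp(-grid_dist / (2 * radius ** 2))
    weights += lr * gauss[..., None] * (x - weights)   # in-place update of the weight cube
    return activity, winner

x = rng.random(D)
activity, winner = som_step(x, weights)
print(winner, activity[winner])
```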
JPLMLIA/libeos | [
"3ad25c22159edf79d407454e32b8f07333cb57c2"
] | [
"pims/els_data.py"
] | [
"# Cassini CAPS ELS data reader\n# Modeled after Gary's MDIS reader\n# Kiri Wagstaff, 11/28/18\n\nimport os\nfrom datetime import datetime\nfrom collections import defaultdict\nimport numpy as np\nfrom pds.core.parser import Parser\nfrom scipy.interpolate import interp1d\n\nGEOMFILE = os.path.join(\n os.path.dirname(os.path.realpath(__file__)),\n 'ref',\n 'geometricfactor.npz'\n)\n_EARRAY = None\n_GEOM = None\n\nE_CHARGE_COULOMBS = 1.602176487e-19\nE_MASS_KG = 9.10938188e-31\n\ndef _load_gfactors():\n \"\"\"\n Using global variables here because we only want to read these values from\n file once, then cache them at the module level\n \"\"\"\n global _EARRAY\n global _GEOM\n if _EARRAY is None:\n sav = np.load(GEOMFILE)\n _EARRAY = sav['earray']\n _GEOM = sav['geom']\n\ndef needs_gfactors(f):\n \"\"\"\n Decorator for any function that needs to have the geometric factors loaded\n first (calls `_load_gfactors` prior to calling the function).\n \"\"\"\n def fprime(*args, **kwargs):\n _load_gfactors()\n return f(*args, **kwargs)\n return fprime\n\n@needs_gfactors\ndef compute_def(e, counts):\n \"\"\"\n Computes the Differential Energy Flux (DEF)\n Units: m^-2 sr^-1 s^-1\n\n According to Abi's script and the CAPS User Guide, this is done by dividing\n the counts by the anode- and energy-specific geometric factors.\n \"\"\"\n\n # According to section 9.2 of the CAPS PDS User Guide, the proper thing to\n # do is interpolate the geometric factors: \"If the ELS data record you are\n # working with has energy summing ... then you can use the above table to\n # interpolate the value you need for G.\"\n geom_interp = interp1d(\n _EARRAY, _GEOM, axis=0,\n fill_value='extrapolate',\n bounds_error=False,\n assume_sorted=True,\n )\n G = geom_interp(e)\n\n # newaxis is for the \"phi\" dimension of the data\n return counts / G[..., np.newaxis]\n\ndef compute_dnf(e, def_data):\n \"\"\"\n Computes the Differential Number Flux (DNF)\n Units: m^-2 sr^-1 s^-1 J^-1\n\n Following Abi's script and the CAPS User Guide, this is the DEF divided by\n the product of the energy and the charge of the particle (electron).\n \"\"\"\n # Add the new axes to broadcast across the theta/phi dimensions\n return def_data / (E_CHARGE_COULOMBS*e[..., np.newaxis, np.newaxis])\n\ndef compute_psd(e, def_data):\n \"\"\"\n Computes the Phase Space Density (PSD)\n Units: m^-6 s^-3\n\n Following Abi's script and the CAPS User Guide, this is the DEF times a\n factor of (mass^2 / (2 q^2 E^2)).\n the product of the energy and the charge of the particle (electron).\n \"\"\"\n qE_squared = (E_CHARGE_COULOMBS*e)**2\n # Add the new axes to broadcast across the theta/phi dimensions\n return (\n def_data * (E_MASS_KG**2) /\n (2 * qE_squared[..., np.newaxis, np.newaxis])\n )\n\ndef parse_dates(datearray):\n return np.array([\n datetime.strptime(row.tostring(), '%Y-%jT%H:%M:%S.%f')\n for row in datearray\n ])\n\ndef reshape_data(data):\n # Dimensions taken from ELS_V01.FMT\n # (records, energy, theta, phi)\n return data.reshape((-1, 63, 8, 1))\n\nclass ELS(object):\n\n COLUMNS = (\n # Values obtained from ELS_V01.FMT\n # Name, start byte, dtype, items, missing constant\n ('start_date', 1, np.uint8, 21, None),\n ('dead_time_method', 22, np.uint8, 1, None),\n ('record_dur', 25, np.float32, 1, 65535.0),\n ('acc_time', 29, np.float32, 63, 65535.0),\n ('data', 281, np.float32, 504, 65535.0),\n ('dim1_e', 2297, np.float32, 63, 65535.0),\n ('dim1_e_upper', 2549, np.float32, 63, 65535.0),\n ('dim1_e_lower', 2801, np.float32, 63, 65535.0),\n ('dim2_theta', 
3053, np.float32, 8, 65535.0),\n ('dim2_theta_upper', 3085, np.float32, 8, 65535.0),\n ('dim2_theta_lower', 3117, np.float32, 8, 65535.0),\n ('dim3_phi', 3149, np.float32, 1, 65535.0),\n ('dim3_phi_upper', 3153, np.float32, 1, 65535.0),\n ('dim3_phi_lower', 3157, np.float32, 1, 65535.0),\n )\n\n POSTPROCESS = {\n 'start_date': parse_dates,\n 'data': reshape_data,\n }\n\n def __init__(self, data_path, lbl_path=None, verbose=False):\n \"\"\"\n If the LBL file path is not specified, we'll assume that it is\n sitting right next to the DAT file (and raise an Error if not).\n \"\"\"\n self.data_path = data_path\n if lbl_path is None:\n # Infer the LBL path if not supplied\n data_base, data_ext = os.path.splitext(data_path)\n if data_ext.lower() == data_ext:\n lbl_path = data_base + '.lbl'\n else:\n lbl_path = data_base + '.LBL'\n\n if not os.path.exists(lbl_path):\n raise ValueError('Expected LBL file \"%s\" does not exist' % lbl_path)\n\n self.lbl_path = lbl_path\n self.verbose = verbose\n\n self._load()\n\n def _log(self, msg):\n if self.verbose:\n print(msg)\n\n def _load(self):\n with open(self.lbl_path, 'r') as f:\n parser = Parser()\n labels = parser.parse(f)\n\n record_bytes = int(labels['RECORD_BYTES'])\n nrecords = int(labels['FILE_RECORDS'])\n\n columns = defaultdict(list)\n with open(self.data_path, 'rb') as f:\n for i in range(nrecords):\n for cname, cstart, ctype, citems, _ in ELS.COLUMNS:\n # Subtract 1 because they are indexed from 1 in the .FMT\n f.seek(i*record_bytes + cstart - 1)\n columns[cname].append(f.read(np.dtype(ctype).itemsize*citems))\n\n for cname, _, ctype, citems, missing in ELS.COLUMNS:\n cstr = ''.join(columns[cname])\n col = np.fromstring(cstr, dtype=ctype, count=nrecords*citems)\n col = np.squeeze(col.reshape((nrecords, citems)))\n\n # Replace missing value with NaN\n if missing is not None:\n col[col == missing] = np.nan\n\n # Apply post-processing steps to appropriate columns\n if cname in ELS.POSTPROCESS:\n col = ELS.POSTPROCESS[cname](col)\n\n # Store column as object attribute\n setattr(self, cname, col)\n\n # Add iso_data by summing across theta/phi\n self.iso_data = np.sum(self.data, axis=(-2, -1))\n\n # Compute DEF, DNF, and PSD\n self.def_data = compute_def(self.dim1_e, self.data)\n self.dnf_data = compute_dnf(self.dim1_e, self.def_data)\n self.psd_data = compute_psd(self.dim1_e, self.def_data)\n"
] | [
[
"numpy.dtype",
"scipy.interpolate.interp1d",
"numpy.fromstring",
"numpy.load",
"numpy.sum"
]
] |
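
The ELS reader in the record above interpolates anode- and energy-dependent geometric factors with `interp1d` and divides them out of the raw counts to obtain a differential energy flux. A minimal sketch of that interpolate-then-divide step; the calibration table here is synthetic, whereas the real one is loaded from geometricfactor.npz:

```python
import numpy as np
from scipy.interpolate import interp1d

# synthetic calibration table: one geometric factor per (energy, anode) pair
cal_energy = np.linspace(1.0, 100.0, 16)                         # eV, sorted ascending
cal_geom = np.outer(np.sqrt(cal_energy), np.ones(8)) * 1e-5      # shape (16, 8), invented values

geom_interp = interp1d(cal_energy, cal_geom, axis=0,
                       fill_value='extrapolate', bounds_error=False,
                       assume_sorted=True)

energies = np.array([2.5, 10.0, 250.0])      # record energies; the last one is extrapolated
counts = np.ones((3, 8, 1))                  # (energy, anode, phi) counts, toy data

G = geom_interp(energies)                    # interpolated factors, shape (3, 8)
flux = counts / G[..., np.newaxis]           # differential energy flux, up to units
print(flux.shape)
```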
Shuai-Xie/openseg.pytorch | [
"79116a58782ccd2150f9eb9054e70cfd42fc9773"
] | [
"lib/loss/loss_helper.py"
] | [
"##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++\n## Created by: Donny You, RainbowSecret\n## Microsoft Research\n## [email protected]\n## Copyright (c) 2019\n##\n## This source code is licensed under the MIT-style license found in the\n## LICENSE file in the root directory of this source tree \n##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\nimport pdb\nimport torch\nimport torch.nn as nn\nimport numpy as np\nimport torch.nn.functional as F\nfrom torch.autograd import Variable\nfrom lib.utils.tools.logger import Logger as Log\n\n\nclass WeightedFSOhemCELoss(nn.Module):\n def __init__(self, configer):\n super().__init__()\n self.configer = configer\n self.thresh = self.configer.get('loss', 'params')['ohem_thresh']\n self.reduction = 'elementwise_mean'\n if self.configer.exists('loss', 'params') and 'ce_reduction' in self.configer.get('loss', 'params'):\n self.reduction = self.configer.get('loss', 'params')['ce_reduction']\n\n def forward(self, predict, target, min_kept=1, weight=None, ignore_index=-1, **kwargs):\n \"\"\"\n Args:\n predict:(n, c, h, w)\n target:(n, h, w)\n \"\"\"\n prob_out = F.softmax(predict, dim=1)\n tmp_target = target.clone()\n tmp_target[tmp_target == ignore_index] = 0\n prob = prob_out.gather(1, tmp_target.unsqueeze(1))\n mask = target.contiguous().view(-1,) != ignore_index\n sort_prob, sort_indices = prob.contiguous().view(-1,)[mask].contiguous().sort()\n min_threshold = sort_prob[min(min_kept, sort_prob.numel() - 1)]\n threshold = max(min_threshold, self.thresh)\n loss_matrix = F.cross_entropy(predict, target, weight=weight, ignore_index=ignore_index, reduction='none').contiguous().view(-1,)\n sort_loss_matrix = loss_matrix[mask][sort_indices]\n select_loss_matrix = sort_loss_matrix[sort_prob < threshold]\n if self.reduction == 'sum':\n return select_loss_matrix.sum()\n elif self.reduction == 'elementwise_mean':\n return select_loss_matrix.mean()\n else:\n raise NotImplementedError('Reduction Error!')\n\n\n# Cross-entropy Loss\nclass FSCELoss(nn.Module):\n def __init__(self, configer=None):\n super(FSCELoss, self).__init__()\n self.configer = configer\n weight = None\n if self.configer.exists('loss', 'params') and 'ce_weight' in self.configer.get('loss', 'params'):\n weight = self.configer.get('loss', 'params')['ce_weight']\n weight = torch.FloatTensor(weight).cuda()\n\n reduction = 'elementwise_mean'\n if self.configer.exists('loss', 'params') and 'ce_reduction' in self.configer.get('loss', 'params'):\n reduction = self.configer.get('loss', 'params')['ce_reduction']\n\n ignore_index = -1\n if self.configer.exists('loss', 'params') and 'ce_ignore_index' in self.configer.get('loss', 'params'):\n ignore_index = self.configer.get('loss', 'params')['ce_ignore_index']\n\n self.ce_loss = nn.CrossEntropyLoss(weight=weight, ignore_index=ignore_index, reduction=reduction)\n\n def forward(self, inputs, *targets, weights=None, **kwargs):\n loss = 0.0\n if isinstance(inputs, tuple) or isinstance(inputs, list):\n if weights is None:\n weights = [1.0] * len(inputs)\n\n for i in range(len(inputs)):\n if len(targets) > 1:\n target = self._scale_target(targets[i], (inputs[i].size(2), inputs[i].size(3)))\n loss += weights[i] * self.ce_loss(inputs[i], target)\n else:\n target = self._scale_target(targets[0], (inputs[i].size(2), inputs[i].size(3)))\n loss += weights[i] * self.ce_loss(inputs[i], 
target)\n\n else:\n target = self._scale_target(targets[0], (inputs.size(2), inputs.size(3)))\n loss = self.ce_loss(inputs, target)\n\n return loss\n\n @staticmethod\n def _scale_target(targets_, scaled_size):\n targets = targets_.clone().unsqueeze(1).float()\n targets = F.interpolate(targets, size=scaled_size, mode='nearest')\n return targets.squeeze(1).long()\n\n\nclass FSOhemCELoss(nn.Module):\n def __init__(self, configer):\n super(FSOhemCELoss, self).__init__()\n self.configer = configer\n self.thresh = self.configer.get('loss', 'params')['ohem_thresh']\n self.min_kept = max(1, self.configer.get('loss', 'params')['ohem_minkeep'])\n weight = None\n if self.configer.exists('loss', 'params') and 'ce_weight' in self.configer.get('loss', 'params'):\n weight = self.configer.get('loss', 'params')['ce_weight']\n weight = torch.FloatTensor(weight).cuda()\n\n self.reduction = 'elementwise_mean'\n if self.configer.exists('loss', 'params') and 'ce_reduction' in self.configer.get('loss', 'params'):\n self.reduction = self.configer.get('loss', 'params')['ce_reduction']\n\n ignore_index = -1\n if self.configer.exists('loss', 'params') and 'ce_ignore_index' in self.configer.get('loss', 'params'):\n ignore_index = self.configer.get('loss', 'params')['ce_ignore_index']\n\n self.ignore_label = ignore_index\n self.ce_loss = nn.CrossEntropyLoss(weight=weight, ignore_index=ignore_index, reduction='none')\n\n def forward(self, predict, target, **kwargs):\n \"\"\"\n Args:\n predict:(n, c, h, w)\n target:(n, h, w)\n weight (Tensor, optional): a manual rescaling weight given to each class.\n If given, has to be a Tensor of size \"nclasses\"\n \"\"\"\n prob_out = F.softmax(predict, dim=1)\n tmp_target = target.clone()\n tmp_target[tmp_target == self.ignore_label] = 0\n prob = prob_out.gather(1, tmp_target.unsqueeze(1))\n mask = target.contiguous().view(-1,) != self.ignore_label\n sort_prob, sort_indices = prob.contiguous().view(-1,)[mask].contiguous().sort()\n min_threshold = sort_prob[min(self.min_kept, sort_prob.numel() - 1)]\n threshold = max(min_threshold, self.thresh)\n loss_matirx = self.ce_loss(predict, target).contiguous().view(-1,)\n sort_loss_matirx = loss_matirx[mask][sort_indices]\n select_loss_matrix = sort_loss_matirx[sort_prob < threshold]\n if self.reduction == 'sum':\n return select_loss_matrix.sum()\n elif self.reduction == 'elementwise_mean':\n return select_loss_matrix.mean()\n else:\n raise NotImplementedError('Reduction Error!')\n\n\nclass FSAuxOhemCELoss(nn.Module):\n def __init__(self, configer=None):\n super(FSAuxOhemCELoss, self).__init__()\n self.configer = configer\n self.ce_loss = FSCELoss(self.configer)\n if self.configer.get('loss', 'loss_type') == 'fs_auxohemce_loss':\n self.ohem_ce_loss = FSOhemCELoss(self.configer)\n else:\n assert self.configer.get('loss', 'loss_type') == 'fs_auxslowohemce_loss'\n self.ohem_ce_loss = FSSlowOhemCELoss(self.configer)\n\n def forward(self, inputs, targets, **kwargs):\n aux_out, seg_out = inputs\n seg_loss = self.ohem_ce_loss(seg_out, targets)\n aux_loss = self.ce_loss(aux_out, targets)\n loss = self.configer.get('network', 'loss_weights')['seg_loss'] * seg_loss\n loss = loss + self.configer.get('network', 'loss_weights')['aux_loss'] * aux_loss\n return loss\n\n\nclass FSAuxCELoss(nn.Module):\n def __init__(self, configer=None):\n super(FSAuxCELoss, self).__init__()\n self.configer = configer\n self.ce_loss = FSCELoss(self.configer)\n\n def forward(self, inputs, targets, **kwargs):\n aux_out, seg_out = inputs\n seg_loss = self.ce_loss(seg_out, 
targets)\n aux_loss = self.ce_loss(aux_out, targets)\n loss = self.configer.get('network', 'loss_weights')['seg_loss'] * seg_loss\n loss = loss + self.configer.get('network', 'loss_weights')['aux_loss'] * aux_loss\n return loss\n\n\nclass SegFixLoss(nn.Module):\n \"\"\"\n We predict a binary mask to categorize the boundary pixels as class 1 and otherwise as class 0\n Based on the pixels predicted as 1 within the binary mask, we further predict the direction for these\n pixels.\n \"\"\"\n\n def __init__(self, configer=None):\n super().__init__()\n self.configer = configer\n self.ce_loss = FSCELoss(self.configer)\n\n def calc_weights(self, label_map, num_classes):\n\n weights = []\n for i in range(num_classes):\n weights.append((label_map == i).sum().data)\n weights = torch.FloatTensor(weights)\n weights_sum = weights.sum()\n return (1 - weights / weights_sum).cuda() \n\n def forward(self, inputs, targets, **kwargs):\n\n from lib.utils.helpers.offset_helper import DTOffsetHelper\n\n pred_mask, pred_direction = inputs\n\n seg_label_map, distance_map, angle_map = targets[0], targets[1], targets[2]\n gt_mask = DTOffsetHelper.distance_to_mask_label(distance_map, seg_label_map, return_tensor=True)\n\n gt_size = gt_mask.shape[1:]\n mask_weights = self.calc_weights(gt_mask, 2)\n\n pred_direction = F.interpolate(pred_direction, size=gt_size, mode=\"bilinear\", align_corners=True)\n pred_mask = F.interpolate(pred_mask, size=gt_size, mode=\"bilinear\", align_corners=True)\n mask_loss = F.cross_entropy(pred_mask, gt_mask, weight=mask_weights, ignore_index=-1)\n\n mask_threshold = float(os.environ.get('mask_threshold', 0.5))\n binary_pred_mask = torch.softmax(pred_mask, dim=1)[:, 1, :, :] > mask_threshold\n\n gt_direction = DTOffsetHelper.angle_to_direction_label(\n angle_map,\n seg_label_map=seg_label_map,\n extra_ignore_mask=(binary_pred_mask == 0),\n return_tensor=True\n )\n\n direction_loss_mask = gt_direction != -1\n direction_weights = self.calc_weights(gt_direction[direction_loss_mask], pred_direction.size(1))\n direction_loss = F.cross_entropy(pred_direction, gt_direction, weight=direction_weights, ignore_index=-1)\n\n if self.training \\\n and self.configer.get('iters') % self.configer.get('solver', 'display_iter') == 0 \\\n and torch.cuda.current_device() == 0:\n Log.info('mask loss: {} direction loss: {}.'.format(mask_loss, direction_loss))\n\n mask_weight = float(os.environ.get('mask_weight', 1))\n direction_weight = float(os.environ.get('direction_weight', 1))\n\n return mask_weight * mask_loss + direction_weight * direction_loss"
] | [
[
"torch.nn.CrossEntropyLoss",
"torch.nn.functional.softmax",
"torch.softmax",
"torch.cuda.current_device",
"torch.nn.functional.cross_entropy",
"torch.FloatTensor",
"torch.nn.functional.interpolate"
]
] |
paulfioravanti/Reinforcement-Learning-In-Motion | [
"e09afd23b82040d76c95875b077ba0a5af517470",
"e09afd23b82040d76c95875b077ba0a5af517470"
] | [
"Unit-7-The-Cartpole/q_learning.py",
"Unit-3-The-Multi-Armed-Bandit/bandit_ucb.py"
] | [
"import gym\nimport numpy as np\nfrom util import plot_running_average\n\n# pylint: disable-msg=redefined-outer-name\ndef max_action(estimates, state):\n values = np.array([estimates[state, i] for i in range(2)])\n action = np.argmax(values)\n return action\n\ndef get_state(observation):\n cart_x, cart_x_dot, cart_theta, cart_theta_dot = observation\n cart_x = int(np.digitize(cart_x, CART_POS_SPACE))\n cart_x_dot = int(np.digitize(cart_x_dot, CART_VEL_SPACE))\n cart_theta = int(np.digitize(cart_theta, POLE_THETA_SPACE))\n cart_theta_dot = int(np.digitize(cart_theta_dot, POLE_THETA_VEL_SPACE))\n\n return (cart_x, cart_x_dot, cart_theta, cart_theta_dot)\n\n# discretize the spaces\nPOLE_THETA_SPACE = np.linspace(-0.20943951, 0.20943951, 10)\nPOLE_THETA_VEL_SPACE = np.linspace(-4, 4, 10)\nCART_POS_SPACE = np.linspace(-2.4, 2.4, 10)\nCART_VEL_SPACE = np.linspace(-4, 4, 10)\n\nif __name__ == \"__main__\":\n ENV = gym.make(\"CartPole-v0\")\n # model hyperparameters\n STEP_SIZE = 0.1\n DISCOUNT = 1.0\n EPSILON = 1.0\n\n # construct state space\n STATES = []\n for i in range(len(CART_POS_SPACE) + 1):\n for j in range(len(CART_VEL_SPACE) + 1):\n for k in range(len(POLE_THETA_SPACE) + 1):\n for l in range(len(POLE_THETA_VEL_SPACE) + 1):\n STATES.append((i, j, k, l))\n\n ESTIMATES = {}\n for state in STATES:\n for action in range(2):\n ESTIMATES[state, action] = 0\n\n NUM_EPISODES = 50000\n REPORT_INTERVAL = 5000\n TOTAL_REWARDS = np.zeros(NUM_EPISODES)\n for i in range(NUM_EPISODES):\n if i % REPORT_INTERVAL == 0:\n print(\"starting game \", i)\n done = False\n episode_rewards = 0\n observation = ENV.reset()\n while not done:\n state = get_state(observation)\n rand = np.random.random()\n if rand < (1 - EPSILON):\n action = max_action(ESTIMATES, state)\n else:\n action = ENV.action_space.sample()\n observation_, reward, done, info = ENV.step(action)\n episode_rewards += reward\n state_ = get_state(observation_)\n action_ = max_action(ESTIMATES, state_)\n ESTIMATES[state, action] = (\n ESTIMATES[state, action] + STEP_SIZE\n * (\n reward + DISCOUNT\n * ESTIMATES[state_, action_] - ESTIMATES[state, action]\n )\n )\n observation = observation_\n if EPSILON - 2 / NUM_EPISODES > 0:\n EPSILON -= 2 / NUM_EPISODES\n else:\n EPSILON = 0\n TOTAL_REWARDS[i] = episode_rewards\n\n plot_running_average(TOTAL_REWARDS)\n",
"from enum import Enum\nimport numpy as np\nfrom matplotlib import pyplot as plt\n\nclass Mode(Enum):\n SAMPLE_AVERAGE = \"Sample Average\"\n CONSTANT = \"Constant Alpha\"\n\n# pylint: disable-msg=too-many-instance-attributes\nclass Bandit:\n __INCREMENT = 1\n\n # pylint: disable-msg=too-many-arguments\n def __init__(\n self,\n num_arms,\n initial_estimate,\n true_rewards,\n epsilon,\n exploration_degree,\n mode):\n self.num_arms = num_arms\n self.reward_estimates = np.full(num_arms, initial_estimate)\n self.pulled_arms_tally = np.zeros(num_arms, dtype=int)\n self.epsilon = epsilon\n self.true_rewards = true_rewards\n self.last_arm_pulled = None\n self.mode = mode\n self.steps = 0\n self.exploration_degree = exploration_degree\n # pylint: enable-msg=too-many-arguments\n\n def pull(self):\n self.__pull_arm()\n self.steps += self.__INCREMENT\n # Rewards are non-stationary.\n return self.__reward()\n\n def update_mean(self, reward):\n self.__increment_arm_tally()\n self.__update_reward_estimates(reward)\n\n def __pull_arm(self):\n if self.__can_confidently_explore():\n self.__run_upper_confidence_bound()\n else:\n self.__run_epsilon_greedy()\n\n def __can_confidently_explore(self):\n return self.exploration_degree > 0\n\n def __run_upper_confidence_bound(self):\n reward_estimates = np.zeros(self.num_arms)\n for arm_num, approx_reward in enumerate(self.reward_estimates):\n if self.__action_is_maximising(arm_num):\n # we've found the action we want to execute,\n # so break out of loop\n self.last_arm_pulled = arm_num\n break\n else:\n reward_estimates[arm_num] = (\n self.__upper_confidence_bound_selection(\n approx_reward, arm_num\n )\n )\n else:\n # At = argmax(Qt(a) + c √ ln t / Nt(a))\n self.last_arm_pulled = self.__random_best_arm(reward_estimates)\n\n # Nt(a) = 0\n def __action_is_maximising(self, arm_number):\n return self.pulled_arms_tally[arm_number] == 0\n\n # Qt(a) + c √ ln t / Nt(a)\n def __upper_confidence_bound_selection(self, approximate_reward, arm_num):\n return (\n approximate_reward\n + self.exploration_degree\n * np.sqrt(\n np.log(self.steps) / self.pulled_arms_tally[arm_num]\n )\n )\n\n def __run_epsilon_greedy(self):\n if self.__should_exploit():\n self.last_arm_pulled = self.__random_best_arm(self.reward_estimates)\n else:\n self.last_arm_pulled = self.__random_arm()\n\n def __should_exploit(self):\n return np.random.random() > self.epsilon\n\n @staticmethod\n def __random_best_arm(reward_estimates):\n # np.where returns an array of row idxs and an array of col idxs but\n # since columns are of length 1, the latter is an empty array.\n # Regardless, the first value in the array needs to be specifically\n # retrieved.\n # REF: https://stackoverflow.com/q/34667282/567863\n best_arms = np.where(reward_estimates == reward_estimates.max())[0]\n return np.random.choice(best_arms)\n\n def __random_arm(self):\n return np.random.choice(self.num_arms)\n\n def __reward(self):\n # Normally distributed random reward centered around the true reward\n # for the last pulled arm\n return np.random.randn() + self.true_rewards[self.last_arm_pulled]\n\n def __increment_arm_tally(self):\n self.pulled_arms_tally[self.last_arm_pulled] += self.__INCREMENT\n\n def __update_reward_estimates(self, reward):\n if self.mode is Mode.SAMPLE_AVERAGE:\n self.__sample_average_update(reward)\n else:\n # Weights most recent rewards more heavily than long past rewards.\n self.__constant_update(reward)\n\n # Q(A) <- Q(A) + 1/N(A)[R-Q(A)]\n def __sample_average_update(self, reward):\n 
self.reward_estimates[self.last_arm_pulled] = (\n self.__old_estimate()\n + self.__sample_average_alpha()\n * self.__error(reward)\n )\n\n # Q(A)\n def __old_estimate(self):\n return self.reward_estimates[self.last_arm_pulled]\n\n # 1/N(A)\n def __sample_average_alpha(self):\n # alpha decreases over time as more actions performed to help converge\n # on the expected reward more efficiently.\n return 1.0 / self.pulled_arms_tally[self.last_arm_pulled]\n\n # R-Q(A) Error in the estimate\n def __error(self, convergence_target):\n return convergence_target - self.__old_estimate()\n\n # Q(A) <- Q(A) + 0.1*[R-Q(A)]\n def __constant_update(self, reward):\n self.reward_estimates[self.last_arm_pulled] = (\n self.__old_estimate()\n + self.__constant_alpha()\n * self.__error(reward)\n )\n\n @staticmethod\n def __constant_alpha():\n return 0.1\n# pylint: enable-msg=too-many-instance-attributes\n\n# pylint: disable-msg=too-many-arguments\ndef simulate(num_simulations,\n num_arms,\n initial_estimate,\n epsilon,\n exploration_degree,\n num_pulls,\n mode):\n reward_history = np.zeros(num_pulls)\n print(f\"Beginning {num_simulations} simulations with:\")\n print(f\"Bandit <num_arms: {num_arms}, epsilon: {epsilon}, \", end=\"\")\n print(f\"exploration_degree: {exploration_degree}, \", end=\"\")\n print(f\"num_pulls: {num_pulls}, mode: {mode.value}>\")\n print(f\"Number of simulations completed:\")\n for simulation_num in range(num_simulations):\n rewards = np.random.randn(num_arms)\n bandit = Bandit(\n num_arms,\n initial_estimate,\n rewards,\n epsilon,\n exploration_degree,\n mode\n )\n if __should_report_simulation_number(simulation_num):\n print(simulation_num, end=\"...\", flush=True)\n for arm_pull in range(num_pulls):\n reward = bandit.pull()\n bandit.update_mean(reward)\n reward_history[arm_pull] += reward\n print(f\"{num_simulations}.\")\n # Average\n return reward_history / __NUM_SIMULATIONS\n# pylint: enable-msg=too-many-arguments\n\ndef __should_report_simulation_number(simulation):\n return simulation % __SIMULATION_STATUS_INTERVAL == 0\n\nif __name__ == \"__main__\":\n __NUM_SIMULATIONS = 2000\n __NUM_ARMS = 5\n __NUM_PULLS = 1000\n __SIMULATION_STATUS_INTERVAL = 200\n __EPSILON1 = 0.1\n __EPSILON2 = 0.0\n __EXPLORATION_DEGREE1 = 0\n __EXPLORATION_DEGREE2 = 2\n __INITIAL_ESTIMATE = 0.0\n\n # Epsilon Greedy\n __RUN1 = simulate(\n __NUM_SIMULATIONS,\n __NUM_ARMS,\n initial_estimate=__INITIAL_ESTIMATE,\n epsilon=__EPSILON1,\n exploration_degree=__EXPLORATION_DEGREE1,\n num_pulls=__NUM_PULLS,\n mode=Mode.CONSTANT\n )\n # Upper Confidence Bound where confidence degree in exploration is 2.\n __RUN2 = simulate(\n __NUM_SIMULATIONS,\n __NUM_ARMS,\n initial_estimate=__INITIAL_ESTIMATE,\n epsilon=__EPSILON2,\n exploration_degree=__EXPLORATION_DEGREE2,\n num_pulls=__NUM_PULLS,\n mode=Mode.CONSTANT\n )\n\n plt.plot(__RUN1, \"b--\", __RUN2, \"r--\")\n plt.legend([\"Epsilon Greedy\", f\"UCB Confidence = {__EXPLORATION_DEGREE2}\"])\n print(\"Simulations complete.\")\n print(\"----------\")\n print(\"A python matplotlib window has opened.\")\n print(\"Switch over to it, and quit there to terminate this script.\")\n plt.show()\n"
] | [
[
"numpy.random.random",
"numpy.linspace",
"numpy.argmax",
"numpy.digitize",
"numpy.zeros"
],
[
"matplotlib.pyplot.legend",
"numpy.log",
"numpy.random.random",
"numpy.random.choice",
"numpy.full",
"matplotlib.pyplot.plot",
"numpy.random.randn",
"matplotlib.pyplot.show",
"numpy.zeros"
]
] |
vardhanaleti/AdversarialQuerying | [
"f2ed5960f345ba448eeb4c9a1f5c819c41d092da",
"f2ed5960f345ba448eeb4c9a1f5c819c41d092da"
] | [
"models/R2D2_embedding.py",
"data/mini_imagenet.py"
] | [
"import torch.nn as nn\nimport torch\nimport math\n\n# Embedding network used in Meta-learning with differentiable closed-form solvers\n# (Bertinetto et al., in submission to NIPS 2018).\n# They call the ridge rigressor version as \"Ridge Regression Differentiable Discriminator (R2D2).\"\n \n# Note that they use a peculiar ordering of functions, namely conv-BN-pooling-lrelu,\n# as opposed to the conventional one (conv-BN-lrelu-pooling).\n \ndef R2D2_conv_block(in_channels, out_channels, retain_activation=True, keep_prob=1.0, activation='LeakyReLU'):\n block = nn.Sequential(\n nn.Conv2d(in_channels, out_channels, 3, padding=1),\n nn.BatchNorm2d(out_channels),\n nn.MaxPool2d(2)\n )\n if retain_activation:\n if activation == 'LeakyReLU':\n block.add_module(\"LeakyReLU\", nn.LeakyReLU(0.1))\n elif activation == 'ReLU':\n block.add_module(\"ReLU\", nn.ReLU())\n elif activation == 'Softplus':\n block.add_module(\"Softplus\", nn.Softplus())\n\n if keep_prob < 1.0:\n block.add_module(\"Dropout\", nn.Dropout(p=1 - keep_prob, inplace=False))\n\n return block\n\nclass R2D2Embedding(nn.Module):\n def __init__(self, x_dim=3, h1_dim=96, h2_dim=192, h3_dim=384, z_dim=512, \\\n retain_last_activation=False, denoise = False, activation='LeakyReLU'):\n super(R2D2Embedding, self).__init__()\n\n self.block1 = R2D2_conv_block(x_dim, h1_dim, activation=activation)\n self.block2 = R2D2_conv_block(h1_dim, h2_dim, activation=activation)\n self.block3 = R2D2_conv_block(h2_dim, h3_dim, keep_prob=0.9, activation=activation)\n self.denoise = denoise\n # In the last conv block, we disable activation function to boost the classification accuracy.\n # This trick was proposed by Gidaris et al. (CVPR 2018).\n # With this trick, the accuracy goes up from 50% to 51%.\n # Although the authors of R2D2 did not mention this trick in the paper,\n # we were unable to reproduce the result of Bertinetto et al. without resorting to this trick.\n self.block4 = R2D2_conv_block(h3_dim, z_dim, retain_activation=retain_last_activation, keep_prob=0.7)\n \n def forward(self, x):\n b1 = self.block1(x)\n b2 = self.block2(b1)\n if self.denoise:\n #print(\"before denoise\", b2.size())\n _, n_in, H, W = b2.size()\n theta = nn.Conv2d(n_in, int(n_in / 2), 1,\n stride=1, bias=False).to('cuda')\n phi = nn.Conv2d(n_in, int(n_in / 2), 1,\n stride=1, bias=False).to('cuda')\n g = b2\n f = torch.einsum('niab,nicd->nabcd', theta(b2), phi(b2))\n orig_shape = f.size()\n f = torch.reshape(f, (-1, H * W, H * W))\n f = f / math.sqrt(n_in)\n softmax = torch.nn.Softmax(dim = 0)\n f = softmax(f)\n f = torch.reshape(f, orig_shape)\n f = torch.einsum('nabcd,nicd->niab', f, g)\n final_conv = nn.Conv2d(f.size()[1], f.size()[1], 1, stride=1, bias=False).to('cuda')\n f = final_conv(f)\n b2 = b2 + f\n #print(\"after denoise\", b2.size())\n b3 = self.block3(b2)\n b4 = self.block4(b3)\n # Flatten and concatenate the output of the 3rd and 4th conv blocks as proposed in R2D2 paper.\n return torch.cat((b3.view(b3.size(0), -1), b4.view(b4.size(0), -1)), 1)\n",
"# Dataloader of Gidaris & Komodakis, CVPR 2018\n# Adapted from:\n# https://github.com/gidariss/FewShotWithoutForgetting/blob/master/dataloader.py\nfrom __future__ import print_function\n\nimport os\nimport os.path\nimport numpy as np\nimport random\nimport pickle\nimport json\nimport math\n\nimport torch\nimport torch.utils.data as data\nimport torchvision\nimport torchvision.datasets as datasets\nimport torchvision.transforms as transforms\nimport torchnet as tnt\n\nimport h5py\n\nfrom PIL import Image\nfrom PIL import ImageEnhance\n\nfrom pdb import set_trace as breakpoint\n\n\n# Set the appropriate paths of the datasets here.\n_MINI_IMAGENET_DATASET_DIR = '../Few-shot-datasets/Mini-ImageNet'\n\ndef buildLabelIndex(labels):\n label2inds = {}\n for idx, label in enumerate(labels):\n if label not in label2inds:\n label2inds[label] = []\n label2inds[label].append(idx)\n\n return label2inds\n\n\ndef load_data(file):\n try:\n with open(file, 'rb') as fo:\n data = pickle.load(fo)\n return data\n except:\n with open(file, 'rb') as f:\n u = pickle._Unpickler(f)\n u.encoding = 'latin1'\n data = u.load()\n return data\n\nclass MiniImageNet(data.Dataset):\n def __init__(self, phase='train', do_not_use_random_transf=False):\n\n self.base_folder = 'miniImagenet'\n assert(phase=='train' or phase=='val' or phase=='test')\n self.phase = phase\n self.name = 'MiniImageNet_' + phase\n\n print('Loading mini ImageNet dataset - phase {0}'.format(phase))\n file_train_categories_train_phase = os.path.join(\n _MINI_IMAGENET_DATASET_DIR,\n 'miniImageNet_category_split_train_phase_train.pickle')\n file_train_categories_val_phase = os.path.join(\n _MINI_IMAGENET_DATASET_DIR,\n 'miniImageNet_category_split_train_phase_val.pickle')\n file_train_categories_test_phase = os.path.join(\n _MINI_IMAGENET_DATASET_DIR,\n 'miniImageNet_category_split_train_phase_test.pickle')\n file_val_categories_val_phase = os.path.join(\n _MINI_IMAGENET_DATASET_DIR,\n 'miniImageNet_category_split_val.pickle')\n file_test_categories_test_phase = os.path.join(\n _MINI_IMAGENET_DATASET_DIR,\n 'miniImageNet_category_split_test.pickle')\n\n if self.phase=='train':\n # During training phase we only load the training phase images\n # of the training categories (aka base categories).\n data_train = load_data(file_train_categories_train_phase)\n self.data = data_train['data']\n self.labels = data_train['labels']\n\n self.label2ind = buildLabelIndex(self.labels)\n self.labelIds = sorted(self.label2ind.keys())\n self.num_cats = len(self.labelIds)\n self.labelIds_base = self.labelIds\n self.num_cats_base = len(self.labelIds_base)\n\n elif self.phase=='val' or self.phase=='test':\n if self.phase=='test':\n # load data that will be used for evaluating the recognition\n # accuracy of the base categories.\n data_base = load_data(file_train_categories_test_phase)\n # load data that will be use for evaluating the few-shot recogniton\n # accuracy on the novel categories.\n data_novel = load_data(file_test_categories_test_phase)\n else: # phase=='val'\n # load data that will be used for evaluating the recognition\n # accuracy of the base categories.\n data_base = load_data(file_train_categories_val_phase)\n # load data that will be use for evaluating the few-shot recogniton\n # accuracy on the novel categories.\n data_novel = load_data(file_val_categories_val_phase)\n\n self.data = np.concatenate(\n [data_base['data'], data_novel['data']], axis=0)\n self.labels = data_base['labels'] + data_novel['labels']\n\n self.label2ind = buildLabelIndex(self.labels)\n 
self.labelIds = sorted(self.label2ind.keys())\n self.num_cats = len(self.labelIds)\n\n self.labelIds_base = buildLabelIndex(data_base['labels']).keys()\n self.labelIds_novel = buildLabelIndex(data_novel['labels']).keys()\n self.num_cats_base = len(self.labelIds_base)\n self.num_cats_novel = len(self.labelIds_novel)\n intersection = set(self.labelIds_base) & set(self.labelIds_novel)\n assert(len(intersection) == 0)\n else:\n raise ValueError('Not valid phase {0}'.format(self.phase))\n\n mean_pix = [x/255.0 for x in [120.39586422, 115.59361427, 104.54012653]]\n std_pix = [x/255.0 for x in [70.68188272, 68.27635443, 72.54505529]]\n normalize = transforms.Normalize(mean=mean_pix, std=std_pix)\n\n if (self.phase=='test' or self.phase=='val') or (do_not_use_random_transf==True):\n self.transform = transforms.Compose([\n lambda x: np.asarray(x),\n transforms.ToTensor()#,\n #normalize\n ])\n else:\n self.transform = transforms.Compose([\n transforms.RandomCrop(84, padding=8),\n transforms.ColorJitter(brightness=0.4, contrast=0.4, saturation=0.4),\n transforms.RandomHorizontalFlip(),\n lambda x: np.asarray(x),\n transforms.ToTensor()#,\n #normalize\n ])\n \n def __getitem__(self, index):\n img, label = self.data[index], self.labels[index]\n # doing this so that it is consistent with all other datasets\n # to return a PIL Image\n img = Image.fromarray(img)\n if self.transform is not None:\n img = self.transform(img)\n return img, label\n\n def __len__(self):\n return len(self.data)\n\n\nclass FewShotDataloader():\n def __init__(self,\n dataset,\n nKnovel=5, # number of novel categories.\n nKbase=-1, # number of base categories.\n nExemplars=1, # number of training examples per novel category.\n nTestNovel=15*5, # number of test examples for all the novel categories.\n nTestBase=15*5, # number of test examples for all the base categories.\n batch_size=1, # number of training episodes per batch.\n num_workers=4,\n epoch_size=2000, # number of batches per epoch.\n ):\n\n self.dataset = dataset\n self.phase = self.dataset.phase\n max_possible_nKnovel = (self.dataset.num_cats_base if self.phase=='train'\n else self.dataset.num_cats_novel)\n assert(nKnovel >= 0 and nKnovel < max_possible_nKnovel)\n self.nKnovel = nKnovel\n\n max_possible_nKbase = self.dataset.num_cats_base\n nKbase = nKbase if nKbase >= 0 else max_possible_nKbase\n if self.phase=='train' and nKbase > 0:\n nKbase -= self.nKnovel\n max_possible_nKbase -= self.nKnovel\n\n assert(nKbase >= 0 and nKbase <= max_possible_nKbase)\n self.nKbase = nKbase\n\n self.nExemplars = nExemplars\n self.nTestNovel = nTestNovel\n self.nTestBase = nTestBase\n self.batch_size = batch_size\n self.epoch_size = epoch_size\n self.num_workers = num_workers\n self.is_eval_mode = (self.phase=='test') or (self.phase=='val')\n\n def sampleImageIdsFrom(self, cat_id, sample_size=1):\n \"\"\"\n Samples `sample_size` number of unique image ids picked from the\n category `cat_id` (i.e., self.dataset.label2ind[cat_id]).\n\n Args:\n cat_id: a scalar with the id of the category from which images will\n be sampled.\n sample_size: number of images that will be sampled.\n\n Returns:\n image_ids: a list of length `sample_size` with unique image ids.\n \"\"\"\n assert(cat_id in self.dataset.label2ind)\n assert(len(self.dataset.label2ind[cat_id]) >= sample_size)\n # Note: random.sample samples elements without replacement.\n return random.sample(self.dataset.label2ind[cat_id], sample_size)\n\n def sampleCategories(self, cat_set, sample_size=1):\n \"\"\"\n Samples `sample_size` number 
of unique categories picked from the\n `cat_set` set of categories. `cat_set` can be either 'base' or 'novel'.\n\n Args:\n cat_set: string that specifies the set of categories from which\n categories will be sampled.\n sample_size: number of categories that will be sampled.\n\n Returns:\n cat_ids: a list of length `sample_size` with unique category ids.\n \"\"\"\n if cat_set=='base':\n labelIds = self.dataset.labelIds_base\n elif cat_set=='novel':\n labelIds = self.dataset.labelIds_novel\n else:\n raise ValueError('Not recognized category set {}'.format(cat_set))\n\n assert(len(labelIds) >= sample_size)\n # return sample_size unique categories chosen from labelIds set of\n # categories (that can be either self.labelIds_base or self.labelIds_novel)\n # Note: random.sample samples elements without replacement.\n return random.sample(labelIds, sample_size)\n\n def sample_base_and_novel_categories(self, nKbase, nKnovel):\n \"\"\"\n Samples `nKbase` number of base categories and `nKnovel` number of novel\n categories.\n\n Args:\n nKbase: number of base categories\n nKnovel: number of novel categories\n\n Returns:\n Kbase: a list of length 'nKbase' with the ids of the sampled base\n categories.\n Knovel: a list of lenght 'nKnovel' with the ids of the sampled novel\n categories.\n \"\"\"\n if self.is_eval_mode:\n assert(nKnovel <= self.dataset.num_cats_novel)\n # sample from the set of base categories 'nKbase' number of base\n # categories.\n Kbase = sorted(self.sampleCategories('base', nKbase))\n # sample from the set of novel categories 'nKnovel' number of novel\n # categories.\n Knovel = sorted(self.sampleCategories('novel', nKnovel))\n else:\n # sample from the set of base categories 'nKnovel' + 'nKbase' number\n # of categories.\n cats_ids = self.sampleCategories('base', nKnovel+nKbase)\n assert(len(cats_ids) == (nKnovel+nKbase))\n # Randomly pick 'nKnovel' number of fake novel categories and keep\n # the rest as base categories.\n random.shuffle(cats_ids)\n Knovel = sorted(cats_ids[:nKnovel])\n Kbase = sorted(cats_ids[nKnovel:])\n\n return Kbase, Knovel\n\n def sample_test_examples_for_base_categories(self, Kbase, nTestBase):\n \"\"\"\n Sample `nTestBase` number of images from the `Kbase` categories.\n\n Args:\n Kbase: a list of length `nKbase` with the ids of the categories from\n where the images will be sampled.\n nTestBase: the total number of images that will be sampled.\n\n Returns:\n Tbase: a list of length `nTestBase` with 2-element tuples. 
The 1st\n element of each tuple is the image id that was sampled and the\n 2nd elemend is its category label (which is in the range\n [0, len(Kbase)-1]).\n \"\"\"\n Tbase = []\n if len(Kbase) > 0:\n # Sample for each base category a number images such that the total\n # number sampled images of all categories to be equal to `nTestBase`.\n KbaseIndices = np.random.choice(\n np.arange(len(Kbase)), size=nTestBase, replace=True)\n KbaseIndices, NumImagesPerCategory = np.unique(\n KbaseIndices, return_counts=True)\n\n for Kbase_idx, NumImages in zip(KbaseIndices, NumImagesPerCategory):\n imd_ids = self.sampleImageIdsFrom(\n Kbase[Kbase_idx], sample_size=NumImages)\n Tbase += [(img_id, Kbase_idx) for img_id in imd_ids]\n\n assert(len(Tbase) == nTestBase)\n\n return Tbase\n\n def sample_train_and_test_examples_for_novel_categories(\n self, Knovel, nTestNovel, nExemplars, nKbase):\n \"\"\"Samples train and test examples of the novel categories.\n\n Args:\n \t Knovel: a list with the ids of the novel categories.\n nTestNovel: the total number of test images that will be sampled\n from all the novel categories.\n nExemplars: the number of training examples per novel category that\n will be sampled.\n nKbase: the number of base categories. It is used as offset of the\n category index of each sampled image.\n\n Returns:\n Tnovel: a list of length `nTestNovel` with 2-element tuples. The\n 1st element of each tuple is the image id that was sampled and\n the 2nd element is its category label (which is in the range\n [nKbase, nKbase + len(Knovel) - 1]).\n Exemplars: a list of length len(Knovel) * nExemplars of 2-element\n tuples. The 1st element of each tuple is the image id that was\n sampled and the 2nd element is its category label (which is in\n the ragne [nKbase, nKbase + len(Knovel) - 1]).\n \"\"\"\n\n if len(Knovel) == 0:\n return [], []\n\n nKnovel = len(Knovel)\n Tnovel = []\n Exemplars = []\n assert((nTestNovel % nKnovel) == 0)\n nEvalExamplesPerClass = int(nTestNovel / nKnovel)\n\n for Knovel_idx in range(len(Knovel)):\n imd_ids = self.sampleImageIdsFrom(\n Knovel[Knovel_idx],\n sample_size=(nEvalExamplesPerClass + nExemplars))\n\n imds_tnovel = imd_ids[:nEvalExamplesPerClass]\n imds_ememplars = imd_ids[nEvalExamplesPerClass:]\n\n Tnovel += [(img_id, nKbase+Knovel_idx) for img_id in imds_tnovel]\n Exemplars += [(img_id, nKbase+Knovel_idx) for img_id in imds_ememplars]\n assert(len(Tnovel) == nTestNovel)\n assert(len(Exemplars) == len(Knovel) * nExemplars)\n random.shuffle(Exemplars)\n\n return Tnovel, Exemplars\n\n def sample_episode(self):\n \"\"\"Samples a training episode.\"\"\"\n nKnovel = self.nKnovel\n nKbase = self.nKbase\n nTestNovel = self.nTestNovel\n nTestBase = self.nTestBase\n nExemplars = self.nExemplars\n\n Kbase, Knovel = self.sample_base_and_novel_categories(nKbase, nKnovel)\n Tbase = self.sample_test_examples_for_base_categories(Kbase, nTestBase)\n Tnovel, Exemplars = self.sample_train_and_test_examples_for_novel_categories(\n Knovel, nTestNovel, nExemplars, nKbase)\n\n # concatenate the base and novel category examples.\n Test = Tbase + Tnovel\n random.shuffle(Test)\n Kall = Kbase + Knovel\n\n return Exemplars, Test, Kall, nKbase\n\n def createExamplesTensorData(self, examples):\n \"\"\"\n Creates the examples image and label tensor data.\n\n Args:\n examples: a list of 2-element tuples, each representing a\n train or test example. 
The 1st element of each tuple\n is the image id of the example and 2nd element is the\n category label of the example, which is in the range\n [0, nK - 1], where nK is the total number of categories\n (both novel and base).\n\n Returns:\n images: a tensor of shape [nExamples, Height, Width, 3] with the\n example images, where nExamples is the number of examples\n (i.e., nExamples = len(examples)).\n labels: a tensor of shape [nExamples] with the category label\n of each example.\n \"\"\"\n images = torch.stack(\n [self.dataset[img_idx][0] for img_idx, _ in examples], dim=0)\n labels = torch.LongTensor([label for _, label in examples])\n return images, labels\n\n def get_iterator(self, epoch=0):\n rand_seed = epoch\n random.seed(rand_seed)\n np.random.seed(rand_seed)\n def load_function(iter_idx):\n Exemplars, Test, Kall, nKbase = self.sample_episode()\n Xt, Yt = self.createExamplesTensorData(Test)\n Kall = torch.LongTensor(Kall)\n if len(Exemplars) > 0:\n Xe, Ye = self.createExamplesTensorData(Exemplars)\n return Xe, Ye, Xt, Yt, Kall, nKbase\n else:\n return Xt, Yt, Kall, nKbase\n\n tnt_dataset = tnt.dataset.ListDataset(\n elem_list=range(self.epoch_size), load=load_function)\n data_loader = tnt_dataset.parallel(\n batch_size=self.batch_size,\n num_workers=(0 if self.is_eval_mode else self.num_workers),\n shuffle=(False if self.is_eval_mode else True))\n\n return data_loader\n\n def __call__(self, epoch=0):\n return self.get_iterator(epoch)\n\n def __len__(self):\n return int(self.epoch_size / self.batch_size)\n"
] | [
[
"torch.nn.Softmax",
"torch.nn.Dropout",
"torch.nn.Softplus",
"torch.einsum",
"torch.reshape",
"torch.nn.Conv2d",
"torch.nn.MaxPool2d",
"torch.nn.LeakyReLU",
"torch.nn.BatchNorm2d",
"torch.nn.ReLU"
],
[
"torch.LongTensor",
"numpy.random.seed",
"numpy.unique",
"numpy.asarray",
"numpy.concatenate",
"torch.stack"
]
] |
brianjo/pytorch | [
"fd8004b42e2a2348ec8837e3fb524b960c1b4cdb"
] | [
"torch/testing/_internal/distributed/distributed_test.py"
] | [
"import copy\nimport itertools\nimport math\nimport os\nimport random\nimport sys\nimport tempfile\nimport time\nfrom collections import namedtuple\nfrom contextlib import contextmanager, suppress\nfrom datetime import timedelta\nfrom functools import reduce\nfrom typing import Union, NamedTuple, Callable, Any\n\nimport torch\nimport torch.cuda\nimport torch.distributed as dist\nimport torch.distributed.algorithms.ddp_comm_hooks.post_localSGD_hook as post_localSGD\nimport torch.distributed.algorithms.ddp_comm_hooks.powerSGD_hook as powerSGD\nimport torch.distributed.algorithms.model_averaging.averagers as averagers\nimport torch.distributed.algorithms.model_averaging.utils as model_averaging_utils\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch._utils_internal import TEST_MASTER_ADDR as MASTER_ADDR\nfrom torch._utils_internal import TEST_MASTER_PORT as MASTER_PORT\nfrom torch.cuda.amp import GradScaler, autocast\nfrom torch.distributed.algorithms.ddp_comm_hooks import default_hooks as default\nfrom torch.distributed.algorithms.ddp_comm_hooks import (\n quantization as quantization_hooks,\n)\nfrom torch.distributed.distributed_c10d import (\n get_world_size,\n _get_default_group,\n AllreduceOptions,\n GroupMember,\n)\nfrom torch.nn.parallel import DistributedDataParallel\nfrom torch.nn.parallel.distributed import _dump_DDP_relevant_env_vars\nfrom torch.testing._internal.common_distributed import (\n MultiProcessTestCase,\n TEST_SKIPS,\n initialize_temp_directories,\n cleanup_temp_dir,\n simple_sparse_reduce_tests,\n skip_if_rocm,\n skip_if_small_worldsize,\n skip_if_lt_x_gpu,\n nccl_skip_if_lt_x_gpu,\n skip_if_no_gpu,\n require_n_gpus_for_nccl_backend,\n requires_nccl_version,\n captured_output,\n with_nccl_blocking_wait,\n with_dist_debug_levels,\n verify_ddp_error_logged,\n)\nfrom torch.testing._internal.common_utils import (\n IS_MACOS,\n IS_WINDOWS,\n FILE_SCHEMA,\n IS_FBCODE,\n NO_MULTIPROCESSING_SPAWN,\n sandcastle_skip,\n sandcastle_skip_if,\n)\n\nif not IS_WINDOWS:\n import torch.distributed.optim.post_localSGD_optimizer as post_localSGD_optimizer\n from torch.distributed.optim.functional_sgd import _FunctionalSGD\n\nfrom torch.utils.data.distributed import DistributedSampler\n\ntry:\n import torchvision\n\n HAS_TORCHVISION = True\nexcept ImportError:\n HAS_TORCHVISION = False\n\nif sys.platform == \"win32\":\n import msvcrt\nelse:\n import fcntl\n\n\nclass Foo:\n def __init__(self, x):\n # Can be tensor or int\n self.x = x\n\n def __eq__(self, other):\n def eq(value, other):\n if isinstance(value, torch.Tensor):\n return torch.equal(value, other)\n return value == other\n\n for attr, value in self.__dict__.items():\n other_value = other.__dict__[attr]\n if not eq(value, other_value):\n return False\n return True\n\n\nf = Foo(10)\nf.bar = 1\n\nfoo_cpu_tensor = Foo(torch.randn(3, 3))\n\n\nCOLLECTIVES_OBJECT_TEST_LIST = [\n {\"key1\": 3, \"key2\": 4, \"key3\": {\"nested\": True}},\n f,\n foo_cpu_tensor,\n \"foo\",\n [1, 2, True, \"string\", [4, 5, \"nested\"]],\n]\n\n# Allowlist of distributed backends where profiling collectives is supported.\nPROFILING_SUPPORTED_BACKENDS = [\n dist.Backend.NCCL,\n dist.Backend.GLOO,\n dist.Backend.MPI,\n]\n\n# Allowlist of distributed backends where profiling is supported with use_cuda=True\nCUDA_PROFILING_SUPPORTED_BACKENDS = [\n dist.Backend.GLOO,\n dist.Backend.MPI,\n dist.Backend.NCCL,\n]\n\n# Allowlist of distributed backends where profiling is supported for p2p ops\nSEND_RECV_PROFILING_SUPPORTED_BACKENDS = [\n 
dist.Backend.MPI,\n dist.Backend.GLOO,\n dist.Backend.NCCL,\n]\n\n# Dummy NamedTuple data structures to test DDP support for NamedTuple types.\nEXPECTED_FIELDS = (\"a\", \"b\")\nTestNamedTupleInput_0 = namedtuple(\"NamedTuple\", EXPECTED_FIELDS)\n\n\nclass TestNamedTupleInput_1(NamedTuple):\n a: torch.tensor\n b: torch.tensor\n\n\nskipIfNoTorchVision = sandcastle_skip_if(not HAS_TORCHVISION, \"no torchvision\")\n\nBACKEND = os.environ[\"BACKEND\"]\nINIT_METHOD = os.getenv(\"INIT_METHOD\", \"env://\")\n\nDEFAULT_TIMEOUT = 300\nCUSTOMIZED_TIMEOUT = {\"test_DistributedDataParallel\": 500}\n\n\ndef get_profiling_event(postfix, profiler):\n event_list = (\n profiler.events()\n if isinstance(profiler, torch.profiler.profile)\n else profiler.function_events\n )\n return [event for event in event_list if event.name.endswith(postfix)]\n\n\n# Base error message substring on unfinished reductions.\nddp_prev_reduction_unfinished_str = (\n \"Expected to have finished reduction in the prior iteration\"\n)\n# Error message substring when find_unused_parameters=True has not been passed\nddp_recommend_find_unused_params_str = (\n \"passing the keyword argument `find_unused_parameters=True`\"\n)\n# Error message substring when find_unused_parameters=True is enabled\nddp_find_unused_params_enabled_str = \"Since `find_unused_parameters=True` is enabled\"\n# Error message substring for possibility of not all model outputs being used\n# in loss computation\nddp_outputs_not_used_in_loss_str = (\n \"`forward` function outputs participate in calculating loss\"\n)\n# Error message substring suggesting to use TORCH_DISTRIBUTED_DEBUG\nddp_suggest_debug_mode_str = (\n \"set the environment variable TORCH_DISTRIBUTED_DEBUG to either INFO or DETAIL\"\n)\n\n\nclass DDPUnevenTestInput(NamedTuple):\n name: str\n model: nn.Module\n inp: Union[torch.tensor, tuple]\n sync_interval: int\n throw_on_early_termination: bool = False\n hook: Callable = None\n state: Any = None\n\n\nclass _FC2(nn.Module):\n def __init__(self):\n super(_FC2, self).__init__()\n self.fc = nn.Linear(10, 50, bias=True)\n self.fc.bias.requires_grad = False\n\n def forward(self, x):\n x = self.fc(x)\n return x\n\n\nclass Net(nn.Module):\n def __init__(self):\n super(Net, self).__init__()\n self.fc1 = nn.Linear(2, 10, bias=False)\n self.fc2 = _FC2()\n self.fc3 = nn.Linear(50, 4, bias=False)\n self.relu = nn.ReLU()\n self.no_grad_param = nn.Parameter(\n torch.tensor([2, 2]).long(), requires_grad=False\n )\n\n def forward(self, x):\n x = self.relu(self.fc1(x))\n x = self.relu(self.fc2(x))\n x = self.fc3(x)\n return F.softmax(x, dim=1)\n\n\nclass LargeNet(nn.Module):\n def __init__(self):\n super(LargeNet, self).__init__()\n self.fc1 = nn.Linear(1000, 2000, bias=False)\n self.fc2 = nn.Linear(2000, 500, bias=False)\n\n def forward(self, x):\n x = self.fc1(x)\n x = self.fc2(x)\n return x\n\n\nclass Task(nn.Module):\n def __init__(self):\n super().__init__()\n self.p = nn.Parameter(torch.ones(2, 2))\n\n def forward(self, x):\n return self.p + x\n\n\nclass BatchNormNet(nn.Module):\n def __init__(self, affine=True):\n super(BatchNormNet, self).__init__()\n self.fc1 = nn.Linear(2, 40, bias=False)\n self.bn = nn.BatchNorm1d(4, affine=affine)\n self.fc2 = nn.Linear(40, 4, bias=False)\n\n def forward(self, x):\n x = torch.reshape(self.fc1(x), (-1, 4, 10))\n x = self.bn(x)\n x = torch.reshape(x, (-1, 40))\n x = self.fc2(x)\n return F.softmax(x, dim=1)\n\n\nclass TwoLinLayerNet(nn.Module):\n def __init__(self):\n super().__init__()\n self.a = nn.Linear(10, 10, 
bias=False)\n self.b = nn.Linear(10, 10, bias=False)\n\n def forward(self, x):\n a = self.a(x)\n b = self.b(x)\n return (a, b)\n\n\nclass EmbeddingNet(nn.Module):\n def __init__(self, rank):\n super().__init__()\n embedding_dim = 500 if rank == 0 else 50\n self.embedding = nn.Embedding(num_embeddings=10, embedding_dim=embedding_dim)\n self.lin = nn.Linear(embedding_dim, 1)\n\n def forward(self, x):\n x = self.embedding(x)\n return self.lin(x)\n\n\nclass ControlFlowToyModel(nn.Module):\n def __init__(self):\n super(ControlFlowToyModel, self).__init__()\n self.lin1 = nn.Linear(10, 10, bias=False)\n self.lin2 = nn.Linear(10, 10, bias=False)\n\n def forward(self, x):\n # Second layer is used dependent on input x.\n use_second_layer = torch.equal(x, torch.ones(20, 10, device=x.device))\n if use_second_layer:\n return self.lin2(F.relu(self.lin1(x)))\n else:\n return F.relu(self.lin1(x))\n\n\nDDP_NET = Net()\nBN_NET = BatchNormNet()\nBN_NET_NO_AFFINE = BatchNormNet(affine=False)\nONLY_SBN_NET = nn.SyncBatchNorm(2, momentum=0.99)\n\n\ndef get_timeout(test_id):\n test_name = test_id.split(\".\")[-1]\n if test_name in CUSTOMIZED_TIMEOUT:\n return CUSTOMIZED_TIMEOUT[test_name]\n else:\n return DEFAULT_TIMEOUT\n\n\ndefault_pg_timeout = 60\n\nCUSTOM_PG_TIMEOUT = {\n # This test runs slowly and needs additional time to complete, otherwise can\n # be taken down by NCCL_ASYNC_ERROR_HANDLING\n \"test_ddp_uneven_inputs\": 300,\n # This test has a short timeout since it tests being taken down by\n # NCCL_ASYNC_ERROR_HANDLING which we want to happen quickly.\n \"test_ddp_model_diff_across_ranks\": 5,\n}\n\n\ndef require_backend(backends):\n if BACKEND not in backends:\n return sandcastle_skip(\"Test requires backend to be one of %s\" % backends)\n return lambda func: func\n\n\ndef require_backends_available(backends):\n def check(backend):\n if backend == dist.Backend.GLOO:\n return dist.is_gloo_available()\n if backend == dist.Backend.NCCL:\n return dist.is_nccl_available()\n if backend == dist.Backend.MPI:\n return dist.is_mpi_available()\n return False\n\n if not all(check(dist.Backend(backend)) for backend in backends):\n return sandcastle_skip(\"Test requires backends to be available %s\" % backends)\n return lambda func: func\n\n\ndef require_world_size(world_size):\n if int(os.environ[\"WORLD_SIZE\"]) < world_size:\n return sandcastle_skip(\"Test requires world size of %d\" % world_size)\n return lambda func: func\n\n\ndef apply_hack_for_nccl():\n # This is a hack for a known NCCL issue using multiprocess\n # in conjunction with multiple threads to manage different GPUs which\n # may cause ncclCommInitRank to fail.\n # http://docs.nvidia.com/deeplearning/sdk/nccl-release-notes/rel_2.1.4.html#rel_2.1.4\n # It slows down the performance of collective operations.\n # Without this setting NCCL might throw unhandled error.\n os.environ[\"NCCL_MAX_NRINGS\"] = \"1\"\n\n\n@contextmanager\ndef _lock():\n TEMP_DIR = os.environ[\"TEMP_DIR\"]\n lockfile = os.path.join(TEMP_DIR, \"lockfile\")\n with open(lockfile, \"w\") as lf:\n try:\n if sys.platform == \"win32\":\n msvcrt.locking(lf.fileno(), msvcrt.LK_RLCK, 1)\n yield\n else:\n fcntl.flock(lf.fileno(), fcntl.LOCK_EX)\n yield\n finally:\n if sys.platform == \"win32\":\n msvcrt.locking(lf.fileno(), msvcrt.LK_UNLCK, 1)\n else:\n fcntl.flock(lf.fileno(), fcntl.LOCK_UN)\n lf.close()\n\n\ndef _build_tensor(size, value=None, dtype=torch.float, device_id=None):\n if value is None:\n value = size\n if device_id is None:\n return torch.empty(size, size, size, 
dtype=dtype).fill_(value)\n else:\n return torch.empty(size, size, size, dtype=dtype).fill_(value).cuda(device_id)\n\n\ndef _build_multidim_tensor(dim, dim_size, value=None, dtype=torch.float):\n if value is None:\n value = size\n return torch.empty(size=[dim_size for _ in range(dim)], dtype=dtype).fill_(value)\n\n\ndef _create_autograd_profiler():\n return torch.autograd.profiler.profile(record_shapes=True)\n\n\ndef _create_torch_profiler():\n return torch.profiler.profile(\n activities=[\n torch.profiler.ProfilerActivity.CPU,\n ],\n record_shapes=True,\n )\n\n\nclass Barrier(object):\n barrier_id = 0\n\n @classmethod\n def init(cls):\n cls.barrier_id = 0\n barrier_dir = os.path.join(os.environ[\"TEMP_DIR\"], \"barrier\")\n for f_name in os.listdir(barrier_dir):\n os.unlink(os.path.join(barrier_dir, f_name))\n\n @classmethod\n def sync(cls, wait_for=None, timeout=10):\n if wait_for is None:\n wait_for = dist.get_world_size()\n cls.barrier_id += 1\n barrier_dir = os.path.join(os.environ[\"TEMP_DIR\"], \"barrier\")\n pid = str(os.getpid())\n barrier_file = os.path.join(barrier_dir, pid)\n with _lock():\n with open(barrier_file, \"w\") as f:\n f.write(str(cls.barrier_id))\n\n start_time = time.time()\n while True:\n arrived = 0\n with _lock():\n for f_name in os.listdir(barrier_dir):\n with open(os.path.join(barrier_dir, f_name), \"r\") as f:\n data = f.read()\n if int(data) >= cls.barrier_id:\n arrived += 1\n if arrived == wait_for:\n break\n\n if time.time() - start_time > timeout:\n raise RuntimeError(\"barrier timeout\")\n time.sleep(0.1)\n\n\nclass TestDistBackend(MultiProcessTestCase):\n @classmethod\n def setUpClass(cls):\n os.environ[\"MASTER_ADDR\"] = str(MASTER_ADDR)\n os.environ[\"MASTER_PORT\"] = str(MASTER_PORT)\n # NCCL_BLOCKING_WAIT overrides NCCL_ASYNC_ERROR_HANDLING hence tests\n # such as test_batch_isend_irecv_nccl will test NCCL_BLOCKING_WAIT as\n # expected.\n os.environ[\"NCCL_ASYNC_ERROR_HANDLING\"] = \"1\"\n super().setUpClass()\n\n def setUp(self):\n super().setUp()\n # initialize temp directories\n initialize_temp_directories()\n # initialize Barrier\n Barrier.init()\n # Skip return code checking for following tests as they are expected to\n # crash a process due to NCCL_ASYNC_ERROR_HANDLING.\n self.skip_return_code_checks = []\n\n def tearDown(self):\n cleanup_temp_dir()\n super().tearDown()\n\n @property\n def init_method(self):\n return \"{}{file_name}\".format(FILE_SCHEMA, file_name=self.file_name)\n\n @classmethod\n def _run(cls, rank, test_name, file_name, pipe):\n if BACKEND == \"nccl\" and not torch.cuda.is_available():\n sys.exit(TEST_SKIPS[\"no_cuda\"].exit_code)\n self = cls(test_name)\n self.rank = rank\n self.file_name = file_name\n\n if torch.cuda.is_available() and torch.cuda.device_count() < int(\n self.world_size\n ):\n sys.exit(TEST_SKIPS[f\"multi-gpu-{self.world_size}\"].exit_code)\n try:\n pg_timeout_seconds = CUSTOM_PG_TIMEOUT.get(test_name, default_pg_timeout)\n timeout = timedelta(seconds=pg_timeout_seconds)\n dist.init_process_group(\n init_method=self.init_method,\n backend=BACKEND,\n world_size=int(self.world_size),\n rank=self.rank,\n timeout=timeout,\n )\n except RuntimeError as e:\n if \"recompile\" in e.args[0]:\n sys.exit(TEST_SKIPS[\"backend_unavailable\"].exit_code)\n\n raise\n\n # Execute barrier prior to running test to ensure that every process\n # has finished initialization and that the following test\n # immediately exiting due to a skip doesn't cause flakiness.\n self._barrier()\n\n self.run_test(test_name, pipe)\n 
self._barrier()\n dist.destroy_process_group()\n sys.exit(0)\n\n # Needed since MultiProcessTestCase assumes a world_size of 4, but we\n # run these tests under other various world_sizes.\n @property\n def world_size(self):\n return os.environ[\"WORLD_SIZE\"]\n\n\nclass DistributedTest:\n class _DistTestBase:\n def _barrier(self, *args, **kwargs):\n Barrier.sync(*args, **kwargs)\n\n def _init_group_test(self, **kwargs):\n group = [1, 2]\n group_id = dist.new_group(group, **kwargs)\n rank = dist.get_rank()\n if rank not in group:\n return ([], None, rank)\n\n return (group, group_id, rank)\n\n def _init_full_group_test(self, **kwargs):\n group = list(range(0, dist.get_world_size()))\n group_id = dist.new_group(**kwargs)\n rank = dist.get_rank()\n return (group, group_id, rank)\n\n def _init_global_test(self):\n group = list(range(0, dist.get_world_size()))\n group_id = dist.group.WORLD\n rank = dist.get_rank()\n return (group, group_id, rank)\n\n # HELPER FOR MULTIGPU TESTS\n def _init_multigpu_helper(self):\n \"\"\"Multigpu tests are designed to simulate the multi nodes with multi\n GPUs on each node. Nccl backend requires equal #GPUs in each process.\n On a single node, all visible GPUs are evenly\n divided to subsets, each process only uses a subset.\n \"\"\"\n nGPUs = torch.cuda.device_count()\n world_size = dist.get_world_size()\n visible_devices = range(nGPUs)\n\n if BACKEND == \"nccl\":\n apply_hack_for_nccl()\n\n # If rank is lesser than or equal to number of available GPU's\n # then each rank can be mapped to corresponding GPU.\n nGPUs_per_process = 1\n if world_size > nGPUs:\n nGPUs_per_process = nGPUs // world_size\n rank_to_GPU = {\n i: list(\n visible_devices[i * nGPUs_per_process : (i + 1) * nGPUs_per_process]\n )\n for i in range(world_size)\n }\n return rank_to_GPU\n\n def test_dump_DDP_relevant_env_vars(self):\n with captured_output() as (out, _):\n _dump_DDP_relevant_env_vars()\n lines = out.getvalue().splitlines()\n\n def format_line(var):\n return \"env:%s=%s\" % (\n var,\n os.environ[var] if var in os.environ else \"N/A\",\n )\n\n # Check relevant env vars\n vars = [\n \"MASTER_ADDR\",\n \"MASTER_PORT\",\n \"WORLD_SIZE\",\n \"NCCL_TOPO_DUMP_FILE\", # N/A\n \"NCCL_ASYNC_ERROR_HANDLING\",\n ]\n for var in vars:\n line = format_line(var)\n self.assertIn(line, lines)\n # Check irrelevant env vars\n vars = [\n \"xxx\",\n \"yyy\",\n \"zzz\",\n ]\n for var in vars:\n line = format_line(var)\n self.assertNotIn(line, lines)\n\n # GET RANK\n def test_get_rank(self):\n test_dir = os.path.join(os.environ[\"TEMP_DIR\"], \"test_dir\")\n pid = str(os.getpid())\n num_processes = dist.get_world_size()\n with open(os.path.join(test_dir, pid), \"w\") as f:\n f.write(str(dist.get_rank()))\n\n self._barrier()\n\n all_ranks = set()\n for f_name in os.listdir(test_dir):\n with open(os.path.join(test_dir, f_name), \"r\") as f:\n all_ranks.add(int(f.read()))\n self.assertEqual(len(all_ranks), num_processes)\n\n self._barrier()\n\n if dist.get_rank() == 0:\n for f_name in os.listdir(test_dir):\n os.unlink(os.path.join(test_dir, f_name))\n\n self._barrier()\n\n def test_get_backend(self):\n if dist.get_world_size() > 2:\n group = [1, 2]\n else:\n group = [0, 1]\n group_id = dist.new_group(group)\n backend_str = BACKEND.lower()\n self.assertEqual(dist.get_backend(), backend_str)\n if dist.get_rank() in group:\n self.assertEqual(dist.get_backend(group_id), backend_str)\n else:\n with self.assertRaisesRegex(\n RuntimeError, \"Invalid process group specified\"\n ):\n dist.get_backend(group_id)\n\n 
def test_Backend_enum_class(self):\n # test parsing\n backend = BACKEND.lower()\n self.assertEqual(dist.Backend(BACKEND.upper()), backend)\n self.assertEqual(dist.Backend(BACKEND), backend)\n with self.assertRaisesRegex(ValueError, \"Invalid backend: 'undefined'\"):\n dist.Backend(\"undefined\")\n with self.assertRaisesRegex(ValueError, \"Invalid backend: 'xYz'\"):\n dist.Backend(\"xYz\")\n with self.assertRaises(ValueError):\n dist.Backend(None)\n with self.assertRaises(ValueError):\n dist.Backend(3)\n with self.assertRaises(ValueError):\n dist.Backend([\"gloo\"])\n\n # Test destroy\n def test_destroy_group(self):\n if dist.get_world_size() > 2:\n group = [1, 2]\n else:\n group = [0, 1]\n group_id = dist.new_group(group)\n self._barrier()\n dist.destroy_process_group(group_id)\n\n # Test get rank and size of group\n def test_get_rank_size_group(self):\n if dist.get_world_size() > 2:\n group = [1, 2]\n else:\n group = [0, 1]\n group_id = dist.new_group(group)\n if dist.get_rank() in group:\n self.assertEqual(dist.get_world_size(group_id), 2)\n self.assertTrue(dist.get_rank(group_id) in list(range(2)))\n else:\n self.assertEqual(dist.get_world_size(group_id), -1)\n self.assertEqual(dist.get_rank(group_id), -1)\n\n # Test destroy full groups\n def test_destroy_full_group(self):\n _, group_id, _ = self._init_full_group_test()\n self._barrier()\n dist.destroy_process_group(group_id)\n\n # Test get rank and size of full group\n def test_get_rank_size_full_group(self):\n _, group_id, _ = self._init_full_group_test()\n self.assertEqual(dist.get_world_size(group_id), dist.get_world_size())\n self.assertEqual(dist.get_rank(group_id), dist.get_rank())\n\n def _test_barrier_timeout(self, group_id, timeout):\n local_rank = dist.get_rank(group_id)\n\n # Only execute barrier on rank == 0, causing it to timeout\n if local_rank == 0:\n expected_time = time.time() + timeout.total_seconds()\n # In debug mode, we execute a monitored_barrier before the\n # collective, so assert on that.\n if dist._get_debug_mode() == dist._DistributedDebugLevel.DETAIL:\n exception_ctx = self.assertRaisesRegex(\n Exception, \"failed to pass monitoredBarrier\"\n )\n else:\n exception_ctx = self.assertRaisesRegex(\n Exception, \" (Timed out|closed|timeout) \"\n )\n with exception_ctx:\n dist.barrier(group_id)\n self.assertGreaterAlmostEqual(time.time(), expected_time, delta=0.1)\n else:\n pass\n\n @sandcastle_skip_if(BACKEND != \"gloo\", \"Only gloo backend supports timeouts\")\n @sandcastle_skip_if(\n not INIT_METHOD.startswith(\"file://\"),\n \"Requires file:// initialization method. 
\"\n + \"Both tcp:// and env:// rely on the TCP store for which \"\n \"reinitialization has proven racy.\",\n )\n def test_barrier_timeout_global(self):\n dist.destroy_process_group()\n\n # Explicitly pass world size to the barrier because we've\n # just destroyed any state in torch.distributed.\n self._barrier(wait_for=int(os.environ[\"WORLD_SIZE\"]))\n\n # Reinitialize global process group\n timeout = timedelta(seconds=1)\n dist.init_process_group(\n init_method=INIT_METHOD,\n backend=BACKEND,\n world_size=int(os.environ[\"WORLD_SIZE\"]),\n rank=self.rank,\n timeout=timeout,\n )\n self._test_barrier_timeout(dist.group.WORLD, timeout)\n\n @skip_if_small_worldsize\n @sandcastle_skip_if(BACKEND != \"gloo\", \"Only gloo backend supports timeouts\")\n def test_barrier_timeout_group(self):\n timeout = timedelta(seconds=5)\n _, group_id, _ = self._init_group_test(timeout=timeout)\n if group_id is not None:\n self._test_barrier_timeout(group_id, timeout)\n\n @sandcastle_skip_if(BACKEND != \"gloo\", \"Only gloo backend supports timeouts\")\n def test_barrier_timeout_full_group(self):\n timeout = timedelta(seconds=1)\n _, group_id, _ = self._init_full_group_test(timeout=timeout)\n if group_id is not None:\n self._test_barrier_timeout(group_id, timeout)\n\n # This test helper can only be used when using the Gloo or NCCL backend\n # **and** both the Gloo and NCCL backends are available.\n # See the @skip annotations below.\n def _test_group_override_backend(self, initializer):\n if BACKEND == \"gloo\":\n new_backend = \"nccl\"\n if BACKEND == \"nccl\":\n new_backend = \"gloo\"\n\n group, group_id, rank = initializer(backend=new_backend)\n if group_id is None:\n return\n\n if new_backend == \"gloo\":\n self.assertTrue(isinstance(group_id, dist.ProcessGroupGloo))\n if new_backend == \"nccl\":\n self.assertTrue(isinstance(group_id, dist.ProcessGroupNCCL))\n\n self.assertEqual(rank, group[dist.get_rank(group_id)])\n self.assertEqual(len(group), dist.get_world_size(group_id))\n\n # Pin device (so we avoid NCCL race conditions/deadlocks).\n group_rank = dist.get_rank(group_id)\n torch.cuda.set_device(group_rank)\n\n # Run broadcast of CUDA tensor (so it works for both Gloo and NCCL).\n tensor = _build_tensor(2, value=group_rank).cuda()\n dist.broadcast(tensor, src=group[0], group=group_id)\n self.assertEqual(_build_tensor(2, value=0), tensor.to(\"cpu\"))\n\n @require_backend({\"gloo\", \"nccl\"})\n @require_backends_available({\"gloo\", \"nccl\"})\n @require_world_size(3)\n @skip_if_lt_x_gpu(2)\n def test_backend_group(self):\n self._test_group_override_backend(self._init_group_test)\n\n @require_backend({\"gloo\", \"nccl\"})\n @require_backends_available({\"gloo\", \"nccl\"})\n @skip_if_lt_x_gpu(3)\n def test_backend_full_group(self):\n self._test_group_override_backend(self._init_full_group_test)\n\n @sandcastle_skip_if(\n BACKEND != \"nccl\" and BACKEND != \"gloo\",\n \"MPI backend does not support creating subgroups on CUDA devices\",\n )\n @require_world_size(4)\n @skip_if_lt_x_gpu(2)\n def test_new_subgroups(self):\n subgroup_size = 2\n cur_subgroup, subgroups = dist.new_subgroups(subgroup_size)\n\n world_size = dist.get_world_size()\n self.assertEqual(cur_subgroup.size(), subgroup_size)\n self.assertEqual(len(subgroups), world_size / subgroup_size)\n self.assertFalse(dist._rank_not_in_group(cur_subgroup))\n\n for subgroup in subgroups:\n dist.destroy_process_group(subgroup)\n\n @sandcastle_skip_if(\n BACKEND != \"nccl\" and BACKEND != \"gloo\",\n \"MPI backend does not support creating subgroups on 
CUDA devices\",\n )\n @skip_if_no_gpu\n def test_new_subgroups_group_size_exceeds_world_size(self):\n with self.assertRaisesRegex(\n ValueError, \"The arg 'group_size' must not exceed the world size\"\n ):\n dist.new_subgroups(100)\n\n @sandcastle_skip_if(\n BACKEND != \"nccl\" and BACKEND != \"gloo\",\n \"MPI backend does not support creating subgroups on CUDA devices\",\n )\n @require_world_size(4)\n @skip_if_lt_x_gpu(4)\n def test_new_subgroups_world_size_not_divisible_by_group_size(self):\n with self.assertRaisesRegex(\n ValueError, \"The world size must be divisible by 'group_size'\"\n ):\n dist.new_subgroups(3)\n\n @sandcastle_skip_if(\n BACKEND != \"nccl\" and BACKEND != \"gloo\",\n \"MPI backend does not support creating subgroups on CUDA devices\",\n )\n @require_world_size(4)\n @skip_if_lt_x_gpu(4)\n def test_new_subgroups_by_enumeration(self):\n group, group_id, rank = self._init_global_test()\n rank_to_GPU = self._init_multigpu_helper()\n device_id = rank_to_GPU[rank][0]\n cur_subgroup, subgroups = dist.new_subgroups_by_enumeration(\n ranks_per_subgroup_list=[[0, 2], [1, 3]]\n )\n if device_id >= 4:\n self.assertIsNone(cur_subgroup)\n else:\n self.assertEqual(cur_subgroup.size(), 2)\n self.assertEqual(len(subgroups), 2)\n if device_id == 0 or device_id == 2:\n self.assertEqual(cur_subgroup, subgroups[0])\n else:\n self.assertEqual(cur_subgroup, subgroups[1])\n\n for subgroup in subgroups:\n dist.destroy_process_group(subgroup)\n\n @sandcastle_skip_if(\n BACKEND != \"nccl\" and BACKEND != \"gloo\",\n \"MPI backend does not support creating subgroups on CUDA devices\",\n )\n @require_world_size(4)\n @skip_if_lt_x_gpu(4)\n def test_new_subgroups_by_enumeration_input_rank_exceeds_world_size(self):\n group, group_id, rank = self._init_global_test()\n rank_to_GPU = self._init_multigpu_helper()\n device_id = rank_to_GPU[rank][0]\n world_size = get_world_size(group_id)\n\n with self.assertRaisesRegex(\n RuntimeError,\n \"The new group's rank should be within the the world_size set by init_process_group\",\n ):\n dist.new_subgroups_by_enumeration(\n ranks_per_subgroup_list=[[0, 1], [world_size, 2]]\n )\n\n @sandcastle_skip_if(\n BACKEND != \"nccl\" and BACKEND != \"gloo\",\n \"MPI backend does not support creating subgroups on CUDA devices\",\n )\n @skip_if_no_gpu\n def test_new_subgroups_by_enumeration_negative_input_rank(self):\n group, group_id, rank = self._init_global_test()\n\n with self.assertRaisesRegex(\n RuntimeError,\n \"The new group's rank should be within the the world_size set by init_process_group\",\n ):\n dist.new_subgroups_by_enumeration(\n ranks_per_subgroup_list=[[-1, -2], [-3, -4]]\n )\n\n @sandcastle_skip_if(\n BACKEND != \"nccl\" and BACKEND != \"gloo\",\n \"MPI backend does not support creating subgroups on CUDA devices\",\n )\n @require_world_size(4)\n @skip_if_lt_x_gpu(4)\n def test_new_subgroups_overlap_not_allowed(self):\n with self.assertRaisesRegex(\n ValueError, \"Rank 1 has appeared in both subgroup\"\n ):\n dist.new_subgroups_by_enumeration(\n ranks_per_subgroup_list=[[0], [1, 2], [1, 3]]\n )\n\n @sandcastle_skip_if(\n BACKEND != \"nccl\" and BACKEND != \"gloo\",\n \"MPI backend does not support creating subgroups on CUDA devices\",\n )\n @skip_if_lt_x_gpu(2)\n def test_average_parameters(self):\n rank = dist.get_rank()\n rank_to_GPU = self._init_multigpu_helper()\n device_id = rank_to_GPU[rank][0]\n\n model = nn.Sequential(\n nn.Conv2d(3, 3, kernel_size=3, padding=1),\n nn.ReLU(),\n nn.Linear(1, 5, bias=False),\n ).cuda(device_id)\n # Test global model 
averaging\n for p in model.parameters():\n p.data = torch.ones_like(p.data)\n model_averaging_utils.average_parameters(\n params=model.parameters(), process_group=None\n )\n # Every element will be the same as the input.\n for p in model.parameters():\n self.assertEqual(p.data, torch.ones_like(p.data))\n\n # Test partial model averaging\n for p in model.parameters():\n p.data = torch.ones_like(p.data) * rank\n group_nccl = dist.new_group(ranks=[0, 1], backend=\"nccl\")\n model_averaging_utils.average_parameters(\n params=model.parameters(), process_group=group_nccl\n )\n if not dist._rank_not_in_group(group_nccl):\n # Every element on device 0 or 1 should be the average of 0 and 1, i.e., 0.5.\n for p in model.parameters():\n self.assertEqual(p.data, torch.ones_like(p.data) * 0.5)\n else:\n # Every element on device not in the subgroup should remain the same.\n for p in model.parameters():\n self.assertEqual(p.data, torch.ones_like(p.data) * rank)\n\n @sandcastle_skip_if(\n BACKEND != \"nccl\" and BACKEND != \"gloo\",\n \"MPI backend does not support creating subgroups on CUDA devices\",\n )\n @skip_if_lt_x_gpu(2)\n def test_periodic_model_averager(self):\n rank = dist.get_rank()\n rank_to_GPU = self._init_multigpu_helper()\n device_id = rank_to_GPU[rank][0]\n world_size = dist.get_world_size()\n\n model = nn.Linear(1, 5, bias=False).cuda(device_id)\n param = next(model.parameters())\n tensor = torch.ones_like(param.data) * rank\n expected_avg_tensor = (\n torch.ones_like(param.data) * sum(range(world_size)) / world_size\n )\n period = 4\n for warmup_steps in [12, 13, 14, 15]:\n averager = averagers.PeriodicModelAverager(period=period, warmup_steps=warmup_steps)\n for step in range(0, 20):\n # Reset the parameters at every step.\n param.data = copy.deepcopy(tensor)\n averager.average_parameters(model.parameters())\n if step >= warmup_steps and (step - warmup_steps) % period == 0:\n self.assertEqual(param.data, expected_avg_tensor)\n else:\n # No model averaging, so the parameters are not updated.\n self.assertEqual(param.data, tensor)\n\n # NCCL Batch SEND RECV\n @skip_if_no_gpu\n @sandcastle_skip_if(BACKEND != \"nccl\", \"NCCL Batch Send Recv Only\")\n @requires_nccl_version(2700, \"Need NCCL 2.7+ for send/recv\")\n def test_batch_isend_irecv_nccl(self):\n self._barrier()\n rank = dist.get_rank()\n rank_to_GPU = self._init_multigpu_helper()\n device_id = rank_to_GPU[rank][0]\n torch.cuda.set_device(device_id)\n p2p_op_list = []\n\n for val in [\"1\", \"0\"]:\n os.environ[\"NCCL_BLOCKING_WAIT\"] = val\n for src in range(0, dist.get_world_size()):\n send_tensor = _build_tensor(rank + 1, device_id=device_id)\n recv_tensor = _build_tensor(src + 1, value=-1, device_id=device_id)\n recv_op = dist.P2POp(dist.irecv, recv_tensor, src)\n p2p_op_list.append(recv_op)\n send_op = dist.P2POp(dist.isend, send_tensor, src)\n p2p_op_list.append(send_op)\n\n reqs = dist.batch_isend_irecv(p2p_op_list)\n for req in reqs:\n req.wait()\n\n self._barrier()\n\n @skip_if_no_gpu\n @sandcastle_skip_if(BACKEND != \"nccl\", \"NCCL Batch Send Recv Only\")\n @requires_nccl_version(2700, \"Need NCCL 2.7+ for send/recv\")\n def test_batch_isend_irecv_self_nccl(self):\n self._barrier()\n rank = dist.get_rank()\n rank_to_GPU = self._init_multigpu_helper()\n device_id = rank_to_GPU[rank][0]\n p2p_op_list = []\n\n if rank == 0:\n send_tensor = _build_tensor(rank + 1, device_id=device_id)\n recv_tensor = _build_tensor(rank + 1, value=-1, device_id=device_id)\n recv_op = dist.P2POp(dist.irecv, recv_tensor, 0)\n 
p2p_op_list.append(recv_op)\n send_op = dist.P2POp(dist.isend, send_tensor, 0)\n p2p_op_list.append(send_op)\n\n reqs = dist.batch_isend_irecv(p2p_op_list)\n for req in reqs:\n req.wait()\n\n self._barrier()\n\n @skip_if_no_gpu\n @skip_if_small_worldsize\n @sandcastle_skip_if(BACKEND != \"nccl\", \"NCCL Batch Send Recv Only\")\n @requires_nccl_version(2700, \"Need NCCL 2.7+ for send/recv\")\n def test_batch_isend_irecv_no_rank_zero_nccl(self):\n self._barrier()\n rank = dist.get_rank()\n rank_to_GPU = self._init_multigpu_helper()\n device_id = rank_to_GPU[rank][0]\n torch.cuda.set_device(device_id)\n p2p_op_list = []\n\n if rank == 1:\n peer = 2\n elif rank == 2:\n peer = 1\n\n if rank in [1, 2]:\n send_tensor = _build_tensor(rank + 1, device_id=device_id)\n recv_tensor = _build_tensor(peer + 1, value=-1, device_id=device_id)\n recv_op = dist.P2POp(dist.irecv, recv_tensor, peer)\n p2p_op_list.append(recv_op)\n send_op = dist.P2POp(dist.isend, send_tensor, peer)\n p2p_op_list.append(send_op)\n\n reqs = dist.batch_isend_irecv(p2p_op_list)\n for req in reqs:\n req.wait()\n\n self._barrier()\n\n # GLOO Batch SEND RECV CPU\n @sandcastle_skip_if(BACKEND != \"gloo\", \"GLOO Batch Send Recv CPU\")\n def test_batch_isend_irecv_gloo(self):\n self._barrier()\n rank = dist.get_rank()\n p2p_op_list = []\n\n for src in range(0, dist.get_world_size()):\n if src == rank:\n continue\n send_tensor = _build_tensor(rank + 1)\n recv_tensor = _build_tensor(src + 1, value=-1)\n recv_op = dist.P2POp(dist.irecv, recv_tensor, src)\n p2p_op_list.append(recv_op)\n send_op = dist.P2POp(dist.isend, send_tensor, src)\n p2p_op_list.append(send_op)\n\n reqs = dist.batch_isend_irecv(p2p_op_list)\n for req in reqs:\n req.wait()\n\n self._barrier()\n\n # GLOO Batch SEND RECV CPU with provided tags\n @sandcastle_skip_if(BACKEND != \"gloo\", \"GLOO Batch Send Recv CPU\")\n def test_batch_isend_irecv_gloo_tags(self):\n self._barrier()\n rank = dist.get_rank()\n p2p_op_list = []\n\n for src in range(0, dist.get_world_size()):\n if src == rank:\n continue\n send_tensor = _build_tensor(rank + 1)\n recv_tensor = _build_tensor(src + 1, value=-1)\n recv_op = dist.P2POp(dist.irecv, recv_tensor, src, tag=src)\n p2p_op_list.append(recv_op)\n send_op = dist.P2POp(dist.isend, send_tensor, src, tag=rank)\n p2p_op_list.append(send_op)\n\n reqs = dist.batch_isend_irecv(p2p_op_list)\n for req in reqs:\n req.wait()\n\n self._barrier()\n\n # NCCL Batch SEND RECV Tensor Error\n @sandcastle_skip_if(BACKEND != \"nccl\", \"NCCL Batch Send Recv Only\")\n @requires_nccl_version(2700, \"Need NCCL 2.7+ for send/recv\")\n def test_batch_isend_irecv_tensor_err(self):\n self._barrier()\n rank = dist.get_rank()\n if rank == 0:\n rank_to_GPU = self._init_multigpu_helper()\n device_id = rank_to_GPU[rank][0]\n with self.assertRaisesRegex(\n RuntimeError, \"Tensors must be CUDA and dense\"\n ):\n send_tensor = _build_tensor(rank + 1)\n send_op = dist.P2POp(dist.isend, send_tensor, 1)\n req = dist.batch_isend_irecv([send_op])\n req.wait()\n\n # NCCL Batch SEND RECV Op Error\n @sandcastle_skip_if(BACKEND != \"nccl\", \"NCCL Batch Send Recv Only\")\n @requires_nccl_version(2700, \"Need NCCL 2.7+ for send/recv\")\n def test_batch_isend_irecv_op_err(self):\n self._barrier()\n rank = dist.get_rank()\n if rank == 0:\n rank_to_GPU = self._init_multigpu_helper()\n device_id = rank_to_GPU[rank][0]\n with self.assertRaisesRegex(RuntimeError, \"^Invalid ``op``\"):\n send_tensor = _build_tensor(rank + 1, device_id=device_id)\n send_op = dist.P2POp(dist.broadcast, 
send_tensor, 1)\n req = dist.batch_isend_irecv([send_op])\n req.wait()\n\n # NCCL Batch SEND RECV p2p_op_list Error\n @sandcastle_skip_if(BACKEND != \"nccl\", \"NCCL Batch Send Recv Only\")\n @requires_nccl_version(2700, \"Need NCCL 2.7+ for send/recv\")\n def test_batch_isend_irecv_op_list_err(self):\n self._barrier()\n rank = dist.get_rank()\n if rank == 0:\n rank_to_GPU = self._init_multigpu_helper()\n device_id = rank_to_GPU[rank][0]\n with self.assertRaisesRegex(RuntimeError, \"^Invalid ``p2p_op_list``\"):\n send_tensor = _build_tensor(rank + 1)\n req = dist.batch_isend_irecv([1, 2])\n req.wait()\n\n # NCCL Batch SEND RECV Mixed Backend Error\n @sandcastle_skip_if(BACKEND != \"nccl\", \"NCCL Batch Send Recv Only\")\n @requires_nccl_version(2700, \"Need NCCL 2.7+ for send/recv\")\n def test_batch_isend_irecv_mixed_backend_err(self):\n self._barrier()\n rank = dist.get_rank()\n rank_to_GPU = self._init_multigpu_helper()\n device_id = rank_to_GPU[rank][0]\n group_gloo = dist.new_group(ranks=[0, 1], backend=\"gloo\")\n group_nccl = dist.new_group(ranks=[0, 1], backend=\"nccl\")\n if rank == 0:\n with self.assertRaisesRegex(\n RuntimeError, \"All groups need to use the same backend\"\n ):\n send_tensor = _build_tensor(rank + 1)\n send_op_gloo = dist.P2POp(dist.isend, send_tensor, 1, group_gloo)\n send_op_nccl = dist.P2POp(dist.isend, send_tensor, 1, group_nccl)\n req = dist.batch_isend_irecv([send_op_gloo, send_op_nccl])\n req.wait()\n\n # NCCL SEND RECV\n @skip_if_no_gpu\n @sandcastle_skip_if(BACKEND != \"nccl\", \"NCCL Send Recv Only\")\n @requires_nccl_version(2700, \"Need NCCL 2.7+ for send/recv\")\n def _test_send_recv_nccl(self, profiler_ctx=None):\n # TODO: now that nccl send/recv is supported, there does not seem to\n # be a need to have nccl send/recv be tested separately.\n rank = dist.get_rank()\n rank_to_GPU = self._init_multigpu_helper()\n device_id = rank_to_GPU[rank][0]\n torch.cuda.set_device(device_id)\n\n tensor = _build_tensor(rank + 1, device_id=device_id)\n profiler_cls = profiler_ctx if profiler_ctx is not None else suppress()\n with profiler_cls as prof:\n for src in range(0, dist.get_world_size()):\n if src == rank:\n # Send mode\n for dst in range(0, dist.get_world_size()):\n if dst == rank:\n continue\n dist.send(tensor, dst)\n else:\n # Recv mode\n expected_tensor = _build_tensor(src + 1)\n output_tensor = _build_tensor(\n src + 1, value=-1, device_id=device_id\n )\n dist.recv(output_tensor, src)\n self.assertEqual(output_tensor, expected_tensor)\n\n self._barrier()\n\n if profiler_ctx is not None:\n backend = dist.get_backend()\n if backend in SEND_RECV_PROFILING_SUPPORTED_BACKENDS:\n for event_name in [f\"{backend}:send\", f\"{backend}:recv\"]:\n events = get_profiling_event(event_name, prof)\n self.assertTrue(events)\n # Event order is not deterministic, so simply assert their shape\n # is found in the following list.\n expected_shapes = [\n [[rank + 1] * 3] for rank in range(dist.get_world_size())\n ]\n for event in events:\n self.assertTrue(event.input_shapes in expected_shapes)\n\n @skip_if_no_gpu\n @sandcastle_skip_if(BACKEND != \"nccl\", \"NCCL Send Recv Only\")\n @requires_nccl_version(2700, \"Need NCCL 2.7+ for send/recv\")\n def test_send_recv_nccl(self):\n self._test_send_recv_nccl()\n\n @skip_if_no_gpu\n @sandcastle_skip_if(BACKEND != \"nccl\", \"NCCL Send Recv Only\")\n @requires_nccl_version(2700, \"Need NCCL 2.7+ for send/recv\")\n def test_send_recv_nccl_autograd_profiler(self):\n profiler_ctx = 
torch.autograd.profiler.profile(record_shapes=True)\n self._test_send_recv_nccl(profiler_ctx)\n\n @skip_if_no_gpu\n @sandcastle_skip_if(BACKEND != \"nccl\", \"NCCL Send Recv Only\")\n @requires_nccl_version(2700, \"Need NCCL 2.7+ for send/recv\")\n @sandcastle_skip_if(IS_FBCODE, \"Kineto in fbcode causes hang\")\n @sandcastle_skip_if(\n IS_MACOS or IS_WINDOWS,\n \"torch.profiler not enabled for mac/windows: https://github.com/pytorch/pytorch/pull/56124\",\n )\n def test_send_recv_nccl_torch_profiler(self):\n profiler_ctx = torch.profiler.profile(\n activities=[\n torch.profiler.ProfilerActivity.CPU,\n torch.profiler.ProfilerActivity.CUDA,\n ],\n record_shapes=True,\n )\n self._test_send_recv_nccl(profiler_ctx)\n\n # SEND RECV\n def _test_send_recv(self, profiler_ctx):\n rank = dist.get_rank()\n send_size = rank + 1\n tensor = _build_tensor(send_size)\n ctx = profiler_ctx if profiler_ctx is not None else suppress()\n with ctx as prof:\n for src in range(0, dist.get_world_size()):\n if src == rank:\n # Send mode\n for dst in range(0, dist.get_world_size()):\n if dst == rank:\n continue\n dist.send(tensor, dst)\n else:\n # Recv mode\n recv_size = src + 1\n expected_tensor = _build_tensor(recv_size)\n output_tensor = _build_tensor(recv_size, value=-1)\n dist.recv(output_tensor, src)\n self.assertEqual(output_tensor, expected_tensor)\n\n if profiler_ctx is not None:\n backend = dist.get_backend()\n if backend in SEND_RECV_PROFILING_SUPPORTED_BACKENDS:\n for event_name in [f\"{backend}:send\", f\"{backend}:recv\"]:\n events = get_profiling_event(event_name, prof)\n # Each rank sends/recvs from all other ranks.\n event_count = sum(e.count for e in events)\n expected_event_count = dist.get_world_size() - 1\n self.assertEqual(event_count, expected_event_count)\n # Event order is not deterministic, so simply assert their shape\n # is found in the following list.\n expected_shapes = [\n [[rank + 1] * 3] for rank in range(dist.get_world_size())\n ]\n for event in events:\n self.assertTrue(event.is_async)\n self.assertTrue(event.input_shapes in expected_shapes)\n\n @sandcastle_skip_if(\n BACKEND == \"nccl\", \"Nccl send/recv tested by test_send_recv_nccl\"\n )\n def test_send_recv(self):\n self._test_send_recv(profiler_ctx=None)\n\n @sandcastle_skip_if(\n BACKEND == \"nccl\", \"NCCL send/recv tested by test_send_recv_nccl\"\n )\n def test_send_recv_autograd_profiler(self):\n autograd_profiler_ctx = _create_autograd_profiler()\n self._test_send_recv(profiler_ctx=autograd_profiler_ctx)\n\n @sandcastle_skip_if(\n BACKEND == \"nccl\", \"NCCL send/recv tested by test_send_recv_nccl\"\n )\n @sandcastle_skip_if(IS_FBCODE, \"Kineto in fbcode causes hang\")\n @sandcastle_skip_if(\n IS_MACOS or IS_WINDOWS,\n \"torch.profiler not enabled for mac/windows: https://github.com/pytorch/pytorch/pull/56124\",\n )\n def test_send_recv_torch_profiler(self):\n torch_profiler_ctx = _create_torch_profiler()\n return self._test_send_recv(profiler_ctx=torch_profiler_ctx)\n\n # SEND RECV ANY SOURCE\n def _test_send_recv_any_source(self, profiler_ctx):\n rank = dist.get_rank()\n send_recv_size = 10\n tensor = _build_tensor(send_recv_size, value=rank)\n recv_ranks = list()\n irecv_ranks = list()\n\n ctx = profiler_ctx if profiler_ctx is not None else suppress()\n with ctx as prof:\n for dst in range(0, dist.get_world_size()):\n if dst == rank:\n # Recv mode\n for dst in range(0, dist.get_world_size()):\n if dst == rank:\n continue\n\n for recv in [\"recv\", \"irecv\"]:\n output_tensor = _build_tensor(send_recv_size, value=-1)\n\n 
if recv == \"recv\":\n sender = dist.recv(output_tensor)\n recv_ranks.append(sender)\n elif recv == \"irecv\":\n work = dist.irecv(output_tensor)\n work.wait()\n sender = work._source_rank()\n irecv_ranks.append(sender)\n\n # Assert the scalar value \"sender\" that should be\n # equal to the rank of the sender is equal to all\n # values in the received tensor.\n self.assertTrue(output_tensor.eq(sender).all())\n else:\n # Send mode\n dist.send(tensor, dst) # recv\n dist.send(tensor, dst) # irecv\n\n if profiler_ctx is not None:\n backend = dist.get_backend()\n if backend in SEND_RECV_PROFILING_SUPPORTED_BACKENDS:\n for event_name in [f\"{backend}:send\", f\"{backend}:recvAnySource\"]:\n events = get_profiling_event(event_name, prof)\n # Each rank sends/recvs from other rank twice.\n self.assertEqual(\n sum(event.count for event in events),\n 2 * (dist.get_world_size() - 1),\n )\n for event in events:\n self.assertTrue(event.is_async)\n self.assertEqual(event.input_shapes, [[send_recv_size] * 3])\n\n # Each rank would have 2 * (world_size - 1) sends, verify that\n # globally we receive the same amount on the other end.\n recv_ranks_tensor = torch.cat(\n (torch.tensor(recv_ranks), torch.tensor(irecv_ranks)), 0\n )\n global_recv_ranks = [\n torch.empty_like(recv_ranks_tensor)\n for _ in range(dist.get_world_size())\n ]\n dist.all_gather(global_recv_ranks, recv_ranks_tensor)\n global_recv_ranks_list = []\n for tensor in global_recv_ranks:\n global_recv_ranks_list += tensor.tolist()\n\n from itertools import groupby\n\n global_recv_ranks_list.sort()\n frequency = [\n len(list(group)) for key, group in groupby(global_recv_ranks_list)\n ]\n self.assertEqual(dist.get_world_size(), len(frequency))\n self.assertEqual(\n [2 * (dist.get_world_size() - 1)] * dist.get_world_size(), frequency\n )\n self._barrier()\n\n @sandcastle_skip_if(\n BACKEND == \"nccl\", \"Nccl does not support send/recv from any source\"\n )\n def test_send_recv_any_source(self):\n self._test_send_recv_any_source(profiler_ctx=None)\n\n @sandcastle_skip_if(\n BACKEND == \"nccl\", \"Nccl does not support send/recv from any source\"\n )\n def test_send_recv_any_source_autograd_profiler(self):\n autograd_profiler_ctx = _create_autograd_profiler()\n self._test_send_recv_any_source(profiler_ctx=autograd_profiler_ctx)\n\n @sandcastle_skip_if(\n BACKEND == \"nccl\", \"Nccl does not support send/recv from any source\"\n )\n @sandcastle_skip_if(IS_FBCODE, \"Kineto in fbcode code causes hang\")\n @sandcastle_skip_if(\n IS_MACOS or IS_WINDOWS,\n \"torch.profiler not enabled for mac/windows: https://github.com/pytorch/pytorch/pull/56124\",\n )\n def test_send_recv_any_source_torch_profiler(self):\n torch_profiler_ctx = _create_torch_profiler()\n return self._test_send_recv_any_source(profiler_ctx=torch_profiler_ctx)\n\n # SEND RECV WITH TAG\n def _test_send_recv_with_tag(self, profiler_ctx):\n rank = dist.get_rank()\n world_size = dist.get_world_size()\n send_recv_size = 10\n tensor = _build_tensor(send_recv_size, value=rank)\n ctx = profiler_ctx if profiler_ctx is not None else suppress()\n with ctx as prof:\n for dst in range(0, world_size):\n if dst == rank:\n # Recv mode\n for src in range(0, world_size):\n if src == rank:\n continue\n output_tensor = _build_tensor(send_recv_size, value=-1)\n dist.recv(output_tensor, src, tag=src)\n self.assertTrue(output_tensor.eq(src).all())\n else:\n # Send mode\n dist.send(tensor, dst, tag=rank)\n\n if profiler_ctx is not None:\n backend = dist.get_backend()\n if backend in 
SEND_RECV_PROFILING_SUPPORTED_BACKENDS:\n for event_name in [f\"{backend}:send\", f\"{backend}:recv\"]:\n events = get_profiling_event(event_name, prof)\n # Each rank sends/recvs from all other ranks\n event_count = sum(e.count for e in events)\n expected_event_count = dist.get_world_size() - 1\n self.assertEqual(event_count, expected_event_count)\n for event in events:\n self.assertTrue(event.is_async)\n self.assertEqual(event.name, event_name)\n self.assertEqual(event.input_shapes, [[send_recv_size] * 3])\n\n @sandcastle_skip_if(\n BACKEND == \"nccl\", \"NCCL send/recv tested by test_send_recv_nccl\"\n )\n def test_send_recv_with_tag(self):\n self._test_send_recv_with_tag(profiler_ctx=None)\n\n @sandcastle_skip_if(\n BACKEND == \"nccl\", \"NCCL send/recv tested by test_send_recv_nccl\"\n )\n def test_send_recv_with_tag_autograd_profiler(self):\n autograd_profiler_ctx = _create_autograd_profiler()\n return self._test_send_recv_with_tag(profiler_ctx=autograd_profiler_ctx)\n\n @sandcastle_skip_if(\n BACKEND == \"nccl\", \"NCCL send/recv tested by test_send_recv_nccl\"\n )\n @sandcastle_skip_if(IS_FBCODE, \"Kineto in fbcode code causes hang\")\n @sandcastle_skip_if(\n IS_MACOS or IS_WINDOWS,\n \"torch.profiler not enabled for mac/windows: https://github.com/pytorch/pytorch/pull/56124\",\n )\n def test_send_recv_with_tag_torch_profiler(self):\n torch_profiler_ctx = _create_torch_profiler()\n return self._test_send_recv_with_tag(profiler_ctx=torch_profiler_ctx)\n\n # ISEND\n def _test_isend(self, profiler_ctx):\n rank = dist.get_rank()\n world_size = dist.get_world_size()\n ctx = profiler_ctx if profiler_ctx is not None else suppress()\n with ctx as prof:\n if rank == 0:\n requests = [\n dist.isend(_build_tensor(dest, 10), dest)\n for dest in range(1, world_size)\n ]\n for request in requests:\n request.wait()\n self.assertTrue(request.is_completed())\n else:\n tensor = _build_tensor(rank, -1)\n dist.recv(tensor, 0)\n self.assertEqual(tensor, _build_tensor(rank, 10))\n\n self._barrier()\n\n if profiler_ctx is not None:\n backend = dist.get_backend()\n if backend in SEND_RECV_PROFILING_SUPPORTED_BACKENDS:\n expected_event_name = (\n f\"{backend}:send\" if rank == 0 else f\"{backend}:recv\"\n )\n events = get_profiling_event(expected_event_name, prof)\n event_count = sum(e.count for e in events)\n expected_count = dist.get_world_size() - 1 if rank == 0 else 1\n self.assertEqual(expected_count, event_count)\n # Event ordering is not guaranteed, so simply ensure the shapes are\n # found in the following map.\n expected_shapes = {\n r: [[r] * 3] for r in range(1, dist.get_world_size())\n }\n for event in events:\n self.assertTrue(event.is_async)\n self.assertEqual(event.name, expected_event_name)\n if rank == 0:\n self.assertTrue(\n event.input_shapes in expected_shapes.values()\n )\n else:\n self.assertEqual(event.input_shapes, expected_shapes[rank])\n\n @sandcastle_skip_if(BACKEND == \"nccl\", \"Nccl does not support isend\")\n def test_isend(self):\n self._test_isend(profiler_ctx=None)\n\n @sandcastle_skip_if(BACKEND == \"nccl\", \"Nccl does not support isend\")\n def test_isend_autograd_profiler(self):\n autograd_profiler_ctx = _create_autograd_profiler()\n self._test_isend(profiler_ctx=autograd_profiler_ctx)\n\n @sandcastle_skip_if(BACKEND == \"nccl\", \"Nccl does not support isend\")\n @sandcastle_skip_if(IS_FBCODE, \"Kineto in fbcode code causes hang\")\n @sandcastle_skip_if(\n IS_MACOS or IS_WINDOWS,\n \"torch.profiler not enabled for mac/windows: 
https://github.com/pytorch/pytorch/pull/56124\",\n )\n def test_isend_torch_profiler(self):\n torch_profiler_ctx = _create_torch_profiler()\n self._test_isend(profiler_ctx=torch_profiler_ctx)\n\n # IRECV\n @sandcastle_skip_if(BACKEND == \"nccl\", \"Nccl does not support irecv\")\n def test_irecv(self):\n rank = dist.get_rank()\n world_size = dist.get_world_size()\n\n if rank == 0:\n expected_tensors = [\n _build_tensor(src, -1) for src in range(1, world_size)\n ]\n requests = [\n dist.irecv(expected_tensors[src - 1], src)\n for src in range(1, world_size)\n ]\n\n for src in range(1, world_size):\n requests[src - 1].wait()\n self.assertTrue(requests[src - 1].is_completed())\n self.assertEqual(expected_tensors[src - 1], _build_tensor(src, 10))\n else:\n tensor = _build_tensor(rank, 10)\n dist.send(tensor, 0)\n\n self._barrier()\n\n # BROADCAST\n def _test_broadcast_helper(\n self,\n group,\n group_id,\n rank,\n cuda=False,\n rank_to_GPU=None,\n with_options=False,\n ):\n for dtype, value, requires_cuda in [\n (torch.float, -1e-10, False),\n (torch.double, -1e-100, False),\n (torch.half, -0.1, True),\n (torch.int8, -2, False),\n (torch.uint8, 129, False),\n (torch.int, -1e5, False),\n (torch.long, -1e15, False),\n ]:\n if requires_cuda and not cuda:\n continue\n for src in group:\n expected_tensor = _build_tensor(src + 1, value, dtype)\n if cuda:\n expected_tensor = expected_tensor.cuda(rank_to_GPU[rank][0])\n if rank == src:\n if with_options:\n opts = dist.BroadcastOptions()\n opts.rootTensor = 0\n opts.rootRank = src\n self.call_dist_op(\n \":broadcast\",\n True,\n group_id.broadcast,\n [expected_tensor],\n opts,\n )\n else:\n self.call_dist_op(\n \":broadcast\",\n False,\n dist.broadcast,\n expected_tensor,\n src,\n group_id,\n )\n else:\n tensor = _build_tensor(src + 1, -1, dtype)\n if cuda:\n tensor = tensor.cuda(rank_to_GPU[rank][0])\n if with_options:\n opts = dist.BroadcastOptions()\n opts.rootTensor = 0\n opts.rootRank = src\n self.call_dist_op(\n \":broadcast\", True, group_id.broadcast, [tensor], opts\n )\n else:\n self.call_dist_op(\n \":broadcast\",\n False,\n dist.broadcast,\n tensor,\n src,\n group_id,\n )\n self.assertEqual(tensor.size(), expected_tensor.size())\n self.assertEqual(\n tensor.ne(expected_tensor).max(), torch.tensor(False)\n )\n\n self._barrier()\n\n @sandcastle_skip_if(BACKEND == \"nccl\", \"Nccl does not support CPU tensors\")\n def test_broadcast(self):\n group, group_id, rank = self._init_global_test()\n self._test_broadcast_helper(group, group_id, rank)\n\n @sandcastle_skip_if(\n BACKEND != \"gloo\" and BACKEND != \"nccl\",\n \"Only Gloo and Nccl backend supports CUDA allReduce\",\n )\n @skip_if_no_gpu\n def test_broadcast_cuda(self):\n group, group_id, rank = self._init_global_test()\n rank_to_GPU = self._init_multigpu_helper()\n device_id = rank_to_GPU[rank][0]\n torch.cuda.set_device(device_id)\n self._test_broadcast_helper(group, group_id, rank, True, rank_to_GPU)\n\n @skip_if_small_worldsize\n @sandcastle_skip_if(BACKEND == \"nccl\", \"Nccl does not support CPU tensors\")\n def test_broadcast_group(self):\n group, group_id, rank = self._init_group_test()\n self._test_broadcast_helper(group, group_id, rank)\n\n @sandcastle_skip_if(BACKEND == \"nccl\", \"Nccl does not support CPU tensors\")\n def test_broadcast_full_group(self):\n group, group_id, rank = self._init_full_group_test()\n self._test_broadcast_helper(group, group_id, rank)\n\n @sandcastle_skip_if(\n BACKEND != \"nccl\",\n \"Only NCCL backend supports high priority stream\",\n )\n 
@skip_if_no_gpu\n def test_nccl_high_priority_stream(self):\n group, _, rank = self._init_global_test()\n rank_to_GPU = self._init_multigpu_helper()\n device_id = rank_to_GPU[rank][0]\n torch.cuda.set_device(device_id)\n\n new_port = str(MASTER_PORT + 1)\n os.environ[\"MASTER_PORT\"] = new_port\n gen_iterator = dist.rendezvous(\"env://\", rank, dist.get_world_size())\n store, rank, size = next(gen_iterator)\n store = dist.PrefixStore(new_port, store)\n\n opts = dist.ProcessGroupNCCL.Options()\n opts.is_high_priority_stream = False\n group_id = dist.ProcessGroupNCCL(store, rank, size, opts)\n\n self._test_broadcast_helper(group, group_id, rank, True, rank_to_GPU, True)\n\n # REDUCE\n def _test_reduce_helper(\n self,\n group,\n group_id,\n rank,\n op,\n master_value,\n worker_value,\n expected_value,\n cuda=False,\n rank_to_GPU=None,\n ):\n for src in group:\n tensor = _build_tensor(src + 1).fill_(\n master_value if rank == src else worker_value\n )\n if cuda:\n tensor = tensor.cuda(rank_to_GPU[rank][0])\n self.call_dist_op(\n \":reduce\",\n False,\n dist.reduce,\n tensor,\n src,\n op,\n group_id,\n tensor_shapes=[tensor.shape],\n )\n if rank == src:\n self.assertEqual(tensor, _build_tensor(src + 1, expected_value))\n\n self._barrier()\n\n @sandcastle_skip_if(BACKEND == \"nccl\", \"Nccl does not support CPU tensors\")\n def test_reduce_sum(self):\n group, group_id, rank = self._init_global_test()\n self._test_reduce_helper(\n group,\n group_id,\n rank,\n dist.ReduceOp.SUM,\n 2,\n 10,\n 2 + (10 * (len(group) - 1)),\n )\n\n @sandcastle_skip_if(BACKEND != \"nccl\", \"Only Nccl supports CUDA reduce\")\n @skip_if_no_gpu\n def test_reduce_sum_cuda(self):\n group, group_id, rank = self._init_global_test()\n rank_to_GPU = self._init_multigpu_helper()\n device_id = rank_to_GPU[rank][0]\n torch.cuda.set_device(device_id)\n self._test_reduce_helper(\n group,\n group_id,\n rank,\n dist.ReduceOp.SUM,\n 2,\n 10,\n 2 + 10 * (len(group) - 1),\n True,\n rank_to_GPU,\n )\n\n @sandcastle_skip_if(BACKEND == \"nccl\", \"Nccl does not support CPU tensors\")\n def test_reduce_product(self):\n group, group_id, rank = self._init_global_test()\n self._test_reduce_helper(\n group,\n group_id,\n rank,\n dist.ReduceOp.PRODUCT,\n 2,\n 10,\n reduce((lambda x, y: x * y), [10] * (len(group) - 1), 2),\n )\n\n @sandcastle_skip_if(BACKEND == \"nccl\", \"Nccl does not support CPU tensors\")\n def test_reduce_min(self):\n group, group_id, rank = self._init_global_test()\n self._test_reduce_helper(\n group, group_id, rank, dist.ReduceOp.MIN, 1010, 1, 1\n )\n\n @sandcastle_skip_if(BACKEND == \"nccl\", \"Nccl does not support CPU tensors\")\n def test_reduce_max(self):\n group, group_id, rank = self._init_global_test()\n self._test_reduce_helper(\n group, group_id, rank, dist.ReduceOp.MAX, -1, 10, 10\n )\n\n @sandcastle_skip_if(BACKEND == \"nccl\", \"Nccl does not support CPU tensors\")\n @skip_if_small_worldsize\n def test_reduce_group_sum(self):\n group, group_id, rank = self._init_group_test()\n self._test_reduce_helper(\n group,\n group_id,\n rank,\n dist.ReduceOp.SUM,\n 2,\n 10,\n 2 + (10 * (len(group) - 1)),\n )\n\n @sandcastle_skip_if(BACKEND == \"nccl\", \"Nccl does not support CPU tensors\")\n @skip_if_small_worldsize\n def test_reduce_group_product(self):\n group, group_id, rank = self._init_group_test()\n self._test_reduce_helper(\n group,\n group_id,\n rank,\n dist.ReduceOp.PRODUCT,\n 2,\n 10,\n reduce((lambda x, y: x * y), [10] * (len(group) - 1), 2),\n )\n\n @sandcastle_skip_if(BACKEND == \"nccl\", \"Nccl does not 
support CPU tensors\")\n @skip_if_small_worldsize\n def test_reduce_group_min(self):\n group, group_id, rank = self._init_group_test()\n self._test_reduce_helper(\n group, group_id, rank, dist.ReduceOp.MIN, 1010, 1, 1\n )\n\n @sandcastle_skip_if(BACKEND == \"nccl\", \"Nccl does not support CPU tensors\")\n @skip_if_small_worldsize\n def test_reduce_group_max(self):\n group, group_id, rank = self._init_group_test()\n self._test_reduce_helper(\n group, group_id, rank, dist.ReduceOp.MAX, -1, 10, 10\n )\n\n @sandcastle_skip_if(BACKEND == \"nccl\", \"Nccl does not support CPU tensors\")\n def test_reduce_full_group_sum(self):\n group, group_id, rank = self._init_full_group_test()\n self._test_reduce_helper(\n group,\n group_id,\n rank,\n dist.ReduceOp.SUM,\n 2,\n 10,\n 2 + (10 * (len(group) - 1)),\n )\n\n @sandcastle_skip_if(BACKEND == \"nccl\", \"Nccl does not support CPU tensors\")\n def test_reduce_full_group_product(self):\n group, group_id, rank = self._init_full_group_test()\n self._test_reduce_helper(\n group,\n group_id,\n rank,\n dist.ReduceOp.PRODUCT,\n 2,\n 10,\n reduce((lambda x, y: x * y), [10] * (len(group) - 1), 2),\n )\n\n @sandcastle_skip_if(BACKEND == \"nccl\", \"Nccl does not support CPU tensors\")\n def test_reduce_full_group_min(self):\n group, group_id, rank = self._init_full_group_test()\n self._test_reduce_helper(\n group, group_id, rank, dist.ReduceOp.MIN, 1010, 1, 1\n )\n\n @sandcastle_skip_if(BACKEND == \"nccl\", \"Nccl does not support CPU tensors\")\n def test_reduce_full_group_max(self):\n group, group_id, rank = self._init_full_group_test()\n self._test_reduce_helper(\n group, group_id, rank, dist.ReduceOp.MAX, -1, 10, 10\n )\n\n # REDUCE TWICE\n def _test_reduce_twice_helper(\n self,\n group,\n group_id,\n rank,\n op,\n master_value,\n worker_value,\n expected_value,\n cuda=False,\n rank_to_GPU=None,\n ):\n for src in group:\n tensors = [\n _build_tensor(src + 1).fill_(\n master_value if rank == src else worker_value\n )\n for i in range(2)\n ]\n if cuda:\n for i in range(2):\n tensors[i] = tensors[i].cuda(rank_to_GPU[rank][0])\n self.call_dist_op(\n \":reduce\",\n False,\n dist.reduce,\n tensors[0],\n src,\n op,\n group_id,\n secondary_op_call=lambda: dist.reduce(\n tensors[1], src, op, group_id\n ),\n tensor_shapes=[tensors[0].shape],\n )\n if rank == src:\n for tensor in tensors:\n self.assertEqual(tensor, _build_tensor(src + 1, expected_value))\n\n self._barrier()\n\n @sandcastle_skip_if(BACKEND == \"nccl\", \"Nccl does not support CPU tensors\")\n def test_reduce_sum_twice(self):\n group, group_id, rank = self._init_global_test()\n self._test_reduce_twice_helper(\n group,\n group_id,\n rank,\n dist.ReduceOp.SUM,\n 2,\n 10,\n 2 + (10 * (len(group) - 1)),\n )\n\n @sandcastle_skip_if(BACKEND != \"nccl\", \"Only Nccl supports CUDA reduce\")\n @skip_if_no_gpu\n def test_reduce_sum_cuda_twice(self):\n group, group_id, rank = self._init_global_test()\n rank_to_GPU = self._init_multigpu_helper()\n device_id = rank_to_GPU[rank][0]\n torch.cuda.set_device(device_id)\n self._test_reduce_twice_helper(\n group,\n group_id,\n rank,\n dist.ReduceOp.SUM,\n 2,\n 10,\n 2 + 10 * (len(group) - 1),\n True,\n rank_to_GPU,\n )\n\n @skip_if_no_gpu\n @require_backend({\"gloo\", \"nccl\"})\n def test_all_reduce_result_cuda(self):\n group, group_id, rank = self._init_global_test()\n rank_to_GPU = self._init_multigpu_helper()\n for src in group:\n if rank == src:\n tensor = _build_tensor(src + 1, 2)\n else:\n tensor = _build_tensor(src + 1, 10)\n tensor = 
tensor.cuda(rank_to_GPU[rank][0])\n\n opts = AllreduceOptions()\n opts.reduceOp = dist.ReduceOp.SUM\n\n if group_id == GroupMember.WORLD:\n work = _get_default_group().allreduce([tensor], opts)\n else:\n work = group_id.allreduce([tensor], opts)\n\n if BACKEND == \"gloo\":\n # Calling result right the work is finished should throw exception.\n # Here we have a race condition, we may not assume the work is not\n # finished by the time we run next lines.\n try:\n with self.assertRaisesRegex(\n RuntimeError,\n \"Work needs to be completed before calling result\",\n ):\n work.result()\n except AssertionError:\n # Exception was not raised, ensure is_completed()\n self.assertTrue(work.is_completed())\n\n work.wait()\n result = work.result()\n else:\n # In case of NCCL we should be able to retrieve pointer to the result\n # even before work is finished.\n result = work.result()\n work.wait()\n\n expected_value = 2 + (10 * (len(group) - 1))\n self.assertEqual(result, [_build_tensor(src + 1, expected_value)])\n self._barrier()\n\n def call_dist_op(\n self,\n profiling_title_postfix,\n is_async,\n op,\n *args,\n expect_event=True,\n secondary_op_call=None,\n profile_cuda=False,\n tensor_shapes=None,\n **kwargs,\n ):\n op_calls = [lambda: op(*args, **kwargs)]\n if secondary_op_call is not None:\n op_calls.append(secondary_op_call)\n\n autograd_profiler_ctx = torch.autograd.profiler.profile(\n use_cuda=profile_cuda, record_shapes=True\n )\n\n # TODO: move this test to use torch.profiler once kineto issues are\n # fixed internally.\n with autograd_profiler_ctx as prof:\n works = [op_call() for op_call in op_calls]\n if is_async:\n for work in works:\n work.wait()\n\n if expect_event and dist.get_backend() in PROFILING_SUPPORTED_BACKENDS:\n events = get_profiling_event(\n profiling_title_postfix, autograd_profiler_ctx\n )\n # DETAIL debug mode can use a pg wrapper that issues more collectives\n # under the hood\n if dist._get_debug_mode() != dist._DistributedDebugLevel.DETAIL:\n self.assertEqual(len(events), len(op_calls))\n for e in events:\n self.assertTrue(e.is_async)\n self.assertEqual(e.count, 1)\n self.assertGreaterEqual(e.cpu_time, 0)\n # Verify tensor shapes if given\n # DETAIL debug mode can use a pg wrapper that issues more collectives\n # under the hood\n if (\n tensor_shapes is not None\n and dist._get_debug_mode() != dist._DistributedDebugLevel.DETAIL\n ):\n self.assertEqual(\n e.input_shapes,\n tensor_shapes,\n f\"event shape: {e.input_shapes} vs tensor {tensor_shapes}\",\n )\n\n # ALL REDUCE\n def _test_all_reduce_helper(\n self,\n group,\n group_id,\n rank,\n op,\n master_value,\n worker_value,\n expected_value,\n cuda=False,\n rank_to_GPU=None,\n dtype=torch.float,\n async_op=False,\n ):\n for src in group:\n curr_value = master_value if rank == src else worker_value\n\n tensor = _build_tensor(src + 1, dtype=dtype).fill_(curr_value)\n if cuda:\n tensor = tensor.cuda(rank_to_GPU[rank][0])\n if tensor.dtype == torch.complex64:\n tensor_shapes = [torch.view_as_real(tensor).shape]\n else:\n tensor_shapes = [tensor.shape]\n self.call_dist_op(\n \":all_reduce\",\n async_op,\n dist.all_reduce,\n tensor,\n op,\n group_id,\n async_op=async_op,\n tensor_shapes=tensor_shapes,\n )\n # Currently, only Gloo backend has profiling tested with CUDA enabled.\n # Only run cuda profiling test for one rank to speed up since\n # running with different src_rank does not affect the correctness.\n if (\n src == 0\n and cuda\n and dist.get_backend() in CUDA_PROFILING_SUPPORTED_BACKENDS\n ):\n 
self.call_dist_op(\n \":all_reduce\",\n async_op,\n dist.all_reduce,\n tensor,\n op,\n group_id,\n async_op=async_op,\n profile_cuda=True,\n tensor_shapes=tensor_shapes,\n )\n\n self._barrier()\n\n @sandcastle_skip_if(BACKEND == \"nccl\", \"Nccl does not support CPU tensors\")\n def test_all_reduce_sum(self):\n group, group_id, rank = self._init_global_test()\n self._test_all_reduce_helper(\n group,\n group_id,\n rank,\n dist.ReduceOp.SUM,\n 2,\n 10,\n 2 + (10 * (len(group) - 1)),\n )\n\n @sandcastle_skip_if(BACKEND == \"nccl\", \"Nccl does not support CPU tensors\")\n def test_all_reduce_sum_async(self):\n group, group_id, rank = self._init_global_test()\n self._test_all_reduce_helper(\n group,\n group_id,\n rank,\n dist.ReduceOp.SUM,\n 2,\n 10,\n 2 + (10 * (len(group) - 1)),\n async_op=True,\n )\n\n @sandcastle_skip_if(\n BACKEND != \"gloo\" and BACKEND != \"nccl\",\n \"Only Gloo and NCCL backends will have CUDA allReduce tested\",\n )\n @skip_if_no_gpu\n def test_all_reduce_sum_cuda(self):\n torch.cuda.set_device(self.rank)\n group, group_id, rank = self._init_global_test()\n rank_to_GPU = self._init_multigpu_helper()\n self._test_all_reduce_helper(\n group,\n group_id,\n rank,\n dist.ReduceOp.SUM,\n 2,\n 10,\n 2 + (10 * (len(group) - 1)),\n True,\n rank_to_GPU,\n )\n\n @sandcastle_skip_if(\n BACKEND != \"gloo\" and BACKEND != \"nccl\",\n \"Only Gloo and NCCL backends will have CUDA allReduce tested\",\n )\n @skip_if_no_gpu\n def test_all_reduce_sum_cuda_async(self):\n torch.cuda.set_device(self.rank)\n group, group_id, rank = self._init_global_test()\n rank_to_GPU = self._init_multigpu_helper()\n self._test_all_reduce_helper(\n group,\n group_id,\n rank,\n dist.ReduceOp.SUM,\n 2,\n 10,\n 2 + (10 * (len(group) - 1)),\n True,\n rank_to_GPU,\n async_op=True,\n )\n\n @sandcastle_skip_if(BACKEND == \"nccl\", \"Nccl does not support CPU tensors\")\n def test_all_reduce_sum_complex(self):\n group, group_id, rank = self._init_global_test()\n self._test_all_reduce_helper(\n group,\n group_id,\n rank,\n dist.ReduceOp.SUM,\n complex(2, 3),\n complex(10, 11),\n complex(2, 3) + (complex(10, 11) * (len(group) - 1)),\n dtype=torch.cfloat,\n )\n\n @sandcastle_skip_if(BACKEND == \"nccl\", \"Nccl does not support CPU tensors\")\n def test_all_reduce_complex_unsupported_ops(self):\n unsupported_ops = [\n dist.ReduceOp.MAX,\n dist.ReduceOp.MIN,\n dist.ReduceOp.PRODUCT,\n dist.ReduceOp.BAND,\n dist.ReduceOp.BOR,\n dist.ReduceOp.BXOR,\n ]\n group, group_id, rank = self._init_global_test()\n for unsupported_op in unsupported_ops:\n with self.assertRaisesRegex(\n RuntimeError, \"all_reduce does not support\"\n ):\n dist.all_reduce(\n _build_tensor(1, dtype=torch.cfloat), unsupported_op, group_id\n )\n\n @sandcastle_skip_if(\n BACKEND != \"gloo\" and BACKEND != \"nccl\",\n \"Only Gloo and NCCL backends will have CUDA allReduce tested\",\n )\n @skip_if_no_gpu\n def test_all_reduce_sum_cuda_complex(self):\n torch.cuda.set_device(self.rank)\n group, group_id, rank = self._init_global_test()\n rank_to_GPU = self._init_multigpu_helper()\n self._test_all_reduce_helper(\n group,\n group_id,\n rank,\n dist.ReduceOp.SUM,\n complex(2, 3),\n complex(10, 11),\n complex(2, 3) + (complex(10, 11) * (len(group) - 1)),\n True,\n rank_to_GPU,\n dtype=torch.cfloat,\n )\n\n @sandcastle_skip_if(BACKEND == \"nccl\", \"Nccl does not support CPU tensors\")\n def test_all_reduce_product(self):\n group, group_id, rank = self._init_global_test()\n self._test_all_reduce_helper(\n group,\n group_id,\n rank,\n dist.ReduceOp.PRODUCT,\n 2,\n 
10,\n reduce((lambda x, y: x * y), [10] * (len(group) - 1), 2),\n )\n\n @sandcastle_skip_if(BACKEND == \"nccl\", \"Nccl does not support CPU tensors\")\n def test_all_reduce_min(self):\n group, group_id, rank = self._init_global_test()\n self._test_all_reduce_helper(\n group, group_id, rank, dist.ReduceOp.MIN, 1010, 1, 1\n )\n\n @sandcastle_skip_if(BACKEND == \"nccl\", \"Nccl does not support CPU tensors\")\n def test_all_reduce_max(self):\n group, group_id, rank = self._init_global_test()\n self._test_all_reduce_helper(\n group, group_id, rank, dist.ReduceOp.MAX, -1, 10, 10\n )\n\n @skip_if_small_worldsize\n @sandcastle_skip_if(BACKEND == \"nccl\", \"Nccl does not support CPU tensors\")\n def test_all_reduce_group_sum(self):\n group, group_id, rank = self._init_group_test()\n self._test_all_reduce_helper(\n group,\n group_id,\n rank,\n dist.ReduceOp.SUM,\n 2,\n 10,\n 2 + (10 * (len(group) - 1)),\n )\n\n @skip_if_small_worldsize\n @sandcastle_skip_if(BACKEND == \"nccl\", \"Nccl does not support CPU tensors\")\n def test_all_reduce_group_product(self):\n group, group_id, rank = self._init_group_test()\n self._test_all_reduce_helper(\n group,\n group_id,\n rank,\n dist.ReduceOp.PRODUCT,\n 2,\n 10,\n reduce((lambda x, y: x * y), [10] * (len(group) - 1), 2),\n )\n\n @skip_if_small_worldsize\n @sandcastle_skip_if(BACKEND == \"nccl\", \"Nccl does not support CPU tensors\")\n def test_all_reduce_group_min(self):\n group, group_id, rank = self._init_group_test()\n self._test_all_reduce_helper(\n group, group_id, rank, dist.ReduceOp.MIN, 1010, 1, 1\n )\n\n @skip_if_small_worldsize\n @sandcastle_skip_if(BACKEND == \"nccl\", \"Nccl does not support CPU tensors\")\n def test_all_reduce_group_max(self):\n group, group_id, rank = self._init_group_test()\n self._test_all_reduce_helper(\n group, group_id, rank, dist.ReduceOp.MAX, -1, 10, 10\n )\n\n @sandcastle_skip_if(BACKEND == \"nccl\", \"Nccl does not support CPU tensors\")\n def test_all_reduce_full_group_sum(self):\n group, group_id, rank = self._init_full_group_test()\n self._test_all_reduce_helper(\n group,\n group_id,\n rank,\n dist.ReduceOp.SUM,\n 2,\n 10,\n 2 + (10 * (len(group) - 1)),\n )\n\n @sandcastle_skip_if(BACKEND == \"nccl\", \"Nccl does not support CPU tensors\")\n def test_all_reduce_full_group_product(self):\n group, group_id, rank = self._init_full_group_test()\n self._test_all_reduce_helper(\n group,\n group_id,\n rank,\n dist.ReduceOp.PRODUCT,\n 2,\n 10,\n reduce((lambda x, y: x * y), [10] * (len(group) - 1), 2),\n )\n\n @sandcastle_skip_if(BACKEND == \"nccl\", \"Nccl does not support CPU tensors\")\n def test_all_reduce_full_group_min(self):\n group, group_id, rank = self._init_full_group_test()\n self._test_all_reduce_helper(\n group, group_id, rank, dist.ReduceOp.MIN, 1010, 1, 1\n )\n\n @sandcastle_skip_if(BACKEND == \"nccl\", \"Nccl does not support CPU tensors\")\n def test_all_reduce_full_group_max(self):\n group, group_id, rank = self._init_full_group_test()\n self._test_all_reduce_helper(\n group, group_id, rank, dist.ReduceOp.MAX, -1, 10, 10\n )\n\n # SPARSE ALL REDUCE\n def _test_sparse_all_reduce_sum(self, fn):\n group, group_id, rank = self._init_global_test()\n\n tests = simple_sparse_reduce_tests(\n rank, dist.get_world_size(), num_inputs=1\n )\n for (inputs, outputs) in tests:\n tensors = [fn(input) for input in inputs]\n dist.all_reduce(tensors[0], dist.ReduceOp.SUM, group_id)\n self.assertEqual(tensors[0], outputs[0])\n\n @sandcastle_skip_if(\n BACKEND != \"gloo\", \"Only Gloo backend support sparse all reduce\"\n 
)\n def test_sparse_all_reduce_sum(self):\n self._test_sparse_all_reduce_sum(lambda t: t)\n\n @sandcastle_skip_if(\n BACKEND != \"gloo\", \"Only Gloo backend support sparse all reduce\"\n )\n @skip_if_no_gpu\n def test_sparse_all_reduce_sum_cuda(self):\n self._test_sparse_all_reduce_sum(lambda t: t.clone().cuda())\n\n # ALL REDUCE - COALESCED\n @staticmethod\n def _all_reduce_coalesced_sum_test_cases(group_size):\n return (\n [2, 3, complex(2, 3)],\n [10, 11, complex(10, 11)],\n [\n 2 + 10 * (group_size - 1),\n 3 + 11 * (group_size - 1),\n complex(2, 3) + complex(10, 11) * (group_size - 1),\n ],\n [torch.float, torch.float, torch.cfloat],\n )\n\n @staticmethod\n def _all_reduce_coalesced_product_test_cases(group_size):\n return (\n [1, 2],\n [3, 4],\n [1 * 3 ** (group_size - 1), 2 * 4 ** (group_size - 1)],\n [torch.float, torch.float],\n )\n\n @staticmethod\n def _all_reduce_coalesced_min_test_cases(group_size):\n return (\n [1, 4],\n [2, 3],\n [1, 3],\n [torch.float, torch.float],\n )\n\n @staticmethod\n def _all_reduce_coalesced_max_test_cases(group_size):\n return (\n [1, 4],\n [2, 3],\n [2, 4],\n [torch.float, torch.float],\n )\n\n @sandcastle_skip_if(BACKEND == \"nccl\", \"Nccl does not support CPU tensors\")\n def test_all_reduce_coalesced_max_complex_unsupported(self):\n group, group_id, rank = self._init_global_test()\n with self.assertRaisesRegex(RuntimeError, \"all_reduce does not support\"):\n dist.all_reduce_coalesced(\n [_build_tensor(1, dtype=torch.cfloat)], dist.ReduceOp.MAX, group_id\n )\n\n def _test_all_reduce_coalesced_helper(\n self,\n group,\n group_id,\n rank,\n op,\n cuda=False,\n rank_to_GPU=None,\n ):\n test_case_func = {\n dist.ReduceOp.SUM: self._all_reduce_coalesced_sum_test_cases,\n dist.ReduceOp.PRODUCT: self._all_reduce_coalesced_product_test_cases,\n dist.ReduceOp.MIN: self._all_reduce_coalesced_min_test_cases,\n dist.ReduceOp.MAX: self._all_reduce_coalesced_max_test_cases,\n }[op]\n\n master_values, worker_values, expected_values, dtypes = test_case_func(\n len(group)\n )\n\n for src in group:\n curr_values = master_values if rank == src else worker_values\n tensors = [\n _build_tensor(src + 1, val, dtype=dtype)\n for dtype, val in zip(dtypes, curr_values)\n ]\n if cuda:\n tensors = [t.cuda(rank_to_GPU[rank][0]) for t in tensors]\n tensor_shapes = []\n for tensor in tensors:\n if tensor.dtype == torch.complex64:\n tensor_shapes.append(torch.view_as_real(tensor).shape)\n else:\n tensor_shapes.append(tensor.shape)\n self.call_dist_op(\n \":all_reduce\",\n False,\n dist.all_reduce_coalesced,\n tensors,\n op,\n group_id,\n tensor_shapes=tensor_shapes,\n )\n expected_tensors = [\n _build_tensor(src + 1, expected_value, dtype=dtype)\n for dtype, expected_value in zip(dtypes, expected_values)\n ]\n self.assertEqual(tensors, expected_tensors)\n\n self._barrier()\n\n @require_backend({\"gloo\"})\n def test_all_reduce_coalesced_sum(self):\n group, group_id, rank = self._init_global_test()\n self._test_all_reduce_coalesced_helper(\n group,\n group_id,\n rank,\n dist.ReduceOp.SUM,\n cuda=False,\n rank_to_GPU=None,\n )\n\n @require_backend({\"gloo\"})\n def test_all_reduce_coalesced_product(self):\n group, group_id, rank = self._init_global_test()\n self._test_all_reduce_coalesced_helper(\n group,\n group_id,\n rank,\n dist.ReduceOp.PRODUCT,\n cuda=False,\n rank_to_GPU=None,\n )\n\n @require_backend({\"gloo\"})\n def test_all_reduce_coalesced_min(self):\n group, group_id, rank = self._init_global_test()\n self._test_all_reduce_coalesced_helper(\n group,\n group_id,\n 
rank,\n dist.ReduceOp.MIN,\n cuda=False,\n rank_to_GPU=None,\n )\n\n @require_backend({\"gloo\"})\n def test_all_reduce_coalesced_max(self):\n group, group_id, rank = self._init_global_test()\n self._test_all_reduce_coalesced_helper(\n group, group_id, rank, dist.ReduceOp.MAX, cuda=False, rank_to_GPU=None\n )\n\n @skip_if_small_worldsize\n @require_backend({\"gloo\"})\n def test_all_reduce_coalesced_group_sum(self):\n group, group_id, rank = self._init_group_test()\n self._test_all_reduce_coalesced_helper(\n group, group_id, rank, dist.ReduceOp.SUM, cuda=False, rank_to_GPU=None\n )\n\n @skip_if_small_worldsize\n @require_backend({\"gloo\"})\n def test_all_reduce_coalesced_group_product(self):\n group, group_id, rank = self._init_group_test()\n self._test_all_reduce_coalesced_helper(\n group,\n group_id,\n rank,\n dist.ReduceOp.PRODUCT,\n cuda=False,\n rank_to_GPU=None,\n )\n\n @skip_if_small_worldsize\n @require_backend({\"gloo\"})\n def test_all_reduce_coalesced_group_min(self):\n group, group_id, rank = self._init_group_test()\n self._test_all_reduce_coalesced_helper(\n group, group_id, rank, dist.ReduceOp.MIN, cuda=False, rank_to_GPU=None\n )\n\n @skip_if_small_worldsize\n @require_backend({\"gloo\"})\n def test_all_reduce_coalesced_group_max(self):\n group, group_id, rank = self._init_group_test()\n self._test_all_reduce_coalesced_helper(\n group, group_id, rank, dist.ReduceOp.MAX, cuda=False, rank_to_GPU=None\n )\n\n @require_backend({\"gloo\"})\n def test_all_reduce_coalesced_full_group_sum(self):\n group, group_id, rank = self._init_full_group_test()\n self._test_all_reduce_coalesced_helper(\n group, group_id, rank, dist.ReduceOp.SUM, cuda=False, rank_to_GPU=None\n )\n\n @require_backend({\"gloo\"})\n def test_all_reduce_coalesced_full_group_product(self):\n group, group_id, rank = self._init_full_group_test()\n self._test_all_reduce_coalesced_helper(\n group,\n group_id,\n rank,\n dist.ReduceOp.PRODUCT,\n cuda=False,\n rank_to_GPU=None,\n )\n\n @require_backend({\"gloo\"})\n def test_all_reduce_coalesced_full_group_min(self):\n group, group_id, rank = self._init_full_group_test()\n self._test_all_reduce_coalesced_helper(\n group,\n group_id,\n rank,\n dist.ReduceOp.MIN,\n cuda=False,\n rank_to_GPU=None,\n )\n\n @require_backend({\"gloo\"})\n def test_all_reduce_coalesced_full_group_max(self):\n group, group_id, rank = self._init_full_group_test()\n self._test_all_reduce_coalesced_helper(\n group, group_id, rank, dist.ReduceOp.MAX, cuda=False, rank_to_GPU=None\n )\n\n # SCATTER\n def _test_scatter_helper(self, group, group_id, rank, dtype=torch.float):\n for dest in group:\n tensor = _build_tensor(dest + 1, -1, dtype=dtype)\n expected_tensor = _build_tensor(dest + 1, rank, dtype=dtype)\n tensors = (\n [_build_tensor(dest + 1, i, dtype=dtype) for i in group]\n if rank == dest\n else []\n )\n if dtype == torch.complex64:\n tensor_shapes = [torch.view_as_real(t).shape for t in tensors]\n else:\n tensor_shapes = [t.shape for t in tensors]\n self.call_dist_op(\n \":scatter\",\n False,\n dist.scatter,\n tensor,\n src=dest,\n scatter_list=tensors,\n group=group_id,\n tensor_shapes=tensor_shapes,\n )\n self.assertEqual(tensor, expected_tensor)\n\n self._barrier()\n\n @sandcastle_skip_if(BACKEND == \"nccl\", \"Nccl does not support CPU tensors\")\n def test_scatter_checks(self):\n group, group_id, rank = self._init_global_test()\n one = torch.ones([1])\n\n # Specify scatter_list argument only on source rank.\n output = one.clone() * -1\n if rank == 0:\n scatter_list = [one.clone() * i for i 
in group]\n dist.scatter(output, src=0, scatter_list=scatter_list)\n else:\n dist.scatter(output, src=0)\n self.assertEqual(output, one * rank)\n\n # Don't specify src argument.\n output = one.clone() * -1\n if rank == 0:\n scatter_list = [one.clone() * i for i in group]\n dist.scatter(output, scatter_list=scatter_list)\n else:\n dist.scatter(output)\n self.assertEqual(output, one * rank)\n\n @sandcastle_skip_if(BACKEND == \"nccl\", \"Nccl does not support scatter\")\n def test_scatter(self):\n group, group_id, rank = self._init_global_test()\n self._test_scatter_helper(group, group_id, rank)\n\n @sandcastle_skip_if(BACKEND == \"nccl\", \"Nccl does not support scatter\")\n def test_scatter_complex(self):\n group, group_id, rank = self._init_global_test()\n self._test_scatter_helper(group, group_id, rank, dtype=torch.cfloat)\n\n @sandcastle_skip_if(BACKEND == \"nccl\", \"Nccl does not support scatter\")\n @skip_if_small_worldsize\n def test_scatter_group(self):\n group, group_id, rank = self._init_group_test()\n self._test_scatter_helper(group, group_id, rank)\n\n @sandcastle_skip_if(BACKEND == \"nccl\", \"Nccl does not support scatter\")\n def test_scatter_full_group(self):\n group, group_id, rank = self._init_full_group_test()\n self._test_scatter_helper(group, group_id, rank)\n\n # GATHER\n def _test_gather_helper(self, group, group_id, rank):\n for dest in group:\n tensor = _build_tensor(dest + 1, rank)\n tensors = (\n [_build_tensor(dest + 1, -1) for i in group] if rank == dest else []\n )\n self.call_dist_op(\n \":gather\",\n False,\n dist.gather,\n tensor,\n dst=dest,\n gather_list=tensors,\n group=group_id,\n tensor_shapes=[tensors[0].shape] if len(tensors) > 0 else None,\n )\n if rank == dest:\n expected_tensors = [_build_tensor(dest + 1, i) for i in group]\n for t1, t2 in zip(tensors, expected_tensors):\n self.assertEqual(t1, t2)\n\n self._barrier()\n\n @sandcastle_skip_if(BACKEND == \"nccl\", \"Nccl does not support CPU tensors\")\n def test_gather_checks(self):\n group, group_id, rank = self._init_global_test()\n one = torch.ones([1])\n\n # Specify gather_list argument only on destination rank.\n if rank == 0:\n gather_list = [one.clone() for _ in group]\n dist.gather(one * rank, dst=0, gather_list=gather_list)\n for i in group:\n self.assertEqual(gather_list[i], one * i)\n else:\n dist.gather(one * rank, dst=0)\n\n # Don't specify dst argument.\n if rank == 0:\n gather_list = [one.clone() for _ in group]\n dist.gather(one * rank, gather_list=gather_list)\n for i in group:\n self.assertEqual(gather_list[i], one * i)\n else:\n dist.gather(one * rank)\n\n @sandcastle_skip_if(BACKEND == \"nccl\", \"Nccl does not support CPU tensors\")\n def test_gather(self):\n group, group_id, rank = self._init_global_test()\n self._test_gather_helper(group, group_id, rank)\n\n @sandcastle_skip_if(BACKEND == \"nccl\", \"Nccl does not support CPU tensors\")\n @skip_if_small_worldsize\n def test_gather_group(self):\n group, group_id, rank = self._init_group_test()\n self._test_gather_helper(group, group_id, rank)\n\n @sandcastle_skip_if(BACKEND == \"nccl\", \"Nccl does not support CPU tensors\")\n def test_gather_full_group(self):\n group, group_id, rank = self._init_full_group_test()\n self._test_gather_helper(group, group_id, rank)\n\n # ALL GATHER\n def _test_all_gather_helper(\n self, group, group_id, rank, cuda=False, rank_to_GPU=None, dtype=torch.float\n ):\n for dest in group:\n tensor = _build_tensor(dest + 1, rank, dtype=dtype)\n tensors = [_build_tensor(dest + 1, -1, dtype=dtype) for i in 
group]\n if cuda:\n tensor = tensor.cuda(rank_to_GPU[rank][0])\n tensors = [t.cuda(rank_to_GPU[rank][0]) for t in tensors]\n if tensors[0].dtype == torch.complex64:\n tensor_shapes = [torch.view_as_real(tensors[0]).shape]\n else:\n tensor_shapes = [tensors[0].shape]\n self.call_dist_op(\n \":all_gather\",\n False,\n dist.all_gather,\n tensors,\n tensor,\n group_id,\n tensor_shapes=tensor_shapes,\n )\n\n expected_tensors = [\n _build_tensor(dest + 1, i, dtype=dtype) for i in group\n ]\n for t1, t2 in zip(tensors, expected_tensors):\n self.assertEqual(t1, t2)\n\n self._barrier()\n\n @sandcastle_skip_if(BACKEND == \"nccl\", \"Nccl does not support CPU tensors\")\n def test_all_gather(self):\n group, group_id, rank = self._init_global_test()\n self._test_all_gather_helper(group, group_id, rank)\n\n @sandcastle_skip_if(BACKEND != \"nccl\", \"Only Nccl supports CUDA all gather\")\n @sandcastle_skip_if(BACKEND == \"nccl\", \"CUDA all gather skipped for NCCL\")\n @skip_if_no_gpu\n def test_all_gather_cuda(self):\n group, group_id, rank = self._init_global_test()\n rank_to_GPU = self._init_multigpu_helper()\n self._test_all_gather_helper(group, group_id, rank, True, rank_to_GPU)\n\n @sandcastle_skip_if(BACKEND == \"nccl\", \"Nccl does not support CPU tensors\")\n def test_all_gather_complex(self):\n group, group_id, rank = self._init_global_test()\n self._test_all_gather_helper(group, group_id, rank, dtype=torch.cfloat)\n\n @sandcastle_skip_if(BACKEND != \"nccl\", \"Only Nccl supports CUDA all gather\")\n @sandcastle_skip_if(BACKEND == \"nccl\", \"CUDA all gather skipped for NCCL\")\n @skip_if_no_gpu\n def test_all_gather_cuda_complex(self):\n group, group_id, rank = self._init_global_test()\n rank_to_GPU = self._init_multigpu_helper()\n self._test_all_gather_helper(\n group, group_id, rank, True, rank_to_GPU, dtype=torch.cfloat\n )\n\n @skip_if_small_worldsize\n @sandcastle_skip_if(BACKEND == \"nccl\", \"Nccl does not support CPU tensors\")\n def test_all_gather_group(self):\n group, group_id, rank = self._init_group_test()\n self._test_all_gather_helper(group, group_id, rank)\n\n @sandcastle_skip_if(BACKEND == \"nccl\", \"Nccl does not support CPU tensors\")\n def test_all_gather_full_group(self):\n group, group_id, rank = self._init_full_group_test()\n self._test_all_gather_helper(group, group_id, rank)\n\n def _run_all_gather_coalesced_and_verify(\n self, output_tensor_lists, input_tensors, expected_tensors, group_id\n ):\n \"\"\"\n Helper that runs all_gather_coalesced and returns true if output\n matches expectations.\n \"\"\"\n tensor_shapes = []\n for input_tensor in input_tensors:\n if input_tensor.dtype == torch.complex64:\n tensor_shapes.append(torch.view_as_real(input_tensor).shape)\n else:\n tensor_shapes.append(input_tensor.shape)\n self.call_dist_op(\n \":all_gather\",\n False,\n dist.all_gather_coalesced,\n output_tensor_lists,\n input_tensors,\n group_id,\n tensor_shapes=tensor_shapes,\n )\n\n for l1, l2 in zip(output_tensor_lists, expected_tensors):\n for t1, t2 in zip(l1, l2):\n if not torch.equal(t1, t2):\n return False\n return True\n\n def _test_all_gather_coalesced_helper(\n self, group, group_id, rank, dtype=torch.float\n ):\n # TODO: Instead we should probably go through _rank_not_in_group\n # mechanism to disable sending tensors\n if group_id is not None:\n for test_case_id in range(2, 5):\n # Make sure we create tensors of incompatible sizes, e.g.\n # [1], [2x2], [3x3x3] ... 
to be sent in one batch\n input_tensors = [\n _build_multidim_tensor(\n tensor_id, tensor_id, rank + tensor_id, dtype=dtype\n )\n for tensor_id in range(1, test_case_id)\n ]\n output_tensor_lists = [\n [\n _build_multidim_tensor(\n tensor_id, tensor_id, -1, dtype=dtype\n )\n for tensor_id in range(1, test_case_id)\n ]\n for _ in group\n ]\n expected_tensors = [\n [\n _build_multidim_tensor(\n tensor_id, tensor_id, rank_iter + tensor_id, dtype=dtype\n )\n for tensor_id in range(1, test_case_id)\n ]\n for rank_iter in group\n ]\n assert self._run_all_gather_coalesced_and_verify(\n output_tensor_lists, input_tensors, expected_tensors, group_id\n ), \"output tensors do not match expected ouputs\"\n\n self._barrier()\n\n @sandcastle_skip_if(\n BACKEND == \"nccl\", \"all_gather_coalesced does not support NCCL\"\n )\n @sandcastle_skip_if(BACKEND == \"mpi\", \"all_gather_coalesced does not support MPI\")\n def test_all_gather_coalesced_simple(self):\n group, group_id, rank = self._init_global_test()\n self._test_all_gather_coalesced_helper(group, group_id, rank)\n\n @sandcastle_skip_if(\n BACKEND == \"nccl\", \"all_gather_coalesced does not support NCCL\"\n )\n @sandcastle_skip_if(BACKEND == \"mpi\", \"all_gather_coalesced does not support MPI\")\n def test_all_gather_coalesced_complex(self):\n group, group_id, rank = self._init_global_test()\n self._test_all_gather_coalesced_helper(\n group, group_id, rank, dtype=torch.cfloat\n )\n\n @skip_if_small_worldsize\n @sandcastle_skip_if(\n BACKEND == \"nccl\", \"all_gather_coalesced does not support NCCL\"\n )\n @sandcastle_skip_if(BACKEND == \"mpi\", \"all_gather_coalesced does not support MPI\")\n def test_all_gather_coalesced_group(self):\n group, group_id, rank = self._init_group_test()\n self._test_all_gather_coalesced_helper(group, group_id, rank)\n\n @sandcastle_skip_if(\n BACKEND == \"nccl\", \"all_gather_coalesced does not support NCCL\"\n )\n @sandcastle_skip_if(BACKEND == \"mpi\", \"all_gather_coalesced does not support MPI\")\n def test_all_gather_coalesced_full_group(self):\n group, group_id, rank = self._init_full_group_test()\n self._test_all_gather_coalesced_helper(group, group_id, rank)\n\n @sandcastle_skip_if(\n BACKEND == \"nccl\", \"all_gather_coalesced does not support NCCL\"\n )\n @sandcastle_skip_if(BACKEND == \"mpi\", \"all_gather_coalesced does not support MPI\")\n def test_all_gather_coalesced_with_empty(self):\n group, group_id, rank = self._init_global_test()\n input_tensors = [\n rank * torch.ones([2, 2]),\n torch.ones([0]),\n (rank + 1) * torch.ones([3, 3]),\n torch.ones([0]),\n torch.ones([0]),\n ]\n output_tensors_lists = [\n [\n -1 * torch.ones([2, 2]),\n -1 * torch.ones([0]),\n -1 * torch.ones([3, 3]),\n -1 * torch.ones([0]),\n -1 * torch.ones([0]),\n ]\n for _ in group\n ]\n expected_tensors = [\n [\n r * torch.ones([2, 2]),\n torch.ones([0]),\n (r + 1) * torch.ones([3, 3]),\n torch.ones([0]),\n torch.ones([0]),\n ]\n for r in group\n ]\n assert self._run_all_gather_coalesced_and_verify(\n output_tensors_lists, input_tensors, expected_tensors, group_id\n )\n self._barrier()\n\n # AllToAll\n def _test_all_to_all_single_equal_split_helper(\n self, group, group_id, rank, cuda=False, rank_to_GPU=None, dtype=torch.float\n ):\n if group_id is not None:\n size = len(group)\n in_tensor = torch.ones([size, size], dtype=dtype) * rank\n expected_tensor = torch.cat(\n [torch.ones([1, size], dtype=dtype) * i for i in group]\n )\n out_tensor = torch.ones([size, size], dtype=dtype) * -1\n if cuda:\n in_tensor = 
in_tensor.cuda(rank_to_GPU[rank][0])\n expected_tensor = expected_tensor.cuda(rank_to_GPU[rank][0])\n out_tensor = out_tensor.cuda(rank_to_GPU[rank][0])\n if dtype == torch.complex64:\n tensor_shapes = [torch.view_as_real(in_tensor).shape]\n else:\n tensor_shapes = [in_tensor.shape]\n self.call_dist_op(\n \":all_to_all\",\n False,\n dist.all_to_all_single,\n out_tensor,\n in_tensor,\n group=group_id,\n tensor_shapes=tensor_shapes,\n )\n self.assertEqual(out_tensor, expected_tensor)\n self._barrier()\n\n def _test_all_to_all_single_unequal_split_helper(\n self, group, group_id, rank, cuda=False, rank_to_GPU=None, dtype=torch.float\n ):\n if group_id is not None:\n size = len(group)\n in_splits = [i + 1 for i in group]\n out_splits = [rank + 1 for _ in group]\n in_tensor = torch.ones([sum(in_splits), size], dtype=dtype) * rank\n out_tensor = torch.ones([(rank + 1) * size, size], dtype=dtype)\n expected_tensor = torch.cat(\n [torch.ones([rank + 1, size], dtype=dtype) * i for i in group]\n )\n if cuda:\n in_tensor = in_tensor.cuda(rank_to_GPU[rank][0])\n expected_tensor = expected_tensor.cuda(rank_to_GPU[rank][0])\n out_tensor = out_tensor.cuda(rank_to_GPU[rank][0])\n dist.all_to_all_single(\n out_tensor, in_tensor, out_splits, in_splits, group=group_id\n )\n self.assertEqual(out_tensor, expected_tensor)\n self._barrier()\n\n def _test_all_to_all_helper(\n self,\n group,\n group_id,\n rank,\n cuda=False,\n rank_to_GPU=None,\n dtype=torch.float,\n ):\n if group_id is not None:\n size = len(group)\n in_splits = [i + 1 for i in group]\n in_tensors = [\n torch.ones([in_splits[i], size], dtype=dtype) * rank\n for i, _ in enumerate(group)\n ]\n out_tensors = [\n torch.ones([(rank + 1), size], dtype=dtype) for _ in group\n ]\n expected_tensors = [\n torch.ones([rank + 1, size], dtype=dtype) * i for i in group\n ]\n if cuda:\n in_tensors = [t.cuda(rank_to_GPU[rank][0]) for t in in_tensors]\n expected_tensors = [\n t.cuda(rank_to_GPU[rank][0]) for t in expected_tensors\n ]\n out_tensors = [t.cuda(rank_to_GPU[rank][0]) for t in out_tensors]\n dist.all_to_all(out_tensors, in_tensors, group=group_id)\n for t1, t2 in zip(out_tensors, expected_tensors):\n self.assertEqual(t1, t2)\n self._barrier()\n\n @sandcastle_skip_if(BACKEND != \"mpi\", \"Only MPI supports CPU all_to_all_single\")\n def test_all_to_all_single_equal_split(self):\n group, group_id, rank = self._init_global_test()\n self._test_all_to_all_single_equal_split_helper(group, group_id, rank)\n\n @sandcastle_skip_if(BACKEND != \"nccl\", \"Only Nccl supports CUDA all_to_all_single\")\n @skip_if_no_gpu\n def test_all_to_all_single_equal_split_cuda(self):\n group, group_id, rank = self._init_global_test()\n rank_to_GPU = self._init_multigpu_helper()\n self._test_all_to_all_single_equal_split_helper(\n group,\n group_id,\n rank,\n True,\n rank_to_GPU,\n )\n\n @sandcastle_skip_if(BACKEND != \"mpi\", \"Only MPI supports CPU all_to_all_single\")\n def test_all_to_all_single_equal_split_complex(self):\n group, group_id, rank = self._init_global_test()\n self._test_all_to_all_single_equal_split_helper(\n group, group_id, rank, dtype=torch.cfloat\n )\n\n @sandcastle_skip_if(BACKEND != \"nccl\", \"Only Nccl supports CUDA all_to_all_single\")\n @skip_if_no_gpu\n def test_all_to_all_single_equal_split_cuda_complex(self):\n group, group_id, rank = self._init_global_test()\n rank_to_GPU = self._init_multigpu_helper()\n self._test_all_to_all_single_equal_split_helper(\n group, group_id, rank, True, rank_to_GPU, dtype=torch.cfloat\n )\n\n 
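# [Editor's illustrative note -- not part of the original PyTorch test file.]\n # A minimal sketch of the equal-split all_to_all_single semantics exercised by the\n # helpers above, assuming world_size == 2 and size == 2:\n #   rank 0 in_tensor: [[0., 0.], [0., 0.]]    rank 1 in_tensor: [[1., 1.], [1., 1.]]\n #   >>> dist.all_to_all_single(out_tensor, in_tensor)  # standalone call on the default group\n #   every rank's out_tensor: [[0., 0.], [1., 1.]]  (row i was contributed by rank i),\n # which matches the expected_tensor that the helper builds with torch.cat(...).\n\n 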
@sandcastle_skip_if(BACKEND != \"mpi\", \"Only MPI supports CPU all_to_all_single\")\n def test_all_to_all_single_unequal_split(self):\n group, group_id, rank = self._init_global_test()\n self._test_all_to_all_single_unequal_split_helper(group, group_id, rank)\n\n @sandcastle_skip_if(BACKEND != \"nccl\", \"Only Nccl supports CUDA all_to_all_single\")\n @skip_if_no_gpu\n def test_all_to_all_single_unequal_split_cuda(self):\n group, group_id, rank = self._init_global_test()\n rank_to_GPU = self._init_multigpu_helper()\n self._test_all_to_all_single_unequal_split_helper(\n group,\n group_id,\n rank,\n True,\n rank_to_GPU,\n )\n\n @sandcastle_skip_if(BACKEND != \"mpi\", \"Only MPI supports CPU all_to_all_single\")\n def test_all_to_all_single_unequal_split_complex(self):\n group, group_id, rank = self._init_global_test()\n self._test_all_to_all_single_unequal_split_helper(\n group, group_id, rank, dtype=torch.cfloat\n )\n\n @sandcastle_skip_if(BACKEND != \"nccl\", \"Only Nccl supports CUDA all_to_all_single\")\n @skip_if_no_gpu\n def test_all_to_all_single_unequal_split_cuda_complex(self):\n group, group_id, rank = self._init_global_test()\n rank_to_GPU = self._init_multigpu_helper()\n self._test_all_to_all_single_unequal_split_helper(\n group,\n group_id,\n rank,\n True,\n rank_to_GPU,\n dtype=torch.cfloat,\n )\n\n @sandcastle_skip_if(BACKEND != \"mpi\", \"Only MPI supports all_to_all\")\n def test_all_to_all(self):\n group, group_id, rank = self._init_global_test()\n self._test_all_to_all_helper(group, group_id, rank)\n\n @sandcastle_skip_if(BACKEND != \"nccl\", \"Only NCCL supports CUDA all_to_all\")\n @skip_if_rocm\n def test_all_to_all_cuda(self):\n group, group_id, rank = self._init_global_test()\n rank_to_GPU = self._init_multigpu_helper()\n self._test_all_to_all_helper(group, group_id, rank, True, rank_to_GPU)\n\n @sandcastle_skip_if(BACKEND != \"mpi\", \"Only MPI supports all_to_all\")\n def test_all_to_all_complex(self):\n group, group_id, rank = self._init_global_test()\n self._test_all_to_all_helper(group, group_id, rank, dtype=torch.cfloat)\n\n @sandcastle_skip_if(BACKEND != \"nccl\", \"Only NCCL supports CUDA all_to_all\")\n @skip_if_rocm\n def test_all_to_all_cuda_complex(self):\n group, group_id, rank = self._init_global_test()\n rank_to_GPU = self._init_multigpu_helper()\n self._test_all_to_all_helper(\n group, group_id, rank, True, rank_to_GPU, dtype=torch.cfloat\n )\n\n @sandcastle_skip_if(BACKEND != \"mpi\", \"Only MPI supports CPU all_to_all_single\")\n @skip_if_small_worldsize\n def test_all_to_all_single_equal_split_group(self):\n group, group_id, rank = self._init_group_test()\n self._test_all_to_all_single_equal_split_helper(group, group_id, rank)\n\n @sandcastle_skip_if(BACKEND != \"nccl\", \"Only Nccl supports CUDA all_to_all_single\")\n @skip_if_no_gpu\n @skip_if_small_worldsize\n def test_all_to_all_single_equal_split_group_cuda(self):\n group, group_id, rank = self._init_group_test()\n rank_to_GPU = self._init_multigpu_helper()\n self._test_all_to_all_single_equal_split_helper(\n group,\n group_id,\n rank,\n True,\n rank_to_GPU,\n )\n\n @sandcastle_skip_if(BACKEND != \"mpi\", \"Only MPI supports CPU all_to_all_single\")\n @skip_if_small_worldsize\n def test_all_to_all_single_unequal_split_group(self):\n group, group_id, rank = self._init_group_test()\n self._test_all_to_all_single_unequal_split_helper(group, group_id, rank)\n\n @sandcastle_skip_if(BACKEND != \"nccl\", \"Only Nccl supports CUDA all_to_all_single\")\n @skip_if_no_gpu\n @skip_if_small_worldsize\n def 
test_all_to_all_single_unequal_split_group_cuda(self):\n group, group_id, rank = self._init_global_test()\n rank_to_GPU = self._init_multigpu_helper()\n self._test_all_to_all_single_unequal_split_helper(\n group,\n group_id,\n rank,\n True,\n rank_to_GPU,\n )\n\n @sandcastle_skip_if(BACKEND != \"mpi\", \"Only MPI supports all_to_all\")\n @skip_if_small_worldsize\n def test_all_to_all_group(self):\n group, group_id, rank = self._init_group_test()\n self._test_all_to_all_helper(group, group_id, rank)\n\n @sandcastle_skip_if(BACKEND != \"nccl\", \"Only Nccl supports CUDA all_to_all_single\")\n @skip_if_small_worldsize\n @skip_if_rocm\n def test_all_to_all_group_cuda(self):\n group, group_id, rank = self._init_group_test()\n rank_to_GPU = self._init_multigpu_helper()\n self._test_all_to_all_helper(group, group_id, rank, True, rank_to_GPU)\n\n @sandcastle_skip_if(BACKEND != \"mpi\", \"Only MPI supports CPU all_to_all_single\")\n def test_all_to_all_single_equal_split_full_group(self):\n group, group_id, rank = self._init_full_group_test()\n self._test_all_to_all_single_equal_split_helper(group, group_id, rank)\n\n @sandcastle_skip_if(BACKEND != \"nccl\", \"Only Nccl supports CUDA all_to_all_single\")\n @skip_if_no_gpu\n def test_all_to_all_single_equal_split_full_group_cuda(self):\n group, group_id, rank = self._init_full_group_test()\n rank_to_GPU = self._init_multigpu_helper()\n self._test_all_to_all_single_equal_split_helper(\n group,\n group_id,\n rank,\n True,\n rank_to_GPU,\n )\n\n @sandcastle_skip_if(BACKEND != \"mpi\", \"Only MPI supports CPU all_to_all_single\")\n def test_all_to_all_single_unequal_split_full_group(self):\n group, group_id, rank = self._init_full_group_test()\n self._test_all_to_all_single_unequal_split_helper(group, group_id, rank)\n\n @sandcastle_skip_if(BACKEND != \"nccl\", \"Only Nccl supports CUDA all_to_all_single\")\n @skip_if_no_gpu\n def test_all_to_all_single_unequal_split_full_group_cuda(self):\n group, group_id, rank = self._init_full_group_test()\n rank_to_GPU = self._init_multigpu_helper()\n self._test_all_to_all_single_unequal_split_helper(\n group,\n group_id,\n rank,\n True,\n rank_to_GPU,\n )\n\n @sandcastle_skip_if(BACKEND != \"mpi\", \"Only MPI supports all_to_all\")\n def test_all_to_all_full_group(self):\n group, group_id, rank = self._init_full_group_test()\n self._test_all_to_all_helper(group, group_id, rank)\n\n @sandcastle_skip_if(BACKEND != \"nccl\", \"Only NCCL supports CUDA all_to_all\")\n @skip_if_rocm\n def test_all_to_all_full_group_cuda(self):\n group, group_id, rank = self._init_full_group_test()\n rank_to_GPU = self._init_multigpu_helper()\n self._test_all_to_all_helper(group, group_id, rank, True, rank_to_GPU)\n\n # BARRIER\n def _test_barrier_helper(\n self, group, group_id, rank, cuda=False, rank_to_GPU=None\n ):\n WAIT_TIME = 0.3 # seconds\n\n for dest in group:\n expected_time = torch.DoubleTensor(1).fill_(0.0)\n if cuda:\n expected_time = expected_time.cuda(rank_to_GPU[rank][0])\n if dest == rank:\n expected_time.fill_(time.time() + WAIT_TIME)\n dist.broadcast(expected_time, dest, group_id)\n time.sleep(WAIT_TIME + 0.1) # sleep a little bit longer\n dist.barrier(group_id)\n else:\n dist.broadcast(expected_time, dest, group_id)\n dist.barrier(group_id)\n self.assertGreaterAlmostEqual(\n float(time.time()),\n float(expected_time[0]),\n \"destination rank: %d, my rank: %d\" % (dest, rank)\n + \" (if you see this failure, please report in #14554)\",\n )\n\n # Use higher timeout for the instance where the test runs\n # against a 
subgroup and uses a CUDA tensor for expected time.\n # The CUDA initialization for the participating processes can\n # take long enough for the barrier timeout to trigger on the\n # process that doesn't participate in the group.\n self._barrier(timeout=20)\n\n @skip_if_no_gpu\n @sandcastle_skip_if(BACKEND == \"mpi\", \"MPI doesn't supports GPU barrier\")\n def test_barrier_cuda(self):\n group, group_id, rank = self._init_global_test()\n rank_to_GPU = self._init_multigpu_helper()\n self._test_barrier_helper(group, group_id, rank, True, rank_to_GPU)\n\n @skip_if_small_worldsize\n @skip_if_no_gpu\n @sandcastle_skip_if(BACKEND == \"mpi\", \"MPI doesn't supports GPU barrier\")\n def test_barrier_group_cuda(self):\n group, group_id, rank = self._init_group_test()\n rank_to_GPU = self._init_multigpu_helper()\n self._test_barrier_helper(group, group_id, rank, True, rank_to_GPU)\n\n @skip_if_small_worldsize\n @skip_if_no_gpu\n @sandcastle_skip_if(BACKEND == \"mpi\", \"MPI doesn't supports GPU barrier\")\n def test_barrier_full_group_cuda(self):\n group, group_id, rank = self._init_full_group_test()\n rank_to_GPU = self._init_multigpu_helper()\n self._test_barrier_helper(group, group_id, rank, True, rank_to_GPU)\n\n @sandcastle_skip_if(BACKEND == \"nccl\", \"NCCL does not support CPU barrier\")\n def test_barrier(self):\n group, group_id, rank = self._init_global_test()\n self._test_barrier_helper(group, group_id, rank)\n\n @skip_if_small_worldsize\n @sandcastle_skip_if(BACKEND == \"nccl\", \"NCCL does not support CPU barrier\")\n def test_barrier_group(self):\n group, group_id, rank = self._init_group_test()\n self._test_barrier_helper(group, group_id, rank)\n\n @sandcastle_skip_if(BACKEND == \"nccl\", \"NCCL does not support CPU barrier\")\n def test_barrier_full_group(self):\n group, group_id, rank = self._init_full_group_test()\n self._test_barrier_helper(group, group_id, rank)\n\n def _test_broadcast_multigpu_helper(self, group, group_id, rank, rank_to_GPU):\n for src in group:\n expected_tensor = _build_tensor(src + 1)\n tensors = [\n _build_tensor(src + 1, -1).cuda(device=i) for i in rank_to_GPU[rank]\n ]\n if rank == src:\n tensors[0] = expected_tensor.cuda(device=rank_to_GPU[rank][0])\n\n dist.broadcast_multigpu(tensors, src, group_id)\n for tensor in tensors:\n self.assertEqual(tensor, expected_tensor)\n self._barrier()\n\n @sandcastle_skip_if(BACKEND == \"mpi\", \"MPI doesn't support broadcast multigpu\")\n @sandcastle_skip_if(BACKEND == \"nccl\", \"NCCL broadcast multigpu skipped\")\n @skip_if_no_gpu\n def test_broadcast_multigpu(self):\n group, group_id, rank = self._init_global_test()\n rank_to_GPU = self._init_multigpu_helper()\n self._test_broadcast_multigpu_helper(group, group_id, rank, rank_to_GPU)\n\n def _test_all_reduce_multigpu_helper(\n self,\n group,\n group_id,\n rank,\n rank_to_GPU,\n op,\n master_value,\n worker_value,\n expected_value,\n dtype=torch.float,\n ):\n for src in group:\n curr_value = master_value if rank == src else worker_value\n tensors = [\n _build_tensor(src + 1, curr_value, dtype=dtype).cuda(device=i)\n for i in rank_to_GPU[rank]\n ]\n self.call_dist_op(\n \":all_reduce\",\n False,\n dist.all_reduce_multigpu,\n tensors,\n op,\n group_id,\n )\n expected_tensor = _build_tensor(src + 1, expected_value, dtype=dtype)\n for tensor in tensors:\n self.assertEqual(tensor, expected_tensor)\n\n self._barrier()\n\n @sandcastle_skip_if(BACKEND == \"mpi\", \"MPI doesn't support broadcast multigpu\")\n @sandcastle_skip_if(BACKEND == \"nccl\", \"CUDA all_reduce multigpu 
skipped for NCCL\")\n @skip_if_no_gpu\n def test_all_reduce_multigpu(self):\n group, group_id, rank = self._init_global_test()\n rank_to_GPU = self._init_multigpu_helper()\n self._test_all_reduce_multigpu_helper(\n group,\n group_id,\n rank,\n rank_to_GPU,\n dist.ReduceOp.SUM,\n 2,\n 10,\n (2 + 10 * (len(group) - 1)) * len(rank_to_GPU[0]),\n )\n\n @sandcastle_skip_if(BACKEND == \"mpi\", \"MPI doesn't support broadcast multigpu\")\n @sandcastle_skip_if(BACKEND == \"nccl\", \"CUDA all_reduce multigpu skipped for NCCL\")\n @skip_if_no_gpu\n def test_all_reduce_multigpu_complex(self):\n group, group_id, rank = self._init_global_test()\n rank_to_GPU = self._init_multigpu_helper()\n self._test_all_reduce_multigpu_helper(\n group,\n group_id,\n rank,\n rank_to_GPU,\n dist.ReduceOp.SUM,\n complex(2, 3),\n complex(10, 11),\n (complex(2, 3) + complex(10, 11) * (len(group) - 1))\n * len(rank_to_GPU[0]),\n dtype=torch.cfloat,\n )\n\n def _test_reduce_multigpu_helper(\n self,\n group,\n group_id,\n rank,\n rank_to_GPU,\n op,\n master_value,\n worker_value,\n expected_value,\n ):\n for src in group:\n tensor_value = master_value if rank == src else worker_value\n tensors = [\n _build_tensor(src + 1, tensor_value).cuda(device=i)\n for i in rank_to_GPU[rank]\n ]\n self.call_dist_op(\n \"reduce\",\n False,\n dist.reduce_multigpu,\n tensors,\n src,\n op,\n group_id,\n expect_event=len(tensors) == 1,\n tensor_shapes=[tensors[0].shape],\n )\n if rank == src:\n expected_tensor = _build_tensor(src + 1, expected_value)\n self.assertEqual(tensors[0], expected_tensor)\n\n self._barrier()\n\n @sandcastle_skip_if(\n BACKEND != \"nccl\", \"Only Nccl backend supports reduce multigpu\"\n )\n @skip_if_no_gpu\n def test_reduce_multigpu(self):\n group, group_id, rank = self._init_global_test()\n rank_to_GPU = self._init_multigpu_helper()\n device_id = rank_to_GPU[rank][0]\n torch.cuda.set_device(device_id)\n self._test_reduce_multigpu_helper(\n group,\n group_id,\n rank,\n rank_to_GPU,\n dist.ReduceOp.SUM,\n 2,\n 10,\n (2 + 10 * (len(group) - 1)) * len(rank_to_GPU[0]),\n )\n\n def _test_all_gather_multigpu_helper(\n self, group, group_id, rank, rank_to_GPU, dtype=torch.float\n ):\n for dest in group:\n tensors = [\n _build_tensor(dest + 1, dtype=dtype).cuda(device=i)\n for i in rank_to_GPU[rank]\n ]\n\n # construct expected output along with\n # a place holder to receive all gather results\n output_tensors = []\n expected_output = []\n output_per_gpu = (\n [_build_tensor(dest + 1, -1, dtype=dtype)]\n * len(rank_to_GPU[0])\n * len(group)\n )\n expected_per_gpu = (\n [_build_tensor(dest + 1, dtype=dtype)]\n * len(rank_to_GPU[0])\n * len(group)\n )\n for gpu in rank_to_GPU[rank]:\n output_tensors.append([t.cuda(device=gpu) for t in output_per_gpu])\n expected_output.append(\n [t.cuda(device=gpu) for t in expected_per_gpu]\n )\n self.call_dist_op(\n \"all_gather\",\n False,\n dist.all_gather_multigpu,\n output_tensors,\n tensors,\n group_id,\n expect_event=len(expected_output) == 1,\n )\n self.assertEqual(output_tensors, expected_output)\n\n self._barrier()\n\n @sandcastle_skip_if(\n BACKEND != \"nccl\", \"Only Nccl backend supports allgather multigpu\"\n )\n @skip_if_no_gpu\n def test_all_gather_multigpu(self):\n group, group_id, rank = self._init_global_test()\n rank_to_GPU = self._init_multigpu_helper()\n device_id = rank_to_GPU[rank][0]\n torch.cuda.set_device(device_id)\n self._test_all_gather_multigpu_helper(group, group_id, rank, rank_to_GPU)\n\n @sandcastle_skip_if(\n BACKEND != \"nccl\", \"Only Nccl backend supports 
allgather multigpu\"\n )\n @skip_if_no_gpu\n def test_all_gather_multigpu_complex(self):\n group, group_id, rank = self._init_global_test()\n rank_to_GPU = self._init_multigpu_helper()\n device_id = rank_to_GPU[rank][0]\n torch.cuda.set_device(device_id)\n self._test_all_gather_multigpu_helper(\n group, group_id, rank, rank_to_GPU, dtype=torch.cfloat\n )\n\n def _model_step(self, model):\n for param in model.parameters():\n if param.grad is not None:\n with torch.no_grad():\n param += param.grad\n param.grad = None\n\n def _model_step_with_zero_grad(self, model):\n for param in model.parameters():\n if param.grad is not None:\n with torch.no_grad():\n param += param.grad\n param.grad.requires_grad_(False)\n param.grad.zero_()\n\n def _prepare_dummy_data(self, local_bs):\n # global_bs for DDP should be divisible by WORLD_SIZE\n world_size = int(os.environ[\"WORLD_SIZE\"])\n global_bs = world_size * local_bs\n input_cpu = torch.randn(global_bs, 2)\n target = torch.randn(global_bs, 4)\n loss = nn.MSELoss()\n return global_bs, input_cpu, target, loss\n\n # END TO END TEST FOR DISTRIBUTEDDATAPARALLEL\n def _test_DDP_helper(\n self, model, input_var, target, loss, scale_factor=1.0, memory_format=None\n ):\n model.train()\n output = model(input_var)\n l = loss(output, target) * scale_factor\n l.backward()\n if memory_format is not None:\n self.assertTrue(output.is_contiguous(memory_format=memory_format))\n\n def _assert_equal_param(self, param_gpu, param_DDP):\n self.assertEqual(len(param_gpu), len(param_DDP))\n for p_gpu, p_DDP in zip(param_gpu, param_DDP):\n self.assertEqual(p_gpu, p_DDP)\n\n def _test_DDP_niter(\n self,\n model_base,\n model_DDP,\n input,\n target,\n loss,\n local_bs,\n rank,\n batch_size,\n test_save,\n offset=None,\n world_size=0,\n zero_grad=False,\n memory_format=None,\n n_iter=5,\n ):\n for idx in range(n_iter):\n # single cpu/gpu training\n self._test_DDP_helper(\n model_base, input, target, loss, memory_format=memory_format\n )\n\n if offset is None:\n offset = rank * local_bs\n\n # DDP training, DDP scatters subsets of input_cpu to nodes/GPUs\n self._test_DDP_helper(\n model_DDP,\n input[offset : offset + local_bs],\n target[offset : offset + local_bs],\n loss,\n world_size * local_bs / batch_size if world_size != 0 else 1,\n memory_format=memory_format,\n )\n\n # Update weights and run a second iteration to shake out errors\n if zero_grad:\n self._model_step_with_zero_grad(model_base)\n self._model_step_with_zero_grad(model_DDP)\n else:\n self._model_step(model_base)\n self._model_step(model_DDP)\n self._assert_equal_param(\n list(model_base.parameters()), list(model_DDP.module.parameters())\n )\n\n # Shuffle the input so that DDP input is different\n input = input[torch.randperm(batch_size)]\n\n # save the model in the middle and reload\n if test_save and idx == 2 and INIT_METHOD.startswith(\"file://\"):\n with tempfile.NamedTemporaryFile() as tmp:\n if sys.platform == \"win32\":\n torch.save(model_DDP, tmp)\n tmp.seek(0)\n model_DDP = torch.load(tmp)\n else:\n torch.save(model_DDP, tmp.name)\n model_DDP = torch.load(tmp.name)\n\n with tempfile.TemporaryFile() as tmp_file:\n torch.save(model_DDP, tmp_file)\n tmp_file.seek(0)\n saved_model = torch.load(tmp_file)\n for k in model_DDP.state_dict():\n self.assertEqual(model_DDP.state_dict()[k], saved_model.state_dict()[k])\n\n def _test_DistributedDataParallel(\n self,\n gpu_subset,\n rank,\n output_device=None,\n gradient_as_bucket_view=False,\n static_graph=False,\n ):\n # Run a simple end to end DDP model, use result 
of single node model\n # as baseline\n\n # cpu training setup\n model = DDP_NET\n\n # single gpu training setup\n model_gpu = copy.deepcopy(model)\n model_gpu.cuda(gpu_subset[0])\n\n # DDP training setup\n model_DDP = copy.deepcopy(model)\n model_DDP.cuda(gpu_subset[0])\n model_DDP = nn.parallel.DistributedDataParallel(\n model_DDP,\n device_ids=gpu_subset,\n gradient_as_bucket_view=gradient_as_bucket_view,\n )\n if static_graph:\n model_DDP._set_static_graph()\n\n # test serializable/unserializable\n with tempfile.NamedTemporaryFile() as tmp:\n if sys.platform == \"win32\":\n torch.save(model_DDP, tmp)\n tmp.seek(0)\n model_DDP = torch.load(tmp)\n else:\n torch.save(model_DDP, tmp.name)\n model_DDP = torch.load(tmp.name)\n\n # dummy data initialization\n local_bs = len(gpu_subset)\n global_bs, input_cpu, target, loss = self._prepare_dummy_data(local_bs)\n\n # check two model parameters over 5 iterations\n self._test_DDP_niter(\n model_gpu,\n model_DDP,\n input_cpu.cuda(gpu_subset[0]),\n target.cuda(gpu_subset[0]),\n loss,\n local_bs,\n rank,\n global_bs,\n True,\n )\n self._barrier()\n\n def _test_DistributedDataParallelCPU(self, gradient_as_bucket_view=False):\n # Run a simple end to end DDP-CPU model, use result of single node\n # model as baseline\n group, group_id, rank = self._init_global_test()\n\n # cpu training setup\n model_base = DDP_NET\n\n # DDP-CPU training setup\n model_DDP = copy.deepcopy(model_base)\n model_DDP = nn.parallel.DistributedDataParallel(\n model_DDP, gradient_as_bucket_view=gradient_as_bucket_view\n )\n\n # dummy data initialization\n local_bs = 2\n global_bs, input_cpu, target, loss = self._prepare_dummy_data(local_bs)\n\n # check two model parameters over 5 iterations\n self._test_DDP_niter(\n model_base,\n model_DDP,\n input_cpu,\n target,\n loss,\n local_bs,\n rank,\n global_bs,\n False,\n zero_grad=True,\n )\n self._barrier()\n\n return model_DDP\n\n @sandcastle_skip_if(BACKEND == \"nccl\", \"nccl does not support DDP on CPU models\")\n def test_DistributedDataParallelCPU(self):\n self._test_DistributedDataParallelCPU()\n\n @sandcastle_skip_if(BACKEND == \"nccl\", \"nccl does not support DDP on CPU models\")\n def test_DistributedDataParallelCPU_grad_is_view(self):\n self._test_DistributedDataParallelCPU(gradient_as_bucket_view=True)\n\n @sandcastle_skip_if(\n BACKEND != \"nccl\" and BACKEND != \"gloo\",\n \"Only Nccl & Gloo backend support DistributedDataParallel\",\n )\n def test_DistributedDataParallel_requires_grad(self):\n # a module without gradients shouldn't be accepted\n self.assertRaises(\n RuntimeError, lambda: nn.parallel.DistributedDataParallel(nn.Module())\n )\n self._barrier()\n\n @sandcastle_skip_if(\n BACKEND != \"nccl\" and BACKEND != \"gloo\",\n \"Only NCCL and GLOO backend support DistributedDataParallel\",\n )\n @skip_if_lt_x_gpu(int(os.environ[\"WORLD_SIZE\"]))\n @skip_if_rocm\n def test_DistributedDataParallel_non_default_stream(self):\n stream = torch.cuda.Stream(self.rank)\n rank = self.rank\n with torch.cuda.stream(stream):\n net = torch.nn.parallel.DistributedDataParallel(\n torch.nn.Linear(1, 1, bias=False).cuda(rank), device_ids=[rank]\n )\n for i in range(1000):\n # Clear gradients manually\n grad = net.module.weight.grad\n if grad is not None:\n grad.requires_grad_(False)\n grad.zero_()\n # Forward + BW\n batch = torch.tensor([rank]).float().cuda(rank)\n loss = net(batch).sum()\n loss.backward()\n # For each worker, the gradient on the weight should be worker_rank.\n grad = net.module.weight.grad\n avg = grad.clone()\n # 
All-reducing the gradient averages should give us the gradient\n # average. If not, then one of the workers has not correctly\n # written back the averaged gradient before this all-reduce call.\n dist.all_reduce(avg)\n world_size = int(os.environ[\"WORLD_SIZE\"])\n avg.div_(world_size)\n expected_grad = sum(i for i in range(world_size)) / world_size\n self.assertEqual(\n avg[0, 0],\n expected_grad,\n msg=f\"Expected gradient of {expected_grad} but got {avg} on rank {self.rank}\",\n )\n\n @sandcastle_skip_if(\n BACKEND != \"nccl\" and BACKEND != \"gloo\",\n \"MPI backend does not support DDP communication hook on CUDA devices\",\n )\n @skip_if_lt_x_gpu(int(os.environ[\"WORLD_SIZE\"]))\n @skip_if_rocm\n def test_ddp_comm_hook_logging(self):\n hooks = [\n default.allreduce_hook,\n default.fp16_compress_hook,\n powerSGD.powerSGD_hook,\n powerSGD.batched_powerSGD_hook,\n quantization_hooks.quantization_pertensor_hook,\n quantization_hooks.quantization_perchannel_hook,\n ]\n\n cpp_builtin_hooks = [\n dist.BuiltinCommHookType.ALLREDUCE,\n dist.BuiltinCommHookType.FP16_COMPRESS,\n ]\n\n for hook in hooks:\n ddp_model = torch.nn.parallel.DistributedDataParallel(\n torch.nn.Linear(1, 1, bias=False).cuda(self.rank),\n device_ids=[self.rank],\n )\n ddp_logging_data = ddp_model._get_ddp_logging_data()\n # Hook not registered yet, so should be empty\n self.assertEqual(ddp_logging_data.get(\"comm_hook\"), None)\n ddp_model.register_comm_hook(None, hook)\n ddp_logging_data = ddp_model._get_ddp_logging_data()\n self.assertEqual(ddp_logging_data.get(\"comm_hook\"), hook.__qualname__)\n\n for hook in cpp_builtin_hooks:\n ddp_model = torch.nn.parallel.DistributedDataParallel(\n torch.nn.Linear(1, 1, bias=False).cuda(self.rank),\n device_ids=[self.rank],\n )\n ddp_logging_data = ddp_model._get_ddp_logging_data()\n # Hook not registered yet, so should be empty\n self.assertEqual(ddp_logging_data.get(\"comm_hook\"), None)\n ddp_model._register_builtin_comm_hook(hook)\n ddp_logging_data = ddp_model._get_ddp_logging_data()\n self.assertEqual(ddp_logging_data.get(\"comm_hook\"), str(hook))\n\n # No hook registered\n ddp_model = torch.nn.parallel.DistributedDataParallel(\n torch.nn.Linear(1, 1, bias=False).cuda(self.rank),\n device_ids=[self.rank],\n )\n ddp_logging_data = ddp_model._get_ddp_logging_data()\n # Hook not registered yet, so should be empty\n self.assertEqual(ddp_logging_data.get(\"comm_hook\"), None)\n # After second forward pass, hook should still be empty string\n for i in range(2):\n inp = torch.ones(1, 1, device=self.rank)\n loss = ddp_model(inp).sum()\n loss.backward()\n\n ddp_logging_data = ddp_model._get_ddp_logging_data()\n # Note: DETAIL debug mode logs DDP logging data to stdout and\n # thus accesses std::map, which fills in a default value for the\n # type if it didn't exist.\n self.assertEqual(ddp_logging_data.get(\"comm_hook\", \"\"), \"\")\n\n def _test_ddp_hook_with_optimizer_parity(\n self, grad_as_bucket_view, static_graph\n ):\n rank = self.rank\n torch.cuda.set_device(rank)\n torch.manual_seed(rank)\n torch.cuda.manual_seed(rank)\n models_to_test = [\n (LargeNet(), torch.randn(1, 1000).cuda()),\n ]\n if HAS_TORCHVISION:\n models_to_test.append(\n (torchvision.models.resnet50(), torch.randn(1, 3, 3, 1000).cuda())\n )\n # Enable determinism in cudnn operators\n for (model, inp) in models_to_test:\n with torch.backends.cudnn.flags(\n enabled=True, deterministic=True, benchmark=False\n ):\n sgd_lr = 1e-2\n sgd_momentum = 0.9\n sgd_weight_decay = 0.01\n ddp_model_with_optimizer_hook = (\n 
torch.nn.parallel.DistributedDataParallel(\n copy.deepcopy(model).cuda(),\n device_ids=[self.rank],\n gradient_as_bucket_view=grad_as_bucket_view,\n )\n )\n if static_graph:\n ddp_model_with_optimizer_hook._set_static_graph()\n\n # Register hook that runs allreduce + functional SGD step.\n allreduce_hook = default.allreduce_hook\n opt_hook_state = default._OptimizerHookState(\n _FunctionalSGD,\n sgd_lr,\n momentum=sgd_momentum,\n weight_decay=sgd_weight_decay,\n )\n ddp_model_with_optimizer_hook.register_comm_hook(\n None,\n default._hook_then_optimizer(allreduce_hook, opt_hook_state),\n )\n # Create DDP model with no hook that does optimizer after\n # backward.\n ddp_model_with_no_hook = torch.nn.parallel.DistributedDataParallel(\n copy.deepcopy(model).cuda(),\n device_ids=[self.rank],\n gradient_as_bucket_view=grad_as_bucket_view,\n )\n if static_graph:\n ddp_model_with_no_hook._set_static_graph()\n\n sgd_no_hook = torch.optim.SGD(\n ddp_model_with_no_hook.parameters(),\n lr=sgd_lr,\n momentum=sgd_momentum,\n weight_decay=sgd_weight_decay,\n )\n\n # Verify parameters are equal initially.\n for hook_param, allreduce_param in zip(\n ddp_model_with_optimizer_hook.parameters(),\n ddp_model_with_no_hook.parameters(),\n ):\n self.assertEqual(hook_param, allreduce_param)\n\n # Save old parameters to later verify optimizer modified them.\n opt_hook_init_params = copy.deepcopy(\n list(ddp_model_with_optimizer_hook.parameters())\n )\n\n # Run optimizer with hook model.\n for i in range(6):\n ddp_model_with_optimizer_hook.zero_grad()\n out = ddp_model_with_optimizer_hook(inp)\n loss = out.sum()\n loss.backward()\n\n dist.barrier()\n\n # Run regular model.\n for i in range(6):\n ddp_model_with_no_hook.zero_grad()\n out = ddp_model_with_no_hook(inp)\n loss = out.sum()\n loss.backward()\n sgd_no_hook.step()\n\n dist.barrier()\n\n # Now verify parameters are equal.\n for hook_param, allreduce_param in zip(\n ddp_model_with_optimizer_hook.parameters(),\n ddp_model_with_no_hook.parameters(),\n ):\n self.assertEqual(hook_param, allreduce_param)\n\n # Verify optimizer modified parameters, otherwise they would be\n # trivially equal above.\n self.assertNotEqual(\n opt_hook_init_params,\n list(ddp_model_with_optimizer_hook.parameters()),\n )\n dist.barrier()\n\n @sandcastle_skip_if(\n BACKEND != \"nccl\" and BACKEND != \"gloo\",\n \"Only Nccl & Gloo backend support DistributedDataParallel\",\n )\n @sandcastle_skip_if(IS_WINDOWS, \"FunctionalSGD not yet supported with Windows.\")\n @skip_if_lt_x_gpu(2)\n @skip_if_rocm\n def test_ddp_hook_with_optimizer_parity(self):\n for grad_as_bucket_view, static_graph in itertools.product(\n [True, False], [True, False]\n ):\n self._test_ddp_hook_with_optimizer_parity(\n grad_as_bucket_view=grad_as_bucket_view, static_graph=static_graph\n )\n\n def _test_ddp_hook_parity(self, state, hook):\n rank = self.rank\n m = torch.nn.Linear(1, 5)\n try:\n process_group = state.process_group\n except AttributeError:\n process_group = state\n\n net_with_hook = torch.nn.parallel.DistributedDataParallel(\n copy.deepcopy(m).to(rank),\n device_ids=[rank],\n process_group=process_group,\n )\n net_with_hook.register_comm_hook(state=state, hook=hook)\n net_without_hook = torch.nn.parallel.DistributedDataParallel(\n copy.deepcopy(m).to(rank),\n device_ids=[rank],\n process_group=process_group,\n )\n for i in range(100):\n # Clear gradients manually.\n for g in [\n net_without_hook.module.weight.grad,\n net_with_hook.module.weight.grad,\n ]:\n if g is not None:\n g.requires_grad_(False)\n 
g.zero_()\n # Forward + BW\n batch = torch.tensor([rank]).float().cuda(rank)\n loss = net_without_hook(batch).sum()\n loss.backward()\n # For each worker, the gradient on the weight should be worker_rank.\n grad = net_without_hook.module.weight.grad\n avg = grad.clone()\n expected_grad = (\n sum(i for i in range(dist.get_world_size())) / dist.get_world_size()\n )\n loss_hook = net_with_hook(batch).sum()\n loss_hook.backward()\n grad_hook = net_with_hook.module.weight.grad\n avg_hook = grad_hook.clone()\n # Verify hook grad with expected.\n # Cannot use exact match here due to a very small accuracy loss,\n # e.g. 1e-05, for powerSGD hook case.\n assert_func = (\n self.assertEqual\n if hook == default.allreduce_hook\n else torch.testing.assert_allclose\n )\n assert_func(\n avg_hook[0, 0],\n expected_grad,\n msg=f\"Expected hook grad of {expected_grad} but got {avg_hook[0, 0]}\",\n )\n # Verify hook grad with vanilla allreduce\n assert_func(\n avg_hook[0, 0],\n avg[0, 0],\n msg=f\"Expected hook grad to be close to allreduce {avg[0, 0]}, but got {avg_hook[0, 0]}\",\n )\n\n @sandcastle_skip_if(\n BACKEND != \"nccl\" and BACKEND != \"gloo\",\n \"MPI backend does not support DDP communication hook on CUDA devices\",\n )\n @skip_if_lt_x_gpu(int(os.environ[\"WORLD_SIZE\"]))\n @skip_if_rocm\n def test_ddp_hook_parity_allreduce(self):\n self._test_ddp_hook_parity(state=None, hook=default.allreduce_hook)\n\n @sandcastle_skip_if(\n BACKEND != \"nccl\" and BACKEND != \"gloo\",\n \"MPI backend does not support DDP communication hook on CUDA devices\",\n )\n @skip_if_lt_x_gpu(int(os.environ[\"WORLD_SIZE\"]))\n @skip_if_rocm\n def test_ddp_hook_parity_allreduce_process_group(self):\n # process_group is passed in to both DDP and comm. hook\n rank_to_GPU = self._init_multigpu_helper()\n gpus = [rank_to_GPU[int(r)][0] for r in range(dist.get_world_size())]\n process_group = torch.distributed.new_group(gpus)\n self._test_ddp_hook_parity(state=process_group, hook=default.allreduce_hook)\n\n @sandcastle_skip_if(\n BACKEND != \"nccl\" and BACKEND != \"gloo\",\n \"MPI backend does not support DDP communication hook on CUDA devices\",\n )\n @skip_if_lt_x_gpu(int(os.environ[\"WORLD_SIZE\"]))\n @skip_if_rocm\n def test_ddp_hook_parity_powerSGD(self):\n for warm_start in [True, False]:\n powersgd_state = powerSGD.PowerSGDState(\n process_group=None,\n matrix_approximation_rank=1,\n start_powerSGD_iter=2,\n warm_start=warm_start,\n )\n self._test_ddp_hook_parity(\n state=powersgd_state, hook=powerSGD.powerSGD_hook\n )\n\n @sandcastle_skip_if(\n BACKEND != \"nccl\" and BACKEND != \"gloo\",\n \"MPI backend does not support DDP communication hook on CUDA devices\",\n )\n @sandcastle_skip_if(\n NO_MULTIPROCESSING_SPAWN,\n \"Disabled for environments that \\\n don't support multiprocessing with spawn start method\",\n )\n @skip_if_lt_x_gpu(int(os.environ[\"WORLD_SIZE\"]))\n @skip_if_rocm\n def test_ddp_hook_parity_post_localSGD(self):\n # Although we start run local SGD at iteration 10, since we still use the global process group to run it,\n # the post-LocalSGD actually still allreduces gradients globally for the remaining iterations.\n state = post_localSGD.PostLocalSGDState(\n process_group=None, subgroup=dist.group.WORLD, start_localSGD_iter=10\n )\n self._test_ddp_hook_parity(\n state=state, hook=post_localSGD.post_localSGD_hook\n )\n\n # Since we start local SGD later than the total number of 100 iterations,\n # no local SGD actually is executed, and we don't even need to provide a subgroup for this case.\n state = 
post_localSGD.PostLocalSGDState(\n process_group=None, subgroup=None, start_localSGD_iter=1000\n )\n self._test_ddp_hook_parity(\n state=state, hook=post_localSGD.post_localSGD_hook\n )\n\n def _prepare_single_device_module(\n self,\n rank,\n process_group,\n devices,\n device_ids,\n global_batch_size,\n gradient_as_bucket_view=False,\n ):\n model = Net()\n device = devices[0] if devices else torch.device(\"cuda:%d\" % rank)\n ddp_model = DistributedDataParallel(\n copy.deepcopy(model).to(device),\n device_ids=device_ids,\n process_group=process_group,\n bucket_cap_mb=0.001,\n gradient_as_bucket_view=gradient_as_bucket_view,\n )\n\n model.to(device)\n\n input = torch.randn(global_batch_size, 2).to(device)\n target = torch.randn(global_batch_size, 4).to(device)\n\n return model, ddp_model, input, target\n\n def _prepare_cpu_module(\n self,\n process_group,\n global_batch_size,\n gradient_as_bucket_view=False,\n ):\n model = Net()\n ddp_model = DistributedDataParallel(\n copy.deepcopy(model),\n process_group=process_group,\n bucket_cap_mb=0.001,\n gradient_as_bucket_view=gradient_as_bucket_view,\n )\n input = torch.randn(global_batch_size, 2)\n target = torch.randn(global_batch_size, 4)\n return model, ddp_model, input, target\n\n def _test_accumulate_gradients_no_sync(\n self, num_iters=2, ddp_comm_hook=None, gradient_as_bucket_view=False\n ):\n \"\"\"\n This is the recommended way to implement accumulate grads.\n If ``ddp_comm_hook`` input was specified, it will also register that hook\n to the ``ddp_model``. The hook fed into this function should not change\n the resulting gradients.\n \"\"\"\n group, group_id, rank = self._init_global_test()\n world_size = get_world_size()\n\n # FIXME: Add testing for gloo/CUDA\n if BACKEND == \"mpi\" or BACKEND == \"gloo\":\n global_batch_size = world_size\n local_batch_size = 1\n model, ddp_model, input, target = self._prepare_cpu_module(\n group_id, global_batch_size, gradient_as_bucket_view\n )\n\n if BACKEND == \"nccl\":\n rank_to_GPU = self._init_multigpu_helper()\n int_devices = rank_to_GPU[rank][:1]\n devices = [torch.device(\"cuda:\" + str(i)) for i in int_devices]\n global_batch_size = world_size\n local_batch_size = len(devices)\n model, ddp_model, input, target = self._prepare_single_device_module(\n rank,\n group_id,\n devices,\n devices,\n global_batch_size,\n gradient_as_bucket_view,\n )\n\n if ddp_comm_hook is not None:\n ddp_model.register_comm_hook(group_id, ddp_comm_hook)\n\n def step_model(model, input, target):\n model.train()\n output = model(input)\n loss = F.mse_loss(output, target.to(output.device))\n loss.backward()\n\n # ensure accumulate grads works with no_grad => no grads are accumulated.\n with torch.no_grad():\n with ddp_model.no_sync():\n ddp_model.train()\n ddp_model(input)\n\n # check two model parameters over num_iters iterations\n for iteration in range(num_iters):\n step_model(model, input, target)\n\n ddp_input = input[\n rank * local_batch_size : (rank + 1) * local_batch_size\n ]\n ddp_target = target[\n rank * local_batch_size : (rank + 1) * local_batch_size\n ]\n\n if iteration % num_iters == 0:\n # accumulate grads locally\n with ddp_model.no_sync():\n step_model(ddp_model, ddp_input, ddp_target)\n else:\n # sync grads\n step_model(ddp_model, ddp_input, ddp_target)\n\n for i, j in zip(model.parameters(), ddp_model.parameters()):\n if not i.requires_grad:\n continue\n if iteration % num_iters == 0:\n self.assertNotEqual(i.grad, j.grad)\n else:\n self.assertEqual(i.grad, j.grad)\n\n # Shuffle the input so that DDP 
input is different\n torch.manual_seed(1337 + iteration)\n input = input[torch.randperm(global_batch_size)]\n\n @sandcastle_skip_if(\n BACKEND != \"mpi\" and BACKEND != \"nccl\" and BACKEND != \"gloo\",\n \"get_future is only supported on mpi, nccl and gloo\",\n )\n @nccl_skip_if_lt_x_gpu(BACKEND, 2)\n def test_accumulate_gradients_no_sync(self):\n \"\"\"\n Runs _test_accumulate_gradients_no_sync using default inputs\n \"\"\"\n self._test_accumulate_gradients_no_sync()\n\n @sandcastle_skip_if(\n BACKEND != \"mpi\" and BACKEND != \"nccl\" and BACKEND != \"gloo\",\n \"get_future is only supported on mpi, nccl and gloo\",\n )\n @nccl_skip_if_lt_x_gpu(BACKEND, 2)\n def test_accumulate_gradients_no_sync_grad_is_view(self):\n \"\"\"\n Runs _test_accumulate_gradients_no_sync using default inputs\n \"\"\"\n self._test_accumulate_gradients_no_sync(gradient_as_bucket_view=True)\n\n @sandcastle_skip_if(\n BACKEND != \"mpi\" and BACKEND != \"nccl\" and BACKEND != \"gloo\",\n \"get_future is only supported on mpi, nccl and gloo\",\n )\n @nccl_skip_if_lt_x_gpu(BACKEND, 2)\n def test_accumulate_gradients_no_sync_allreduce_hook(self):\n \"\"\"\n Runs multiple iterations on _test_accumulate_gradients_no_sync\n using allreduce hook and validates whether future result was properly\n passed as gradients in reducer.\n \"\"\"\n\n world_size = get_world_size()\n\n def allreduce_hook(\n group_id: object, bucket: dist.GradBucket\n ) -> torch.futures.Future[torch.Tensor]:\n tensors = [bucket.get_tensor() / world_size]\n return (\n group_id.allreduce(tensors)\n .get_future()\n .then(lambda fut: fut.value()[0])\n )\n\n self._test_accumulate_gradients_no_sync(\n num_iters=4, ddp_comm_hook=allreduce_hook\n )\n\n @sandcastle_skip_if(\n BACKEND != \"mpi\" and BACKEND != \"nccl\" and BACKEND != \"gloo\",\n \"get_future is only supported on mpi, nccl and gloo\",\n )\n @nccl_skip_if_lt_x_gpu(BACKEND, 2)\n def test_accumulate_gradients_no_sync_allreduce_with_then_hook(self):\n \"\"\"\n Runs multiple iterations on _test_accumulate_gradients_no_sync using allreduce\n hook that also uses then callbacks. In first then callback result is multiplied\n by 2, and the second callback divides the result by 2 * world_size. 
It validates\n whether final result was properly passed as gradients in reducer.\n \"\"\"\n\n world_size = get_world_size()\n\n def allreduce_with_then_hook(\n group_id: object, bucket: dist.GradBucket\n ) -> torch.futures.Future[torch.Tensor]:\n fut = group_id.allreduce([bucket.get_tensor()]).get_future()\n\n def mult(fut):\n # Multiply the result by 2.\n return 2 * fut.wait()[0]\n\n def div(fut):\n # Divide the result by 2 * world_size.\n return fut.wait() / (2 * world_size)\n\n return fut.then(mult).then(div)\n\n self._test_accumulate_gradients_no_sync(\n num_iters=4, ddp_comm_hook=allreduce_with_then_hook\n )\n\n @sandcastle_skip_if(\n BACKEND != \"mpi\" and BACKEND != \"nccl\" and BACKEND != \"gloo\",\n \"get_future is only supported on mpi, nccl and gloo\",\n )\n @nccl_skip_if_lt_x_gpu(BACKEND, 2)\n def test_get_future(self):\n def mult(fut):\n return [t * 3 for t in fut.wait()]\n\n def add(fut):\n return [t + 1 for t in fut.wait()]\n\n group, group_id, rank = self._init_global_test()\n input = _build_tensor(3, 2)\n if BACKEND == \"nccl\":\n rank_to_GPU = self._init_multigpu_helper()\n device_id = rank_to_GPU[rank][0]\n input = input.to(device_id)\n fut = group_id.allreduce([input]).get_future()\n res = fut.then(mult).then(add).wait()\n expected = _build_tensor(3, 2 * len(group) * 3 + 1)\n\n self.assertEqual(res[0], expected)\n\n @sandcastle_skip_if(\n BACKEND != \"nccl\" and BACKEND != \"gloo\",\n \"Only Nccl & Gloo backend support DistributedDataParallel\",\n )\n @skip_if_no_gpu\n def test_DistributedDataParallel(self):\n group, group_id, rank = self._init_global_test()\n rank_to_GPU = self._init_multigpu_helper()\n gpus = list(rank_to_GPU[rank])\n\n for use_bucket_view, static_graph in itertools.product(\n (False, True), (False, True)\n ):\n self._test_DistributedDataParallel(\n gpu_subset=gpus,\n rank=rank,\n gradient_as_bucket_view=use_bucket_view,\n static_graph=static_graph,\n )\n\n # test output_device\n self._test_DistributedDataParallel(\n gpu_subset=gpus,\n rank=rank,\n output_device=torch.device(\"cuda\"),\n gradient_as_bucket_view=use_bucket_view,\n static_graph=static_graph,\n )\n\n # test device_ids\n gpus_list = [torch.device(\"cuda:\" + str(i)) for i in gpus]\n self._test_DistributedDataParallel(\n gpu_subset=gpus_list,\n rank=rank,\n output_device=torch.device(\"cuda\"),\n gradient_as_bucket_view=use_bucket_view,\n static_graph=static_graph,\n )\n\n def _test_DistributedDataParallel_with_amp(self, grad_is_view=False):\n torch.manual_seed(31415)\n # Creates model and optimizer in default precision\n model = copy.deepcopy(DDP_NET).cuda()\n optimizer = torch.optim.SGD(model.parameters(), lr=0.03)\n\n # Creates a GradScaler once at the beginning of training.\n scaler = GradScaler()\n\n ddp_model = nn.parallel.DistributedDataParallel(\n model, device_ids=[self.rank], gradient_as_bucket_view=grad_is_view\n )\n\n input = torch.randn(dist.get_world_size() * 2, 2).cuda()\n target = torch.randn(dist.get_world_size() * 2, 4).cuda()\n loss_fn = nn.MSELoss()\n\n # verify grads are none before training\n for p in ddp_model.parameters():\n self.assertTrue(p is not None)\n self.assertTrue(p.grad is None)\n\n for idx in range(20):\n optimizer.zero_grad()\n # Runs the forward pass with autocasting.\n with autocast():\n output = ddp_model(input)\n loss = loss_fn(output, target)\n\n # Scales loss. 
Calls backward() on scaled loss to create scaled gradients.\n # Backward passes under autocast are not recommended.\n # Backward ops run in the same dtype autocast chose for corresponding forward ops.\n scaler.scale(loss).backward()\n\n # verify grads are not none and are valid during training\n for p in ddp_model.parameters():\n if p.requires_grad:\n self.assertTrue(p.grad is not None)\n self.assertFalse(p.grad.isnan().any())\n self.assertFalse(p.grad.isinf().any())\n\n # scaler.step() first unscales the gradients of the optimizer's assigned params.\n # If these gradients do not contain infs or NaNs, optimizer.step() is then called,\n # otherwise, optimizer.step() is skipped.\n scaler.step(optimizer)\n\n # Updates the scale for next iteration.\n scaler.update()\n\n # Shuffle the input so that DDP input is different\n torch.manual_seed(1337 + idx)\n input = input[torch.randperm(dist.get_world_size() * 2)]\n\n return ddp_model\n\n @sandcastle_skip_if(\n BACKEND != \"nccl\" and BACKEND != \"gloo\",\n \"Only Nccl & Gloo backend support DistributedDataParallel\",\n )\n @skip_if_no_gpu\n def test_DistributedDataParallel_with_amp_and_grad_is_view(self):\n torch.cuda.set_device(self.rank)\n ddp_model_grad_not_view = self._test_DistributedDataParallel_with_amp(\n grad_is_view=False\n )\n ddp_model_grad_is_view = self._test_DistributedDataParallel_with_amp(\n grad_is_view=True\n )\n for i, j in zip(\n ddp_model_grad_not_view.parameters(),\n ddp_model_grad_is_view.parameters(),\n ):\n self.assertEqual(i, j)\n\n def _test_DistributedDataParallel_SyncBatchNorm(\n self,\n gpu_subset,\n rank,\n local_bs,\n global_bs,\n offset,\n output_device=None,\n affine=True,\n ):\n # Run a simple end to end DDP model, use result of single node model\n # as baseline\n\n # cpu training setup\n model = BN_NET if affine else BN_NET_NO_AFFINE\n\n # single gpu training setup\n model_gpu = copy.deepcopy(model)\n model_gpu.cuda(gpu_subset[0])\n\n # DDP training setup\n model_DDP = nn.SyncBatchNorm.convert_sync_batchnorm(copy.deepcopy(model))\n model_DDP.cuda(gpu_subset[0])\n model_DDP = nn.parallel.DistributedDataParallel(\n model_DDP, device_ids=gpu_subset\n )\n\n # test serializable/unserializable\n with tempfile.NamedTemporaryFile() as tmp:\n if sys.platform == \"win32\":\n torch.save(model_DDP, tmp)\n tmp.seek(0)\n model_DDP = torch.load(tmp)\n else:\n torch.save(model_DDP, tmp.name)\n model_DDP = torch.load(tmp.name)\n\n # data initialization\n input_cpu = torch.randn(global_bs, 2)\n target = torch.randn(global_bs, 4)\n loss = nn.MSELoss()\n\n # check two model parameters over 5 iterations\n self._test_DDP_niter(\n model_gpu,\n model_DDP,\n input_cpu.cuda(gpu_subset[0]),\n target.cuda(gpu_subset[0]),\n loss,\n local_bs,\n rank,\n global_bs,\n True,\n offset,\n dist.get_world_size(),\n 5 if affine else 2,\n )\n self._barrier()\n\n @skip_if_lt_x_gpu(2)\n @sandcastle_skip_if(\n BACKEND != \"nccl\" and BACKEND != \"gloo\",\n \"Only NCCL and GLOO backend support DistributedDataParallel\",\n )\n @sandcastle_skip_if(\n IS_WINDOWS, \"PostLocalSGDOptimizer not yet supported with Windows.\"\n )\n def test_post_localSGD_optimizer_parity(self, grad_is_view=False):\n learning_rate = 0.03\n period = 4\n warmup_steps = 10\n torch.cuda.set_device(self.rank)\n net = torch.nn.parallel.DistributedDataParallel(\n copy.deepcopy(DDP_NET).cuda(),\n device_ids=[self.rank],\n gradient_as_bucket_view=grad_is_view,\n )\n opt = torch.optim.SGD(net.parameters(), lr=learning_rate)\n averager = averagers.PeriodicModelAverager(\n period=period, 
warmup_steps=warmup_steps\n )\n\n post_localSGD_net = torch.nn.parallel.DistributedDataParallel(\n copy.deepcopy(DDP_NET).cuda(),\n device_ids=[self.rank],\n gradient_as_bucket_view=grad_is_view,\n )\n post_localSGD_opt = post_localSGD_optimizer.PostLocalSGDOptimizer(\n params=post_localSGD_net.parameters(),\n optimizer_class=torch.optim.SGD,\n averager=averagers.PeriodicModelAverager(\n period=period, warmup_steps=warmup_steps\n ),\n lr=learning_rate,\n )\n\n input = torch.randn(dist.get_world_size() * 2, 2).cuda()\n target = torch.randn(dist.get_world_size() * 2, 4).cuda()\n loss_fn = nn.MSELoss()\n\n for _ in range(20):\n opt.zero_grad()\n output = net(input)\n loss = loss_fn(output, target)\n loss.backward()\n opt.step()\n averager.average_parameters(net.parameters())\n\n post_localSGD_opt.zero_grad()\n post_localSGD_output = post_localSGD_net(input)\n post_localSGD_loss = loss_fn(post_localSGD_output, target)\n post_localSGD_loss.backward()\n post_localSGD_opt.step()\n\n for p1, p2 in zip(net.parameters(), post_localSGD_net.parameters()):\n self.assertEqual(p1.data, p2.data)\n\n @sandcastle_skip_if(\n BACKEND != \"nccl\" and BACKEND != \"gloo\",\n \"Only Nccl & Gloo backend support DistributedDataParallel\",\n )\n @skip_if_no_gpu\n def test_DistributedDataParallel_SyncBatchNorm_Channels_Last(self):\n group, group_id, rank = self._init_global_test()\n num_processes = dist.get_world_size()\n local_bs = 2\n bs_offset = int(rank * 2)\n global_bs = int(num_processes * 2)\n\n model = ONLY_SBN_NET\n model_gpu = copy.deepcopy(model).cuda(rank)\n model_DDP = nn.parallel.DistributedDataParallel(\n model_gpu, device_ids=[rank]\n )\n\n memory_format = torch.channels_last\n input_gpu = (\n torch.randn(global_bs, 2, 4, 4, dtype=torch.float)\n .cuda(rank)\n .to(memory_format=memory_format)\n )\n target_gpu = (\n torch.randn(global_bs, 2, 4, 4, dtype=torch.float)\n .cuda(rank)\n .to(memory_format=memory_format)\n )\n loss = nn.MSELoss()\n\n # check two model parameters over 5 iterations\n self._test_DDP_niter(\n model_gpu,\n model_DDP,\n input_gpu,\n target_gpu,\n loss,\n local_bs,\n rank,\n global_bs,\n True,\n bs_offset,\n dist.get_world_size(),\n memory_format=memory_format,\n )\n self._barrier()\n\n @sandcastle_skip_if(\n BACKEND != \"nccl\" and BACKEND != \"gloo\",\n \"Only Nccl & Gloo backend support DistributedDataParallel\",\n )\n @skip_if_no_gpu\n def test_DistributedDataParallel_SyncBatchNorm(self):\n group, group_id, rank = self._init_global_test()\n rank_to_GPU = self._init_multigpu_helper()\n # DDP does not support replicating BN layers within a process, hence\n # testing with one module replica per process\n gpus = [rank]\n\n num_processes = dist.get_world_size()\n local_bs = 2\n bs_offset = int(rank * 2)\n global_bs = int(num_processes * 2)\n\n self._test_DistributedDataParallel_SyncBatchNorm(\n gpu_subset=gpus,\n rank=rank,\n local_bs=local_bs,\n global_bs=global_bs,\n offset=bs_offset,\n )\n\n # test output_device\n self._test_DistributedDataParallel_SyncBatchNorm(\n gpu_subset=gpus,\n rank=rank,\n local_bs=local_bs,\n global_bs=global_bs,\n offset=bs_offset,\n output_device=torch.device(\"cuda\"),\n )\n\n # test device_ids\n gpus = [torch.device(\"cuda:\" + str(i)) for i in gpus]\n self._test_DistributedDataParallel_SyncBatchNorm(\n gpu_subset=gpus,\n rank=rank,\n local_bs=local_bs,\n global_bs=global_bs,\n offset=bs_offset,\n output_device=torch.device(\"cuda\"),\n )\n\n @sandcastle_skip_if(\n BACKEND != \"nccl\" and BACKEND != \"gloo\",\n \"Only Nccl & Gloo backend support 
DistributedDataParallel\",\n )\n @skip_if_no_gpu\n def test_DistributedDataParallel_SyncBatchNorm_No_Affine(self):\n group, group_id, rank = self._init_global_test()\n rank_to_GPU = self._init_multigpu_helper()\n # DDP does not support replicating BN layers within a process, hence\n # testing with one module replica per process\n gpus = [rank]\n\n num_processes = dist.get_world_size()\n local_bs = 2\n bs_offset = int(rank * 2)\n global_bs = int(num_processes * 2)\n\n self._test_DistributedDataParallel_SyncBatchNorm(\n gpu_subset=gpus,\n rank=rank,\n local_bs=local_bs,\n global_bs=global_bs,\n offset=bs_offset,\n affine=False,\n )\n\n @sandcastle_skip_if(\n BACKEND != \"nccl\" and BACKEND != \"gloo\",\n \"Only Nccl & Gloo backend support DistributedDataParallel\",\n )\n @skip_if_no_gpu\n def test_DistributedDataParallel_SyncBatchNorm_2D_Input(self):\n group, group_id, rank = self._init_global_test()\n rank_to_GPU = self._init_multigpu_helper()\n # DDP does not support replicating BN layers within a process, hence\n # testing with one module replica per process\n gpus = [rank]\n\n model = nn.BatchNorm1d(2)\n\n # single gpu training setup\n model_gpu = copy.deepcopy(model)\n model_gpu.cuda(gpus[0])\n\n # DDP training setup\n model_DDP = nn.SyncBatchNorm.convert_sync_batchnorm(copy.deepcopy(model))\n model_DDP.cuda(gpus[0])\n model_DDP = nn.parallel.DistributedDataParallel(model_DDP, device_ids=gpus)\n\n local_bs = len(gpus) * 2\n global_bs = dist.get_world_size() * local_bs\n input_cpu = torch.randn(global_bs, 2)\n target = torch.randn(global_bs, 2)\n loss = nn.MSELoss()\n\n # disabling cudnn.\n # SyncBatchNorm goes through native_batch_norm kernel, this avoids the\n # numerical issue created by the divergent code path.\n with torch.backends.cudnn.flags(False):\n # check two model parameters over 5 iterations\n self._test_DDP_niter(\n model_gpu,\n model_DDP,\n input_cpu.cuda(gpus[0]),\n target.cuda(gpus[0]),\n loss,\n local_bs,\n rank,\n global_bs,\n True,\n )\n self._barrier()\n\n @sandcastle_skip_if(\n BACKEND != \"nccl\" and BACKEND != \"gloo\",\n \"Only Nccl & Gloo backend support DistributedDataParallel\",\n )\n @skip_if_no_gpu\n @require_world_size(2)\n def test_DistributedDataParallel_SyncBatchNorm_Single_Input_Per_Process(self):\n group, group_id, rank = self._init_global_test()\n rank_to_GPU = self._init_multigpu_helper()\n # DDP does not support replicating BN layers within a process, hence\n # testing with one module replica per process\n gpus = [rank]\n\n model = nn.BatchNorm1d(2)\n\n # single gpu training setup\n model_gpu = copy.deepcopy(model)\n model_gpu.cuda(gpus[0])\n\n # DDP training setup\n model_DDP = nn.SyncBatchNorm.convert_sync_batchnorm(copy.deepcopy(model))\n model_DDP.cuda(gpus[0])\n model_DDP = nn.parallel.DistributedDataParallel(model_DDP, device_ids=gpus)\n\n local_bs = 1\n global_bs = dist.get_world_size()\n input_cpu = torch.randn(global_bs, 2)\n target = torch.randn(global_bs, 2)\n loss = nn.MSELoss()\n\n # disabling cudnn.\n # SyncBatchNorm goes through native_batch_norm kernel, this avoids the\n # numerical issue created by the divergent code path.\n with torch.backends.cudnn.flags(False):\n # check two model parameters over 5 iterations\n self._test_DDP_niter(\n model_gpu,\n model_DDP,\n input_cpu.cuda(gpus[0]),\n target.cuda(gpus[0]),\n loss,\n local_bs,\n rank,\n global_bs,\n True,\n )\n self._barrier()\n\n @sandcastle_skip_if(\n BACKEND != \"nccl\" and BACKEND != \"gloo\",\n \"Only Nccl & Gloo backend support DistributedDataParallel\",\n )\n 
@skip_if_no_gpu\n def test_DistributedDataParallel_SyncBatchNorm_Diff_Input_Sizes_Running_Value(\n self,\n ):\n group, group_id, rank = self._init_global_test()\n rank_to_GPU = self._init_multigpu_helper()\n model = nn.parallel.DistributedDataParallel(\n ONLY_SBN_NET.cuda(rank), device_ids=[rank]\n )\n\n input_var = []\n for i in range(dist.get_world_size()):\n input_var_rank = torch.cat(\n [\n torch.ones(2, 1, 10 ** (i + 1)) * (0.1 ** (i - 1)),\n torch.ones(2, 1, 10 ** (i + 1)) * (0.3 ** (i - 1)),\n ],\n dim=1,\n )\n input_var.append(input_var_rank)\n\n all_input_var = torch.cat(\n [\n x.permute(1, 0, 2).contiguous().view(ONLY_SBN_NET.num_features, -1)\n for x in input_var\n ],\n dim=1,\n ).cuda(rank)\n\n for i in range(100):\n y = model(input_var[rank].cuda(rank))\n y.mean().backward()\n\n running_mean, running_var = (\n model.module.running_mean,\n model.module.running_var,\n )\n torch.testing.assert_allclose(running_mean, all_input_var.mean(1))\n torch.testing.assert_allclose(running_var, all_input_var.var(1))\n\n @sandcastle_skip_if(\n BACKEND != \"nccl\" and BACKEND != \"gloo\",\n \"Only Nccl & Gloo backend support DistributedDataParallel\",\n )\n @skip_if_no_gpu\n def test_DistributedDataParallel_SyncBatchNorm_Diff_Input_Sizes_gradient(self):\n group, group_id, rank = self._init_global_test()\n # only do single GPU per process\n gpus = [rank]\n\n # cpu training setup\n model = BN_NET\n\n num_processes = dist.get_world_size()\n local_bs = rank + 2\n bs_offset = int((rank + 3) * rank / 2)\n global_bs = int((num_processes + 3) * num_processes / 2)\n\n self._test_DistributedDataParallel_SyncBatchNorm(\n gpu_subset=gpus,\n rank=rank,\n local_bs=local_bs,\n global_bs=global_bs,\n offset=bs_offset,\n )\n\n def _test_ddp_logging_data(self, is_gpu):\n rank = dist.get_rank()\n model_DDP = copy.deepcopy(DDP_NET)\n if is_gpu:\n model_DDP = nn.parallel.DistributedDataParallel(\n model_DDP.cuda(rank), device_ids=[rank]\n )\n else:\n model_DDP = nn.parallel.DistributedDataParallel(model_DDP)\n\n # dummy data initialization\n local_bs = 2\n batch_size, input, target, loss = self._prepare_dummy_data(local_bs)\n if is_gpu:\n input = input.cuda(rank)\n target = target.cuda(rank)\n\n model_DDP._set_ddp_runtime_logging_sample_rate(2)\n\n for idx in range(20):\n offset = rank * local_bs\n\n # DDP training, DDP scatters subsets of input to nodes/GPUs\n self._test_DDP_helper(\n model_DDP,\n input[offset : offset + local_bs],\n target[offset : offset + local_bs],\n loss,\n 1,\n )\n\n self._model_step_with_zero_grad(model_DDP)\n\n # Verify DDP logging data is sampled as expected\n # If it has ran more than 10 iteratons and this is\n # the sampled iteration for measuring run time stats,\n # the run time stats for this idx-th iteration will not\n # be zeros.\n ddp_logging_data = model_DDP._get_ddp_logging_data()\n if idx > 0 and (idx < 10 or idx % 2 == 0):\n self.assertGreaterEqual(\n ddp_logging_data.get(\"forward_compute_time\"), 1\n )\n self.assertGreaterEqual(\n ddp_logging_data.get(\"backward_compute_time\"), 1\n )\n self.assertGreaterEqual(\n ddp_logging_data.get(\"backward_comm_time\"), 1\n )\n self.assertGreaterEqual(\n ddp_logging_data.get(\"backward_compute_time\"),\n ddp_logging_data.get(\"backward_compute_comm_overlap_time\"),\n )\n self.assertGreaterEqual(\n ddp_logging_data.get(\"backward_comm_time\"),\n ddp_logging_data.get(\"backward_compute_comm_overlap_time\"),\n )\n self.assertEqual(ddp_logging_data.get(\"iteration\"), idx)\n elif idx > 0:\n # if the idx-th iteration is not sampled to set 
runtime stats,\n # ddp_logging_data.iteration will not be updated to current\n # iteration.\n self.assertNotEqual(ddp_logging_data.get(\"iteration\"), idx)\n\n # Shuffle the input so that DDP input is different\n input = input[torch.randperm(batch_size)]\n\n return model_DDP\n\n @sandcastle_skip_if(BACKEND == \"nccl\", \"nccl does not support DDP on CPU models\")\n def test_ddp_logging_data_cpu(self):\n def parse_env(var):\n return os.environ[var] if var in os.environ else \"N/A\"\n\n os.environ[\"TORCH_DISTRIBUTED_DEBUG\"] = \"INFO\"\n group, group_id, rank = self._init_global_test()\n model_DDP = self._test_ddp_logging_data(is_gpu=False)\n\n ddp_logging_data = model_DDP._get_ddp_logging_data()\n self.assertEqual(ddp_logging_data.get(\"world_size\"), dist.get_world_size())\n self.assertEqual(ddp_logging_data.get(\"rank\"), dist.get_rank())\n self.assertEqual(ddp_logging_data.get(\"module_name\"), \"Net\")\n self.assertEqual(ddp_logging_data.get(\"device_ids\"), \"\")\n # output_device is -1 in default if it is not set, e.g.\n # output_device of CPU training is -1.\n self.assertEqual(ddp_logging_data.get(\"output_device\"), -1)\n self.assertEqual(ddp_logging_data.get(\"broadcast_buffers\"), 1)\n self.assertEqual(ddp_logging_data.get(\"bucket_cap_bytes\"), 25 * 1024 * 1024)\n self.assertEqual(ddp_logging_data.get(\"find_unused_parameters\"), 0)\n self.assertEqual(ddp_logging_data.get(\"gradient_as_bucket_view\"), 0)\n self.assertEqual(\n ddp_logging_data.get(\"backend_name\"), dist.get_backend(group_id)\n )\n self.assertEqual(ddp_logging_data.get(\"iteration\"), 18)\n params = list(model_DDP.parameters())\n num_params = 0\n param_size = 0\n params = list(\n parameter\n for parameter in filter(\n lambda parameter: parameter.requires_grad, params\n )\n )\n for p in params:\n num_params += 1\n param_size += p.numel() * p.element_size()\n self.assertEqual(ddp_logging_data.get(\"dtypes\"), \"float\")\n self.assertEqual(\n ddp_logging_data.get(\"total_parameter_size_bytes\"), param_size\n )\n self.assertEqual(ddp_logging_data.get(\"num_parameter_tensors\"), num_params)\n self.assertEqual(ddp_logging_data.get(\"bucket_sizes\"), str(param_size))\n self.assertEqual(\n ddp_logging_data.get(\"master_port\"), parse_env(\"MASTER_PORT\")\n )\n self.assertEqual(\n ddp_logging_data.get(\"master_addr\"), parse_env(\"MASTER_ADDR\")\n )\n self.assertEqual(\n ddp_logging_data.get(\"torch_distributed_debug\"),\n parse_env(\"TORCH_DISTRIBUTED_DEBUG\"),\n )\n self.assertEqual(\n ddp_logging_data.get(\"cuda_visible_devices\"),\n parse_env(\"CUDA_VISIBLE_DEVICES\"),\n )\n if ddp_logging_data.get(\"backend_name\") == \"gloo\":\n self.assertEqual(\n ddp_logging_data.get(\"gloo_socket_ifname\"),\n parse_env(\"GLOO_SOCKET_IFNAME\"),\n )\n self.assertEqual(\n ddp_logging_data.get(\"gloo_device_transport\"),\n parse_env(\"GLOO_DEVICE_TRANSPORT\"),\n )\n self.assertEqual(ddp_logging_data.get(\"nccl_socket_ifname\"), None)\n self.assertEqual(ddp_logging_data.get(\"nccl_blocking_wait\"), None)\n self.assertEqual(ddp_logging_data.get(\"nccl_async_error_handling\"), None)\n self.assertEqual(ddp_logging_data.get(\"nccl_debug\"), None)\n self.assertEqual(ddp_logging_data.get(\"nccl_nthreads\"), None)\n self.assertEqual(ddp_logging_data.get(\"nccl_ib_timeout\"), None)\n # test runtime logging fields\n # Note: DETAIL debug mode logs DDP logging data to stdout and\n # thus accesses std::map, which fills in a default value for the\n # type if it didn't exist.\n self.assertEqual(ddp_logging_data.get(\"unused_parameter_size\", 0), 
0)\n self.assertEqual(ddp_logging_data.get(\"has_rebuilt_buckets\"), 1)\n self.assertEqual(\n ddp_logging_data.get(\"rebuilt_bucket_sizes\"), str(param_size)\n )\n # It is hard to test accurate latency, but it can test whether the latency is\n # a valid value and in the expected range.\n self.assertGreaterEqual(ddp_logging_data.get(\"avg_forward_compute_time\"), 1)\n self.assertGreaterEqual(\n ddp_logging_data.get(\"avg_backward_compute_time\"), 1\n )\n self.assertGreaterEqual(ddp_logging_data.get(\"avg_backward_comm_time\"), 1)\n self.assertGreaterEqual(\n ddp_logging_data.get(\"avg_backward_compute_time\"),\n ddp_logging_data.get(\"avg_backward_compute_comm_overlap_time\"),\n )\n self.assertGreaterEqual(\n ddp_logging_data.get(\"avg_backward_comm_time\"),\n ddp_logging_data.get(\"avg_backward_compute_comm_overlap_time\"),\n )\n # test larger net with mixed data types, verify multiple bucket sizes\n model = LargeNet()\n model.float()\n model.fc1.double()\n model_DDP = nn.parallel.DistributedDataParallel(model, bucket_cap_mb=1.5)\n ddp_logging_data = model_DDP._get_ddp_logging_data()\n params = list(model_DDP.parameters())\n self.assertEqual(\n ddp_logging_data.get(\"bucket_cap_bytes\"), int(1.5 * 1024 * 1024)\n )\n bucket_sizes = [\n params[1].numel() * params[1].element_size(),\n params[0].numel() * params[0].element_size(),\n ]\n self.assertEqual(\n ddp_logging_data.get(\"bucket_sizes\"),\n \", \".join(str(x) for x in bucket_sizes),\n )\n self.assertEqual(ddp_logging_data.get(\"dtypes\"), \"double, float\")\n\n @sandcastle_skip_if(\n BACKEND != \"nccl\" and BACKEND != \"gloo\",\n \"Only Nccl & Gloo backend support DistributedDataParallel\",\n )\n @skip_if_no_gpu\n def test_ddp_logging_data_gpu(self):\n group, group_id, rank = self._init_global_test()\n model_DDP = self._test_ddp_logging_data(is_gpu=True)\n ddp_logging_data = model_DDP._get_ddp_logging_data()\n self.assertEqual(ddp_logging_data.get(\"device_ids\"), str(rank))\n self.assertEqual(ddp_logging_data.get(\"output_device\"), rank)\n # test runtime logging fields\n # It is hard to test accurate latency, but it can test whether the latency is\n # a valid value and in the expected range.\n self.assertGreaterEqual(ddp_logging_data.get(\"avg_forward_compute_time\"), 1)\n self.assertGreaterEqual(\n ddp_logging_data.get(\"avg_backward_compute_comm_overlap_time\"), 1\n )\n self.assertGreaterEqual(\n ddp_logging_data.get(\"avg_backward_compute_time\"),\n ddp_logging_data.get(\"avg_backward_compute_comm_overlap_time\"),\n )\n self.assertGreaterEqual(\n ddp_logging_data.get(\"avg_backward_comm_time\"),\n ddp_logging_data.get(\"avg_backward_compute_comm_overlap_time\"),\n )\n\n @sandcastle_skip_if(BACKEND == \"nccl\", \"nccl does not support DDP on CPU models\")\n def test_static_graph_api_cpu(self):\n model_DDP = nn.parallel.DistributedDataParallel(DDP_NET)\n model_DDP._set_static_graph()\n self.assertEqual(\n model_DDP._get_ddp_logging_data().get(\"static_graph\"), True\n )\n expected_err = \"should be called before training loop starts\"\n with self.assertRaisesRegex(RuntimeError, expected_err):\n local_bs = 2\n batch_size, input, target, loss = self._prepare_dummy_data(local_bs)\n offset = dist.get_rank() * local_bs\n\n # DDP training, DDP scatters subsets of input to nodes/GPUs\n self._test_DDP_helper(\n model_DDP,\n input[offset : offset + local_bs],\n target[offset : offset + local_bs],\n loss,\n 1,\n )\n model_DDP._set_static_graph()\n\n # Verify error was logged in ddp_logging_data.\n verify_ddp_error_logged(model_DDP, 
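The logging assertions above read fields through `_get_ddp_logging_data()`, an underscore-prefixed internal accessor, so the exact key set should not be treated as a stable public API. A small sketch of inspecting those fields, assuming a `torchrun` launch and the CPU-only gloo backend:

```python
import torch
import torch.distributed as dist
import torch.nn as nn

if __name__ == "__main__":
    # Assumes launch via torchrun; gloo keeps this sketch CPU-only.
    dist.init_process_group(backend="gloo")
    model = nn.parallel.DistributedDataParallel(nn.Linear(10, 10))

    inp = torch.randn(4, 10)
    for _ in range(3):
        model(inp).sum().backward()

    # Internal accessor used by the tests above: a flat mapping of
    # construction-time and runtime fields (world size, bucket sizes, timings, ...).
    logging_data = model._get_ddp_logging_data()
    for key in ("world_size", "rank", "backend_name", "bucket_cap_bytes", "iteration"):
        print(key, logging_data.get(key))

    dist.destroy_process_group()
```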
expected_err)\n\n @skipIfNoTorchVision\n def test_SyncBatchNorm_process_group(self):\n # When adopting `convert_sync_batchnorm` to convert a `nn.modules`,\n # it need to recursively pass the `process_group` in the module when the `SyncBatchNorm`\n # is nested in a sub-module or sub-sub-module (e.g. resnet50 in torchvision.models).\n\n process_ids = 0\n process_group = torch.distributed.new_group([process_ids])\n res50_model = torchvision.models.resnet50()\n res50_model_sync = nn.SyncBatchNorm.convert_sync_batchnorm(\n copy.deepcopy(res50_model), process_group\n )\n process_group_sync = res50_model_sync.layer1[0].bn1.process_group\n self.assertEqual(process_group_sync, process_group)\n\n def _run_reduction_test(\n self, tensor, expected_tensor, op, reduction_fn=dist.all_reduce, dst=None\n ):\n if reduction_fn != dist.all_reduce and dst is None:\n raise ValueError(f\"Reduction fn {reduction_fn} must specify dst!\")\n if dst is not None:\n reduction_fn(tensor, dst, op)\n # Only destination rank tensor is expected to have final result.\n if dist.get_rank() == dst:\n self.assertEqual(tensor, expected_tensor)\n else:\n reduction_fn(tensor, op)\n self.assertEqual(tensor, expected_tensor)\n\n @require_backend({\"nccl\"})\n @require_backends_available({\"nccl\"})\n @skip_if_lt_x_gpu(2)\n def test_nccl_backend_bool_allreduce(self):\n torch.cuda.set_device(self.rank)\n # Run all_reduce with PRODUCT\n element = self.rank % 2 == 0\n for op in [dist.ReduceOp.PRODUCT, dist.ReduceOp.MIN]:\n input_tensor = torch.tensor([element, element]).to(self.rank)\n self._run_reduction_test(\n input_tensor, torch.tensor([False, False]).to(self.rank), op\n )\n # Ensure that all ranks contributing True (cast to 1) results in the\n # correct reduction.\n input_tensor = torch.tensor([True, True]).to(self.rank)\n expected_tensor = input_tensor.clone()\n self._run_reduction_test(input_tensor, expected_tensor, op)\n\n # Run all_reduce with SUM\n for op in [dist.ReduceOp.SUM, dist.ReduceOp.MAX]:\n input_tensor = torch.tensor([element, element]).to(self.rank)\n self._run_reduction_test(\n input_tensor, torch.tensor([True, True]).to(self.rank), op\n )\n # TODO: NCCL backend does not work correctly for bitwise reduction ops\n # (see https://github.com/pytorch/pytorch/issues/41362). 
Add tests for\n # these once it is supported.\n\n @require_backend({\"nccl\"})\n @require_backends_available({\"nccl\"})\n @skip_if_lt_x_gpu(2)\n def test_nccl_backend_bool_allgather(self):\n torch.cuda.set_device(self.rank)\n inp = {0: [True, True], 1: [False, True]}\n input_tensor = torch.tensor(inp[self.rank % 2]).to(self.rank)\n # Preserve a copy of the tensor to compare against after allgather.\n input_tensor_copy = input_tensor.clone()\n tensor_list = [\n torch.tensor([False, False]).to(self.rank)\n for _ in range(dist.get_world_size())\n ]\n dist.all_gather(tensor_list, input_tensor)\n\n self.assertEqual(len(tensor_list), dist.get_world_size())\n for i, t in enumerate(tensor_list):\n expected = torch.tensor(inp[i % 2]).to(self.rank)\n self.assertEqual(t, expected)\n # Ensure that the input tensor is not modified, since this collective\n # does not modify its input.\n self.assertEqual(input_tensor_copy, input_tensor)\n\n @require_backend({\"nccl\"})\n @require_backends_available({\"nccl\"})\n @skip_if_lt_x_gpu(int(os.environ[\"WORLD_SIZE\"]))\n def test_nccl_backend_bool_reduce(self):\n torch.cuda.set_device(self.rank)\n inp = {0: [True, True], 1: [False, False]}\n # Run reduce() with product op\n for op in [dist.ReduceOp.PRODUCT, dist.ReduceOp.MIN]:\n input_tensor = torch.tensor(inp[self.rank % 2]).to(self.rank)\n expected = torch.tensor([False, False]).to(self.rank)\n self._run_reduction_test(input_tensor, expected, op, dist.reduce, dst=0)\n # Ensure that all ranks contributing True (cast to 1) results in the\n # correct reduction.\n input_tensor = torch.tensor([True, True]).to(self.rank)\n expected_tensor = input_tensor.clone()\n self._run_reduction_test(\n input_tensor, expected_tensor, op, dist.reduce, dst=0\n )\n\n for op in [dist.ReduceOp.SUM, dist.ReduceOp.MAX]:\n input_tensor = torch.tensor(inp[self.rank % 2]).to(self.rank)\n expected = (\n torch.tensor([True, True]).to(self.rank)\n if self.rank == 0\n else input_tensor.clone()\n )\n self._run_reduction_test(input_tensor, expected, op, dist.reduce, dst=0)\n\n @require_backend({\"nccl\"})\n @require_backends_available({\"nccl\"})\n @skip_if_lt_x_gpu(2)\n def test_nccl_backend_bool_broadcast(self):\n tensor_size = 10\n bcast_tensor = torch.tensor(\n [\n (random.random() < 0.5 if self.rank == 0 else False)\n for _ in range(tensor_size)\n ]\n ).to(self.rank)\n dist.broadcast(bcast_tensor, src=0)\n # Now allgather and ensure the tensors are equal.\n tensor_list = [\n torch.tensor([False for _ in range(tensor_size)]).to(self.rank)\n for _ in range(dist.get_world_size())\n ]\n dist.all_gather(tensor_list, bcast_tensor)\n expected = tensor_list[0]\n for tensor in tensor_list[1:]:\n self.assertEqual(tensor, expected)\n\n @sandcastle_skip_if(\n BACKEND != \"nccl\" and BACKEND != \"gloo\",\n \"Only NCCL and GLOO backend support DistributedDataParallel\",\n )\n @skip_if_lt_x_gpu(int(os.environ[\"WORLD_SIZE\"]))\n def test_DistributedSampler_padding(self):\n # Tests padding of distributed sampler.\n world_size = dist.get_world_size()\n\n # Simulates the 'casual' dataset size\n dataset_size = 100 + world_size + 1\n dataset = [torch.ones(1).to(self.rank) * i for i in range(dataset_size)]\n\n # Simulates the 'tiny' dataset size\n dataset_tiny_size = max(world_size // 2 - 1, 1)\n dataset_tiny = [\n torch.ones(1).to(self.rank) * i for i in range(dataset_tiny_size)\n ]\n\n # Specifying drop_last=True will cause the tail of the data to be dropped.\n dist_sampler = DistributedSampler(dataset=dataset, drop_last=True)\n local_num_samples, 
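The boolean-collective tests above rely on MIN/MAX (and SUM/PRODUCT) reductions over `torch.bool` tensors behaving like logical AND/OR. A short sketch under the same assumptions as those tests (NCCL backend, one GPU per process, `torchrun` launch):

```python
import torch
import torch.distributed as dist

if __name__ == "__main__":
    # Assumes launch via torchrun with one CUDA device per process.
    dist.init_process_group(backend="nccl")
    rank = dist.get_rank()
    torch.cuda.set_device(rank)

    # Each rank contributes True only if its rank is even.
    flag = torch.tensor([rank % 2 == 0], device=rank)

    any_true = flag.clone()
    dist.all_reduce(any_true, op=dist.ReduceOp.MAX)  # acts as logical OR across ranks

    all_true = flag.clone()
    dist.all_reduce(all_true, op=dist.ReduceOp.MIN)  # acts as logical AND across ranks

    print(f"rank {rank}: any={bool(any_true)}, all={bool(all_true)}")
    dist.destroy_process_group()
```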
local_dataset_size = (\n dist_sampler.num_samples,\n dist_sampler.total_size,\n )\n # The effective dataset size should be the greatest integer that is <=\n # dataset_size that is divisible by the world_size. This is to ensure each\n # rank processes the same number of samples.\n effective_dataset_size = (\n math.ceil((dataset_size - world_size) / world_size)\n if dataset_size % world_size != 0\n else dataset_size / world_size\n )\n self.assertEqual(local_num_samples, effective_dataset_size)\n self.assertEqual(local_dataset_size, local_num_samples * world_size)\n indices_list = list(iter(dist_sampler))\n self.assertEqual(len(indices_list), local_num_samples)\n\n def validate_global_samples(local_num_samples):\n # Ensure that each rank processes the same number of samples.\n world_samples = [\n torch.LongTensor([0]).to(self.rank) for _ in range(world_size)\n ]\n dist.all_gather(\n world_samples, torch.tensor([local_num_samples]).to(self.rank)\n )\n world_samples = [sample.item() for sample in world_samples]\n self.assertEqual(len(set(world_samples)), 1)\n\n validate_global_samples(local_num_samples)\n\n # drop_last=False is the default and will add additional indices to be sampled,\n # increasing the effective dataset size.\n dist_sampler_added_samples = DistributedSampler(dataset=dataset)\n local_num_samples, local_dataset_size = (\n dist_sampler_added_samples.num_samples,\n dist_sampler_added_samples.total_size,\n )\n # The effective dataset size is the smallest integer that is >= dataset_size\n # and divisible by the world size.\n self.assertEqual(local_num_samples, math.ceil(dataset_size / world_size))\n self.assertEqual(local_dataset_size, local_num_samples * world_size)\n indices_list = list(iter(dist_sampler_added_samples))\n self.assertEqual(len(indices_list), local_num_samples)\n\n # Ensure that each rank processes the same number of samples.\n validate_global_samples(local_num_samples)\n\n # Ensure additional samples are padded even when\n # the extremely small dataset is given.\n dist_sampler_added_samples_tiny = DistributedSampler(dataset=dataset_tiny)\n local_num_samples, local_dataset_size = (\n dist_sampler_added_samples_tiny.num_samples,\n dist_sampler_added_samples_tiny.total_size,\n )\n self.assertEqual(\n local_num_samples, math.ceil(dataset_tiny_size / world_size)\n )\n self.assertEqual(local_dataset_size, local_num_samples * world_size)\n indices_list = list(iter(dist_sampler_added_samples_tiny))\n self.assertEqual(len(indices_list), local_num_samples)\n validate_global_samples(local_num_samples)\n\n @require_backend({\"nccl\", \"gloo\"})\n @require_n_gpus_for_nccl_backend(\n int(os.environ[\"WORLD_SIZE\"]), os.environ[\"BACKEND\"]\n )\n def test_allgather_object(self):\n # Only set device for NCCL backend since it must use GPUs.\n backend = os.environ[\"BACKEND\"]\n if backend == \"nccl\":\n # Case where rank != GPU device.\n next_rank = (self.rank + 1) % int(self.world_size)\n torch.cuda.set_device(next_rank)\n\n # If GPU test, add object with GPU tensor\n if backend == \"nccl\":\n COLLECTIVES_OBJECT_TEST_LIST.append(Foo(torch.randn(3, 3, device=0)))\n\n gather_objects = COLLECTIVES_OBJECT_TEST_LIST\n\n output_gathered = [None for _ in range(dist.get_world_size())]\n dist.all_gather_object(\n output_gathered, gather_objects[self.rank % len(gather_objects)]\n )\n\n for i, val in enumerate(output_gathered):\n expected = gather_objects[i % len(gather_objects)]\n self.assertEqual(val, expected)\n\n output_gathered = [None for _ in range(dist.get_world_size())]\n 
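The padding test above checks the `num_samples`/`total_size` bookkeeping of `DistributedSampler` for both `drop_last` settings. A standalone sketch of the same arithmetic, assuming a `torchrun` launch, the gloo backend and a toy list dataset (the dataset size and batch size are invented for illustration):

```python
import math
import torch.distributed as dist
from torch.utils.data import DataLoader
from torch.utils.data.distributed import DistributedSampler

if __name__ == "__main__":
    # Assumes launch via torchrun; gloo keeps the sketch CPU-only.
    dist.init_process_group(backend="gloo")
    world_size = dist.get_world_size()

    dataset = list(range(103))  # deliberately not divisible by world_size

    # drop_last=True truncates so every rank sees the same number of samples.
    truncating = DistributedSampler(dataset, drop_last=True)
    # drop_last=False (the default) pads by repeating indices instead.
    padding = DistributedSampler(dataset)

    # Same formulas the test above asserts.
    expected_truncated = (
        math.ceil((len(dataset) - world_size) / world_size)
        if len(dataset) % world_size != 0
        else len(dataset) // world_size
    )
    assert truncating.num_samples == expected_truncated
    assert padding.num_samples == math.ceil(len(dataset) / world_size)
    assert padding.total_size == padding.num_samples * world_size

    loader = DataLoader(dataset, batch_size=4, sampler=padding)
    for epoch in range(2):
        padding.set_epoch(epoch)  # reshuffles consistently across ranks
        for _batch in loader:
            pass

    dist.destroy_process_group()
```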
dist.all_gather_object(\n output_gathered, gather_objects[self.rank % len(gather_objects)]\n )\n\n @require_backend({\"gloo\"})\n @sandcastle_skip_if(BACKEND == \"nccl\", \"NCCL does not support gather\")\n def test_gather_object(self):\n # Ensure stateful objects can be gathered\n gather_objects = COLLECTIVES_OBJECT_TEST_LIST\n output_gathered = [None for _ in range(dist.get_world_size())]\n gather_on_rank = 0\n my_rank = dist.get_rank()\n dist.gather_object(\n gather_objects[self.rank % len(gather_objects)],\n object_gather_list=output_gathered\n if my_rank == gather_on_rank\n else None,\n dst=gather_on_rank,\n )\n if my_rank != gather_on_rank:\n self.assertEqual(\n output_gathered, [None for _ in range(dist.get_world_size())]\n )\n else:\n for i, val in enumerate(output_gathered):\n expected = gather_objects[i % len(gather_objects)]\n self.assertEqual(val, expected)\n\n # Validate errors when objects can't be pickled.\n class Bar:\n pass\n\n b = Bar()\n gather_objects = [b for _ in range(dist.get_world_size())]\n with self.assertRaisesRegex(AttributeError, \"Can't pickle local object\"):\n dist.all_gather_object(\n [None for _ in range(dist.get_world_size())],\n gather_objects[self.rank],\n )\n\n @require_backend({\"nccl\"})\n @require_backends_available({\"nccl\"})\n @skip_if_lt_x_gpu(2)\n def test_nccl_gather_object_err(self):\n output_gathered = [None for _ in range(dist.get_world_size())]\n gather_on_rank = 0\n # Case where rank != GPU device.\n my_rank = dist.get_rank()\n next_rank = (my_rank + 1) % dist.get_world_size()\n torch.cuda.set_device(next_rank)\n with self.assertRaisesRegex(\n RuntimeError, \"ProcessGroupNCCL does not support gather\"\n ):\n dist.gather_object(\n \"foo\",\n object_gather_list=output_gathered\n if my_rank == gather_on_rank\n else None,\n dst=gather_on_rank,\n )\n\n def validate_net_equivalence(self, net):\n # Helper to validate synchronization of nets across ranks.\n net_module_states = list(net.module.state_dict().values())\n # Check that all tensors in module's state_dict() are equal.\n for t in net_module_states:\n tensor_list = [\n torch.zeros_like(t) for _ in range(dist.get_world_size())\n ]\n dist.all_gather(tensor_list, t)\n for tensor in tensor_list:\n self.assertEqual(tensor, t)\n\n @skip_if_lt_x_gpu(2)\n @sandcastle_skip_if(\n BACKEND != \"nccl\" and BACKEND != \"gloo\",\n \"Only NCCL and GLOO backend support DistributedDataParallel\",\n )\n def test_ddp_sync_params_and_buffers(self):\n # Test that after calling _sync_params_and_buffers, models across ranks\n # are the same and are equal to the model on the input rank.\n dim = 2\n rank = self.rank\n rank_to_broadcast = 1\n # Seed to ensure that ranks are initialized with different initial models.\n torch.manual_seed(rank)\n model = nn.Linear(dim, dim, bias=False)\n net = torch.nn.parallel.DistributedDataParallel(\n model.cuda(rank), device_ids=[self.rank], bucket_cap_mb=1\n )\n new_model = nn.Linear(dim, dim, bias=False).cuda(rank)\n net.module = copy.deepcopy(new_model)\n # Assert params are different\n net_module_states = list(net.module.state_dict().values())\n for t in net_module_states:\n tensor_list = [\n torch.zeros_like(t) for _ in range(dist.get_world_size())\n ]\n dist.all_gather(tensor_list, t)\n for i, tensor in enumerate(tensor_list):\n if i == rank:\n self.assertEqual(t, tensor)\n else:\n # tensor from another rank should be different.\n self.assertNotEqual(t, tensor)\n\n net._sync_params_and_buffers(authoritative_rank=rank_to_broadcast)\n # Now all model params should be the 
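`all_gather_object` and `gather_object`, exercised above, move arbitrary picklable Python objects rather than tensors. A minimal sketch, assuming a `torchrun` launch and the gloo backend (the neighbouring test shows that ProcessGroupNCCL rejects gather, so gloo is the safe choice here):

```python
import torch.distributed as dist

if __name__ == "__main__":
    # Assumes launch via torchrun; gloo supports both collectives on CPU.
    dist.init_process_group(backend="gloo")
    rank = dist.get_rank()
    world_size = dist.get_world_size()

    obj = {"rank": rank, "payload": [rank] * 3}  # any picklable object works

    # Every rank receives every rank's object.
    gathered = [None] * world_size
    dist.all_gather_object(gathered, obj)

    # Only the destination rank receives the full list; other ranks pass None.
    dst = 0
    output = [None] * world_size if rank == dst else None
    dist.gather_object(obj, object_gather_list=output, dst=dst)

    if rank == dst:
        print(output)
    dist.destroy_process_group()
```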
same.\n self.validate_net_equivalence(net)\n # Since the network params were broadcast from rank_to_broadcast, validate that\n # they are the same as new_model on rank_to_broadcast.\n if rank == rank_to_broadcast:\n expected_states = new_model.state_dict().values()\n for t, expected in zip(net_module_states, expected_states):\n self.assertEqual(t, expected)\n\n @skip_if_lt_x_gpu(2)\n @sandcastle_skip_if(\n BACKEND != \"nccl\" and BACKEND != \"gloo\",\n \"Only NCCL and GLOO backend support DistributedDataParallel\",\n )\n def test_ddp_grad_div_uneven_inputs(self):\n # Test gradient division during training with join() API. If\n # divide_by_initial_world_size=False, we scale by the effective world\n # size when allreducing grads.\n dim = 5\n batch = 1\n grad_scale = 50\n rank = self.rank\n model = nn.Linear(dim, dim, bias=False)\n inp = torch.ones(batch, dim, device=self.rank) * grad_scale\n net = torch.nn.parallel.DistributedDataParallel(\n model.cuda(rank), device_ids=[self.rank], bucket_cap_mb=1\n )\n n_iters = 3\n if self.rank > 0:\n n_iters += 2\n\n with net.join(divide_by_initial_world_size=False):\n for _ in range(n_iters):\n loss = net(inp).sum()\n loss.backward()\n # The grad is always expected_grad, since we divide by the number\n # of currently active processes and inactive processes contribute\n # zero gradient. If we kept dividing by static initial world\n # size as processes leave, the grad would be smaller.\n expected_grad = torch.ones(dim, dim, device=self.rank) * grad_scale\n param = list(net.parameters())[0]\n self.assertEqual(expected_grad, param.grad)\n # Avoid accumulating grads so that it's the same every iteration\n net.zero_grad()\n torch.cuda.synchronize(device=self.rank)\n\n # If divide_by_initial_world_size=True (default), we always scale grads\n # by the initial world_size.\n with net.join(divide_by_initial_world_size=True):\n for i in range(n_iters):\n loss = net(inp).sum()\n loss.backward()\n effective_ws = dist.get_world_size()\n if i >= 3:\n effective_ws -= 1\n expected_grad = (\n torch.ones(dim, dim, device=self.rank)\n * grad_scale\n * effective_ws\n ) / dist.get_world_size()\n param = list(net.parameters())[0]\n self.assertEqual(expected_grad, param.grad)\n # Avoid accumulating grad so that it's the same every iteration.\n net.zero_grad()\n torch.cuda.synchronize(device=self.rank)\n\n def _test_ddp_profiling(self, profiler_ctx):\n batch = 3\n dim = 10\n num_iters = 6\n torch.cuda.set_device(self.rank)\n model = nn.Linear(dim, dim, bias=False)\n inp = torch.rand(batch, dim, device=self.rank)\n net = torch.nn.parallel.DistributedDataParallel(\n model.cuda(self.rank),\n device_ids=[self.rank],\n )\n profiler_ctx_copy = copy.deepcopy(profiler_ctx)\n\n with profiler_ctx as prof:\n for i in range(num_iters):\n loss = net(inp).sum()\n loss.backward()\n\n all_reduce_event_name = f\"{dist.get_backend()}:all_reduce\"\n events = get_profiling_event(all_reduce_event_name, prof)\n event_count = sum(e.count for e in events)\n self.assertEqual(event_count, num_iters)\n for event in events:\n self.assertTrue(event.is_async)\n self.assertEqual(event.name, all_reduce_event_name)\n\n broadcast_event_name = f\"{dist.get_backend()}:broadcast\"\n broadcast_events = get_profiling_event(broadcast_event_name, prof)\n event_count = sum(e.count for e in broadcast_events)\n # Broadcast is called during rebuild_buckets\n self.assertGreaterEqual(event_count, 1)\n for event in broadcast_events:\n self.assertEqual(event.name, broadcast_event_name)\n\n # Run DDP with profiling for a few 
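The uneven-input tests above wrap training in `DistributedDataParallel.join()` so that ranks which run out of data keep shadowing the collectives of the ranks still training. A reduced sketch with made-up iteration counts, assuming `torchrun`, NCCL and one GPU per process:

```python
import torch
import torch.distributed as dist
import torch.nn as nn

if __name__ == "__main__":
    # Assumes launch via torchrun with one GPU per process.
    dist.init_process_group(backend="nccl")
    rank = dist.get_rank()
    torch.cuda.set_device(rank)

    net = nn.parallel.DistributedDataParallel(
        nn.Linear(5, 5, bias=False).cuda(rank), device_ids=[rank]
    )
    opt = torch.optim.SGD(net.parameters(), lr=0.01)
    inp = torch.ones(1, 5, device=rank)

    # Deliberately uneven: higher ranks train for more iterations.
    n_iters = 3 + 2 * rank

    # join() lets ranks that finish early keep answering allreduces issued by
    # the ranks that are still training, so nothing hangs.
    with net.join(divide_by_initial_world_size=True):
        for _ in range(n_iters):
            opt.zero_grad()
            net(inp).sum().backward()
            opt.step()

    dist.destroy_process_group()
```

As the test above shows, `divide_by_initial_world_size=False` instead scales gradients by the number of ranks that are still active at each step.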
iterations, then enable profiling\n # for a single pass, and ensure it is recorded. This tests that the\n # thread local state is correctly updated.\n net = torch.nn.parallel.DistributedDataParallel(\n model.cuda(self.rank),\n device_ids=[self.rank],\n find_unused_parameters=True,\n )\n for i in range(3):\n loss = net(inp).sum()\n loss.backward()\n # Now enable the profiler.\n with profiler_ctx_copy as prof:\n loss = net(inp).sum()\n loss.backward()\n\n events = get_profiling_event(all_reduce_event_name, prof)\n self.assertGreaterEqual(len(events), 1)\n self.assertGreaterEqual(events[0].count, 1)\n self.assertEqual(events[0].name, all_reduce_event_name)\n for event in events:\n self.assertTrue(event.is_async)\n # Ensure searching unused parameters was profiled\n events = get_profiling_event(\"search_unused_parameters\", prof)\n self.assertEqual(len(events), 1)\n\n @require_backend({\"gloo\", \"nccl\"})\n @require_backends_available({\"gloo\", \"nccl\"})\n @skip_if_lt_x_gpu(2)\n def test_ddp_profiling_autograd_profiler(self):\n autograd_profiler_ctx = torch.autograd.profiler.profile()\n return self._test_ddp_profiling(profiler_ctx=autograd_profiler_ctx)\n\n @require_backend({\"gloo\", \"nccl\"})\n @require_backends_available({\"gloo\", \"nccl\"})\n @skip_if_lt_x_gpu(2)\n @sandcastle_skip_if(IS_FBCODE, \"Kineto in fbcode code causes hang\")\n @sandcastle_skip_if(\n IS_MACOS or IS_WINDOWS,\n \"torch.profiler not enabled for mac/windows: https://github.com/pytorch/pytorch/pull/56124\",\n )\n def test_ddp_profiling_torch_profiler(self):\n cpu_act = torch.profiler.ProfilerActivity.CPU\n cuda_act = torch.profiler.ProfilerActivity.CUDA\n torch_profiler_ctx = torch.profiler.profile(activities=[cpu_act, cuda_act])\n self._test_ddp_profiling(profiler_ctx=torch_profiler_ctx)\n\n @skip_if_lt_x_gpu(2)\n @sandcastle_skip_if(\n BACKEND != \"nccl\" and BACKEND != \"gloo\",\n \"Only NCCL and GLOO backend support DistributedDataParallel\",\n )\n def test_ddp_join_model_equivalence(self):\n # Verifies equivalence with model training locally and with DDP under\n # the join context manager.\n batch = 3\n dim = 10\n learning_rate = 0.03\n model = nn.Linear(dim, dim, bias=False)\n inp = torch.rand(batch, dim, device=self.rank)\n local_model = copy.deepcopy(model)\n local_model = local_model.cuda(self.rank)\n rank_to_iter_mapping = {\n rank: 2 * (rank + 1) for rank in range(dist.get_world_size())\n }\n # run local model\n local_iters = sum(rank_to_iter_mapping.values())\n local_optim = torch.optim.SGD(local_model.parameters(), lr=learning_rate)\n for _ in range(local_iters):\n local_optim.zero_grad()\n out = local_model(inp)\n loss = out.sum()\n loss.backward()\n local_optim.step()\n\n # run DDP model with join API\n num_iters = rank_to_iter_mapping[self.rank]\n net = torch.nn.parallel.DistributedDataParallel(\n model.cuda(self.rank), device_ids=[self.rank]\n )\n ddp_optim = torch.optim.SGD(\n model.parameters(), lr=learning_rate * dist.get_world_size()\n )\n with net.join():\n for i in range(num_iters):\n ddp_optim.zero_grad()\n out = net(inp)\n loss = out.sum()\n loss.backward()\n torch.cuda.synchronize(device=self.rank)\n ddp_optim.step()\n\n # Validate model state dicts are equal\n for (_, local_tensor), (_, dist_tensor) in zip(\n local_model.state_dict().items(), net.module.state_dict().items()\n ):\n self.assertEqual(local_tensor, dist_tensor)\n\n def _run_uneven_inputs_test(\n self,\n test_case,\n iteration_mapping,\n find_unused_params,\n ):\n model = test_case.model\n inp = test_case.inp\n rank = 
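The profiling tests above look for asynchronous `<backend>:all_reduce` and `<backend>:broadcast` events emitted by DDP. A sketch of collecting the same trace with `torch.profiler`, assuming `torchrun`, NCCL and one GPU per process (model size and iteration count are illustrative):

```python
import torch
import torch.distributed as dist
import torch.nn as nn
from torch.profiler import ProfilerActivity, profile

if __name__ == "__main__":
    # Assumes launch via torchrun with one GPU per process.
    dist.init_process_group(backend="nccl")
    rank = dist.get_rank()
    torch.cuda.set_device(rank)

    net = nn.parallel.DistributedDataParallel(
        nn.Linear(10, 10, bias=False).cuda(rank), device_ids=[rank]
    )
    inp = torch.rand(3, 10, device=rank)

    with profile(activities=[ProfilerActivity.CPU, ProfilerActivity.CUDA]) as prof:
        for _ in range(6):
            net(inp).sum().backward()

    # Gradient all-reduces appear as asynchronous "<backend>:all_reduce" events.
    if rank == 0:
        print(prof.key_averages().table(sort_by="cuda_time_total", row_limit=15))

    dist.destroy_process_group()
```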
self.rank\n sync_interval = test_case.sync_interval\n torch.cuda.set_device(rank)\n # Ensure all outsanding GPU work is comlete so this test runs independently.\n dist.barrier()\n # Bucket_cap_mb is intentionally low to test allreduce scheduling when\n # there are many buckets.\n net = torch.nn.parallel.DistributedDataParallel(\n model.cuda(rank),\n device_ids=[rank],\n bucket_cap_mb=1,\n find_unused_parameters=find_unused_params,\n )\n # Register hook if specified\n if test_case.hook is not None:\n net.register_comm_hook(test_case.state, test_case.hook)\n print(f\"registered hook {test_case.hook}\")\n\n # Determine num iters for this rank via the passed in mapping.\n num_iters = iteration_mapping[rank]\n # If we throw when earliest rank terminates, we should ensure\n # that we iterate for that minimum number of times.\n num_iters_tensor = torch.tensor(\n [num_iters], device=torch.cuda.current_device()\n )\n dist.all_reduce(num_iters_tensor, op=dist.ReduceOp.MIN)\n min_num_iters = num_iters_tensor.item()\n total_iters = 0\n if test_case.throw_on_early_termination:\n if min_num_iters == num_iters:\n # Early termination rank(s)\n exception_ctx = self.assertRaisesRegex(\n RuntimeError, f\"Rank {self.rank} exhausted all inputs\"\n )\n else:\n # Non early termination rank\n exception_ctx = self.assertRaisesRegex(\n RuntimeError,\n \"Detected at least one rank that exhausted inputs.\",\n )\n else:\n exception_ctx = suppress()\n with exception_ctx:\n with net.join(\n throw_on_early_termination=test_case.throw_on_early_termination\n ):\n for i in range(num_iters):\n # Use model.no_sync() to disable grad synchronization every\n # sync_interval.\n if i % sync_interval != 0:\n context = net.no_sync()\n else:\n context = suppress()\n with context:\n if isinstance(inp, tuple):\n loss = net(*inp).sum()\n else:\n loss = net(inp).sum()\n loss.backward()\n self._model_step(net)\n # Ensure completion of GPU kernels (including allreduce). If the\n # join API is not properly implemented, then this should hang\n # since the allreduce will hang.\n torch.cuda.synchronize(device=rank)\n total_iters += 1\n if test_case.throw_on_early_termination:\n # Ensure we iterated min_num_iters times.\n self.assertEqual(total_iters, min_num_iters)\n else:\n # Ensure we iterated at least min_num_iters times.\n self.assertGreaterEqual(total_iters, min_num_iters)\n\n # Ensure completion of all GPU kernels.\n torch.cuda.synchronize(device=rank)\n # When throwing on early rank termination, we do not\n # broadcast model state from an authoritative rank. 
All models\n # should already be in sync.\n if not test_case.throw_on_early_termination:\n self.assertTrue(net._authoritative_rank)\n # All ranks should have agreed on the same authoritative_rank!\n final_rank_tensor = torch.tensor(\n [net._authoritative_rank], device=self.rank\n )\n tensor_list = [\n torch.zeros_like(final_rank_tensor)\n for _ in range(dist.get_world_size())\n ]\n dist.all_gather(tensor_list, final_rank_tensor)\n max_rank = dist.get_world_size() - 1\n self.assertSetEqual(\n {max_rank}, set(tensor.item() for tensor in tensor_list)\n )\n # Ensure that all models are the same across ranks after all have joined.\n self.validate_net_equivalence(net)\n # Ensure that running with DDP uneven inputs was logged.\n ddp_logging_data = net._get_ddp_logging_data()\n self.assertTrue(ddp_logging_data.get(\"join_uneven_inputs\"))\n dist.barrier()\n\n @skip_if_lt_x_gpu(2)\n @sandcastle_skip_if(\n BACKEND != \"nccl\" and BACKEND != \"gloo\",\n \"Only NCCL and GLOO backend support DistributedDataParallel\",\n )\n def test_ddp_uneven_inputs_stop_iteration_sync_bn(self):\n # Tests that uneven inputs join handler correctly throws StopIteration\n # for models with SyncBN or general collective comm when\n # throw_on_early_termination=True.\n class ModelWithComm(torch.nn.Module):\n def __init__(self):\n super().__init__()\n self.lin = nn.Linear(2, 40, bias=False)\n\n def forward(self, x):\n x = self.lin(x)\n dist.all_reduce(x)\n return x\n\n torch.cuda.set_device(self.rank)\n model_bn = BN_NET\n model_bn = nn.SyncBatchNorm.convert_sync_batchnorm(\n copy.deepcopy(model_bn)\n ).cuda(self.rank)\n comm_model = ModelWithComm().cuda(self.rank)\n model_input = torch.randn(10, 2).cuda(torch.cuda.current_device())\n\n for model in [model_bn, comm_model]:\n model = torch.nn.parallel.DistributedDataParallel(\n model,\n device_ids=[self.rank],\n )\n min_num_iters = 5\n if self.rank != 0:\n # Early termination rank(s)\n num_iters = min_num_iters\n exception_ctx = self.assertRaisesRegex(\n RuntimeError, f\"Rank {self.rank} exhausted all inputs\"\n )\n else:\n # Non early termination rank\n num_iters = min_num_iters * 2\n exception_ctx = self.assertRaisesRegex(\n RuntimeError,\n \"Detected at least one rank that exhausted inputs.\",\n )\n n = 0\n with exception_ctx:\n with model.join(throw_on_early_termination=True):\n for i in range(num_iters):\n loss = model(model_input).sum()\n loss.backward()\n self._model_step(model)\n n += 1\n\n self.assertEqual(n, min_num_iters)\n # Verify model equivalence\n self.validate_net_equivalence(model)\n\n @skip_if_lt_x_gpu(2)\n @sandcastle_skip_if(\n BACKEND != \"nccl\" and BACKEND != \"gloo\",\n \"Only NCCL and GLOO backend support DistributedDataParallel\",\n )\n def test_ddp_uneven_inputs(self):\n dim = 1000\n batch = 1\n # Create a variety of models to run uneven input tests on.\n large_model = nn.Sequential(\n nn.Conv2d(1, 20, 5),\n nn.ReLU(),\n nn.Conv2d(20, 32, 5),\n nn.ReLU(),\n nn.Conv2d(32, 256, 5),\n nn.ReLU(),\n )\n small_model = nn.Linear(dim, dim, bias=False)\n bn_net = BatchNormNet()\n\n class UnusedParamModule(nn.Module):\n def __init__(self, unused_params_rank):\n super().__init__()\n self.t0 = Task()\n self.t1 = Task()\n self.unused_params_rank = unused_params_rank\n\n def task_parameters(self):\n return (self.t0.p, self.t1.p)\n\n def forward(self, x, rank):\n return (\n self.t1(self.t0(x))\n if rank != self.unused_params_rank\n else self.t1(x)\n )\n\n unjoined_rank_with_unused_params_model = UnusedParamModule(1)\n joined_rank_with_unused_params_model = 
UnusedParamModule(0)\n\n rank = self.rank\n models_to_test = [\n # Network with batchnorm\n DDPUnevenTestInput(\n name=\"batch_norm_net\",\n model=bn_net,\n inp=torch.ones(batch, 2, device=rank),\n sync_interval=1,\n ),\n DDPUnevenTestInput(\n name=\"large_conv_model\",\n model=large_model,\n inp=torch.ones(batch, batch, dim, dim, device=rank),\n sync_interval=1,\n ),\n DDPUnevenTestInput(\n name=\"small_model\",\n model=small_model,\n inp=torch.ones(batch, dim, device=rank),\n sync_interval=1,\n ),\n # Unused parameter test where rank that does not join early has unused params\n DDPUnevenTestInput(\n name=\"unjoined_rank_with_unused_params_model\",\n model=unjoined_rank_with_unused_params_model,\n inp=(torch.ones(batch, 2, device=rank), rank),\n sync_interval=1,\n ),\n # Unused parameter test where rank that does join early has unused params\n DDPUnevenTestInput(\n name=\"joined_rank_with_unused_params_model\",\n model=joined_rank_with_unused_params_model,\n inp=(torch.ones(batch, 2, device=rank), rank),\n sync_interval=1,\n ),\n ]\n\n # Test models that have hook installed.\n models_with_hook = [\n DDPUnevenTestInput(\n name=\"small_model_allreduce_hook\",\n model=small_model,\n hook=default.allreduce_hook,\n state=None,\n inp=torch.ones(batch, dim, device=rank),\n sync_interval=1,\n ),\n DDPUnevenTestInput(\n name=\"small_model_power_sgd_hook\",\n model=small_model,\n hook=powerSGD.powerSGD_hook,\n state=powerSGD.PowerSGDState(\n process_group=None,\n matrix_approximation_rank=1,\n # Config so that powerSGD runs immediately instead of\n # allreduce.\n start_powerSGD_iter=1,\n warm_start=False,\n use_error_feedback=False,\n ),\n inp=torch.ones(batch, dim, device=rank),\n sync_interval=1,\n ),\n ]\n models_to_test.extend(models_with_hook)\n\n # Add resnet model if we have torchvision installed.\n if HAS_TORCHVISION:\n resnet_model = torchvision.models.resnet50()\n models_to_test.append(\n DDPUnevenTestInput(\n name=\"resnet_model\",\n model=resnet_model,\n inp=torch.ones(1, 3, 1000, 1000),\n sync_interval=1,\n )\n )\n\n # Test with no_sync every 2, 3, 4, ... 
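The hook-enabled inputs above register gradient-communication hooks on the DDP instance: the plain allreduce hook and PowerSGD compression. A sketch mirroring those registrations; the layer size, iteration count and `start_powerSGD_iter=2` are illustrative choices rather than values from the test.

```python
import torch
import torch.distributed as dist
import torch.nn as nn
from torch.distributed.algorithms.ddp_comm_hooks import default_hooks as default
from torch.distributed.algorithms.ddp_comm_hooks import powerSGD_hook as powerSGD

if __name__ == "__main__":
    # Assumes launch via torchrun with one GPU per process.
    dist.init_process_group(backend="nccl")
    rank = dist.get_rank()
    torch.cuda.set_device(rank)

    net = nn.parallel.DistributedDataParallel(
        nn.Linear(1000, 1000, bias=False).cuda(rank), device_ids=[rank]
    )

    use_powersgd = True
    if use_powersgd:
        # Low-rank gradient compression; rank-1 approximation as in the test above.
        state = powerSGD.PowerSGDState(
            process_group=None,
            matrix_approximation_rank=1,
            start_powerSGD_iter=2,
        )
        net.register_comm_hook(state, powerSGD.powerSGD_hook)
    else:
        # Plain allreduce hook, equivalent to DDP's default gradient averaging.
        net.register_comm_hook(state=None, hook=default.allreduce_hook)

    inp = torch.rand(4, 1000, device=rank)
    for _ in range(5):
        net(inp).sum().backward()

    dist.destroy_process_group()
```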
iterations.\n models_with_sync = []\n for i, test_input in enumerate(models_to_test):\n models_with_sync.append(\n DDPUnevenTestInput(\n name=test_input.name,\n model=test_input.model,\n inp=test_input.inp,\n sync_interval=i + 2,\n )\n )\n\n throw_on_early_term_tests = []\n for test_input in models_to_test:\n throw_on_early_term_tests.append(\n DDPUnevenTestInput(\n name=test_input.name,\n model=test_input.model,\n inp=test_input.inp,\n sync_interval=test_input.sync_interval,\n throw_on_early_termination=True,\n )\n )\n\n models_to_test.extend(models_with_sync)\n models_to_test.extend(throw_on_early_term_tests)\n\n # 0 iteration tests for when one process does not train model at all, so\n # we must shadow the broadcast calls made when rebuilding buckets.\n baseline_num_iters = [0, 5]\n iteration_offsets = [2, 3, 10]\n num_uneven_ranks = [1]\n if dist.get_world_size() > 2:\n num_uneven_ranks.append(2)\n iteration_mappings = []\n # Generate rank : num_iters mappings for various uneven input scenarios.\n # This includes cases where rank 0 joins early and all other ranks join\n # later, and scenarios where multiple ranks join early, but at different\n # iterations, and later ranks join later.\n for num_early_join_ranks in num_uneven_ranks:\n for baseline_iter in baseline_num_iters:\n for offset in iteration_offsets:\n mapping = {\n rank: baseline_iter\n for rank in range(0, num_early_join_ranks)\n }\n # if num_early_join_ranks > 1, ranks > 0 that will join early\n # iterate offset//2 more times than rank 0, to test nodes\n # depleting inputs at different times.\n if num_early_join_ranks > 1:\n for rank in mapping.keys():\n if rank > 0:\n mapping[rank] += offset // 2\n mapping.update(\n {\n rank: baseline_iter + offset\n for rank in range(\n num_early_join_ranks, dist.get_world_size()\n )\n }\n )\n iteration_mappings.append(mapping)\n\n for (test_case, iteration_mapping) in itertools.product(\n models_to_test, iteration_mappings\n ):\n if self.rank == 0:\n print(\n f\"\"\"Running test: {test_case.name} sync interval\n {test_case.sync_interval} with iteration mapping\n {iteration_mapping}\"\"\"\n )\n self._run_uneven_inputs_test(\n test_case,\n iteration_mapping,\n find_unused_params=(\"unused_params_model\" in test_case.name),\n )\n\n @skip_if_lt_x_gpu(2)\n @sandcastle_skip_if(\n BACKEND != \"nccl\" and BACKEND != \"gloo\",\n \"Only NCCL and GLOO backend support DistributedDataParallel\",\n )\n def test_ddp_uneven_input_join_disable(self):\n # tests that if net.join() with enable=False is specified, DDP works as\n # expected with even inputs.\n torch.manual_seed(self.rank)\n net = torch.nn.parallel.DistributedDataParallel(\n torch.nn.Linear(1, 1).cuda(self.rank), device_ids=[self.rank]\n )\n inp = torch.ones(1) * self.rank\n n_iters = 5\n world_size = dist.get_world_size()\n with net.join(enable=False):\n for _ in range(n_iters):\n # Clear grads\n grad = net.module.weight.grad\n if grad is not None:\n grad.requires_grad_(False)\n grad.zero_()\n out = net(inp)\n loss = out.sum()\n loss.backward()\n # Validate gradients to ensure that we divide by the correct\n # world_size when join mode is disabled.\n expected_grad = sum(i for i in range(world_size)) / world_size\n self.assertEqual(net.module.weight.grad.item(), expected_grad)\n\n join_config = net._join_config\n self.assertFalse(join_config.enable)\n self.validate_net_equivalence(net)\n\n @skip_if_lt_x_gpu(2)\n @sandcastle_skip_if(\n BACKEND != \"nccl\" and BACKEND != \"gloo\",\n \"Only NCCL and GLOO backend support 
DistributedDataParallel\",\n )\n def test_ddp_uneven_input_exception(self):\n # Tests that exceptions during training are correctly propagated by the\n # context manager.\n error_str = \"Intentional error\"\n\n class ExceptionModule(nn.Module):\n def __init__(self):\n super().__init__()\n self.param = nn.Parameter(torch.ones(1, requires_grad=True))\n\n def forward(self, _):\n raise ValueError(error_str)\n\n exception_module = ExceptionModule()\n net = torch.nn.parallel.DistributedDataParallel(\n exception_module.cuda(self.rank), device_ids=[self.rank]\n )\n inp = torch.ones(1)\n with self.assertRaisesRegex(ValueError, error_str):\n with net.join():\n out = net(inp)\n loss = out.sum()\n loss.backward()\n\n @require_backend({\"nccl\", \"gloo\"})\n @require_n_gpus_for_nccl_backend(\n int(os.environ[\"WORLD_SIZE\"]), os.environ[\"BACKEND\"]\n )\n def test_broadcast_object_list(self):\n # Only set device for NCCL backend since it must use GPUs.\n # Case where rank != GPU device.\n next_rank = (self.rank + 1) % int(self.world_size)\n backend = os.environ[\"BACKEND\"]\n if backend == \"nccl\":\n torch.cuda.set_device(next_rank)\n\n src_rank = 0\n # If GPU test, add object with GPU tensor\n if backend == \"nccl\":\n COLLECTIVES_OBJECT_TEST_LIST.append(Foo(torch.randn(3, 3, device=0)))\n\n objects = (\n COLLECTIVES_OBJECT_TEST_LIST\n if self.rank == src_rank\n else [None for _ in COLLECTIVES_OBJECT_TEST_LIST]\n )\n\n # Single object test with device specified. Backend=\"gloo\", device=cpu\n if backend != \"nccl\":\n single_obj_list = [objects[0]]\n if self.rank != src_rank:\n self.assertNotEqual(\n single_obj_list[0], COLLECTIVES_OBJECT_TEST_LIST[0]\n )\n dist.broadcast_object_list(\n single_obj_list, src=0, group=None, device=torch.device(\"cpu\")\n )\n self.assertEqual(single_obj_list[0], COLLECTIVES_OBJECT_TEST_LIST[0])\n\n # Single object test with device specified. Backend=\"gloo\", device=current_device+1\n # The test is gated by the fact GPU count is the same as world size to avoid the case\n # when backend is gloo but there is no multiple GPU devices.\n if backend != \"nccl\" and torch.cuda.device_count() == int(self.world_size):\n single_obj_list = [objects[0]]\n if self.rank != src_rank:\n self.assertNotEqual(\n single_obj_list[0], COLLECTIVES_OBJECT_TEST_LIST[0]\n )\n dist.broadcast_object_list(\n single_obj_list, src=0, group=None, device=torch.device(next_rank)\n )\n self.assertEqual(single_obj_list[0], COLLECTIVES_OBJECT_TEST_LIST[0])\n\n # Single object test with device specified. 
Backend=\"nccl\", device=current_device+1\n if backend == \"nccl\" and torch.cuda.device_count() == int(self.world_size):\n single_obj_list = [objects[0]]\n if self.rank != src_rank:\n self.assertNotEqual(\n single_obj_list[0], COLLECTIVES_OBJECT_TEST_LIST[0]\n )\n dist.broadcast_object_list(\n single_obj_list, src=0, group=None, device=torch.device(next_rank)\n )\n self.assertEqual(single_obj_list[0], COLLECTIVES_OBJECT_TEST_LIST[0])\n\n # Single object test: backward compatibility with device unspecified\n single_obj_list = [objects[0]]\n if self.rank != src_rank:\n self.assertNotEqual(single_obj_list[0], COLLECTIVES_OBJECT_TEST_LIST[0])\n dist.broadcast_object_list(single_obj_list, src=0)\n self.assertEqual(single_obj_list[0], COLLECTIVES_OBJECT_TEST_LIST[0])\n\n # Multiple input objects test\n if self.rank != src_rank:\n self.assertNotEqual(objects, COLLECTIVES_OBJECT_TEST_LIST)\n dist.broadcast_object_list(objects, src=0)\n self.assertEqual(objects, COLLECTIVES_OBJECT_TEST_LIST)\n\n def _test_ddp_ignore_params_arg(self, static_graph=False):\n class TestModel(nn.Module):\n def __init__(self, rank):\n self.rank = rank\n super(TestModel, self).__init__()\n self.fc1 = nn.Linear(1, 1, bias=False)\n # Proxy that will be materialized to another architecture later.\n # (after wrapping model with DDP)\n if self.rank == 0:\n self.fc2 = nn.Linear(1, 10, bias=False)\n else:\n self.fc2 = nn.Linear(10, 10, bias=False)\n\n def forward(self, x):\n x = self.fc1(x)\n x = self.fc2(x)\n return x\n\n device_id = self.rank\n # Ensure the test works for both find_unused_parameter and broadcast_buffer settings.\n for (find_unused, broadcast_buffers) in itertools.product(\n [False, True], [False, True]\n ):\n model = TestModel(self.rank).float().to(device_id)\n # Note that the model can have different shape buffers if we pass\n # them in to be ignored as well.\n model.fc2.register_buffer(\n \"ignore_buffer\", torch.zeros(5 + self.rank, device=self.rank)\n )\n proxy_params = list(model.fc2.parameters())\n proxy_buffers = list(model.fc2.buffers())\n model_fc2_name = [\n module_name\n for module_name, module in model.named_modules()\n if module is model.fc2\n ][0]\n proxy_param_names = [\n f\"{model_fc2_name}.{param_name}\"\n for param_name, _ in model.fc2.named_parameters()\n ]\n proxy_buffer_names = [\n f\"{model_fc2_name}.{buf_name}\"\n for buf_name, _ in model.fc2.named_buffers()\n ]\n # Specify that we should ignore proxy_params since it will be\n # materialized later.\n torch.nn.parallel.DistributedDataParallel._set_params_and_buffers_to_ignore_for_model(\n model, proxy_param_names + proxy_buffer_names\n )\n ddp = torch.nn.parallel.DistributedDataParallel(\n model,\n device_ids=[device_id],\n find_unused_parameters=find_unused,\n broadcast_buffers=broadcast_buffers,\n )\n if static_graph:\n ddp._set_static_graph()\n # Materialize new params. 
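`broadcast_object_list`, exercised above, replicates picklable objects from a source rank into the placeholder slots supplied by every other rank. A minimal sketch, assuming `torchrun` and the gloo backend with an explicit CPU device (the objects themselves are invented examples):

```python
import torch
import torch.distributed as dist

if __name__ == "__main__":
    # Assumes launch via torchrun; gloo keeps this CPU-only.
    dist.init_process_group(backend="gloo")
    rank = dist.get_rank()

    src = 0
    if rank == src:
        objects = ["config", {"lr": 0.03}, torch.arange(3)]
    else:
        # Non-source ranks supply placeholders of the same length.
        objects = [None, None, None]

    dist.broadcast_object_list(objects, src=src, device=torch.device("cpu"))
    print(f"rank {rank}: {objects}")
    dist.destroy_process_group()
```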
These are not registered in DDP and thus\n # don't have autograd hooks installed on them.\n ddp.module.fc2 = nn.Linear(1, 1, bias=False).to(device_id)\n # local model with the new materialized parameters.\n local_model = copy.deepcopy(ddp.module).cuda(self.rank)\n\n inp = torch.ones(1, dtype=torch.float).to(device_id) * (self.rank + 1)\n for i in range(6):\n ddp(inp).sum().backward()\n local_model(inp).sum().backward()\n # materialized param grad is not touched by DDP, so its grad should\n # be the same as if running locally.\n for materialized_param, local_param in zip(\n ddp.module.fc2.parameters(), local_model.fc2.parameters()\n ):\n self.assertEqual(materialized_param.grad, local_param.grad)\n\n # fc1 parameter grad should still be different, due to allreduce.\n for synced_param, local_param in zip(\n ddp.module.fc1.parameters(), local_model.fc1.parameters()\n ):\n self.assertFalse(synced_param.grad == local_param.grad)\n\n # Proxy module grad should not be touched\n for proxy_param in proxy_params:\n self.assertTrue(proxy_param.grad is None)\n\n # Synchronize since we run multiple iterations of this test, to\n # isolate failure hangs.\n torch.cuda.synchronize(device=self.rank)\n\n @require_backend({\"gloo\", \"nccl\"})\n @require_backends_available({\"gloo\", \"nccl\"})\n @skip_if_lt_x_gpu(2)\n def test_ddp_ignore_params_arg(self):\n self._test_ddp_ignore_params_arg(static_graph=False)\n self._test_ddp_ignore_params_arg(static_graph=True)\n\n @with_dist_debug_levels(levels=[\"OFF\", \"INFO\", \"DETAIL\"])\n @require_backend({\"gloo\", \"nccl\"})\n @require_backends_available({\"gloo\", \"nccl\"})\n @skip_if_lt_x_gpu(2)\n def test_ddp_unused_params_rebuild_buckets_exception(self):\n class ToyModel(nn.Module):\n def __init__(self):\n super(ToyModel, self).__init__()\n self.net1 = nn.Linear(10, 10, bias=False)\n self.net2 = nn.Linear(10, 10, bias=False)\n\n def forward(self, x):\n return self.net1(x)\n\n ddp = torch.nn.parallel.DistributedDataParallel(\n ToyModel().cuda(self.rank), device_ids=[self.rank]\n )\n for i in range(2):\n inp = torch.rand(1, 10)\n if i > 0:\n # On 2nd iteration, this will fail during rebuild_buckets,\n # but we should report an error regarding unused parameters\n # since that is the underlying root cause.\n try:\n ddp(inp).sum().backward()\n except RuntimeError as e:\n msg = str(e)\n verify_ddp_error_logged(ddp, msg)\n expected_strs = [\n ddp_prev_reduction_unfinished_str,\n ddp_recommend_find_unused_params_str,\n ddp_outputs_not_used_in_loss_str,\n ]\n # In debug mode, should show parameters that weren't reduced.\n # Without debug mode, should show suggestion to use debug mode.\n if dist._get_debug_mode() == dist._DistributedDebugLevel.OFF:\n expected_strs.append(ddp_suggest_debug_mode_str)\n else:\n unreduced_params = \", \".join([\"net2.weight\"])\n expected_strs.append(\n f\"did not receive grad for rank {self.rank}: {unreduced_params}\"\n )\n for s in expected_strs:\n self.assertTrue(s in msg, f\"Expected {s} to be in {msg}\")\n self.assertFalse(ddp_find_unused_params_enabled_str in msg)\n else:\n self.assertFalse(\n True, \"DDP unused parameters error not raised.\"\n )\n else:\n ddp(inp).sum().backward()\n\n dist.barrier()\n\n @require_backend({\"gloo\", \"nccl\"})\n @require_backends_available({\"gloo\", \"nccl\"})\n @skip_if_lt_x_gpu(2)\n def test_ddp_shared_grad_acc_unused_params(self):\n # When find_unused_parameters=True, ensure we mark unused parameters\n # even if they share gradient accumulators.\n class ToyModel(nn.Module):\n def __init__(self):\n 
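The ignore-parameters test above relies on `_set_params_and_buffers_to_ignore_for_model`, an underscore-prefixed static helper rather than a documented public API, to exclude named parameters and buffers from DDP's broadcast and allreduce. A heavily hedged sketch of that call; the module layout and the ignored name are invented for illustration.

```python
import torch
import torch.distributed as dist
import torch.nn as nn


class TwoLayer(nn.Module):
    def __init__(self):
        super().__init__()
        self.fc1 = nn.Linear(1, 1, bias=False)
        self.fc2 = nn.Linear(1, 10, bias=False)  # excluded from DDP syncing below

    def forward(self, x):
        return self.fc2(self.fc1(x))


if __name__ == "__main__":
    # Assumes launch via torchrun with one GPU per process.
    dist.init_process_group(backend="nccl")
    rank = dist.get_rank()
    torch.cuda.set_device(rank)

    model = TwoLayer().cuda(rank)
    # Internal helper used by the test above: listed names are neither
    # broadcast at construction nor allreduced during backward.
    nn.parallel.DistributedDataParallel._set_params_and_buffers_to_ignore_for_model(
        model, ["fc2.weight"]
    )
    ddp = nn.parallel.DistributedDataParallel(model, device_ids=[rank])

    # fc2.weight still receives a purely local gradient, as the test asserts.
    ddp(torch.ones(1, 1, device=rank)).sum().backward()
    dist.destroy_process_group()
```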
super(ToyModel, self).__init__()\n # net1, bias, and net1.bias are all unused params.\n self.net1 = nn.Linear(10, 5, bias=False)\n self.bias = nn.Parameter(torch.zeros(5))\n # net1.bias and self.bias are names for the same underlying\n # parameter, so they share the same grad acc. This caused\n # the bug reported in https://github.com/pytorch/pytorch/issues/41324.\n self.net1.bias = self.bias\n self.net2 = nn.Linear(10, 5)\n\n def forward(self, x):\n return self.net2(x)\n\n torch.cuda.set_device(self.rank)\n model = ToyModel().to(torch.cuda.current_device())\n ddp_model = torch.nn.parallel.DistributedDataParallel(\n model, device_ids=[self.rank], find_unused_parameters=True\n )\n inp = torch.randn(20, 10, device=self.rank)\n for i in range(6):\n out = ddp_model(inp)\n loss = out.sum()\n loss.backward()\n\n @require_backend({\"gloo\", \"nccl\"})\n @require_backends_available({\"gloo\", \"nccl\"})\n @skip_if_lt_x_gpu(2)\n def test_ddp_device(self):\n m = nn.Linear(10, 10).to(self.rank)\n expected_len = 2\n\n class TensorWrapper:\n __slots__ = [\"t\", \"moved_to_gpu\"]\n\n def __init__(self, t):\n self.t = t\n self.moved_to_gpu = False\n\n # Handlers for specific types of validation we want to do based on\n # the input type.\n\n def tuple_and_list_validator(x):\n self.assertTrue(len(x), expected_len)\n self.assertEqual(1, len(set(t.device for t in x)))\n self.assertEqual(x[0].device.index, self.rank)\n return x[0] + x[1]\n\n def namedtuple_validator(x):\n self.assertEqual(x._fields, EXPECTED_FIELDS)\n self.assertEqual(x.a.device.index, x.b.device.index)\n self.assertEqual(x.a.device.index, self.rank)\n return x.a + x.b\n\n def custom_type_validator(x):\n self.assertTrue(x.moved_to_gpu or (str(x.t.device) == \"cpu\"))\n x.t = x.t.to(self.rank)\n x.moved_to_gpu = True\n return x.t\n\n def dict_validator(x):\n self.assertTrue(EXPECTED_FIELDS[0] in x.keys())\n self.assertTrue(EXPECTED_FIELDS[1] in x.keys())\n self.assertEqual(1, len(set(t.device for t in x.values())))\n self.assertEqual(x[EXPECTED_FIELDS[0]].device.index, self.rank)\n return x[EXPECTED_FIELDS[0]] + x[EXPECTED_FIELDS[1]]\n\n validators = {\n TensorWrapper: custom_type_validator,\n tuple: tuple_and_list_validator,\n list: tuple_and_list_validator,\n TestNamedTupleInput_0: namedtuple_validator,\n TestNamedTupleInput_1: namedtuple_validator,\n dict: dict_validator,\n }\n\n class ToyModel(torch.nn.Module):\n def __init__(_self): # noqa: B902\n super().__init__()\n _self.lin = nn.Linear(10, 10, bias=False)\n\n def forward(_self, x, expected_type): # noqa: B902\n # Similar to scatter, the recursive to in the single-device\n # case does not move tensors if they are in a custom type.\n self.assertTrue(isinstance(x, expected_type))\n fwd_tensor = validators[expected_type](x)\n return _self.lin(fwd_tensor)\n\n model = torch.nn.parallel.DistributedDataParallel(\n ToyModel().to(self.rank), device_ids=[self.rank]\n )\n\n def train_iter(inp, input_type):\n for _ in range(4):\n out = model(inp, input_type)\n out.sum().backward()\n\n # CPU tuple input, should be moved to the proper device before call\n # to forward.\n inp = tuple(torch.randn(10, 10) for _ in range(expected_len))\n train_iter(inp, tuple)\n\n # List CPU input, should be moved to proper device before call to\n # forward.\n inp = [torch.randn(10, 10) for _ in range(expected_len)]\n train_iter(inp, list)\n # Custom type containing tensor. 
The type is maintained, but the\n # device is not propagated (which is what happens with scatter too)\n inp = TensorWrapper(torch.randn(10, 10))\n train_iter(inp, TensorWrapper)\n # NamedTuple input. The type should be maintained and tensor inputs\n # should be moved to the correct device as in scatter.\n batch = 5\n dim = 10\n a = torch.rand(batch, dim)\n b = torch.rand(batch, dim)\n\n inp = TestNamedTupleInput_0(a, b)\n train_iter(inp, type(inp))\n\n inp = TestNamedTupleInput_1(a, b)\n train_iter(inp, type(inp))\n\n # dictionary input.\n inp = {\n EXPECTED_FIELDS[0]: a,\n EXPECTED_FIELDS[1]: b,\n }\n train_iter(inp, type(inp))\n\n @require_backend({\"gloo\", \"nccl\"})\n @require_backends_available({\"gloo\", \"nccl\"})\n @skip_if_lt_x_gpu(2)\n def test_ddp_namedtuple(self):\n batch = 5\n dim = 10\n\n a = torch.rand(batch, dim, device=self.rank)\n b = torch.rand(batch, dim, device=self.rank)\n\n class NamedTupleModule(torch.nn.Module):\n def __init__(_self): # noqa: B902\n super().__init__()\n _self.lin = nn.Linear(10, 1)\n\n def forward(_self, input, expected_type): # noqa: B902\n # Without NamedTuple support, this would be of type tuple.\n self.assertTrue(\n isinstance(input, expected_type),\n f\"Expected type {expected_type} but got {type(input)}\",\n )\n self.assertEqual(input._fields, EXPECTED_FIELDS)\n self.assertEqual(a, input.a)\n self.assertEqual(b, input.b)\n return _self.lin(torch.mul(input.a, input.b))\n\n model = torch.nn.parallel.DistributedDataParallel(\n NamedTupleModule().cuda(self.rank), device_ids=[self.rank]\n )\n inp = TestNamedTupleInput_0(a, b)\n # The following would fail if DDP does not propagate NamedTuples correctly.\n model(inp, type(inp))\n\n inp = TestNamedTupleInput_1(a, b)\n model(inp, type(inp))\n\n @with_dist_debug_levels(levels=[\"OFF\", \"INFO\", \"DETAIL\"])\n @require_backend({\"gloo\", \"nccl\"})\n @require_backends_available({\"gloo\", \"nccl\"})\n @skip_if_lt_x_gpu(2)\n def test_ddp_control_flow_same_across_ranks(self):\n # Control flow that is the same across ranks.\n batch = 20\n dim = 10\n\n world_size = dist.get_world_size()\n torch.cuda.set_device(self.rank)\n model = torch.nn.parallel.DistributedDataParallel(\n ControlFlowToyModel().cuda(self.rank),\n device_ids=[self.rank],\n find_unused_parameters=True,\n )\n random_input = torch.randn(batch, dim, device=self.rank)\n ones_input = torch.ones(batch, dim, device=self.rank)\n for i in range(6):\n if i % 2 == 0:\n out = model(random_input)\n else:\n out = model(ones_input)\n loss = out.sum()\n loss.backward()\n # On even iterations, 2nd param goes unused, on odd iterations,\n # it is used.\n local_used_maps = model.reducer._get_local_used_maps()\n if i % 2 == 0:\n expected = torch.tensor(\n [world_size, 0], device=self.rank, dtype=torch.int32\n )\n else:\n expected = torch.tensor(\n [world_size, world_size], device=self.rank, dtype=torch.int32\n )\n\n # Validate parameter usage.\n variable_usage_tensor = local_used_maps[0]\n self.assertEqual(variable_usage_tensor, expected)\n\n # Validate appropriate error message when DDP is used with\n # find_unused_parameters=False.\n model = torch.nn.parallel.DistributedDataParallel(\n ControlFlowToyModel().cuda(self.rank),\n device_ids=[self.rank],\n find_unused_parameters=False,\n )\n for i in range(2):\n if i == 0:\n loss = model(random_input).sum()\n loss.backward()\n else:\n try:\n loss = model(random_input).sum()\n loss.backward()\n except RuntimeError as e:\n msg = str(e)\n verify_ddp_error_logged(model, msg)\n # 2nd linear layer is unused\n 
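The control-flow test above only passes because `find_unused_parameters=True` lets the reducer mark parameters that a given iteration's graph skipped. A sketch of the same pattern with an invented two-layer model, assuming `torchrun`, NCCL and one GPU per process:

```python
import torch
import torch.distributed as dist
import torch.nn as nn
import torch.nn.functional as F


class ControlFlowModel(nn.Module):
    def __init__(self):
        super().__init__()
        self.lin1 = nn.Linear(10, 10, bias=False)
        self.lin2 = nn.Linear(10, 10, bias=False)

    def forward(self, x):
        # lin2 only participates for all-ones inputs, so on some iterations
        # its parameter receives no gradient at all.
        if torch.equal(x, torch.ones_like(x)):
            return self.lin2(F.relu(self.lin1(x)))
        return F.relu(self.lin1(x))


if __name__ == "__main__":
    # Assumes launch via torchrun with one GPU per process.
    dist.init_process_group(backend="nccl")
    rank = dist.get_rank()
    torch.cuda.set_device(rank)

    net = nn.parallel.DistributedDataParallel(
        ControlFlowModel().cuda(rank),
        device_ids=[rank],
        find_unused_parameters=True,  # required when the graph can skip parameters
    )
    for i in range(4):
        x = torch.ones(20, 10, device=rank) if i % 2 else torch.randn(20, 10, device=rank)
        net(x).sum().backward()

    dist.destroy_process_group()
```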
unused_param_index = 1\n expected_strs = [\n ddp_prev_reduction_unfinished_str,\n ddp_recommend_find_unused_params_str,\n ddp_outputs_not_used_in_loss_str,\n f\"Parameter indices which did not receive grad for rank {self.rank}: {unused_param_index}\",\n ]\n # In debug mode, should show parameters that weren't reduced.\n # Without debug mode, should show suggestion to use debug mode.\n if dist._get_debug_mode() == dist._DistributedDebugLevel.OFF:\n expected_strs.append(ddp_suggest_debug_mode_str)\n else:\n unreduced_params = \", \".join([\"lin2.weight\"])\n expected_strs.append(\n f\"did not receive grad for rank {self.rank}: {unreduced_params}\"\n )\n for s in expected_strs:\n self.assertTrue(s in msg, f\"Expected {s} to be in {msg}\")\n self.assertFalse(ddp_find_unused_params_enabled_str in msg)\n else:\n self.assertFalse(True, \"DDP error not raised\")\n\n dist.barrier()\n\n @require_backend({\"gloo\", \"nccl\"})\n @require_backends_available({\"gloo\", \"nccl\"})\n @skip_if_lt_x_gpu(2)\n def test_invalid_static_graph(self):\n world_size = dist.get_world_size()\n torch.cuda.set_device(self.rank)\n model = torch.nn.parallel.DistributedDataParallel(\n ControlFlowToyModel().cuda(self.rank),\n device_ids=[self.rank],\n )\n model._set_static_graph()\n random_input = torch.randn(20, 10, device=self.rank)\n ones_input = torch.ones(20, 10, device=self.rank)\n # unused parameter in the first iteration got used\n # in second iteration.\n expected_err = \"Your training graph has changed in this iteration\"\n with self.assertRaisesRegex(RuntimeError, expected_err):\n for i in range(2):\n if i % 2 == 0:\n out = model(random_input)\n else:\n out = model(ones_input)\n loss = out.sum()\n loss.backward()\n\n verify_ddp_error_logged(model, expected_err)\n\n # used parameter in the first iteration got unused\n # in second iteration.\n with self.assertRaisesRegex(\n RuntimeError,\n \"Expected to have finished reduction in the prior iteration \"\n \"before starting a new one. 
This error indicates that your \"\n \"training graph has changed in this iteration\",\n ):\n for i in range(2):\n if i % 2 != 0:\n out = model(random_input)\n else:\n out = model(ones_input)\n loss = out.sum()\n loss.backward()\n\n verify_ddp_error_logged(model, \"Expected to have finished reduction\")\n\n @with_dist_debug_levels(levels=[\"OFF\", \"INFO\", \"DETAIL\"])\n @require_backend({\"gloo\", \"nccl\"})\n @require_backends_available({\"gloo\", \"nccl\"})\n @skip_if_lt_x_gpu(2)\n def test_ddp_control_flow_different_across_ranks(self):\n # Control flow that is different across ranks.\n batch = 20\n dim = 10\n\n class ToyModel(nn.Module):\n def __init__(self, rank):\n super(ToyModel, self).__init__()\n self.lin1 = nn.Linear(10, 10, bias=False)\n self.lin2 = nn.Linear(10, 10, bias=False)\n self.rank = rank\n\n def forward(self, x):\n # Control-flow that is rank and input dependent for the\n # model.\n use_second_layer = (\n torch.equal(x, torch.ones(batch, dim, device=x.device))\n and self.rank == 1\n )\n\n if use_second_layer:\n return self.lin2(F.relu(self.lin1(x)))\n else:\n return F.relu(self.lin1(x))\n\n world_size = dist.get_world_size()\n torch.cuda.set_device(self.rank)\n model = torch.nn.parallel.DistributedDataParallel(\n ToyModel(self.rank).cuda(self.rank),\n device_ids=[self.rank],\n find_unused_parameters=True,\n )\n random_input = torch.randn(batch, dim, device=self.rank)\n ones_input = torch.ones(batch, dim, device=self.rank)\n for i in range(6):\n if i % 2 == 0:\n out = model(random_input)\n else:\n out = model(ones_input)\n loss = out.sum()\n loss.backward()\n # On even iterations, 2nd param goes unused, on odd iterations,\n # it is used only on rank 1.\n local_used_maps = model.reducer._get_local_used_maps()\n\n if i % 2 == 0:\n expected = torch.tensor(\n [world_size, 0], device=self.rank, dtype=torch.int32\n )\n else:\n expected = torch.tensor(\n [world_size, 1], device=self.rank, dtype=torch.int32\n )\n\n variable_usage_tensor = local_used_maps[0]\n # Validate parameter usage. 
On odd iterations, 2nd param is only\n # used on rank 1.\n self.assertEqual(variable_usage_tensor, expected)\n\n # Validate appropriate error message when DDP is used with\n # find_unused_parameters=False.\n model = torch.nn.parallel.DistributedDataParallel(\n ToyModel(self.rank).cuda(self.rank),\n device_ids=[self.rank],\n find_unused_parameters=False,\n )\n for i in range(2):\n if i == 0:\n loss = model(random_input).sum()\n loss.backward()\n else:\n try:\n loss = model(random_input).sum()\n loss.backward()\n except RuntimeError as e:\n msg = str(e)\n verify_ddp_error_logged(model, msg)\n unused_param_index = 1\n expected_strs = [\n ddp_prev_reduction_unfinished_str,\n ddp_recommend_find_unused_params_str,\n ddp_outputs_not_used_in_loss_str,\n f\"Parameter indices which did not receive grad for rank {self.rank}: {unused_param_index}\",\n ]\n # In debug mode, should show parameters that weren't reduced.\n # Without debug mode, should show suggestion to use debug mode.\n if dist._get_debug_mode() == dist._DistributedDebugLevel.OFF:\n expected_strs.append(ddp_suggest_debug_mode_str)\n else:\n unreduced_params = \", \".join([\"lin2.weight\"])\n expected_strs.append(\n f\"did not receive grad for rank {self.rank}: {unreduced_params}\"\n )\n for s in expected_strs:\n self.assertTrue(s in msg, f\"Expected {s} to be in {msg}\")\n self.assertFalse(ddp_find_unused_params_enabled_str in msg)\n else:\n self.assertFalse(True, \"DDP error not raised\")\n\n dist.barrier()\n\n @require_backend({\"gloo\"})\n @sandcastle_skip_if(BACKEND == \"nccl\", \"NCCL does not support scatter\")\n def test_scatter_object_list(self):\n src_rank = 0\n scatter_list = (\n COLLECTIVES_OBJECT_TEST_LIST\n if self.rank == src_rank\n else [None for _ in COLLECTIVES_OBJECT_TEST_LIST]\n )\n world_size = dist.get_world_size()\n scatter_list = scatter_list[:world_size]\n i = 0\n while len(scatter_list) < world_size:\n scatter_list.append(scatter_list[i])\n i += 1\n\n output_obj_list = [None]\n dist.scatter_object_list(output_obj_list, scatter_list, src=src_rank)\n self.assertEqual(\n output_obj_list[0],\n COLLECTIVES_OBJECT_TEST_LIST[\n self.rank % len(COLLECTIVES_OBJECT_TEST_LIST)\n ],\n )\n # Ensure errors are raised upon incorrect arguments.\n with self.assertRaisesRegex(\n RuntimeError,\n \"Expected argument scatter_object_output_list to be a list of size at least 1.\",\n ):\n dist.scatter_object_list([], scatter_list, src=src_rank)\n\n @require_backend({\"gloo\", \"nccl\"})\n @require_backends_available({\"gloo\", \"nccl\"})\n @skip_if_lt_x_gpu(2)\n @skip_if_rocm\n def test_ddp_model_diff_across_ranks(self):\n group_gloo = dist.new_group(\n timeout=timedelta(seconds=60), backend=dist.Backend.GLOO\n )\n # Set NCCL_BLOCKING_WAIT and use a new NCCL group to improve test\n # determinism.\n os.environ[\"NCCL_BLOCKING_WAIT\"] = \"1\"\n group_to_use = dist.new_group(\n backend=dist.get_backend(), timeout=timedelta(seconds=5)\n )\n torch.cuda.set_device(self.rank)\n # Creates network with different sized embedding table on different\n # ranks. This should throw an error during DDP init.\n net = EmbeddingNet(self.rank)\n # When running with NCCL backend, we don't expect an error on rank 0,\n # rather, it will be taken down by NCCL_ASYNC_ERROR_HANDLING. 
When\n # running with Gloo or with debug mode wrapper, we expect the error\n # to be caught inline.\n is_detail_dbg_mode = (\n dist._get_debug_mode() == dist._DistributedDebugLevel.DETAIL\n )\n rank_0_ctx = (\n self.assertRaisesRegex(\n RuntimeError, \"Caught collective operation timeout\"\n )\n if dist.get_backend(group_to_use) == dist.Backend.NCCL\n and not is_detail_dbg_mode\n # Gloo can raise various exception messages, so just assert\n # Runtime error here.\n else self.assertRaises(RuntimeError)\n )\n ctx = (\n rank_0_ctx\n if self.rank == 0\n else self.assertRaisesRegex(RuntimeError, \"appears not to match\")\n )\n with ctx:\n net = torch.nn.parallel.DistributedDataParallel(\n net.to(self.rank),\n device_ids=[self.rank],\n process_group=group_to_use,\n )\n # Should only be run by rank 0, and blocking_wait catches and\n # reports exception.\n dist.barrier(group_to_use)\n\n # Perform gloo-based barrier to ensure one rank doesn't exit test\n # early which causes failure with Barrier.sync.\n dist.barrier(group_gloo)\n\n @with_dist_debug_levels(levels=[\"OFF\", \"INFO\", \"DETAIL\"])\n @require_backend({\"gloo\", \"nccl\"})\n @require_backends_available({\"gloo\", \"nccl\"})\n @skip_if_lt_x_gpu(2)\n def test_output_unused_in_loss(self):\n model = TwoLinLayerNet()\n # Need copy of model to pass into 2nd DDP ctor otherwise autograd hooks\n # on first DDP reducer will execute!\n model_copy = copy.deepcopy(model)\n net = torch.nn.parallel.DistributedDataParallel(\n copy.deepcopy(model).cuda(self.rank),\n device_ids=[self.rank],\n )\n net_with_find_unused = torch.nn.parallel.DistributedDataParallel(\n model_copy.cuda(self.rank),\n device_ids=[self.rank],\n find_unused_parameters=True,\n )\n\n inp = torch.randn(10, 10)\n\n for ddp in [net, net_with_find_unused]:\n for i in range(2):\n if i == 0:\n a, b = ddp(inp)\n loss = b.sum()\n loss.backward()\n else:\n try:\n a, b = ddp(inp)\n loss = b.sum()\n loss.backward()\n except RuntimeError as e:\n msg = str(e)\n unused_index = 0\n unused_index_substr = (\n f\"Parameter indices which did not receive grad for rank {self.rank}: {unused_index}\"\n )\n if ddp == net:\n expected_strs = [\n ddp_prev_reduction_unfinished_str,\n ddp_recommend_find_unused_params_str,\n ddp_outputs_not_used_in_loss_str,\n unused_index_substr,\n ]\n unexpected_strs = [\n ddp_find_unused_params_enabled_str,\n ]\n elif ddp == net_with_find_unused:\n expected_strs = [\n ddp_prev_reduction_unfinished_str,\n ddp_outputs_not_used_in_loss_str,\n ddp_find_unused_params_enabled_str,\n unused_index_substr,\n ]\n unexpected_strs = [\n ddp_recommend_find_unused_params_str,\n ]\n # In debug mode, should show parameters that weren't reduced.\n # Without debug mode, should show suggestion to use debug mode.\n if (\n dist._get_debug_mode()\n == dist._DistributedDebugLevel.OFF\n ):\n expected_strs.append(ddp_suggest_debug_mode_str)\n else:\n unreduced_params = \", \".join([\"a.weight\"])\n expected_strs.append(\n f\"did not receive grad for rank {self.rank}: {unreduced_params}\"\n )\n for s in expected_strs:\n self.assertTrue(\n s in msg, f\"Expected {s} to be in {msg}\"\n )\n for s in unexpected_strs:\n self.assertFalse(\n s in msg, f\"Expected {s} not to be in {msg}\"\n )\n else:\n self.assertFalse(True, \"DDP error not raised\")\n\n dist.barrier()\n\n def _test_different_graph_across_ranks(\n self, find_unused_parameters=False, static_graph=False\n ):\n class ToyModel(nn.Module):\n def __init__(self, rank):\n super(ToyModel, self).__init__()\n self.lin1 = nn.Linear(10, 10, bias=False)\n 
self.lin2 = nn.Linear(10, 10, bias=False)\n self.rank = rank\n\n def forward(self, x):\n if self.rank == 0:\n return self.lin2(F.relu(self.lin1(x)))\n else:\n return F.relu(self.lin1(x))\n\n torch.manual_seed(31415)\n world_size = dist.get_world_size()\n torch.cuda.set_device(self.rank)\n model = ToyModel(self.rank).cuda(self.rank)\n ddp_model = torch.nn.parallel.DistributedDataParallel(\n model,\n device_ids=[self.rank],\n find_unused_parameters=find_unused_parameters,\n gradient_as_bucket_view=True,\n )\n if static_graph:\n ddp_model._set_static_graph()\n random_input = torch.randn(20, 10, device=self.rank)\n for i in range(10):\n out = ddp_model(random_input)\n loss = out.sum()\n loss.backward()\n return ddp_model\n\n @require_backend({\"gloo\", \"nccl\"})\n @require_backends_available({\"gloo\", \"nccl\"})\n @skip_if_lt_x_gpu(2)\n def test_different_graph_across_ranks(self):\n base_model = self._test_different_graph_across_ranks(\n find_unused_parameters=True\n )\n self.assertFalse(\n base_model._get_ddp_logging_data().get(\"has_rebuilt_buckets\", 0)\n )\n static_model = self._test_different_graph_across_ranks(static_graph=True)\n self.assertTrue(\n static_model._get_ddp_logging_data().get(\"has_rebuilt_buckets\", 0)\n )\n for i, j in zip(base_model.parameters(), static_model.parameters()):\n self.assertEqual(i, j)\n\n @require_backend({\"gloo\"})\n @require_backends_available({\"gloo\"})\n @sandcastle_skip_if(\n IS_MACOS or IS_WINDOWS,\n \"MacOS uses uv transport which does not have as robust error handling as tcp transport\",\n )\n def test_monitored_barrier_gloo(self):\n tensors = [torch.ones(10) * self.rank]\n # Kick off some allreduce work on all ranks\n for _ in range(10):\n dist.all_reduce(torch.cat(tensors))\n # Run monitored barrier and ensure it passees\n timeout = timedelta(seconds=2)\n dist.monitored_barrier(timeout=timeout)\n # Check monitored_barrier success with wait_all_ranks=True\n for _ in range(10):\n dist.all_reduce(torch.cat(tensors))\n dist.monitored_barrier(timeout=timeout, wait_all_ranks=True)\n # All ranks besides 1 call into barrier, rank 0 should report failure\n # while others report gloo error.\n failed_rank = 1\n src_rank = 0\n if self.rank == src_rank:\n with self.assertRaisesRegex(\n RuntimeError, f\"Rank {failed_rank} failed to pass monitoredBarrier\"\n ):\n dist.monitored_barrier(timeout=timeout)\n elif self.rank != failed_rank:\n # Other ranks should not pass barrier since rank 0 failed.\n err_regex = (\n f\"Rank {self.rank} successfully reached monitoredBarrier,\"\n f\" but received errors while waiting to be unblocked by rank\"\n f\" {src_rank}\"\n )\n with self.assertRaisesRegex(RuntimeError, err_regex):\n dist.monitored_barrier(timeout=timeout)\n\n # We need a barrier since otherwise failed_rank exits too early\n # and cause a timeout.\n self._barrier(timeout=30)\n\n @require_backend({\"gloo\"})\n @require_backends_available({\"gloo\"})\n def test_monitored_barrier_gloo_subgroup(self):\n # Tests that monitored_barrier works as expected on non-default\n # process groups.\n failed_rank = 1\n timeout = 0.1\n subgroup = dist.new_group(ranks=[0, 1])\n\n if self.rank == failed_rank:\n return\n\n if self.rank == 0:\n with self.assertRaisesRegex(\n RuntimeError, f\"Rank {failed_rank} failed to pass monitoredBarrier\"\n ):\n dist.monitored_barrier(subgroup, timeout)\n else:\n # Other ranks call into monitored_barrier, but this should be a\n # noop because they are not part of the subgroup. 
Verify that\n # there are no errors here.\n dist.monitored_barrier(subgroup, timeout)\n\n def _test_monitored_barrier_allreduce_hang(self, wait_all_ranks):\n # tests expected behavior when nonzero rank hangs.\n nccl_pg = dist.new_group(\n ranks=list(i for i in range(int(self.world_size))),\n timeout=timedelta(seconds=2),\n backend=dist.Backend.NCCL,\n )\n gloo_pg = dist.new_group(\n ranks=list(i for i in range(int(self.world_size))),\n backend=dist.Backend.GLOO,\n )\n tensors = [torch.ones(10, device=self.rank) * self.rank]\n # Let all ranks call allreduce first to set up communicators etc.\n # Directly simulating error here will run into store issue described\n # in https://github.com/pytorch/pytorch/issues/54524.\n nccl_pg.allreduce(tensors).wait()\n # All ranks besides 0 call into allreduce. This is to simulate a\n # desync across the world, where some ranks call into\n # monitored_barrier() and others are stuck in collective comm. In\n # practice, we don't need NCCL_BLOCKING_WAIT, but we use it in this\n # test to ensure it exits cleanly.\n if self.rank != 0:\n # Can get different errors here depending on whether gloo-based\n # wrapper PG is enabled or not, since with wrapper pg, it will\n # fail in a collective synchronization check and not actually\n # call into the nccl pg.\n if dist._get_debug_mode() == dist._DistributedDebugLevel.DETAIL:\n err_regex = \"Timed out waiting\"\n else:\n err_regex = \"Caught collective operation timeout\"\n with self.assertRaisesRegex(RuntimeError, err_regex):\n nccl_pg.allreduce(tensors).wait(timedelta(seconds=0.1))\n else:\n # Rank 0 should report first (in order) timed out rank or all ranks\n # depending on wait_all_ranks flag passed into monitored_barrier.\n if wait_all_ranks:\n rank_str = \", \".join(\n [str(i) for i in range(1, int(self.world_size))]\n )\n err_regex = f\"Ranks {rank_str} failed to pass monitoredBarrier\"\n else:\n expected_first_fail_rank = 1\n err_regex = f\"Rank {expected_first_fail_rank} failed to pass monitoredBarrier\"\n monitored_barrier_timeout_seconds = timedelta(seconds=0.1)\n with self.assertRaisesRegex(RuntimeError, err_regex):\n gloo_pg.monitored_barrier(\n monitored_barrier_timeout_seconds, wait_all_ranks=wait_all_ranks\n )\n\n @with_nccl_blocking_wait\n @require_backend({\"gloo\", \"nccl\"})\n @require_backends_available({\"gloo\", \"nccl\"})\n @skip_if_rocm\n @skip_if_lt_x_gpu(int(os.environ[\"WORLD_SIZE\"]))\n def test_monitored_barrier_allreduce_hang(self):\n # tests expected behavior when nonzero rank hangs and we want to\n # report first timed out rank.\n self._test_monitored_barrier_allreduce_hang(wait_all_ranks=False)\n\n @with_nccl_blocking_wait\n @require_backend({\"gloo\", \"nccl\"})\n @require_backends_available({\"gloo\", \"nccl\"})\n @skip_if_rocm\n @skip_if_lt_x_gpu(int(os.environ[\"WORLD_SIZE\"]))\n def test_monitored_barrier_allreduce_hang_wait_all_ranks(self):\n # tests expected behavior when nonzero rank hangs and we want to\n # report all timed out ranks.\n self._test_monitored_barrier_allreduce_hang(wait_all_ranks=True)\n\n @require_backend({\"gloo\"})\n @require_backends_available({\"gloo\"})\n def test_monitored_barrier_gloo_rank_0_timeout(self):\n # tests error when rank 0 exhausts its given timeout.\n process_group = dist.new_group(\n ranks=list(i for i in range(int(self.world_size)))\n )\n timeout = timedelta(seconds=0)\n if self.rank == 0:\n with self.assertRaisesRegex(\n RuntimeError, f\"Rank {self.rank} timed out in monitoredBarrier\"\n ):\n process_group.monitored_barrier(timeout)\n\n 
@require_backend({\"gloo\"})\n @require_backends_available({\"gloo\"})\n @skip_if_small_worldsize\n @sandcastle_skip_if(\n IS_MACOS or IS_WINDOWS,\n \"MacOS uses uv transport which does not have as robust error handling as tcp transport\",\n )\n def test_monitored_barrier_failure_order(self):\n # Ensure that the first (in sorted order) rank is reported when\n # multiple ranks fail to pass the monitored_barrier.\n # TODO(#54879): Provide ability to wait and report all failed ranks\n expected_first_failed_rank = 2\n timeout = timedelta(seconds=2)\n src_rank = 0\n if self.rank == src_rank:\n with self.assertRaisesRegex(\n RuntimeError, f\"Rank {expected_first_failed_rank}\"\n ):\n dist.monitored_barrier(timeout=timeout)\n elif self.rank == 1:\n err_regex = (\n f\"Rank {self.rank} successfully reached monitoredBarrier,\"\n f\" but received errors while waiting to be unblocked by rank\"\n f\" {src_rank}\"\n )\n with self.assertRaisesRegex(RuntimeError, err_regex):\n dist.monitored_barrier(timeout=timeout)\n\n @require_backend({\"gloo\"})\n @require_backends_available({\"gloo\"})\n @skip_if_small_worldsize\n def test_monitored_barrier_wait_all_ranks(self):\n # Tests simple case where > 1 rank does not call into monitored\n # barrier and verifies all ranks are reported by rank 0.\n if self.rank == 0:\n timeout = timedelta(seconds=0.1)\n rank_str = \", \".join([str(i) for i in range(1, int(self.world_size))])\n err_regex = f\"Ranks {rank_str} failed to pass monitoredBarrier\"\n with self.assertRaisesRegex(RuntimeError, err_regex):\n dist.monitored_barrier(timeout=timeout, wait_all_ranks=True)\n\n @require_backend({\"gloo\", \"nccl\"})\n @require_backends_available({\"gloo\", \"nccl\"})\n @skip_if_lt_x_gpu(2)\n def test_ddp_build_param_to_name_mapping(self):\n model = TwoLinLayerNet()\n net = torch.nn.parallel.DistributedDataParallel(\n model.cuda(self.rank),\n device_ids=[self.rank],\n )\n expected_mapping = {0: \"a.weight\", 1: \"b.weight\"}\n net_params, _ = net._build_params_for_reducer()\n param_to_name_mapping = net._build_param_to_name_mapping(net_params)\n self.assertDictEqual(expected_mapping, param_to_name_mapping)\n\n # Test when DDP is used with ignored parameters.\n model = TwoLinLayerNet()\n # Parameters to ignore are in the format {module_name}.{param_name}\n params_to_ignore = [\"a.weight\"]\n torch.nn.parallel.DistributedDataParallel._set_params_and_buffers_to_ignore_for_model(\n model, params_to_ignore\n )\n net = torch.nn.parallel.DistributedDataParallel(\n model.cuda(self.rank),\n device_ids=[self.rank],\n )\n expected_mapping = {0: \"b.weight\"}\n net_params, _ = net._build_params_for_reducer()\n param_to_name_mapping = net._build_param_to_name_mapping(net_params)\n self.assertDictEqual(expected_mapping, param_to_name_mapping)\n\n # Test errors are raised when DDP and module parameters mismatch.\n # This generally indicates a bug with DDP and is not expected to\n # happen in user applications.\n model = TwoLinLayerNet()\n net = torch.nn.parallel.DistributedDataParallel(\n model.cuda(self.rank),\n device_ids=[self.rank],\n )\n net_params, _ = net._build_params_for_reducer()\n if self.rank == 0:\n print(type(net_params[0][0]))\n\n net_params[0].extend(\n [\n torch.nn.Parameter(torch.ones(1)),\n torch.nn.Parameter(torch.ones(1)),\n ]\n )\n\n with self.assertRaisesRegex(ValueError, \"Expected param to name mapping\"):\n net._build_param_to_name_mapping(net_params)\n\n net_params[0] = net_params[0][:-3]\n with self.assertRaisesRegex(ValueError, \"Param with name\"):\n 
net._build_param_to_name_mapping(net_params)\n\n net_params[0].extend(\n [\n torch.nn.Parameter(torch.ones(1)),\n torch.nn.Parameter(torch.ones(1)),\n ]\n )\n\n @sandcastle_skip_if(\n BACKEND != \"nccl\" and BACKEND != \"gloo\",\n \"Only Nccl & Gloo backend support DistributedDataParallel\",\n )\n @skip_if_lt_x_gpu(2)\n def test_ddp_build_param_to_name_mapping_requires_grad(self):\n class Net(nn.Module):\n def __init__(self):\n super().__init__()\n self.lin = nn.Linear(10, 10)\n # Is not tracked by DDP and should not show up in param to\n # name mapping.\n self.lin.bias.requires_grad_(False)\n\n def forward(self, x):\n return self.lin(x)\n\n model = Net()\n net = torch.nn.parallel.DistributedDataParallel(\n model.cuda(self.rank), device_ids=[self.rank]\n )\n expected_mapping = {\n 0: \"lin.weight\",\n }\n net_params, _ = net._build_params_for_reducer()\n param_to_name_mapping = net._build_param_to_name_mapping(net_params)\n self.assertEqual(param_to_name_mapping, expected_mapping)\n\n def _test_ddp_multiple_nested_unused_params_error(self, ignore_sparse):\n debug_mode_off = dist._get_debug_mode() == dist._DistributedDebugLevel.OFF\n\n class SubModule(nn.Module):\n def __init__(self):\n super().__init__()\n self.embedding_net = EmbeddingNet(0)\n self.lin = TwoLinLayerNet()\n self.bn = BatchNormNet()\n self.lin_layer = nn.Linear(4, 10, bias=False)\n\n def forward(self, x):\n x = self.bn(x)\n x = self.lin_layer(x)\n x = self.lin.a(x) # self.lin.b param unused\n # EmbeddingNet entirely unused: self.embedding_net.embedding and\n # self.embedding_net.lin unused.\n return x\n\n class MyModel(nn.Module):\n def __init__(self):\n super().__init__()\n self.sub_module = SubModule()\n\n def forward(self, x):\n return self.sub_module(x)\n\n model = MyModel()\n sparse_embedding_fqns = []\n if ignore_sparse:\n for module_name, module in model.named_modules():\n if module == model.sub_module.embedding_net.embedding:\n for parameter_name, param in module.named_parameters(\n recurse=False\n ):\n fqn = f\"{module_name}.{parameter_name}\"\n sparse_embedding_fqns.append(fqn)\n\n torch.nn.parallel.DistributedDataParallel._set_params_and_buffers_to_ignore_for_model(\n model, sparse_embedding_fqns\n )\n unused_modules = [\n model.sub_module.embedding_net.lin,\n model.sub_module.lin.b,\n ]\n else:\n unused_modules = list(model.sub_module.embedding_net.modules()) + [\n model.sub_module.lin.b,\n ]\n\n expected_unused_param_fqns = []\n used_param_fqns = [] # Validate that these don't mistakenly show up.\n fqn_to_param_index = {}\n index = 0\n for module_name, module in model.named_modules():\n for parameter_name, param in module.named_parameters(recurse=False):\n fqn = f\"{module_name}.{parameter_name}\"\n fqn_to_param_index[fqn] = index\n if fqn not in sparse_embedding_fqns:\n index += 1\n if module in unused_modules:\n expected_unused_param_fqns.append(fqn)\n else:\n if (\n not ignore_sparse\n or module != model.sub_module.embedding_net.embedding\n ):\n used_param_fqns.append(fqn)\n\n net = torch.nn.parallel.DistributedDataParallel(\n model.cuda(self.rank),\n device_ids=[self.rank],\n )\n batch, dim = 10, 2\n inp = torch.ones(batch, dim)\n for i in range(2):\n if i == 0:\n out = net(inp)\n loss = out.sum()\n loss.backward()\n else:\n try:\n out = net(inp)\n loss = out.sum()\n loss.backward()\n except RuntimeError as e:\n e = str(e)\n\n unused_param_substr = e[e.find(\"did not receive grad\") :]\n # Validate that each unused param fully qualified name\n # shows up in error logs. 
We do this instead of\n # constructing a joined string since order of parameters\n # can be different in Reducer. In addition, validate\n # param indices show up as well.\n for unused_param_fqn in expected_unused_param_fqns:\n self.assertTrue(\n unused_param_fqn in unused_param_substr\n or debug_mode_off\n )\n self.assertTrue(\n str(fqn_to_param_index[unused_param_fqn])\n in unused_param_substr,\n f\"Did not find index {fqn_to_param_index[unused_param_fqn]} for {unused_param_fqn}\",\n )\n\n # Validate that used param fqns don't show up in error\n # logs.\n for used_param_fqn in used_param_fqns:\n self.assertFalse(used_param_fqn in unused_param_substr)\n # Validate that ignored param fqns don't show up as unused\n # (since DDP does not track them)\n for sparse_param_fqn in sparse_embedding_fqns:\n self.assertFalse(sparse_param_fqn in unused_param_substr)\n else:\n self.assertTrue(False, \"Expected error was not raised!\")\n\n @with_dist_debug_levels(levels=[\"OFF\", \"INFO\", \"DETAIL\"])\n @require_backend({\"gloo\", \"nccl\"})\n @require_backends_available({\"gloo\", \"nccl\"})\n @skip_if_lt_x_gpu(2)\n def test_ddp_multiple_nested_unused_params_error(self):\n self._test_ddp_multiple_nested_unused_params_error(ignore_sparse=False)\n\n @with_dist_debug_levels(levels=[\"OFF\", \"INFO\", \"DETAIL\"])\n @require_backend({\"gloo\", \"nccl\"})\n @require_backends_available({\"gloo\", \"nccl\"})\n @skip_if_lt_x_gpu(2)\n def test_ddp_multiple_nested_unused_params_err_ignore_params(self):\n # Tests unused parameter reporting when DDP is configured to ignore\n # certain parameters.\n self._test_ddp_multiple_nested_unused_params_error(ignore_sparse=True)\n\n @sandcastle_skip_if(\n BACKEND != \"nccl\" and BACKEND != \"gloo\",\n \"Only Nccl & Gloo backend support DistributedDataParallel\",\n )\n @skip_if_lt_x_gpu(2)\n def test_ddp_inference(self):\n # tests that DDP module can be run on a single node with no_grad\n # or eval setting and there is no hang.\n rank = self.rank\n torch.cuda.set_device(rank)\n model = Net().cuda()\n local_model = copy.deepcopy(model)\n model = torch.nn.parallel.DistributedDataParallel(\n model,\n device_ids=[rank],\n )\n syncbn_model = nn.SyncBatchNorm(\n 2, momentum=0.99, track_running_stats=False\n ).cuda()\n local_syncbn_model = copy.deepcopy(syncbn_model)\n syncbn_model = torch.nn.parallel.DistributedDataParallel(\n syncbn_model, device_ids=[rank]\n )\n inp = torch.randn(10, 2, device=rank)\n inp_syncbn = torch.randn(10, 2, 4, 4, device=rank)\n tests = [\n (model, local_model, inp),\n (syncbn_model, local_syncbn_model, inp_syncbn),\n ]\n for test in tests:\n test_model, test_local_model, test_inp = test\n if self.rank == 0:\n test_model.eval()\n test_local_model.eval()\n for _ in range(6):\n self.assertEqual(\n test_model(test_inp), test_local_model(test_inp)\n )\n\n # Barrier since only rank 0 runs inference. 
Test should be\n # much faster than 30s, but this is to avoid flakiness.\n self._barrier(timeout=30)\n\n @sandcastle_skip_if(\n BACKEND != \"nccl\" and BACKEND != \"gloo\",\n \"Only Nccl & Gloo backend support DistributedDataParallel\",\n )\n @skip_if_lt_x_gpu(2)\n def test_ddp_sync_bn_training_vs_eval(self):\n rank = self.rank\n torch.cuda.set_device(rank)\n # Need to set track_running_stats=False, when track_running_stats=True,\n # bn_training is False and sync could not occur in eval model.\n model = nn.SyncBatchNorm(2, momentum=0.99, track_running_stats=False).cuda(\n rank\n )\n model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[rank])\n # Test sync occurs in training mode.\n with torch.autograd.profiler.profile() as prof:\n for i in range(6):\n inp = torch.randn(10, 2, 4, 4).cuda(rank)\n out = model(inp)\n loss = out.sum()\n loss.backward()\n\n # SyncBN allgathers stats across all ranks, so verify call to\n # all_gather in profiler.\n if BACKEND == \"nccl\":\n all_gather_calls = get_profiling_event(\"_all_gather_base\", prof)\n else:\n all_gather_calls = get_profiling_event(\"all_gather\", prof)\n self.assertNotEqual([], all_gather_calls)\n\n # Only do inference on one rank. If SyncBN did collective stats sync,\n # this would hang/error.\n model_inference = model.module\n if self.rank == 0:\n model_inference.eval()\n with torch.autograd.profiler.profile() as prof:\n for i in range(6):\n inp = torch.randn(10, 2, 4, 4).cuda(rank)\n out = model_inference(inp)\n loss = out.sum()\n loss.backward()\n\n # Ensure sync does not occur in eval() mode.\n if BACKEND == \"nccl\":\n all_gather_calls = get_profiling_event(\"_all_gather_base\", prof)\n else:\n all_gather_calls = get_profiling_event(\"all_gather\", prof)\n self.assertEqual([], all_gather_calls)\n\n @skip_if_lt_x_gpu(2)\n @sandcastle_skip_if(\n BACKEND != \"nccl\" and BACKEND != \"gloo\",\n \"Only Nccl & Gloo backend support DistributedDataParallel\",\n )\n def test_ddp_python_error_logged(self):\n # Most python exceptions in DDP are raised during init before\n # reducer is constructed, so we don't have a logger in those cases.\n # However, the below is one example where a python error is thrown\n # after reducer is constructed.\n model = TwoLinLayerNet().cuda(self.rank)\n model = torch.nn.parallel.DistributedDataParallel(\n model,\n device_ids=[self.rank],\n )\n expected_err = \"must be callable\"\n with self.assertRaisesRegex(TypeError, expected_err):\n model.register_comm_hook({}, {})\n\n verify_ddp_error_logged(model, expected_err)\n\n @skip_if_lt_x_gpu(2)\n @sandcastle_skip_if(\n BACKEND != \"nccl\" and BACKEND != \"gloo\",\n \"Only Nccl & Gloo backend support DistributedDataParallel\",\n )\n def test_ddp_static_graph_nested_types(self):\n # Tests for static graph training when outputs are not just tensors\n # but can be (nested) tuple, list, dict, etc.\n rank = self.rank\n torch.cuda.set_device(rank)\n\n class NestedOutputModule(torch.nn.Module):\n def __init__(self):\n super().__init__()\n self.lin = nn.Linear(100, 1, bias=False)\n\n def forward(self, inp, output_type):\n if output_type == \"tuple\":\n return (\n self.lin(inp),\n (\n self.lin(inp),\n self.lin(inp),\n ),\n )\n elif output_type == \"list\":\n return [\n self.lin(inp),\n [\n self.lin(inp),\n self.lin(inp),\n ],\n ]\n elif output_type == \"dict\":\n return {\n \"a\": self.lin(inp),\n \"b\": {\n \"c\": self.lin(inp),\n },\n }\n\n def get_loss(model_output):\n loss = 0.0\n if isinstance(model_output, torch.Tensor):\n return model_output.sum()\n elif 
isinstance(model_output, dict):\n for value in model_output.values():\n loss += get_loss(value)\n elif isinstance(model_output, tuple) or isinstance(model_output, list):\n for x in model_output:\n loss += get_loss(x)\n else:\n raise ValueError(f\"Unknown model output type {type(model_output)}\")\n return loss\n\n model = NestedOutputModule().cuda(rank)\n model_static_graph = copy.deepcopy(model)\n model = torch.nn.parallel.DistributedDataParallel(\n model,\n device_ids=[rank],\n )\n model_static_graph = torch.nn.parallel.DistributedDataParallel(\n model,\n device_ids=[rank],\n )\n model_static_graph._set_static_graph()\n inp = torch.randn(10, 100)\n type_mapping = {\n \"list\": list,\n \"tuple\": tuple,\n \"dict\": dict,\n }\n for output_type in type_mapping.keys():\n for i in range(6):\n out = model(inp, output_type=output_type)\n loss = get_loss(out)\n loss.backward()\n self._model_step(model)\n out_static = model_static_graph(inp, output_type=output_type)\n self.assertTrue(isinstance(out_static, type_mapping[output_type]))\n loss_static = get_loss(out_static)\n loss_static.backward()\n self._model_step(model_static_graph)\n for (p, p_static) in zip(\n model.parameters(), model_static_graph.parameters()\n ):\n self.assertEqual(p, p_static)\n\n @skip_if_lt_x_gpu(2)\n @sandcastle_skip_if(\n BACKEND != \"nccl\" and BACKEND != \"gloo\",\n \"Only Nccl & Gloo backend support DistributedDataParallel\",\n )\n def test_detect_ddp_is_actually_static(self):\n class ToyModel(nn.Module):\n def __init__(self):\n super(ToyModel, self).__init__()\n self.net1 = nn.Linear(10, 10, bias=False)\n self.net2 = nn.Linear(10, 10)\n\n def forward(self, x, find_unused, dynamic):\n if find_unused:\n if dynamic:\n return self.net2(self.net1(x))\n else:\n return self.net2(x)\n else:\n return self.net2(self.net1(x))\n\n # Set of unused parameters don't change across iterations\n torch.cuda.set_device(self.rank)\n model = ToyModel().cuda()\n for find_unused in [True, False]:\n ddp = torch.nn.parallel.DistributedDataParallel(\n model,\n device_ids=[self.rank],\n find_unused_parameters=find_unused,\n )\n inp = torch.randn(1, 10, device=\"cuda\")\n for _ in range(6):\n out = ddp(inp, find_unused=find_unused, dynamic=False)\n loss = out.sum()\n loss.backward()\n self.assertTrue(ddp.reducer._ddp_graph_static())\n\n # Set of unused parameters dynamically change\n ddp = torch.nn.parallel.DistributedDataParallel(\n model,\n device_ids=[self.rank],\n find_unused_parameters=True,\n )\n inp = torch.randn(1, 10, device=\"cuda\")\n for i in range(6):\n out = ddp(inp, find_unused=True, dynamic=i % 2 == 0)\n loss = out.sum()\n loss.backward()\n self.assertFalse(ddp.reducer._ddp_graph_static())\n\n def _test_ddp_new_tensor_in_fwd(self, static_graph):\n # Test from https://github.com/pytorch/pytorch/issues/60733\n class MyModel(nn.Module):\n def __init__(self):\n super().__init__()\n self.fc1 = nn.Linear(10, 10, bias=False)\n self.fc2 = nn.Linear(10, 10, bias=False)\n\n def __init_opt(self):\n param = next(self.parameters())\n opt = torch.randn(1, 10, device=param.device)\n return opt\n\n def forward(self, x, opt_1, opt_2, opt_nested):\n x = F.relu(self.fc1(x))\n x = self.fc2(x)\n if opt_1 is None:\n opt_1 = self.__init_opt()\n if opt_2 is None:\n opt_2 = self.__init_opt()\n if opt_nested is None or not torch.is_tensor(opt_nested):\n opt_nested = self.__init_opt()\n # Test multiple tensors as well as newly created tensors\n # within a struct.\n return x, opt_1, opt_2, {\"tensor\": opt_nested}\n\n model = MyModel().to(self.rank)\n for 
find_unused in [True, False]:\n ddp = DistributedDataParallel(\n model,\n device_ids=[self.rank],\n output_device=self.rank,\n broadcast_buffers=False,\n find_unused_parameters=find_unused,\n )\n\n if static_graph:\n ddp._set_static_graph()\n\n opt = [None for _ in range(3)]\n for i in range(2):\n ddp.zero_grad()\n x = torch.randn(1, 10, device=self.rank)\n out, opt[0], opt[1], opt[2] = ddp(\n x, opt_1=opt[0], opt_2=opt[1], opt_nested=opt[2]\n )\n for i in range(len(opt)):\n if torch.is_tensor(opt[i]):\n self.assertEqual(opt[i].grad_fn, None)\n else:\n self.assertEqual(opt[i][\"tensor\"].grad_fn, None)\n out.mean().backward()\n\n @skip_if_lt_x_gpu(2)\n @sandcastle_skip_if(\n BACKEND != \"nccl\" and BACKEND != \"gloo\",\n \"Only Nccl & Gloo backend support DistributedDataParallel\",\n )\n def test_ddp_new_tensor_in_fwd(self):\n return self._test_ddp_new_tensor_in_fwd(static_graph=False)\n\n @skip_if_lt_x_gpu(2)\n @sandcastle_skip_if(\n BACKEND != \"nccl\" and BACKEND != \"gloo\",\n \"Only Nccl & Gloo backend support DistributedDataParallel\",\n )\n def test_ddp_new_tensor_in_fwd_static_graph(self):\n return self._test_ddp_new_tensor_in_fwd(static_graph=True)\n"
] | [
[
"torch.distributed.scatter",
"torch.zeros",
"torch.testing._internal.common_distributed.nccl_skip_if_lt_x_gpu",
"torch.distributed.algorithms.ddp_comm_hooks.post_localSGD_hook.PostLocalSGDState",
"torch.cuda.amp.autocast",
"torch.distributed.distributed_c10d._get_default_group",
"torch.nn.SyncBatchNorm",
"torch.cuda.stream",
"torch.device",
"torch.distributed.is_gloo_available",
"torch.randn",
"torch.equal",
"torch.ones_like",
"torch.distributed.distributed_c10d.AllreduceOptions",
"torch.empty_like",
"torch.cuda.current_device",
"torch.nn.Conv2d",
"torch.profiler.profile",
"torch.nn.Module",
"torch.nn.Linear",
"torch.distributed.destroy_process_group",
"torch.distributed.broadcast_object_list",
"torch.distributed.all_to_all_single",
"torch.distributed.all_gather",
"torch.distributed.is_nccl_available",
"torch.distributed.broadcast",
"torch.testing._internal.common_distributed.verify_ddp_error_logged",
"torch.randperm",
"torch.distributed.gather_object",
"torch.cuda.is_available",
"torch.distributed.is_mpi_available",
"torch.nn.parallel.DistributedDataParallel._set_params_and_buffers_to_ignore_for_model",
"torch.distributed._get_debug_mode",
"torch.utils.data.distributed.DistributedSampler",
"torch.reshape",
"torch.testing._internal.common_utils.sandcastle_skip_if",
"torch.tensor",
"torch.mul",
"torch.rand",
"torch.distributed.all_to_all",
"torch.distributed.algorithms.ddp_comm_hooks.powerSGD_hook.PowerSGDState",
"torch.LongTensor",
"torch.distributed.send",
"torch.distributed.scatter_object_list",
"torch.zeros_like",
"torch.is_tensor",
"torch.cuda.device_count",
"torch.cuda.manual_seed",
"torch.manual_seed",
"torch.distributed.recv",
"torch.distributed.reduce",
"torch.distributed.gather",
"torch.nn.ReLU",
"torch.load",
"torch.cat",
"torch.nn.Embedding",
"torch.distributed.algorithms.ddp_comm_hooks.default_hooks._OptimizerHookState",
"torch.testing._internal.common_distributed.initialize_temp_directories",
"torch.distributed.irecv",
"torch.save",
"torch.cuda.synchronize",
"torch.ones",
"torch.distributed.barrier",
"torch.distributed.BroadcastOptions",
"torch.nn.parallel.distributed._dump_DDP_relevant_env_vars",
"torch.distributed.algorithms.model_averaging.averagers.PeriodicModelAverager",
"torch.testing._internal.common_distributed.skip_if_lt_x_gpu",
"torch.DoubleTensor",
"torch.distributed.algorithms.ddp_comm_hooks.default_hooks._hook_then_optimizer",
"torch.cuda.amp.GradScaler",
"torch.distributed.get_world_size",
"torch.distributed.new_subgroups_by_enumeration",
"torch.distributed.get_backend",
"torch.distributed.batch_isend_irecv",
"torch.cuda.set_device",
"torch.distributed.ProcessGroupNCCL.Options",
"torch.distributed.broadcast_multigpu",
"torch.distributed._rank_not_in_group",
"torch.backends.cudnn.flags",
"torch.distributed.ProcessGroupNCCL",
"torch.cuda.Stream",
"torch.testing._internal.common_distributed.cleanup_temp_dir",
"torch.nn.functional.softmax",
"torch.testing._internal.common_distributed.with_dist_debug_levels",
"torch.distributed.PrefixStore",
"torch.testing._internal.common_utils.sandcastle_skip",
"torch.no_grad",
"torch.distributed.monitored_barrier",
"torch.distributed.get_rank",
"torch.distributed.P2POp",
"torch.nn.BatchNorm1d",
"torch.testing._internal.common_distributed.requires_nccl_version",
"torch.empty",
"torch.distributed.distributed_c10d.get_world_size",
"torch.distributed.new_subgroups",
"torch.autograd.profiler.profile",
"torch.distributed.Backend",
"torch.nn.parallel.DistributedDataParallel",
"torch.view_as_real",
"torch.testing._internal.common_distributed.captured_output",
"torch.distributed.new_group",
"torch.distributed.all_reduce",
"torch.nn.MSELoss"
]
] |
lacrosse91/scikit-learn | [
"2a67d88258264eb2b6dfad221be8f8d61684dcba",
"2a67d88258264eb2b6dfad221be8f8d61684dcba",
"2a67d88258264eb2b6dfad221be8f8d61684dcba",
"2a67d88258264eb2b6dfad221be8f8d61684dcba",
"2a67d88258264eb2b6dfad221be8f8d61684dcba",
"2a67d88258264eb2b6dfad221be8f8d61684dcba"
] | [
"sklearn/datasets/_twenty_newsgroups.py",
"sklearn/decomposition/_fastica.py",
"sklearn/ensemble/_iforest.py",
"sklearn/linear_model/_bayes.py",
"sklearn/cluster/_bicluster.py",
"sklearn/cluster/tests/test_k_means.py"
] | [
"\"\"\"Caching loader for the 20 newsgroups text classification dataset.\n\n\nThe description of the dataset is available on the official website at:\n\n http://people.csail.mit.edu/jrennie/20Newsgroups/\n\nQuoting the introduction:\n\n The 20 Newsgroups data set is a collection of approximately 20,000\n newsgroup documents, partitioned (nearly) evenly across 20 different\n newsgroups. To the best of my knowledge, it was originally collected\n by Ken Lang, probably for his Newsweeder: Learning to filter netnews\n paper, though he does not explicitly mention this collection. The 20\n newsgroups collection has become a popular data set for experiments\n in text applications of machine learning techniques, such as text\n classification and text clustering.\n\nThis dataset loader will download the recommended \"by date\" variant of the\ndataset and which features a point in time split between the train and\ntest sets. The compressed dataset size is around 14 Mb compressed. Once\nuncompressed the train set is 52 MB and the test set is 34 MB.\n\"\"\"\n# Copyright (c) 2011 Olivier Grisel <[email protected]>\n# License: BSD 3 clause\n\nimport os\nfrom os.path import dirname, join\nimport logging\nimport tarfile\nimport pickle\nimport shutil\nimport re\nimport codecs\n\nimport numpy as np\nimport scipy.sparse as sp\nimport joblib\n\nfrom . import get_data_home\nfrom . import load_files\nfrom ._base import _convert_data_dataframe\nfrom ._base import _pkl_filepath\nfrom ._base import _fetch_remote\nfrom ._base import RemoteFileMetadata\nfrom ..feature_extraction.text import CountVectorizer\nfrom .. import preprocessing\nfrom ..utils import check_random_state, Bunch\n\nlogger = logging.getLogger(__name__)\n\n# The original data can be found at:\n# https://people.csail.mit.edu/jrennie/20Newsgroups/20news-bydate.tar.gz\nARCHIVE = RemoteFileMetadata(\n filename=\"20news-bydate.tar.gz\",\n url=\"https://ndownloader.figshare.com/files/5975967\",\n checksum=(\"8f1b2514ca22a5ade8fbb9cfa5727df9\" \"5fa587f4c87b786e15c759fa66d95610\"),\n)\n\nCACHE_NAME = \"20news-bydate.pkz\"\nTRAIN_FOLDER = \"20news-bydate-train\"\nTEST_FOLDER = \"20news-bydate-test\"\n\n\ndef _download_20newsgroups(target_dir, cache_path):\n \"\"\"Download the 20 newsgroups data and stored it as a zipped pickle.\"\"\"\n train_path = os.path.join(target_dir, TRAIN_FOLDER)\n test_path = os.path.join(target_dir, TEST_FOLDER)\n\n if not os.path.exists(target_dir):\n os.makedirs(target_dir)\n\n logger.info(\"Downloading dataset from %s (14 MB)\", ARCHIVE.url)\n archive_path = _fetch_remote(ARCHIVE, dirname=target_dir)\n\n logger.debug(\"Decompressing %s\", archive_path)\n tarfile.open(archive_path, \"r:gz\").extractall(path=target_dir)\n os.remove(archive_path)\n\n # Store a zipped pickle\n cache = dict(\n train=load_files(train_path, encoding=\"latin1\"),\n test=load_files(test_path, encoding=\"latin1\"),\n )\n compressed_content = codecs.encode(pickle.dumps(cache), \"zlib_codec\")\n with open(cache_path, \"wb\") as f:\n f.write(compressed_content)\n\n shutil.rmtree(target_dir)\n return cache\n\n\ndef strip_newsgroup_header(text):\n \"\"\"\n Given text in \"news\" format, strip the headers, by removing everything\n before the first blank line.\n\n Parameters\n ----------\n text : str\n The text from which to remove the signature block.\n \"\"\"\n _before, _blankline, after = text.partition(\"\\n\\n\")\n return after\n\n\n_QUOTE_RE = re.compile(\n r\"(writes in|writes:|wrote:|says:|said:\" r\"|^In article|^Quoted from|^\\||^>)\"\n)\n\n\ndef 
strip_newsgroup_quoting(text):\n \"\"\"\n Given text in \"news\" format, strip lines beginning with the quote\n characters > or |, plus lines that often introduce a quoted section\n (for example, because they contain the string 'writes:'.)\n\n Parameters\n ----------\n text : str\n The text from which to remove the signature block.\n \"\"\"\n good_lines = [line for line in text.split(\"\\n\") if not _QUOTE_RE.search(line)]\n return \"\\n\".join(good_lines)\n\n\ndef strip_newsgroup_footer(text):\n \"\"\"\n Given text in \"news\" format, attempt to remove a signature block.\n\n As a rough heuristic, we assume that signatures are set apart by either\n a blank line or a line made of hyphens, and that it is the last such line\n in the file (disregarding blank lines at the end).\n\n Parameters\n ----------\n text : str\n The text from which to remove the signature block.\n \"\"\"\n lines = text.strip().split(\"\\n\")\n for line_num in range(len(lines) - 1, -1, -1):\n line = lines[line_num]\n if line.strip().strip(\"-\") == \"\":\n break\n\n if line_num > 0:\n return \"\\n\".join(lines[:line_num])\n else:\n return text\n\n\ndef fetch_20newsgroups(\n *,\n data_home=None,\n subset=\"train\",\n categories=None,\n shuffle=True,\n random_state=42,\n remove=(),\n download_if_missing=True,\n return_X_y=False,\n):\n \"\"\"Load the filenames and data from the 20 newsgroups dataset \\\n(classification).\n\n Download it if necessary.\n\n ================= ==========\n Classes 20\n Samples total 18846\n Dimensionality 1\n Features text\n ================= ==========\n\n Read more in the :ref:`User Guide <20newsgroups_dataset>`.\n\n Parameters\n ----------\n data_home : str, default=None\n Specify a download and cache folder for the datasets. If None,\n all scikit-learn data is stored in '~/scikit_learn_data' subfolders.\n\n subset : {'train', 'test', 'all'}, default='train'\n Select the dataset to load: 'train' for the training set, 'test'\n for the test set, 'all' for both, with shuffled ordering.\n\n categories : array-like, dtype=str or unicode, default=None\n If None (default), load all the categories.\n If not None, list of category names to load (other categories\n ignored).\n\n shuffle : bool, default=True\n Whether or not to shuffle the data: might be important for models that\n make the assumption that the samples are independent and identically\n distributed (i.i.d.), such as stochastic gradient descent.\n\n random_state : int, RandomState instance or None, default=None\n Determines random number generation for dataset shuffling. Pass an int\n for reproducible output across multiple function calls.\n See :term:`Glossary <random_state>`.\n\n remove : tuple, default=()\n May contain any subset of ('headers', 'footers', 'quotes'). Each of\n these are kinds of text that will be detected and removed from the\n newsgroup posts, preventing classifiers from overfitting on\n metadata.\n\n 'headers' removes newsgroup headers, 'footers' removes blocks at the\n ends of posts that look like signatures, and 'quotes' removes lines\n that appear to be quoting another post.\n\n 'headers' follows an exact standard; the other filters are not always\n correct.\n\n download_if_missing : bool, default=True\n If False, raise an IOError if the data is not locally available\n instead of trying to download the data from the source site.\n\n return_X_y : bool, default=False\n If True, returns `(data.data, data.target)` instead of a Bunch\n object.\n\n .. 
versionadded:: 0.22\n\n Returns\n -------\n bunch : :class:`~sklearn.utils.Bunch`\n Dictionary-like object, with the following attributes.\n\n data : list of shape (n_samples,)\n The data list to learn.\n target: ndarray of shape (n_samples,)\n The target labels.\n filenames: list of shape (n_samples,)\n The path to the location of the data.\n DESCR: str\n The full description of the dataset.\n target_names: list of shape (n_classes,)\n The names of target classes.\n\n (data, target) : tuple if `return_X_y=True`\n .. versionadded:: 0.22\n \"\"\"\n\n data_home = get_data_home(data_home=data_home)\n cache_path = _pkl_filepath(data_home, CACHE_NAME)\n twenty_home = os.path.join(data_home, \"20news_home\")\n cache = None\n if os.path.exists(cache_path):\n try:\n with open(cache_path, \"rb\") as f:\n compressed_content = f.read()\n uncompressed_content = codecs.decode(compressed_content, \"zlib_codec\")\n cache = pickle.loads(uncompressed_content)\n except Exception as e:\n print(80 * \"_\")\n print(\"Cache loading failed\")\n print(80 * \"_\")\n print(e)\n\n if cache is None:\n if download_if_missing:\n logger.info(\"Downloading 20news dataset. \" \"This may take a few minutes.\")\n cache = _download_20newsgroups(\n target_dir=twenty_home, cache_path=cache_path\n )\n else:\n raise IOError(\"20Newsgroups dataset not found\")\n\n if subset in (\"train\", \"test\"):\n data = cache[subset]\n elif subset == \"all\":\n data_lst = list()\n target = list()\n filenames = list()\n for subset in (\"train\", \"test\"):\n data = cache[subset]\n data_lst.extend(data.data)\n target.extend(data.target)\n filenames.extend(data.filenames)\n\n data.data = data_lst\n data.target = np.array(target)\n data.filenames = np.array(filenames)\n else:\n raise ValueError(\n \"subset can only be 'train', 'test' or 'all', got '%s'\" % subset\n )\n\n module_path = dirname(__file__)\n with open(join(module_path, \"descr\", \"twenty_newsgroups.rst\")) as rst_file:\n fdescr = rst_file.read()\n\n data.DESCR = fdescr\n\n if \"headers\" in remove:\n data.data = [strip_newsgroup_header(text) for text in data.data]\n if \"footers\" in remove:\n data.data = [strip_newsgroup_footer(text) for text in data.data]\n if \"quotes\" in remove:\n data.data = [strip_newsgroup_quoting(text) for text in data.data]\n\n if categories is not None:\n labels = [(data.target_names.index(cat), cat) for cat in categories]\n # Sort the categories to have the ordering of the labels\n labels.sort()\n labels, categories = zip(*labels)\n mask = np.in1d(data.target, labels)\n data.filenames = data.filenames[mask]\n data.target = data.target[mask]\n # searchsorted to have continuous labels\n data.target = np.searchsorted(labels, data.target)\n data.target_names = list(categories)\n # Use an object array to shuffle: avoids memory copy\n data_lst = np.array(data.data, dtype=object)\n data_lst = data_lst[mask]\n data.data = data_lst.tolist()\n\n if shuffle:\n random_state = check_random_state(random_state)\n indices = np.arange(data.target.shape[0])\n random_state.shuffle(indices)\n data.filenames = data.filenames[indices]\n data.target = data.target[indices]\n # Use an object array to shuffle: avoids memory copy\n data_lst = np.array(data.data, dtype=object)\n data_lst = data_lst[indices]\n data.data = data_lst.tolist()\n\n if return_X_y:\n return data.data, data.target\n\n return data\n\n\ndef fetch_20newsgroups_vectorized(\n *,\n subset=\"train\",\n remove=(),\n data_home=None,\n download_if_missing=True,\n return_X_y=False,\n normalize=True,\n 
as_frame=False,\n):\n \"\"\"Load and vectorize the 20 newsgroups dataset (classification).\n\n Download it if necessary.\n\n This is a convenience function; the transformation is done using the\n default settings for\n :class:`~sklearn.feature_extraction.text.CountVectorizer`. For more\n advanced usage (stopword filtering, n-gram extraction, etc.), combine\n fetch_20newsgroups with a custom\n :class:`~sklearn.feature_extraction.text.CountVectorizer`,\n :class:`~sklearn.feature_extraction.text.HashingVectorizer`,\n :class:`~sklearn.feature_extraction.text.TfidfTransformer` or\n :class:`~sklearn.feature_extraction.text.TfidfVectorizer`.\n\n The resulting counts are normalized using\n :func:`sklearn.preprocessing.normalize` unless normalize is set to False.\n\n ================= ==========\n Classes 20\n Samples total 18846\n Dimensionality 130107\n Features real\n ================= ==========\n\n Read more in the :ref:`User Guide <20newsgroups_dataset>`.\n\n Parameters\n ----------\n subset : {'train', 'test', 'all'}, default='train'\n Select the dataset to load: 'train' for the training set, 'test'\n for the test set, 'all' for both, with shuffled ordering.\n\n remove : tuple, default=()\n May contain any subset of ('headers', 'footers', 'quotes'). Each of\n these are kinds of text that will be detected and removed from the\n newsgroup posts, preventing classifiers from overfitting on\n metadata.\n\n 'headers' removes newsgroup headers, 'footers' removes blocks at the\n ends of posts that look like signatures, and 'quotes' removes lines\n that appear to be quoting another post.\n\n data_home : str, default=None\n Specify an download and cache folder for the datasets. If None,\n all scikit-learn data is stored in '~/scikit_learn_data' subfolders.\n\n download_if_missing : bool, default=True\n If False, raise an IOError if the data is not locally available\n instead of trying to download the data from the source site.\n\n return_X_y : bool, default=False\n If True, returns ``(data.data, data.target)`` instead of a Bunch\n object.\n\n .. versionadded:: 0.20\n\n normalize : bool, default=True\n If True, normalizes each document's feature vector to unit norm using\n :func:`sklearn.preprocessing.normalize`.\n\n .. versionadded:: 0.22\n\n as_frame : bool, default=False\n If True, the data is a pandas DataFrame including columns with\n appropriate dtypes (numeric, string, or categorical). The target is\n a pandas DataFrame or Series depending on the number of\n `target_columns`.\n\n .. versionadded:: 0.24\n\n Returns\n -------\n bunch : :class:`~sklearn.utils.Bunch`\n Dictionary-like object, with the following attributes.\n\n data: {sparse matrix, dataframe} of shape (n_samples, n_features)\n The input data matrix. If ``as_frame`` is `True`, ``data`` is\n a pandas DataFrame with sparse columns.\n target: {ndarray, series} of shape (n_samples,)\n The target labels. If ``as_frame`` is `True`, ``target`` is a\n pandas Series.\n target_names: list of shape (n_classes,)\n The names of target classes.\n DESCR: str\n The full description of the dataset.\n frame: dataframe of shape (n_samples, n_features + 1)\n Only present when `as_frame=True`. Pandas DataFrame with ``data``\n and ``target``.\n\n .. versionadded:: 0.24\n\n (data, target) : tuple if ``return_X_y`` is True\n `data` and `target` would be of the format defined in the `Bunch`\n description above.\n\n .. 
versionadded:: 0.20\n \"\"\"\n data_home = get_data_home(data_home=data_home)\n filebase = \"20newsgroup_vectorized\"\n if remove:\n filebase += \"remove-\" + (\"-\".join(remove))\n target_file = _pkl_filepath(data_home, filebase + \".pkl\")\n\n # we shuffle but use a fixed seed for the memoization\n data_train = fetch_20newsgroups(\n data_home=data_home,\n subset=\"train\",\n categories=None,\n shuffle=True,\n random_state=12,\n remove=remove,\n download_if_missing=download_if_missing,\n )\n\n data_test = fetch_20newsgroups(\n data_home=data_home,\n subset=\"test\",\n categories=None,\n shuffle=True,\n random_state=12,\n remove=remove,\n download_if_missing=download_if_missing,\n )\n\n if os.path.exists(target_file):\n try:\n X_train, X_test, feature_names = joblib.load(target_file)\n except ValueError as e:\n raise ValueError(\n f\"The cached dataset located in {target_file} was fetched \"\n f\"with an older scikit-learn version and it is not compatible \"\n f\"with the scikit-learn version imported. You need to \"\n f\"manually delete the file: {target_file}.\"\n ) from e\n else:\n vectorizer = CountVectorizer(dtype=np.int16)\n X_train = vectorizer.fit_transform(data_train.data).tocsr()\n X_test = vectorizer.transform(data_test.data).tocsr()\n feature_names = vectorizer.get_feature_names()\n\n joblib.dump((X_train, X_test, feature_names), target_file, compress=9)\n\n # the data is stored as int16 for compactness\n # but normalize needs floats\n if normalize:\n X_train = X_train.astype(np.float64)\n X_test = X_test.astype(np.float64)\n preprocessing.normalize(X_train, copy=False)\n preprocessing.normalize(X_test, copy=False)\n\n target_names = data_train.target_names\n\n if subset == \"train\":\n data = X_train\n target = data_train.target\n elif subset == \"test\":\n data = X_test\n target = data_test.target\n elif subset == \"all\":\n data = sp.vstack((X_train, X_test)).tocsr()\n target = np.concatenate((data_train.target, data_test.target))\n else:\n raise ValueError(\n \"%r is not a valid subset: should be one of \"\n \"['train', 'test', 'all']\" % subset\n )\n\n module_path = dirname(__file__)\n with open(join(module_path, \"descr\", \"twenty_newsgroups.rst\")) as rst_file:\n fdescr = rst_file.read()\n\n frame = None\n target_name = [\"category_class\"]\n\n if as_frame:\n frame, data, target = _convert_data_dataframe(\n \"fetch_20newsgroups_vectorized\",\n data,\n target,\n feature_names,\n target_names=target_name,\n sparse_data=True,\n )\n\n if return_X_y:\n return data, target\n\n return Bunch(\n data=data,\n target=target,\n frame=frame,\n target_names=target_names,\n feature_names=feature_names,\n DESCR=fdescr,\n )\n",
"\"\"\"\nPython implementation of the fast ICA algorithms.\n\nReference: Tables 8.3 and 8.4 page 196 in the book:\nIndependent Component Analysis, by Hyvarinen et al.\n\"\"\"\n\n# Authors: Pierre Lafaye de Micheaux, Stefan van der Walt, Gael Varoquaux,\n# Bertrand Thirion, Alexandre Gramfort, Denis A. Engemann\n# License: BSD 3 clause\n\nimport warnings\n\nimport numpy as np\nfrom scipy import linalg\n\nfrom ..base import BaseEstimator, TransformerMixin\nfrom ..exceptions import ConvergenceWarning\n\nfrom ..utils import check_array, as_float_array, check_random_state\nfrom ..utils.validation import check_is_fitted\nfrom ..utils.validation import FLOAT_DTYPES\n\n__all__ = [\"fastica\", \"FastICA\"]\n\n\ndef _gs_decorrelation(w, W, j):\n \"\"\"\n Orthonormalize w wrt the first j rows of W.\n\n Parameters\n ----------\n w : ndarray of shape (n,)\n Array to be orthogonalized\n\n W : ndarray of shape (p, n)\n Null space definition\n\n j : int < p\n The no of (from the first) rows of Null space W wrt which w is\n orthogonalized.\n\n Notes\n -----\n Assumes that W is orthogonal\n w changed in place\n \"\"\"\n w -= np.linalg.multi_dot([w, W[:j].T, W[:j]])\n return w\n\n\ndef _sym_decorrelation(W):\n \"\"\"Symmetric decorrelation\n i.e. W <- (W * W.T) ^{-1/2} * W\n \"\"\"\n s, u = linalg.eigh(np.dot(W, W.T))\n # u (resp. s) contains the eigenvectors (resp. square roots of\n # the eigenvalues) of W * W.T\n return np.linalg.multi_dot([u * (1.0 / np.sqrt(s)), u.T, W])\n\n\ndef _ica_def(X, tol, g, fun_args, max_iter, w_init):\n \"\"\"Deflationary FastICA using fun approx to neg-entropy function\n\n Used internally by FastICA.\n \"\"\"\n\n n_components = w_init.shape[0]\n W = np.zeros((n_components, n_components), dtype=X.dtype)\n n_iter = []\n\n # j is the index of the extracted component\n for j in range(n_components):\n w = w_init[j, :].copy()\n w /= np.sqrt((w ** 2).sum())\n\n for i in range(max_iter):\n gwtx, g_wtx = g(np.dot(w.T, X), fun_args)\n\n w1 = (X * gwtx).mean(axis=1) - g_wtx.mean() * w\n\n _gs_decorrelation(w1, W, j)\n\n w1 /= np.sqrt((w1 ** 2).sum())\n\n lim = np.abs(np.abs((w1 * w).sum()) - 1)\n w = w1\n if lim < tol:\n break\n\n n_iter.append(i + 1)\n W[j, :] = w\n\n return W, max(n_iter)\n\n\ndef _ica_par(X, tol, g, fun_args, max_iter, w_init):\n \"\"\"Parallel FastICA.\n\n Used internally by FastICA --main loop\n\n \"\"\"\n W = _sym_decorrelation(w_init)\n del w_init\n p_ = float(X.shape[1])\n for ii in range(max_iter):\n gwtx, g_wtx = g(np.dot(W, X), fun_args)\n W1 = _sym_decorrelation(np.dot(gwtx, X.T) / p_ - g_wtx[:, np.newaxis] * W)\n del gwtx, g_wtx\n # builtin max, abs are faster than numpy counter parts.\n lim = max(abs(abs(np.diag(np.dot(W1, W.T))) - 1))\n W = W1\n if lim < tol:\n break\n else:\n warnings.warn(\n \"FastICA did not converge. 
Consider increasing \"\n \"tolerance or the maximum number of iterations.\",\n ConvergenceWarning,\n )\n\n return W, ii + 1\n\n\n# Some standard non-linear functions.\n# XXX: these should be optimized, as they can be a bottleneck.\ndef _logcosh(x, fun_args=None):\n alpha = fun_args.get(\"alpha\", 1.0) # comment it out?\n\n x *= alpha\n gx = np.tanh(x, x) # apply the tanh inplace\n g_x = np.empty(x.shape[0])\n # XXX compute in chunks to avoid extra allocation\n for i, gx_i in enumerate(gx): # please don't vectorize.\n g_x[i] = (alpha * (1 - gx_i ** 2)).mean()\n return gx, g_x\n\n\ndef _exp(x, fun_args):\n exp = np.exp(-(x ** 2) / 2)\n gx = x * exp\n g_x = (1 - x ** 2) * exp\n return gx, g_x.mean(axis=-1)\n\n\ndef _cube(x, fun_args):\n return x ** 3, (3 * x ** 2).mean(axis=-1)\n\n\ndef fastica(\n X,\n n_components=None,\n *,\n algorithm=\"parallel\",\n whiten=True,\n fun=\"logcosh\",\n fun_args=None,\n max_iter=200,\n tol=1e-04,\n w_init=None,\n random_state=None,\n return_X_mean=False,\n compute_sources=True,\n return_n_iter=False,\n):\n \"\"\"Perform Fast Independent Component Analysis.\n\n Read more in the :ref:`User Guide <ICA>`.\n\n Parameters\n ----------\n X : array-like of shape (n_samples, n_features)\n Training vector, where n_samples is the number of samples and\n n_features is the number of features.\n\n n_components : int, default=None\n Number of components to extract. If None no dimension reduction\n is performed.\n\n algorithm : {'parallel', 'deflation'}, default='parallel'\n Apply a parallel or deflational FASTICA algorithm.\n\n whiten : bool, default=True\n If True perform an initial whitening of the data.\n If False, the data is assumed to have already been\n preprocessed: it should be centered, normed and white.\n Otherwise you will get incorrect results.\n In this case the parameter n_components will be ignored.\n\n fun : {'logcosh', 'exp', 'cube'} or callable, default='logcosh'\n The functional form of the G function used in the\n approximation to neg-entropy. Could be either 'logcosh', 'exp',\n or 'cube'.\n You can also provide your own function. It should return a tuple\n containing the value of the function, and of its derivative, in the\n point. The derivative should be averaged along its last dimension.\n Example:\n\n def my_g(x):\n return x ** 3, np.mean(3 * x ** 2, axis=-1)\n\n fun_args : dict, default=None\n Arguments to send to the functional form.\n If empty or None and if fun='logcosh', fun_args will take value\n {'alpha' : 1.0}\n\n max_iter : int, default=200\n Maximum number of iterations to perform.\n\n tol : float, default=1e-04\n A positive scalar giving the tolerance at which the\n un-mixing matrix is considered to have converged.\n\n w_init : ndarray of shape (n_components, n_components), default=None\n Initial un-mixing array of dimension (n.comp,n.comp).\n If None (default) then an array of normal r.v.'s is used.\n\n random_state : int, RandomState instance or None, default=None\n Used to initialize ``w_init`` when not specified, with a\n normal distribution. Pass an int, for reproducible results\n across multiple function calls.\n See :term:`Glossary <random_state>`.\n\n return_X_mean : bool, default=False\n If True, X_mean is returned too.\n\n compute_sources : bool, default=True\n If False, sources are not computed, but only the rotation matrix.\n This can save memory when working with big data. 
Defaults to True.\n\n return_n_iter : bool, default=False\n Whether or not to return the number of iterations.\n\n Returns\n -------\n K : ndarray of shape (n_components, n_features) or None\n If whiten is 'True', K is the pre-whitening matrix that projects data\n onto the first n_components principal components. If whiten is 'False',\n K is 'None'.\n\n W : ndarray of shape (n_components, n_components)\n The square matrix that unmixes the data after whitening.\n The mixing matrix is the pseudo-inverse of matrix ``W K``\n if K is not None, else it is the inverse of W.\n\n S : ndarray of shape (n_samples, n_components) or None\n Estimated source matrix\n\n X_mean : ndarray of shape (n_features,)\n The mean over features. Returned only if return_X_mean is True.\n\n n_iter : int\n If the algorithm is \"deflation\", n_iter is the\n maximum number of iterations run across all components. Else\n they are just the number of iterations taken to converge. This is\n returned only when return_n_iter is set to `True`.\n\n Notes\n -----\n\n The data matrix X is considered to be a linear combination of\n non-Gaussian (independent) components i.e. X = AS where columns of S\n contain the independent components and A is a linear mixing\n matrix. In short ICA attempts to `un-mix' the data by estimating an\n un-mixing matrix W where ``S = W K X.``\n While FastICA was proposed to estimate as many sources\n as features, it is possible to estimate less by setting\n n_components < n_features. It this case K is not a square matrix\n and the estimated A is the pseudo-inverse of ``W K``.\n\n This implementation was originally made for data of shape\n [n_features, n_samples]. Now the input is transposed\n before the algorithm is applied. This makes it slightly\n faster for Fortran-ordered input.\n\n Implemented using FastICA:\n *A. Hyvarinen and E. Oja, Independent Component Analysis:\n Algorithms and Applications, Neural Networks, 13(4-5), 2000,\n pp. 411-430*\n\n \"\"\"\n\n est = FastICA(\n n_components=n_components,\n algorithm=algorithm,\n whiten=whiten,\n fun=fun,\n fun_args=fun_args,\n max_iter=max_iter,\n tol=tol,\n w_init=w_init,\n random_state=random_state,\n )\n sources = est._fit(X, compute_sources=compute_sources)\n\n if whiten:\n if return_X_mean:\n if return_n_iter:\n return (est.whitening_, est._unmixing, sources, est.mean_, est.n_iter_)\n else:\n return est.whitening_, est._unmixing, sources, est.mean_\n else:\n if return_n_iter:\n return est.whitening_, est._unmixing, sources, est.n_iter_\n else:\n return est.whitening_, est._unmixing, sources\n\n else:\n if return_X_mean:\n if return_n_iter:\n return None, est._unmixing, sources, None, est.n_iter_\n else:\n return None, est._unmixing, sources, None\n else:\n if return_n_iter:\n return None, est._unmixing, sources, est.n_iter_\n else:\n return None, est._unmixing, sources\n\n\nclass FastICA(TransformerMixin, BaseEstimator):\n \"\"\"FastICA: a fast algorithm for Independent Component Analysis.\n\n Read more in the :ref:`User Guide <ICA>`.\n\n Parameters\n ----------\n n_components : int, default=None\n Number of components to use. 
If None is passed, all are used.\n\n algorithm : {'parallel', 'deflation'}, default='parallel'\n Apply parallel or deflational algorithm for FastICA.\n\n whiten : bool, default=True\n If whiten is false, the data is already considered to be\n whitened, and no whitening is performed.\n\n fun : {'logcosh', 'exp', 'cube'} or callable, default='logcosh'\n The functional form of the G function used in the\n approximation to neg-entropy. Could be either 'logcosh', 'exp',\n or 'cube'.\n You can also provide your own function. It should return a tuple\n containing the value of the function, and of its derivative, in the\n point. Example::\n\n def my_g(x):\n return x ** 3, (3 * x ** 2).mean(axis=-1)\n\n fun_args : dict, default=None\n Arguments to send to the functional form.\n If empty and if fun='logcosh', fun_args will take value\n {'alpha' : 1.0}.\n\n max_iter : int, default=200\n Maximum number of iterations during fit.\n\n tol : float, default=1e-4\n Tolerance on update at each iteration.\n\n w_init : ndarray of shape (n_components, n_components), default=None\n The mixing matrix to be used to initialize the algorithm.\n\n random_state : int, RandomState instance or None, default=None\n Used to initialize ``w_init`` when not specified, with a\n normal distribution. Pass an int, for reproducible results\n across multiple function calls.\n See :term:`Glossary <random_state>`.\n\n Attributes\n ----------\n components_ : ndarray of shape (n_components, n_features)\n The linear operator to apply to the data to get the independent\n sources. This is equal to the unmixing matrix when ``whiten`` is\n False, and equal to ``np.dot(unmixing_matrix, self.whitening_)`` when\n ``whiten`` is True.\n\n mixing_ : ndarray of shape (n_features, n_components)\n The pseudo-inverse of ``components_``. It is the linear operator\n that maps independent sources to the data.\n\n mean_ : ndarray of shape(n_features,)\n The mean over features. Only set if `self.whiten` is True.\n\n n_features_in_ : int\n Number of features seen during :term:`fit`.\n\n .. versionadded:: 0.24\n\n n_iter_ : int\n If the algorithm is \"deflation\", n_iter is the\n maximum number of iterations run across all components. Else\n they are just the number of iterations taken to converge.\n\n whitening_ : ndarray of shape (n_components, n_features)\n Only set if whiten is 'True'. This is the pre-whitening matrix\n that projects data onto the first `n_components` principal components.\n\n Examples\n --------\n >>> from sklearn.datasets import load_digits\n >>> from sklearn.decomposition import FastICA\n >>> X, _ = load_digits(return_X_y=True)\n >>> transformer = FastICA(n_components=7,\n ... random_state=0)\n >>> X_transformed = transformer.fit_transform(X)\n >>> X_transformed.shape\n (1797, 7)\n\n Notes\n -----\n Implementation based on\n *A. Hyvarinen and E. Oja, Independent Component Analysis:\n Algorithms and Applications, Neural Networks, 13(4-5), 2000,\n pp. 
411-430*\n\n \"\"\"\n\n def __init__(\n self,\n n_components=None,\n *,\n algorithm=\"parallel\",\n whiten=True,\n fun=\"logcosh\",\n fun_args=None,\n max_iter=200,\n tol=1e-4,\n w_init=None,\n random_state=None,\n ):\n super().__init__()\n if max_iter < 1:\n raise ValueError(\n \"max_iter should be greater than 1, got \"\n \"(max_iter={})\".format(max_iter)\n )\n self.n_components = n_components\n self.algorithm = algorithm\n self.whiten = whiten\n self.fun = fun\n self.fun_args = fun_args\n self.max_iter = max_iter\n self.tol = tol\n self.w_init = w_init\n self.random_state = random_state\n\n def _fit(self, X, compute_sources=False):\n \"\"\"Fit the model\n\n Parameters\n ----------\n X : array-like of shape (n_samples, n_features)\n Training data, where n_samples is the number of samples\n and n_features is the number of features.\n\n compute_sources : bool, default=False\n If False, sources are not computes but only the rotation matrix.\n This can save memory when working with big data. Defaults to False.\n\n Returns\n -------\n X_new : ndarray of shape (n_samples, n_components)\n \"\"\"\n XT = self._validate_data(\n X, copy=self.whiten, dtype=FLOAT_DTYPES, ensure_min_samples=2\n ).T\n fun_args = {} if self.fun_args is None else self.fun_args\n random_state = check_random_state(self.random_state)\n\n alpha = fun_args.get(\"alpha\", 1.0)\n if not 1 <= alpha <= 2:\n raise ValueError(\"alpha must be in [1,2]\")\n\n if self.fun == \"logcosh\":\n g = _logcosh\n elif self.fun == \"exp\":\n g = _exp\n elif self.fun == \"cube\":\n g = _cube\n elif callable(self.fun):\n\n def g(x, fun_args):\n return self.fun(x, **fun_args)\n\n else:\n exc = ValueError if isinstance(self.fun, str) else TypeError\n raise exc(\n \"Unknown function %r;\"\n \" should be one of 'logcosh', 'exp', 'cube' or callable\" % self.fun\n )\n\n n_features, n_samples = XT.shape\n\n n_components = self.n_components\n if not self.whiten and n_components is not None:\n n_components = None\n warnings.warn(\"Ignoring n_components with whiten=False.\")\n\n if n_components is None:\n n_components = min(n_samples, n_features)\n if n_components > min(n_samples, n_features):\n n_components = min(n_samples, n_features)\n warnings.warn(\n \"n_components is too large: it will be set to %s\" % n_components\n )\n\n if self.whiten:\n # Centering the features of X\n X_mean = XT.mean(axis=-1)\n XT -= X_mean[:, np.newaxis]\n\n # Whitening and preprocessing by PCA\n u, d, _ = linalg.svd(XT, full_matrices=False, check_finite=False)\n\n del _\n K = (u / d).T[:n_components] # see (6.33) p.140\n del u, d\n X1 = np.dot(K, XT)\n # see (13.6) p.267 Here X1 is white and data\n # in X has been projected onto a subspace by PCA\n X1 *= np.sqrt(n_samples)\n else:\n # X must be casted to floats to avoid typing issues with numpy\n # 2.0 and the line below\n X1 = as_float_array(XT, copy=False) # copy has been taken care of\n\n w_init = self.w_init\n if w_init is None:\n w_init = np.asarray(\n random_state.normal(size=(n_components, n_components)), dtype=X1.dtype\n )\n\n else:\n w_init = np.asarray(w_init)\n if w_init.shape != (n_components, n_components):\n raise ValueError(\n \"w_init has invalid shape -- should be %(shape)s\"\n % {\"shape\": (n_components, n_components)}\n )\n\n kwargs = {\n \"tol\": self.tol,\n \"g\": g,\n \"fun_args\": fun_args,\n \"max_iter\": self.max_iter,\n \"w_init\": w_init,\n }\n\n if self.algorithm == \"parallel\":\n W, n_iter = _ica_par(X1, **kwargs)\n elif self.algorithm == \"deflation\":\n W, n_iter = _ica_def(X1, **kwargs)\n 
else:\n raise ValueError(\n \"Invalid algorithm: must be either `parallel` or\" \" `deflation`.\"\n )\n del X1\n\n if compute_sources:\n if self.whiten:\n S = np.linalg.multi_dot([W, K, XT]).T\n else:\n S = np.dot(W, XT).T\n else:\n S = None\n\n self.n_iter_ = n_iter\n\n if self.whiten:\n self.components_ = np.dot(W, K)\n self.mean_ = X_mean\n self.whitening_ = K\n else:\n self.components_ = W\n\n self.mixing_ = linalg.pinv(self.components_, check_finite=False)\n self._unmixing = W\n\n return S\n\n def fit_transform(self, X, y=None):\n \"\"\"Fit the model and recover the sources from X.\n\n Parameters\n ----------\n X : array-like of shape (n_samples, n_features)\n Training data, where n_samples is the number of samples\n and n_features is the number of features.\n\n y : Ignored\n\n Returns\n -------\n X_new : ndarray of shape (n_samples, n_components)\n \"\"\"\n return self._fit(X, compute_sources=True)\n\n def fit(self, X, y=None):\n \"\"\"Fit the model to X.\n\n Parameters\n ----------\n X : array-like of shape (n_samples, n_features)\n Training data, where n_samples is the number of samples\n and n_features is the number of features.\n\n y : Ignored\n\n Returns\n -------\n self\n \"\"\"\n self._fit(X, compute_sources=False)\n return self\n\n def transform(self, X, copy=True):\n \"\"\"Recover the sources from X (apply the unmixing matrix).\n\n Parameters\n ----------\n X : array-like of shape (n_samples, n_features)\n Data to transform, where n_samples is the number of samples\n and n_features is the number of features.\n\n copy : bool, default=True\n If False, data passed to fit can be overwritten. Defaults to True.\n\n Returns\n -------\n X_new : ndarray of shape (n_samples, n_components)\n \"\"\"\n check_is_fitted(self)\n\n X = self._validate_data(\n X, copy=(copy and self.whiten), dtype=FLOAT_DTYPES, reset=False\n )\n if self.whiten:\n X -= self.mean_\n\n return np.dot(X, self.components_.T)\n\n def inverse_transform(self, X, copy=True):\n \"\"\"Transform the sources back to the mixed data (apply mixing matrix).\n\n Parameters\n ----------\n X : array-like of shape (n_samples, n_components)\n Sources, where n_samples is the number of samples\n and n_components is the number of components.\n copy : bool, default=True\n If False, data passed to fit are overwritten. Defaults to True.\n\n Returns\n -------\n X_new : ndarray of shape (n_samples, n_features)\n \"\"\"\n check_is_fitted(self)\n\n X = check_array(X, copy=(copy and self.whiten), dtype=FLOAT_DTYPES)\n X = np.dot(X, self.mixing_.T)\n if self.whiten:\n X += self.mean_\n\n return X\n",
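A small illustration (again an addition, not part of the dumped module) of the FastICA estimator defined above: two synthetic sources are combined with an arbitrary, made-up mixing matrix and then recovered.

import numpy as np
from sklearn.decomposition import FastICA

rng = np.random.RandomState(0)
time = np.linspace(0, 8, 2000)
s1 = np.sin(2 * time)                      # sinusoidal source
s2 = np.sign(np.sin(3 * time))             # square-wave source
S = np.c_[s1, s2]
A = np.array([[1.0, 0.5], [0.5, 2.0]])     # mixing matrix chosen only for this demo
X = S @ A.T                                # observed mixtures, shape (2000, 2)

ica = FastICA(n_components=2, random_state=0)
S_est = ica.fit_transform(X)               # estimated sources (up to sign/scale/order)
X_back = ica.inverse_transform(S_est)      # remixing reconstructs the observations
print(S_est.shape, np.allclose(X, X_back))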
"# Authors: Nicolas Goix <[email protected]>\n# Alexandre Gramfort <[email protected]>\n# License: BSD 3 clause\n\nimport numbers\nimport numpy as np\nfrom scipy.sparse import issparse\nfrom warnings import warn\n\nfrom ..tree import ExtraTreeRegressor\nfrom ..utils import (\n check_random_state,\n check_array,\n gen_batches,\n get_chunk_n_rows,\n)\nfrom ..utils.fixes import _joblib_parallel_args\nfrom ..utils.validation import check_is_fitted, _num_samples\nfrom ..base import OutlierMixin\n\nfrom ._bagging import BaseBagging\n\n__all__ = [\"IsolationForest\"]\n\n\nclass IsolationForest(OutlierMixin, BaseBagging):\n \"\"\"\n Isolation Forest Algorithm.\n\n Return the anomaly score of each sample using the IsolationForest algorithm\n\n The IsolationForest 'isolates' observations by randomly selecting a feature\n and then randomly selecting a split value between the maximum and minimum\n values of the selected feature.\n\n Since recursive partitioning can be represented by a tree structure, the\n number of splittings required to isolate a sample is equivalent to the path\n length from the root node to the terminating node.\n\n This path length, averaged over a forest of such random trees, is a\n measure of normality and our decision function.\n\n Random partitioning produces noticeably shorter paths for anomalies.\n Hence, when a forest of random trees collectively produce shorter path\n lengths for particular samples, they are highly likely to be anomalies.\n\n Read more in the :ref:`User Guide <isolation_forest>`.\n\n .. versionadded:: 0.18\n\n Parameters\n ----------\n n_estimators : int, default=100\n The number of base estimators in the ensemble.\n\n max_samples : \"auto\", int or float, default=\"auto\"\n The number of samples to draw from X to train each base estimator.\n - If int, then draw `max_samples` samples.\n - If float, then draw `max_samples * X.shape[0]` samples.\n - If \"auto\", then `max_samples=min(256, n_samples)`.\n\n If max_samples is larger than the number of samples provided,\n all samples will be used for all trees (no sampling).\n\n contamination : 'auto' or float, default='auto'\n The amount of contamination of the data set, i.e. the proportion\n of outliers in the data set. Used when fitting to define the threshold\n on the scores of the samples.\n\n - If 'auto', the threshold is determined as in the\n original paper.\n - If float, the contamination should be in the range (0, 0.5].\n\n .. versionchanged:: 0.22\n The default value of ``contamination`` changed from 0.1\n to ``'auto'``.\n\n max_features : int or float, default=1.0\n The number of features to draw from X to train each base estimator.\n\n - If int, then draw `max_features` features.\n - If float, then draw `max_features * X.shape[1]` features.\n\n bootstrap : bool, default=False\n If True, individual trees are fit on random subsets of the training\n data sampled with replacement. If False, sampling without replacement\n is performed.\n\n n_jobs : int, default=None\n The number of jobs to run in parallel for both :meth:`fit` and\n :meth:`predict`. ``None`` means 1 unless in a\n :obj:`joblib.parallel_backend` context. ``-1`` means using all\n processors. 
See :term:`Glossary <n_jobs>` for more details.\n\n random_state : int, RandomState instance or None, default=None\n Controls the pseudo-randomness of the selection of the feature\n and split values for each branching step and each tree in the forest.\n\n Pass an int for reproducible results across multiple function calls.\n See :term:`Glossary <random_state>`.\n\n verbose : int, default=0\n Controls the verbosity of the tree building process.\n\n warm_start : bool, default=False\n When set to ``True``, reuse the solution of the previous call to fit\n and add more estimators to the ensemble, otherwise, just fit a whole\n new forest. See :term:`the Glossary <warm_start>`.\n\n .. versionadded:: 0.21\n\n Attributes\n ----------\n base_estimator_ : ExtraTreeRegressor instance\n The child estimator template used to create the collection of\n fitted sub-estimators.\n\n estimators_ : list of ExtraTreeRegressor instances\n The collection of fitted sub-estimators.\n\n estimators_features_ : list of ndarray\n The subset of drawn features for each base estimator.\n\n estimators_samples_ : list of ndarray\n The subset of drawn samples (i.e., the in-bag samples) for each base\n estimator.\n\n max_samples_ : int\n The actual number of samples.\n\n offset_ : float\n Offset used to define the decision function from the raw scores. We\n have the relation: ``decision_function = score_samples - offset_``.\n ``offset_`` is defined as follows. When the contamination parameter is\n set to \"auto\", the offset is equal to -0.5 as the scores of inliers are\n close to 0 and the scores of outliers are close to -1. When a\n contamination parameter different than \"auto\" is provided, the offset\n is defined in such a way we obtain the expected number of outliers\n (samples with decision function < 0) in training.\n\n .. versionadded:: 0.20\n\n n_features_ : int\n The number of features when ``fit`` is performed.\n\n .. deprecated:: 1.0\n Attribute `n_features_` was deprecated in version 1.0 and will be\n removed in 1.2. Use `n_features_in_` instead.\n\n n_features_in_ : int\n Number of features seen during :term:`fit`.\n\n .. versionadded:: 0.24\n\n Notes\n -----\n The implementation is based on an ensemble of ExtraTreeRegressor. The\n maximum depth of each tree is set to ``ceil(log_2(n))`` where\n :math:`n` is the number of samples used to build the tree\n (see (Liu et al., 2008) for more details).\n\n References\n ----------\n .. [1] Liu, Fei Tony, Ting, Kai Ming and Zhou, Zhi-Hua. \"Isolation forest.\"\n Data Mining, 2008. ICDM'08. Eighth IEEE International Conference on.\n .. [2] Liu, Fei Tony, Ting, Kai Ming and Zhou, Zhi-Hua. 
\"Isolation-based\n anomaly detection.\" ACM Transactions on Knowledge Discovery from\n Data (TKDD) 6.1 (2012): 3.\n\n See Also\n ----------\n sklearn.covariance.EllipticEnvelope : An object for detecting outliers in a\n Gaussian distributed dataset.\n sklearn.svm.OneClassSVM : Unsupervised Outlier Detection.\n Estimate the support of a high-dimensional distribution.\n The implementation is based on libsvm.\n sklearn.neighbors.LocalOutlierFactor : Unsupervised Outlier Detection\n using Local Outlier Factor (LOF).\n\n Examples\n --------\n >>> from sklearn.ensemble import IsolationForest\n >>> X = [[-1.1], [0.3], [0.5], [100]]\n >>> clf = IsolationForest(random_state=0).fit(X)\n >>> clf.predict([[0.1], [0], [90]])\n array([ 1, 1, -1])\n \"\"\"\n\n def __init__(\n self,\n *,\n n_estimators=100,\n max_samples=\"auto\",\n contamination=\"auto\",\n max_features=1.0,\n bootstrap=False,\n n_jobs=None,\n random_state=None,\n verbose=0,\n warm_start=False,\n ):\n super().__init__(\n base_estimator=ExtraTreeRegressor(\n max_features=1, splitter=\"random\", random_state=random_state\n ),\n # here above max_features has no links with self.max_features\n bootstrap=bootstrap,\n bootstrap_features=False,\n n_estimators=n_estimators,\n max_samples=max_samples,\n max_features=max_features,\n warm_start=warm_start,\n n_jobs=n_jobs,\n random_state=random_state,\n verbose=verbose,\n )\n\n self.contamination = contamination\n\n def _set_oob_score(self, X, y):\n raise NotImplementedError(\"OOB score not supported by iforest\")\n\n def _parallel_args(self):\n # ExtraTreeRegressor releases the GIL, so it's more efficient to use\n # a thread-based backend rather than a process-based backend so as\n # to avoid suffering from communication overhead and extra memory\n # copies.\n return _joblib_parallel_args(prefer=\"threads\")\n\n def fit(self, X, y=None, sample_weight=None):\n \"\"\"\n Fit estimator.\n\n Parameters\n ----------\n X : {array-like, sparse matrix} of shape (n_samples, n_features)\n The input samples. Use ``dtype=np.float32`` for maximum\n efficiency. Sparse matrices are also supported, use sparse\n ``csc_matrix`` for maximum efficiency.\n\n y : Ignored\n Not used, present for API consistency by convention.\n\n sample_weight : array-like of shape (n_samples,), default=None\n Sample weights. If None, then samples are equally weighted.\n\n Returns\n -------\n self : object\n Fitted estimator.\n \"\"\"\n X = self._validate_data(X, accept_sparse=[\"csc\"])\n if issparse(X):\n # Pre-sort indices to avoid that each individual tree of the\n # ensemble sorts the indices.\n X.sort_indices()\n\n rnd = check_random_state(self.random_state)\n y = rnd.uniform(size=X.shape[0])\n\n # ensure that max_sample is in [1, n_samples]:\n n_samples = X.shape[0]\n\n if self.contamination != \"auto\":\n if not (0.0 < self.contamination <= 0.5):\n raise ValueError(\n \"contamination must be in (0, 0.5], \" \"got: %f\" % self.contamination\n )\n\n if isinstance(self.max_samples, str):\n if self.max_samples == \"auto\":\n max_samples = min(256, n_samples)\n else:\n raise ValueError(\n \"max_samples (%s) is not supported.\"\n 'Valid choices are: \"auto\", int or'\n \"float\" % self.max_samples\n )\n\n elif isinstance(self.max_samples, numbers.Integral):\n if self.max_samples > n_samples:\n warn(\n \"max_samples (%s) is greater than the \"\n \"total number of samples (%s). 
max_samples \"\n \"will be set to n_samples for estimation.\"\n % (self.max_samples, n_samples)\n )\n max_samples = n_samples\n else:\n max_samples = self.max_samples\n else: # float\n if not 0.0 < self.max_samples <= 1.0:\n raise ValueError(\n \"max_samples must be in (0, 1], got %r\" % self.max_samples\n )\n max_samples = int(self.max_samples * X.shape[0])\n\n self.max_samples_ = max_samples\n max_depth = int(np.ceil(np.log2(max(max_samples, 2))))\n super()._fit(\n X, y, max_samples, max_depth=max_depth, sample_weight=sample_weight\n )\n\n if self.contamination == \"auto\":\n # 0.5 plays a special role as described in the original paper.\n # we take the opposite as we consider the opposite of their score.\n self.offset_ = -0.5\n return self\n\n # else, define offset_ wrt contamination parameter\n self.offset_ = np.percentile(self.score_samples(X), 100.0 * self.contamination)\n\n return self\n\n def predict(self, X):\n \"\"\"\n Predict if a particular sample is an outlier or not.\n\n Parameters\n ----------\n X : {array-like, sparse matrix} of shape (n_samples, n_features)\n The input samples. Internally, it will be converted to\n ``dtype=np.float32`` and if a sparse matrix is provided\n to a sparse ``csr_matrix``.\n\n Returns\n -------\n is_inlier : ndarray of shape (n_samples,)\n For each observation, tells whether or not (+1 or -1) it should\n be considered as an inlier according to the fitted model.\n \"\"\"\n check_is_fitted(self)\n X = self._validate_data(X, accept_sparse=\"csr\", reset=False)\n is_inlier = np.ones(X.shape[0], dtype=int)\n is_inlier[self.decision_function(X) < 0] = -1\n return is_inlier\n\n def decision_function(self, X):\n \"\"\"\n Average anomaly score of X of the base classifiers.\n\n The anomaly score of an input sample is computed as\n the mean anomaly score of the trees in the forest.\n\n The measure of normality of an observation given a tree is the depth\n of the leaf containing this observation, which is equivalent to\n the number of splittings required to isolate this point. In case of\n several observations n_left in the leaf, the average path length of\n a n_left samples isolation tree is added.\n\n Parameters\n ----------\n X : {array-like, sparse matrix} of shape (n_samples, n_features)\n The input samples. Internally, it will be converted to\n ``dtype=np.float32`` and if a sparse matrix is provided\n to a sparse ``csr_matrix``.\n\n Returns\n -------\n scores : ndarray of shape (n_samples,)\n The anomaly score of the input samples.\n The lower, the more abnormal. Negative scores represent outliers,\n positive scores represent inliers.\n \"\"\"\n # We subtract self.offset_ to make 0 be the threshold value for being\n # an outlier:\n\n return self.score_samples(X) - self.offset_\n\n def score_samples(self, X):\n \"\"\"\n Opposite of the anomaly score defined in the original paper.\n\n The anomaly score of an input sample is computed as\n the mean anomaly score of the trees in the forest.\n\n The measure of normality of an observation given a tree is the depth\n of the leaf containing this observation, which is equivalent to\n the number of splittings required to isolate this point. 
In case of\n several observations n_left in the leaf, the average path length of\n a n_left samples isolation tree is added.\n\n Parameters\n ----------\n X : {array-like, sparse matrix} of shape (n_samples, n_features)\n The input samples.\n\n Returns\n -------\n scores : ndarray of shape (n_samples,)\n The anomaly score of the input samples.\n The lower, the more abnormal.\n \"\"\"\n # code structure from ForestClassifier/predict_proba\n\n check_is_fitted(self)\n\n # Check data\n X = self._validate_data(X, accept_sparse=\"csr\", reset=False)\n\n # Take the opposite of the scores as bigger is better (here less\n # abnormal)\n return -self._compute_chunked_score_samples(X)\n\n def _compute_chunked_score_samples(self, X):\n\n n_samples = _num_samples(X)\n\n if self._max_features == X.shape[1]:\n subsample_features = False\n else:\n subsample_features = True\n\n # We get as many rows as possible within our working_memory budget\n # (defined by sklearn.get_config()['working_memory']) to store\n # self._max_features in each row during computation.\n #\n # Note:\n # - this will get at least 1 row, even if 1 row of score will\n # exceed working_memory.\n # - this does only account for temporary memory usage while loading\n # the data needed to compute the scores -- the returned scores\n # themselves are 1D.\n\n chunk_n_rows = get_chunk_n_rows(\n row_bytes=16 * self._max_features, max_n_rows=n_samples\n )\n slices = gen_batches(n_samples, chunk_n_rows)\n\n scores = np.zeros(n_samples, order=\"f\")\n\n for sl in slices:\n # compute score on the slices of test samples:\n scores[sl] = self._compute_score_samples(X[sl], subsample_features)\n\n return scores\n\n def _compute_score_samples(self, X, subsample_features):\n \"\"\"\n Compute the score of each samples in X going through the extra trees.\n\n Parameters\n ----------\n X : array-like or sparse matrix\n Data matrix.\n\n subsample_features : bool\n Whether features should be subsampled.\n \"\"\"\n n_samples = X.shape[0]\n\n depths = np.zeros(n_samples, order=\"f\")\n\n for tree, features in zip(self.estimators_, self.estimators_features_):\n X_subset = X[:, features] if subsample_features else X\n\n leaves_index = tree.apply(X_subset)\n node_indicator = tree.decision_path(X_subset)\n n_samples_leaf = tree.tree_.n_node_samples[leaves_index]\n\n depths += (\n np.ravel(node_indicator.sum(axis=1))\n + _average_path_length(n_samples_leaf)\n - 1.0\n )\n denominator = len(self.estimators_) * _average_path_length([self.max_samples_])\n scores = 2 ** (\n # For a single training sample, denominator and depth are 0.\n # Therefore, we set the score manually to 1.\n -np.divide(\n depths, denominator, out=np.ones_like(depths), where=denominator != 0\n )\n )\n return scores\n\n def _more_tags(self):\n return {\n \"_xfail_checks\": {\n \"check_sample_weights_invariance\": (\n \"zero sample_weight is not equivalent to removing samples\"\n ),\n }\n }\n\n\ndef _average_path_length(n_samples_leaf):\n \"\"\"\n The average path length in a n_samples iTree, which is equal to\n the average path length of an unsuccessful BST search since the\n latter has the same structure as an isolation tree.\n Parameters\n ----------\n n_samples_leaf : array-like of shape (n_samples,)\n The number of training samples in each test sample leaf, for\n each estimators.\n\n Returns\n -------\n average_path_length : ndarray of shape (n_samples,)\n \"\"\"\n\n n_samples_leaf = check_array(n_samples_leaf, ensure_2d=False)\n\n n_samples_leaf_shape = n_samples_leaf.shape\n n_samples_leaf = 
n_samples_leaf.reshape((1, -1))\n average_path_length = np.zeros(n_samples_leaf.shape)\n\n mask_1 = n_samples_leaf <= 1\n mask_2 = n_samples_leaf == 2\n not_mask = ~np.logical_or(mask_1, mask_2)\n\n average_path_length[mask_1] = 0.0\n average_path_length[mask_2] = 1.0\n average_path_length[not_mask] = (\n 2.0 * (np.log(n_samples_leaf[not_mask] - 1.0) + np.euler_gamma)\n - 2.0 * (n_samples_leaf[not_mask] - 1.0) / n_samples_leaf[not_mask]\n )\n\n return average_path_length.reshape(n_samples_leaf_shape)\n",
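A hedged sketch of the IsolationForest API described above (illustrative only): fit on points clustered near the origin, then score a test set that contains two obvious outliers.

import numpy as np
from sklearn.ensemble import IsolationForest

rng = np.random.RandomState(42)
X_train = 0.3 * rng.randn(100, 2)                          # inliers near the origin
X_test = np.r_[0.3 * rng.randn(10, 2), [[4, 4], [-5, 3]]]  # plus two far-away points

clf = IsolationForest(n_estimators=100, contamination="auto", random_state=42)
clf.fit(X_train)

print(clf.predict(X_test))            # +1 for inliers, -1 for outliers
print(clf.decision_function(X_test))  # negative values indicate anomalies
print(clf.score_samples(X_test))      # opposite of the paper's anomaly score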
"\"\"\"\nVarious bayesian regression\n\"\"\"\n\n# Authors: V. Michel, F. Pedregosa, A. Gramfort\n# License: BSD 3 clause\n\nfrom math import log\nimport numpy as np\nfrom scipy import linalg\n\nfrom ._base import LinearModel, _rescale_data\nfrom ..base import RegressorMixin\nfrom ._base import _deprecate_normalize\nfrom ..utils.extmath import fast_logdet\nfrom scipy.linalg import pinvh\nfrom ..utils.validation import _check_sample_weight\n\n\n###############################################################################\n# BayesianRidge regression\n\n\nclass BayesianRidge(RegressorMixin, LinearModel):\n \"\"\"Bayesian ridge regression.\n\n Fit a Bayesian ridge model. See the Notes section for details on this\n implementation and the optimization of the regularization parameters\n lambda (precision of the weights) and alpha (precision of the noise).\n\n Read more in the :ref:`User Guide <bayesian_regression>`.\n\n Parameters\n ----------\n n_iter : int, default=300\n Maximum number of iterations. Should be greater than or equal to 1.\n\n tol : float, default=1e-3\n Stop the algorithm if w has converged.\n\n alpha_1 : float, default=1e-6\n Hyper-parameter : shape parameter for the Gamma distribution prior\n over the alpha parameter.\n\n alpha_2 : float, default=1e-6\n Hyper-parameter : inverse scale parameter (rate parameter) for the\n Gamma distribution prior over the alpha parameter.\n\n lambda_1 : float, default=1e-6\n Hyper-parameter : shape parameter for the Gamma distribution prior\n over the lambda parameter.\n\n lambda_2 : float, default=1e-6\n Hyper-parameter : inverse scale parameter (rate parameter) for the\n Gamma distribution prior over the lambda parameter.\n\n alpha_init : float, default=None\n Initial value for alpha (precision of the noise).\n If not set, alpha_init is 1/Var(y).\n\n .. versionadded:: 0.22\n\n lambda_init : float, default=None\n Initial value for lambda (precision of the weights).\n If not set, lambda_init is 1.\n\n .. versionadded:: 0.22\n\n compute_score : bool, default=False\n If True, compute the log marginal likelihood at each iteration of the\n optimization.\n\n fit_intercept : bool, default=True\n Whether to calculate the intercept for this model.\n The intercept is not treated as a probabilistic parameter\n and thus has no associated variance. If set\n to False, no intercept will be used in calculations\n (i.e. data is expected to be centered).\n\n normalize : bool, default=False\n This parameter is ignored when ``fit_intercept`` is set to False.\n If True, the regressors X will be normalized before regression by\n subtracting the mean and dividing by the l2-norm.\n If you wish to standardize, please use\n :class:`~sklearn.preprocessing.StandardScaler` before calling ``fit``\n on an estimator with ``normalize=False``.\n\n .. deprecated:: 1.0\n ``normalize`` was deprecated in version 1.0 and will be removed in\n 1.2.\n\n copy_X : bool, default=True\n If True, X will be copied; else, it may be overwritten.\n\n verbose : bool, default=False\n Verbose mode when fitting the model.\n\n\n Attributes\n ----------\n coef_ : array-like of shape (n_features,)\n Coefficients of the regression model (mean of distribution)\n\n intercept_ : float\n Independent term in decision function. 
Set to 0.0 if\n ``fit_intercept = False``.\n\n alpha_ : float\n Estimated precision of the noise.\n\n lambda_ : float\n Estimated precision of the weights.\n\n sigma_ : array-like of shape (n_features, n_features)\n Estimated variance-covariance matrix of the weights\n\n scores_ : array-like of shape (n_iter_+1,)\n If computed_score is True, value of the log marginal likelihood (to be\n maximized) at each iteration of the optimization. The array starts\n with the value of the log marginal likelihood obtained for the initial\n values of alpha and lambda and ends with the value obtained for the\n estimated alpha and lambda.\n\n n_iter_ : int\n The actual number of iterations to reach the stopping criterion.\n\n X_offset_ : float\n If `normalize=True`, offset subtracted for centering data to a\n zero mean.\n\n X_scale_ : float\n If `normalize=True`, parameter used to scale data to a unit\n standard deviation.\n\n n_features_in_ : int\n Number of features seen during :term:`fit`.\n\n .. versionadded:: 0.24\n\n Examples\n --------\n >>> from sklearn import linear_model\n >>> clf = linear_model.BayesianRidge()\n >>> clf.fit([[0,0], [1, 1], [2, 2]], [0, 1, 2])\n BayesianRidge()\n >>> clf.predict([[1, 1]])\n array([1.])\n\n Notes\n -----\n There exist several strategies to perform Bayesian ridge regression. This\n implementation is based on the algorithm described in Appendix A of\n (Tipping, 2001) where updates of the regularization parameters are done as\n suggested in (MacKay, 1992). Note that according to A New\n View of Automatic Relevance Determination (Wipf and Nagarajan, 2008) these\n update rules do not guarantee that the marginal likelihood is increasing\n between two consecutive iterations of the optimization.\n\n References\n ----------\n D. J. C. MacKay, Bayesian Interpolation, Computation and Neural Systems,\n Vol. 4, No. 3, 1992.\n\n M. E. Tipping, Sparse Bayesian Learning and the Relevance Vector Machine,\n Journal of Machine Learning Research, Vol. 1, 2001.\n \"\"\"\n\n def __init__(\n self,\n *,\n n_iter=300,\n tol=1.0e-3,\n alpha_1=1.0e-6,\n alpha_2=1.0e-6,\n lambda_1=1.0e-6,\n lambda_2=1.0e-6,\n alpha_init=None,\n lambda_init=None,\n compute_score=False,\n fit_intercept=True,\n normalize=\"deprecated\",\n copy_X=True,\n verbose=False,\n ):\n self.n_iter = n_iter\n self.tol = tol\n self.alpha_1 = alpha_1\n self.alpha_2 = alpha_2\n self.lambda_1 = lambda_1\n self.lambda_2 = lambda_2\n self.alpha_init = alpha_init\n self.lambda_init = lambda_init\n self.compute_score = compute_score\n self.fit_intercept = fit_intercept\n self.normalize = normalize\n self.copy_X = copy_X\n self.verbose = verbose\n\n def fit(self, X, y, sample_weight=None):\n \"\"\"Fit the model\n\n Parameters\n ----------\n X : ndarray of shape (n_samples, n_features)\n Training data\n y : ndarray of shape (n_samples,)\n Target values. Will be cast to X's dtype if necessary\n\n sample_weight : ndarray of shape (n_samples,), default=None\n Individual weights for each sample\n\n .. 
versionadded:: 0.20\n parameter *sample_weight* support to BayesianRidge.\n\n Returns\n -------\n self : returns an instance of self.\n \"\"\"\n self._normalize = _deprecate_normalize(\n self.normalize, default=False, estimator_name=self.__class__.__name__\n )\n\n if self.n_iter < 1:\n raise ValueError(\n \"n_iter should be greater than or equal to 1.\"\n \" Got {!r}.\".format(self.n_iter)\n )\n\n X, y = self._validate_data(X, y, dtype=np.float64, y_numeric=True)\n\n if sample_weight is not None:\n sample_weight = _check_sample_weight(sample_weight, X, dtype=X.dtype)\n\n X, y, X_offset_, y_offset_, X_scale_ = self._preprocess_data(\n X,\n y,\n self.fit_intercept,\n self._normalize,\n self.copy_X,\n sample_weight=sample_weight,\n )\n\n if sample_weight is not None:\n # Sample weight can be implemented via a simple rescaling.\n X, y = _rescale_data(X, y, sample_weight)\n\n self.X_offset_ = X_offset_\n self.X_scale_ = X_scale_\n n_samples, n_features = X.shape\n\n # Initialization of the values of the parameters\n eps = np.finfo(np.float64).eps\n # Add `eps` in the denominator to omit division by zero if `np.var(y)`\n # is zero\n alpha_ = self.alpha_init\n lambda_ = self.lambda_init\n if alpha_ is None:\n alpha_ = 1.0 / (np.var(y) + eps)\n if lambda_ is None:\n lambda_ = 1.0\n\n verbose = self.verbose\n lambda_1 = self.lambda_1\n lambda_2 = self.lambda_2\n alpha_1 = self.alpha_1\n alpha_2 = self.alpha_2\n\n self.scores_ = list()\n coef_old_ = None\n\n XT_y = np.dot(X.T, y)\n U, S, Vh = linalg.svd(X, full_matrices=False)\n eigen_vals_ = S ** 2\n\n # Convergence loop of the bayesian ridge regression\n for iter_ in range(self.n_iter):\n\n # update posterior mean coef_ based on alpha_ and lambda_ and\n # compute corresponding rmse\n coef_, rmse_ = self._update_coef_(\n X, y, n_samples, n_features, XT_y, U, Vh, eigen_vals_, alpha_, lambda_\n )\n if self.compute_score:\n # compute the log marginal likelihood\n s = self._log_marginal_likelihood(\n n_samples, n_features, eigen_vals_, alpha_, lambda_, coef_, rmse_\n )\n self.scores_.append(s)\n\n # Update alpha and lambda according to (MacKay, 1992)\n gamma_ = np.sum((alpha_ * eigen_vals_) / (lambda_ + alpha_ * eigen_vals_))\n lambda_ = (gamma_ + 2 * lambda_1) / (np.sum(coef_ ** 2) + 2 * lambda_2)\n alpha_ = (n_samples - gamma_ + 2 * alpha_1) / (rmse_ + 2 * alpha_2)\n\n # Check for convergence\n if iter_ != 0 and np.sum(np.abs(coef_old_ - coef_)) < self.tol:\n if verbose:\n print(\"Convergence after \", str(iter_), \" iterations\")\n break\n coef_old_ = np.copy(coef_)\n\n self.n_iter_ = iter_ + 1\n\n # return regularization parameters and corresponding posterior mean,\n # log marginal likelihood and posterior covariance\n self.alpha_ = alpha_\n self.lambda_ = lambda_\n self.coef_, rmse_ = self._update_coef_(\n X, y, n_samples, n_features, XT_y, U, Vh, eigen_vals_, alpha_, lambda_\n )\n if self.compute_score:\n # compute the log marginal likelihood\n s = self._log_marginal_likelihood(\n n_samples, n_features, eigen_vals_, alpha_, lambda_, coef_, rmse_\n )\n self.scores_.append(s)\n self.scores_ = np.array(self.scores_)\n\n # posterior covariance is given by 1/alpha_ * scaled_sigma_\n scaled_sigma_ = np.dot(\n Vh.T, Vh / (eigen_vals_ + lambda_ / alpha_)[:, np.newaxis]\n )\n self.sigma_ = (1.0 / alpha_) * scaled_sigma_\n\n self._set_intercept(X_offset_, y_offset_, X_scale_)\n\n return self\n\n def predict(self, X, return_std=False):\n \"\"\"Predict using the linear model.\n\n In addition to the mean of the predictive distribution, also its\n standard 
deviation can be returned.\n\n Parameters\n ----------\n X : {array-like, sparse matrix} of shape (n_samples, n_features)\n Samples.\n\n return_std : bool, default=False\n Whether to return the standard deviation of posterior prediction.\n\n Returns\n -------\n y_mean : array-like of shape (n_samples,)\n Mean of predictive distribution of query points.\n\n y_std : array-like of shape (n_samples,)\n Standard deviation of predictive distribution of query points.\n \"\"\"\n y_mean = self._decision_function(X)\n if return_std is False:\n return y_mean\n else:\n if self._normalize:\n X = (X - self.X_offset_) / self.X_scale_\n sigmas_squared_data = (np.dot(X, self.sigma_) * X).sum(axis=1)\n y_std = np.sqrt(sigmas_squared_data + (1.0 / self.alpha_))\n return y_mean, y_std\n\n def _update_coef_(\n self, X, y, n_samples, n_features, XT_y, U, Vh, eigen_vals_, alpha_, lambda_\n ):\n \"\"\"Update posterior mean and compute corresponding rmse.\n\n Posterior mean is given by coef_ = scaled_sigma_ * X.T * y where\n scaled_sigma_ = (lambda_/alpha_ * np.eye(n_features)\n + np.dot(X.T, X))^-1\n \"\"\"\n\n if n_samples > n_features:\n coef_ = np.linalg.multi_dot(\n [Vh.T, Vh / (eigen_vals_ + lambda_ / alpha_)[:, np.newaxis], XT_y]\n )\n else:\n coef_ = np.linalg.multi_dot(\n [X.T, U / (eigen_vals_ + lambda_ / alpha_)[None, :], U.T, y]\n )\n\n rmse_ = np.sum((y - np.dot(X, coef_)) ** 2)\n\n return coef_, rmse_\n\n def _log_marginal_likelihood(\n self, n_samples, n_features, eigen_vals, alpha_, lambda_, coef, rmse\n ):\n \"\"\"Log marginal likelihood.\"\"\"\n alpha_1 = self.alpha_1\n alpha_2 = self.alpha_2\n lambda_1 = self.lambda_1\n lambda_2 = self.lambda_2\n\n # compute the log of the determinant of the posterior covariance.\n # posterior covariance is given by\n # sigma = (lambda_ * np.eye(n_features) + alpha_ * np.dot(X.T, X))^-1\n if n_samples > n_features:\n logdet_sigma = -np.sum(np.log(lambda_ + alpha_ * eigen_vals))\n else:\n logdet_sigma = np.full(n_features, lambda_, dtype=np.array(lambda_).dtype)\n logdet_sigma[:n_samples] += alpha_ * eigen_vals\n logdet_sigma = -np.sum(np.log(logdet_sigma))\n\n score = lambda_1 * log(lambda_) - lambda_2 * lambda_\n score += alpha_1 * log(alpha_) - alpha_2 * alpha_\n score += 0.5 * (\n n_features * log(lambda_)\n + n_samples * log(alpha_)\n - alpha_ * rmse\n - lambda_ * np.sum(coef ** 2)\n + logdet_sigma\n - n_samples * log(2 * np.pi)\n )\n\n return score\n\n\n###############################################################################\n# ARD (Automatic Relevance Determination) regression\n\n\nclass ARDRegression(RegressorMixin, LinearModel):\n \"\"\"Bayesian ARD regression.\n\n Fit the weights of a regression model, using an ARD prior. 
The weights of\n the regression model are assumed to be in Gaussian distributions.\n Also estimate the parameters lambda (precisions of the distributions of the\n weights) and alpha (precision of the distribution of the noise).\n The estimation is done by an iterative procedures (Evidence Maximization)\n\n Read more in the :ref:`User Guide <bayesian_regression>`.\n\n Parameters\n ----------\n n_iter : int, default=300\n Maximum number of iterations.\n\n tol : float, default=1e-3\n Stop the algorithm if w has converged.\n\n alpha_1 : float, default=1e-6\n Hyper-parameter : shape parameter for the Gamma distribution prior\n over the alpha parameter.\n\n alpha_2 : float, default=1e-6\n Hyper-parameter : inverse scale parameter (rate parameter) for the\n Gamma distribution prior over the alpha parameter.\n\n lambda_1 : float, default=1e-6\n Hyper-parameter : shape parameter for the Gamma distribution prior\n over the lambda parameter.\n\n lambda_2 : float, default=1e-6\n Hyper-parameter : inverse scale parameter (rate parameter) for the\n Gamma distribution prior over the lambda parameter.\n\n compute_score : bool, default=False\n If True, compute the objective function at each step of the model.\n\n threshold_lambda : float, default=10 000\n threshold for removing (pruning) weights with high precision from\n the computation.\n\n fit_intercept : bool, default=True\n whether to calculate the intercept for this model. If set\n to false, no intercept will be used in calculations\n (i.e. data is expected to be centered).\n\n normalize : bool, default=False\n This parameter is ignored when ``fit_intercept`` is set to False.\n If True, the regressors X will be normalized before regression by\n subtracting the mean and dividing by the l2-norm.\n If you wish to standardize, please use\n :class:`~sklearn.preprocessing.StandardScaler` before calling ``fit``\n on an estimator with ``normalize=False``.\n\n .. deprecated:: 1.0\n ``normalize`` was deprecated in version 1.0 and will be removed in\n 1.2.\n\n copy_X : bool, default=True\n If True, X will be copied; else, it may be overwritten.\n\n verbose : bool, default=False\n Verbose mode when fitting the model.\n\n Attributes\n ----------\n coef_ : array-like of shape (n_features,)\n Coefficients of the regression model (mean of distribution)\n\n alpha_ : float\n estimated precision of the noise.\n\n lambda_ : array-like of shape (n_features,)\n estimated precisions of the weights.\n\n sigma_ : array-like of shape (n_features, n_features)\n estimated variance-covariance matrix of the weights\n\n scores_ : float\n if computed, value of the objective function (to be maximized)\n\n intercept_ : float\n Independent term in decision function. Set to 0.0 if\n ``fit_intercept = False``.\n\n X_offset_ : float\n If `normalize=True`, offset subtracted for centering data to a\n zero mean.\n\n X_scale_ : float\n If `normalize=True`, parameter used to scale data to a unit\n standard deviation.\n\n n_features_in_ : int\n Number of features seen during :term:`fit`.\n\n .. versionadded:: 0.24\n\n Examples\n --------\n >>> from sklearn import linear_model\n >>> clf = linear_model.ARDRegression()\n >>> clf.fit([[0,0], [1, 1], [2, 2]], [0, 1, 2])\n ARDRegression()\n >>> clf.predict([[1, 1]])\n array([1.])\n\n Notes\n -----\n For an example, see :ref:`examples/linear_model/plot_ard.py\n <sphx_glr_auto_examples_linear_model_plot_ard.py>`.\n\n References\n ----------\n D. J. C. 
MacKay, Bayesian nonlinear modeling for the prediction\n competition, ASHRAE Transactions, 1994.\n\n R. Salakhutdinov, Lecture notes on Statistical Machine Learning,\n http://www.utstat.toronto.edu/~rsalakhu/sta4273/notes/Lecture2.pdf#page=15\n Their beta is our ``self.alpha_``\n Their alpha is our ``self.lambda_``\n ARD is a little different than the slide: only dimensions/features for\n which ``self.lambda_ < self.threshold_lambda`` are kept and the rest are\n discarded.\n \"\"\"\n\n def __init__(\n self,\n *,\n n_iter=300,\n tol=1.0e-3,\n alpha_1=1.0e-6,\n alpha_2=1.0e-6,\n lambda_1=1.0e-6,\n lambda_2=1.0e-6,\n compute_score=False,\n threshold_lambda=1.0e4,\n fit_intercept=True,\n normalize=\"deprecated\",\n copy_X=True,\n verbose=False,\n ):\n self.n_iter = n_iter\n self.tol = tol\n self.fit_intercept = fit_intercept\n self.normalize = normalize\n self.alpha_1 = alpha_1\n self.alpha_2 = alpha_2\n self.lambda_1 = lambda_1\n self.lambda_2 = lambda_2\n self.compute_score = compute_score\n self.threshold_lambda = threshold_lambda\n self.copy_X = copy_X\n self.verbose = verbose\n\n def fit(self, X, y):\n \"\"\"Fit the ARDRegression model according to the given training data\n and parameters.\n\n Iterative procedure to maximize the evidence\n\n Parameters\n ----------\n X : array-like of shape (n_samples, n_features)\n Training vector, where n_samples in the number of samples and\n n_features is the number of features.\n y : array-like of shape (n_samples,)\n Target values (integers). Will be cast to X's dtype if necessary\n\n Returns\n -------\n self : returns an instance of self.\n \"\"\"\n self._normalize = _deprecate_normalize(\n self.normalize, default=False, estimator_name=self.__class__.__name__\n )\n\n X, y = self._validate_data(\n X, y, dtype=np.float64, y_numeric=True, ensure_min_samples=2\n )\n\n n_samples, n_features = X.shape\n coef_ = np.zeros(n_features)\n\n X, y, X_offset_, y_offset_, X_scale_ = self._preprocess_data(\n X, y, self.fit_intercept, self._normalize, self.copy_X\n )\n\n self.X_offset_ = X_offset_\n self.X_scale_ = X_scale_\n\n # Launch the convergence loop\n keep_lambda = np.ones(n_features, dtype=bool)\n\n lambda_1 = self.lambda_1\n lambda_2 = self.lambda_2\n alpha_1 = self.alpha_1\n alpha_2 = self.alpha_2\n verbose = self.verbose\n\n # Initialization of the values of the parameters\n eps = np.finfo(np.float64).eps\n # Add `eps` in the denominator to omit division by zero if `np.var(y)`\n # is zero\n alpha_ = 1.0 / (np.var(y) + eps)\n lambda_ = np.ones(n_features)\n\n self.scores_ = list()\n coef_old_ = None\n\n def update_coeff(X, y, coef_, alpha_, keep_lambda, sigma_):\n coef_[keep_lambda] = alpha_ * np.linalg.multi_dot(\n [sigma_, X[:, keep_lambda].T, y]\n )\n return coef_\n\n update_sigma = (\n self._update_sigma\n if n_samples >= n_features\n else self._update_sigma_woodbury\n )\n # Iterative procedure of ARDRegression\n for iter_ in range(self.n_iter):\n sigma_ = update_sigma(X, alpha_, lambda_, keep_lambda)\n coef_ = update_coeff(X, y, coef_, alpha_, keep_lambda, sigma_)\n\n # Update alpha and lambda\n rmse_ = np.sum((y - np.dot(X, coef_)) ** 2)\n gamma_ = 1.0 - lambda_[keep_lambda] * np.diag(sigma_)\n lambda_[keep_lambda] = (gamma_ + 2.0 * lambda_1) / (\n (coef_[keep_lambda]) ** 2 + 2.0 * lambda_2\n )\n alpha_ = (n_samples - gamma_.sum() + 2.0 * alpha_1) / (\n rmse_ + 2.0 * alpha_2\n )\n\n # Prune the weights with a precision over a threshold\n keep_lambda = lambda_ < self.threshold_lambda\n coef_[~keep_lambda] = 0\n\n # Compute the objective function\n 
if self.compute_score:\n s = (lambda_1 * np.log(lambda_) - lambda_2 * lambda_).sum()\n s += alpha_1 * log(alpha_) - alpha_2 * alpha_\n s += 0.5 * (\n fast_logdet(sigma_)\n + n_samples * log(alpha_)\n + np.sum(np.log(lambda_))\n )\n s -= 0.5 * (alpha_ * rmse_ + (lambda_ * coef_ ** 2).sum())\n self.scores_.append(s)\n\n # Check for convergence\n if iter_ > 0 and np.sum(np.abs(coef_old_ - coef_)) < self.tol:\n if verbose:\n print(\"Converged after %s iterations\" % iter_)\n break\n coef_old_ = np.copy(coef_)\n\n if not keep_lambda.any():\n break\n\n if keep_lambda.any():\n # update sigma and mu using updated params from the last iteration\n sigma_ = update_sigma(X, alpha_, lambda_, keep_lambda)\n coef_ = update_coeff(X, y, coef_, alpha_, keep_lambda, sigma_)\n else:\n sigma_ = np.array([]).reshape(0, 0)\n\n self.coef_ = coef_\n self.alpha_ = alpha_\n self.sigma_ = sigma_\n self.lambda_ = lambda_\n self._set_intercept(X_offset_, y_offset_, X_scale_)\n return self\n\n def _update_sigma_woodbury(self, X, alpha_, lambda_, keep_lambda):\n # See slides as referenced in the docstring note\n # this function is used when n_samples < n_features and will invert\n # a matrix of shape (n_samples, n_samples) making use of the\n # woodbury formula:\n # https://en.wikipedia.org/wiki/Woodbury_matrix_identity\n n_samples = X.shape[0]\n X_keep = X[:, keep_lambda]\n inv_lambda = 1 / lambda_[keep_lambda].reshape(1, -1)\n sigma_ = pinvh(\n np.eye(n_samples) / alpha_ + np.dot(X_keep * inv_lambda, X_keep.T)\n )\n sigma_ = np.dot(sigma_, X_keep * inv_lambda)\n sigma_ = -np.dot(inv_lambda.reshape(-1, 1) * X_keep.T, sigma_)\n sigma_[np.diag_indices(sigma_.shape[1])] += 1.0 / lambda_[keep_lambda]\n return sigma_\n\n def _update_sigma(self, X, alpha_, lambda_, keep_lambda):\n # See slides as referenced in the docstring note\n # this function is used when n_samples >= n_features and will\n # invert a matrix of shape (n_features, n_features)\n X_keep = X[:, keep_lambda]\n gram = np.dot(X_keep.T, X_keep)\n eye = np.eye(gram.shape[0])\n sigma_inv = lambda_[keep_lambda] * eye + alpha_ * gram\n sigma_ = pinvh(sigma_inv)\n return sigma_\n\n def predict(self, X, return_std=False):\n \"\"\"Predict using the linear model.\n\n In addition to the mean of the predictive distribution, also its\n standard deviation can be returned.\n\n Parameters\n ----------\n X : {array-like, sparse matrix} of shape (n_samples, n_features)\n Samples.\n\n return_std : bool, default=False\n Whether to return the standard deviation of posterior prediction.\n\n Returns\n -------\n y_mean : array-like of shape (n_samples,)\n Mean of predictive distribution of query points.\n\n y_std : array-like of shape (n_samples,)\n Standard deviation of predictive distribution of query points.\n \"\"\"\n y_mean = self._decision_function(X)\n if return_std is False:\n return y_mean\n else:\n if self._normalize:\n X = (X - self.X_offset_) / self.X_scale_\n X = X[:, self.lambda_ < self.threshold_lambda]\n sigmas_squared_data = (np.dot(X, self.sigma_) * X).sum(axis=1)\n y_std = np.sqrt(sigmas_squared_data + (1.0 / self.alpha_))\n return y_mean, y_std\n",
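A minimal sketch (illustrative, not from the dumped file) of the two Bayesian regressors defined above, fit on a toy linear problem whose coefficients and noise level are arbitrary choices for the demo.

import numpy as np
from sklearn.linear_model import ARDRegression, BayesianRidge

rng = np.random.RandomState(0)
X = rng.randn(100, 5)
true_w = np.array([1.0, 0.0, -2.0, 0.0, 0.5])        # sparse-ish ground-truth weights
y = X @ true_w + 0.1 * rng.randn(100)

br = BayesianRidge(compute_score=True).fit(X, y)
ard = ARDRegression(compute_score=True).fit(X, y)

print(br.coef_, br.alpha_, br.lambda_)               # posterior mean + noise/weight precisions
print(ard.coef_)                                     # ARD drives irrelevant weights toward 0
y_mean, y_std = br.predict(X[:3], return_std=True)   # predictive mean and std
print(y_mean, y_std)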
"\"\"\"Spectral biclustering algorithms.\"\"\"\n# Authors : Kemal Eren\n# License: BSD 3 clause\n\nfrom abc import ABCMeta, abstractmethod\n\nimport numpy as np\n\nfrom scipy.linalg import norm\nfrom scipy.sparse import dia_matrix, issparse\nfrom scipy.sparse.linalg import eigsh, svds\n\nfrom . import KMeans, MiniBatchKMeans\nfrom ..base import BaseEstimator, BiclusterMixin\nfrom ..utils import check_random_state\n\nfrom ..utils.extmath import make_nonnegative, randomized_svd, safe_sparse_dot\n\nfrom ..utils.validation import assert_all_finite\n\n\n__all__ = [\"SpectralCoclustering\", \"SpectralBiclustering\"]\n\n\ndef _scale_normalize(X):\n \"\"\"Normalize ``X`` by scaling rows and columns independently.\n\n Returns the normalized matrix and the row and column scaling\n factors.\n\n \"\"\"\n X = make_nonnegative(X)\n row_diag = np.asarray(1.0 / np.sqrt(X.sum(axis=1))).squeeze()\n col_diag = np.asarray(1.0 / np.sqrt(X.sum(axis=0))).squeeze()\n row_diag = np.where(np.isnan(row_diag), 0, row_diag)\n col_diag = np.where(np.isnan(col_diag), 0, col_diag)\n if issparse(X):\n n_rows, n_cols = X.shape\n r = dia_matrix((row_diag, [0]), shape=(n_rows, n_rows))\n c = dia_matrix((col_diag, [0]), shape=(n_cols, n_cols))\n an = r * X * c\n else:\n an = row_diag[:, np.newaxis] * X * col_diag\n return an, row_diag, col_diag\n\n\ndef _bistochastic_normalize(X, max_iter=1000, tol=1e-5):\n \"\"\"Normalize rows and columns of ``X`` simultaneously so that all\n rows sum to one constant and all columns sum to a different\n constant.\n\n \"\"\"\n # According to paper, this can also be done more efficiently with\n # deviation reduction and balancing algorithms.\n X = make_nonnegative(X)\n X_scaled = X\n for _ in range(max_iter):\n X_new, _, _ = _scale_normalize(X_scaled)\n if issparse(X):\n dist = norm(X_scaled.data - X.data)\n else:\n dist = norm(X_scaled - X_new)\n X_scaled = X_new\n if dist is not None and dist < tol:\n break\n return X_scaled\n\n\ndef _log_normalize(X):\n \"\"\"Normalize ``X`` according to Kluger's log-interactions scheme.\"\"\"\n X = make_nonnegative(X, min_value=1)\n if issparse(X):\n raise ValueError(\n \"Cannot compute log of a sparse matrix,\"\n \" because log(x) diverges to -infinity as x\"\n \" goes to 0.\"\n )\n L = np.log(X)\n row_avg = L.mean(axis=1)[:, np.newaxis]\n col_avg = L.mean(axis=0)\n avg = L.mean()\n return L - row_avg - col_avg + avg\n\n\nclass BaseSpectral(BiclusterMixin, BaseEstimator, metaclass=ABCMeta):\n \"\"\"Base class for spectral biclustering.\"\"\"\n\n @abstractmethod\n def __init__(\n self,\n n_clusters=3,\n svd_method=\"randomized\",\n n_svd_vecs=None,\n mini_batch=False,\n init=\"k-means++\",\n n_init=10,\n random_state=None,\n ):\n self.n_clusters = n_clusters\n self.svd_method = svd_method\n self.n_svd_vecs = n_svd_vecs\n self.mini_batch = mini_batch\n self.init = init\n self.n_init = n_init\n self.random_state = random_state\n\n def _check_parameters(self):\n legal_svd_methods = (\"randomized\", \"arpack\")\n if self.svd_method not in legal_svd_methods:\n raise ValueError(\n \"Unknown SVD method: '{0}'. 
svd_method must be\"\n \" one of {1}.\".format(self.svd_method, legal_svd_methods)\n )\n\n def fit(self, X, y=None):\n \"\"\"Creates a biclustering for X.\n\n Parameters\n ----------\n X : array-like of shape (n_samples, n_features)\n\n y : Ignored\n\n \"\"\"\n X = self._validate_data(X, accept_sparse=\"csr\", dtype=np.float64)\n self._check_parameters()\n self._fit(X)\n return self\n\n def _svd(self, array, n_components, n_discard):\n \"\"\"Returns first `n_components` left and right singular\n vectors u and v, discarding the first `n_discard`.\n\n \"\"\"\n if self.svd_method == \"randomized\":\n kwargs = {}\n if self.n_svd_vecs is not None:\n kwargs[\"n_oversamples\"] = self.n_svd_vecs\n u, _, vt = randomized_svd(\n array, n_components, random_state=self.random_state, **kwargs\n )\n\n elif self.svd_method == \"arpack\":\n u, _, vt = svds(array, k=n_components, ncv=self.n_svd_vecs)\n if np.any(np.isnan(vt)):\n # some eigenvalues of A * A.T are negative, causing\n # sqrt() to be np.nan. This causes some vectors in vt\n # to be np.nan.\n A = safe_sparse_dot(array.T, array)\n random_state = check_random_state(self.random_state)\n # initialize with [-1,1] as in ARPACK\n v0 = random_state.uniform(-1, 1, A.shape[0])\n _, v = eigsh(A, ncv=self.n_svd_vecs, v0=v0)\n vt = v.T\n if np.any(np.isnan(u)):\n A = safe_sparse_dot(array, array.T)\n random_state = check_random_state(self.random_state)\n # initialize with [-1,1] as in ARPACK\n v0 = random_state.uniform(-1, 1, A.shape[0])\n _, u = eigsh(A, ncv=self.n_svd_vecs, v0=v0)\n\n assert_all_finite(u)\n assert_all_finite(vt)\n u = u[:, n_discard:]\n vt = vt[n_discard:]\n return u, vt.T\n\n def _k_means(self, data, n_clusters):\n if self.mini_batch:\n model = MiniBatchKMeans(\n n_clusters,\n init=self.init,\n n_init=self.n_init,\n random_state=self.random_state,\n )\n else:\n model = KMeans(\n n_clusters,\n init=self.init,\n n_init=self.n_init,\n random_state=self.random_state,\n )\n model.fit(data)\n centroid = model.cluster_centers_\n labels = model.labels_\n return centroid, labels\n\n def _more_tags(self):\n return {\n \"_xfail_checks\": {\n \"check_estimators_dtypes\": \"raises nan error\",\n \"check_fit2d_1sample\": \"_scale_normalize fails\",\n \"check_fit2d_1feature\": \"raises apply_along_axis error\",\n \"check_estimator_sparse_data\": \"does not fail gracefully\",\n \"check_methods_subset_invariance\": \"empty array passed inside\",\n \"check_dont_overwrite_parameters\": \"empty array passed inside\",\n \"check_fit2d_predict1d\": \"emptry array passed inside\",\n }\n }\n\n\nclass SpectralCoclustering(BaseSpectral):\n \"\"\"Spectral Co-Clustering algorithm (Dhillon, 2001).\n\n Clusters rows and columns of an array `X` to solve the relaxed\n normalized cut of the bipartite graph created from `X` as follows:\n the edge between row vertex `i` and column vertex `j` has weight\n `X[i, j]`.\n\n The resulting bicluster structure is block-diagonal, since each\n row and each column belongs to exactly one bicluster.\n\n Supports sparse matrices, as long as they are nonnegative.\n\n Read more in the :ref:`User Guide <spectral_coclustering>`.\n\n Parameters\n ----------\n n_clusters : int, default=3\n The number of biclusters to find.\n\n svd_method : {'randomized', 'arpack'}, default='randomized'\n Selects the algorithm for finding singular vectors. May be\n 'randomized' or 'arpack'. If 'randomized', use\n :func:`sklearn.utils.extmath.randomized_svd`, which may be faster\n for large matrices. 
If 'arpack', use\n :func:`scipy.sparse.linalg.svds`, which is more accurate, but\n possibly slower in some cases.\n\n n_svd_vecs : int, default=None\n Number of vectors to use in calculating the SVD. Corresponds\n to `ncv` when `svd_method=arpack` and `n_oversamples` when\n `svd_method` is 'randomized`.\n\n mini_batch : bool, default=False\n Whether to use mini-batch k-means, which is faster but may get\n different results.\n\n init : {'k-means++', 'random', or ndarray of shape \\\n (n_clusters, n_features), default='k-means++'\n Method for initialization of k-means algorithm; defaults to\n 'k-means++'.\n\n n_init : int, default=10\n Number of random initializations that are tried with the\n k-means algorithm.\n\n If mini-batch k-means is used, the best initialization is\n chosen and the algorithm runs once. Otherwise, the algorithm\n is run for each initialization and the best solution chosen.\n\n random_state : int, RandomState instance, default=None\n Used for randomizing the singular value decomposition and the k-means\n initialization. Use an int to make the randomness deterministic.\n See :term:`Glossary <random_state>`.\n\n Attributes\n ----------\n rows_ : array-like of shape (n_row_clusters, n_rows)\n Results of the clustering. `rows[i, r]` is True if\n cluster `i` contains row `r`. Available only after calling ``fit``.\n\n columns_ : array-like of shape (n_column_clusters, n_columns)\n Results of the clustering, like `rows`.\n\n row_labels_ : array-like of shape (n_rows,)\n The bicluster label of each row.\n\n column_labels_ : array-like of shape (n_cols,)\n The bicluster label of each column.\n\n biclusters_ : tuple of two ndarrays\n The tuple contains the `rows_` and `columns_` arrays.\n\n n_features_in_ : int\n Number of features seen during :term:`fit`.\n\n .. versionadded:: 0.24\n\n Examples\n --------\n >>> from sklearn.cluster import SpectralCoclustering\n >>> import numpy as np\n >>> X = np.array([[1, 1], [2, 1], [1, 0],\n ... [4, 7], [3, 5], [3, 6]])\n >>> clustering = SpectralCoclustering(n_clusters=2, random_state=0).fit(X)\n >>> clustering.row_labels_ #doctest: +SKIP\n array([0, 1, 1, 0, 0, 0], dtype=int32)\n >>> clustering.column_labels_ #doctest: +SKIP\n array([0, 0], dtype=int32)\n >>> clustering\n SpectralCoclustering(n_clusters=2, random_state=0)\n\n References\n ----------\n\n * Dhillon, Inderjit S, 2001. 
`Co-clustering documents and words using\n bipartite spectral graph partitioning\n <http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.140.3011>`__.\n\n \"\"\"\n\n def __init__(\n self,\n n_clusters=3,\n *,\n svd_method=\"randomized\",\n n_svd_vecs=None,\n mini_batch=False,\n init=\"k-means++\",\n n_init=10,\n random_state=None,\n ):\n super().__init__(\n n_clusters, svd_method, n_svd_vecs, mini_batch, init, n_init, random_state\n )\n\n def _fit(self, X):\n normalized_data, row_diag, col_diag = _scale_normalize(X)\n n_sv = 1 + int(np.ceil(np.log2(self.n_clusters)))\n u, v = self._svd(normalized_data, n_sv, n_discard=1)\n z = np.vstack((row_diag[:, np.newaxis] * u, col_diag[:, np.newaxis] * v))\n\n _, labels = self._k_means(z, self.n_clusters)\n\n n_rows = X.shape[0]\n self.row_labels_ = labels[:n_rows]\n self.column_labels_ = labels[n_rows:]\n\n self.rows_ = np.vstack([self.row_labels_ == c for c in range(self.n_clusters)])\n self.columns_ = np.vstack(\n [self.column_labels_ == c for c in range(self.n_clusters)]\n )\n\n\nclass SpectralBiclustering(BaseSpectral):\n \"\"\"Spectral biclustering (Kluger, 2003).\n\n Partitions rows and columns under the assumption that the data has\n an underlying checkerboard structure. For instance, if there are\n two row partitions and three column partitions, each row will\n belong to three biclusters, and each column will belong to two\n biclusters. The outer product of the corresponding row and column\n label vectors gives this checkerboard structure.\n\n Read more in the :ref:`User Guide <spectral_biclustering>`.\n\n Parameters\n ----------\n n_clusters : int or tuple (n_row_clusters, n_column_clusters), default=3\n The number of row and column clusters in the checkerboard\n structure.\n\n method : {'bistochastic', 'scale', 'log'}, default='bistochastic'\n Method of normalizing and converting singular vectors into\n biclusters. May be one of 'scale', 'bistochastic', or 'log'.\n The authors recommend using 'log'. If the data is sparse,\n however, log normalization will not work, which is why the\n default is 'bistochastic'.\n\n .. warning::\n if `method='log'`, the data must be sparse.\n\n n_components : int, default=6\n Number of singular vectors to check.\n\n n_best : int, default=3\n Number of best singular vectors to which to project the data\n for clustering.\n\n svd_method : {'randomized', 'arpack'}, default='randomized'\n Selects the algorithm for finding singular vectors. May be\n 'randomized' or 'arpack'. If 'randomized', uses\n :func:`~sklearn.utils.extmath.randomized_svd`, which may be faster\n for large matrices. If 'arpack', uses\n `scipy.sparse.linalg.svds`, which is more accurate, but\n possibly slower in some cases.\n\n n_svd_vecs : int, default=None\n Number of vectors to use in calculating the SVD. Corresponds\n to `ncv` when `svd_method=arpack` and `n_oversamples` when\n `svd_method` is 'randomized`.\n\n mini_batch : bool, default=False\n Whether to use mini-batch k-means, which is faster but may get\n different results.\n\n init : {'k-means++', 'random'} or ndarray of (n_clusters, n_features), \\\n default='k-means++'\n Method for initialization of k-means algorithm; defaults to\n 'k-means++'.\n\n n_init : int, default=10\n Number of random initializations that are tried with the\n k-means algorithm.\n\n If mini-batch k-means is used, the best initialization is\n chosen and the algorithm runs once. 
Otherwise, the algorithm\n is run for each initialization and the best solution chosen.\n\n random_state : int, RandomState instance, default=None\n Used for randomizing the singular value decomposition and the k-means\n initialization. Use an int to make the randomness deterministic.\n See :term:`Glossary <random_state>`.\n\n Attributes\n ----------\n rows_ : array-like of shape (n_row_clusters, n_rows)\n Results of the clustering. `rows[i, r]` is True if\n cluster `i` contains row `r`. Available only after calling ``fit``.\n\n columns_ : array-like of shape (n_column_clusters, n_columns)\n Results of the clustering, like `rows`.\n\n row_labels_ : array-like of shape (n_rows,)\n Row partition labels.\n\n column_labels_ : array-like of shape (n_cols,)\n Column partition labels.\n\n biclusters_ : tuple of two ndarrays\n The tuple contains the `rows_` and `columns_` arrays.\n\n n_features_in_ : int\n Number of features seen during :term:`fit`.\n\n .. versionadded:: 0.24\n\n Examples\n --------\n >>> from sklearn.cluster import SpectralBiclustering\n >>> import numpy as np\n >>> X = np.array([[1, 1], [2, 1], [1, 0],\n ... [4, 7], [3, 5], [3, 6]])\n >>> clustering = SpectralBiclustering(n_clusters=2, random_state=0).fit(X)\n >>> clustering.row_labels_\n array([1, 1, 1, 0, 0, 0], dtype=int32)\n >>> clustering.column_labels_\n array([0, 1], dtype=int32)\n >>> clustering\n SpectralBiclustering(n_clusters=2, random_state=0)\n\n References\n ----------\n\n * Kluger, Yuval, et. al., 2003. `Spectral biclustering of microarray\n data: coclustering genes and conditions\n <http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.135.1608>`__.\n\n \"\"\"\n\n def __init__(\n self,\n n_clusters=3,\n *,\n method=\"bistochastic\",\n n_components=6,\n n_best=3,\n svd_method=\"randomized\",\n n_svd_vecs=None,\n mini_batch=False,\n init=\"k-means++\",\n n_init=10,\n random_state=None,\n ):\n super().__init__(\n n_clusters, svd_method, n_svd_vecs, mini_batch, init, n_init, random_state\n )\n self.method = method\n self.n_components = n_components\n self.n_best = n_best\n\n def _check_parameters(self):\n super()._check_parameters()\n legal_methods = (\"bistochastic\", \"scale\", \"log\")\n if self.method not in legal_methods:\n raise ValueError(\n \"Unknown method: '{0}'. method must be\"\n \" one of {1}.\".format(self.method, legal_methods)\n )\n try:\n int(self.n_clusters)\n except TypeError:\n try:\n r, c = self.n_clusters\n int(r)\n int(c)\n except (ValueError, TypeError) as e:\n raise ValueError(\n \"Incorrect parameter n_clusters has value:\"\n \" {}. 
It should either be a single integer\"\n \" or an iterable with two integers:\"\n \" (n_row_clusters, n_column_clusters)\"\n ) from e\n if self.n_components < 1:\n raise ValueError(\n \"Parameter n_components must be greater than 0,\"\n \" but its value is {}\".format(self.n_components)\n )\n if self.n_best < 1:\n raise ValueError(\n \"Parameter n_best must be greater than 0,\"\n \" but its value is {}\".format(self.n_best)\n )\n if self.n_best > self.n_components:\n raise ValueError(\n \"n_best cannot be larger than\"\n \" n_components, but {} > {}\"\n \"\".format(self.n_best, self.n_components)\n )\n\n def _fit(self, X):\n n_sv = self.n_components\n if self.method == \"bistochastic\":\n normalized_data = _bistochastic_normalize(X)\n n_sv += 1\n elif self.method == \"scale\":\n normalized_data, _, _ = _scale_normalize(X)\n n_sv += 1\n elif self.method == \"log\":\n normalized_data = _log_normalize(X)\n n_discard = 0 if self.method == \"log\" else 1\n u, v = self._svd(normalized_data, n_sv, n_discard)\n ut = u.T\n vt = v.T\n\n try:\n n_row_clusters, n_col_clusters = self.n_clusters\n except TypeError:\n n_row_clusters = n_col_clusters = self.n_clusters\n\n best_ut = self._fit_best_piecewise(ut, self.n_best, n_row_clusters)\n\n best_vt = self._fit_best_piecewise(vt, self.n_best, n_col_clusters)\n\n self.row_labels_ = self._project_and_cluster(X, best_vt.T, n_row_clusters)\n\n self.column_labels_ = self._project_and_cluster(X.T, best_ut.T, n_col_clusters)\n\n self.rows_ = np.vstack(\n [\n self.row_labels_ == label\n for label in range(n_row_clusters)\n for _ in range(n_col_clusters)\n ]\n )\n self.columns_ = np.vstack(\n [\n self.column_labels_ == label\n for _ in range(n_row_clusters)\n for label in range(n_col_clusters)\n ]\n )\n\n def _fit_best_piecewise(self, vectors, n_best, n_clusters):\n \"\"\"Find the ``n_best`` vectors that are best approximated by piecewise\n constant vectors.\n\n The piecewise vectors are found by k-means; the best is chosen\n according to Euclidean distance.\n\n \"\"\"\n\n def make_piecewise(v):\n centroid, labels = self._k_means(v.reshape(-1, 1), n_clusters)\n return centroid[labels].ravel()\n\n piecewise_vectors = np.apply_along_axis(make_piecewise, axis=1, arr=vectors)\n dists = np.apply_along_axis(norm, axis=1, arr=(vectors - piecewise_vectors))\n result = vectors[np.argsort(dists)[:n_best]]\n return result\n\n def _project_and_cluster(self, data, vectors, n_clusters):\n \"\"\"Project ``data`` to ``vectors`` and cluster the result.\"\"\"\n projected = safe_sparse_dot(data, vectors)\n _, labels = self._k_means(projected, n_clusters)\n return labels\n",
"\"\"\"Testing for K-means\"\"\"\nimport re\nimport sys\n\nimport numpy as np\nfrom scipy import sparse as sp\nfrom threadpoolctl import threadpool_limits\n\nimport pytest\n\nfrom sklearn.utils._testing import assert_array_equal\nfrom sklearn.utils._testing import assert_allclose\nfrom sklearn.utils.fixes import _astype_copy_false\nfrom sklearn.base import clone\nfrom sklearn.exceptions import ConvergenceWarning\n\nfrom sklearn.utils.extmath import row_norms\nfrom sklearn.metrics import pairwise_distances\nfrom sklearn.metrics import pairwise_distances_argmin\nfrom sklearn.metrics.cluster import v_measure_score\nfrom sklearn.cluster import KMeans, k_means, kmeans_plusplus\nfrom sklearn.cluster import MiniBatchKMeans\nfrom sklearn.cluster._kmeans import _labels_inertia\nfrom sklearn.cluster._kmeans import _mini_batch_step\nfrom sklearn.cluster._k_means_common import _relocate_empty_clusters_dense\nfrom sklearn.cluster._k_means_common import _relocate_empty_clusters_sparse\nfrom sklearn.cluster._k_means_common import _euclidean_dense_dense_wrapper\nfrom sklearn.cluster._k_means_common import _euclidean_sparse_dense_wrapper\nfrom sklearn.cluster._k_means_common import _inertia_dense\nfrom sklearn.cluster._k_means_common import _inertia_sparse\nfrom sklearn.datasets import make_blobs\nfrom io import StringIO\n\n\n# non centered, sparse centers to check the\ncenters = np.array(\n [\n [0.0, 5.0, 0.0, 0.0, 0.0],\n [1.0, 1.0, 4.0, 0.0, 0.0],\n [1.0, 0.0, 0.0, 5.0, 1.0],\n ]\n)\nn_samples = 100\nn_clusters, n_features = centers.shape\nX, true_labels = make_blobs(\n n_samples=n_samples, centers=centers, cluster_std=1.0, random_state=42\n)\nX_csr = sp.csr_matrix(X)\n\n\[email protected](\n \"array_constr\", [np.array, sp.csr_matrix], ids=[\"dense\", \"sparse\"]\n)\[email protected](\"algo\", [\"full\", \"elkan\"])\[email protected](\"dtype\", [np.float32, np.float64])\ndef test_kmeans_results(array_constr, algo, dtype):\n # Checks that KMeans works as intended on toy dataset by comparing with\n # expected results computed by hand.\n X = array_constr([[0, 0], [0.5, 0], [0.5, 1], [1, 1]], dtype=dtype)\n sample_weight = [3, 1, 1, 3]\n init_centers = np.array([[0, 0], [1, 1]], dtype=dtype)\n\n expected_labels = [0, 0, 1, 1]\n expected_inertia = 0.375\n expected_centers = np.array([[0.125, 0], [0.875, 1]], dtype=dtype)\n expected_n_iter = 2\n\n kmeans = KMeans(n_clusters=2, n_init=1, init=init_centers, algorithm=algo)\n kmeans.fit(X, sample_weight=sample_weight)\n\n assert_array_equal(kmeans.labels_, expected_labels)\n assert_allclose(kmeans.inertia_, expected_inertia)\n assert_allclose(kmeans.cluster_centers_, expected_centers)\n assert kmeans.n_iter_ == expected_n_iter\n\n\[email protected](\n \"array_constr\", [np.array, sp.csr_matrix], ids=[\"dense\", \"sparse\"]\n)\[email protected](\"algo\", [\"full\", \"elkan\"])\ndef test_kmeans_relocated_clusters(array_constr, algo):\n # check that empty clusters are relocated as expected\n X = array_constr([[0, 0], [0.5, 0], [0.5, 1], [1, 1]])\n\n # second center too far from others points will be empty at first iter\n init_centers = np.array([[0.5, 0.5], [3, 3]])\n\n expected_labels = [0, 0, 1, 1]\n expected_inertia = 0.25\n expected_centers = [[0.25, 0], [0.75, 1]]\n expected_n_iter = 3\n\n kmeans = KMeans(n_clusters=2, n_init=1, init=init_centers, algorithm=algo)\n kmeans.fit(X)\n\n assert_array_equal(kmeans.labels_, expected_labels)\n assert_allclose(kmeans.inertia_, expected_inertia)\n assert_allclose(kmeans.cluster_centers_, expected_centers)\n assert 
kmeans.n_iter_ == expected_n_iter\n\n\[email protected](\n \"array_constr\", [np.array, sp.csr_matrix], ids=[\"dense\", \"sparse\"]\n)\ndef test_relocate_empty_clusters(array_constr):\n # test for the _relocate_empty_clusters_(dense/sparse) helpers\n\n # Synthetic dataset with 3 obvious clusters of different sizes\n X = np.array([-10.0, -9.5, -9, -8.5, -8, -1, 1, 9, 9.5, 10]).reshape(-1, 1)\n X = array_constr(X)\n sample_weight = np.ones(10)\n\n # centers all initialized to the first point of X\n centers_old = np.array([-10.0, -10, -10]).reshape(-1, 1)\n\n # With this initialization, all points will be assigned to the first center\n # At this point a center in centers_new is the weighted sum of the points\n # it contains if it's not empty, otherwise it is the same as before.\n centers_new = np.array([-16.5, -10, -10]).reshape(-1, 1)\n weight_in_clusters = np.array([10.0, 0, 0])\n labels = np.zeros(10, dtype=np.int32)\n\n if array_constr is np.array:\n _relocate_empty_clusters_dense(\n X, sample_weight, centers_old, centers_new, weight_in_clusters, labels\n )\n else:\n _relocate_empty_clusters_sparse(\n X.data,\n X.indices,\n X.indptr,\n sample_weight,\n centers_old,\n centers_new,\n weight_in_clusters,\n labels,\n )\n\n # The relocation scheme will take the 2 points farthest from the center and\n # assign them to the 2 empty clusters, i.e. points at 10 and at 9.9. The\n # first center will be updated to contain the other 8 points.\n assert_array_equal(weight_in_clusters, [8, 1, 1])\n assert_allclose(centers_new, [[-36], [10], [9.5]])\n\n\[email protected](\"distribution\", [\"normal\", \"blobs\"])\[email protected](\n \"array_constr\", [np.array, sp.csr_matrix], ids=[\"dense\", \"sparse\"]\n)\[email protected](\"tol\", [1e-2, 1e-8, 1e-100, 0])\ndef test_kmeans_elkan_results(distribution, array_constr, tol):\n # Check that results are identical between lloyd and elkan algorithms\n rnd = np.random.RandomState(0)\n if distribution == \"normal\":\n X = rnd.normal(size=(5000, 10))\n else:\n X, _ = make_blobs(random_state=rnd)\n X[X < 0] = 0\n X = array_constr(X)\n\n km_full = KMeans(algorithm=\"full\", n_clusters=5, random_state=0, n_init=1, tol=tol)\n km_elkan = KMeans(\n algorithm=\"elkan\", n_clusters=5, random_state=0, n_init=1, tol=tol\n )\n\n km_full.fit(X)\n km_elkan.fit(X)\n assert_allclose(km_elkan.cluster_centers_, km_full.cluster_centers_)\n assert_array_equal(km_elkan.labels_, km_full.labels_)\n assert km_elkan.n_iter_ == km_full.n_iter_\n assert km_elkan.inertia_ == pytest.approx(km_full.inertia_, rel=1e-6)\n\n\[email protected](\"algorithm\", [\"full\", \"elkan\"])\ndef test_kmeans_convergence(algorithm):\n # Check that KMeans stops when convergence is reached when tol=0. 
(#16075)\n rnd = np.random.RandomState(0)\n X = rnd.normal(size=(5000, 10))\n max_iter = 300\n\n km = KMeans(\n algorithm=algorithm,\n n_clusters=5,\n random_state=0,\n n_init=1,\n tol=0,\n max_iter=max_iter,\n ).fit(X)\n\n assert km.n_iter_ < max_iter\n\n\ndef test_minibatch_update_consistency():\n # Check that dense and sparse minibatch update give the same results\n rng = np.random.RandomState(42)\n\n centers_old = centers + rng.normal(size=centers.shape)\n centers_old_csr = centers_old.copy()\n\n centers_new = np.zeros_like(centers_old)\n centers_new_csr = np.zeros_like(centers_old_csr)\n\n weight_sums = np.zeros(centers_old.shape[0], dtype=X.dtype)\n weight_sums_csr = np.zeros(centers_old.shape[0], dtype=X.dtype)\n\n x_squared_norms = (X ** 2).sum(axis=1)\n x_squared_norms_csr = row_norms(X_csr, squared=True)\n\n sample_weight = np.ones(X.shape[0], dtype=X.dtype)\n\n # extract a small minibatch\n X_mb = X[:10]\n X_mb_csr = X_csr[:10]\n x_mb_squared_norms = x_squared_norms[:10]\n x_mb_squared_norms_csr = x_squared_norms_csr[:10]\n sample_weight_mb = sample_weight[:10]\n\n # step 1: compute the dense minibatch update\n old_inertia = _mini_batch_step(\n X_mb,\n x_mb_squared_norms,\n sample_weight_mb,\n centers_old,\n centers_new,\n weight_sums,\n np.random.RandomState(0),\n random_reassign=False,\n )\n assert old_inertia > 0.0\n\n # compute the new inertia on the same batch to check that it decreased\n labels, new_inertia = _labels_inertia(\n X_mb, sample_weight_mb, x_mb_squared_norms, centers_new\n )\n assert new_inertia > 0.0\n assert new_inertia < old_inertia\n\n # step 2: compute the sparse minibatch update\n old_inertia_csr = _mini_batch_step(\n X_mb_csr,\n x_mb_squared_norms_csr,\n sample_weight_mb,\n centers_old_csr,\n centers_new_csr,\n weight_sums_csr,\n np.random.RandomState(0),\n random_reassign=False,\n )\n assert old_inertia_csr > 0.0\n\n # compute the new inertia on the same batch to check that it decreased\n labels_csr, new_inertia_csr = _labels_inertia(\n X_mb_csr, sample_weight_mb, x_mb_squared_norms_csr, centers_new_csr\n )\n assert new_inertia_csr > 0.0\n assert new_inertia_csr < old_inertia_csr\n\n # step 3: check that sparse and dense updates lead to the same results\n assert_array_equal(labels, labels_csr)\n assert_allclose(centers_new, centers_new_csr)\n assert_allclose(old_inertia, old_inertia_csr)\n assert_allclose(new_inertia, new_inertia_csr)\n\n\ndef _check_fitted_model(km):\n # check that the number of clusters centers and distinct labels match\n # the expectation\n centers = km.cluster_centers_\n assert centers.shape == (n_clusters, n_features)\n\n labels = km.labels_\n assert np.unique(labels).shape[0] == n_clusters\n\n # check that the labels assignment are perfect (up to a permutation)\n assert_allclose(v_measure_score(true_labels, labels), 1.0)\n assert km.inertia_ > 0.0\n\n\[email protected](\"data\", [X, X_csr], ids=[\"dense\", \"sparse\"])\[email protected](\n \"init\",\n [\"random\", \"k-means++\", centers, lambda X, k, random_state: centers],\n ids=[\"random\", \"k-means++\", \"ndarray\", \"callable\"],\n)\[email protected](\"Estimator\", [KMeans, MiniBatchKMeans])\ndef test_all_init(Estimator, data, init):\n # Check KMeans and MiniBatchKMeans with all possible init.\n n_init = 10 if isinstance(init, str) else 1\n km = Estimator(\n init=init, n_clusters=n_clusters, random_state=42, n_init=n_init\n ).fit(data)\n _check_fitted_model(km)\n\n\[email protected](\n \"init\",\n [\"random\", \"k-means++\", centers, lambda X, k, random_state: centers],\n 
ids=[\"random\", \"k-means++\", \"ndarray\", \"callable\"],\n)\ndef test_minibatch_kmeans_partial_fit_init(init):\n # Check MiniBatchKMeans init with partial_fit\n n_init = 10 if isinstance(init, str) else 1\n km = MiniBatchKMeans(\n init=init, n_clusters=n_clusters, random_state=0, n_init=n_init\n )\n for i in range(100):\n # \"random\" init requires many batches to recover the true labels.\n km.partial_fit(X)\n _check_fitted_model(km)\n\n\[email protected](\"Estimator\", [KMeans, MiniBatchKMeans])\ndef test_fortran_aligned_data(Estimator):\n # Check that KMeans works with fortran-aligned data.\n X_fortran = np.asfortranarray(X)\n centers_fortran = np.asfortranarray(centers)\n\n km_c = Estimator(\n n_clusters=n_clusters, init=centers, n_init=1, random_state=42\n ).fit(X)\n km_f = Estimator(\n n_clusters=n_clusters, init=centers_fortran, n_init=1, random_state=42\n ).fit(X_fortran)\n assert_allclose(km_c.cluster_centers_, km_f.cluster_centers_)\n assert_array_equal(km_c.labels_, km_f.labels_)\n\n\[email protected](\"algo\", [\"full\", \"elkan\"])\[email protected](\"dtype\", [np.float32, np.float64])\[email protected](\"constructor\", [np.asarray, sp.csr_matrix])\[email protected](\n \"seed, max_iter, tol\",\n [\n (0, 2, 1e-7), # strict non-convergence\n (1, 2, 1e-1), # loose non-convergence\n (3, 300, 1e-7), # strict convergence\n (4, 300, 1e-1), # loose convergence\n ],\n)\ndef test_k_means_fit_predict(algo, dtype, constructor, seed, max_iter, tol):\n # check that fit.predict gives same result as fit_predict\n # There's a very small chance of failure with elkan on unstructured dataset\n # because predict method uses fast euclidean distances computation which\n # may cause small numerical instabilities.\n # NB: This test is largely redundant with respect to test_predict and\n # test_predict_equal_labels. 
This test has the added effect of\n # testing idempotence of the fittng procesdure which appears to\n # be where it fails on some MacOS setups.\n if sys.platform == \"darwin\":\n pytest.xfail(\n \"Known failures on MacOS, See \"\n \"https://github.com/scikit-learn/scikit-learn/issues/12644\"\n )\n\n rng = np.random.RandomState(seed)\n\n X = make_blobs(n_samples=1000, n_features=10, centers=10, random_state=rng)[\n 0\n ].astype(dtype, copy=False)\n X = constructor(X)\n\n kmeans = KMeans(\n algorithm=algo, n_clusters=10, random_state=seed, tol=tol, max_iter=max_iter\n )\n\n labels_1 = kmeans.fit(X).predict(X)\n labels_2 = kmeans.fit_predict(X)\n\n # Due to randomness in the order in which chunks of data are processed when\n # using more than one thread, the absolute values of the labels can be\n # different between the 2 strategies but they should correspond to the same\n # clustering.\n assert v_measure_score(labels_1, labels_2) == pytest.approx(1, abs=1e-15)\n\n\ndef test_minibatch_kmeans_verbose():\n # Check verbose mode of MiniBatchKMeans for better coverage.\n km = MiniBatchKMeans(n_clusters=n_clusters, random_state=42, verbose=1)\n old_stdout = sys.stdout\n sys.stdout = StringIO()\n try:\n km.fit(X)\n finally:\n sys.stdout = old_stdout\n\n\[email protected](\"algorithm\", [\"full\", \"elkan\"])\[email protected](\"tol\", [1e-2, 0])\ndef test_kmeans_verbose(algorithm, tol, capsys):\n # Check verbose mode of KMeans for better coverage.\n X = np.random.RandomState(0).normal(size=(5000, 10))\n\n KMeans(\n algorithm=algorithm,\n n_clusters=n_clusters,\n random_state=42,\n init=\"random\",\n n_init=1,\n tol=tol,\n verbose=1,\n ).fit(X)\n\n captured = capsys.readouterr()\n\n assert re.search(r\"Initialization complete\", captured.out)\n assert re.search(r\"Iteration [0-9]+, inertia\", captured.out)\n\n if tol == 0:\n assert re.search(r\"strict convergence\", captured.out)\n else:\n assert re.search(r\"center shift .* within tolerance\", captured.out)\n\n\ndef test_minibatch_kmeans_warning_init_size():\n # Check that a warning is raised when init_size is smaller than n_clusters\n with pytest.warns(\n RuntimeWarning, match=r\"init_size.* should be larger than n_clusters\"\n ):\n MiniBatchKMeans(init_size=10, n_clusters=20).fit(X)\n\n\[email protected](\"Estimator\", [KMeans, MiniBatchKMeans])\ndef test_warning_n_init_precomputed_centers(Estimator):\n # Check that a warning is raised when n_init > 1 and an array is passed for\n # the init parameter.\n with pytest.warns(\n RuntimeWarning,\n match=\"Explicit initial center position passed: \" \"performing only one init\",\n ):\n Estimator(init=centers, n_clusters=n_clusters, n_init=10).fit(X)\n\n\ndef test_minibatch_sensible_reassign():\n # check that identical initial clusters are reassigned\n # also a regression test for when there are more desired reassignments than\n # samples.\n zeroed_X, true_labels = make_blobs(n_samples=100, centers=5, random_state=42)\n zeroed_X[::2, :] = 0\n\n km = MiniBatchKMeans(\n n_clusters=20, batch_size=10, random_state=42, init=\"random\"\n ).fit(zeroed_X)\n # there should not be too many exact zero cluster centers\n assert km.cluster_centers_.any(axis=1).sum() > 10\n\n # do the same with batch-size > X.shape[0] (regression test)\n km = MiniBatchKMeans(\n n_clusters=20, batch_size=200, random_state=42, init=\"random\"\n ).fit(zeroed_X)\n # there should not be too many exact zero cluster centers\n assert km.cluster_centers_.any(axis=1).sum() > 10\n\n # do the same with partial_fit API\n km = 
MiniBatchKMeans(n_clusters=20, random_state=42, init=\"random\")\n for i in range(100):\n km.partial_fit(zeroed_X)\n # there should not be too many exact zero cluster centers\n assert km.cluster_centers_.any(axis=1).sum() > 10\n\n\[email protected](\"data\", [X, X_csr], ids=[\"dense\", \"sparse\"])\ndef test_minibatch_reassign(data):\n # Check the reassignment part of the minibatch step with very high or very\n # low reassignment ratio.\n perfect_centers = np.empty((n_clusters, n_features))\n for i in range(n_clusters):\n perfect_centers[i] = X[true_labels == i].mean(axis=0)\n\n x_squared_norms = row_norms(data, squared=True)\n sample_weight = np.ones(n_samples)\n centers_new = np.empty_like(perfect_centers)\n\n # Give a perfect initialization, but a large reassignment_ratio, as a\n # result many centers should be reassigned and the model should no longer\n # be good\n score_before = -_labels_inertia(\n data, sample_weight, x_squared_norms, perfect_centers, 1\n )[1]\n\n _mini_batch_step(\n data,\n x_squared_norms,\n sample_weight,\n perfect_centers,\n centers_new,\n np.zeros(n_clusters),\n np.random.RandomState(0),\n random_reassign=True,\n reassignment_ratio=1,\n )\n\n score_after = -_labels_inertia(\n data, sample_weight, x_squared_norms, centers_new, 1\n )[1]\n\n assert score_before > score_after\n\n # Give a perfect initialization, with a small reassignment_ratio,\n # no center should be reassigned.\n _mini_batch_step(\n data,\n x_squared_norms,\n sample_weight,\n perfect_centers,\n centers_new,\n np.zeros(n_clusters),\n np.random.RandomState(0),\n random_reassign=True,\n reassignment_ratio=1e-15,\n )\n\n assert_allclose(centers_new, perfect_centers)\n\n\ndef test_minibatch_with_many_reassignments():\n # Test for the case that the number of clusters to reassign is bigger\n # than the batch_size. 
Run the test with 100 clusters and a batch_size of\n # 10 because it turned out that these values ensure that the number of\n # clusters to reassign is always bigger than the batch_size.\n MiniBatchKMeans(\n n_clusters=100,\n batch_size=10,\n init_size=n_samples,\n random_state=42,\n verbose=True,\n ).fit(X)\n\n\ndef test_minibatch_kmeans_init_size():\n # Check the internal _init_size attribute of MiniBatchKMeans\n\n # default init size should be 3 * batch_size\n km = MiniBatchKMeans(n_clusters=10, batch_size=5, n_init=1).fit(X)\n assert km._init_size == 15\n\n # if 3 * batch size < n_clusters, it should then be 3 * n_clusters\n km = MiniBatchKMeans(n_clusters=10, batch_size=1, n_init=1).fit(X)\n assert km._init_size == 30\n\n # it should not be larger than n_samples\n km = MiniBatchKMeans(\n n_clusters=10, batch_size=5, n_init=1, init_size=n_samples + 1\n ).fit(X)\n assert km._init_size == n_samples\n\n\[email protected](\"tol, max_no_improvement\", [(1e-4, None), (0, 10)])\ndef test_minibatch_declared_convergence(capsys, tol, max_no_improvement):\n # Check convergence detection based on ewa batch inertia or on\n # small center change.\n X, _, centers = make_blobs(centers=3, random_state=0, return_centers=True)\n\n km = MiniBatchKMeans(\n n_clusters=3,\n init=centers,\n batch_size=20,\n tol=tol,\n random_state=0,\n max_iter=10,\n n_init=1,\n verbose=1,\n max_no_improvement=max_no_improvement,\n )\n\n km.fit(X)\n assert 1 < km.n_iter_ < 10\n\n captured = capsys.readouterr()\n if max_no_improvement is None:\n assert \"Converged (small centers change)\" in captured.out\n if tol == 0:\n assert \"Converged (lack of improvement in inertia)\" in captured.out\n\n\ndef test_minibatch_iter_steps():\n # Check consistency of n_iter_ and n_steps_ attributes.\n batch_size = 30\n n_samples = X.shape[0]\n km = MiniBatchKMeans(n_clusters=3, batch_size=batch_size, random_state=0).fit(X)\n\n # n_iter_ is the number of started epochs\n assert km.n_iter_ == np.ceil((km.n_steps_ * batch_size) / n_samples)\n assert isinstance(km.n_iter_, int)\n\n # without stopping condition, max_iter should be reached\n km = MiniBatchKMeans(\n n_clusters=3,\n batch_size=batch_size,\n random_state=0,\n tol=0,\n max_no_improvement=None,\n max_iter=10,\n ).fit(X)\n\n assert km.n_iter_ == 10\n assert km.n_steps_ == (10 * n_samples) // batch_size\n assert isinstance(km.n_steps_, int)\n\n\ndef test_kmeans_copyx():\n # Check that copy_x=False returns nearly equal X after de-centering.\n my_X = X.copy()\n km = KMeans(copy_x=False, n_clusters=n_clusters, random_state=42)\n km.fit(my_X)\n _check_fitted_model(km)\n\n # check that my_X is de-centered\n assert_allclose(my_X, X)\n\n\[email protected](\"Estimator\", [KMeans, MiniBatchKMeans])\ndef test_score_max_iter(Estimator):\n # Check that fitting KMeans or MiniBatchKMeans with more iterations gives\n # better score\n X = np.random.RandomState(0).randn(100, 10)\n\n km1 = Estimator(n_init=1, random_state=42, max_iter=1)\n s1 = km1.fit(X).score(X)\n km2 = Estimator(n_init=1, random_state=42, max_iter=10)\n s2 = km2.fit(X).score(X)\n assert s2 > s1\n\n\[email protected](\n \"array_constr\", [np.array, sp.csr_matrix], ids=[\"dense\", \"sparse\"]\n)\[email protected](\"dtype\", [np.float32, np.float64])\[email protected](\"init\", [\"random\", \"k-means++\"])\[email protected](\n \"Estimator, algorithm\",\n [(KMeans, \"full\"), (KMeans, \"elkan\"), (MiniBatchKMeans, None)],\n)\ndef test_predict(Estimator, algorithm, init, dtype, array_constr):\n # Check the predict method and the equivalence 
between fit.predict and\n # fit_predict.\n\n # There's a very small chance of failure with elkan on unstructured dataset\n # because predict method uses fast euclidean distances computation which\n # may cause small numerical instabilities.\n if sys.platform == \"darwin\":\n pytest.xfail(\n \"Known failures on MacOS, See \"\n \"https://github.com/scikit-learn/scikit-learn/issues/12644\"\n )\n\n X, _ = make_blobs(n_samples=500, n_features=10, centers=10, random_state=0)\n X = array_constr(X)\n\n # With n_init = 1\n km = Estimator(n_clusters=10, init=init, n_init=1, random_state=0)\n if algorithm is not None:\n km.set_params(algorithm=algorithm)\n km.fit(X)\n labels = km.labels_\n\n # re-predict labels for training set using predict\n pred = km.predict(X)\n assert_array_equal(pred, labels)\n\n # re-predict labels for training set using fit_predict\n pred = km.fit_predict(X)\n assert_array_equal(pred, labels)\n\n # predict centroid labels\n pred = km.predict(km.cluster_centers_)\n assert_array_equal(pred, np.arange(10))\n\n # With n_init > 1\n # Due to randomness in the order in which chunks of data are processed when\n # using more than one thread, there might be different rounding errors for\n # the computation of the inertia between 2 runs. This might result in a\n # different ranking of 2 inits, hence a different labeling, even if they\n # give the same clustering. We only check the labels up to a permutation.\n\n km = Estimator(n_clusters=10, init=init, n_init=10, random_state=0)\n if algorithm is not None:\n km.set_params(algorithm=algorithm)\n km.fit(X)\n labels = km.labels_\n\n # re-predict labels for training set using predict\n pred = km.predict(X)\n assert_allclose(v_measure_score(pred, labels), 1)\n\n # re-predict labels for training set using fit_predict\n pred = km.fit_predict(X)\n assert_allclose(v_measure_score(pred, labels), 1)\n\n # predict centroid labels\n pred = km.predict(km.cluster_centers_)\n assert_allclose(v_measure_score(pred, np.arange(10)), 1)\n\n\[email protected](\"Estimator\", [KMeans, MiniBatchKMeans])\ndef test_dense_sparse(Estimator):\n # Check that the results are the same for dense and sparse input.\n sample_weight = np.random.RandomState(0).random_sample((n_samples,))\n km_dense = Estimator(n_clusters=n_clusters, random_state=0, n_init=1)\n km_dense.fit(X, sample_weight=sample_weight)\n km_sparse = Estimator(n_clusters=n_clusters, random_state=0, n_init=1)\n km_sparse.fit(X_csr, sample_weight=sample_weight)\n\n assert_array_equal(km_dense.labels_, km_sparse.labels_)\n assert_allclose(km_dense.cluster_centers_, km_sparse.cluster_centers_)\n\n\[email protected](\n \"init\", [\"random\", \"k-means++\", centers], ids=[\"random\", \"k-means++\", \"ndarray\"]\n)\[email protected](\"Estimator\", [KMeans, MiniBatchKMeans])\ndef test_predict_dense_sparse(Estimator, init):\n # check that models trained on sparse input also works for dense input at\n # predict time and vice versa.\n n_init = 10 if isinstance(init, str) else 1\n km = Estimator(n_clusters=n_clusters, init=init, n_init=n_init, random_state=0)\n\n km.fit(X_csr)\n assert_array_equal(km.predict(X), km.labels_)\n\n km.fit(X)\n assert_array_equal(km.predict(X_csr), km.labels_)\n\n\[email protected](\n \"array_constr\", [np.array, sp.csr_matrix], ids=[\"dense\", \"sparse\"]\n)\[email protected](\"dtype\", [np.int32, np.int64])\[email protected](\"init\", [\"k-means++\", \"ndarray\"])\[email protected](\"Estimator\", [KMeans, MiniBatchKMeans])\ndef test_integer_input(Estimator, array_constr, dtype, init):\n # 
Check that KMeans and MiniBatchKMeans work with integer input.\n X_dense = np.array([[0, 0], [10, 10], [12, 9], [-1, 1], [2, 0], [8, 10]])\n X = array_constr(X_dense, dtype=dtype)\n\n n_init = 1 if init == \"ndarray\" else 10\n init = X_dense[:2] if init == \"ndarray\" else init\n\n km = Estimator(n_clusters=2, init=init, n_init=n_init, random_state=0)\n if Estimator is MiniBatchKMeans:\n km.set_params(batch_size=2)\n\n km.fit(X)\n\n # Internally integer input should be converted to float64\n assert km.cluster_centers_.dtype == np.float64\n\n expected_labels = [0, 1, 1, 0, 0, 1]\n assert_allclose(v_measure_score(km.labels_, expected_labels), 1)\n\n # Same with partial_fit (#14314)\n if Estimator is MiniBatchKMeans:\n km = clone(km).partial_fit(X)\n assert km.cluster_centers_.dtype == np.float64\n\n\[email protected](\"Estimator\", [KMeans, MiniBatchKMeans])\ndef test_transform(Estimator):\n # Check the transform method\n km = Estimator(n_clusters=n_clusters).fit(X)\n\n # Transorfming cluster_centers_ should return the pairwise distances\n # between centers\n Xt = km.transform(km.cluster_centers_)\n assert_allclose(Xt, pairwise_distances(km.cluster_centers_))\n # In particular, diagonal must be 0\n assert_array_equal(Xt.diagonal(), np.zeros(n_clusters))\n\n # Transorfming X should return the pairwise distances between X and the\n # centers\n Xt = km.transform(X)\n assert_allclose(Xt, pairwise_distances(X, km.cluster_centers_))\n\n\[email protected](\"Estimator\", [KMeans, MiniBatchKMeans])\ndef test_fit_transform(Estimator):\n # Check equivalence between fit.transform and fit_transform\n X1 = Estimator(random_state=0, n_init=1).fit(X).transform(X)\n X2 = Estimator(random_state=0, n_init=1).fit_transform(X)\n assert_allclose(X1, X2)\n\n\ndef test_n_init():\n # Check that increasing the number of init increases the quality\n previous_inertia = np.inf\n for n_init in [1, 5, 10]:\n # set max_iter=1 to avoid finding the global minimum and get the same\n # inertia each time\n km = KMeans(\n n_clusters=n_clusters,\n init=\"random\",\n n_init=n_init,\n random_state=0,\n max_iter=1,\n ).fit(X)\n assert km.inertia_ <= previous_inertia\n\n\ndef test_k_means_function():\n # test calling the k_means function directly\n cluster_centers, labels, inertia = k_means(\n X, n_clusters=n_clusters, sample_weight=None\n )\n\n assert cluster_centers.shape == (n_clusters, n_features)\n assert np.unique(labels).shape[0] == n_clusters\n\n # check that the labels assignment are perfect (up to a permutation)\n assert_allclose(v_measure_score(true_labels, labels), 1.0)\n assert inertia > 0.0\n\n\[email protected](\"data\", [X, X_csr], ids=[\"dense\", \"sparse\"])\[email protected](\"Estimator\", [KMeans, MiniBatchKMeans])\ndef test_float_precision(Estimator, data):\n # Check that the results are the same for single and double precision.\n km = Estimator(n_init=1, random_state=0)\n\n inertia = {}\n Xt = {}\n centers = {}\n labels = {}\n\n for dtype in [np.float64, np.float32]:\n X = data.astype(dtype, **_astype_copy_false(data))\n km.fit(X)\n\n inertia[dtype] = km.inertia_\n Xt[dtype] = km.transform(X)\n centers[dtype] = km.cluster_centers_\n labels[dtype] = km.labels_\n\n # dtype of cluster centers has to be the dtype of the input data\n assert km.cluster_centers_.dtype == dtype\n\n # same with partial_fit\n if Estimator is MiniBatchKMeans:\n km.partial_fit(X[0:3])\n assert km.cluster_centers_.dtype == dtype\n\n # compare arrays with low precision since the difference between 32 and\n # 64 bit comes from an accumulation 
of rounding errors.\n assert_allclose(inertia[np.float32], inertia[np.float64], rtol=1e-5)\n assert_allclose(Xt[np.float32], Xt[np.float64], rtol=1e-5)\n assert_allclose(centers[np.float32], centers[np.float64], rtol=1e-5)\n assert_array_equal(labels[np.float32], labels[np.float64])\n\n\[email protected](\"dtype\", [np.int32, np.int64, np.float32, np.float64])\[email protected](\"Estimator\", [KMeans, MiniBatchKMeans])\ndef test_centers_not_mutated(Estimator, dtype):\n # Check that KMeans and MiniBatchKMeans won't mutate the user provided\n # init centers silently even if input data and init centers have the same\n # type.\n X_new_type = X.astype(dtype, copy=False)\n centers_new_type = centers.astype(dtype, copy=False)\n\n km = Estimator(init=centers_new_type, n_clusters=n_clusters, n_init=1)\n km.fit(X_new_type)\n\n assert not np.may_share_memory(km.cluster_centers_, centers_new_type)\n\n\[email protected](\"data\", [X, X_csr], ids=[\"dense\", \"sparse\"])\ndef test_kmeans_init_fitted_centers(data):\n # Check that starting fitting from a local optimum shouldn't change the\n # solution\n km1 = KMeans(n_clusters=n_clusters).fit(data)\n km2 = KMeans(n_clusters=n_clusters, init=km1.cluster_centers_, n_init=1).fit(data)\n\n assert_allclose(km1.cluster_centers_, km2.cluster_centers_)\n\n\ndef test_kmeans_warns_less_centers_than_unique_points():\n # Check KMeans when the number of found clusters is smaller than expected\n X = np.asarray([[0, 0], [0, 1], [1, 0], [1, 0]]) # last point is duplicated\n km = KMeans(n_clusters=4)\n\n # KMeans should warn that fewer labels than cluster centers have been used\n msg = (\n r\"Number of distinct clusters \\(3\\) found smaller than \"\n r\"n_clusters \\(4\\). Possibly due to duplicate points in X.\"\n )\n with pytest.warns(ConvergenceWarning, match=msg):\n km.fit(X)\n # only three distinct points, so only three clusters\n # can have points assigned to them\n assert set(km.labels_) == set(range(3))\n\n\ndef _sort_centers(centers):\n return np.sort(centers, axis=0)\n\n\ndef test_weighted_vs_repeated():\n # Check that a sample weight of N should yield the same result as an N-fold\n # repetition of the sample. Valid only if init is precomputed, otherwise\n # rng produces different results. 
Not valid for MinibatchKMeans due to rng\n # to extract minibatches.\n sample_weight = np.random.RandomState(0).randint(1, 5, size=n_samples)\n X_repeat = np.repeat(X, sample_weight, axis=0)\n\n km = KMeans(init=centers, n_init=1, n_clusters=n_clusters, random_state=0)\n\n km_weighted = clone(km).fit(X, sample_weight=sample_weight)\n repeated_labels = np.repeat(km_weighted.labels_, sample_weight)\n km_repeated = clone(km).fit(X_repeat)\n\n assert_array_equal(km_repeated.labels_, repeated_labels)\n assert_allclose(km_weighted.inertia_, km_repeated.inertia_)\n assert_allclose(\n _sort_centers(km_weighted.cluster_centers_),\n _sort_centers(km_repeated.cluster_centers_),\n )\n\n\[email protected](\"data\", [X, X_csr], ids=[\"dense\", \"sparse\"])\[email protected](\"Estimator\", [KMeans, MiniBatchKMeans])\ndef test_unit_weights_vs_no_weights(Estimator, data):\n # Check that not passing sample weights should be equivalent to passing\n # sample weights all equal to one.\n sample_weight = np.ones(n_samples)\n\n km = Estimator(n_clusters=n_clusters, random_state=42, n_init=1)\n km_none = clone(km).fit(data, sample_weight=None)\n km_ones = clone(km).fit(data, sample_weight=sample_weight)\n\n assert_array_equal(km_none.labels_, km_ones.labels_)\n assert_allclose(km_none.cluster_centers_, km_ones.cluster_centers_)\n\n\[email protected](\"data\", [X, X_csr], ids=[\"dense\", \"sparse\"])\[email protected](\"Estimator\", [KMeans, MiniBatchKMeans])\ndef test_scaled_weights(Estimator, data):\n # Check that scaling all sample weights by a common factor\n # shouldn't change the result\n sample_weight = np.random.RandomState(0).uniform(n_samples)\n\n km = Estimator(n_clusters=n_clusters, random_state=42, n_init=1)\n km_orig = clone(km).fit(data, sample_weight=sample_weight)\n km_scaled = clone(km).fit(data, sample_weight=0.5 * sample_weight)\n\n assert_array_equal(km_orig.labels_, km_scaled.labels_)\n assert_allclose(km_orig.cluster_centers_, km_scaled.cluster_centers_)\n\n\ndef test_kmeans_elkan_iter_attribute():\n # Regression test on bad n_iter_ value. 
Previous bug n_iter_ was one off\n    # it's right value (#11340).\n    km = KMeans(algorithm=\"elkan\", max_iter=1).fit(X)\n    assert km.n_iter_ == 1\n\n\n@pytest.mark.parametrize(\n    \"array_constr\", [np.array, sp.csr_matrix], ids=[\"dense\", \"sparse\"]\n)\ndef test_kmeans_empty_cluster_relocated(array_constr):\n    # check that empty clusters are correctly relocated when using sample\n    # weights (#13486)\n    X = array_constr([[-1], [1]])\n    sample_weight = [1.9, 0.1]\n    init = np.array([[-1], [10]])\n\n    km = KMeans(n_clusters=2, init=init, n_init=1)\n    km.fit(X, sample_weight=sample_weight)\n\n    assert len(set(km.labels_)) == 2\n    assert_allclose(km.cluster_centers_, [[-1], [1]])\n\n\n@pytest.mark.parametrize(\"Estimator\", [KMeans, MiniBatchKMeans])\ndef test_result_equal_in_diff_n_threads(Estimator):\n    # Check that KMeans/MiniBatchKMeans give the same results in parallel mode\n    # than in sequential mode.\n    rnd = np.random.RandomState(0)\n    X = rnd.normal(size=(50, 10))\n\n    with threadpool_limits(limits=1, user_api=\"openmp\"):\n        result_1 = Estimator(n_clusters=n_clusters, random_state=0).fit(X).labels_\n    with threadpool_limits(limits=2, user_api=\"openmp\"):\n        result_2 = Estimator(n_clusters=n_clusters, random_state=0).fit(X).labels_\n    assert_array_equal(result_1, result_2)\n\n\n@pytest.mark.parametrize(\"attr\", [\"counts_\", \"init_size_\", \"random_state_\"])\ndef test_minibatch_kmeans_deprecated_attributes(attr):\n    # check that we raise a deprecation warning when accessing `init_size_`\n    # FIXME: remove in 1.1\n    depr_msg = (\n        f\"The attribute '{attr}' is deprecated in 0.24 and will be \" f\"removed in 1.1\"\n    )\n    km = MiniBatchKMeans(n_clusters=2, n_init=1, init=\"random\", random_state=0)\n    km.fit(X)\n\n    with pytest.warns(FutureWarning, match=depr_msg):\n        getattr(km, attr)\n\n\ndef test_warning_elkan_1_cluster():\n    # Check warning messages specific to KMeans\n    with pytest.warns(\n        RuntimeWarning,\n        match=\"algorithm='elkan' doesn't make sense for a single\" \" cluster\",\n    ):\n        KMeans(n_clusters=1, algorithm=\"elkan\").fit(X)\n\n\n@pytest.mark.parametrize(\n    \"array_constr\", [np.array, sp.csr_matrix], ids=[\"dense\", \"sparse\"]\n)\n@pytest.mark.parametrize(\"algo\", [\"full\", \"elkan\"])\ndef test_k_means_1_iteration(array_constr, algo):\n    # check the results after a single iteration (E-step M-step E-step) by\n    # comparing against a pure python implementation.\n    X = np.random.RandomState(0).uniform(size=(100, 5))\n    init_centers = X[:5]\n    X = array_constr(X)\n\n    def py_kmeans(X, init):\n        new_centers = init.copy()\n        labels = pairwise_distances_argmin(X, init)\n        for label in range(init.shape[0]):\n            new_centers[label] = X[labels == label].mean(axis=0)\n        labels = pairwise_distances_argmin(X, new_centers)\n        return labels, new_centers\n\n    py_labels, py_centers = py_kmeans(X, init_centers)\n\n    cy_kmeans = KMeans(\n        n_clusters=5, n_init=1, init=init_centers, algorithm=algo, max_iter=1\n    ).fit(X)\n    cy_labels = cy_kmeans.labels_\n    cy_centers = cy_kmeans.cluster_centers_\n\n    assert_array_equal(py_labels, cy_labels)\n    assert_allclose(py_centers, cy_centers)\n\n\n@pytest.mark.parametrize(\"dtype\", [np.float32, np.float64])\n@pytest.mark.parametrize(\"squared\", [True, False])\ndef test_euclidean_distance(dtype, squared):\n    # Check that the _euclidean_(dense/sparse)_dense helpers produce correct\n    # results\n    rng = np.random.RandomState(0)\n    a_sparse = sp.random(\n        1, 100, density=0.5, format=\"csr\", random_state=rng, dtype=dtype\n    )\n    a_dense = a_sparse.toarray().reshape(-1)\n    b = rng.randn(100).astype(dtype, copy=False)\n    b_squared_norm = (b ** 2).sum()\n\n
    expected = ((a_dense - b) ** 2).sum()\n    expected = expected if squared else np.sqrt(expected)\n\n    distance_dense_dense = _euclidean_dense_dense_wrapper(a_dense, b, squared)\n    distance_sparse_dense = _euclidean_sparse_dense_wrapper(\n        a_sparse.data, a_sparse.indices, b, b_squared_norm, squared\n    )\n\n    assert_allclose(distance_dense_dense, distance_sparse_dense, rtol=1e-6)\n    assert_allclose(distance_dense_dense, expected, rtol=1e-6)\n    assert_allclose(distance_sparse_dense, expected, rtol=1e-6)\n\n\n@pytest.mark.parametrize(\"dtype\", [np.float32, np.float64])\ndef test_inertia(dtype):\n    # Check that the _inertia_(dense/sparse) helpers produce correct results.\n    rng = np.random.RandomState(0)\n    X_sparse = sp.random(\n        100, 10, density=0.5, format=\"csr\", random_state=rng, dtype=dtype\n    )\n    X_dense = X_sparse.toarray()\n    sample_weight = rng.randn(100).astype(dtype, copy=False)\n    centers = rng.randn(5, 10).astype(dtype, copy=False)\n    labels = rng.randint(5, size=100, dtype=np.int32)\n\n    distances = ((X_dense - centers[labels]) ** 2).sum(axis=1)\n    expected = np.sum(distances * sample_weight)\n\n    inertia_dense = _inertia_dense(X_dense, sample_weight, centers, labels, n_threads=1)\n    inertia_sparse = _inertia_sparse(\n        X_sparse, sample_weight, centers, labels, n_threads=1\n    )\n\n    assert_allclose(inertia_dense, inertia_sparse, rtol=1e-6)\n    assert_allclose(inertia_dense, expected, rtol=1e-6)\n    assert_allclose(inertia_sparse, expected, rtol=1e-6)\n\n\n@pytest.mark.parametrize(\"Estimator\", [KMeans, MiniBatchKMeans])\ndef test_sample_weight_unchanged(Estimator):\n    # Check that sample_weight is not modified in place by KMeans (#17204)\n    X = np.array([[1], [2], [4]])\n    sample_weight = np.array([0.5, 0.2, 0.3])\n    Estimator(n_clusters=2, random_state=0).fit(X, sample_weight=sample_weight)\n\n    assert_array_equal(sample_weight, np.array([0.5, 0.2, 0.3]))\n\n\n@pytest.mark.parametrize(\"Estimator\", [KMeans, MiniBatchKMeans])\n@pytest.mark.parametrize(\n    \"param, match\",\n    [\n        ({\"n_init\": 0}, r\"n_init should be > 0\"),\n        ({\"max_iter\": 0}, r\"max_iter should be > 0\"),\n        ({\"n_clusters\": n_samples + 1}, r\"n_samples.* should be >= n_clusters\"),\n        (\n            {\"init\": X[:2]},\n            r\"The shape of the initial centers .* does not match \"\n            r\"the number of clusters\",\n        ),\n        (\n            {\"init\": lambda X_, k, random_state: X_[:2]},\n            r\"The shape of the initial centers .* does not match \"\n            r\"the number of clusters\",\n        ),\n        (\n            {\"init\": X[:8, :2]},\n            r\"The shape of the initial centers .* does not match \"\n            r\"the number of features of the data\",\n        ),\n        (\n            {\"init\": lambda X_, k, random_state: X_[:8, :2]},\n            r\"The shape of the initial centers .* does not match \"\n            r\"the number of features of the data\",\n        ),\n        (\n            {\"init\": \"wrong\"},\n            r\"init should be either 'k-means\\+\\+', 'random', \"\n            r\"a ndarray or a callable\",\n        ),\n    ],\n)\ndef test_wrong_params(Estimator, param, match):\n    # Check that error are raised with clear error message when wrong values\n    # are passed for the parameters\n    # Set n_init=1 by default to avoid warning with precomputed init\n    km = Estimator(n_init=1)\n    with pytest.raises(ValueError, match=match):\n        km.set_params(**param).fit(X)\n\n\n@pytest.mark.parametrize(\n    \"param, match\",\n    [({\"algorithm\": \"wrong\"}, r\"Algorithm must be 'auto', 'full' or 'elkan'\")],\n)\ndef test_kmeans_wrong_params(param, match):\n    # Check that error are raised with clear error message when wrong values\n    # are passed for the KMeans specific parameters\n    with pytest.raises(ValueError, match=match):\n        KMeans(**param).fit(X)\n\n\n@pytest.mark.parametrize(\n
    \"param, match\",\n    [\n        ({\"max_no_improvement\": -1}, r\"max_no_improvement should be >= 0\"),\n        ({\"batch_size\": -1}, r\"batch_size should be > 0\"),\n        ({\"init_size\": -1}, r\"init_size should be > 0\"),\n        ({\"reassignment_ratio\": -1}, r\"reassignment_ratio should be >= 0\"),\n    ],\n)\ndef test_minibatch_kmeans_wrong_params(param, match):\n    # Check that error are raised with clear error message when wrong values\n    # are passed for the MiniBatchKMeans specific parameters\n    with pytest.raises(ValueError, match=match):\n        MiniBatchKMeans(**param).fit(X)\n\n\n@pytest.mark.parametrize(\n    \"param, match\",\n    [\n        (\n            {\"n_local_trials\": 0},\n            r\"n_local_trials is set to 0 but should be an \"\n            r\"integer value greater than zero\",\n        ),\n        (\n            {\"x_squared_norms\": X[:2]},\n            r\"The length of x_squared_norms .* should \"\n            r\"be equal to the length of n_samples\",\n        ),\n    ],\n)\ndef test_kmeans_plusplus_wrong_params(param, match):\n    with pytest.raises(ValueError, match=match):\n        kmeans_plusplus(X, n_clusters, **param)\n\n\n@pytest.mark.parametrize(\"data\", [X, X_csr])\n@pytest.mark.parametrize(\"dtype\", [np.float64, np.float32])\ndef test_kmeans_plusplus_output(data, dtype):\n    # Check for the correct number of seeds and all positive values\n    data = data.astype(dtype)\n    centers, indices = kmeans_plusplus(data, n_clusters)\n\n    # Check there are the correct number of indices and that all indices are\n    # positive and within the number of samples\n    assert indices.shape[0] == n_clusters\n    assert (indices >= 0).all()\n    assert (indices <= data.shape[0]).all()\n\n    # Check for the correct number of seeds and that they are bound by the data\n    assert centers.shape[0] == n_clusters\n    assert (centers.max(axis=0) <= data.max(axis=0)).all()\n    assert (centers.min(axis=0) >= data.min(axis=0)).all()\n\n    # Check that indices correspond to reported centers\n    # Use X for comparison rather than data, test still works against centers\n    # calculated with sparse data.\n    assert_allclose(X[indices].astype(dtype), centers)\n\n\n@pytest.mark.parametrize(\"x_squared_norms\", [row_norms(X, squared=True), None])\ndef test_kmeans_plusplus_norms(x_squared_norms):\n    # Check that defining x_squared_norms returns the same as default=None.\n    centers, indices = kmeans_plusplus(X, n_clusters, x_squared_norms=x_squared_norms)\n\n    assert_allclose(X[indices], centers)\n\n\ndef test_kmeans_plusplus_dataorder():\n    # Check that memory layout does not effect result\n    centers_c, _ = kmeans_plusplus(X, n_clusters, random_state=0)\n\n    X_fortran = np.asfortranarray(X)\n\n    centers_fortran, _ = kmeans_plusplus(X_fortran, n_clusters, random_state=0)\n\n    assert_allclose(centers_c, centers_fortran)\n"
] | [
[
"numpy.arange",
"numpy.in1d",
"numpy.concatenate",
"numpy.searchsorted",
"scipy.sparse.vstack",
"numpy.array"
],
[
"numpy.dot",
"scipy.linalg.pinv",
"scipy.linalg.svd",
"numpy.sqrt",
"numpy.asarray",
"numpy.linalg.multi_dot",
"numpy.tanh",
"numpy.exp",
"numpy.zeros",
"numpy.empty"
],
[
"numpy.log",
"numpy.ones_like",
"scipy.sparse.issparse",
"numpy.ones",
"numpy.logical_or",
"numpy.zeros"
],
[
"numpy.diag",
"numpy.dot",
"numpy.log",
"scipy.linalg.svd",
"numpy.sqrt",
"numpy.abs",
"numpy.diag_indices",
"numpy.eye",
"numpy.linalg.multi_dot",
"numpy.ones",
"numpy.finfo",
"numpy.copy",
"scipy.linalg.pinvh",
"numpy.var",
"numpy.array",
"numpy.zeros",
"numpy.sum"
],
[
"numpy.log",
"numpy.log2",
"scipy.sparse.issparse",
"numpy.isnan",
"scipy.sparse.linalg.svds",
"numpy.apply_along_axis",
"scipy.linalg.norm",
"numpy.argsort",
"scipy.sparse.dia_matrix",
"scipy.sparse.linalg.eigsh",
"numpy.vstack"
],
[
"numpy.sqrt",
"sklearn.cluster.KMeans",
"numpy.asarray",
"sklearn.cluster._kmeans._labels_inertia",
"sklearn.cluster._k_means_common._relocate_empty_clusters_sparse",
"scipy.sparse.random",
"sklearn.base.clone",
"numpy.zeros_like",
"sklearn.datasets.make_blobs",
"sklearn.utils.fixes._astype_copy_false",
"sklearn.utils._testing.assert_allclose",
"numpy.may_share_memory",
"numpy.unique",
"numpy.empty_like",
"numpy.arange",
"sklearn.cluster._k_means_common._inertia_dense",
"numpy.ceil",
"sklearn.cluster._k_means_common._euclidean_sparse_dense_wrapper",
"numpy.repeat",
"numpy.zeros",
"sklearn.utils._testing.assert_array_equal",
"sklearn.cluster._k_means_common._relocate_empty_clusters_dense",
"numpy.asfortranarray",
"scipy.sparse.csr_matrix",
"sklearn.cluster._k_means_common._inertia_sparse",
"sklearn.cluster.kmeans_plusplus",
"numpy.random.RandomState",
"numpy.array",
"numpy.sum",
"sklearn.metrics.cluster.v_measure_score",
"sklearn.metrics.pairwise_distances",
"sklearn.cluster._k_means_common._euclidean_dense_dense_wrapper",
"sklearn.cluster.k_means",
"numpy.sort",
"numpy.ones",
"sklearn.metrics.pairwise_distances_argmin",
"sklearn.cluster.MiniBatchKMeans",
"numpy.empty",
"sklearn.utils.extmath.row_norms"
]
] |
ZiningWang/Sparse_Pooling | [
"a160ddf9a03ef53bad630b4ac186a8437bd0475c",
"a160ddf9a03ef53bad630b4ac186a8437bd0475c",
"a160ddf9a03ef53bad630b4ac186a8437bd0475c"
] | [
"MV3D_TF_release/lib/datasets/voc_eval.py",
"MV3D_TF_release/lib/gt_data_layer/roidb.py",
"avod/scripts/offline_eval/get_AP.py"
] | [
"# --------------------------------------------------------\n# Fast/er R-CNN\n# Licensed under The MIT License [see LICENSE for details]\n# Written by Bharath Hariharan\n# --------------------------------------------------------\n\nimport xml.etree.ElementTree as ET\nimport os\nimport pickle\nimport numpy as np\nimport pdb\ndef parse_rec(filename):\n \"\"\" Parse a PASCAL VOC xml file \"\"\"\n tree = ET.parse(filename)\n objects = []\n for obj in tree.findall('object'):\n obj_struct = {}\n obj_struct['name'] = obj.find('name').text\n obj_struct['pose'] = obj.find('pose').text\n obj_struct['truncated'] = int(obj.find('truncated').text)\n obj_struct['difficult'] = int(obj.find('difficult').text)\n bbox = obj.find('bndbox')\n obj_struct['bbox'] = [int(bbox.find('xmin').text),\n int(bbox.find('ymin').text),\n int(bbox.find('xmax').text),\n int(bbox.find('ymax').text)]\n objects.append(obj_struct)\n\n return objects\n\ndef voc_ap(rec, prec, use_07_metric=False):\n \"\"\" ap = voc_ap(rec, prec, [use_07_metric])\n Compute VOC AP given precision and recall.\n If use_07_metric is true, uses the\n VOC 07 11 point method (default:False).\n \"\"\"\n if use_07_metric:\n # 11 point metric\n ap = 0.\n for t in np.arange(0., 1.1, 0.1):\n if np.sum(rec >= t) == 0:\n p = 0\n else:\n p = np.max(prec[rec >= t])\n ap = ap + p / 11.\n else:\n # correct AP calculation\n # first append sentinel values at the end\n mrec = np.concatenate(([0.], rec, [1.]))\n mpre = np.concatenate(([0.], prec, [0.]))\n\n # compute the precision envelope\n for i in range(mpre.size - 1, 0, -1):\n mpre[i - 1] = np.maximum(mpre[i - 1], mpre[i])\n\n # to calculate area under PR curve, look for points\n # where X axis (recall) changes value\n i = np.where(mrec[1:] != mrec[:-1])[0]\n\n # and sum (\\Delta recall) * prec\n ap = np.sum((mrec[i + 1] - mrec[i]) * mpre[i + 1])\n return ap\n\ndef voc_eval(detpath,\n annopath,\n imagesetfile,\n classname,\n cachedir,\n ovthresh=0.5,\n use_07_metric=False):\n \"\"\"rec, prec, ap = voc_eval(detpath,\n annopath,\n imagesetfile,\n classname,\n [ovthresh],\n [use_07_metric])\n\n Top level function that does the PASCAL VOC evaluation.\n\n detpath: Path to detections\n detpath.format(classname) should produce the detection results file.\n annopath: Path to annotations\n annopath.format(imagename) should be the xml annotations file.\n imagesetfile: Text file containing the list of images, one image per line.\n classname: Category name (duh)\n cachedir: Directory for caching the annotations\n [ovthresh]: Overlap threshold (default = 0.5)\n [use_07_metric]: Whether to use VOC07's 11 point AP computation\n (default False)\n \"\"\"\n # assumes detections are in detpath.format(classname)\n # assumes annotations are in annopath.format(imagename)\n # assumes imagesetfile is a text file with each line an image name\n # cachedir caches the annotations in a pickle file\n\n # first load gt\n if not os.path.isdir(cachedir):\n os.mkdir(cachedir)\n cachefile = os.path.join(cachedir, 'annots.pkl')\n # read list of images\n with open(imagesetfile, 'r') as f:\n lines = f.readlines()\n imagenames = [x.strip() for x in lines]\n\n if not os.path.isfile(cachefile):\n # load annots\n recs = {}\n for i, imagename in enumerate(imagenames):\n recs[imagename] = parse_rec(annopath.format(imagename))\n if i % 100 == 0:\n print ('Reading annotation for {:d}/{:d}'.format(\n i + 1, len(imagenames)))\n # save\n print ('Saving cached annotations to {:s}'.format(cachefile))\n with open(cachefile, 'w') as f:\n cPickle.dump(recs, f)\n 
else:\n # load\n with open(cachefile, 'r') as f:\n recs = cPickle.load(f)\n\n # extract gt objects for this class\n class_recs = {}\n npos = 0\n for imagename in imagenames:\n R = [obj for obj in recs[imagename] if obj['name'] == classname]\n bbox = np.array([x['bbox'] for x in R])\n difficult = np.array([x['difficult'] for x in R]).astype(np.bool)\n det = [False] * len(R)\n npos = npos + sum(~difficult)\n class_recs[imagename] = {'bbox': bbox,\n 'difficult': difficult,\n 'det': det}\n\n # read dets\n detfile = detpath.format(classname)\n with open(detfile, 'r') as f:\n lines = f.readlines()\n if any(lines) == 1:\n\n splitlines = [x.strip().split(' ') for x in lines]\n image_ids = [x[0] for x in splitlines]\n confidence = np.array([float(x[1]) for x in splitlines])\n BB = np.array([[float(z) for z in x[2:]] for x in splitlines])\n\n # sort by confidence\n sorted_ind = np.argsort(-confidence)\n sorted_scores = np.sort(-confidence)\n BB = BB[sorted_ind, :]\n image_ids = [image_ids[x] for x in sorted_ind]\n\n # go down dets and mark TPs and FPs\n nd = len(image_ids)\n tp = np.zeros(nd)\n fp = np.zeros(nd)\n for d in range(nd):\n R = class_recs[image_ids[d]]\n bb = BB[d, :].astype(float)\n ovmax = -np.inf\n BBGT = R['bbox'].astype(float)\n\n if BBGT.size > 0:\n # compute overlaps\n # intersection\n ixmin = np.maximum(BBGT[:, 0], bb[0])\n iymin = np.maximum(BBGT[:, 1], bb[1])\n ixmax = np.minimum(BBGT[:, 2], bb[2])\n iymax = np.minimum(BBGT[:, 3], bb[3])\n iw = np.maximum(ixmax - ixmin + 1., 0.)\n ih = np.maximum(iymax - iymin + 1., 0.)\n inters = iw * ih\n\n # union\n uni = ((bb[2] - bb[0] + 1.) * (bb[3] - bb[1] + 1.) +\n (BBGT[:, 2] - BBGT[:, 0] + 1.) *\n (BBGT[:, 3] - BBGT[:, 1] + 1.) - inters)\n\n overlaps = inters / uni\n ovmax = np.max(overlaps)\n jmax = np.argmax(overlaps)\n\n if ovmax > ovthresh:\n if not R['difficult'][jmax]:\n if not R['det'][jmax]:\n tp[d] = 1.\n R['det'][jmax] = 1\n else:\n fp[d] = 1.\n else:\n fp[d] = 1.\n\n # compute precision recall\n fp = np.cumsum(fp)\n tp = np.cumsum(tp)\n rec = tp / float(npos)\n # avoid divide by zero in case the first detection matches a difficult\n # ground truth\n prec = tp / np.maximum(tp + fp, np.finfo(np.float64).eps)\n ap = voc_ap(rec, prec, use_07_metric)\n else:\n rec = -1\n prec = -1\n ap = -1\n\n return rec, prec, ap\n",
"# --------------------------------------------------------\n# Fast R-CNN\n# Copyright (c) 2015 Microsoft\n# Licensed under The MIT License [see LICENSE for details]\n# Written by Ross Girshick\n# --------------------------------------------------------\n\n\"\"\"Transform a roidb into a trainable roidb by adding a bunch of metadata.\"\"\"\n\nimport numpy as np\nfrom fast_rcnn.config import cfg\nfrom utils.cython_bbox import bbox_overlaps\nfrom utils.boxes_grid import get_boxes_grid\nimport scipy.sparse\nimport PIL\nimport math\nimport os\nimport pickle\nimport pdb\n\n\ndef prepare_roidb(imdb):\n \"\"\"Enrich the imdb's roidb by adding some derived quantities that\n are useful for training. This function precomputes the maximum\n overlap, taken over ground-truth boxes, between each ROI and\n each ground-truth box. The class with maximum overlap is also\n recorded.\n \"\"\"\n cache_file = os.path.join(imdb.cache_path, imdb.name + '_gt_roidb_prepared.pkl')\n if os.path.exists(cache_file):\n with open(cache_file, 'rb') as fid:\n imdb._roidb = cPickle.load(fid)\n print ('{} gt roidb prepared loaded from {}'.format(imdb.name, cache_file))\n return\n\n roidb = imdb.roidb\n for i in xrange(len(imdb.image_index)):\n roidb[i]['image'] = imdb.image_path_at(i)\n boxes = roidb[i]['boxes']\n labels = roidb[i]['gt_classes']\n info_boxes = np.zeros((0, 18), dtype=np.float32)\n\n if boxes.shape[0] == 0:\n roidb[i]['info_boxes'] = info_boxes\n continue\n\n # compute grid boxes\n s = PIL.Image.open(imdb.image_path_at(i)).size\n image_height = s[1]\n image_width = s[0]\n # only apply to VGG\n boxes_grid, cx, cy = get_boxes_grid(image_height, image_width)\n \n # for each scale\n for scale_ind, scale in enumerate(cfg.TRAIN.SCALES):\n boxes_rescaled = boxes * scale\n\n # compute overlap\n overlaps = bbox_overlaps(boxes_grid.astype(np.float), boxes_rescaled.astype(np.float))\n max_overlaps = overlaps.max(axis = 1)\n argmax_overlaps = overlaps.argmax(axis = 1)\n max_classes = labels[argmax_overlaps]\n\n # select positive boxes\n fg_inds = []\n for k in xrange(1, imdb.num_classes):\n fg_inds.extend(np.where((max_classes == k) & (max_overlaps >= cfg.TRAIN.FG_THRESH))[0])\n\n if len(fg_inds) > 0:\n gt_inds = argmax_overlaps[fg_inds]\n # bounding box regression targets\n gt_targets = _compute_targets(boxes_grid[fg_inds,:], boxes_rescaled[gt_inds,:])\n # scale mapping for RoI pooling\n scale_ind_map = cfg.TRAIN.SCALE_MAPPING[scale_ind]\n scale_map = cfg.TRAIN.SCALES[scale_ind_map]\n # contruct the list of positive boxes\n # (cx, cy, scale_ind, box, scale_ind_map, box_map, gt_label, gt_sublabel, target)\n info_box = np.zeros((len(fg_inds), 18), dtype=np.float32)\n info_box[:, 0] = cx[fg_inds]\n info_box[:, 1] = cy[fg_inds]\n info_box[:, 2] = scale_ind\n info_box[:, 3:7] = boxes_grid[fg_inds,:]\n info_box[:, 7] = scale_ind_map\n info_box[:, 8:12] = boxes_grid[fg_inds,:] * scale_map / scale\n info_box[:, 12] = labels[gt_inds]\n info_box[:, 14:] = gt_targets\n info_boxes = np.vstack((info_boxes, info_box))\n\n roidb[i]['info_boxes'] = info_boxes\n\n with open(cache_file, 'wb') as fid:\n cPickle.dump(roidb, fid, cPickle.HIGHEST_PROTOCOL)\n print ('wrote gt roidb prepared to {}'.format(cache_file))\n\ndef add_bbox_regression_targets(roidb):\n \"\"\"Add information needed to train bounding-box regressors.\"\"\"\n assert len(roidb) > 0\n assert 'info_boxes' in roidb[0], 'Did you call prepare_roidb first?'\n\n num_images = len(roidb)\n # Infer number of classes from the number of columns in gt_overlaps\n num_classes = 
roidb[0]['gt_overlaps'].shape[1]\n\n # Compute values needed for means and stds\n # var(x) = E(x^2) - E(x)^2\n class_counts = np.zeros((num_classes, 1)) + cfg.EPS\n sums = np.zeros((num_classes, 4))\n squared_sums = np.zeros((num_classes, 4))\n for im_i in xrange(num_images):\n targets = roidb[im_i]['info_boxes']\n for cls in xrange(1, num_classes):\n cls_inds = np.where(targets[:, 12] == cls)[0]\n if cls_inds.size > 0:\n class_counts[cls] += cls_inds.size\n sums[cls, :] += targets[cls_inds, 14:].sum(axis=0)\n squared_sums[cls, :] += (targets[cls_inds, 14:] ** 2).sum(axis=0)\n\n means = sums / class_counts\n stds = np.sqrt(squared_sums / class_counts - means ** 2)\n\n # Normalize targets\n for im_i in xrange(num_images):\n targets = roidb[im_i]['info_boxes']\n for cls in xrange(1, num_classes):\n cls_inds = np.where(targets[:, 12] == cls)[0]\n roidb[im_i]['info_boxes'][cls_inds, 14:] -= means[cls, :]\n if stds[cls, 0] != 0:\n roidb[im_i]['info_boxes'][cls_inds, 14:] /= stds[cls, :]\n\n # These values will be needed for making predictions\n # (the predicts will need to be unnormalized and uncentered)\n return means.ravel(), stds.ravel()\n\ndef _compute_targets(ex_rois, gt_rois):\n \"\"\"Compute bounding-box regression targets for an image. The targets are scale invariance\"\"\"\n\n ex_widths = ex_rois[:, 2] - ex_rois[:, 0] + cfg.EPS\n ex_heights = ex_rois[:, 3] - ex_rois[:, 1] + cfg.EPS\n ex_ctr_x = ex_rois[:, 0] + 0.5 * ex_widths\n ex_ctr_y = ex_rois[:, 1] + 0.5 * ex_heights\n\n gt_widths = gt_rois[:, 2] - gt_rois[:, 0] + cfg.EPS\n gt_heights = gt_rois[:, 3] - gt_rois[:, 1] + cfg.EPS\n gt_ctr_x = gt_rois[:, 0] + 0.5 * gt_widths\n gt_ctr_y = gt_rois[:, 1] + 0.5 * gt_heights\n\n targets_dx = (gt_ctr_x - ex_ctr_x) / ex_widths\n targets_dy = (gt_ctr_y - ex_ctr_y) / ex_heights\n targets_dw = np.log(gt_widths / ex_widths)\n targets_dh = np.log(gt_heights / ex_heights)\n\n targets = np.zeros((ex_rois.shape[0], 4), dtype=np.float32)\n targets[:, 0] = targets_dx\n targets[:, 1] = targets_dy\n targets[:, 2] = targets_dw\n targets[:, 3] = targets_dh\n return targets\n",
"import os\nimport numpy as np\n\ndef main():\n write_path = 'avod/data/outputs/pyramid_people_example_train/predictions/kitti_predictions_3d/val/0.1/120000/data/'\n write_name = 'pedestrian'#'cyclist'#'pedestrian'\n result_type = 'ground'#'3d'\n PR_file = os.path.join(write_path,('../plot/'+write_name+'_detection_'+result_type+'.txt'))\n #try:\n PRs = np.loadtxt(PR_file)\n print('file loaded')\n APs = np.sum(PRs[0:-1,1:4]*(PRs[1:,0:1]-PRs[0:-1,0:1]),axis=0)\n conclusion_path = os.path.join(write_path,'../../../conclusion.txt')\n with open(conclusion_path,'a+') as conclusion_file:\n print('conclusion filed opened')\n conclusion_file.write('iteration '+': ')\n conclusion_file.write('\\nrecall :\\n')\n PRs[:,0].tofile(conclusion_file,\" \",format='%.3f')\n conclusion_file.write('\\nprec_easy, AP: %.2f :\\n'%APs[0])\n PRs[:,1].tofile(conclusion_file,\" \",format='%.3f')\n conclusion_file.write('\\nprec_mod , AP: %.2f :\\n'%APs[1])\n PRs[:,2].tofile(conclusion_file,\" \",format='%.3f')\n conclusion_file.write('\\nprec_hard, AP: %.2f :\\n'%APs[2])\n PRs[:,3].tofile(conclusion_file,\" \",format='%.3f')\n print('APs: %.3f, %.3f, %.3f'%(APs[0],APs[1],APs[2]))\n #except:\n # #f_log.write('No object detected')\n # print('No object detected')\nif __name__ == \"__main__\":\n main()\n"
] | [
[
"numpy.maximum",
"numpy.minimum",
"numpy.arange",
"numpy.cumsum",
"numpy.sort",
"numpy.finfo",
"numpy.concatenate",
"numpy.max",
"numpy.argmax",
"numpy.where",
"numpy.argsort",
"numpy.array",
"numpy.zeros",
"numpy.sum"
],
[
"numpy.log",
"numpy.sqrt",
"numpy.vstack",
"numpy.where",
"numpy.zeros"
],
[
"numpy.sum",
"numpy.loadtxt"
]
] |
sadams2013/pvtools | [
"12bd9334a1335972519c81d0c01c6308aa597c39"
] | [
"pvtools.py"
] | [
"# Import standard libraries.\nimport json\n\n# Import external libraries.\nimport numpy as np\nimport pandas as pd\n\nclass dbSNP:\n \"\"\"Store dbSNP data for a gene.\n\n Parameters\n ----------\n dbsnp_file : str\n Path to a dbSNP file containing variant information.\n\n Attributes\n ----------\n df : pandas.DataFrame\n Dataframe containing dbSNP data.\n \"\"\"\n def __init__(self, dbsnp_file):\n self.df = pd.read_table(dbsnp_file)\n\n def get_ref(self, start, end):\n \"\"\"Return reference allele.\"\"\"\n try:\n i = (self.df['chromStart'] == start) & (self.df['chromEnd'] == end)\n result = self.df[i]['name'].values[0]\n except IndexError:\n result = None\n return result\n\nclass LookupTable:\n \"\"\"Store liftover data for a gene.\n\n Parameters\n ----------\n ng : Sequence\n Sequence object for RefSeqGene.\n g7 : Sequence\n Sequence object for GRCh37.\n g8 : Sequence\n Sequence object for GRCh38.\n\n Attributes\n ----------\n ng : Sequence\n Sequence object for RefSeqGene.\n g7 : Sequence\n Sequence object for GRCh37.\n g8 : Sequence\n Sequence object for GRCh38.\n df : pandas.DataFrame\n Dataframe containing liftover data.\n \"\"\"\n def __init__(self, ng, g7, g8):\n self.ng = ng\n self.g7 = g7\n self.g8 = g8\n self.df = self._build_lookup_table(ng, g7, g8)\n\n def _build_lookup_table(self, ng, g7, g8):\n ng_pos1 = np.arange(1, len(ng.seq)+1)\n ng_pos2 = ng_pos1 - ng.data['CDSStarts'][0]\n ng_pos3 = ng.liftover()\n g7_pos = list(range(g7.data['Start'], g7.data['End']+1))\n g8_pos = list(range(g8.data['Start'], g8.data['End']+1))\n allele = np.array(list(ng.seq))\n annot1 = ng.annotate(cds=False)\n annot2 = ng.annotate(cds=True)\n d = {'Start_Position': ng_pos1, 'ATG_Position': ng_pos2,\n 'Transcript_Position': ng_pos3, 'GRCh37_Position': g7_pos,\n 'GRCh38_Position': g8_pos, 'Allele': allele,\n 'Exon_Annotation': annot1, 'CDS_Annotation': annot2}\n return pd.DataFrame(d)\n\n def to_tsv(self, f):\n self.df.to_csv(f, sep='\\t', index=False)\n\n def find(self, system1, system2, value):\n try:\n result = self.df[self.df[system1] == value][system2].values[0]\n except IndexError:\n result = None\n return result\n\nclass Sequence:\n \"\"\"Store sequence data for a gene.\n\n Parameters\n ----------\n fasta_file : str\n Path to a FASTA file containing the DNA sequence.\n json_file : str\n Path to a JSON file containing metadata for the DNA sequence.\n\n Attributes\n ----------\n name : str\n Sequence identifier with the leading character '>' removed.\n seq : str\n DNA sequence.\n len : int\n Length of the DNA sequence.\n data : dict\n Metadata of the DNA sequence.\n \"\"\"\n def __init__(self, fasta_file, json_file=None):\n self.name, self.seq = self._read_fasta_file(fasta_file)\n self.len = len(self.seq)\n self.data = self._read_json_file(json_file)\n\n def _read_fasta_file(self, fasta_file):\n name = ''\n seq = ''\n with open(fasta_file) as f:\n name = next(f).strip().replace('>', '')\n for line in f:\n seq += line.strip()\n return name, seq\n\n def _read_json_file(self, json_file):\n if json_file is None:\n return None\n with open(json_file) as f:\n return json.load(f)\n\n def transcribe(self):\n \"\"\"Transcribe the DNA sequence.\n\n Returns\n -------\n str\n mRNA sequence.\n \"\"\"\n rna = ''\n for i in range(self.data['ExonCount']):\n start = self.data['ExonStarts'][i]\n end = self.data['ExonEnds'][i]\n rna += self.seq[start-1:end]\n return rna\n\n def get_exon_dataframe(self):\n \"\"\"Tabulate Exon data.\n\n Returns\n -------\n pandas.DataFrame\n Dataframe containing Exon data.\n 
\"\"\"\n exon_starts = self.data['ExonStarts']\n exon_ends = self.data['ExonEnds']\n exon_names = [f'Exon {x+1}' for x in range(len(exon_starts))]\n intron_starts = [x+1 for x in exon_ends[:-1]]\n intron_ends = [x-1 for x in exon_starts[1:]]\n intron_names = [f'Intron {x+1}' for x in range(len(intron_starts))]\n upstream_start = 1\n upstream_end = exon_starts[0] - 1\n upstream_name = 'Upstream'\n downstream_start = exon_ends[-1] + 1\n downstream_end = len(self.seq)\n downstream_name = 'Downstream'\n starts = exon_starts + intron_starts + [upstream_start, downstream_start]\n ends = exon_ends + intron_ends + [upstream_end, downstream_end]\n names = exon_names + intron_names + [upstream_name, downstream_name]\n df = pd.DataFrame({'Name': names, 'Start': starts, 'End': ends})\n df = df.sort_values('Start')\n df = df.reset_index(drop=True)\n return df\n\n def get_cds_dataframe(self):\n \"\"\"Tabulate CDS data.\n\n Returns\n -------\n pandas.DataFrame\n Dataframe containing CDS data.\n \"\"\"\n cds_starts = self.data['CDSStarts']\n cds_ends = self.data['CDSEnds']\n cds_names = [f'CDS {x+1}' for x in range(len(cds_starts))]\n\n intron_starts = [x+1 for x in cds_ends[:-1]]\n intron_ends = [x-1 for x in cds_starts[1:]]\n intron_names = [f'Intron {x+1}' for x in range(len(intron_starts))]\n\n exon_df = self.get_exon_dataframe()\n\n upstream_start = 1\n upstream_end = exon_df[exon_df.Name == 'Upstream'].End.values[0]\n upstream_name = 'Upstream'\n\n utr5_starts = []\n utr5_ends = []\n atg_pos = self.get_atg_pos()\n i = self.get_atg_exon_index()\n for x in range(self.data['ExonCount']):\n start = self.data['ExonStarts'][x]\n end = self.data['ExonEnds'][x]\n if x < i:\n utr5_starts.append(start)\n utr5_ends.append(end)\n elif x == i:\n utr5_starts.append(start)\n utr5_ends.append(atg_pos-1)\n else:\n break\n utr5_names = [f\"5' UTR Exon {x+1}\" for x in range(len(utr5_starts))]\n\n utr5_intron_starts = []\n utr5_intron_ends = []\n for utr5_end in utr5_ends[:-1]:\n utr5_intron_starts.append(utr5_end+1)\n for utr5_start in utr5_starts[1:]:\n utr5_intron_ends.append(utr5_start-1)\n utr5_intron_names = [f\"5' UTR Intron {x+1}\" for x in range(len(utr5_intron_starts))]\n\n utr3_starts = []\n utr3_ends = []\n stop_pos = self.get_stop_pos()\n i = self.get_stop_exon_index()\n for x in range(self.data['ExonCount']):\n start = self.data['ExonStarts'][x]\n end = self.data['ExonEnds'][x]\n if x < i:\n pass\n elif x == i:\n utr3_starts.append(stop_pos+1)\n utr3_ends.append(end)\n else:\n utr3_starts.append(start)\n utr3_ends.append(end)\n utr3_names = [f\"3' UTR Exon {x+1}\" for x in range(len(utr3_starts))]\n\n utr3_intron_starts = []\n utr3_intron_ends = []\n for utr3_end in utr3_ends[:-1]:\n utr3_intron_starts.append(utr3_end+1)\n for utr3_start in utr3_starts[1:]:\n utr3_intron_ends.append(utr3_start-1)\n utr3_intron_names = [f\"3' UTR Intron {x+1}\" for x in range(len(utr3_intron_starts))]\n\n downstream_start = exon_df[exon_df.Name == 'Downstream'].Start.values[0]\n downstream_end = len(self.seq)\n downstream_name = 'Downstream'\n\n starts = cds_starts + intron_starts + utr5_starts + utr5_intron_starts + utr3_starts + utr3_intron_starts + [upstream_start, downstream_start]\n ends = cds_ends + intron_ends + utr5_ends + utr5_intron_ends + utr3_ends + utr3_intron_ends + [upstream_end, downstream_end]\n names = cds_names + intron_names + utr5_names + utr5_intron_names + utr3_names + utr3_intron_names + [upstream_name, downstream_name]\n df = pd.DataFrame({'Name': names, 'Start': starts, 'End': ends})\n df = 
df.sort_values('Start')\n df = df.reset_index(drop=True)\n return df\n\n def annotate(self, cds=False):\n if cds:\n df = self.get_cds_dataframe()\n else:\n df = self.get_exon_dataframe()\n annotations = []\n for i, r in df.iterrows():\n n = r.End - r.Start + 1\n annotations += [r.Name] * n\n return annotations\n\n def liftover(self):\n cds_df = self.get_cds_dataframe()\n cds_pos = []\n cds_sum = 1\n atg_start = self.data['CDSStarts'][0]\n utr5_exon_offset = -1 * self.get_utr5_exon_len()\n utr3_exon_sum = 1\n for i, r in cds_df.iterrows():\n cds_len = r.End - r.Start + 1\n if r.Name.startswith('CDS'):\n cds_pos += list(range(cds_sum, cds_sum + cds_len))\n cds_sum += cds_len\n elif r.Name.startswith('Intron'):\n cds_pos += [f'{cds_sum-1}+{x}' for x in range(1, cds_len+1)]\n elif r.Name == 'Upstream':\n a = self.get_atg_pos() - self.get_utr5_intron_len()\n cds_pos += [x-a for x in range(1, r.End+1)]\n elif r.Name.startswith(\"5' UTR Exon\"):\n a = r.End - r.Start + 1\n cds_pos += [x for x in range(utr5_exon_offset, utr5_exon_offset+a)]\n utr5_exon_offset += a\n elif r.Name.startswith(\"5' UTR Intron\"):\n cds_pos += [f'{utr5_exon_offset-1}+{x}' for x in range(1, cds_len+1)]\n elif r.Name == 'Downstream':\n a = self.get_utr3_exon_len() + 1\n b = r.End - r.Start + 1\n cds_pos += [f'*{x+a}' for x in range(b)]\n elif r.Name.startswith(\"3' UTR Exon\"):\n a = r.End - r.Start + 1\n cds_pos += [f'*{x}' for x in list(range(utr3_exon_sum, utr3_exon_sum+a))]\n utr3_exon_sum += a\n elif r.Name.startswith(\"3' UTR Intron\"):\n cds_pos += [f'*{utr3_exon_sum-1}+{x}' for x in range(1, cds_len+1)]\n else:\n cds_pos += ['.' for x in range(cds_len)]\n if len(cds_pos) != self.len:\n raise ValueError(f\"LiftOver length error: expected {self.len} bp, \"\n f\"but generated: {len(cds_pos)} bp\")\n return [f'c.{x}' for x in cds_pos]\n\n def get_atg_pos(self):\n return self.data['CDSStarts'][0]\n\n def get_atg_exon_index(self):\n exon_starts = self.data['ExonStarts']\n exon_ends = self.data['ExonEnds']\n atg_pos = self.get_atg_pos()\n for i in range(self.data['ExonCount']):\n if exon_starts[i] <= atg_pos <= exon_ends[i]:\n return i\n\n def get_stop_pos(self):\n return self.data['CDSEnds'][-1]\n\n def get_stop_exon_index(self):\n exon_starts = self.data['ExonStarts']\n exon_ends = self.data['ExonEnds']\n stop_pos = self.get_stop_pos()\n for i in range(self.data['ExonCount']):\n if exon_starts[i] <= stop_pos <= exon_ends[i]:\n return i\n\n def get_utr5_intron_len(self):\n df = self.get_cds_dataframe()\n df = df[df.Name.str.contains(\"5' UTR Intron\")]\n return sum(df.End - df.Start + 1)\n\n def get_utr5_exon_len(self):\n df = self.get_cds_dataframe()\n df = df[df.Name.str.contains(\"5' UTR Exon\")]\n return sum(df.End - df.Start + 1)\n\n def get_utr3_intron_len(self):\n df = self.get_cds_dataframe()\n df = df[df.Name.str.contains(\"3' UTR Intron\")]\n return sum(df.End - df.Start + 1)\n\n def get_utr3_exon_len(self):\n df = self.get_cds_dataframe()\n df = df[df.Name.str.contains(\"3' UTR Exon\")]\n return sum(df.End - df.Start + 1)\n"
] | [
[
"pandas.read_table",
"pandas.DataFrame"
]
] |
vishalbelsare/tsa | [
"203e602fe5fc95b89afb454156fc7e4faee90f2a",
"203e602fe5fc95b89afb454156fc7e4faee90f2a"
] | [
"src/main/python/thalesians/tsa/optimization/visual.py",
"src/main/python/thalesians/tsa/numpyutils.py"
] | [
"import itertools\nimport time\nimport warnings\n\nimport numpy as np\nimport matplotlib.colors\nimport matplotlib.pyplot as plt\n\nimport thalesians.tsa.checks as checks\nimport thalesians.tsa.numpyutils as npu\nimport thalesians.tsa.utils as utils\n\ndef _aggregate(aggregate_func, data, empty_aggregate):\n if empty_aggregate != 'none':\n return npu.apply(lambda x: empty_aggregate if len(x) == 0 else aggregate_func(x), data)\n else:\n return npu.apply(aggregate_func, data)\n\ndef visualize_grid_search(grid_search_result,\n aggregate_func=np.nanmean, empty_aggregate='none',\n fig=None, title=None,\n refresh_until_ready=False):\n if fig is None: fig = plt.figure()\n\n if title is None: title = grid_search_result.optimization_id\n fig.suptitle(title)\n\n param_names = list(grid_search_result.param_ranges.keys())\n\n subplots = {}\n heatmaps = {}\n datas = {}\n\n for i1 in range(len(param_names)):\n param_name1 = param_names[i1]\n param_values1 = grid_search_result.param_ranges[param_name1]\n for i2 in range(i1):\n param_name2 = param_names[i2]\n param_values2 = grid_search_result.param_ranges[param_name2]\n data = np.empty((len(param_values1), len(param_values2)), dtype=object)\n for i in range(np.size(data)): data.flat[i] = []\n datas[(i1, i2)] = data\n\n ax = fig.add_subplot(len(param_names) - 1, len(param_names) - 1, (i1 - 1) * (len(param_names) - 1) + i2 + 1)\n subplots[(i1, i2)] = ax\n \n initial_data = _aggregate(aggregate_func, datas[(i1, i2)], empty_aggregate)\n\n heatmaps[(i1, i2)] = ax.matshow(npu.apply(aggregate_func, initial_data), cmap='coolwarm')\n\n if i2 == i1 - 1:\n ax.set_xticklabels([np.nan] + [0. if x == 1e-06 else x for x in param_values2], fontsize=6, rotation='vertical', verticalalignment='bottom')\n ax.xaxis.set_ticks_position('top')\n ax.set_yticklabels([np.nan] + [0. if x == 1e-06 else x for x in param_values1], fontsize=6)\n ax.yaxis.set_ticks_position('right')\n else:\n ax.set_xticks([])\n ax.set_yticks([])\n if i1 == len(param_names) - 1: ax.set_xlabel(param_name2)\n if i2 == 0: ax.set_ylabel(param_name1)\n\n while True:\n all_ready = True\n for status in grid_search_result.evaluation_statuses:\n if not status.ready: all_ready = False\n else:\n checks.check(utils.sequence_eq(param_names, status.work.info['param_names']))\n param_value_index_combinations = itertools.combinations(range(len(param_names)), 2)\n param_value_index_combinations = [(i2, i1) for (i1, i2) in param_value_index_combinations if i1 != i2]\n for i1, i2 in param_value_index_combinations:\n param_value_index1 = status.work.info['param_value_indices'][i1]\n param_value_index2 = status.work.info['param_value_indices'][i2]\n if status.result.exception is not None:\n result = np.nan\n elif status.result.result is None:\n result = np.nan\n else:\n result = status.result.result\n datas[(i1, i2)][param_value_index1, param_value_index2].append(result)\n for i1 in range(len(param_names)):\n for i2 in range(i1):\n new_data = _aggregate(aggregate_func, datas[(i1, i2)], empty_aggregate)\n heatmaps[(i1, i2)].set_data(new_data)\n heatmaps[(i1, i2)].autoscale()\n if (not refresh_until_ready) or all_ready: break\n else:\n fig.canvas.draw()\n time.sleep(1)\n\n return fig\n",
"import datetime as dt\r\nimport warnings\r\n\r\nimport numpy as np\r\n\r\nimport thalesians.tsa.checks as checks\r\nimport thalesians.tsa.utils as utils\r\n\r\ndef init_warnings():\r\n np.warnings.filterwarnings('ignore', message='Mean of empty slice')\r\n np.warnings.filterwarnings('ignore', message='All-NaN axis encountered')\r\n warnings.filterwarnings('ignore', message='Warning: converting a masked element to nan.')\r\n\r\ndef apply(func, arg, dtype='float64'):\r\n result = np.empty(np.shape(arg), dtype=dtype)\r\n result.flat[:] = [func(x) for x in arg.flat[:]]\r\n return result\r\n\r\ndef sign(arg):\r\n if isinstance(arg, dt.timedelta):\r\n arg = arg.total_seconds()\r\n elif checks.is_numpy_array(arg) and arg.dtype == object and np.size(arg) > 0 and isinstance(arg.item(0), dt.timedelta):\r\n arg = np.vectorize(lambda x: x.total_seconds())(arg)\r\n return np.sign(arg)\r\n\r\ndef is_view_of(arg1, arg2):\r\n if not checks.is_numpy_array(arg1) or not checks.is_numpy_array(arg2):\r\n return False\r\n return arg1.base is arg2\r\n\r\ndef are_views_of_same(arg1, arg2):\r\n if not checks.is_numpy_array(arg1) or not checks.is_numpy_array(arg2):\r\n return False\r\n return (arg1.base is arg2) or (arg2.base is arg1) or ((arg1.base is arg2.base) and arg1.base is not None)\r\n \r\ndef nrow(arg):\r\n return np.shape(arg)[0]\r\n\r\ndef ncol(arg):\r\n return np.shape(arg)[1]\r\n\r\ndef to_scalar(arg, raise_value_error=True):\r\n try:\r\n if checks.is_float(arg): return arg\r\n elif checks.is_numpy_array(arg): return np.asscalar(arg)\r\n else: return np.asscalar(np.array(arg))\r\n except:\r\n if raise_value_error: raise\r\n return arg\r\n\r\ndef to_ndim_1(arg, copy=False):\r\n r = np.reshape(arg, (np.size(arg),))\r\n if r.base is arg and copy: r = np.copy(r)\r\n return r\r\n\r\ndef to_ndim_2(arg, ndim_1_to_col=False, copy=False):\r\n r = np.ndim(arg)\r\n if r == 0: arg = np.array(((arg,),))\r\n elif r == 1:\r\n arg = np.array((arg,))\r\n if ndim_1_to_col: arg = arg.T\r\n return np.array(arg, copy=copy)\r\n\r\ndef row(*args):\r\n return to_ndim_2(args, ndim_1_to_col=False)\r\n\r\ndef col(*args):\r\n return to_ndim_2(args, ndim_1_to_col=True)\r\n\r\ndef matrix(ncol, *args):\r\n return np.array(utils.batch(ncol, args))\r\n\r\ndef matrix_of(nrow, ncol, val):\r\n r = np.empty((nrow, ncol))\r\n r.fill(val)\r\n return r\r\n\r\ndef row_of(n, val):\r\n return matrix_of(1, n, val)\r\n\r\ndef col_of(n, val):\r\n return matrix_of(n, 1, val)\r\n\r\ndef ndim_1_of(n, val):\r\n r = np.empty((n,))\r\n r.fill(val)\r\n return r\r\n\r\ndef make_immutable(arg, allow_none=False):\r\n if allow_none and arg is None: return None\r\n checks.check_numpy_array(arg)\r\n arg.flags.writeable = False\r\n return arg\r\n\r\ndef immutable_copy_of(arg):\r\n if checks.is_numpy_array(arg):\r\n result = np.copy(arg) if arg.flags.writeable else arg\r\n else:\r\n result = np.array(arg)\r\n result.flags.writeable = False\r\n return result\r\n \r\ndef lower_to_symmetric(a, copy=False):\r\n a = np.copy(a) if copy else a\r\n idxs = np.triu_indices_from(a)\r\n a[idxs] = a[(idxs[1], idxs[0])]\r\n return a\r\n\r\ndef upper_to_symmetric(a, copy=False):\r\n a = np.copy(a) if copy else a\r\n idxs = np.triu_indices_from(a)\r\n a[(idxs[1], idxs[0])] = a[idxs]\r\n return a\r\n\r\ndef kron_sum(arg1, arg2):\r\n return np.kron(arg1, np.eye(nrow(arg2))) + np.kron(np.eye(nrow(arg1)), arg2)\r\n\r\ndef vec(arg):\r\n return np.resize(to_ndim_2(arg, ndim_1_to_col=True, copy=False).T, (np.size(arg), 1))\r\n\r\ndef unvec(arg, nrow):\r\n return 
np.resize(to_ndim_1(arg, copy=False), (np.size(arg) // nrow, nrow)).T\r\n\r\ndef vectorized(func):\r\n func.__dict__['vectorized'] = True\r\n return func\r\n\r\ndef is_vectorized(func):\r\n res = False\r\n if hasattr(func, '__call__'):\r\n if hasattr(func.__call__, '__dict__'):\r\n res |= func.__call__.__getattribute__('__dict__').get('vectorized', False)\r\n if not res and hasattr(func, '__dict__'):\r\n res = func.__getattribute__('__dict__').get('vectorized', False)\r\n return res\r\n"
] | [
[
"numpy.size",
"matplotlib.pyplot.figure"
],
[
"numpy.asscalar",
"numpy.triu_indices_from",
"numpy.sign",
"numpy.ndim",
"numpy.copy",
"numpy.size",
"numpy.shape",
"numpy.warnings.filterwarnings",
"numpy.array",
"numpy.empty"
]
] |
MUYANGGUO/HPC | [
"ab95d18d4054b892269dd439470548abd06f5512"
] | [
"projects/1-molecular-dynamics/check.py"
] | [
"\nif __name__ == \"__main__\":\n import sys\n import json\n import numpy as np\n\n firstline = sys.stdin.readline()\n obj = json.loads(firstline)\n\n Np = obj['num_points']\n dt = obj['dt']\n L = obj['L']\n Nt = obj['num_steps']\n Nint = obj['step_chunk']\n k = obj['k']\n d = obj['d']\n gifname = obj['gifname']\n\n numframes = int(Nt) // int(Nint) + 1\n maxinterv = 100\n maxinterv = min(maxinterv,numframes -1)\n accum = np.zeros((maxinterv,1))\n denom = np.zeros((maxinterv,1))\n for i in range(numframes):\n try:\n line = sys.stdin.readline()\n obj = json.loads(line)\n X = np.array(obj['X'])\n except:\n break\n center = np.mean(X,axis=1)\n X = X - center.reshape((3,1)) * np.ones((1,X.shape[1]))\n if not i:\n X0 = np.ndarray((maxinterv,X.shape[0],X.shape[1]))\n for j in range(maxinterv):\n X0[j,:,:] = X[:,:]\n continue\n for interv in range(1,maxinterv+1):\n if i % interv:\n continue\n r = X[:,:] - X0[interv-1,:,:]\n s_pro = r[0,:]*r[0,:] + r[1,:]*r[1,:] + r[2,:]*r[2,:]\n accum[interv-1] = accum[interv-1] + np.mean(s_pro)\n denom[interv-1] = denom[interv-1] + 1\n X0[interv-1,:,:] = X[:,:]\n\n out = accum / denom\n x = np.linspace(dt*Nint,dt*Nint*maxinterv,maxinterv)\n p = np.polyfit(x,out,1)\n print(f'Diffusion constant: {p[0] / 6.}')\n"
] | [
[
"numpy.polyfit",
"numpy.linspace",
"numpy.ndarray",
"numpy.ones",
"numpy.mean",
"numpy.array",
"numpy.zeros"
]
] |
hiroyasuakada/ros_start | [
"10221ad2bcaefa4aaadc6c90424a3751126ac256"
] | [
"scripts/gan/cycle_gan/train.py"
] | [
"import os\nimport random\nimport itertools\nimport numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.utils.data\nimport torchvision.transforms as transforms\nfrom torchvision.utils import make_grid\nfrom torch.autograd import Variable\nfrom PIL import Image\nimport matplotlib.pyplot as plt\nfrom tensorboardX import SummaryWriter\nimport time\nimport cv2\n\n##################################################################\nfrom dataset import UnalignedDataset\nfrom model_base import ResNetBlock, Generator, Discriminator\nfrom model_cyclegan import CycleGAN\n##################################################################\n\n\ndef train(log_dir, device, lr, beta1, lambda_idt, lambda_A, lambda_B, lambda_mask,\n num_epoch, num_epoch_resume, save_epoch_freq):\n model = CycleGAN(log_dir=log_dir, device=device, lr=lr, beta1=beta1,\n lambda_idt=lambda_idt, lambda_A=lambda_A, lambda_B=lambda_B, lambda_mask=lambda_mask)\n\n if num_epoch_resume != 0:\n model.log_dir = 'logs'\n print('load model {}'.format(num_epoch_resume))\n model.load('epoch' + str(num_epoch_resume))\n\n writer = SummaryWriter(log_dir)\n\n for epoch in range(num_epoch):\n print('epoch {} started'.format(epoch + 1 + num_epoch_resume))\n t1 = time.perf_counter()\n\n losses = model.train(train_loader)\n\n t2 = time.perf_counter()\n get_processing_time = t2 - t1\n\n print('epoch: {}, elapsed_time: {} sec losses: {}'\n .format(epoch + 1 + num_epoch_resume, get_processing_time, losses))\n\n writer.add_scalar('loss_G_A', losses[0], epoch + 1 + num_epoch_resume)\n writer.add_scalar('loss_D_A', losses[1], epoch + 1 + num_epoch_resume)\n writer.add_scalar('loss_G_B', losses[2], epoch + 1 + num_epoch_resume)\n writer.add_scalar('loss_D_B', losses[3], epoch + 1 + num_epoch_resume)\n writer.add_scalar('loss_cycle_A', losses[4], epoch + 1 + num_epoch_resume)\n writer.add_scalar('loss_cycle_B', losses[5], epoch + 1 + num_epoch_resume)\n writer.add_scalar('loss_idt_A', losses[6], epoch + 1 + num_epoch_resume)\n writer.add_scalar('loss_idt_B', losses[7], epoch + 1 + num_epoch_resume)\n writer.add_scalar('loss_mask', losses[8], epoch + 1 + num_epoch_resume)\n\n if (epoch + 1 + num_epoch_resume) % save_epoch_freq == 0:\n model.save('epoch%d' % (epoch + 1 + num_epoch_resume))\n\n\nif __name__ == '__main__':\n\n # random seeds\n torch.manual_seed(1234)\n np.random.seed(1234)\n random.seed(1234)\n\n # image\n height = 128\n width = 256\n\n # training details\n batch_size = 1\n lr = 0.0002 # initial learning rate for adam\n beta1 = 0.5 # momentum term of adam\n\n num_epoch = 100\n num_epoch_resume = 0\n save_epoch_freq = 1\n\n # weights of loss function\n # lambda_idt = 5\n # lambda_A = 10.0\n # lambda_B = 10.0\n # lambda_mask = 10.0\n lambda_idt = 5.0\n lambda_A = 10.0\n lambda_B = 10.0\n lambda_mask = 0\n\n # files, dirs\n log_dir = 'logs'\n\n # gpu\n device = torch.device(\"cuda:0\" if torch.cuda.is_available else \"cpu\")\n print('device {}'.format(device))\n\n # dataset\n train_dataset = UnalignedDataset(is_train=True)\n train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=batch_size, shuffle=True)\n\n # train\n train(log_dir, device, lr, beta1, lambda_idt, lambda_A, lambda_B, lambda_mask,\n num_epoch, num_epoch_resume, save_epoch_freq)\n\n\n\n"
] | [
[
"torch.device",
"torch.manual_seed",
"torch.utils.data.DataLoader",
"numpy.random.seed"
]
] |
ziyedy/category-priornet | [
"5aa080eeff936ce3939f0d5458a2936677c15726"
] | [
"lib/prior/priorNet.py"
] | [
"import sys\n\nsys.path.append(\"../../\")\nimport lib.gcn3d as gcn3d\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\n\nclass PriorEncoder(nn.Module):\n def __init__(self, support_num: int, neighbor_num: int):\n super(PriorEncoder, self).__init__()\n\n self.neighbor_num = neighbor_num\n\n self.conv_0 = gcn3d.Conv_surface(kernel_num=32, support_num=support_num)\n self.conv_1 = gcn3d.Conv_layer(32, 64, support_num=support_num)\n self.pool_1 = gcn3d.Pool_layer(pooling_rate=4, neighbor_num=4)\n self.conv_2 = gcn3d.Conv_layer(64, 128, support_num=support_num)\n self.conv_3 = gcn3d.Conv_layer(128, 256, support_num=support_num)\n self.pool_2 = gcn3d.Pool_layer(pooling_rate=4, neighbor_num=4)\n self.conv_4 = gcn3d.Conv_layer(256, 512, support_num=support_num)\n self.pool_3 = gcn3d.Pool_layer(pooling_rate=4, neighbor_num=4)\n\n def forward(self, vertices: \"(bs, vertice_num, 3)\"):\n bs, vertice_num, _ = vertices.size()\n\n neighbor_index = gcn3d.get_neighbor_index(vertices, self.neighbor_num)\n\n fm_0 = self.conv_0(neighbor_index, vertices)\n fm_0 = F.relu(fm_0, inplace=True)\n fm_1 = self.conv_1(neighbor_index, vertices, fm_0)\n fm_1 = F.relu(fm_1, inplace=True)\n vertices, fm_1 = self.pool_1(vertices, fm_1)\n\n neighbor_index = gcn3d.get_neighbor_index(vertices, self.neighbor_num)\n fm_2 = self.conv_2(neighbor_index, vertices, fm_1)\n fm_2 = F.relu(fm_2, inplace=True)\n fm_3 = self.conv_3(neighbor_index, vertices, fm_2)\n fm_3 = F.relu(fm_3, inplace=True)\n vertices, fm_3 = self.pool_2(vertices, fm_3)\n neighbor_index = gcn3d.get_neighbor_index(vertices, self.neighbor_num)\n\n fm_4 = self.conv_4(neighbor_index, vertices, fm_3)\n feature_global = fm_4.max(1)[0]\n # fm_4 = F.relu(fm_4, inplace=True)\n # vertices, fm_4 = self.pool_3(vertices, fm_4)\n\n return feature_global\n\n\nclass PriorDecoder(nn.Module):\n def __init__(self, emb_dim, n_pts):\n super(PriorDecoder, self).__init__()\n self.fc1 = nn.Linear(emb_dim, 512)\n self.fc2 = nn.Linear(512, 1024)\n self.fc3 = nn.Linear(1024, 3 * n_pts)\n\n def forward(self, embedding):\n \"\"\"\n Args:\n embedding: (B, 512)\n\n \"\"\"\n bs = embedding.size()[0]\n out1 = F.relu(self.fc1(embedding))\n out2 = F.relu(self.fc2(out1))\n out3 = self.fc3(out2)\n out_pc = out3.view(bs, -1, 3)\n return out_pc\n\n\nclass PriorNet(nn.Module):\n def __init__(self, emb_dim=512, n_pts=1024):\n super(PriorNet, self).__init__()\n self.encoder = PriorEncoder(1, 20)\n self.decoder = PriorDecoder(emb_dim, n_pts)\n\n def forward(self, in_pc):\n emb = self.encoder(in_pc)\n out_pc = self.decoder(emb)\n return emb, out_pc\n\n\nif __name__ == '__main__':\n estimator = PriorEncoder(1, 1)\n xyz = torch.randn(32, 2048, 3)\n\n gg = estimator(xyz)"
] | [
[
"torch.nn.Linear",
"torch.randn",
"torch.nn.functional.relu"
]
] |
yulong314/mmpose | [
"cdfce789d0e48dd868c70a405a7d7f3da2b4ebe3",
"cdfce789d0e48dd868c70a405a7d7f3da2b4ebe3"
] | [
"mmpose/datasets/datasets/hand/freihand_dataset.py",
"mmpose/datasets/datasets/fashion/fashion_base_dataset.py"
] | [
"# Copyright (c) OpenMMLab. All rights reserved.\nimport os\nfrom collections import OrderedDict\n\nimport numpy as np\n\nfrom mmpose.datasets.builder import DATASETS\nfrom .hand_base_dataset import HandBaseDataset\n\n\n@DATASETS.register_module()\nclass FreiHandDataset(HandBaseDataset):\n    \"\"\"FreiHand dataset for top-down hand pose estimation.\n\n    `FreiHAND: A Dataset for Markerless Capture of Hand Pose\n    and Shape from Single RGB Images' ICCV'2019\n    More details can be found in the `paper\n    <https://arxiv.org/pdf/1909.04349.pdf>`__ .\n\n    The dataset loads raw features and apply specified transforms\n    to return a dict containing the image tensors and other information.\n\n    FreiHand keypoint indexes::\n\n        0: 'wrist',\n        1: 'thumb1',\n        2: 'thumb2',\n        3: 'thumb3',\n        4: 'thumb4',\n        5: 'forefinger1',\n        6: 'forefinger2',\n        7: 'forefinger3',\n        8: 'forefinger4',\n        9: 'middle_finger1',\n        10: 'middle_finger2',\n        11: 'middle_finger3',\n        12: 'middle_finger4',\n        13: 'ring_finger1',\n        14: 'ring_finger2',\n        15: 'ring_finger3',\n        16: 'ring_finger4',\n        17: 'pinky_finger1',\n        18: 'pinky_finger2',\n        19: 'pinky_finger3',\n        20: 'pinky_finger4'\n\n    Args:\n        ann_file (str): Path to the annotation file.\n        img_prefix (str): Path to a directory where images are held.\n            Default: None.\n        data_cfg (dict): config\n        pipeline (list[dict | callable]): A sequence of data transforms.\n        test_mode (bool): Store True when building test or\n            validation dataset. Default: False.\n    \"\"\"\n\n    def __init__(self,\n                 ann_file,\n                 img_prefix,\n                 data_cfg,\n                 pipeline,\n                 test_mode=False):\n\n        super().__init__(\n            ann_file, img_prefix, data_cfg, pipeline, test_mode=test_mode)\n\n        self.ann_info['use_different_joint_weights'] = False\n        assert self.ann_info['num_joints'] == 21\n        self.ann_info['joint_weights'] = \\\n            np.ones((self.ann_info['num_joints'], 1), dtype=np.float32)\n\n        self.dataset_name = 'freihand'\n        self.db = self._get_db()\n\n        print(f'=> num_images: {self.num_images}')\n        print(f'=> load {len(self.db)} samples')\n\n    def _get_db(self):\n        \"\"\"Load dataset.\"\"\"\n        gt_db = []\n        bbox_id = 0\n        num_joints = self.ann_info['num_joints']\n        for img_id in self.img_ids:\n\n            ann_ids = self.coco.getAnnIds(imgIds=img_id, iscrowd=False)\n            objs = self.coco.loadAnns(ann_ids)\n\n            for obj in objs:\n                if max(obj['keypoints']) == 0:\n                    continue\n                joints_3d = np.zeros((num_joints, 3), dtype=np.float32)\n                joints_3d_visible = np.zeros((num_joints, 3), dtype=np.float32)\n\n                keypoints = np.array(obj['keypoints']).reshape(-1, 3)\n                joints_3d[:, :2] = keypoints[:, :2]\n                joints_3d_visible[:, :2] = np.minimum(1, keypoints[:, 2:3])\n\n                # the ori image is 224x224\n                center, scale = self._xywh2cs(0, 0, 224, 224, 0.8)\n\n                image_file = os.path.join(self.img_prefix,\n                                          self.id2name[img_id])\n                gt_db.append({\n                    'image_file': image_file,\n                    'center': center,\n                    'scale': scale,\n                    'rotation': 0,\n                    'joints_3d': joints_3d,\n                    'joints_3d_visible': joints_3d_visible,\n                    'dataset': self.dataset_name,\n                    'bbox': obj['bbox'],\n                    'bbox_score': 1,\n                    'bbox_id': bbox_id\n                })\n                bbox_id = bbox_id + 1\n            gt_db = sorted(gt_db, key=lambda x: x['bbox_id'])\n\n        return gt_db\n\n    def evaluate(self, outputs, res_folder, metric='PCK', **kwargs):\n        \"\"\"Evaluate freihand keypoint results. 
The pose prediction results will\n be saved in `${res_folder}/result_keypoints.json`.\n\n Note:\n batch_size: N\n num_keypoints: K\n heatmap height: H\n heatmap width: W\n\n Args:\n outputs (list(preds, boxes, image_path, output_heatmap))\n :preds (np.ndarray[N,K,3]): The first two dimensions are\n coordinates, score is the third dimension of the array.\n :boxes (np.ndarray[N,6]): [center[0], center[1], scale[0]\n , scale[1],area, score]\n :image_paths (list[str]): For example, ['training/rgb/\n 00031426.jpg']\n :output_heatmap (np.ndarray[N, K, H, W]): model outpus.\n\n res_folder (str): Path of directory to save the results.\n metric (str | list[str]): Metric to be performed.\n Options: 'PCK', 'AUC', 'EPE'.\n\n Returns:\n dict: Evaluation results for evaluation metric.\n \"\"\"\n metrics = metric if isinstance(metric, list) else [metric]\n allowed_metrics = ['PCK', 'AUC', 'EPE']\n for metric in metrics:\n if metric not in allowed_metrics:\n raise KeyError(f'metric {metric} is not supported')\n\n res_file = os.path.join(res_folder, 'result_keypoints.json')\n\n kpts = []\n for output in outputs:\n preds = output['preds']\n boxes = output['boxes']\n image_paths = output['image_paths']\n bbox_ids = output['bbox_ids']\n\n batch_size = len(image_paths)\n for i in range(batch_size):\n image_id = self.name2id[image_paths[i][len(self.img_prefix):]]\n\n kpts.append({\n 'keypoints': preds[i].tolist(),\n 'center': boxes[i][0:2].tolist(),\n 'scale': boxes[i][2:4].tolist(),\n 'area': float(boxes[i][4]),\n 'score': float(boxes[i][5]),\n 'image_id': image_id,\n 'bbox_id': bbox_ids[i]\n })\n kpts = self._sort_and_unique_bboxes(kpts)\n\n self._write_keypoint_results(kpts, res_file)\n info_str = self._report_metric(res_file, metrics)\n name_value = OrderedDict(info_str)\n\n return name_value\n",
"# Copyright (c) OpenMMLab. All rights reserved.\nimport copy\nfrom abc import ABCMeta, abstractmethod\n\nimport json_tricks as json\nimport numpy as np\nfrom torch.utils.data import Dataset\nfrom xtcocotools.coco import COCO\n\nfrom mmpose.core.evaluation.top_down_eval import (keypoint_auc, keypoint_epe,\n keypoint_pck_accuracy)\nfrom mmpose.datasets.pipelines import Compose\n\n\nclass FashionBaseDataset(Dataset, metaclass=ABCMeta):\n \"\"\"Base class for fashion landmark datasets.\n\n All fashion datasets should subclass it.\n All subclasses should overwrite:\n Methods:`_get_db`, 'evaluate'\n\n Args:\n ann_file (str): Path to the annotation file.\n img_prefix (str): Path to a directory where images are held.\n Default: None.\n data_cfg (dict): config\n pipeline (list[dict | callable]): A sequence of data transforms.\n test_mode (bool): Store True when building test or\n validation dataset. Default: False.\n \"\"\"\n\n def __init__(self,\n ann_file,\n img_prefix,\n data_cfg,\n pipeline,\n test_mode=False):\n\n self.image_info = {}\n self.ann_info = {}\n\n self.annotations_path = ann_file\n self.img_prefix = img_prefix\n self.pipeline = pipeline\n self.test_mode = test_mode\n\n self.ann_info['image_size'] = np.array(data_cfg['image_size'])\n self.ann_info['heatmap_size'] = np.array(data_cfg['heatmap_size'])\n self.ann_info['num_joints'] = data_cfg['num_joints']\n\n self.ann_info['flip_pairs'] = []\n\n self.ann_info['inference_channel'] = data_cfg['inference_channel']\n self.ann_info['num_output_channels'] = data_cfg['num_output_channels']\n self.ann_info['dataset_channel'] = data_cfg['dataset_channel']\n\n self.coco = COCO(ann_file)\n self.img_ids = self.coco.getImgIds()\n self.num_images = len(self.img_ids)\n self.id2name, self.name2id = self._get_mapping_id_name(self.coco.imgs)\n\n self.db = []\n\n self.pipeline = Compose(self.pipeline)\n\n @staticmethod\n def _get_mapping_id_name(imgs):\n \"\"\"\n Args:\n imgs (dict): dict of image info.\n\n Returns:\n tuple: Image name & id mapping dicts.\n\n - id2name (dict): Mapping image id to name.\n - name2id (dict): Mapping image name to id.\n \"\"\"\n id2name = {}\n name2id = {}\n for image_id, image in imgs.items():\n file_name = image['file_name']\n id2name[image_id] = file_name\n name2id[file_name] = image_id\n\n return id2name, name2id\n\n def _xywh2cs(self, x, y, w, h, padding=1.25):\n \"\"\"This encodes bbox(x,y,w,h) into (center, scale)\n\n Args:\n x, y, w, h (float): left, top, width and height\n padding (float): bounding box padding factor\n\n Returns:\n center (np.ndarray[float32](2,)): center of the bbox (x, y).\n scale (np.ndarray[float32](2,)): scale of the bbox w & h.\n \"\"\"\n aspect_ratio = self.ann_info['image_size'][0] / self.ann_info[\n 'image_size'][1]\n center = np.array([x + w * 0.5, y + h * 0.5], dtype=np.float32)\n\n if (not self.test_mode) and np.random.rand() < 0.3:\n center += 0.4 * (np.random.rand(2) - 0.5) * [w, h]\n\n if w > aspect_ratio * h:\n h = w * 1.0 / aspect_ratio\n elif w < aspect_ratio * h:\n w = h * aspect_ratio\n\n # pixel std is 200.0\n scale = np.array([w / 200.0, h / 200.0], dtype=np.float32)\n # padding to include proper amount of context\n scale = scale * padding\n\n return center, scale\n\n @abstractmethod\n def _get_db(self):\n \"\"\"Load dataset.\"\"\"\n raise NotImplementedError\n\n @abstractmethod\n def evaluate(self, cfg, preds, output_dir, *args, **kwargs):\n \"\"\"Evaluate keypoint results.\"\"\"\n raise NotImplementedError\n\n @staticmethod\n def _write_keypoint_results(keypoints, 
res_file):\n \"\"\"Write results into a json file.\"\"\"\n\n with open(res_file, 'w') as f:\n json.dump(keypoints, f, sort_keys=True, indent=4)\n\n def _report_metric(self,\n res_file,\n metrics,\n pck_thr=0.2,\n pckh_thr=0.7,\n auc_nor=30):\n \"\"\"Keypoint evaluation.\n\n Args:\n res_file (str): Json file stored prediction results.\n metrics (str | list[str]): Metric to be performed.\n Options: 'PCK', 'PCKh', 'AUC', 'EPE'.\n pck_thr (float): PCK threshold, default as 0.2.\n pckh_thr (float): PCKh threshold, default as 0.7.\n auc_nor (float): AUC normalization factor, default as 30 pixel.\n\n Returns:\n List: Evaluation results for evaluation metric.\n \"\"\"\n info_str = []\n\n with open(res_file, 'r') as fin:\n preds = json.load(fin)\n assert len(preds) == len(self.db)\n\n outputs = []\n gts = []\n masks = []\n threshold_bbox = []\n threshold_head_box = []\n\n for pred, item in zip(preds, self.db):\n outputs.append(np.array(pred['keypoints'])[:, :-1])\n gts.append(np.array(item['joints_3d'])[:, :-1])\n masks.append((np.array(item['joints_3d_visible'])[:, 0]) > 0)\n if 'PCK' in metrics:\n bbox = np.array(item['bbox'])\n bbox_thr = np.max(bbox[2:])\n threshold_bbox.append(np.array([bbox_thr, bbox_thr]))\n if 'PCKh' in metrics:\n head_box_thr = item['head_size']\n threshold_head_box.append(\n np.array([head_box_thr, head_box_thr]))\n\n outputs = np.array(outputs)\n gts = np.array(gts)\n masks = np.array(masks)\n threshold_bbox = np.array(threshold_bbox)\n threshold_head_box = np.array(threshold_head_box)\n\n if 'PCK' in metrics:\n _, pck, _ = keypoint_pck_accuracy(outputs, gts, masks, pck_thr,\n threshold_bbox)\n info_str.append(('PCK', pck))\n\n if 'PCKh' in metrics:\n _, pckh, _ = keypoint_pck_accuracy(outputs, gts, masks, pckh_thr,\n threshold_head_box)\n info_str.append(('PCKh', pckh))\n\n if 'AUC' in metrics:\n info_str.append(('AUC', keypoint_auc(outputs, gts, masks,\n auc_nor)))\n\n if 'EPE' in metrics:\n info_str.append(('EPE', keypoint_epe(outputs, gts, masks)))\n\n return info_str\n\n def __len__(self):\n \"\"\"Get the size of the dataset.\"\"\"\n return len(self.db)\n\n def __getitem__(self, idx):\n \"\"\"Get the sample given index.\"\"\"\n results = copy.deepcopy(self.db[idx])\n results['ann_info'] = self.ann_info\n return self.pipeline(results)\n\n def _sort_and_unique_bboxes(self, kpts, key='bbox_id'):\n \"\"\"sort kpts and remove the repeated ones.\"\"\"\n kpts = sorted(kpts, key=lambda x: x[key])\n num = len(kpts)\n for i in range(num - 1, 0, -1):\n if kpts[i][key] == kpts[i - 1][key]:\n del kpts[i]\n\n return kpts\n"
] | [
[
"numpy.array",
"numpy.zeros",
"numpy.minimum",
"numpy.ones"
],
[
"numpy.max",
"numpy.array",
"numpy.random.rand"
]
] |
gonsoomoon/tensorflow-workshop-for-sagemaker | [
"985ab3853c16f4833caeae6382ccfc4474ac8e98"
] | [
"training_script/cifar10_keras_sm.py"
] | [
"# Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy of this\n# software and associated documentation files (the \"Software\"), to deal in the Software\n# without restriction, including without limitation the rights to use, copy, modify,\n# merge, publish, distribute, sublicense, and/or sell copies of the Software, and to\n# permit persons to whom the Software is furnished to do so.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,\n# INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A\n# PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT\n# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION\n# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE\n# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport argparse\nimport logging\nimport os\n\nfrom keras.callbacks import ModelCheckpoint\nfrom keras.layers import Activation, Conv2D, Dense, Dropout, Flatten, MaxPooling2D, BatchNormalization\nfrom keras.models import Sequential\nfrom keras.optimizers import Adam, SGD, RMSprop\nimport tensorflow as tf\nfrom keras import backend as K\n\nsess = tf.Session()\nK.set_session(sess)\n\nlogging.getLogger().setLevel(logging.INFO)\ntf.logging.set_verbosity(tf.logging.INFO)\nHEIGHT = 32\nWIDTH = 32\nDEPTH = 3\nNUM_CLASSES = 10\nNUM_DATA_BATCHES = 5\nNUM_EXAMPLES_PER_EPOCH_FOR_TRAIN = 10000 * NUM_DATA_BATCHES\nINPUT_TENSOR_NAME = 'inputs_input' # needs to match the name of the first layer + \"_input\"\n\ndef keras_model_fn(learning_rate, weight_decay, optimizer, momentum):\n \"\"\"keras_model_fn receives hyperparameters from the training job and returns a compiled keras model.\n The model will be transformed into a TensorFlow Estimator before training and it will be saved in a \n TensorFlow Serving SavedModel at the end of training.\n\n Args:\n hyperparameters: The hyperparameters passed to the SageMaker TrainingJob that runs your TensorFlow \n training script.\n Returns: A compiled Keras model\n \"\"\"\n model = Sequential()\n model.add(Conv2D(32, (3, 3), padding='same', name='inputs', input_shape=(HEIGHT, WIDTH, DEPTH)))\n model.add(BatchNormalization())\n model.add(Activation('relu'))\n model.add(Conv2D(32, (3, 3)))\n model.add(BatchNormalization())\n model.add(Activation('relu'))\n model.add(MaxPooling2D(pool_size=(2, 2)))\n model.add(Dropout(0.2))\n\n model.add(Conv2D(64, (3, 3), padding='same'))\n model.add(BatchNormalization())\n model.add(Activation('relu'))\n model.add(Conv2D(64, (3, 3)))\n model.add(BatchNormalization())\n model.add(Activation('relu'))\n model.add(MaxPooling2D(pool_size=(2, 2)))\n model.add(Dropout(0.3))\n\n model.add(Conv2D(128, (3, 3), padding='same'))\n model.add(BatchNormalization())\n model.add(Activation('relu'))\n model.add(Conv2D(128, (3, 3)))\n model.add(BatchNormalization())\n model.add(Activation('relu'))\n model.add(MaxPooling2D(pool_size=(2, 2)))\n model.add(Dropout(0.4))\n\n model.add(Flatten())\n model.add(Dense(512))\n model.add(Activation('relu'))\n model.add(Dropout(0.5))\n model.add(Dense(NUM_CLASSES))\n model.add(Activation('softmax'))\n\n size = 1\n\n if optimizer.lower() == 'sgd':\n opt = SGD(lr=learning_rate * size, decay=weight_decay, momentum=momentum)\n elif optimizer.lower() 
== 'rmsprop':\n opt = RMSprop(lr=learning_rate * size, decay=weight_decay)\n else:\n opt = Adam(lr=learning_rate * size, decay=weight_decay)\n\n model.compile(loss='categorical_crossentropy',\n optimizer=opt,\n metrics=['accuracy'])\n return model\n\n\ndef get_filenames(channel_name, channel):\n if channel_name in ['train', 'validation', 'eval']:\n return [os.path.join(channel, channel_name + '.tfrecords')]\n else:\n raise ValueError('Invalid data subset \"%s\"' % channel_name)\n\n\ndef train_input_fn():\n return _input(args.epochs, args.batch_size, args.train, 'train')\n\n\ndef eval_input_fn():\n return _input(args.epochs, args.batch_size, args.eval, 'eval')\n\n\ndef validation_input_fn():\n return _input(args.epochs, args.batch_size, args.validation, 'validation')\n\n\ndef _input(epochs, batch_size, channel, channel_name):\n\n filenames = get_filenames(channel_name, channel)\n dataset = tf.data.TFRecordDataset(filenames)\n\n dataset = dataset.repeat(epochs)\n dataset = dataset.prefetch(10)\n\n # Parse records.\n dataset = dataset.map(\n _dataset_parser, num_parallel_calls=10)\n\n # Potentially shuffle records.\n if channel_name == 'train':\n # Ensure that the capacity is sufficiently large to provide good random\n # shuffling.\n buffer_size = int(NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN * 0.4) + 3 * batch_size\n dataset = dataset.shuffle(buffer_size=buffer_size)\n\n # Batch it up.\n dataset = dataset.batch(batch_size, drop_remainder=True)\n iterator = dataset.make_one_shot_iterator()\n image_batch, label_batch = iterator.get_next()\n\n return {INPUT_TENSOR_NAME: image_batch}, label_batch\n\n\ndef _train_preprocess_fn(image):\n \"\"\"Preprocess a single training image of layout [height, width, depth].\"\"\"\n # Resize the image to add four extra pixels on each side.\n image = tf.image.resize_image_with_crop_or_pad(image, HEIGHT + 8, WIDTH + 8)\n\n # Randomly crop a [HEIGHT, WIDTH] section of the image.\n image = tf.random_crop(image, [HEIGHT, WIDTH, DEPTH])\n\n # Randomly flip the image horizontally.\n image = tf.image.random_flip_left_right(image)\n\n return image\n\n\ndef _dataset_parser(value):\n \"\"\"Parse a CIFAR-10 record from value.\"\"\"\n featdef = {\n 'image': tf.FixedLenFeature([], tf.string),\n 'label': tf.FixedLenFeature([], tf.int64),\n }\n\n example = tf.parse_single_example(value, featdef)\n image = tf.decode_raw(example['image'], tf.uint8)\n image.set_shape([DEPTH * HEIGHT * WIDTH])\n\n # Reshape from [depth * height * width] to [depth, height, width].\n image = tf.cast(\n tf.transpose(tf.reshape(image, [DEPTH, HEIGHT, WIDTH]), [1, 2, 0]),\n tf.float32)\n label = tf.cast(example['label'], tf.int32)\n image = _train_preprocess_fn(image)\n return image, tf.one_hot(label, NUM_CLASSES)\n\ndef save_model(model, output):\n signature = tf.saved_model.signature_def_utils.predict_signature_def(\n inputs={'inputs': model.input}, outputs={'scores': model.output})\n\n builder = tf.saved_model.builder.SavedModelBuilder(output+'/1/')\n builder.add_meta_graph_and_variables(\n sess=K.get_session(),\n tags=[tf.saved_model.tag_constants.SERVING],\n signature_def_map={\"serving_default\": signature})\n builder.save()\n\n logging.info(\"Model successfully saved at: {}\".format(output))\n return\n\ndef main(args):\n logging.info(\"getting data\")\n train_dataset = train_input_fn()\n eval_dataset = eval_input_fn()\n validation_dataset = validation_input_fn()\n\n logging.info(\"configuring model\")\n model = keras_model_fn(args.learning_rate, args.weight_decay, args.optimizer, args.momentum)\n 
callbacks = []\n\n # -----------수정 부분\n# callbacks.append(ModelCheckpoint(args.model_dir + '/checkpoint-{epoch}.h5'))\n callbacks.append(ModelCheckpoint(args.model_output_dir + '/checkpoint-{epoch}.h5'))\n\n logging.info(\"Starting training\")\n model.fit(x=train_dataset[0], y=train_dataset[1],\n steps_per_epoch=(num_examples_per_epoch('train') // args.batch_size),\n epochs=args.epochs, validation_data=validation_dataset,\n validation_steps=(num_examples_per_epoch('validation') // args.batch_size), callbacks=callbacks)\n\n score = model.evaluate(eval_dataset[0], eval_dataset[1], steps=num_examples_per_epoch('eval') // args.batch_size,\n verbose=0)\n\n logging.info('Test loss:{}'.format(score[0]))\n logging.info('Test accuracy:{}'.format(score[1]))\n\n # -------------수정 부분\n# return save_model(model, args.model_dir)\n return save_model(model, args.model_output_dir)\n\ndef num_examples_per_epoch(subset='train'):\n if subset == 'train':\n return 40000\n elif subset == 'validation':\n return 10000\n elif subset == 'eval':\n return 10000\n else:\n raise ValueError('Invalid data subset \"%s\"' % subset)\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument(\n '--train',\n type=str,\n required=False,\n default=os.environ.get('SM_CHANNEL_TRAIN'), # ----수정 부분\n help='The directory where the CIFAR-10 input data is stored.')\n parser.add_argument(\n '--validation',\n type=str,\n required=False,\n default=os.environ.get('SM_CHANNEL_VALIDATION'), # ----수정 부분 \n help='The directory where the CIFAR-10 input data is stored.')\n parser.add_argument(\n '--eval',\n type=str,\n required=False,\n default=os.environ.get('SM_CHANNEL_EVAL'), # ----수정 부분 \n help='The directory where the CIFAR-10 input data is stored.')\n parser.add_argument(\n '--model_dir',\n type=str,\n required=True,\n help='The directory where the model will be stored.')\n parser.add_argument(\n '--weight-decay',\n type=float,\n default=2e-4,\n help='Weight decay for convolutions.')\n parser.add_argument(\n '--learning-rate',\n type=float,\n default=0.001,\n help=\"\"\"\\\n This is the inital learning rate value. The learning rate will decrease\n during training. For more details check the model_fn implementation in\n this file.\\\n \"\"\")\n parser.add_argument(\n '--epochs',\n type=int,\n default=10,\n help='The number of steps to use for training.')\n parser.add_argument(\n '--batch-size',\n type=int,\n default=128,\n help='Batch size for training.')\n parser.add_argument(\n '--optimizer',\n type=str,\n default='adam')\n parser.add_argument(\n '--momentum',\n type=float,\n default='0.9')\n # ----------추가 부분\n parser.add_argument(\n '--model_output_dir',\n type=str,\n default=os.environ.get('SM_MODEL_DIR'))\n \n args = parser.parse_args()\n main(args)"
] | [
[
"tensorflow.image.resize_image_with_crop_or_pad",
"tensorflow.image.random_flip_left_right",
"tensorflow.FixedLenFeature",
"tensorflow.data.TFRecordDataset",
"tensorflow.decode_raw",
"tensorflow.cast",
"tensorflow.saved_model.builder.SavedModelBuilder",
"tensorflow.reshape",
"tensorflow.random_crop",
"tensorflow.logging.set_verbosity",
"tensorflow.Session",
"tensorflow.one_hot",
"tensorflow.parse_single_example",
"tensorflow.saved_model.signature_def_utils.predict_signature_def"
]
] |
slimnsour/datman | [
"6ac4827e2ae20401eb4b048d42bdfca5db5d3de9"
] | [
"bin/dm_link.py"
] | [
"#!/usr/bin/env python\n\"\"\"\nRenames (links) exam zip archives by consulting a lookup table.\n\nThis program looks up the proper name in a table that lists the original exam\narchive name, and the target name.\n\nUsage:\n dm_link.py [options] <study>\n dm_link.py [options] <study> <zipfile>\n\nArguments:\n <study> Name of the study to process\n <zipfile> Single Zipfile to process\n\nOptions:\n --lookup FILE Path to scan id lookup table,\n overrides metadata/scans.csv\n --scanid-field STR Dicom field to match target_name with\n [default: PatientName]\n -v --verbose Verbose logging\n -d --debug Debug logging\n -q --quiet Less debuggering\n --dry-run Dry run\n\n\nDETAILS\n This program is used to rename an exam archive with their properly\n formatted scan names (see datman.scanid). Two approaches are used to find\n this name:\n\n ### Scan ID in a lookup table (--lookup)\n\n The lookup table should have atleast two columns: source_name, and\n target_name. For example:\n\n source_name target_name\n 2014_0126_FB001 ASDD_CMH_FB001_01_01\n\n The source_name column is matched against the archive filename (so the\n entry above applies to 2014_0126_FB001.zip). The target_name column\n specifies the proper name for the exam.\n\n If the archive is not found in the lookup table, the dicom header is\n consulted:\n\n ### Scan ID in the dicom header (--scanid-field)\n\n Some scans may have the scan ID embedded in a dicom header field.\n\n The --scanid-field specifies a dicom header field to check for a\n well-formatted exam name.\n\n\nADDITIONAL MATCH CONDITIONS\n Additional columns in the lookup table can be specified to ensure that the\n DICOM headers of the file match what is expected. These column names should\n start with dicom_. For example,\n\n source_name target_name dicom_StudyID\n 2014_0126_FB001 ASDD_CMH_FB001_01_01 512\n\n In the example above, this script would check that the StudyID field of an\n arbitrary dicom file in the archive contains the value \"512\". 
If not, an\n error is thrown.\n\nIGNORING EXAM ARCHIVES\n Exam archives can be ignored by placing an entry into the lookup table with\n the target_name of '<ignore>', for example:\n source_name target_name dicom_StudyID\n 2014_0126_FB001 <ignore>\n\"\"\"\n\nimport glob\nimport logging\nimport os\nimport sys\n\nfrom docopt import docopt\nimport pandas as pd\n\nimport datman.config\nimport datman.scanid\nimport datman.utils\n\nlogger = logging.getLogger(os.path.basename(__file__))\n\nalready_linked = {}\nlookup = None\nDRYRUN = None\n\n\ndef main():\n # make the already_linked dict global as we are going to use it a lot\n global already_linked\n global lookup\n global DRYRUN\n\n arguments = docopt(__doc__)\n verbose = arguments[\"--verbose\"]\n debug = arguments[\"--debug\"]\n DRYRUN = arguments[\"--dry-run\"]\n quiet = arguments[\"--quiet\"]\n study = arguments[\"<study>\"]\n lookup_path = arguments[\"--lookup\"]\n scanid_field = arguments[\"--scanid-field\"]\n zipfile = arguments[\"<zipfile>\"]\n\n # setup logging\n ch = logging.StreamHandler(sys.stdout)\n ch.setLevel(logging.WARN)\n logger.setLevel(logging.WARN)\n if quiet:\n logger.setLevel(logging.ERROR)\n ch.setLevel(logging.ERROR)\n if verbose:\n logger.setLevel(logging.INFO)\n ch.setLevel(logging.INFO)\n if debug:\n logger.setLevel(logging.DEBUG)\n ch.setLevel(logging.DEBUG)\n\n formatter = logging.Formatter(\"%(asctime)s - %(name)s - {study} - \"\n \"%(levelname)s - %(message)s\".format(\n study=study))\n ch.setFormatter(formatter)\n\n logger.addHandler(ch)\n\n # setup the config object\n cfg = datman.config.config(study=study)\n if not lookup_path:\n lookup_path = os.path.join(cfg.get_path(\"meta\"), \"scans.csv\")\n\n dicom_path = cfg.get_path(\"dicom\")\n zips_path = cfg.get_path(\"zips\")\n\n if not os.path.isdir(dicom_path):\n logger.warning(\"Dicom folder {} doesnt exist, creating it.\".format(\n dicom_path))\n try:\n os.makedirs(dicom_path)\n except IOError:\n logger.error(\"Failed to create dicom path {}\".format(dicom_path))\n return\n\n if not os.path.isdir(zips_path):\n logger.error(\"Zips path {} doesnt exist\".format(zips_path))\n return\n\n try:\n lookup = pd.read_csv(lookup_path, sep=\"\\s+\", dtype=str) # noqa: W605\n except IOError:\n logger.error(\"Lookup file {} not found\".format(lookup_path))\n return\n\n # identify which zip files have already been linked\n already_linked = {os.path.realpath(f): f\n for f\n in glob.glob(os.path.join(dicom_path, \"*\"))\n if os.path.islink(f)}\n\n if zipfile:\n if isinstance(zipfile, str):\n zipfile = [zipfile]\n archives = [os.path.join(zips_path, zip) for zip in zipfile]\n else:\n archives = [os.path.join(zips_path, archive)\n for archive\n in os.listdir(zips_path)\n if os.path.splitext(archive)[1] == \".zip\"]\n\n logger.info(\"Found {} archives\".format(len(archives)))\n for archive in archives:\n link_archive(archive, dicom_path, scanid_field, cfg)\n\n\ndef link_archive(archive_path, dicom_path, scanid_field, config):\n if not os.path.isfile(archive_path):\n logger.error(\"Archive {} not found\".format(archive_path))\n return\n\n try:\n linked_path = already_linked[os.path.realpath(archive_path)]\n except KeyError:\n linked_path = \"\"\n\n if linked_path:\n logger.info(\"{} already linked at {}\".format(archive_path,\n linked_path))\n return\n\n scanid = get_scanid_from_lookup_table(archive_path)\n\n # if scanid has been returned from the lookup table its a tuplet\n # otherwise None\n if scanid:\n scanid, lookupinfo = scanid\n\n if scanid == \"<ignore>\":\n 
logger.info(\"Ignoring {}\".format(archive_path))\n return\n\n if not scanid:\n scanid = get_scanid_from_header(archive_path, scanid_field)\n\n if not scanid:\n logger.error(\"Scanid not found for archive: {}\".format(archive_path))\n return\n\n try:\n ident = datman.utils.validate_subject_id(scanid, config)\n except datman.scanid.ParseException as e:\n logger.error(\"Can't make link for {}. Reason: {}\".format(\n archive_path, e))\n return\n\n scanid = str(ident)\n\n # do the linking\n target = os.path.join(dicom_path, scanid)\n target = target + datman.utils.get_extension(archive_path)\n if os.path.exists(target):\n logger.error(\"Target: {} already exists for archive: {}\"\n .format(target, archive_path))\n return\n\n relpath = os.path.relpath(archive_path, dicom_path)\n logger.info(\"Linking {} to {}\".format(relpath, target))\n if not DRYRUN:\n os.symlink(relpath, target)\n\n\ndef get_scanid_from_lookup_table(archive_path):\n \"\"\"\n Gets the scanid from the lookup table (pandas dataframe)\n\n Returns the scanid and the rest of the lookup table information (e.g.\n expected dicom header matches). If no match is found, both the scan id and\n lookup table info is None.\n \"\"\"\n global lookup\n basename = os.path.basename(os.path.normpath(archive_path))\n source_name = basename[:-len(datman.utils.get_extension(basename))]\n lookupinfo = lookup[lookup[\"source_name\"] == source_name]\n\n if len(lookupinfo) == 0:\n logger.debug(\"{} not found in source_name column.\"\n .format(source_name))\n return\n else:\n scanid = lookupinfo[\"target_name\"].tolist()[0]\n return (scanid, lookupinfo)\n\n\ndef get_archive_headers(archive_path):\n # get some DICOM headers from the archive\n header = None\n try:\n header = datman.utils.get_archive_headers(archive_path,\n stop_after_first=True)\n header = list(header.values())[0]\n except Exception:\n logger.warning(\"Archive: {} contains no DICOMs\".format(archive_path))\n return header\n\n\ndef get_scanid_from_header(archive_path, scanid_field):\n \"\"\"\n Gets the scanid from the dicom header object.\n\n Returns None if the header field isn't present or the value isn't a proper\n scan ID.\n \"\"\"\n header = get_archive_headers(archive_path)\n if not header:\n return False\n if scanid_field not in header:\n logger.error(\"{} field is not in {} dicom headers\"\n .format(scanid_field, archive_path))\n return\n\n scanid = str(header.get(scanid_field))\n\n if datman.scanid.is_scanid(scanid):\n logger.debug(\"{}: Using scan ID from dicom field {} = {}.\"\n .format(archive_path, scanid_field, scanid))\n return scanid\n else:\n logger.warning(\"{}: {} (header {}) not valid scan ID\"\n .format(archive_path, scanid, scanid_field))\n return None\n\n\ndef validate_headers(archive_path, lookupinfo, scanid_field):\n \"\"\"\n Validates an exam archive against the lookup table\n\n Checks that all dicom_* dicom header fields match the lookup table\n \"\"\"\n header = get_archive_headers(archive_path)\n if not header:\n return False\n\n columns = lookupinfo.columns.values.tolist()\n dicom_cols = [c for c in columns if c.startswith(\"dicom_\")]\n\n for c in dicom_cols:\n f = c.split(\"_\")[1]\n\n if f not in header:\n logger.error(\"{} field is not in {} dicom headers\"\n .format(scanid_field, archive_path))\n return False\n\n actual = str(header.get(f))\n expected = str(lookupinfo[c].tolist()[0])\n\n if actual != expected:\n logger.error(\"{}: dicom field '{}' = '{}', expected '{}'\"\n .format(archive_path, f, actual, expected))\n return False\n return True\n\n\nif 
__name__ == \"__main__\":\n main()\n"
] | [
[
"pandas.read_csv"
]
] |
haggaila/qiskit-dynamics | [
"fd20314e2b591c35323782bc429d9f928fdb9a12"
] | [
"test/dynamics/solvers/test_solver_classes.py"
] | [
"# This code is part of Qiskit.\n#\n# (C) Copyright IBM 2021.\n#\n# This code is licensed under the Apache License, Version 2.0. You may\n# obtain a copy of this license in the LICENSE.txt file in the root directory\n# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.\n#\n# Any modifications or derivative works of this code must retain this\n# copyright notice, and modified files need to carry a notice indicating\n# that they have been altered from the originals.\n# pylint: disable=invalid-name\n\n\"\"\"\nTests for solver classes module.\n\"\"\"\n\nimport numpy as np\n\nfrom qiskit import QiskitError\nfrom qiskit.quantum_info import Operator, Statevector, SuperOp, DensityMatrix\n\nfrom qiskit_dynamics import Solver\nfrom qiskit_dynamics.signals import Signal\n\nfrom ..common import QiskitDynamicsTestCase, TestJaxBase\n\n\nclass TestSolverExceptions(QiskitDynamicsTestCase):\n \"\"\"Tests for Solver exception raising based on input types.\"\"\"\n\n def setUp(self):\n X = Operator.from_label(\"X\")\n self.ham_solver = Solver(hamiltonian_operators=[X], hamiltonian_signals=[1.0])\n\n self.lindblad_solver = Solver(\n hamiltonian_operators=[X], hamiltonian_signals=[1.0], dissipator_operators=[X]\n )\n\n self.vec_lindblad_solver = Solver(\n hamiltonian_operators=[X],\n hamiltonian_signals=[1.0],\n dissipator_operators=[X],\n evaluation_mode=\"dense_vectorized\",\n )\n\n def test_hamiltonian_shape_error(self):\n \"\"\"Test error raising if invalid shape for Hamiltonian model.\"\"\"\n\n with self.assertRaises(QiskitError) as qe:\n self.ham_solver.solve([0.0, 1.0], np.array([1.0, 0.0, 0.0]))\n self.assertTrue(\"Shape mismatch\" in str(qe.exception))\n\n with self.assertRaises(QiskitError) as qe:\n self.ham_solver.solve([0.0, 1.0], np.array([[[1.0, 0.0, 0.0]]]))\n self.assertTrue(\"Shape mismatch\" in str(qe.exception))\n\n with self.assertRaises(QiskitError) as qe:\n self.ham_solver.solve([0.0, 1.0], Statevector(np.array([1.0, 0.0, 0.0])))\n self.assertTrue(\"Shape mismatch\" in str(qe.exception))\n\n def test_lindblad_shape_error(self):\n \"\"\"Test error raising if invalid shape for Lindblad model.\"\"\"\n\n with self.assertRaises(QiskitError) as qe:\n self.lindblad_solver.solve([0.0, 1.0], np.array([1.0, 0.0, 0.0]))\n self.assertTrue(\"Shape mismatch\" in str(qe.exception))\n\n with self.assertRaises(QiskitError) as qe:\n self.lindblad_solver.solve([0.0, 1.0], np.array([[[1.0, 0.0, 0.0]]]))\n self.assertTrue(\"Shape mismatch\" in str(qe.exception))\n\n with self.assertRaises(QiskitError) as qe:\n self.lindblad_solver.solve([0.0, 1.0], Statevector(np.array([1.0, 0.0, 0.0])))\n self.assertTrue(\"Shape mismatch\" in str(qe.exception))\n\n def test_vectorized_lindblad_shape_error(self):\n \"\"\"Test error raising if invalid shape for vectorized Lindblad model.\"\"\"\n\n with self.assertRaises(QiskitError) as qe:\n self.vec_lindblad_solver.solve([0.0, 1.0], np.array([[1.0, 0.0], [0.0, 1.0]]))\n self.assertTrue(\"Shape mismatch\" in str(qe.exception))\n\n with self.assertRaises(QiskitError) as qe:\n self.vec_lindblad_solver.solve([0.0, 1.0], DensityMatrix(np.array([1.0, 0.0, 0.0])))\n self.assertTrue(\"Shape mismatch\" in str(qe.exception))\n\n with self.assertRaises(QiskitError) as qe:\n self.vec_lindblad_solver.solve([0.0, 1.0], Statevector(np.array([1.0, 0.0, 0.0])))\n self.assertTrue(\"Shape mismatch\" in str(qe.exception))\n\n def test_non_vectorized_SuperOp_error(self):\n \"\"\"Test SuperOp simulation attempt for non-vectorized Lindblad model.\"\"\"\n\n with 
self.assertRaises(QiskitError) as qe:\n self.lindblad_solver.solve([0.0, 1.0], SuperOp(np.eye(4)))\n self.assertTrue(\"Simulating SuperOp\" in str(qe.exception))\n\n\nclass TestSolver(QiskitDynamicsTestCase):\n \"\"\"Tests for Solver class.\"\"\"\n\n def setUp(self):\n \"\"\"Set up some simple models.\"\"\"\n X = 2 * np.pi * Operator.from_label(\"X\") / 2\n Z = 2 * np.pi * Operator.from_label(\"Z\") / 2\n self.ham_solver = Solver(\n hamiltonian_operators=[X],\n hamiltonian_signals=[Signal(1.0, 5.0)],\n drift=5 * Z,\n rotating_frame=5 * Z,\n )\n\n self.rwa_ham_solver = Solver(\n hamiltonian_operators=[X],\n hamiltonian_signals=[Signal(1.0, 5.0)],\n drift=5 * Z,\n rotating_frame=5 * Z,\n rwa_cutoff_freq=2 * 5.0,\n )\n\n self.lindblad_solver = Solver(\n hamiltonian_operators=[X],\n hamiltonian_signals=[Signal(1.0, 5.0)],\n dissipator_operators=[0.01 * X],\n drift=5 * Z,\n rotating_frame=5 * Z,\n )\n\n self.vec_lindblad_solver = Solver(\n hamiltonian_operators=[X],\n hamiltonian_signals=[Signal(1.0, 5.0)],\n dissipator_operators=[0.01 * X],\n drift=5 * Z,\n rotating_frame=5 * Z,\n evaluation_mode=\"dense_vectorized\",\n )\n\n # lindblad solver with no dissipation for testing\n self.vec_lindblad_solver_no_diss = Solver(\n hamiltonian_operators=[X],\n hamiltonian_signals=[Signal(1.0, 5.0)],\n dissipator_operators=[0.0 * X],\n drift=5 * Z,\n rotating_frame=5 * Z,\n evaluation_mode=\"dense_vectorized\",\n )\n self.method = \"DOP853\"\n\n def test_lindblad_solve_statevector(self):\n \"\"\"Test correct conversion of Statevector to DensityMatrix.\"\"\"\n\n results = self.lindblad_solver.solve(\n [0.0, 1.0], y0=Statevector([0.0, 1.0]), method=self.method\n )\n self.assertTrue(isinstance(results.y[-1], DensityMatrix))\n self.assertTrue(results.y[-1].data[0, 0] > 0.99 and results.y[-1].data[0, 0] < 0.999)\n\n def test_vec_lindblad_statevector(self):\n \"\"\"Test correct conversion of Statevector to DensityMatrix and vectorized solving.\"\"\"\n\n results = self.vec_lindblad_solver.solve(\n [0.0, 1.0], y0=Statevector([0.0, 1.0]), method=self.method\n )\n results2 = self.lindblad_solver.solve(\n [0.0, 1.0], y0=Statevector([0.0, 1.0]), method=self.method\n )\n self.assertTrue(isinstance(results.y[-1], DensityMatrix))\n self.assertAllClose(results.y[-1].data, results2.y[-1].data)\n\n def test_array_vectorized_lindblad(self):\n \"\"\"Test Lindblad solver is array-vectorized.\"\"\"\n results = self.lindblad_solver.solve(\n [0.0, 1.0],\n y0=np.array([[[0.0, 0.0], [0.0, 1.0]], [[1.0, 0.0], [0.0, 0.0]]]),\n method=self.method,\n )\n self.assertTrue(results.y[-1][0, 0, 0] > 0.99 and results.y[-1][0, 0, 0] < 0.999)\n self.assertTrue(results.y[-1][1, 1, 1] > 0.99 and results.y[-1][1, 1, 1] < 0.999)\n\n def test_rwa_hamiltonian(self):\n \"\"\"Test perfect inversion for pi pulse with RWA.\"\"\"\n results = self.rwa_ham_solver.solve(\n [0.0, 1.0], y0=np.array([0.0, 1.0]), atol=1e-10, rtol=1e-10, method=self.method\n )\n self.assertTrue(np.abs(results.y[-1][0]) > (1 - 1e-8))\n\n def test_hamiltonian_DensityMatrix(self):\n \"\"\"Test correct conjugation of Hamiltonian-based density matrix simulation.\"\"\"\n results = self.ham_solver.solve(\n [0.0, 1.0],\n y0=DensityMatrix(np.array([0.0, 1.0])),\n atol=1e-10,\n rtol=1e-10,\n method=self.method,\n )\n self.assertTrue(isinstance(results.y[-1], DensityMatrix))\n self.assertTrue(np.abs(results.y[-1].data[0, 0]) > 0.999)\n\n def test_hamiltonian_SuperOp(self):\n \"\"\"Test Hamiltonian-based SuperOp simulation.\"\"\"\n results = self.rwa_ham_solver.solve(\n [0.0, 1.0], 
y0=SuperOp(np.eye(4)), atol=1e-10, rtol=1e-10, method=self.method\n )\n self.assertTrue(isinstance(results.y[-1], SuperOp))\n X = np.array([[0.0, 1.0], [1.0, 0.0]])\n self.assertAllClose(results.y[-1].data, np.kron(X, X))\n\n def test_hamiltonian_lindblad_SuperOp_consistency(self):\n \"\"\"Test Hamiltonian-based SuperOp simulation.\"\"\"\n results = self.ham_solver.solve(\n [0.0, 0.432], y0=SuperOp(np.eye(4)), atol=1e-10, rtol=1e-10, method=self.method\n )\n results2 = self.vec_lindblad_solver_no_diss.solve(\n [0.0, 0.432], y0=SuperOp(np.eye(4)), atol=1e-10, rtol=1e-10\n )\n self.assertAllClose(results.y[-1].data, results2.y[-1].data)\n\n\nclass TestSolverJax(TestSolver, TestJaxBase):\n \"\"\"JAX version of TestSolver.\"\"\"\n\n def setUp(self):\n \"\"\"Set method to 'jax_odeint' to speed up running of jax version of tests.\"\"\"\n super().setUp()\n self.method = \"jax_odeint\"\n\n def test_jit_solve(self):\n \"\"\"Test jitting setting signals and solving.\"\"\"\n\n def func(a):\n ham_solver = self.ham_solver.copy()\n ham_solver.signals = [Signal(lambda t: a, 5.0)]\n yf = ham_solver.solve(\n np.array([0.0, 1.0]), y0=np.array([0.0, 1.0]), method=self.method\n ).y[-1]\n return yf\n\n jit_func = self.jit_wrap(func)\n self.assertAllClose(jit_func(2.0), func(2.0))\n\n def test_jit_grad_solve(self):\n \"\"\"Test jitting setting signals and solving.\"\"\"\n\n def func(a):\n lindblad_solver = self.lindblad_solver.copy()\n lindblad_solver.signals = [[Signal(lambda t: a, 5.0)], [1.0]]\n yf = lindblad_solver.solve(\n [0.0, 1.0], y0=np.array([[0.0, 1.0], [0.0, 1.0]]), method=self.method\n ).y[-1]\n return yf\n\n jit_grad_func = self.jit_grad_wrap(func)\n jit_grad_func(1.0)\n"
] | [
[
"numpy.eye",
"numpy.array",
"numpy.kron",
"numpy.abs"
]
] |
Sourodip-ghosh123/Fruits-360 | [
"f15ce919757f0a0ce057f4ba4b49ce3d5aba53e2"
] | [
"ResNet50 V2/resnet50_v2_model.py"
] | [
"from keras.applications.resnet_v2 import ResNet50V2\nmodel=ResNet50V2(include_top=True, weights=None, input_tensor=None, input_shape=(100,100,3),classes=41)\nmodel.summary()\n\nmodel.compile(loss='categorical_crossentropy',\n optimizer='adam',\n metrics=['accuracy'])\nprint('Compiled!')\n\nfrom keras.models import Sequential\nfrom keras.layers import Conv2D,MaxPooling2D\nfrom keras.layers import Activation, Dense, Flatten, Dropout\nfrom keras.preprocessing.image import ImageDataGenerator\nfrom keras.callbacks import ModelCheckpoint\nfrom keras import backend as K\nbatch_size = 50\n\ncheckpointer = ModelCheckpoint(filepath = 'cnn_from_scratch_fruits.hdf5', save_best_only = True)\n\nhistory = model.fit(x_train,y_train,\n batch_size = 50,\n epochs=15,\n validation_data=(x_valid, y_vaild),\n callbacks = [checkpointer],\n shuffle=True\n )\n \nmodel.load_weights('cnn_from_scratch_fruits.hdf5')\n\nscore = model.evaluate(x_test, y_test, verbose=0)\nprint('\\n', 'Test accuracy:', score[1])\n\nimport matplotlib.pyplot as plt\n\n# Plot training & validation accuracy values\nplt.plot(history.history['accuracy'])\nplt.plot(history.history['val_accuracy'])\nplt.title('Model accuracy')\nplt.ylabel('Accuracy')\nplt.xlabel('Epoch')\nplt.legend(['Train', 'Test'], loc='upper left')\nplt.show()\n\n# Plot training & validation loss values\nplt.plot(history.history['loss'])\nplt.plot(history.history['val_loss'])\nplt.title('Model loss')\nplt.ylabel('Loss')\nplt.xlabel('Epoch')\nplt.legend(['Train', 'Test'], loc='upper left')\nplt.show()\n"
] | [
[
"matplotlib.pyplot.legend",
"matplotlib.pyplot.title",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.show",
"matplotlib.pyplot.ylabel"
]
] |
gandreassi/ImmoKaa | [
"904115e5a6f91ca78b41aebdaf4ffe3934a4c318"
] | [
"ImmoKaa/scraper.py"
] | [
"from bs4 import BeautifulSoup\nimport urllib.request as urllib2\nimport random\nfrom random import choice\nimport pandas as pd\nimport copy, time, sys, shutil, os, yaml, json\nimport datetime as dt\nfrom glob import glob\nimport regex\n\nclass scraper():\n \n criteria = None\n df = None\n df_pre = None\n __verbose = False\n __parameter_names = { #this dict translate the parameters into thei corresponding url bit\n 'min_price' : 'pf',\n 'max_price' : 'pt',\n 'min_rooms' : 'nrf',\n 'max_rooms' : 'nrt',\n 'radius' : 'r',\n 'days_old' : 'pa',\n }\n __instance_name = None\n __root_dir = \"./ImmoKaa_data/\"\n __base_dir = None\n \n \n \n def __init__(self, instance_name, criteria_file):\n self.__instance_name = instance_name\n self.__base_dir = self.__root_dir+instance_name\n os.makedirs(self.__base_dir, exist_ok=True)\n with open(criteria_file) as file:\n self.criteria = yaml.load(file, Loader=yaml.FullLoader) \n self.get_preexisting_data()\n \n\n\n def _urlquery(self, url, verbose=False):\n # function cycles randomly through different user agents and time intervals to simulate more natural queries\n try:\n sleeptime = float(random.randint(1,6))/5\n time.sleep(sleeptime)\n\n agents = ['Mozilla/5.0 (Macintosh; Intel Mac OS X 10_8_2) AppleWebKit/537.17 (KHTML, like Gecko) Chrome/24.0.1309.0 Safari/537.17',\n 'Mozilla/5.0 (compatible; MSIE 10.6; Windows NT 6.1; Trident/5.0; InfoPath.2; SLCC1; .NET CLR 3.0.4506.2152; .NET CLR 3.5.30729; .NET CLR 2.0.50727) 3gpp-gba UNTRUSTED/1.0',\n 'Opera/12.80 (Windows NT 5.1; U; en) Presto/2.10.289 Version/12.02',\n 'Mozilla/4.0 (compatible; MSIE 5.5; Windows NT)',\n 'Mozilla/3.0',\n 'Mozilla/5.0 (iPhone; U; CPU like Mac OS X; en) AppleWebKit/420+ (KHTML, like Gecko) Version/3.0 Mobile/1A543a Safari/419.3',\n 'Mozilla/5.0 (Linux; U; Android 0.5; en-us) AppleWebKit/522+ (KHTML, like Gecko) Safari/419.3',\n 'Opera/9.00 (Windows NT 5.1; U; en)']\n\n agent = choice(agents)\n opener = urllib2.build_opener()\n opener.addheaders = [('User-agent', agent)]\n\n html = opener.open(url).read()\n time.sleep(sleeptime)\n\n return html\n\n except Exception as e:\n if verbose: print('Something went wrong with Crawling:\\n%s' % e)\n return None\n \n \n \n def _immoscout24parser(self, url, verbose=False):\n '''\n Read search results from Immoscout24.ch, given a specific url indicating the search criteria and the page number.\n '''\n if verbose: print (\"Scanning the following url:\", url)\n\n try:\n soup = BeautifulSoup(self._urlquery(url, verbose), 'html.parser')\n scripts = soup.findAll('script')\n scripts = filter(None, [script.string for script in scripts])\n sr = next(script for script in scripts if 'searchResult' in script)\n #Come cleaning... with not-so-clean code. 
Because ImmoScout keeps changing stuff and I can't be bothered to fix this properly every time.\n s = sr.replace(\":undefined\", ':\"undefined\"').lstrip(\"__INITIAL_STATE__=\")\n s = regex.sub('\\{\"render\".*?(?:\\{(?:(?R)|[^{}])*})\\}', '\"\"', s)\n poss = [m.start() for m in regex.finditer('e=>', s)]\n res = s[:poss[0]]\n for i in range(len(poss)):\n end = len(s)\n if i+1 < len(poss):\n end = poss[i+1]\n dd = regex.sub('(?:\\{(?:(?R)|[^{}])*})', '\"\"', s[poss[i]+3:end], 1)\n res += dd\n \n js = json.loads(res)\n return js\n \n except Exception as e:\n if verbose: print(\"Error in immoscout24 parser: %s\" % e)\n return None\n \n \n \n def _make_url(self, criteria, page):\n url = 'https://www.immoscout24.ch/en/real-estate/{mode}/city-{city}?'.format(**criteria)\n for key in [x for x in criteria.keys() if x not in ['city', 'mode']]:\n try:\n url+=self.__parameter_names[key]+'='+str(criteria[key])+\"&\"\n except KeyError:\n raise Exception(\"Error in make_url\", \"Unsupported search parameter!\")\n url = url[:-1]+\"&pn=\"+str(page) #add page number\n\n return url\n \n \n\n def _get_listings(self, criteria, verbose):\n \"\"\"\n Pull a list of listings for given criteria and cities, and put them in a dataframe.\n \"\"\"\n print (\"city:\",criteria['city'])\n page = 0\n data_pages = []\n numberOfPages = 1\n while page<numberOfPages:\n page+=1\n url = self._make_url(criteria, page)\n resultlist_json = None\n N_attempts = 0\n while resultlist_json is None and N_attempts<5:\n try: \n N_attempts+=1\n resultlist_json = self._immoscout24parser(url, verbose)\n numberOfPages = int(resultlist_json[\"pages\"][\"searchResult\"][\"resultData\"][\"pagingData\"][\"totalPages\"])\n print(\"\\tpage: {0}/{1}\".format(page,numberOfPages), end=\" \")\n data = resultlist_json[\"pages\"][\"searchResult\"][\"resultData\"][\"listData\"]\n data = pd.DataFrame.from_dict(data)\n data[\"searched-city\"]=criteria['city'] #store which city we searched, for reference\n data[\"fetch-date\"]=dt.datetime.now().date()\n print(\"({0} results)\".format(data.shape[0]))\n data_pages.append(copy.copy(data))\n except Exception as e:\n print (e)\n pass\n data_all = pd.concat(data_pages)\n\n return data_all\n \n \n \n def scrape(self):\n dfs = []\n for city in self.criteria['cities']:\n criteria_city = copy.copy(self.criteria)\n criteria_city['city'] = city\n del criteria_city['cities']\n dfs.append(self._get_listings(criteria_city, verbose=self.__verbose))\n\n self.df = pd.concat(dfs)\n \n \n \n def set_verbose(self, flag):\n if not isinstance(flag, bool):\n raise Exception(\"ImmoKaa - set_verbose\", \"Argument must be bool.\")\n self.__verbose=flag\n \n \n \n def save_scraped_dataframe(self):\n if self.df is None:\n raise Exception(\"There is no scraped dataset to save.\")\n today = dt.datetime.now().date().strftime(\"%Y-%m-%d\")\n self.df.to_csv(self.__base_dir+\"/serach_results_\"+today+\".csv\", mode=\"w\")\n print (\"History file created/overwritten.\")\n \n \n \n def get_preexisting_data(self):\n pres = []\n try:\n for f in glob(self.__base_dir+\"/serach_results_*.csv\"):\n pres.append(pd.read_csv(f))\n pres[-1][\"fetch-date\"] = pd.to_datetime(pres[-1]['fetch-date'],\\\n format=\"%Y-%m-%d\").dt.date\n self.df_pre = pd.concat(pres)\n print (\"Found {0} pre-existing data file(s). You can access the full dataset using get_full_dataset().\". format(len(pres)))\n except FileNotFoundError:\n pass \n \n \n def get_full_dataset(self):\n return pd.concat([self.df, self.df_pre])"
] | [
[
"pandas.concat",
"pandas.read_csv",
"pandas.to_datetime",
"pandas.DataFrame.from_dict"
]
] |
yxia-fb/shaDow-GNN | [
"2b867011c7084d4ed1b407e29f3ee09632fcc3dc"
] | [
"shaDow/utils.py"
] | [
"import os\nimport torch\nimport glob\n\nimport numpy as np\nimport scipy.sparse as sp\nimport yaml\nfrom sklearn.preprocessing import StandardScaler\n\nfrom shaDow.globals import git_rev, timestamp, Logger\nfrom torch_scatter import scatter\n\nfrom copy import deepcopy\n\nfrom typing import List, Union\nfrom shaDow import TRAIN, VALID, TEST\n\nfrom shaDow.data_converter import convert2shaDow, to_undirected\n\n\n\ndef load_data(prefix, dataset, config_data, os_='linux'):\n Logger.printf(\"Loading training data..\")\n prefix_l = prefix['local']\n fs_shadow = ['adj_full_raw.np[yz]', 'adj_train_raw.np[yz]', 'label_full.npy', 'feat_full.npy', 'split.npy']\n if not all(glob.glob(f\"{prefix_l}/{dataset}/{f}\") for f in fs_shadow):\n convert2shaDow(dataset, prefix_l)\n role = np.load(f\"./{prefix_l}/{dataset}/split.npy\", allow_pickle=True)\n if type(role) == np.ndarray:\n role = role[()]\n else:\n assert type(role) == dict\n # role is used as index, which is required to be int64 (node_set won't take much mem anyways)\n node_set = {TRAIN: np.asarray(role[TRAIN], dtype=np.int64), \n VALID: np.asarray(role[VALID], dtype=np.int64), \n TEST : np.asarray(role[TEST], dtype=np.int64)}\n # load adj. If we want to convert to_undirected, and the undirected adj has been stored as external file,\n # then we skip the conversion in the program and directly load the undirected adj. \n bin_adj_files = {TRAIN: {'indptr': None, 'indices': None, 'data': None},\n VALID: {'indptr': None, 'indices': None, 'data': None},\n TEST: {'indptr': None, 'indices': None, 'data': None}}\n def fill_bin_adj_dict(mode_, split_, type_):\n for d in ['indptr', 'indices', 'data']:\n bin_adj_files[mode_][d] = f\"{prefix_l}/{dataset}/cpp/adj_{split_}_{type_}_{d}.bin\"\n if config_data['to_undirected']:\n if (adj_full := load_adj(prefix_l, dataset, 'undirected', 'full')) is None:\n adj_full = load_adj(prefix_l, dataset, 'raw', 'full')\n adj_full = to_undirected(adj_full)\n fill_bin_adj_dict(VALID, 'full', 'undirected')\n fill_bin_adj_dict(TEST, 'full', 'undirected')\n if config_data['transductive']:\n adj_train = adj_full\n fill_bin_adj_dict(TRAIN, 'full', 'undirected')\n elif (adj_train := load_adj(prefix_l, dataset, 'undirected', 'train')) is None:\n adj_train = load_adj(prefix_l, dataset, 'raw', 'train')\n adj_train = to_undirected(adj_train)\n fill_bin_adj_dict(TRAIN, 'train', 'undirected')\n assert set(adj_train.nonzero()[0]).issubset(set(node_set[TRAIN]))\n else:\n adj_full = load_adj(prefix_l, dataset, 'raw', 'full')\n fill_bin_adj_dict(VALID, 'full', 'raw')\n fill_bin_adj_dict(TEST, 'full', 'raw')\n if config_data['transductive']:\n adj_train = adj_full\n fill_bin_adj_dict(TRAIN, 'full', 'raw')\n else:\n adj_train = load_adj(prefix, dataset, 'raw', 'train')\n assert set(adj_train.nonzero()[0]).issubset(set(node_set[TRAIN]))\n fill_bin_adj_dict(TRAIN, 'train', 'raw')\n\n bin_adj_files = validate_bin_file(bin_adj_files)\n\n Logger.printf(f\"SETTING TO {'TRANS' if config_data['transductive'] else 'IN'}DUCTIVE LEARNING\", style=\"red\")\n label_full = np.load(f\"./{prefix_l}/{dataset}/label_full.npy\")\n label_full = torch.from_numpy(label_full)\n \n # ======= deal with feats =======\n mode_norm = 'all' if config_data['transductive'] else 'train'\n if config_data['norm_feat'] and os.path.isfile(f\"./{prefix_l}/{dataset}/feat_full_norm_{mode_norm}.npy\"):\n feats = np.load(f\"./{prefix_l}/{dataset}/feat_full_norm_{mode_norm}.npy\")\n Logger.printf(f\"Loading '{mode_norm}'-normalized features\", style='yellow')\n else:\n feats = 
np.load(f\"./{prefix_l}/{dataset}/feat_full.npy\")\n if config_data['norm_feat']:\n feats_fit = feats if config_data['transductive'] else feats[node_set[TRAIN]]\n scaler = StandardScaler()\n scaler.fit(feats_fit)\n feats = scaler.transform(feats)\n Logger.printf(f\"Normalizing node features (mode = {mode_norm})\", style=\"yellow\")\n else:\n Logger.printf(\"Not normalizing node features\", style=\"yellow\")\n feats = torch.from_numpy(feats.astype(np.float32, copy=False))\n Logger.printf(\"Done loading training data..\")\n return {'adj_full' : adj_full, \n 'adj_train' : adj_train, \n 'feat_full' : feats, \n 'label_full': label_full, \n 'node_set' : node_set,\n 'bin_adj_files': bin_adj_files}\n\n\ndef parse_n_prepare(task, args, name_graph, dir_log, os_='linux'):\n # [config]\n if args.configs is not None:\n config_train = args.configs\n else:\n assert task in ['inference', 'postproc']\n if task == 'inference':\n if args.inference_configs is None:\n assert not args.compute_complexity_only\n dir_candy = args.inference_dir\n else:\n assert args.inference_dir is None and args.compute_complexity_only\n dir_candy = None\n config_train = args.inference_configs\n else: \n if args.postproc_dir is not None:\n dir_candy = args.postproc_dir\n else:\n with open(args.postproc_configs) as f:\n config_temp = yaml.load(f, Loader=yaml.FullLoader)\n if 'dir_pred_mat' in config_temp: # all such dirs MUST contain the same yaml\n dir_candy = config_temp['dir_pred_mat'][0] \n elif 'dir_emb_mat' in config_temp: # all ens models should have the same arch (only differs in sampler)\n dir_candy = next(iter(config_temp['dir_emb_mat'].values()))[0]\n else:\n raise NotImplementedError\n if dir_candy is not None:\n assert os.path.isdir(dir_candy)\n f_yml = [f for f in os.listdir(dir_candy) if f.split('.')[-1] in ['yml', 'yaml']]\n assert len(f_yml) == 1\n config_train = f\"{dir_candy}/{f_yml[0]}\"\n with open(config_train) as f_config_train:\n config_train = yaml.load(f_config_train, Loader=yaml.FullLoader)\n config_train_copy = deepcopy(config_train)\n # [data]\n config_data = {\"to_undirected\" : False,\n \"transductive\" : False,\n \"norm_feat\" : True}\n config_data.update(config_train['data'])\n # [arch]\n arch_gnn = { # default values\n \"dim\" : -1,\n \"aggr\" : \"sage\",\n \"residue\" : \"none\",\n \"pooling\" : \"center\",\n \"loss\" : \"softmax\",\n \"num_layers\" : -1,\n \"act\" : \"I\",\n \"heads\" : -1,\n \"feature_augment\" : \"hops\",\n \"feature_smoothen\" : \"none\",\n \"label_smoothen\" : \"none\", # label_smoothen is only considered if use_label != none\n \"ensemble_act\" : \"leakyrelu\",\n \"branch_sharing\" : False,\n \"use_label\" : \"none\"\n }\n arch_gnn.update(config_train[\"architecture\"])\n assert arch_gnn['aggr'] in ['sage', 'gat', 'gatscat', 'gcn', 'mlp', 'gin', 'sgc', 'sign']\n assert arch_gnn['use_label'].lower() in ['all', 'none', 'no_valid']\n assert arch_gnn['pooling'].lower().split('-')[0] in ['mean', 'max', 'sum', 'center', 'sort']\n assert arch_gnn['residue'].lower() in ['sum', 'concat', 'max', 'none']\n assert arch_gnn['feature_augment'].lower() in ['hops', 'ppr', 'none']\n if arch_gnn[\"feature_augment\"] and arch_gnn[\"feature_augment\"].lower() != \"none\":\n arch_gnn[\"feature_augment\"] = set(k for k in arch_gnn[\"feature_augment\"].split(\"-\"))\n else:\n arch_gnn['feature_augment'] = set()\n # [params]\n params_train = {\n \"lr\" : 0.01,\n \"dropedge\" : 0.0,\n \"ensemble_dropout\" : \"none\"\n }\n params_train.update(config_train[\"hyperparameter\"])\n params_train[\"lr\"] = 
float(params_train[\"lr\"])\n # [sampler]\n sampler_preproc, sampler_train = [], []\n for s in config_train['sampler']:\n phase = s.pop('phase')\n if phase == 'preprocess':\n sampler_preproc.append(s)\n elif phase == 'train':\n sampler_train.append(s)\n else:\n raise NotImplementedError\n batch_size = config_train[\"hyperparameter\"][\"batch_size\"]\n config_sampler_preproc = {\"batch_size\": batch_size, \"configs\": sampler_preproc}\n config_sampler_train = {\"batch_size\": batch_size, \"configs\": sampler_train}\n # add self-edges for certain arch. e.g., for GAT, will be divide-by-0 error in grad without self-edges\n if arch_gnn[\"aggr\"] in [\"gcn\", \"gat\", \"gatscat\"]:\n for sc in config_sampler_train[\"configs\"]:\n num_ens = [len(v) for k, v in sc.items() if k != 'method']\n assert max(num_ens) == min(num_ens)\n sc[\"add_self_edge\"] = [True] * num_ens[0]\n # [copy yml]\n name_key = f\"{arch_gnn['aggr']}_{arch_gnn['num_layers']}\"\n dir_log_full = log_dir(task, config_train_copy, name_key, dir_log, name_graph, git_rev, timestamp)\n return params_train, config_sampler_preproc, config_sampler_train, config_data, arch_gnn, dir_log_full\n\n\ndef parse_n_prepare_postproc(dir_load, f_config, name_graph, dir_log, arch_gnn, logger):\n if f_config is not None:\n with open(f_config) as f:\n config_postproc = yaml.load(f, Loader=yaml.FullLoader)\n name_key = f\"postproc-{arch_gnn['aggr']}_{arch_gnn['num_layers']}\"\n log_dir('postproc', config_postproc, name_key, dir_log, name_graph, git_rev, timestamp)\n skip_instantiate = []\n if 'check_record' in config_postproc:\n load_acc_record = config_postproc['check_record']\n else:\n load_acc_record = True\n if config_postproc['method'] == 'cs': # C&S\n acc_record = [] if load_acc_record else None\n if dir_load is not None:\n if 'dir_pred_mat' not in config_postproc:\n config_postproc['dir_pred_mat'] = [dir_load]\n elif os.path.realpath(dir_load) not in [os.path.realpath(pc) for pc in config_postproc['dir_pred_mat']]:\n config_postproc['dir_pred_mat'].append(dir_load)\n config_postproc['pred_mat'] = [None] * len(config_postproc['dir_pred_mat'])\n for i, di in enumerate(config_postproc['dir_pred_mat']):\n if load_acc_record:\n acc_record.append(logger.decode_csv('final', di))\n for f in os.listdir(di):\n if 'cs' == f.split('.')[-1] and f.startswith('pred_mat'):\n config_postproc['pred_mat'][i] = torch.load(f\"{di}/{f}\")\n break\n if all(m is not None for m in config_postproc['pred_mat']):\n skip_instantiate = ['data', 'model']\n elif config_postproc['method'] == 'ensemble': # Variant of subgraph ensemble as postproc\n acc_record = {s: [] for s in config_postproc['dir_emb_mat']} if load_acc_record else None\n assert dir_load is None\n config_postproc['emb_mat'] = {k: [None] * len(v) for k, v in config_postproc['dir_emb_mat'].items()}\n for sname, dirs_l in config_postproc['dir_emb_mat'].items():\n for i, di in enumerate(dirs_l):\n if load_acc_record:\n acc_record[sname].append(logger.decode_csv('final', di))\n for f in os.listdir(di):\n if 'ens' == f.split('.')[-1] and f.startswith('emb_mat'):\n config_postproc['emb_mat'][sname][i] = torch.load(f\"{di}/{f}\")\n break\n if all(m is not None for s, mat_l in config_postproc['emb_mat'].items() for m in mat_l):\n skip_instantiate = ['model'] # you have to load data (role, labels) anyways\n return config_postproc, acc_record, skip_instantiate\n\n\ndef log_dir(task, config_new, yml_name_key, dir_log, name_graph, git_rev, timestamp):\n if task == 'train':\n prefix = 'running'\n elif task == 'inference':\n 
prefix = 'INF'\n elif task == 'postproc':\n prefix = 'POST'\n else:\n raise NotImplementedError\n log_dir = f\"{dir_log}/{name_graph}/{prefix}/{timestamp}-{git_rev.strip():s}/\"\n if not os.path.exists(log_dir):\n os.makedirs(log_dir)\n yml_file = f\"{log_dir}/{yml_name_key}.yml\"\n with open(yml_file, 'w') as f:\n yaml.dump(config_new, f, default_flow_style=False, sort_keys=False)\n return log_dir\n\n\n# =============== #\n# ADJ UTILS #\n# =============== #\n\ndef get_deg_torch_sparse(adj):\n return scatter(adj._values(), adj._indices()[0], reduce=\"sum\")\n\n\ndef adj_norm_rw(adj, deg=None, dropedge=0., sort_indices=True):\n \"\"\"\n Normalize adj according to the method of rw normalization.\n Note that sym norm is used in the original GCN paper (kipf),\n while rw norm is used in GraphSAGE and some other variants.\n \n # Procedure:\n # 1. adj add self-connection --> adj'\n # 2. D' deg matrix from adj'\n # 3. norm by D^{-1} x adj'\n if sort_indices is True, we re-sort the indices of the returned adj\n Note that after 'dot' the indices of a node would be in descending order\n rather than ascending order\n \"\"\"\n if type(adj) == torch.Tensor:\n assert deg is None\n assert torch.sum(adj._values()).cpu().long().item() == adj._values().size()[0]\n _deg_orig = get_deg_torch_sparse(adj)\n if dropedge > 0:\n masked_indices = torch.floor(torch.rand(int(adj._values().size()[0] * dropedge)) * adj._values().size()[0]).long()\n adj._values()[masked_indices] = 0\n _deg_dropped = get_deg_torch_sparse(adj)\n else:\n _deg_dropped = _deg_orig\n _deg = torch.repeat_interleave(_deg_dropped, _deg_orig.long())\n _deg = torch.clamp(_deg, min=1)\n _val = adj._values()\n _val /= _deg\n adj_norm = adj\n else:\n assert dropedge == 0., \"not supporting dropedge for scipy csr matrices\"\n assert adj.shape[0] == adj.shape[1]\n diag_shape = (adj.shape[0], adj.shape[1])\n D = adj.sum(1).flatten() if deg is None else deg\n D = np.clip(D, 1, None) # if deg_v == 0, it doesn't matter what value we clip it to. 
\n norm_diag = sp.dia_matrix((1 / D, 0), shape=diag_shape)\n adj_norm = norm_diag.dot(adj)\n if sort_indices:\n adj_norm.sort_indices()\n return adj_norm\n\n\ndef adj_norm_sym(adj, sort_indices=True, add_self_edge=False, dropedge=0.):\n assert adj.shape[0] == adj.shape[1]\n assert adj.data.sum() == adj.size, \"symmetric normalization only supports binary input adj\"\n N = adj.shape[0]\n # drop edges symmetrically\n if dropedge > 0:\n masked_indices = np.random.choice(adj.size, int(adj.size * dropedge))\n adj.data[masked_indices] = 0\n adjT = adj.tocsc()\n data_add = adj.data + adjT.data\n survived_indices = np.where(data_add == 2)[0]\n adj.data *= 0\n adj.data[survived_indices] = 1\n # augment adj with self-connection\n if add_self_edge:\n indptr_new = np.zeros(N + 1)\n neigh_list = [set(adj.indices[adj.indptr[v] : adj.indptr[v+1]]) for v in range(N)]\n for i in range(len(neigh_list)):\n neigh_list[i].add(i)\n neigh_list[i] = np.sort(np.fromiter(neigh_list[i], int, len(neigh_list[i])))\n indptr_new[i + 1] = neigh_list[i].size\n indptr_new = indptr_new.cumsum()\n indices_new = np.concatenate(neigh_list)\n data_new = np.broadcast_to(np.ones(1), indices_new.size)\n adj_aug = sp.csr_matrix((data_new, indices_new, indptr_new), shape=adj.shape)\n # NOTE: no need to explicitly convert dtype, since adj_norm_sym is used for subg only\n else:\n adj_aug = adj\n # normalize\n D = np.clip(adj_aug.sum(1).flatten(), 1, None)\n norm_diag = sp.dia_matrix((np.power(D, -0.5), 0), shape=adj_aug.shape)\n adj_norm = norm_diag.dot(adj_aug).dot(norm_diag)\n if sort_indices:\n adj_norm.sort_indices()\n return adj_norm\n\n\ndef coo_scipy2torch(adj):\n \"\"\"\n convert a scipy sparse COO matrix to torch\n \"\"\"\n values = adj.data\n indices = np.vstack((adj.row, adj.col))\n i = torch.LongTensor(indices)\n v = torch.FloatTensor(values)\n return torch.sparse.FloatTensor(i, v, torch.Size(adj.shape))\n\n\n# ================= #\n# ADJ FILE IO UTILS #\n# ================= #\n\ndef load_adj(prefix, dataset, type_, split_):\n \"\"\"\n Try to load the prestored undirected adj. If the file does not exist, then you MUST return a None\n \"\"\"\n assert split_ in ['full', 'train'], \"UNKNOWN ADJ SPLIT. ONLY ACCEPT [full] or [train]\"\n assert type_ in ['raw', 'undirected'], \"UNKNOWN ADJ TYPE. 
ONLY ACCEPT [raw] or [undirected]\"\n file_adj = f\"{prefix}/{dataset}/adj_{split_}_{type_}.\" + \"{}\"\n if os.path.isfile(file_adj.format('npz')):\n adj = sp.load_npz(file_adj.format('npz'))\n elif os.path.isfile(file_adj.format('npy')):\n adj_d = np.load(file_adj.format('npy'), allow_pickle=True)\n if type(adj_d) == np.ndarray:\n adj_d = adj_d[()]\n else:\n assert type(adj_d) == dict\n indptr = adj_d['indptr']\n indices = adj_d['indices']\n if 'data' in adj_d:\n data = adj_d['data']\n else:\n data = np.broadcast_to(np.ones(1, dtype=np.bool), indices.size)\n num_nodes = indptr.size - 1\n adj = sp.csr_matrix((data, indices, indptr), shape=(num_nodes, num_nodes))\n else:\n adj = None\n return adj\n\n\ndef validate_bin_file(bin_adj_files):\n for md, df in bin_adj_files.items():\n assert set(df.keys()) == set(['indptr', 'indices', 'data'])\n if not os.path.isfile(df['indptr']) or not os.path.isfile(df['indices']):\n return {mmd: None for mmd in bin_adj_files}\n if not os.path.isfile(df['data']):\n df['data'] = ''\n return bin_adj_files\n\n\ndef merge_stat_record(dict_l : List[dict]):\n key_l = [set(d.keys()) for d in dict_l]\n assert all(k == key_l[0] == set([TRAIN, VALID, TEST]) for k in key_l)\n names_stat = set(dict_l[0][TRAIN].keys())\n ret = {n: {TRAIN: [], VALID: [], TEST: []} for n in names_stat}\n for d in dict_l:\n for m in [TRAIN, VALID, TEST]:\n assert set(d[m].keys()) == names_stat\n for k, v in d[m].items():\n ret[k][m].append(v)\n return ret"
] | [
[
"torch.LongTensor",
"torch.Size",
"numpy.clip",
"numpy.asarray",
"numpy.power",
"torch.load",
"torch.from_numpy",
"scipy.sparse.csr_matrix",
"numpy.ones",
"numpy.concatenate",
"sklearn.preprocessing.StandardScaler",
"torch.FloatTensor",
"numpy.load",
"torch.clamp",
"scipy.sparse.dia_matrix",
"numpy.zeros",
"numpy.where",
"numpy.vstack"
]
] |
dmuehlemann/RPGV | [
"18b4216e6cedce40a020a57e1822a363a8a6b60c"
] | [
"3_ gph-low-pass-filter.py"
] | [
"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sam Aug 7 11:50:05 2020\n\n@author: Dirk\n\nThis scripts applies a 10day low pass filter to the ERA5 gph daily means\n\n\"\"\"\n\nimport scipy.signal as signal\nimport matplotlib.pyplot as plt\nfrom pathlib import Path\nimport xarray as xr\n\n\n#Define input and output data\ndata_folder = Path(\"../data/\")\nfilename = data_folder / 'gph-daily-mean.nc'\n\ndata_out = data_folder / 'gph-daily-mean-lowpass_2_0-1.nc'\nfig_out = data_folder / 'fig/gph-daily-mean-lowpass_2_0-1.png'\n\n\n#Load data\nz_all = xr.open_dataset(filename)\n\n\n# First, design the Buterworth filter\nN = 2 # Filter order\nWn = 0.1 # Cutoff frequency\nB, A = signal.butter(N, Wn, output='ba')\n\n\n# temp = z_all.isel(latitude=10, longitude=10).z.loc[\"2000-01-01\":\"2005-01-01\"]\n# Second, apply the filter\nz_allf = xr.apply_ufunc(\n signal.filtfilt, B, A, z_all,\n kwargs=dict(\n axis=0,\n )\n)\n\n\n# Make plots\nd = 10000\na=10150\nb=100\nc=150\nfor i in range(0,10):\n fig = plt.figure()\n ax1 = fig.add_subplot(211)\n plt.plot(z_all.z[d:a, b, c], 'b-')\n plt.plot(z_allf.z[d:a, b, c], 'r-',)\n plt.ylabel(\"Geopotential height\")\n plt.legend(['Original','Filtered'])\n plt.title(\"4-day lowpass filtered geopotential height\")\n ax1.axes.get_xaxis().set_visible(False)\n \n ax1 = fig.add_subplot(212)\n plt.plot(z_all.z[d:a, b, c]-z_allf.z[d:a, b, c], 'b-')\n plt.ylabel(\"Geopotential height\")\n plt.xlabel(\"Days\")\n plt.legend(['Residuals'])\n name= 'fig/filter/gph-daily-mean-lowpass_2_0-25_150d'+str(i)+'.png'\n a = a +5\n b = b +5\n c = c+5\n d = d +5\n fig.savefig(data_folder / name)\n\n\n#save results and plot\n# z_allf.to_netcdf(data_out)\n# fig.savefig(fig_out)\n\n\n"
] | [
[
"matplotlib.pyplot.legend",
"matplotlib.pyplot.title",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.plot",
"scipy.signal.butter",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.ylabel"
]
] |
endremborza/data-bevy | [
"25398124595ffddc201de6a748e84bb24d5885b2"
] | [
"staging/stop_detection/stops.py"
] | [
"from dataclasses import dataclass\nfrom datetime import datetime\n\nimport datazimmer as dz\nimport pandas as pd\nfrom colassigner import ColAssigner, get_all_cols\n\n\nclass NoStops(Exception):\n pass\n\n\n@dataclass\nclass DaySetup:\n work_start: int\n work_end: int\n home_arrive: int\n home_depart: int\n\n\nclass Coordinates(dz.CompositeTypeBase):\n lat = float\n lon = float\n\n\nclass Interval(dz.CompositeTypeBase):\n start = datetime\n end = datetime\n\n\nclass PingFeatures(dz.TableFeaturesBase):\n loc = Coordinates\n datetime = datetime\n device_id = str\n\n\nclass StopFeatures(dz.TableFeaturesBase):\n device_id = str\n destination_label = str\n stay_number = int\n n_events = int\n interval = Interval\n center = Coordinates\n is_home = bool\n is_work = bool\n info = str\n\n\nclass Labeler(ColAssigner):\n def __init__(self, model, day: DaySetup) -> None:\n self.model = model\n self.day = day\n\n def ts(self, df):\n return df[PingFeatures.datetime].view(int) / 10**9\n\n def hour(self, df):\n return df[PingFeatures.datetime].dt.hour\n\n def destination_label(self, df):\n arr = df.loc[:, [PingFeatures.loc.lat, PingFeatures.loc.lon, Labeler.ts]].values\n try:\n return self.model.fit_predict(arr).astype(str)\n except Exception as e:\n assert \"No stop events found\" in str(e)\n raise NoStops(\"hopefully\")\n\n def stay_number(self, df):\n return (\n df[Labeler.destination_label] != df[Labeler.destination_label].shift(1)\n ).cumsum()\n\n def is_worktime(self, df):\n return (df[Labeler.hour] >= self.day.work_start) & (\n df[Labeler.hour] <= self.day.work_end\n )\n\n def is_hometime(self, df):\n return (df[Labeler.hour] >= self.day.home_arrive) | (\n df[Labeler.hour] <= self.day.home_depart\n )\n\n\ndef proc_device_pings(ping_df, model, day: DaySetup):\n return (\n ping_df.sort_values(PingFeatures.datetime)\n .pipe(Labeler(model, day))\n .pipe(_gb_stop)\n )\n\n\ndef _gb_stop(labeled_df):\n dt_col = PingFeatures.datetime\n return (\n labeled_df.groupby([Labeler.stay_number, Labeler.destination_label])\n .agg(\n **{\n StopFeatures.n_events: pd.NamedAgg(dt_col, \"count\"),\n StopFeatures.interval.start: pd.NamedAgg(dt_col, \"first\"),\n StopFeatures.interval.end: pd.NamedAgg(dt_col, \"last\"),\n StopFeatures.center.lon: pd.NamedAgg(PingFeatures.loc.lon, \"mean\"),\n StopFeatures.center.lat: pd.NamedAgg(PingFeatures.loc.lat, \"mean\"),\n \"home_rate\": pd.NamedAgg(Labeler.is_hometime, \"mean\"),\n \"work_rate\": pd.NamedAgg(Labeler.is_worktime, \"mean\"),\n }\n )\n .reset_index()\n .assign(\n **{\n \"dur\": lambda df: (\n df[StopFeatures.interval.end] - df[StopFeatures.interval.start]\n ).dt.total_seconds()\n * (df[StopFeatures.destination_label] != \"-1\"),\n StopFeatures.is_work: lambda df: _is_maxw(df, \"work_rate\"),\n StopFeatures.is_home: lambda df: _is_maxw(df, \"home_rate\"),\n StopFeatures.info: \"N/A\",\n StopFeatures.device_id: \"0\",\n }\n )\n .loc[:, get_all_cols(StopFeatures)]\n )\n\n\ndef _is_maxw(df, rate_col):\n gb_cols = [\"_week\", StopFeatures.destination_label]\n wdf = df.assign(\n _week=df[StopFeatures.interval.start].dt.isocalendar().week,\n target=df[\"dur\"] * df[rate_col],\n )\n wsums = wdf.groupby(gb_cols)[\"target\"].sum()\n wmaxs = wsums.groupby(\"_week\").transform(\"max\")\n return (wsums == wmaxs).reindex(wdf[gb_cols]).values\n"
] | [
[
"pandas.NamedAgg"
]
] |
zhubonan/castepxbin | [
"24b875cf44b83d5eac75b52cf45e378a3361e90e"
] | [
"castepxbin/pdos.py"
] | [
"\"\"\"\nReader module for CASTEP pdos_bin\n\nWritten based on the example `pdos_bin.f90` file in open-source OptaDos code\n\"\"\"\nfrom enum import Enum, unique\nimport numpy as np\nfrom scipy.io import FortranFile\n\n\n@unique\nclass SpinEnum(Enum):\n \"\"\"\n Enum type for Spin. Only up and down.\n Usage: Spin.up, Spin.down.\n \"\"\"\n up, down = (1, -1)\n\n def __int__(self):\n return self.value\n\n def __float__(self):\n return float(self.value)\n\n def __str__(self):\n return str(self.value)\n\n\n@unique\nclass OrbitalType(Enum):\n \"\"\"\n Enum type for orbital type. Indices are basically the azimuthal quantum\n number, l.\n \"\"\"\n\n s = 0\n p = 1\n d = 2\n f = 3\n\n def __str__(self):\n return str(self.name)\n\n\n\n@unique\nclass OrbitalEnum(Enum):\n \"\"\"\n Enum type for specific orbitals. The value are the name reported by CASTEP.\n \"\"\"\n\n s = \"S\"\n px = \"Px\"\n py = \"Py\"\n pz = \"Pz\"\n dxy = \"Dxy\"\n dyz = \"Dzy\"\n dz2 = \"Dzz\"\n dxz = \"Dzx\"\n dx2 = \"Dxx-yy\"\n f_xxx = \"Fxxx\"\n f_yyy = \"Fyyy\"\n f_zzz = \"Fzzz\"\n f_xyz = \"Fxyz\"\n f_z_xx_yy = \"Fz(xx-yy)\"\n f_y_zz_xx = \"Fy(zz-xx)\"\n f_x_yy_zz = \"Fx(yy-zz)\"\n\n def __int__(self):\n return self.value\n\n def __str__(self):\n return str(self.name)\n\n @property\n def orbital_type(self):\n \"\"\"\n Returns OrbitalType of an orbital.\n \"\"\"\n return OrbitalType[self.name[0]]\n\n\ndef read_pdos_bin(filename, endian='big'):\n \"\"\"\n Read the pdos_bin file generated by CASTEP Spectral task.\n\n Args:\n filename (str): name of the file to be read\n\n Returns:\n A dictionary of the data that have been read.\n the weights of each orbital in stored in the 'pdos_weights' array\n with dimension (n_orbital, n_max_eign, n_kpoints, n_spin)\n \"\"\"\n esymbol = '>' if endian.upper() == 'BIG' else '>'\n dint = np.dtype(esymbol + 'i4')\n ddouble = np.dtype(esymbol + 'f8')\n dch80 = np.dtype(esymbol + 'a80')\n diarray = lambda x: '{}({},)i4'.format(esymbol, x)\n ddarray = lambda x: '{}({},)f8'.format(esymbol, x)\n\n with FortranFile(filename, header_dtype=np.dtype('>u4')) as fhandle:\n fversion = fhandle.read_record(ddouble)[0]\n fheader = fhandle.read_record(dch80)[0].decode()\n num_kpoints = fhandle.read_record(dint)[0]\n num_spins = fhandle.read_record(dint)[0]\n num_popn_orb = fhandle.read_record(dint)[0]\n max_eignenv = fhandle.read_record(dint)[0]\n\n # Now we start to read more data\n species = fhandle.read_record(diarray(num_popn_orb))\n ion = fhandle.read_record(diarray(num_popn_orb))\n am_channel = fhandle.read_record(diarray(num_popn_orb))\n\n # Now we initialize the storage space for the weights\n pdos_weights = np.zeros(\n (num_popn_orb, max_eignenv, num_kpoints, num_spins),\n dtype=float)\n\n kpoint_positions = np.zeros((num_kpoints, 3), dtype=float)\n num_eigenvalues = np.zeros(num_spins, dtype=int)\n # Now we start to read the actual data\n for nk in range(num_kpoints):\n _, kpoint_positions[nk, :] = fhandle.read_record('>i4', '>(3,)f8')\n for ns in range(num_spins):\n _ = fhandle.read_record(dint)\n num_eigenvalues[ns] = fhandle.read_record(dint)\n for nb in range(num_eigenvalues[ns]):\n pdos_weights[:, nb, nk, ns] = fhandle.read_record(\n '>({},)f8'.format(num_popn_orb))\n\n output = {\n 'fversion': fversion,\n 'fheader': fheader,\n 'num_kpoints': num_kpoints,\n 'num_spins': num_spins,\n 'num_popn_orb': num_popn_orb,\n 'max_eigenenv': max_eignenv,\n 'species': species,\n 'ion': ion,\n 'am_channel': am_channel,\n 'pdos_weights': pdos_weights,\n 'kpoints_positions': kpoint_positions,\n 
'num_eigenvalues': num_eigenvalues,\n 'pdos_weights': pdos_weights,\n }\n return output\n\ndef reorder_pdos_data(input_items, pymatgen_labels=True, use_string_as_keys=False):\n \"\"\"\n Arrange the PDOS weights so it is more meaningful\n\n The result can be used to compute PDOS for creating CompleteDos object\n that can be used for Pymatgen\n\n Args:\n input_items (dict): A dictionary of the pdos information, use the\n output of `read_pdos` function. \n pymatgen_labels (bool): Use pymatgen Enum as the keys of the result dictionary. \n \n\n Returns:\n A dictionary of {Site_index: {Orbital: {Spin: weight}}}\n \"\"\"\n if pymatgen_labels is True:\n try:\n from pymatgen.electronic_structure.core import Orbital as POrbital\n from pymatgen.electronic_structure.core import Spin as PSpin\n except ImportError:\n pymatgen_labels = False\n\n if pymatgen_labels:\n # Note that s-p labels are inferreed from dot castep output\n # f labels - I know the first three is among the first three.\n # There is no way to tell if they are correct, f_1 is not very informative from VASP....\n orbital_mapping = [[POrbital.s], [POrbital.px, POrbital.py, POrbital.pz],\n [\n POrbital.dz2, POrbital.dyz, POrbital.dxz, POrbital.dx2,\n POrbital.dxy\n ],\n [\n POrbital.f_1, POrbital.f_2, POrbital.f_3, POrbital.f0,\n POrbital.f1, POrbital.f2, POrbital.f3\n ]]\n Spin = PSpin\n else:\n # These are the orders inferred from CASTEP output\n orbital_mapping = [[OrbitalEnum.s], [OrbitalEnum.px, OrbitalEnum.py, OrbitalEnum.pz],\n [\n OrbitalEnum.dz2, OrbitalEnum.dyz, OrbitalEnum.dxz, OrbitalEnum.dx2,\n OrbitalEnum.dxy\n ],\n [\n OrbitalEnum.f_xxx, OrbitalEnum.f_yyy, OrbitalEnum.f_zzz, OrbitalEnum.f_xyz,\n OrbitalEnum.f_z_xx_yy, OrbitalEnum.f_y_zz_xx, OrbitalEnum.f_x_yy_zz\n ]]\n Spin = SpinEnum\n\n # We take average of each kpoints from here\n # One might task why not take account the kpoints weight?\n # because it should be taken account of in the TDOS\n weights = input_items['pdos_weights']\n # Specie index for all orbitals\n species = input_items['species']\n # Index of each ion for all orbitals\n ion = input_items['ion']\n num_spins = input_items['num_spins']\n # Angular momentum channel all orbitals\n am_channel = input_items['am_channel']\n\n unique_speices = np.unique(species)\n unique_speices.sort()\n site_index = 0\n output_data = {}\n # Initialise storage space\n for specie in unique_speices:\n specie_mask = specie == species\n # Total number of ions for this specie\n total_ions = ion[specie_mask].max()\n # Note that indice are from one, not zero\n for nion in range(1, total_ions + 1):\n # Iterate through each ion\n ion_mask = (ion == nion) & specie_mask\n max_am = am_channel[ion_mask].max()\n site_dict = {} # {Orbital: {Spin: weight}...}\n for am in range(max_am + 1):\n # Collect the angular momentum channels\n ion_am_mask = (am_channel == am) & ion_mask\n # Indices of each matched channels\n ion_am_idx = np.where(ion_am_mask)[0]\n for iam, iloc in enumerate(ion_am_idx):\n # iloc - index of the oribtal\n # You can have 4 orbitals for p channel - they have difference n numbers\n this_orb = orbital_mapping[am][iam % (2 * am + 1)]\n orb_dict = {} # {Spin: weight...}\n if num_spins == 2:\n for ispin, espin in enumerate((Spin.up, Spin.down)):\n # Sumup\n wtmp = weights[iloc, :, :, ispin]\n orb_dict[espin] = wtmp\n else:\n orb_dict[Spin.up] = weights[iloc, :, :, 0]\n\n # Now we have the orb_dict populated\n # Combined the weights if this orbital has been seen...\n if this_orb in site_dict:\n site_dict[this_orb] = 
_merge_weights(\n site_dict[this_orb], orb_dict)\n else:\n site_dict[this_orb] = orb_dict\n # Now we populated site_dict add it to output_data\n output_data[site_index] = site_dict\n site_index += 1\n\n return output_data\n\n\ndef compute_pdos(pdos_bin, eigenvalues, kpoints_weights, bins):\n \"\"\"\n Compute the PDOS from eigenvalue and kpoint weights\n \n Args:\n pdos_bin (str): Path to the binary pdos_bin file\n eigenvealues (str): Eigenvalue as {Spin: array_)}.\n kpoints_weights (np.ndarray): Weights of each kpoints.\n bins: The bins for computing the density of states.\n \"\"\"\n\n # Walk through the ordred_weights dictionary and compute PDOS for each weight\n ordered_weights = reorder_pdos_data(read_pdos_bin(pdos_bin))\n pdos_data = {}\n for site, porbs_dict in ordered_weights.items():\n porbs_outdict = {}\n for orb, pspin_dict in porbs_dict.items():\n pdos_orbit = {\n spin: np.histogram(\n eigenvalue_set,\n bins=bins,\n weights=kpoints_weights * pspin_dict[\n spin] # weight (nk, ); pspin_dict[spin] (nk, nb)\n )[0]\n for spin, eigenvalue_set in eigenvalues.items()\n }\n porbs_outdict[orb] = pdos_orbit\n pdos_data[site] = porbs_outdict\n return pdos_data\n\n\ndef _merge_weights(spin_d1, spin_d2):\n \"\"\"Sum the weights stored in two dictionaries with keys being the spins\"\"\"\n if len(spin_d1) != len(spin_d2):\n raise RuntimeError(\"Critical - mismatch spin-dict length\")\n out = {}\n for spin in spin_d1:\n out[spin] = spin_d1[spin] + spin_d2[spin]\n return out\n"
] | [
[
"numpy.unique",
"numpy.dtype",
"numpy.histogram",
"numpy.where",
"numpy.zeros"
]
] |
TwinMooon/transformers-plus-performers | [
"c17d6473deb5316363f60bb2ddd1007d4364abe4"
] | [
"src/transformers/modeling_tf_performer_attention.py"
] | [
"from typing import Optional, Union\nimport logging\nimport numpy as np\nimport tensorflow as tf\n\n\nfrom .configuration_performer_attention import PerformerAttentionConfig\nfrom .modeling_utils import (\n find_pruneable_heads_and_indices,\n prune_linear_layer\n)\n\nKERNEL_CALLABLES = {\n 'cosh': lambda x, h: tf.concat((tf.exp(h + x), tf.exp(h - x)), dim=-1),\n 'exp': lambda x, h: tf.exp(h + x), # Default\n 'elu': lambda x: tf.nn.elu(x) + 1,\n 'relu': tf.nn.relu\n}\n\nSHORT_SEQUENCE_BEHAVIOR_CALLABLES = {\n 'use_softmax_eval_only': lambda L, M, training: False if training else L < 2.0 * M,\n 'use_softmax_eval_and_train': lambda L, M, training: L < 2.0 * M, \n 'never_use_softmax': lambda L, M, training: False\n}\n\n\nclass TFPerformerAttention(tf.keras.layers.Layer):\n def __init__(self, config: Optional[Union[dict, PerformerAttentionConfig]] = None, **kwargs):\n super().__init__()\n \n if config is not None:\n # config can either be a dictionary or a PerformerAttentionConfig object\n if not isinstance(config, dict):\n config = config.__dict__\n \n # Just copy over all the parameters\n self.__dict__.update(config)\n else:\n # Make sure we have all the default values filled in\n config = PerformerAttentionConfig(**kwargs)\n kwargs = config.__dict__\n \n # kwargs take precedence over the default values that might be stored in the config object\n self.__dict__.update(kwargs)\n \n if self.num_heads is None or self.d_model is None:\n raise ValueError(\"PerformerAttention: num_heads and d_model must be non-None\")\n \n self.dropout = tf.keras.layers.Dropout(rate=self.attention_dropout)\n self.calls_since_last_redraw = 0\n self.random_features = None\n \n behavior = self.short_sequence_behavior\n if not behavior:\n behavior = 'never_use_softmax' if self.kernel_type == 'relu' else 'use_softmax_eval_only'\n self.should_fallback_to_softmax = SHORT_SEQUENCE_BEHAVIOR_CALLABLES[behavior]\n \n elif self.kernel_type == 'relu' and behavior != 'never_use_softmax':\n raise ValueError(f\"PerformerAttention: short_sequence_behavior = {behavior} cannot be combined with the relu \"\n \"kernel type\")\n \n elif isinstance(behavior, str):\n self.should_fallback_to_softmax = SHORT_SEQUENCE_BEHAVIOR_CALLABLES[behavior]\n elif callable(behavior):\n self.should_fallback_to_softmax = behavior\n else:\n raise ValueError(\"PerformerAttention: short_sequence_behavior must be either str or Callable\")\n \n self.kernel_fn = KERNEL_CALLABLES[self.kernel_type]\n\n assert self.d_model % self.num_heads == 0\n \n if self.use_qkv_linear_layers:\n self.q_lin = tf.keras.layers.Dense(units=self.d_model)\n self.k_lin = tf.keras.layers.Dense(units=self.d_model)\n self.v_lin = tf.keras.layers.Dense(units=self.d_model)\n \n self.out_lin = tf.keras.layers.Dense(units=self.d_model)\n\n self.pruned_heads = set()\n\n def prune_heads(self, heads):\n attention_head_size = self.d_model // self.num_heads\n if len(heads) == 0:\n return\n heads, index = find_pruneable_heads_and_indices(heads, self.num_heads, attention_head_size, self.pruned_heads)\n # Prune linear layers\n if self.use_qkv_linear_layers:\n self.q_lin = prune_linear_layer(self.q_lin, index)\n self.k_lin = prune_linear_layer(self.k_lin, index)\n self.v_lin = prune_linear_layer(self.v_lin, index)\n \n self.out_lin = prune_linear_layer(self.out_lin, index, dim=1)\n # Update hyper params\n self.num_heads = self.num_heads - len(heads)\n self.d_model = attention_head_size * self.num_heads\n self.pruned_heads = self.pruned_heads.union(heads)\n \n def redraw_features_now(self):\n 
self._generate_feature_matrix()\n \n if self.training and self.redraw_verbose:\n logging.info(\"PerformerAttention: Just redrew random features.\")\n \n self.calls_since_last_redraw = 0\n\n def call(self, query, key, value, mask=None, head_mask=None, output_attentions=False):\n \"\"\"\n Parameters:\n query: torch.tensor(bs, seq_length, dim)\n key: torch.tensor(bs, seq_length, dim)\n value: torch.tensor(bs, seq_length, dim)\n mask: torch.tensor(bs, seq_length)\n\n Returns:\n weights: tf.tensor(bs, num_heads, seq_length, seq_length) Attention weights context: tf.tensor(bs,\n seq_length, dim) Contextualized layer. Optional: only if `output_attentions=True`\n \"\"\"\n bs, q_length, dim = query.size()\n k_length = key.size(1)\n # assert dim == self.d_model, 'Dimensions do not match: %s input vs %s configured' % (dim, self.d_model)\n # assert key.size() == value.size()\n\n dim_per_head = self.d_model // self.num_heads\n mask_reshp = (bs, 1, 1, k_length)\n\n def shape(x):\n \"\"\" separate heads \"\"\"\n new_shape = tf.concat((x.shape[:-1], tf.constant([self.num_heads, dim_per_head])), axis=0)\n return tf.transpose(tf.reshape(x, new_shape), perm=[0, 2, 1, 3])\n \n if self.use_qkv_linear_layers:\n q = self.q_lin(query)\n k = self.k_lin(key)\n v = self.v_lin(value)\n else:\n q, k, v = query, key, value\n \n # (bs, num_heads, q_length, dim_per_head)\n q, k, v = (shape(x) for x in (q, k, v))\n \n # If the sequence length is short enough that FAVOR+ would use considerably more time and/or memory than just\n # using softmax attention, use softmax. This works because FAVOR+ is an unbiased estimator of softmax attention.\n m = round(dim_per_head * np.log(dim_per_head)) # m is the number of random features\n if self.should_fallback_to_softmax(q_length, m, self.training):\n scores = q @ tf.linalg.matrix_transpose(k) / (dim ** 0.5)\n \n if mask is not None:\n mask = tf.reshape((mask == 0), mask_reshp) # .expand_as(scores) # (bs, num_heads, q_length, k_length)\n scores -= 1e9 * tf.cast(mask, q.dtype) # (bs, num_heads, q_length, k_length)\n \n attn_map = tf.nn.softmax(scores, dim=-1)\n attn_map = self.dropout(attn_map) # (bs, num_heads, q_length, k_length)\n return self._finalize_attention_output(attn_map @ v, head_mask, attn_map)\n \n # When we're using FAVOR+ we can't output the attention matrix\n if output_attentions:\n raise ValueError(\"TFPerformerAttention: Can't output attention maps when using FAVOR+ linear attention.\")\n \n self._redraw_features_if_needed()\n \n # Get the transformed values of Q and K\n q_prime, k_prime = self.get_projected_queries_and_keys(q, k)\n return self.compute_attention_with_projected_queries_and_keys(q_prime, k_prime, v, mask, head_mask)\n \n # Turns Q into Q', K into K'\n def get_projected_queries_and_keys(self, q, k):\n # Broadcast the feature matrix across the batch dimension\n # new_shape = list(q.shape)\n # new_shape[-2] = self.random_features.shape[-2]\n W_t = tf.linalg.matrix_transpose(self.random_features) # .expand(new_shape)\n \n # Instead of dividing the product QK^T by sqrt(d), we divide Q and K by the 4th root of d.\n q = q / (self.d_model ** 0.25)\n k = k / (self.d_model ** 0.25)\n \n projected_q = q @ W_t\n projected_k = k @ W_t\n \n # Special logic for kernels that attempt to approximate softmax\n if self.kernel_type in ('cosh', 'exp'):\n # The h(x) function is defined in Lemma 1 in Choromanski et al. pg. 4 as exp(-||x||**2 / 2). 
For numerical\n # stability we leverage the fact that exp(x)*exp(y) = exp(x + y) here and delay computing the exp().\n h_of_q = -tf.reduce_sum(q ** 2, dim=-1, keepdim=True) / 2\n h_of_k = -tf.reduce_sum(k ** 2, dim=-1, keepdim=True) / 2\n \n # Compute the numerical stabilizer that we subtract from the input to exp(). For some reason the original\n # Jax implementation uses different types of stabilizers for queries vs. keys, and we follow that here.\n q_stabilizer = tf.math.reduce_max(h_of_q, axis=-1, keepdims=True)\n \n # This is just a scalar\n k_stabilizer = tf.math.reduce_max(h_of_k)\n \n q_kernel_output = self.kernel_fn(projected_q - q_stabilizer, h_of_q)\n k_kernel_output = self.kernel_fn(projected_k - k_stabilizer, h_of_k)\n \n # By multiplying by 1/sqrt(m), we ensure the final matrix product will contain a factor of 1/m. This means\n # each row of Q'K'^T can be interpreted as an average over the exp(omega^T * q) * exp(omega^T * k) terms.\n normalizing_constant = (q_kernel_output.shape[-1] ** -0.5)\n \n q_prime = normalizing_constant * (q_kernel_output + self.kernel_epsilon)\n k_prime = normalizing_constant * (k_kernel_output + self.kernel_epsilon)\n return q_prime, k_prime\n \n # Generalized attention (ReLU, ELU...)\n else:\n return (self.kernel_fn(x) + self.kernel_epsilon for x in (projected_q, projected_k))\n \n def compute_attention_with_projected_queries_and_keys(self, q_prime, k_prime, v, mask = None, head_mask = None):\n # Apply the padding mask to K'. Also applying it to Q' would be redundant.\n if mask is not None:\n k_prime *= tf.expand_dims(tf.expand_dims(mask, 1), -1)#.expand_as(k_prime)\n \n k_prime_t = tf.linalg.matrix_transpose(k_prime)\n output = q_prime @ (k_prime_t @ v)\n \n # Ensure that the output vectors are convex combinations of input vectors; that is,\n # the implied attention scores sum to 1\n if self.normalize_output: \n # Equivalent to multiplying K'^T by a ones vector\n d = q_prime @ tf.expand_dims(tf.math.reduce_sum(k_prime), -1)\n \n # Avoid dividing by very small numbers\n d += 2 * self.normalization_stabilizer * (tf.abs(d) <= self.normalization_stabilizer)\n output /= d\n \n return self._finalize_attention_output(output, head_mask)\n \n def _finalize_attention_output(self, context, head_mask=None, att_map_to_output=None):\n def unshape(x):\n \"\"\" group heads \"\"\"\n x = tf.transpose(context, perm=[0, 2, 1, 3]) # [...seq_len, num_heads, dim_per_head]\n new_last_dim = tf.constant(x.shape[-2] * x.shape[-1]) # Multiply num_heads * dim_per_head\n return tf.reshape(x, tf.concat((x.shape[:-2], new_last_dim), axis=0))\n \n # Mask heads if we want to\n if head_mask is not None:\n context = context * head_mask\n \n context = unshape(context) # (bs, q_length, dim)\n context = self.out_lin(context) # (bs, q_length, dim)\n\n if att_map_to_output:\n return context, att_map_to_output\n else:\n return context,\n\n def _generate_feature_matrix(self):\n dim_per_head = self.d_model // self.num_heads\n num_rows = round(dim_per_head * np.log(dim_per_head))\n \n if not self.use_orthogonal_features:\n return tf.random.normal((num_rows, dim_per_head))\n \n def get_square_block(size):\n with tf.device('/CPU:0'):\n unstructured_block = tf.random.normal((size, size))\n orthog, r = tf.linalg.qr(unstructured_block)\n\n return orthog.t()\n\n num_full_blocks = num_rows // dim_per_head\n block_list = [get_square_block(dim_per_head) for _ in range(num_full_blocks)]\n \n remaining_rows = num_rows - num_full_blocks * dim_per_head\n if remaining_rows > 0:\n q = 
get_square_block(dim_per_head)\n block_list.append(q[:remaining_rows])\n \n final_matrix = tf.concat(block_list)\n \n # This option yields SMREG\n if self.regularize_feature_norms:\n final_matrix *= dim_per_head ** 0.5\n else:\n # Hack to make the matrix columns have the norm we would expect them to have if they were sampled straight\n # from a Gaussian, instead of being all norm 1 since they went through QR decomposition\n multiplier = tf.random.normal((num_rows, dim_per_head)).norm(dim = 1)\n final_matrix = tf.linalg.diag(multiplier) @ final_matrix\n\n self.random_features = final_matrix\n \n def _redraw_features_if_needed(self):\n # We haven't created the projection matrix yet, let's create it\n if self.random_features is None:\n self._generate_feature_matrix()\n \n elif self.feature_redraw_interval is not None:\n if self.redraw_stochastically:\n # Flip a (very biased) coin\n if np.random.default_rng().binomial(1, 1. / self.feature_redraw_interval):\n self.redraw_features_now()\n \n # It's time to redraw the projection matrix\n elif self.calls_since_last_redraw >= self.feature_redraw_interval:\n self.redraw_features_now()\n \n # Keep track of how many forward passes we do before we redraw again\n else:\n self.calls_since_last_redraw += 1\n"
] | [
[
"tensorflow.device",
"tensorflow.concat",
"tensorflow.reduce_sum",
"tensorflow.cast",
"numpy.random.default_rng",
"tensorflow.math.reduce_sum",
"numpy.log",
"tensorflow.nn.elu",
"tensorflow.math.reduce_max",
"tensorflow.keras.layers.Dense",
"tensorflow.linalg.diag",
"tensorflow.exp",
"tensorflow.linalg.qr",
"tensorflow.nn.softmax",
"tensorflow.linalg.matrix_transpose",
"tensorflow.transpose",
"tensorflow.constant",
"tensorflow.reshape",
"tensorflow.expand_dims",
"tensorflow.keras.layers.Dropout",
"tensorflow.random.normal",
"tensorflow.abs"
]
] |
ChaplinMarchais/cortana-intelligence-product-detection-from-images | [
"2e5370098f9f83cd27cdaba2eab675f3c30ae157"
] | [
"technical_deployment/train_model/imdb_data.py"
] | [
"# --------------------------------------------------------\n# Fast R-CNN\n# Copyright (c) 2015 Microsoft\n# Licensed under The MIT License [see LICENSE for details]\n# Written by Ross Girshick\n# --------------------------------------------------------\n\nfrom __future__ import print_function\nfrom builtins import range\nimport sys, os\nfrom helpers import *\nimport scipy.sparse\nimport scipy.io as sio\nimport pickle as cp\nimport numpy as np\nimport fastRCNN\n\n\nclass imdb_data(fastRCNN.imdb):\n def __init__(self, image_set, classes, maxNrRois, imgDir, roiDir, cacheDir, boAddGroundTruthRois):\n fastRCNN.imdb.__init__(self, image_set + \".cache\") #'data_' + image_set)\n self._image_set = image_set\n self._maxNrRois = maxNrRois\n self._imgDir = imgDir\n self._roiDir = roiDir\n self._cacheDir = cacheDir #cache_path\n self._imgSubdirs ={'train': ['positive', 'negative'], 'test': ['testImages']}\n self._classes = classes\n self._class_to_ind = dict(zip(self.classes, range(self.num_classes)))\n self._image_ext = '.jpg'\n self._image_index, self._image_subdirs = self._load_image_set_index()\n self._roidb_handler = self.selective_search_roidb\n self._boAddGroundTruthRois = boAddGroundTruthRois\n\n\n #overwrite parent definition\n @property\n def cache_path(self):\n return self._cacheDir\n\n def image_path_at(self, i):\n \"\"\"\n Return the absolute path to image i in the image sequence.\n \"\"\"\n return self.image_path_from_index(self._image_subdirs[i], self._image_index[i])\n\n def image_path_from_index(self, subdir, fname):\n \"\"\"\n Construct an image path from the image's \"index\" identifier.\n \"\"\"\n image_path = os.path.join(self._imgDir, subdir, fname)\n assert os.path.exists(image_path), \\\n 'Path does not exist: {}'.format(image_path)\n return image_path\n\n def _load_image_set_index(self):\n \"\"\"\n Compile list of image indices and the subdirectories they are in.\n \"\"\"\n image_index = []\n image_subdirs = []\n for subdir in self._imgSubdirs[self._image_set]:\n imgFilenames = getFilesInDirectory(os.path.join(self._imgDir,subdir), self._image_ext)\n image_index += imgFilenames\n image_subdirs += [subdir] * len(imgFilenames)\n return image_index, image_subdirs\n\n def gt_roidb(self):\n \"\"\"\n Return the database of ground-truth regions of interest.\n\n This function loads/saves from/to a cache file to speed up future calls.\n \"\"\"\n cache_file = os.path.join(self.cache_path, self.name + '_gt_roidb.pkl')\n if os.path.exists(cache_file):\n with open(cache_file, 'rb') as fid:\n roidb = cp.load(fid)\n print ('{} gt roidb loaded from {}'.format(self.name, cache_file))\n return roidb\n\n gt_roidb = [self._load_annotation(i) for i in range(self.num_images)]\n with open(cache_file, 'wb') as fid:\n cp.dump(gt_roidb, fid, cp.HIGHEST_PROTOCOL)\n print ('wrote gt roidb to {}'.format(cache_file))\n\n return gt_roidb\n\n def selective_search_roidb(self):\n \"\"\"\n Return the database of selective search regions of interest.\n Ground-truth ROIs are also included.\n\n This function loads/saves from/to a cache file to speed up future calls.\n \"\"\"\n cache_file = os.path.join(self.cache_path,\n self.name + '_selective_search_roidb.pkl')\n\n if os.path.exists(cache_file):\n with open(cache_file, 'rb') as fid:\n if sys.version_info[0] < 3: \n roidb = cp.load(fid)\n else: \n roidb = cp.load(fid, encoding='latin1')\n print ('{} ss roidb loaded from {}'.format(self.name, cache_file))\n return roidb\n\n gt_roidb = self.gt_roidb()\n ss_roidb = self._load_selective_search_roidb(gt_roidb)\n\n 
#add ground truth ROIs\n if self._boAddGroundTruthRois:\n roidb = self.merge_roidbs(gt_roidb, ss_roidb)\n else:\n roidb = ss_roidb\n\n #Keep max of e.g. 2000 rois\n if self._maxNrRois and self._maxNrRois > 0:\n print (\"Only keeping the first %d ROIs..\" % self._maxNrRois)\n for i in range(self.num_images):\n gt_overlaps = roidb[i]['gt_overlaps']\n gt_overlaps = gt_overlaps.todense()[:self._maxNrRois]\n gt_overlaps = scipy.sparse.csr_matrix(gt_overlaps)\n roidb[i]['gt_overlaps'] = gt_overlaps\n roidb[i]['boxes'] = roidb[i]['boxes'][:self._maxNrRois,:]\n roidb[i]['gt_classes'] = roidb[i]['gt_classes'][:self._maxNrRois]\n\n with open(cache_file, 'wb') as fid:\n cp.dump(roidb, fid, cp.HIGHEST_PROTOCOL)\n print ('wrote ss roidb to {}'.format(cache_file))\n\n return roidb\n\n def _load_selective_search_roidb(self, gt_roidb):\n # box_list = nrImages x nrBoxes x 4\n box_list = []\n for imgFilename, subdir in zip(self._image_index, self._image_subdirs):\n roiPath = \"{}/{}/{}.roi.txt\".format(self._roiDir, subdir, imgFilename[:-4])\n assert os.path.exists(roiPath), \"Error: rois file not found: \" + roiPath\n rois = np.loadtxt(roiPath, np.int32)\n box_list.append(rois)\n return self.create_roidb_from_box_list(box_list, gt_roidb)\n\n def _load_annotation(self, imgIndex):\n \"\"\"\n Load image and bounding boxes info from human annotations.\n\t\t\"\"\"\n #negative images do not have any ground truth annotations\n if self._image_subdirs[imgIndex].lower() == \"negative\":\n return None\n\n imgPath = self.image_path_at(imgIndex)\n bboxesPaths = imgPath[:-4] + \".bboxes.tsv\"\n labelsPaths = imgPath[:-4] + \".bboxes.labels.tsv\"\n assert os.path.exists(bboxesPaths), \"Error: ground truth bounding boxes file not found: \" + bboxesPaths\n assert os.path.exists(labelsPaths), \"Error: ground truth labels file not found: \" + bboxesPaths\n bboxes = np.loadtxt(bboxesPaths, np.float32)\n labels = readFile(labelsPaths)\n\n # in case there's only one annotation and numpy read the array as single array,\n # we need to make sure the input is treated as a multi dimensional array instead of a list/ 1D array\n #if len(bboxes.shape) == 1:\n if len(bboxes)>0 and type(bboxes[0]) == np.float32:\n bboxes = np.array([bboxes])\n\n #remove boxes marked as 'undecided' or 'exclude'\n indicesToKeep = find(labels, lambda x: x!='EXCLUDE' and x!='UNDECIDED')\n bboxes = [bboxes[i] for i in indicesToKeep]\n labels = [labels[i] for i in indicesToKeep]\n\n # Load object bounding boxes into a data frame.\n num_objs = len(bboxes)\n boxes = np.zeros((num_objs,4), dtype=np.uint16)\n gt_classes = np.zeros(num_objs, dtype=np.int32)\n overlaps = np.zeros((num_objs, self.num_classes), dtype=np.float32)\n for bboxIndex,(bbox,label) in enumerate(zip(bboxes,labels)):\n cls = self._class_to_ind[label] #.decode('utf-8')]\n boxes[bboxIndex, :] = bbox\n gt_classes[bboxIndex] = cls\n overlaps[bboxIndex, cls] = 1.0\n\n overlaps = scipy.sparse.csr_matrix(overlaps)\n\n return {'boxes' : boxes,\n 'gt_classes': gt_classes,\n 'gt_overlaps' : overlaps,\n 'flipped' : False}\n\n # main call to compute per-calass average precision\n # shape of all_boxes: e.g. 
21 classes x 4952 images x 58 rois x 5 coords+score\n # (see also test_net() in fastRCNN\\test.py)\n def evaluate_detections(self, all_boxes, output_dir, use_07_metric=False, overlapThreshold = 0.5):\n aps = []\n for classIndex, className in enumerate(self._classes):\n if className != '__background__':\n rec, prec, ap = self._evaluate_detections(classIndex, all_boxes, use_07_metric, overlapThreshold)\n aps += [[className,ap]]\n print('AP for {:>15} = {:.4f}'.format(className, ap))\n print('Mean AP = {:.4f}'.format(np.nanmean(getColumn(aps,1))))\n return aps\n\n def _evaluate_detections(self, classIndex, all_boxes, use_07_metric = False, overlapThreshold = 0.5):\n \"\"\"\n Top level function that does the PASCAL VOC evaluation.\n\n [overlapThreshold]: Overlap threshold (default = 0.5)\n [use_07_metric]: Whether to use VOC07's 11 point AP computation (default False)\n \"\"\"\n assert (len(all_boxes) == self.num_classes)\n assert (len(all_boxes[0]) == self.num_images)\n\n # load ground truth annotations for this class\n gtInfos = []\n for imgIndex in range(self.num_images):\n imgPath = self.image_path_at(imgIndex)\n imgSubir = os.path.normpath(imgPath).split(os.path.sep)[-2]\n if imgSubir != 'negative':\n gtBoxes, gtLabels = readGtAnnotation(imgPath)\n gtBoxes = [box for box, label in zip(gtBoxes, gtLabels) if label == self.classes[classIndex]] #.decode('utf-8')\n else:\n gtBoxes = []\n gtInfos.append({'bbox': np.array(gtBoxes),\n 'difficult': [False] * len(gtBoxes),\n 'det': [False] * len(gtBoxes)})\n\n # parse detections for this class\n # shape of all_boxes: e.g. 21 classes x 4952 images x 58 rois x 5 coords+score\n detBboxes = []\n detImgIndices = []\n detConfidences = []\n for imgIndex in range(self.num_images):\n dets = all_boxes[classIndex][imgIndex]\n if dets != []:\n for k in range(dets.shape[0]):\n detImgIndices.append(imgIndex)\n detConfidences.append(dets[k, -1])\n # the VOCdevkit expects 1-based indices\n detBboxes.append([dets[k, 0] + 1, dets[k, 1] + 1, dets[k, 2] + 1, dets[k, 3] + 1])\n detBboxes = np.array(detBboxes)\n detConfidences = np.array(detConfidences)\n\n # debug: visualize GT and detections\n # if classIndex == 15: # and imgPath.endswith(\"WIN_20160803_11_42_36_Pro.jpg\"):\n # imgIndex = 6\n # imgPath = self.image_path_at(imgIndex)\n # img = imread(imgPath)\n # tmp_gtBoxes = gtInfos[imgIndex]['bbox']\n # inds = np.where(np.array(detImgIndices) == 1)[0]\n # tmp_detBoxes = detBboxes[inds]\n # print(detConfidences[inds])\n # drawRectangles(img, tmp_gtBoxes, color = (255, 0, 0)) #thickness=thickness)\n # drawRectangles(img, tmp_detBoxes, color= (0, 255, 0)) # thickness=thickness)\n # imshow(img, maxDim=800)\n\n # compute precision / recall / ap\n rec, prec, ap = self._voc_computePrecisionRecallAp(\n class_recs=gtInfos,\n confidence=detConfidences,\n image_ids=detImgIndices,\n BB=detBboxes,\n ovthresh=overlapThreshold,\n use_07_metric=use_07_metric)\n\n return rec, prec, ap\n\n\n #########################################################################\n # Python evaluation functions (copied/refactored from faster-RCNN)\n ##########################################################################\n def _voc_computePrecisionRecallAp(self, class_recs, confidence, image_ids, BB, ovthresh=0.5, use_07_metric=False):\n # sort by confidence\n sorted_ind = np.argsort(-confidence)\n BB = BB[sorted_ind, :]\n image_ids = [image_ids[x] for x in sorted_ind]\n\n # go down dets and mark TPs and FPs\n nd = len(image_ids)\n tp = np.zeros(nd)\n fp = np.zeros(nd)\n for d in range(nd):\n R = 
class_recs[image_ids[d]]\n bb = BB[d, :].astype(float)\n ovmax = -np.inf\n BBGT = R['bbox'].astype(float)\n\n if BBGT.size > 0:\n # compute overlaps\n ixmin = np.maximum(BBGT[:, 0], bb[0])\n iymin = np.maximum(BBGT[:, 1], bb[1])\n ixmax = np.minimum(BBGT[:, 2], bb[2])\n iymax = np.minimum(BBGT[:, 3], bb[3])\n iw = np.maximum(ixmax - ixmin + 1., 0.)\n ih = np.maximum(iymax - iymin + 1., 0.)\n inters = iw * ih\n\n # union\n uni = ((bb[2] - bb[0] + 1.) * (bb[3] - bb[1] + 1.) +\n (BBGT[:, 2] - BBGT[:, 0] + 1.) *\n (BBGT[:, 3] - BBGT[:, 1] + 1.) - inters)\n\n overlaps = inters / uni\n ovmax = np.max(overlaps)\n jmax = np.argmax(overlaps)\n\n if ovmax > ovthresh:\n if not R['difficult'][jmax]:\n if not R['det'][jmax]:\n tp[d] = 1.\n R['det'][jmax] = 1\n else:\n fp[d] = 1.\n else:\n fp[d] = 1.\n\n # compute precision recall\n npos = sum([len(cr['bbox']) for cr in class_recs])\n fp = np.cumsum(fp)\n tp = np.cumsum(tp)\n rec = tp / float(npos)\n # avoid divide by zero in case the first detection matches a difficult\n # ground truth\n prec = tp / np.maximum(tp + fp, np.finfo(np.float64).eps)\n ap = computeAveragePrecision(rec, prec, use_07_metric)\n return rec, prec, ap"
] | [
[
"numpy.maximum",
"numpy.minimum",
"numpy.cumsum",
"numpy.finfo",
"numpy.max",
"numpy.argmax",
"numpy.argsort",
"numpy.array",
"numpy.zeros",
"numpy.loadtxt"
]
] |
disktnk/chainer-compiler | [
"5cfd027b40ea6e4abf73eb42be70b4fba74d1cde",
"5cfd027b40ea6e4abf73eb42be70b4fba74d1cde",
"5cfd027b40ea6e4abf73eb42be70b4fba74d1cde",
"5cfd027b40ea6e4abf73eb42be70b4fba74d1cde",
"5cfd027b40ea6e4abf73eb42be70b4fba74d1cde",
"5cfd027b40ea6e4abf73eb42be70b4fba74d1cde",
"5cfd027b40ea6e4abf73eb42be70b4fba74d1cde",
"5cfd027b40ea6e4abf73eb42be70b4fba74d1cde",
"5cfd027b40ea6e4abf73eb42be70b4fba74d1cde",
"5cfd027b40ea6e4abf73eb42be70b4fba74d1cde"
] | [
"ch2o/tests/syntax/MultiFunction.py",
"ch2o/ch2o/chainer2onnx.py",
"elichika/tests/node/ndarray/NpZeros.py",
"elichika/tests/node/ndarray/Shape.py",
"examples/imagenet/train_imagenet.py",
"ch2o/tests/syntax/UserDefinedFunc.py",
"elichika/tests/node/Softmax.py",
"elichika/tests/node/Functions/ExpandDims.py",
"ch2o/tests/node/EmbedID.py",
"ch2o/tests/node/SplitAxis.py"
] | [
"# coding: utf-8\n\nimport chainer\nimport chainer.links as L\n\n# Network definition\n\n\nclass A(chainer.Chain):\n\n def __init__(self):\n super(A, self).__init__()\n with self.init_scope():\n self.l0 = L.Linear(7)\n self.l1 = L.Linear(5)\n\n def g(self, y):\n return self.l1(y)\n\n def forward(sl, x):\n x1 = sl.l0(x)\n x2 = sl.g(x1)\n return x2\n\n\n# ======================================\n\nimport ch2o\n\n\nif __name__ == '__main__':\n import numpy as np\n np.random.seed(314)\n\n model = A()\n\n v = np.random.rand(10, 20).astype(np.float32)\n ch2o.generate_testcase(model, [v])\n",
"# coding: utf-8\n\nimport ast\nimport gast\nimport inspect\n\nimport numpy as np\nimport onnx\nfrom onnx import checker\nfrom onnx import helper\nfrom onnx import TensorProto\n\nimport code\nimport logging\nimport sys\nimport types\n\nimport chainer\nimport numpy\n\nfrom ch2o.test_args import dprint\nfrom ch2o.env import Env\nfrom ch2o.utils import new_tensor, new_sequence, clip_head, ValueReturn, istensor, totensor, make_graph\nfrom ch2o.links import Link2NodeClass\nfrom ch2o.funcs import Func, Func2NodeClass, Function_Concat, Function_Dummy, castto\nfrom ch2o.builtin_funcs import builtin_functions\nfrom ch2o.value import Value\n\nimport builtins\n\n\nid2name_list = []\n\n\ndef init_id2name(ch):\n global id2name_list\n id2name_list = []\n for k, v in ch.namedlinks():\n # print('add link',k,v,id(v))\n id2name_list.append((id(v), k))\n\n\ndef id2name(nid):\n # print('nid',nid)\n for k, v in id2name_list:\n if k == nid:\n return v\n raise Exception(\"Not Found ID \", nid)\n\n\ndef _value(v):\n if (isinstance(v, User_Defined_Function) or\n isinstance(v, User_Defined_Func_In_Link)):\n return v\n return Value(v)\n\n\ndef convert_link(ch, env):\n res = None\n if type(ch) in Link2NodeClass:\n res = Link2NodeClass[type(ch)](ch)\n else:\n res = User_Defined_Link(ch, env)\n\n ts = res.init_tensors()\n if len(ts) != 0:\n pathname = id2name(id(ch))\n env.add_init(ts, pathname)\n return res\n\n\nclass Function_base(object):\n def stub_call(self, args, kwargs, loenv):\n # 関数引数は inspect.signature できれいにしたい\n\n astargs = list(map(lambda x: x.id, self.ast.args.args))\n args = dict(zip(astargs, args))\n\n defs = self.ast.args.defaults\n d = len(astargs) - len(args.keys())\n if d > 0:\n for i, v in enumerate(defs[::-1][:d]):\n args.update({astargs[-i-1]: eval_ast(v, loenv)})\n\n args.update(kwargs)\n\n assert(len(astargs) == len(args.keys()))\n loenv.update_vars(args)\n\n # このやり方は、If文などでコントロールフローが別れるような場合に\n # 複数ヶ所の return を変換する際に問題になる\n try:\n eval_ast(self.ast.body, loenv)\n return None\n except ValueReturn as v:\n return v.value\n\n\nclass User_Defined_Function(Function_base):\n def __init__(self, func):\n self.func = func\n src = clip_head(inspect.getsource(func))\n dprint(src)\n self.ast = gast.ast_to_gast(ast.parse(src)).body[0]\n assert(isinstance(self.ast, gast.gast.FunctionDef))\n\n def call(self, args, kwargs, env):\n loenv = env.localenv(sys.modules[self.func.__module__])\n return self.stub_call(args, kwargs, loenv)\n\n\nclass User_Defined_Func_In_Link(Function_base):\n def __init__(self, ch, fn):\n self.ch = ch\n src = clip_head(inspect.getsource(fn))\n dprint(src)\n self.ast = gast.ast_to_gast(ast.parse(src)).body[0]\n assert(isinstance(self.ast, gast.gast.FunctionDef))\n\n def call(self, args, kwargs, env):\n loenv = env.localenv(sys.modules[self.ch.__module__])\n args = [self.ch] + args\n return self.stub_call(args, kwargs, loenv)\n\n\nclass User_Defined_Link(object):\n def __init__(self, ch, env):\n src = clip_head(inspect.getsource(ch.forward))\n dprint(src)\n self.ast = gast.ast_to_gast(ast.parse(src)).body[0]\n\n self.call = User_Defined_Func_In_Link(ch, ch.forward).call\n\n # 以下、 最初の外からのためのやつ\n # code.InteractiveConsole({'v': self.ast}).interact()\n self.forward_arglen = len(self.ast.args.args)-1\n\n # ここで、初期化したやつを上書きしてやる必要が出てくる\n # あとでchainerで実行するために回復しないといけないので、\n # restore_funcs に復元すべきものを追加している\n self.inits = []\n\n for s, v in ch.namedparams():\n s = s[1:]\n if s.find('/') != -1:\n continue\n t = helper.make_tensor_value_info(\n '/'+s, TensorProto.FLOAT, list(v.shape))\n 
self.inits.append(t)\n mv = getattr(ch, s)\n setattr(ch, s, t)\n env.restore_funcs.append(lambda: setattr(ch, s, mv))\n\n # TODO(satos) Yieldをコンパイルできるとこれを消せる\n mv = getattr(ch, 'children')\n setattr(ch, 'children', Func(lambda _, __, ___: mv()))\n env.restore_funcs.append(lambda: setattr(ch, 'children', mv))\n\n def init_tensors(self):\n return self.inits\n\n\nclass User_Defined_Class(object):\n def __init__(self, classtype):\n # classtypeのmethod は持ってるが init は呼ばれてない、というobjectが必要になる。\n # ので、あえて parent のinit を呼ばない継承をする\n class Tmp(classtype):\n def __init__(_):\n pass\n\n # dprint('user defined class of',classtype)\n ch = Tmp()\n ch.__module__ = classtype.__module__\n\n # code.InteractiveConsole({'Tmp': Tmp,'v': ch}).interact()\n def f(args, kwargs, env):\n if not isinstance(classtype.__init__, type(str.__init__)): # slot wrapper というものらしい\n User_Defined_Func_In_Link(\n ch, classtype.__init__).call(args, kwargs, env)\n\n return ch\n\n self.init_wrapper = Func(f)\n\n\nimport logging\n\n# logging. なんとか\n# print( なんとか )\n# はデバッグ出力なのでコンパイルせずに読み飛ばしたい\n\n\ndef is_print_logging(s, env):\n return (\n isinstance(s, gast.Expr) and\n isinstance(s.value, gast.Call) and\n isinstance(s.value.func, gast.Attribute) and\n isinstance(eval_ast(s.value.func.value, env), logging.__class__)\n ) or (\n isinstance(s, gast.Expr) and\n isinstance(s.value, gast.Call) and\n isinstance(s.value.func, gast.Name) and\n s.value.func.id == 'print'\n )\n\n\ndef _find_in_out(localenv, env):\n used_onnx_names = set()\n for node in localenv.nodes:\n used_onnx_names |= set(node.input)\n\n outer_vars = env.get_var_dict()\n inner_vars = localenv.get_var_dict()\n\n # A tuple of (in-value, out-value, extra info for later setattr)\n # keyed by a variable name.\n in_out = {}\n for key, iv in inner_vars.items():\n ov = outer_vars.get(key, None)\n if isinstance(ov, Value):\n # Changing link or something to Value is not supported.\n assert isinstance(iv, Value), '%s => %s' % (ov, iv)\n elif ov is None or iv is None:\n pass\n else:\n # Changing Value to link or something is not supported.\n assert not isinstance(iv, Value), '%s => %s' % (ov, iv)\n continue\n\n if ov is None or iv is None or ov.value != iv.value:\n in_out[key] = (ov, iv, None)\n continue\n\n if ov.to_value_info(env).name in used_onnx_names:\n in_out[key] = (ov, None, None)\n\n var_ids = {}\n def attr_id(var, key):\n vid = id(var.value)\n if vid not in var_ids:\n var_ids[vid] = 'v%d' % (len(var_ids) + 1)\n return var_ids[vid] + '.' + key\n\n in_attrs = {}\n for var, key, value in localenv.read_attrs:\n k = attr_id(var, key)\n if k not in in_attrs:\n in_attrs[k] = value\n\n out_attrs = {}\n for var, key, value in localenv.wrote_attrs:\n k = attr_id(var, key)\n out_attrs[k] = (value, (var, key))\n\n for k in set(list(in_attrs.keys()) + list(out_attrs.keys())):\n iv = in_attrs.get(k, None)\n ov, setattr_info = out_attrs.get(k, (None, None))\n in_out[k] = (iv, ov, setattr_info)\n\n # ループ内で使われた link パラメータは\n # 1. 外の env にコピーしなければならない\n env.init_tensors.update(localenv.init_tensors)\n # 2. 
state としてループ内に持ち込まなければならない\n for init in localenv.init_tensors.values():\n key = '/' + init.name\n in_out[key] = (Value(init), None, None)\n\n return in_out\n\n\ndef eval_if(nast, env):\n cond = eval_ast(nast.test, env)\n if cond.is_py and cond.value is True:\n return eval_ast(nast.body, env)\n elif cond.is_py and cond.value is False:\n return eval_ast(nast.orelse, env)\n\n then_env = env.new_block()\n ty = eval_ast(nast.body, then_env)\n assert ty.is_none()\n\n else_env = env.new_block()\n ty = eval_ast(nast.orelse, else_env)\n assert ty.is_none()\n\n then_in_out = _find_in_out(then_env, env)\n else_in_out = _find_in_out(else_env, env)\n keys = set(list(then_in_out.keys()) + list(else_in_out.keys()))\n\n input_values = []\n then_outputs = []\n else_outputs = []\n final_outputs = []\n final_setattrs = []\n\n for key in keys:\n then_iv, then_ov, then_setattr_info = then_in_out.get(\n key, (None, None, None))\n else_iv, else_ov, else_setattr_info = else_in_out.get(\n key, (None, None, None))\n\n if then_setattr_info is None:\n setattr_info = else_setattr_info\n else:\n if else_setattr_info is not None:\n assert then_setattr_info == else_setattr_info\n setattr_info = then_setattr_info\n\n def set_final_output(key, out):\n out = out.copy(env, name=key)\n final_outputs.append((key, out.value))\n if setattr_info is not None:\n final_setattrs.append(tuple(list(setattr_info) + [out]))\n\n iv = else_iv if then_iv is None else then_iv\n if iv is None:\n iv = Value(False)\n input_values.append(iv.to_value_info(env))\n\n if then_ov is None and else_ov is None:\n continue\n if then_ov is None:\n then_outputs.append(iv.to_value_info(env))\n else_outputs.append(else_ov.to_value_info(else_env))\n set_final_output(key, else_ov)\n elif else_ov is None:\n then_outputs.append(then_ov.to_value_info(then_env))\n else_outputs.append(iv.to_value_info(env))\n set_final_output(key, then_ov)\n else:\n then_outputs.append(then_ov.to_value_info(then_env))\n else_outputs.append(else_ov.to_value_info(else_env))\n set_final_output(key, then_ov)\n\n then_graph = make_graph(\n then_env.nodes,\n \"If_then\",\n input_values,\n then_outputs,\n )\n\n else_graph = make_graph(\n else_env.nodes,\n \"If_else\",\n input_values,\n else_outputs,\n )\n\n env.addnode(\n 'If',\n inputs=[cond.to_value_info(env).name] + [i.name for i in input_values],\n outputs=[o.name for _, o in final_outputs],\n then_branch=then_graph,\n else_branch=else_graph,\n )\n\n for k, o in final_outputs:\n env.set_var(k, _value(o))\n\n for var, key, value in final_setattrs:\n setattr(var.value, key, value)\n\n return None\n\n\ndef eval_for(nast, env):\n assert nast.orelse == []\n ite = eval_ast(nast.iter, env)\n\n # A hack for ResNet50.\n # TODO(hamaji): Come up with a sophisticated way.\n # TODO(hamaji): This code doesn't handle scope properly, I think.\n if (isinstance(ite.value, types.GeneratorType) and\n 'ChainList.children' in str(ite.value)):\n # とりあえず実際にfor文を回す\n tg = nast.target.id\n env.set_var(tg, Value(None))\n for v in ite.value:\n env.set_var(tg, _value(v))\n eval_ast(nast.body, env)\n # print('looping',env.vars.keys())\n\n env.pop_var(tg)\n return None\n\n if ite.is_py:\n ite = Value([Value(v) for v in ite.value])\n\n assert isinstance(nast.target, gast.Name)\n x = nast.target.id\n\n # 新たなenv を作って、評価中にできた子グラフをもとにする\n localenv = env.new_block()\n\n cnt = new_tensor()\n gtx = new_sequence()\n localenv.set_var(x, _value(localenv.calc(\n \"ChainerSequenceLookup\",\n inputs=[gtx.name, cnt.name],\n )))\n ty = eval_ast(nast.body, localenv)\n assert 
ty.is_none()\n\n in_out = _find_in_out(localenv, env)\n\n input_values = []\n output_values = []\n final_outputs = []\n final_setattrs = []\n for key, (iv, ov, setattr_info) in in_out.items():\n if ov is None:\n continue\n if iv is None:\n iv = Value(False)\n out = ov.copy(env, name=key)\n final_outputs.append((key, out.value))\n if setattr_info is not None:\n final_setattrs.append(tuple(list(setattr_info) + [out]))\n input_values.append(iv.to_value_info(env))\n output_values.append(ov.to_value_info(env))\n\n cond = new_tensor(name='loop_cond')\n localgraph = make_graph(\n localenv.nodes,\n \"Loop_subgraph\",\n [cnt, cond, gtx] + input_values,\n [cond, gtx] + output_values\n )\n\n mtc = env.calc(\n \"ChainerGenericLen\",\n inputs=[ite.to_sequence(env).name],\n )\n\n env.addnode(\n 'Loop',\n inputs=([mtc.name, \"\", ite.to_sequence(env).name] +\n [i.name for i in input_values]),\n outputs=([new_tensor('out_generator').name] +\n [o.name for _, o in final_outputs]),\n body=localgraph\n )\n\n for k, o in final_outputs:\n if '.' not in k and '/' not in k:\n env.set_var(k, _value(o))\n\n for var, key, value in final_setattrs:\n setattr(var.value, key, value)\n\n return None\n\n\ndef eval_assign(nast, env):\n value = eval_ast(nast.value, env)\n targs = nast.targets\n assert(len(targs) == 1)\n # v,w = 1 も targetsは長さ1のlistになるので len(rargs) != 1 の状況は謎ですね\n\n # tgとして、下以外に\n # List, ListのIndex, Starred\n # またこれらを再帰的に組み合わせたものが存在しうる\n\n def set_var(k, v):\n v = _value(v)\n if not v.is_py:\n v = v.identity(env, name=k)\n env.set_var(k, v)\n\n tg = targs[0]\n if isinstance(tg, gast.Name):\n set_var(tg.id, value)\n elif isinstance(tg, gast.Tuple):\n assert(isinstance(value.value, tuple))\n value = value.value\n assert(len(tg.elts) == len(value))\n\n for i, v in enumerate(value):\n set_var(tg.elts[i].id, v) # TODO(satos) これこのあと更に再帰的に書く必要あるかも\n\n elif isinstance(tg, gast.Attribute):\n body = eval_ast(tg.value, env)\n # If the attr already exists, de-literalize and push it to\n # `read_attrs` by calling `get_attribute`. See lazy_self_init\n # test in ForAndIf.py.\n if hasattr(body.value, tg.attr):\n body.get_attribute(tg.attr, env)\n env.wrote_attrs.append((body, tg.attr, value))\n setattr(body.value, tg.attr, value)\n else:\n raise Exception('invalid assing lvalue', targs[0])\n return None\n\n\ndef eval_call(nast, env):\n fn = eval_ast(nast.func, env)\n if not fn.is_py:\n raise TypeError('Expected a callable: %s' % fn.value)\n fn = fn.value\n\n # TODO(hamaji): Merge this logic with is_print_logging. 
Also,\n # maybe it's better to try emitting ChainerPrint.\n if fn in (logging.debug, logging.info,\n logging.warn, logging.warning, logging.error):\n return None\n\n args = []\n for ag in nast.args:\n if isinstance(ag, gast.Starred):\n args += list(eval_ast(ag.value, env))\n else:\n args.append(eval_ast(ag, env))\n\n keywords = dict(\n map(lambda x: (x.arg, eval_ast(x.value, env)), nast.keywords))\n\n # code.InteractiveConsole({'fn': fn}).interact()\n\n # chainer.functions の関数とかは、ここでfookをかける。\n if fn in Func2NodeClass.keys():\n return Func2NodeClass[fn].call(args, keywords, env)\n\n dprint(fn, fn.__class__)\n if isinstance(fn, types.FunctionType):\n fn = User_Defined_Function(fn)\n elif isinstance(fn, types.MethodType):\n # apply はforwardにする\n # code.InteractiveConsole({'fn': fn}).interact()\n if fn.__func__ == chainer.FunctionNode.apply:\n fn = User_Defined_Func_In_Link(\n fn.__self__, fn.__self__.forward)\n elif fn.__func__ == chainer.FunctionNode.retain_inputs:\n # TODO(satos) これbackward側に何か伝える必要がありそう\n fn = Func(lambda _, __, ___: None)\n else:\n fn = User_Defined_Func_In_Link(fn.__self__, fn)\n elif fn in builtin_functions:\n fn = builtin_functions[fn]\n elif isinstance(fn, type):\n # なにがしかのinstanceを作成したはず\n assert fn.__module__ != 'builtins'\n fn = User_Defined_Class(fn).init_wrapper\n elif isinstance(fn, chainer.link.Link):\n fn = convert_link(fn, env)\n\n dprint('converted to', fn)\n return fn.call(args, keywords, env)\n\n\ndef eval_unary_op(nast, env):\n v = eval_ast(nast.operand, env)\n res = new_tensor()\n if isinstance(nast.op, gast.USub):\n # optype = \"*= -1\"\n def opfun(x): return -x\n elif isinstance(nast.op, gast.Not):\n # optype = \"Not\"\n def opfun(x): return not x\n else:\n raise Exception('unknown operator', nast.op)\n\n if not istensor(v):\n return opfun(v.value)\n else:\n raise Exception(\"Unimplemented yet\")\n\n\ndef eval_binary_op(nast, env):\n lv = eval_ast(nast.left, env)\n rv = eval_ast(nast.right, env)\n\n res = new_tensor(['TODO'])\n isfloor = False\n if isinstance(nast.op, gast.Add):\n optype = \"Add\"\n\n def opfun(a, b): return a + b\n\n elif isinstance(nast.op, gast.Sub):\n optype = \"Sub\"\n\n def opfun(a, b): return a - b\n\n elif isinstance(nast.op, gast.Mult):\n optype = \"Mul\"\n\n def opfun(a, b): return a * b\n\n elif isinstance(nast.op, gast.FloorDiv):\n optype = \"Div\"\n isfloor = True\n\n def opfun(a, b): return a // b\n\n elif isinstance(nast.op, gast.Div):\n optype = \"Div\"\n\n def opfun(a, b): return a / b\n\n else:\n raise Exception('unknown operator', nast.op)\n\n # code.InteractiveConsole({'lv': lv, 'rv': rv}).interact()\n\n # TODO(hamaji): Reconsider if constant folding is necessary in CH2O.\n #if not istensor(lv) and not istensor(rv):\n # # 定数畳み込みを行う\n # return opfun(lv, rv)\n\n lv.to_value_info(env)\n rv.to_value_info(env)\n if lv.is_sequence() and rv.is_sequence():\n assert optype == 'Add'\n lv = lv.to_sequence(env)\n rv = rv.to_sequence(env)\n\n state = new_sequence(name='seq_plus_state')\n cond = new_tensor(name='seq_plus_cond')\n index = new_tensor(name='seq_plus_index')\n elem = new_tensor(name='seq_plus_elem')\n out_state = new_tensor(name='seq_plus_out_state')\n nodes = []\n nodes.append(helper.make_node(\n 'ChainerSequenceLookup',\n inputs=[rv.name, index.name],\n outputs=[elem.name]\n ))\n nodes.append(helper.make_node(\n 'ChainerSequenceAppend',\n inputs=[state.name, elem.name],\n outputs=[out_state.name]\n ))\n loop = make_graph(\n nodes,\n \"SeqPlus\",\n [index, cond, state],\n [cond, out_state],\n )\n\n length = 
env.calc('ChainerGenericLen', inputs=[rv.name])\n res = new_sequence(name='seq_plus')\n env.addnode(\n 'Loop',\n inputs=[length.name, \"\", lv.name],\n outputs=[res.name],\n body=loop\n )\n else:\n if optype == 'Div' and not isfloor:\n lv = castto(lv.to_tensor(env), TensorProto.FLOAT, env)\n rv = castto(rv.to_tensor(env), TensorProto.FLOAT, env)\n else:\n lv = lv.to_tensor(env)\n rv = rv.to_tensor(env)\n res = env.calc(\n optype,\n inputs=[lv.name, rv.name],\n )\n\n if isfloor:\n res = env.calc(\n \"Floor\",\n inputs=[res.name],\n )\n\n return res\n\n\ndef eval_attribute(nast, env):\n body = eval_ast(nast.value, env)\n\n if not body.is_py:\n if nast.attr == 'shape':\n res = env.calc(\n 'Shape',\n inputs=[body.to_tensor(env).name],\n npdtype=np.int64,\n )\n res = env.calc_seq(\n 'ChainerSequenceSeparate',\n inputs=[res.name],\n )\n return res\n\n elif nast.attr == 'size':\n res = env.calc(\n 'Size',\n inputs=[body.to_tensor(env).name],\n npdtype=np.int64,\n )\n return res\n\n elif nast.attr == 'append':\n # TODO(satos) ごまかさない\n assert isinstance(\n nast.value, gast.Name) and nast.value.id in env.get_var_dict().keys()\n na = nast.value.id\n\n # あと、ここのnaがreferenceの場合不正確\n # たとえば\n # x = y\n # x.append(3)\n # のyが更新されないので問題\n\n def f(args, _, env):\n assert len(args) == 1\n v = args[0].to_tensor(env)\n env.set_var(na, _value(env.calc_seq(\n 'ChainerSequenceAppend',\n inputs=[body.to_sequence(env).name, v.name],\n )))\n return None\n\n return Func(f)\n\n raise Exception('Unimplemented attribute ',\n nast.attr, ' for tensor')\n return body.get_attribute(nast.attr, env)\n\n\ndef eval_compare(nast, env):\n lv = eval_ast(nast.left, env)\n vs = [eval_ast(x, env) for x in nast.comparators]\n\n if env.outer_block is None and all(v.is_py for v in [lv] + vs):\n # Constant folding.\n lv = lv.value\n res = True\n for op, r in zip(nast.ops, vs):\n r = r.value\n if isinstance(op, gast.Eq):\n res = res and (lv == r)\n elif isinstance(op, gast.NotEq):\n res = res and (lv != r)\n elif isinstance(op, gast.Is):\n res = res and (lv is r)\n elif isinstance(op, gast.IsNot):\n res = res and (lv is not r)\n elif isinstance(op, gast.Gt):\n res = res and (lv > r)\n elif isinstance(op, gast.GtE):\n res = res and (lv >= r)\n elif isinstance(op, gast.Lt):\n res = res and (lv < r)\n elif isinstance(op, gast.LtE):\n res = res and (lv <= r)\n else:\n raise Exception('unimplemented operator', op)\n return res\n\n assert len(vs) == 1, 'Multiple comparator not implemented yet'\n res = None\n for op, r in zip(nast.ops, vs):\n needs_not = False\n if isinstance(op, gast.Eq):\n optype = 'Equal'\n elif isinstance(op, gast.NotEq):\n needs_not = True\n optype = 'Equal'\n elif isinstance(op, gast.Is):\n optype = 'ChainerGenericIs'\n elif isinstance(op, gast.IsNot):\n needs_not = True\n optype = 'ChainerGenericIs'\n elif isinstance(op, gast.Gt):\n optype = 'Greater'\n elif isinstance(op, gast.GtE):\n # TODO(hamaji): This computation is wrong for NaNs.\n needs_not = True\n optype = 'Less'\n elif isinstance(op, gast.Lt):\n optype = 'Less'\n elif isinstance(op, gast.LtE):\n # TODO(hamaji): This computation is wrong for NaNs.\n needs_not = True\n optype = 'Greater'\n else:\n raise Exception('unimplemented operator', op)\n\n res = env.calc(optype,\n npdtype=np.bool,\n inputs=[lv.to_value_info(env).name,\n r.to_value_info(env).name])\n if needs_not:\n res = env.calc('Not', npdtype=np.bool, inputs=[res.name])\n return res\n\n\n\ndef eval_list_comp(nast, env):\n vn = \"dummy@\" + new_tensor().name # 重ならない名前にする(ループ内ループもあるため)\n assert 
len(nast.generators) >= 1\n tast = gast.ast_to_gast(ast.parse(\"v.append(w)\")).body[0]\n tast.value.func.value.id = vn\n tast.value.args[0] = nast.elt\n\n for gen in nast.generators:\n # とりあえず、このあたりはまだ実装しません\n assert len(gen.ifs) == 0 and gen.is_async == 0\n tast = gast.For(target=gen.target, iter=gen.iter,\n body=[tast], orelse=[])\n\n init = gast.ast_to_gast(ast.parse(\"v = []\")).body[0]\n init.targets[0].id = vn\n tast = [init, tast]\n\n rv = eval_ast(tast, env)\n assert rv.is_none()\n res = env.pop_var(vn)\n return res\n\n\ndef _concat(xs, axis, env):\n assert isinstance(xs, tuple) # 今のところ tuple 以外は concat できない\n return env.calc(\n \"Concat\",\n inputs=list(map(lambda x: x.name, xs)),\n axis=axis,\n )\n\n\ndef eval_subscript(nast, env):\n vs = eval_ast(nast.value, env)\n\n # TODO(hamaji): Use 2**63-1 instead.\n int_max = 2 ** 31 - 1\n\n def eval_with_default(nast, default_value):\n if nast is None:\n return Value(np.array(default_value)).to_tensor(env)\n return eval_ast(nast, env).to_tensor(env)\n\n if isinstance(nast.slice, gast.Index):\n if isinstance(nast.slice.value, gast.Tuple):\n assert vs.is_tensor(), 'Advanced indexing for Python list'\n indices = []\n slice_specs = []\n for index in nast.slice.value.elts:\n indices.append(eval_ast(index, env).to_tensor(env).name)\n slice_specs.append(1)\n return env.calc(\n 'ChainerGetItem',\n inputs=[vs.to_tensor(env).name] + indices,\n slice_specs=slice_specs\n )\n\n index = eval_ast(nast.slice.value, env).to_tensor(env)\n if vs.is_sequence():\n return env.calc(\n 'ChainerSequenceLookup',\n inputs=[vs.to_sequence(env).name, index.name]\n )\n else:\n return env.calc(\n 'ChainerGetItem',\n inputs=[vs.to_tensor(env).name, index.name],\n slice_specs=[1]\n )\n\n def get_slice_indices(slice):\n if slice.lower is None and slice.upper is None and slice.step is None:\n return []\n indices = [eval_with_default(slice.lower, 0).name,\n eval_with_default(slice.upper, int_max).name]\n if slice.step is not None:\n indices.append(eval_with_default(slice.step, 1).name)\n return indices\n\n if isinstance(nast.slice, gast.Slice):\n indices = get_slice_indices(nast.slice)\n if vs.is_sequence():\n return env.calc_seq(\n 'ChainerSequenceGetSlice',\n inputs=[vs.to_sequence(env).name] + indices\n )\n else:\n return env.calc(\n 'ChainerGetItem',\n inputs=[vs.to_tensor(env).name] + indices,\n slice_specs=[len(indices)]\n )\n\n if isinstance(nast.slice, gast.ExtSlice):\n assert vs.is_tensor(), 'Advanced indexing for Python list'\n indices = []\n slice_specs = []\n for dim in nast.slice.dims:\n if isinstance(dim, gast.Index):\n indices.append(eval_ast(dim.value, env).to_tensor(env).name)\n slice_specs.append(1)\n elif isinstance(dim, gast.Slice):\n ni = get_slice_indices(dim)\n indices.extend(ni)\n slice_specs.append(len(ni))\n else:\n assert False, 'Unknown slice: %s in %s' % (dim, nast.slice)\n\n return env.calc(\n 'ChainerGetItem',\n inputs=[vs.to_tensor(env).name] + indices,\n slice_specs=slice_specs\n )\n\n assert False, 'Unknown slice: %s' % nast.slice\n\n\ndef eval_list(nast, env):\n # Sequenceにしているが、ここはPythonのlistのままにしておきたいとのこと\n # Sequenceにする\n vs = list(map(lambda x: eval_ast(x, env), nast.elts))\n res = env.calc_seq(\n \"ChainerSequenceCreate\",\n inputs=[],\n )\n for v in vs:\n v = v.to_tensor(env)\n res = env.calc_seq(\n \"ChainerSequenceAppend\",\n inputs=[res.name, v.name],\n )\n return res\n\n\n_eval_ast_depth = 0\n\n\ndef eval_ast(nast, env):\n for k, v in env.get_var_dict().items():\n assert not isinstance(v, onnx.ValueInfoProto), '%s %s' % (k, 
v)\n\n global _eval_ast_depth\n if not isinstance(nast, list):\n dprint('-' * _eval_ast_depth, gast.dump(nast), env.get_var_dict().keys())\n\n _eval_ast_depth += 1\n r = eval_ast_impl(nast, env)\n _eval_ast_depth -= 1\n return _value(r)\n\n\ndef eval_ast_impl(nast, env):\n if isinstance(nast, list):\n # 逐次実行\n for s in nast:\n if is_print_logging(s, env):\n continue\n eval_ast(s, env)\n return None\n elif isinstance(nast, gast.For):\n return eval_for(nast, env)\n\n elif isinstance(nast, gast.Assign):\n return eval_assign(nast, env)\n\n elif isinstance(nast, gast.AugAssign):\n # referenceへの代入に対してこれは不正確\n ca = gast.Assign(targets=[nast.target], value=gast.BinOp(\n left=nast.target, op=nast.op, right=nast.value))\n return eval_ast(ca, env)\n\n elif isinstance(nast, gast.Call):\n return eval_call(nast, env)\n\n elif isinstance(nast, gast.UnaryOp):\n return eval_unary_op(nast, env)\n\n elif isinstance(nast, gast.BinOp):\n return eval_binary_op(nast, env)\n\n elif isinstance(nast, gast.BoolOp):\n # 現在は定数boleanのみ対応\n vs = list(map(lambda x: eval_ast(x, env), nast.values))\n res = new_tensor()\n if isinstance(nast.op, gast.And):\n def opfun(v): return all(v)\n else:\n raise Exception('unknown operator', nast.op)\n\n if not any(map(istensor, vs)):\n return opfun(vs)\n\n raise Exception('Unimplemented BoolOp for tensor', nast)\n\n elif isinstance(nast, gast.Attribute):\n return eval_attribute(nast, env)\n\n elif isinstance(nast, gast.Compare):\n return eval_compare(nast, env)\n\n elif isinstance(nast, gast.If):\n return eval_if(nast, env)\n\n elif isinstance(nast, gast.ListComp):\n return eval_list_comp(nast, env)\n\n elif isinstance(nast, gast.Subscript):\n return eval_subscript(nast, env)\n\n elif isinstance(nast, gast.Delete):\n # おのおの単に忘れる\n vs = nast.targets\n for v in vs:\n assert isinstance(v, gast.Name)\n env.pop_var(v.id)\n return None\n\n elif isinstance(nast, gast.Name):\n try:\n return env.get_var(nast.id)\n except NameError as ne:\n if nast.id in dir(env.module):\n return getattr(env.module, nast.id)\n elif nast.id in dir(builtins):\n return getattr(builtins, nast.id)\n raise\n elif isinstance(nast, gast.Num):\n return nast.n\n elif isinstance(nast, gast.NameConstant):\n return nast.value\n elif isinstance(nast, gast.Expr):\n return eval_ast(nast.value, env)\n elif isinstance(nast, gast.Str):\n return nast.s\n elif isinstance(nast, gast.Tuple):\n return tuple(map(lambda x: eval_ast(x, env), nast.elts))\n elif isinstance(nast, gast.List):\n return eval_list(nast, env)\n\n elif isinstance(nast, gast.Return):\n raise ValueReturn(eval_ast(nast.value, env))\n\n elif isinstance(nast, gast.Assert):\n # TODO(hamaji): Emit an assertion?\n return None\n\n # TODO(hamaji): Implement `with`.\n # elif isinstance(nast, gast.With):\n # sys.stderr.write(\n # 'WARNING: Currenctly, the context of `with` is just ignored\\n')\n # for s in nast.body:\n # eval_ast(s, env)\n # return None\n\n else:\n print('unknown ast')\n code.InteractiveConsole({'nast': nast, 'env': env}).interact()\n raise Exception('unknown ast', nast)\n\n raise Exception(\"shouldn't reach here\", nast)\n\n\ndef compile_model(model, inputs):\n # return helper.make_graph([],'dummy',[],[])\n\n init_id2name(model)\n # code.InteractiveConsole({'mo': model}).interact()\n env = Env(sys.modules[model.__module__])\n molk = User_Defined_Link(model, env)\n\n input_tensors = []\n for i in inputs:\n # TODO(hamaji): Set valid type info.\n if isinstance(i, (list, tuple)):\n x = new_sequence()\n elif i is None:\n x = new_tensor()\n else:\n if 
isinstance(i, int):\n i = np.array(i)\n else:\n # TODO(durswd): This code requires chainer6.x\n i = chainer.cuda.to_cpu(i)\n\n x = new_tensor(dims=i.shape, dtype=i.dtype)\n input_tensors.append(x)\n\n input_values = [Value(i) for i in input_tensors]\n v = molk.call(input_values, [], env)\n\n dprint('output_tensors', v)\n if isinstance(v.value, tuple):\n output_tensors = list(v.value) # ばらしてみる\n else:\n output_tensors = [v] # とりあえず1tensor\n\n # print('env.init_tensors ',env.init_tensors)\n input_tensors += list(env.init_tensors.values())\n\n for f in env.restore_funcs:\n f()\n\n # for no in env.nodes:\n # print(no.op_type)\n # print(env.nodes)\n # print(input_tensors)\n # print(output_tensors)\n # for ch in model.namedparams():\n # print(ch)\n\n outputs_vi = [o.to_value_info(env) for o in output_tensors]\n graph = make_graph(env.nodes,\n 'name_is_unknown_now',\n input_tensors,\n outputs_vi)\n\n # inputのうち、重みであるものにはinitializerをつける\n # batch_sizeやinput_sizeなどの可変なものはできる限りのそのままで\n\n # Chainer compiler 独自のノードを使うとcheckできなくなる...\n # checker.check_graph(graph)\n mo = helper.make_model(graph)\n\n # print(mo)\n return mo\n",
"# coding: utf-8\n\nimport numpy as np\nimport chainer\nimport chainer.functions as F\nimport testtools\nimport numpy as np\n\n\nclass A(chainer.Chain):\n def forward(self):\n y1 = np.zeros((3, 4), dtype=np.float32)\n return y1\n\n\n# ======================================\n\ndef main():\n testtools.generate_testcase(A(), [])\n\n\nif __name__ == '__main__':\n main()\n",
"# coding: utf-8\n\nimport chainer\nimport chainer.functions as F\n\n\nclass Shape(chainer.Chain):\n def forward(self, x):\n y1 = x.shape\n return list(y1)\n\n\nclass ShapeConcat(chainer.Chain):\n def forward(self, x):\n y1 = x.shape\n return np.array(y1 + (42,))\n\nclass ShapeIndex(chainer.Chain):\n def forward(self, x):\n y1 = x.shape\n return y1[0]\n\n# ======================================\n\nimport testtools\nimport numpy as np\n\ndef main():\n import numpy as np\n np.random.seed(314)\n\n x = np.random.rand(12, 6, 4).astype(np.float32)\n\n testtools.generate_testcase(Shape(), [x])\n\n testtools.generate_testcase(ShapeConcat(), [x], subname='concat')\n\n testtools.generate_testcase(ShapeIndex(), [x], subname='index')\n\nif __name__ == '__main__':\n main()\n",
"#!/usr/bin/env python\n\"\"\"Example code of learning a large scale convnet from ILSVRC2012 dataset.\n\nPrerequisite: To run this example, crop the center of ILSVRC2012 training and\nvalidation images, scale them to 256x256 and convert them to RGB, and make\ntwo lists of space-separated CSV whose first column is full path to image and\nsecond column is zero-origin label (this format is same as that used by Caffe's\nImageDataLayer).\n\n\"\"\"\nimport argparse\nimport json\nimport os\nimport random\nimport sys\n\nimport numpy as np\n\nimport chainer\nfrom chainer import dataset\nfrom chainer import function_hooks\nfrom chainer import training\nfrom chainer.training import extensions\nimport chainerx\n\nimport dali_util\n\nimport alex\nimport googlenet\nimport googlenetbn\nimport nin\nimport resnet50\nimport resnext50\n\nproject_root = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))\nsys.path.append(os.path.join(project_root, 'ch2o'))\nsys.path.append(os.path.join(project_root, 'python'))\nsys.path.append(os.path.join(project_root, 'build/python'))\n\nimport chainer_compiler\n\n\nclass PreprocessedDataset(chainer.dataset.DatasetMixin):\n\n def __init__(self, path, root, mean, crop_size, random=True):\n self.base = chainer.datasets.LabeledImageDataset(path, root)\n self.mean = mean.astype(np.float32)\n self.crop_size = crop_size\n self.random = random\n\n def __len__(self):\n return len(self.base)\n\n def get_example(self, i):\n # It reads the i-th image/label pair and return a preprocessed image.\n # It applies following preprocesses:\n # - Cropping (random or center rectangular)\n # - Random flip\n # - Scaling to [0, 1] value\n crop_size = self.crop_size\n\n image, label = self.base[i]\n _, h, w = image.shape\n\n if self.random:\n # Randomly crop a region and flip the image\n top = random.randint(0, h - crop_size - 1)\n left = random.randint(0, w - crop_size - 1)\n if random.randint(0, 1):\n image = image[:, :, ::-1]\n else:\n # Crop the center\n top = (h - crop_size) // 2\n left = (w - crop_size) // 2\n bottom = top + crop_size\n right = left + crop_size\n\n image = image[:, top:bottom, left:right]\n image -= self.mean[:, top:bottom, left:right]\n image *= (1.0 / 255.0) # Scale to [0, 1]\n return image, label\n\n\ndef main():\n archs = {\n 'alex': alex.Alex,\n 'alex_fp16': alex.AlexFp16,\n 'googlenet': googlenet.GoogLeNet,\n 'googlenetbn': googlenetbn.GoogLeNetBN,\n 'googlenetbn_fp16': googlenetbn.GoogLeNetBNFp16,\n 'nin': nin.NIN,\n 'resnet50': resnet50.ResNet50,\n 'resnext50': resnext50.ResNeXt50,\n }\n\n parser = argparse.ArgumentParser(\n description='Learning convnet from ILSVRC2012 dataset')\n parser.add_argument('train', help='Path to training image-label list file')\n parser.add_argument('val', help='Path to validation image-label list file')\n parser.add_argument('--arch', '-a', choices=archs.keys(), default='nin',\n help='Convnet architecture')\n parser.add_argument('--batchsize', '-B', type=int, default=32,\n help='Learning minibatch size')\n parser.add_argument('--epoch', '-E', type=int, default=10,\n help='Number of epochs to train')\n parser.add_argument('--iterations', '-I', type=int, default=0,\n help='Number of iterations to train')\n parser.add_argument('--device', '-d', type=str, default='-1',\n help='Device specifier. Either ChainerX device '\n 'specifier or an integer. If non-negative integer, '\n 'CuPy arrays with specified device id are used. 
If '\n 'negative integer, NumPy arrays are used')\n parser.add_argument('--initmodel',\n help='Initialize the model from given file')\n parser.add_argument('--loaderjob', '-j', type=int,\n help='Number of parallel data loading processes')\n parser.add_argument('--mean', '-m', default='mean.npy',\n help='Mean file (computed by compute_mean.py)')\n parser.add_argument('--resume', '-r', default='',\n help='Initialize the trainer from given file')\n parser.add_argument('--out', '-o', default='result',\n help='Output directory')\n parser.add_argument('--root', '-R', default='.',\n help='Root directory path of image files')\n parser.add_argument('--val_batchsize', '-b', type=int, default=250,\n help='Validation minibatch size')\n parser.add_argument('--test', action='store_true')\n parser.set_defaults(test=False)\n parser.add_argument('--dali', action='store_true')\n parser.set_defaults(dali=False)\n group = parser.add_argument_group('deprecated arguments')\n group.add_argument('--gpu', '-g', dest='device',\n type=int, nargs='?', const=0,\n help='GPU ID (negative value indicates CPU)')\n parser.add_argument('--compile', action='store_true',\n help='Compile the model')\n parser.add_argument('--dump_onnx', action='store_true',\n help='Dump ONNX model after optimization')\n args = parser.parse_args()\n\n chainer.config.autotune = True\n chainer.config.cudnn_fast_batch_normalization = True\n\n device = chainer.get_device(args.device)\n\n print('Device: {}'.format(device))\n print('# Minibatch-size: {}'.format(args.batchsize))\n if args.iterations:\n print('# iterations: {}'.format(args.iterations))\n else:\n print('# epoch: {}'.format(args.epoch))\n print('')\n\n # Initialize the model to train\n model = archs[args.arch]()\n if args.initmodel:\n print('Load model from {}'.format(args.initmodel))\n chainer.serializers.load_npz(args.initmodel, model)\n insize = model.insize\n if args.compile:\n model = chainer_compiler.compile(model, dump_onnx=args.dump_onnx)\n model.to_device(device)\n device.use()\n\n # Load the mean file\n mean = np.load(args.mean)\n if args.dali:\n if not dali_util._dali_available:\n raise RuntimeError('DALI seems not available on your system.')\n num_threads = args.loaderjob\n if num_threads is None or num_threads <= 0:\n num_threads = 1\n ch_mean = list(np.average(mean, axis=(1, 2)))\n ch_std = [255.0, 255.0, 255.0]\n # Setup DALI pipelines\n train_pipe = dali_util.DaliPipelineTrain(\n args.train, args.root, insize, args.batchsize,\n num_threads, args.gpu, True, mean=ch_mean, std=ch_std)\n val_pipe = dali_util.DaliPipelineVal(\n args.val, args.root, insize, args.val_batchsize,\n num_threads, args.gpu, False, mean=ch_mean, std=ch_std)\n train_iter = chainer.iterators.DaliIterator(train_pipe)\n val_iter = chainer.iterators.DaliIterator(val_pipe, repeat=False)\n # converter = dali_converter\n converter = dali_util.DaliConverter(mean=mean, crop_size=insize)\n else:\n # Load the dataset files\n train = PreprocessedDataset(args.train, args.root, mean, insize)\n val = PreprocessedDataset(args.val, args.root, mean, insize,\n False)\n # These iterators load the images with subprocesses running in parallel\n # to the training/validation.\n train_iter = chainer.iterators.MultiprocessIterator(\n train, args.batchsize, n_processes=args.loaderjob)\n val_iter = chainer.iterators.MultiprocessIterator(\n val, args.val_batchsize, repeat=False, n_processes=args.loaderjob)\n converter = dataset.concat_examples\n\n # Set up an optimizer\n optimizer = chainer.optimizers.MomentumSGD(lr=0.01, 
momentum=0.9)\n optimizer.setup(model)\n\n # Set up a trainer\n updater = training.updaters.StandardUpdater(\n train_iter, optimizer, converter=converter, device=device)\n if args.iterations:\n stop_trigger = (args.iterations, 'iteration')\n else:\n stop_trigger = (args.epoch, 'epoch')\n trainer = training.Trainer(updater, stop_trigger, args.out)\n\n val_interval = (1 if args.test else 100000), 'iteration'\n log_interval = ((1 if args.test else 10 if args.iterations else 1000),\n 'iteration')\n\n trainer.extend(extensions.Evaluator(val_iter, model, converter=converter,\n device=device), trigger=val_interval)\n # TODO(sonots): Temporarily disabled for chainerx. Fix it.\n if device.xp is not chainerx:\n trainer.extend(extensions.DumpGraph('main/loss'))\n trainer.extend(extensions.snapshot(), trigger=val_interval)\n trainer.extend(extensions.snapshot_object(\n model, 'model_iter_{.updater.iteration}'), trigger=val_interval)\n # Be careful to pass the interval directly to LogReport\n # (it determines when to emit log rather than when to read observations)\n trainer.extend(extensions.LogReport(trigger=log_interval))\n trainer.extend(extensions.observe_lr(), trigger=log_interval)\n trainer.extend(extensions.PrintReport([\n 'epoch', 'iteration', 'main/loss', 'validation/main/loss',\n 'main/accuracy', 'validation/main/accuracy', 'lr'\n ]), trigger=log_interval)\n trainer.extend(extensions.ProgressBar(update_interval=10))\n\n if args.resume:\n chainer.serializers.load_npz(args.resume, trainer)\n\n cuda_hook = function_hooks.CUDAProfileHook()\n with cuda_hook:\n trainer.run()\n\n with open('%s/log' % args.out) as f:\n logs = json.load(f)\n elapsed_times = []\n for prev, cur in zip(logs, logs[1:]):\n iters = cur['iteration'] - prev['iteration']\n elapsed = cur['elapsed_time'] - prev['elapsed_time']\n elapsed_times.append(elapsed / iters)\n sec_per_iter = sum(elapsed_times) / len(elapsed_times)\n print(sec_per_iter * 1000, 'msec/iter')\n print(args.batchsize / sec_per_iter, 'images/sec')\n\n\nif __name__ == '__main__':\n main()\n",
"# coding: utf-8\n\nimport chainer\n\n\nclass F(object):\n def __init__(self, a):\n self.a = a\n\n def g(self, x):\n return self.a + x\n\n\ndef h(x, y):\n return x + y\n\n\nclass A(chainer.Chain):\n\n def __init__(self):\n super(A, self).__init__()\n\n def forward(self, x, y, z):\n p = F(x).g(y)\n return h(p, z)\n\n\n# ======================================\n\nimport ch2o\nimport numpy as np\n\nif __name__ == '__main__':\n model = A()\n\n a = np.random.rand(3, 4).astype(np.float32)\n b = np.random.rand(3, 4).astype(np.float32)\n c = np.random.rand(3, 4).astype(np.float32)\n ch2o.generate_testcase(model, [a, b, c])\n",
"# coding: utf-8\n\nimport numpy as np\nimport chainer\nimport chainer.functions as F\n\n\nclass Softmax(chainer.Chain):\n def forward(self, x):\n return F.softmax(x)\n\n\nclass SoftmaxAxis(chainer.Chain):\n def forward(self, x):\n return F.softmax(x, axis=2)\n\n\n# ======================================\n\nimport testtools\nimport numpy as np\n\n\ndef main():\n np.random.seed(314)\n a = np.random.rand(3, 5, 4).astype(np.float32)\n\n testtools.generate_testcase(Softmax(), [a])\n\n testtools.generate_testcase(SoftmaxAxis(), [a], subname='axis')\n\n\nif __name__ == '__main__':\n main()\n",
"# coding: utf-8\n\nimport chainer\nimport chainer.functions as F\n\n\nclass ExpandDims(chainer.Chain):\n\n def __init__(self):\n super(ExpandDims, self).__init__()\n\n def forward(self, x):\n y = F.expand_dims(x, axis=1)\n y2 = F.expand_dims(x, 1)\n return y, y2\n\n\n# ======================================\n\nimport testtools\n\ndef main():\n import numpy as np\n np.random.seed(314)\n model = ExpandDims()\n\n x = np.random.rand(6, 4).astype(np.float32) - 0.5\n testtools.generate_testcase(model, [x])\n\n\nif __name__ == '__main__':\n main()\n",
"# coding: utf-8\n\nimport chainer\nimport chainer.links as L\n\n# Network definition\n\n\nclass A(chainer.Chain):\n\n def __init__(self, n_vocab, n_out):\n super(A, self).__init__()\n with self.init_scope():\n self.l1 = L.EmbedID(n_vocab, n_out)\n\n def forward(self, x):\n return self.l1(x)\n\n# ======================================\n\n\nimport ch2o\n\nif __name__ == '__main__':\n import numpy as np\n np.random.seed(314)\n\n n_vocab = 7\n n_out = 3\n n_batch = 5\n\n model = A(n_vocab, n_out)\n\n v = np.random.randint(n_vocab, size=n_batch)\n ch2o.generate_testcase(model, [v], backprop=True)\n",
"# coding: utf-8\n\nimport chainer\nimport chainer.functions as F\nimport numpy as np\n\n\nclass SplitAxis(chainer.Chain):\n def forward(self, xs, ilens):\n y1 = F.split_axis(xs, ilens, axis=0)\n # この時点でTuple!! なのでrange based for でlistにする\n y1 = [x for x in y1]\n return y1\n\n\nclass SplitAxis1(chainer.Chain):\n def forward(self, xs, ilens):\n y1 = F.split_axis(xs, ilens, axis=1)\n # この時点でTuple!! なのでrange based for でlistにする\n y1 = [x for x in y1]\n return y1\n\n\nclass SplitAxisSections(chainer.Chain):\n def forward(self, xs):\n y1 = F.split_axis(xs, 2, 0)\n # この時点でTuple!! なのでrange based for でlistにする\n y1 = [x for x in y1]\n return y1\n\n\nclass SplitAxisSections1(chainer.Chain):\n def forward(self, xs):\n y1 = F.split_axis(xs, 2, axis=1)\n # この時点でTuple!! なのでrange based for でlistにする\n y1 = [x for x in y1]\n return y1\n\n# ======================================\n\n\nimport ch2o\n\n\nif __name__ == '__main__':\n import numpy as np\n np.random.seed(12)\n\n xs = np.random.rand(20, 20).astype(np.float32)\n ilens = [1, 3, 5, 8, 14]\n\n ch2o.generate_testcase(SplitAxis, [xs, ilens])\n ch2o.generate_testcase(SplitAxis1, [xs, ilens], subname='axis1')\n ch2o.generate_testcase(SplitAxisSections, [xs], subname='sections')\n ch2o.generate_testcase(SplitAxisSections1, [xs], subname='sections_axis1')\n"
] | [
[
"numpy.random.rand",
"numpy.random.seed"
],
[
"numpy.array"
],
[
"numpy.zeros"
],
[
"numpy.array",
"numpy.random.rand",
"numpy.random.seed"
],
[
"numpy.load",
"numpy.average"
],
[
"numpy.random.rand"
],
[
"numpy.random.rand",
"numpy.random.seed"
],
[
"numpy.random.rand",
"numpy.random.seed"
],
[
"numpy.random.seed",
"numpy.random.randint"
],
[
"numpy.random.rand",
"numpy.random.seed"
]
] |
888dahong888/open3dTest | [
"cf28df9f9f5d24b1ca614414804a1c18d349467c"
] | [
"test01.py"
] | [
"#读写点云,网格,图片文件\n\nimport numpy as np\nimport open3d as o3d\npcd=o3d.io.read_point_cloud(\"data/rs1.pcd\")\n\nprint(pcd) #打印点云数量\n\n#可视化一下\no3d.visualization.draw_geometries([pcd])\n \n#下采样\ndownpcd = pcd.voxel_down_sample(voxel_size=0.05)\no3d.visualization.draw_geometries([downpcd])\n \n#计算法向量\ndownpcd.estimate_normals(search_param=o3d.geometry.KDTreeSearchParamHybrid(radius=0.1, max_nn=30))\no3d.visualization.draw_geometries([downpcd])\n \n#原来这样获取\nprint(\"Print a normal vector of the 0th point\")\nprint(downpcd.normals[0])\nprint(\"Print the normal vectors of the first 10 points\")\nprint(np.asarray(downpcd.normals)[:10, :])\n\no3d.io.write_point_cloud(\"data/copy_rs1.pcd\",pcd)\n\n#打印网格\nmesh=o3d.io.read_triangle_mesh(\"data/Box.stl\")\no3d.visualization.draw_geometries([mesh])\nprint(mesh)\no3d.io.write_triangle_mesh(\"data/copy_box.stl\",mesh)\n\n#读写图像\nimg=o3d.io.read_image('data/image.jpg')\nprint(img)\no3d.io.write_image(\"data/copy_img.jpg\",img)\n"
] | [
[
"numpy.asarray"
]
] |
INK-USC/shifted-label-distribution | [
"3cf2b7ced3b2e18234db405f6014f049c4830d71",
"3cf2b7ced3b2e18234db405f6014f049c4830d71"
] | [
"NeuralATT/train.py",
"Neural/train.py"
] | [
"'''\nTraining script with ramdom splitting dev set\n'''\n__author__ = 'Maosen'\nimport torch\nfrom model import Model, Wrapper\nimport utils\nfrom utils import Dataset\nimport argparse\nimport pickle\nimport numpy as np\nfrom tqdm import tqdm\nimport logging\nimport os\nimport random\n\ntorch.backends.cudnn.deterministic = True\n\n\ndef train(args):\n\t# Training\n\tlogging.info(str(args))\n\n\tmodel = Model(args, device, rel2id, emb_matrix)\n\twrapper = Wrapper(model, args, device, train_dset.rel2id)\n\n\tmax_dev_f1 = 0.0\n\ttest_result_on_max_dev_f1 = (0.0, 0.0, 0.0)\n\n\tfor iter in range(niter):\n\t\t# print('Iteration %d:' % iter)\n\t\tloss = 0.0\n\t\tfor idx, batch in enumerate(tqdm(train_dset.batched_data)):\n\t\t\tscope = train_dset.batched_scope[idx]\n\t\t\tloss_batch = wrapper.update(batch, scope)\n\t\t\tloss += loss_batch\n\t\tloss /= len(train_dset.batched_data)\n\n\t\tvalid_loss, (dev_prec, dev_recall, dev_f1), _, _, _ = wrapper.eval(dev_dset)\n\t\tlogging.info('Iteration %d, Train loss %f' % (iter, loss))\n\t\tlogging.info(\n\t\t\t'Dev loss: {:.4f}, P: {:.4f}, R: {:.4f}, F1: {:.4f}'.format(valid_loss, dev_prec, dev_recall,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tdev_f1))\n\t\ttest_loss, (test_prec, test_recall, test_f1), _, _, _ = wrapper.eval(test_dset)\n\t\tlogging.info(\n\t\t\t'Test loss: {:.4f}, P: {:.4f}, R: {:.4f}, F1: {:.4f}'.format(test_loss, test_prec, test_recall,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t test_f1))\n\t\tif dev_f1 > max_dev_f1:\n\t\t\tmax_dev_f1 = dev_f1\n\t\t\ttest_result_on_max_dev_f1 = (test_prec, test_recall, test_f1)\n\t\t\tsave_filename = os.path.join(args.save_dir, '%s_%d.pkl' % (args.info, runid))\n\t\t\twrapper.save(save_filename, iter)\n\n\t\twrapper.update_lr(valid_loss)\n\n\tlogging.info('Max dev F1: %f' % max_dev_f1)\n\ttest_p, test_r, test_f1 = test_result_on_max_dev_f1\n\tlogging.info('Test P, R, F1 on best epoch: {:.4f}, {:.4f}, {:.4f}'.format(test_p, test_r, test_f1))\n\tlogging.info('\\n')\n\n\treturn max_dev_f1, test_result_on_max_dev_f1\n\n\nif __name__ == '__main__':\n\n\tparser = argparse.ArgumentParser()\n\tparser.add_argument('--data_dir', type=str, default='data/neural_att/KBP')\n\tparser.add_argument('--vocab_dir', type=str, default='data/neural/vocab')\n\n\tparser.add_argument('--encoder', type=str, default='pcnn', help='Model')\n\n\tparser.add_argument('--emb_dim', type=int, default=300, help='Word embedding dimension.')\n\tparser.add_argument('--ner_dim', type=int, default=30, help='NER embedding dimension.')\n\tparser.add_argument('--pos_dim', type=int, default=30, help='POS embedding dimension.')\n\tparser.add_argument('--attn_dim', type=int, default=200, help='Attention size.')\n\tparser.add_argument('--position_dim', type=int, default=30, help='Position encoding dimension.')\n\n\tparser.add_argument('--hidden', type=int, default=230, help='RNN hidden state size.')\n\tparser.add_argument('--window_size', type=int, default=3, help='Convolution window size')\n\tparser.add_argument('--num_layers', type=int, default=2, help='Num of RNN layers.')\n\n\tparser.add_argument('--bidirectional', dest='bidirectional', action='store_true', help='Bidirectional RNN.')\n\tparser.set_defaults(bidirectional=True)\n\n\t# Data Loading & Pre-processing\n\tparser.add_argument('--lower', dest='lower', action='store_true', help='Lowercase all words.')\n\tparser.add_argument('--no-lower', dest='lower', action='store_false')\n\tparser.set_defaults(lower=True)\n\tparser.add_argument('--batch_size', type=int, default=64)\n\n\t# 
Optimization\n\tparser.add_argument('--lr', type=float, default=1.0, help='Applies to SGD and Adagrad.')\n\tparser.add_argument('--lr_decay', type=float, default=0.9)\n\tparser.add_argument('--num_epoch', type=int, default=30)\n\tparser.add_argument('--max_grad_norm', type=float, default=5.0, help='Gradient clipping.')\n\n\t# Optimization - Dropout\n\tparser.add_argument('--dropout', type=float, default=0.5, help='Input and RNN dropout rate.')\n\tparser.add_argument('--in_drop', type=float, default=0.5, help='Input dropout rate.')\n\tparser.add_argument('--intra_drop', type=float, default=0.3, help='Intra-layer dropout rate.')\n\tparser.add_argument('--out_drop', type=float, default=0.7, help='Output dropout rate.')\n\n\t# Other options\n\tparser.add_argument('--seed', type=int, default=7698)\n\tparser.add_argument('--repeat', type=int, default=5)\n\tparser.add_argument('--save_dir', type=str, default='./dumped_models', help='Root dir for saving models.')\n\tparser.add_argument('--info', type=str, default='KBP_default_ATT', help='Optional info for the experiment.')\n\n\targs = parser.parse_args()\n\n\trandom.seed(args.seed)\n\tnp.random.seed(args.seed)\n\ttorch.manual_seed(args.seed)\n\n\tlogger = logging.getLogger()\n\tlogger.setLevel(logging.INFO)\n\n\t# Load vocab file (id2word)\n\twith open(args.vocab_dir + '/vocab.pkl', 'rb') as f:\n\t\tvocab = pickle.load(f)\n\tword2id = {}\n\tfor idx, word in enumerate(vocab):\n\t\tword2id[word] = idx\n\n\t# Load word embedding\n\temb_file = args.vocab_dir + '/embedding.npy'\n\temb_matrix = np.load(emb_file)\n\tassert emb_matrix.shape[0] == len(vocab)\n\tassert emb_matrix.shape[1] == args.emb_dim\n\targs.vocab_size = len(vocab)\n\tniter = args.num_epoch\n\n\tdevice = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n\tprint('Using device: %s' % device.type)\n\n\tprint('Reading data......')\n\trel2id = utils.load_rel2id('%s/relation2id.json' % args.data_dir)\n\ttrain_filename = '%s/train.json' % args.data_dir\n\ttest_filename = '%s/test.json' % args.data_dir\n\tdev_filename = '%s/dev.json' % args.data_dir\n\ttrain_dset = Dataset(train_filename, args, word2id, device, rel2id=rel2id, shuffle=True, use_bag=True)\n\ttest_dset = Dataset(test_filename, args, word2id, device, rel2id=rel2id, use_bag=False)\n\tdev_dset = Dataset(dev_filename, args, word2id, device, rel2id=rel2id, use_bag=False)\n\n\tif not os.path.isdir(args.save_dir):\n\t\tos.makedirs(args.save_dir)\n\n\tfor runid in range(1, args.repeat + 1):\n\t\tlogging.info('Run model %d times......' % runid)\n\t\tdev_f1, test_result = train(args)\n\t\tlogging.info('')\n",
"'''\nTrain Neural RE Model\n'''\n__author__ = 'Maosen'\nimport os\nimport random\nimport torch\nimport logging\nimport argparse\nimport pickle\nimport numpy as np\nfrom tqdm import tqdm\n\nimport utils\nfrom model import Model\nfrom utils import Dataset\n\ntorch.backends.cudnn.deterministic = True\n\n\ndef train(args):\n\tmodel = Model(args, device, train_dset.rel2id, word_emb=emb_matrix)\n\tlogging.info('Model: %s, Parameter Number: %d' % (args.model, model.count_parameters()))\n\n\tmax_dev_f1 = 0.0\n\ttest_result_on_max_dev_f1 = (0.0, 0.0, 0.0)\n\n\tfor iter in range(niter):\n\t\tloss = 0.0\n\n\t\tif args.fix_bias:\n\t\t\tmodel.set_bias(train_lp)\n\n\t\tfor idx, batch in enumerate(tqdm(train_dset.batched_data)):\n\t\t\tloss_batch = model.update(batch)\n\t\t\tloss += loss_batch\n\t\tloss /= len(train_dset.batched_data)\n\n\t\tvalid_loss, (dev_prec, dev_recall, dev_f1) = model.eval(dev_dset)\n\t\tlogging.info('Iteration %d, Train loss %f' % (iter, loss))\n\t\tlogging.info(\n\t\t\t'Dev loss: {:.4f}, P: {:.4f}, R: {:.4f}, F1: {:.4f}'.format(valid_loss, dev_prec, dev_recall,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t dev_f1))\n\n\t\tif args.fix_bias:\n\t\t\tmodel.set_bias(test_lp)\n\t\t\tlogging.warn('Currently test evaluation is using gold test label distribution, only for reference.')\n\n\t\ttest_loss, (test_prec, test_recall, test_f1) = model.eval(test_dset)\n\t\tlogging.info(\n\t\t\t'Test loss: {:.4f}, P: {:.4f}, R: {:.4f}, F1: {:.4f}'.format(test_loss, test_prec, test_recall,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t test_f1))\n\t\tif dev_f1 > max_dev_f1:\n\t\t\tmax_dev_f1 = dev_f1\n\t\t\ttest_result_on_max_dev_f1 = (test_prec, test_recall, test_f1)\n\n\t\t\t# the saved model should have train_lp as bias.\n\t\t\tif args.fix_bias:\n\t\t\t\tmodel.set_bias(train_lp)\n\t\t\tsave_filename = os.path.join(args.save_dir, '%s_%d.pkl' % (args.info, runid))\n\t\t\tmodel.save(save_filename, iter)\n\n\t\tmodel.update_lr(valid_loss)\n\n\tlogging.info('Max Dev F1: %.4f' % max_dev_f1)\n\ttest_p, test_r, test_f1 = test_result_on_max_dev_f1\n\tlogging.info('Test P, R, F1 on best epoch: {:.4f}, {:.4f}, {:.4f}'.format(test_p, test_r, test_f1))\n\tlogging.info('\\n')\n\n\treturn max_dev_f1, test_result_on_max_dev_f1\n\nif __name__ == '__main__':\n\n\tparser = argparse.ArgumentParser()\n\tparser.add_argument('--data_dir', type=str, default='data/neural/KBP', help='specify dataset with directory')\n\tparser.add_argument('--vocab_dir', type=str, default='data/neural/vocab', help='directory storing word2id file and word embeddings.')\n\n\t# Model Specs\n\tparser.add_argument('--model', type=str, default='bgru', help='model name, (cnn|pcnn|bgru|lstm|palstm)')\n\n\tparser.add_argument('--emb_dim', type=int, default=300, help='Word embedding dimension.')\n\tparser.add_argument('--ner_dim', type=int, default=30, help='NER embedding dimension.')\n\tparser.add_argument('--pos_dim', type=int, default=30, help='POS embedding dimension.')\n\tparser.add_argument('--attn_dim', type=int, default=200, help='Attention size.')\n\tparser.add_argument('--position_dim', type=int, default=30, help='Position encoding dimension.')\n\n\tparser.add_argument('--hidden', type=int, default=200, help='RNN hidden state size.')\n\tparser.add_argument('--window_size', type=int, default=3, help='Convolution window size')\n\tparser.add_argument('--num_layers', type=int, default=2, help='Num of RNN layers.')\n\n\tparser.add_argument('--bidirectional', dest='bidirectional', action='store_true', help='Bidirectional RNN.' 
)\n\tparser.set_defaults(bidirectional=True)\n\tparser.add_argument('--bias', dest='bias', action='store_true', help='Whether Bias term is used for linear layer.')\n\tparser.set_defaults(bias=True)\n\tparser.add_argument('--fix_bias', dest='fix_bias', action='store_true', help='Train model with fix bias (not fixed by default).')\n\tparser.set_defaults(fix_bias=False)\n\n\t# Data Loading & Pre-processing\n\tparser.add_argument('--mask_no_type', dest='mask_with_type', action='store_false')\n\tparser.set_defaults(mask_with_type=True)\n\tparser.add_argument('--lower', dest='lower', action='store_true', help='Lowercase all words.')\n\tparser.add_argument('--no-lower', dest='lower', action='store_false')\n\tparser.set_defaults(lower=False)\n\tparser.add_argument('--batch_size', type=int, default=64)\n\n\t# Optimization\n\tparser.add_argument('--lr', type=float, default=1.0, help='Applies to SGD and Adagrad.')\n\tparser.add_argument('--lr_decay', type=float, default=0.9)\n\tparser.add_argument('--num_epoch', type=int, default=30)\n\tparser.add_argument('--max_grad_norm', type=float, default=5.0, help='Gradient clipping.')\n\n\t# Optimization - Dropout\n\tparser.add_argument('--in_drop', type=float, default=0.6, help='Input dropout rate.')\n\tparser.add_argument('--intra_drop', type=float, default=0.1, help='Intra-layer dropout rate.')\n\tparser.add_argument('--state_drop', type=float, default=0.5, help='RNN state dropout rate.')\n\tparser.add_argument('--out_drop', type=float, default=0.6, help='Output dropout rate.')\n\n\t# Other options\n\tparser.add_argument('--seed', type=int, default=7698)\n\tparser.add_argument('--repeat', type=int, default=5, help='train the model for multiple times.')\n\tparser.add_argument('--save_dir', type=str, default='./dumped_models', help='Root dir for saving models.')\n\tparser.add_argument('--info', type=str, default='KBP_default', help='description, also used as filename to save model.')\n\tparser.add_argument('--cuda', type=bool, default=torch.cuda.is_available())\n\tparser.add_argument('--cpu', action='store_true', help='Ignore CUDA.')\n\n\targs = parser.parse_args()\n\n\t# Set random seed\n\trandom.seed(args.seed)\n\tnp.random.seed(args.seed)\n\ttorch.manual_seed(args.seed)\n\n\tlogger = logging.getLogger()\n\tlogger.setLevel(logging.INFO)\n\n\t# Load vocab file (id2word)\n\twith open(args.vocab_dir + '/vocab.pkl', 'rb') as f:\n\t\tvocab = pickle.load(f)\n\tword2id = {}\n\tfor idx, word in enumerate(vocab):\n\t\tword2id[word] = idx\n\n\t# Load word embedding\n\temb_file = args.vocab_dir + '/embedding.npy'\n\temb_matrix = np.load(emb_file)\n\tassert emb_matrix.shape[0] == len(vocab)\n\tassert emb_matrix.shape[1] == args.emb_dim\n\targs.vocab_size = len(vocab)\n\tniter = args.num_epoch\n\n\tif args.cpu:\n\t\targs.cuda = False\n\tdevice = torch.device(\"cuda:0\" if args.cuda else \"cpu\")\n\tprint('Using device: %s' % device.type)\n\n\t# Load data.\n\tprint('Reading data......')\n\trel2id = utils.load_rel2id('%s/relation2id.json' % args.data_dir)\n\ttrain_filename = '%s/train.json' % args.data_dir\n\ttest_filename = '%s/test.json' % args.data_dir\n\tdev_filename = '%s/dev.json' % args.data_dir\n\ttrain_dset = Dataset(train_filename, args, word2id, device, rel2id=rel2id, shuffle=True, mask_with_type=args.mask_with_type)\n\tdev_dset = Dataset(dev_filename, args, word2id, device, rel2id=rel2id, mask_with_type=args.mask_with_type)\n\ttest_dset = Dataset(test_filename, args, word2id, device, rel2id=rel2id, mask_with_type=args.mask_with_type)\n\n\t# Get label 
distribution from train set. Used in fix_bias.\n\ttrain_lp = torch.from_numpy(train_dset.log_prior).to(device)\n\ttest_lp = torch.from_numpy(test_dset.log_prior).to(device)\n\n\tif not os.path.isdir(args.save_dir):\n\t\tos.makedirs(args.save_dir)\n\n\tfor runid in range(1, args.repeat + 1):\n\t\tlogging.info('Run model #%d time......' % runid)\n\t\tdev_f1, test_result = train(args)\n\t\tlogging.info('')\n"
] | [
[
"torch.manual_seed",
"numpy.load",
"numpy.random.seed",
"torch.cuda.is_available"
],
[
"numpy.random.seed",
"torch.manual_seed",
"torch.from_numpy",
"torch.cuda.is_available",
"torch.device",
"numpy.load"
]
] |
xiaoMrzhang/mmdetection3d | [
"1e7695297e60afe3e09834de1582c3437086ed49",
"1e7695297e60afe3e09834de1582c3437086ed49",
"1e7695297e60afe3e09834de1582c3437086ed49",
"1e7695297e60afe3e09834de1582c3437086ed49",
"1e7695297e60afe3e09834de1582c3437086ed49"
] | [
"mmdet3d/models/backbones/second_ran.py",
"tools/data_converter/waymo_converter.py",
"mmdet3d/datasets/waymo_dataset.py",
"mmdet3d/datasets/pipelines/formating.py",
"mmdet3d/models/necks/second_fpn_ran.py"
] | [
"from mmcv.cnn import build_conv_layer, build_norm_layer\nfrom mmcv.runner import load_checkpoint, force_fp32\nfrom torch import nn as nn\nimport torch\nimport numpy as np\n\nfrom mmdet.models import BACKBONES\nfrom mmdet3d.utils.soft_mask import SoftMask\n\[email protected]_module()\nclass SECOND_RAN(nn.Module):\n \"\"\"Backbone network for SECOND with residual attention network\n\n Args:\n in_channels (int): Input channels.\n out_channels (list[int]): Output channels for multi-scale feature maps.\n layer_nums (list[int]): Number of layers in each stage.\n layer_strides (list[int]): Strides of each stage.\n norm_cfg (dict): Config dict of normalization layers.\n conv_cfg (dict): Config dict of convolutional layers.\n \"\"\"\n\n def __init__(self,\n in_channels=128,\n out_channels=[128, 128, 256],\n layer_nums=[3, 5, 5],\n layer_strides=[2, 2, 2],\n norm_cfg=dict(type='BN', eps=1e-3, momentum=0.01),\n conv_cfg=dict(type='Conv2d', bias=False)):\n super(SECOND_RAN, self).__init__()\n assert len(layer_strides) == len(layer_nums)\n assert len(out_channels) == len(layer_nums)\n\n in_filters = [in_channels, *out_channels[:-1]]\n # note that when stride > 1, conv2d with same padding isn't\n # equal to pad-conv2d. we should use pad-conv2d.\n blocks = []\n for i, layer_num in enumerate(layer_nums):\n block = [\n build_conv_layer(\n conv_cfg,\n in_filters[i],\n out_channels[i],\n 3,\n stride=layer_strides[i],\n padding=1),\n build_norm_layer(norm_cfg, out_channels[i])[1],\n nn.ReLU(inplace=True),\n ]\n for j in range(layer_num):\n block.append(\n build_conv_layer(\n conv_cfg,\n out_channels[i],\n out_channels[i],\n 3,\n padding=1))\n block.append(build_norm_layer(norm_cfg, out_channels[i])[1])\n block.append(nn.ReLU(inplace=True))\n\n block = nn.Sequential(*block)\n blocks.append(block)\n\n self.blocks = nn.ModuleList(blocks)\n\n first_layer_conv = build_conv_layer(\n conv_cfg,\n in_filters[0],\n out_channels[0],\n 3,\n stride=2,\n padding=1)\n first_bn = build_norm_layer(norm_cfg, out_channels[0])[1]\n first_relu = nn.ReLU(inplace=True)\n soft_mask = SoftMask(in_channels, [128, 128, 128], out_type=4)\n self.soft_mask_block = nn.Sequential(first_layer_conv, first_bn, first_relu, soft_mask)\n\n def init_weights(self, pretrained=None):\n \"\"\"Initialize weights of the 2D backbone.\"\"\"\n # Do not initialize the conv layers\n # to follow the original implementation\n if isinstance(pretrained, str):\n from mmdet3d.utils import get_root_logger\n logger = get_root_logger()\n load_checkpoint(self, pretrained, strict=False, logger=logger)\n\n def forward(self, x):\n \"\"\"Forward function.\n\n Args:\n x (torch.Tensor): Input with shape (N, C, H, W).\n\n Returns:\n tuple[torch.Tensor]: Multi-scale features.\n \"\"\"\n masks = self.soft_mask_block(x)\n outs = []\n for i in range(len(self.blocks)):\n x = self.blocks[i](x)\n # x = torch.mul(x, masks[i]) + x\n outs.append(x)\n return tuple([outs, masks])\n\n @force_fp32(apply_to=('prediction'))\n def focal_loss(self, prediction, target):\n loss_dict = dict()\n self.alpha = 2\n self.beta = 4\n\n positive_index = target.eq(1).float()\n negative_index = target.lt(1).float()\n negative_weights = torch.pow(1 - target, self.beta)\n loss = 0.\n # prediction = torch.clamp(prediction, 1e-3, .999)\n positive_loss = torch.log(prediction + 1e-6) \\\n * torch.pow(1 - prediction, self.alpha) * positive_index\n negative_loss = torch.log(1 - prediction + 1e-6) \\\n * torch.pow(prediction, self.alpha) * negative_weights * negative_index\n\n num_positive = 
positive_index.float().sum()\n positive_loss = positive_loss.sum()\n negative_loss = negative_loss.sum()\n\n if num_positive == 0:\n loss -= negative_loss\n else:\n loss -= (positive_loss + negative_loss) / num_positive\n loss_dict[\"loss_heatmap\"] = loss\n\n # dice loss\n # intersection = (target * prediction).sum(axis=[1,2,3])\n # dice_score = (2 * intersection + 1) / (target.sum(axis=[1,2,3]) + prediction.sum(axis=[1,2,3]) + 1)\n # dice_loss = 1 - torch.mean(dice_score, axis=0)\n # loss_dict[\"loss_dice\"] = dice_loss * 0.2\n # if torch.isnan(loss) or torch.isnan(dice_loss):\n # import pdb;pdb.set_trace()\n\n return loss_dict\n\n @force_fp32(apply_to=('prediction'))\n def loss(self, prediction, target):\n positive_index = target.eq(1).float()\n loss = 0.\n loss_dict = dict()\n\n positive_loss = torch.log(prediction + 1e-6) * positive_index\n negative_loss = torch.log(1 - prediction + 1e-6) * (1 - positive_index)\n num_positive = positive_index.float().sum()\n num_negative = (1 - positive_index).float().sum()\n positive_loss = positive_loss.sum()\n negative_loss = negative_loss.sum()\n\n bec_loss = -(positive_loss / (num_positive+1) + negative_loss / (num_negative+1))\n loss_dict[\"loss_heatmap\"] = bec_loss\n\n # intersection = (target * prediction).sum(axis=[1,2,3])\n # dice_score = (2 * intersection + 1) / (target.sum(axis=[1,2,3]) + prediction.sum(axis=[1,2,3]) + 1)\n # dice_loss = 1 - dice_score.mean()\n # loss_dict[\"loss_dice\"] = dice_loss\n\n return loss_dict\n",
"r\"\"\"Adapted from `Waymo to KITTI converter\n <https://github.com/caizhongang/waymo_kitti_converter>`_.\n\"\"\"\n\ntry:\n from waymo_open_dataset import dataset_pb2\nexcept ImportError:\n raise ImportError(\n 'Please run \"pip install waymo-open-dataset-tf-2-1-0==1.2.0\" '\n 'to install the official devkit first.')\n\nimport mmcv\nimport numpy as np\nimport tensorflow as tf\nfrom glob import glob\nfrom os.path import join\nfrom waymo_open_dataset.utils import range_image_utils, transform_utils\nfrom waymo_open_dataset.utils.frame_utils import \\\n parse_range_image_and_camera_projection\n\n\nclass Waymo2KITTI(object):\n \"\"\"Waymo to KITTI converter.\n\n This class serves as the converter to change the waymo raw data to KITTI\n format.\n\n Args:\n load_dir (str): Directory to load waymo raw data.\n save_dir (str): Directory to save data in KITTI format.\n prefix (str): Prefix of filename. In general, 0 for training, 1 for\n validation and 2 for testing.\n workers (str): Number of workers for the parallel process.\n test_mode (bool): Whether in the test_mode. Default: False.\n \"\"\"\n\n def __init__(self,\n load_dir,\n save_dir,\n prefix,\n workers=64,\n test_mode=False):\n self.filter_empty_3dboxes = True\n self.filter_no_label_zone_points = True\n\n self.selected_waymo_classes = ['VEHICLE', 'PEDESTRIAN', 'CYCLIST']\n\n # Only data collected in specific locations will be converted\n # If set None, this filter is disabled\n # Available options: location_sf (main dataset)\n self.selected_waymo_locations = None\n self.save_track_id = False\n\n # turn on eager execution for older tensorflow versions\n if int(tf.__version__.split('.')[0]) < 2:\n tf.enable_eager_execution()\n\n self.lidar_list = [\n '_FRONT', '_FRONT_RIGHT', '_FRONT_LEFT', '_SIDE_RIGHT',\n '_SIDE_LEFT'\n ]\n self.type_list = [\n 'UNKNOWN', 'VEHICLE', 'PEDESTRIAN', 'SIGN', 'CYCLIST'\n ]\n self.waymo_to_kitti_class_map = {\n 'UNKNOWN': 'DontCare',\n 'PEDESTRIAN': 'Pedestrian',\n 'VEHICLE': 'Car',\n 'CYCLIST': 'Cyclist',\n 'SIGN': 'Sign' # not in kitti\n }\n\n self.load_dir = load_dir\n self.save_dir = save_dir\n self.prefix = prefix\n self.workers = int(workers)\n self.test_mode = test_mode\n\n self.tfrecord_pathnames = sorted(\n glob(join(self.load_dir, '*.tfrecord')))\n\n self.label_save_dir = f'{self.save_dir}/label_'\n self.label_all_save_dir = f'{self.save_dir}/label_all'\n self.image_save_dir = f'{self.save_dir}/image_'\n self.calib_save_dir = f'{self.save_dir}/calib'\n self.point_cloud_save_dir = f'{self.save_dir}/velodyne'\n self.pose_save_dir = f'{self.save_dir}/pose'\n\n self.create_folder()\n\n def convert(self):\n \"\"\"Convert action.\"\"\"\n print('Start converting ...')\n mmcv.track_parallel_progress(self.convert_one, range(len(self)),\n self.workers)\n print('\\nFinished ...')\n\n def convert_one(self, file_idx):\n \"\"\"Convert action for single file.\n\n Args:\n file_idx (int): Index of the file to be converted.\n \"\"\"\n pathname = self.tfrecord_pathnames[file_idx]\n dataset = tf.data.TFRecordDataset(pathname, compression_type='')\n\n for frame_idx, data in enumerate(dataset):\n\n frame = dataset_pb2.Frame()\n frame.ParseFromString(bytearray(data.numpy()))\n if (self.selected_waymo_locations is not None\n and frame.context.stats.location\n not in self.selected_waymo_locations):\n continue\n\n # self.save_image(frame, file_idx, frame_idx)\n self.save_calib(frame, file_idx, frame_idx)\n self.save_lidar(frame, file_idx, frame_idx)\n self.save_pose(frame, file_idx, frame_idx)\n\n if not self.test_mode:\n 
self.save_label(frame, file_idx, frame_idx)\n\n def __len__(self):\n \"\"\"Length of the filename list.\"\"\"\n return len(self.tfrecord_pathnames)\n\n def save_image(self, frame, file_idx, frame_idx):\n \"\"\"Parse and save the images in png format.\n\n Args:\n frame (:obj:`Frame`): Open dataset frame proto.\n file_idx (int): Current file index.\n frame_idx (int): Current frame index.\n \"\"\"\n for img in frame.images:\n # only save front camera images\n if img.name != 1:\n continue\n img_path = f'{self.image_save_dir}{str(img.name - 1)}/' + \\\n f'{self.prefix}{str(file_idx).zfill(3)}' + \\\n f'{str(frame_idx).zfill(3)}.png'\n img = mmcv.imfrombytes(img.image)\n mmcv.imwrite(img, img_path)\n\n def save_calib(self, frame, file_idx, frame_idx):\n \"\"\"Parse and save the calibration data.\n\n Args:\n frame (:obj:`Frame`): Open dataset frame proto.\n file_idx (int): Current file index.\n frame_idx (int): Current frame index.\n \"\"\"\n # waymo front camera to kitti reference camera\n T_front_cam_to_ref = np.array([[0.0, -1.0, 0.0], [0.0, 0.0, -1.0],\n [1.0, 0.0, 0.0]])\n camera_calibs = []\n R0_rect = [f'{i:e}' for i in np.eye(3).flatten()]\n Tr_velo_to_cams = []\n calib_context = ''\n\n for camera in frame.context.camera_calibrations:\n # extrinsic parameters\n T_cam_to_vehicle = np.array(camera.extrinsic.transform).reshape(\n 4, 4)\n T_vehicle_to_cam = np.linalg.inv(T_cam_to_vehicle)\n Tr_velo_to_cam = \\\n self.cart_to_homo(T_front_cam_to_ref) @ T_vehicle_to_cam\n if camera.name == 1: # FRONT = 1, see dataset.proto for details\n self.T_velo_to_front_cam = Tr_velo_to_cam.copy()\n Tr_velo_to_cam = Tr_velo_to_cam[:3, :].reshape((12, ))\n Tr_velo_to_cams.append([f'{i:e}' for i in Tr_velo_to_cam])\n\n # intrinsic parameters\n camera_calib = np.zeros((3, 4))\n camera_calib[0, 0] = camera.intrinsic[0]\n camera_calib[1, 1] = camera.intrinsic[1]\n camera_calib[0, 2] = camera.intrinsic[2]\n camera_calib[1, 2] = camera.intrinsic[3]\n camera_calib[2, 2] = 1\n camera_calib = list(camera_calib.reshape(12))\n camera_calib = [f'{i:e}' for i in camera_calib]\n camera_calibs.append(camera_calib)\n\n # all camera ids are saved as id-1 in the result because\n # camera 0 is unknown in the proto\n for i in range(5):\n calib_context += 'P' + str(i) + ': ' + \\\n ' '.join(camera_calibs[i]) + '\\n'\n calib_context += 'R0_rect' + ': ' + ' '.join(R0_rect) + '\\n'\n for i in range(5):\n calib_context += 'Tr_velo_to_cam_' + str(i) + ': ' + \\\n ' '.join(Tr_velo_to_cams[i]) + '\\n'\n\n with open(\n f'{self.calib_save_dir}/{self.prefix}' +\n f'{str(file_idx).zfill(3)}{str(frame_idx).zfill(3)}.txt',\n 'w+') as fp_calib:\n fp_calib.write(calib_context)\n fp_calib.close()\n\n def save_lidar(self, frame, file_idx, frame_idx):\n \"\"\"Parse and save the lidar data in psd format.\n\n Args:\n frame (:obj:`Frame`): Open dataset frame proto.\n file_idx (int): Current file index.\n frame_idx (int): Current frame index.\n \"\"\"\n range_images, camera_projections, range_image_top_pose = \\\n parse_range_image_and_camera_projection(frame)\n\n # First return\n points_0, cp_points_0, intensity_0, elongation_0 = \\\n self.convert_range_image_to_point_cloud(\n frame,\n range_images,\n camera_projections,\n range_image_top_pose,\n ri_index=0\n )\n points_0 = np.concatenate(points_0, axis=0)\n intensity_0 = np.concatenate(intensity_0, axis=0)\n elongation_0 = np.concatenate(elongation_0, axis=0)\n\n # Second return\n points_1, cp_points_1, intensity_1, elongation_1 = \\\n self.convert_range_image_to_point_cloud(\n frame,\n 
range_images,\n camera_projections,\n range_image_top_pose,\n ri_index=1\n )\n points_1 = np.concatenate(points_1, axis=0)\n intensity_1 = np.concatenate(intensity_1, axis=0)\n elongation_1 = np.concatenate(elongation_1, axis=0)\n\n points = np.concatenate([points_0, points_1], axis=0)\n intensity = np.concatenate([intensity_0, intensity_1], axis=0)\n elongation = np.concatenate([elongation_0, elongation_1], axis=0)\n timestamp = frame.timestamp_micros * np.ones_like(intensity)\n\n # concatenate x,y,z, intensity, elongation, timestamp (6-dim)\n point_cloud = np.column_stack(\n (points, intensity, elongation, timestamp))\n\n pc_path = f'{self.point_cloud_save_dir}/{self.prefix}' + \\\n f'{str(file_idx).zfill(3)}{str(frame_idx).zfill(3)}.bin'\n point_cloud.astype(np.float32).tofile(pc_path)\n\n def save_label(self, frame, file_idx, frame_idx):\n \"\"\"Parse and save the label data in txt format.\n The relation between waymo and kitti coordinates is noteworthy:\n 1. x, y, z correspond to l, w, h (waymo) -> l, h, w (kitti)\n 2. x-y-z: front-left-up (waymo) -> right-down-front(kitti)\n 3. bbox origin at volumetric center (waymo) -> bottom center (kitti)\n 4. rotation: +x around y-axis (kitti) -> +x around z-axis (waymo)\n\n Args:\n frame (:obj:`Frame`): Open dataset frame proto.\n file_idx (int): Current file index.\n frame_idx (int): Current frame index.\n \"\"\"\n fp_label_all = open(\n f'{self.label_all_save_dir}/{self.prefix}' +\n f'{str(file_idx).zfill(3)}{str(frame_idx).zfill(3)}.txt', 'w+')\n id_to_bbox = dict()\n id_to_name = dict()\n for labels in frame.projected_lidar_labels:\n name = labels.name\n for label in labels.labels:\n # TODO: need a workaround as bbox may not belong to front cam\n bbox = [\n label.box.center_x - label.box.length / 2,\n label.box.center_y - label.box.width / 2,\n label.box.center_x + label.box.length / 2,\n label.box.center_y + label.box.width / 2\n ]\n id_to_bbox[label.id] = bbox\n id_to_name[label.id] = name - 1\n\n for obj in frame.laser_labels:\n bounding_box = None\n name = None\n id = obj.id\n for lidar in self.lidar_list:\n if id + lidar in id_to_bbox:\n bounding_box = id_to_bbox.get(id + lidar)\n name = str(id_to_name.get(id + lidar))\n break\n\n if bounding_box is None or name is None:\n name = '0'\n bounding_box = (0, 0, 0, 0)\n\n my_type = self.type_list[obj.type]\n\n if my_type not in self.selected_waymo_classes:\n continue\n\n if self.filter_empty_3dboxes and obj.num_lidar_points_in_box < 1:\n continue\n\n my_type = self.waymo_to_kitti_class_map[my_type]\n\n height = obj.box.height\n width = obj.box.width\n length = obj.box.length\n\n x = obj.box.center_x\n y = obj.box.center_y\n z = obj.box.center_z - height / 2\n\n # project bounding box to the virtual reference frame\n pt_ref = self.T_velo_to_front_cam @ \\\n np.array([x, y, z, 1]).reshape((4, 1))\n x, y, z, _ = pt_ref.flatten().tolist()\n\n rotation_y = -obj.box.heading - np.pi / 2\n track_id = obj.id\n\n # not available\n truncated = 0\n occluded = 0\n alpha = -10\n\n line = my_type + \\\n ' {} {} {} {} {} {} {} {} {} {} {} {} {} {}\\n'.format(\n round(truncated, 2), occluded, round(alpha, 2),\n round(bounding_box[0], 2), round(bounding_box[1], 2),\n round(bounding_box[2], 2), round(bounding_box[3], 2),\n round(height, 2), round(width, 2), round(length, 2),\n round(x, 2), round(y, 2), round(z, 2),\n round(rotation_y, 2))\n\n if self.save_track_id:\n line_all = line[:-1] + ' ' + name + ' ' + track_id + '\\n'\n else:\n line_all = line[:-1] + ' ' + name + '\\n'\n\n fp_label = open(\n 
f'{self.label_save_dir}{name}/{self.prefix}' +\n f'{str(file_idx).zfill(3)}{str(frame_idx).zfill(3)}.txt', 'a')\n fp_label.write(line)\n fp_label.close()\n\n fp_label_all.write(line_all)\n\n fp_label_all.close()\n\n def save_pose(self, frame, file_idx, frame_idx):\n \"\"\"Parse and save the pose data.\n\n Note that SDC's own pose is not included in the regular training\n of KITTI dataset. KITTI raw dataset contains ego motion files\n but are not often used. Pose is important for algorithms that\n take advantage of the temporal information.\n\n Args:\n frame (:obj:`Frame`): Open dataset frame proto.\n file_idx (int): Current file index.\n frame_idx (int): Current frame index.\n \"\"\"\n pose = np.array(frame.pose.transform).reshape(4, 4)\n np.savetxt(\n join(f'{self.pose_save_dir}/{self.prefix}' +\n f'{str(file_idx).zfill(3)}{str(frame_idx).zfill(3)}.txt'),\n pose)\n\n def create_folder(self):\n \"\"\"Create folder for data preprocessing.\"\"\"\n if not self.test_mode:\n dir_list1 = [\n self.label_all_save_dir, self.calib_save_dir,\n self.point_cloud_save_dir, self.pose_save_dir\n ]\n dir_list2 = [self.label_save_dir, self.image_save_dir]\n else:\n dir_list1 = [\n self.calib_save_dir, self.point_cloud_save_dir,\n self.pose_save_dir\n ]\n dir_list2 = [self.image_save_dir]\n for d in dir_list1:\n mmcv.mkdir_or_exist(d)\n for d in dir_list2:\n for i in range(5):\n mmcv.mkdir_or_exist(f'{d}{str(i)}')\n\n def convert_range_image_to_point_cloud(self,\n frame,\n range_images,\n camera_projections,\n range_image_top_pose,\n ri_index=0):\n \"\"\"Convert range images to point cloud.\n\n Args:\n frame (:obj:`Frame`): Open dataset frame.\n range_images (dict): Mapping from laser_name to list of two\n range images corresponding with two returns.\n camera_projections (dict): Mapping from laser_name to list of two\n camera projections corresponding with two returns.\n range_image_top_pose (:obj:`Transform`): Range image pixel pose for\n top lidar.\n ri_index (int): 0 for the first return, 1 for the second return.\n Default: 0.\n\n Returns:\n tuple[list[np.ndarray]]: (List of points with shape [N, 3],\n camera projections of points with shape [N, 6], intensity\n with shape [N, 1], elongation with shape [N, 1]). 
All the\n lists have the length of lidar numbers (5).\n \"\"\"\n calibrations = sorted(\n frame.context.laser_calibrations, key=lambda c: c.name)\n points = []\n cp_points = []\n intensity = []\n elongation = []\n\n frame_pose = tf.convert_to_tensor(\n value=np.reshape(np.array(frame.pose.transform), [4, 4]))\n # [H, W, 6]\n range_image_top_pose_tensor = tf.reshape(\n tf.convert_to_tensor(value=range_image_top_pose.data),\n range_image_top_pose.shape.dims)\n # [H, W, 3, 3]\n range_image_top_pose_tensor_rotation = \\\n transform_utils.get_rotation_matrix(\n range_image_top_pose_tensor[..., 0],\n range_image_top_pose_tensor[..., 1],\n range_image_top_pose_tensor[..., 2])\n range_image_top_pose_tensor_translation = \\\n range_image_top_pose_tensor[..., 3:]\n range_image_top_pose_tensor = transform_utils.get_transform(\n range_image_top_pose_tensor_rotation,\n range_image_top_pose_tensor_translation)\n for c in calibrations:\n range_image = range_images[c.name][ri_index]\n if len(c.beam_inclinations) == 0:\n beam_inclinations = range_image_utils.compute_inclination(\n tf.constant(\n [c.beam_inclination_min, c.beam_inclination_max]),\n height=range_image.shape.dims[0])\n else:\n beam_inclinations = tf.constant(c.beam_inclinations)\n\n beam_inclinations = tf.reverse(beam_inclinations, axis=[-1])\n extrinsic = np.reshape(np.array(c.extrinsic.transform), [4, 4])\n\n range_image_tensor = tf.reshape(\n tf.convert_to_tensor(value=range_image.data),\n range_image.shape.dims)\n pixel_pose_local = None\n frame_pose_local = None\n if c.name == dataset_pb2.LaserName.TOP:\n pixel_pose_local = range_image_top_pose_tensor\n pixel_pose_local = tf.expand_dims(pixel_pose_local, axis=0)\n frame_pose_local = tf.expand_dims(frame_pose, axis=0)\n range_image_mask = range_image_tensor[..., 0] > 0\n\n if self.filter_no_label_zone_points:\n nlz_mask = range_image_tensor[..., 3] != 1.0 # 1.0: in NLZ\n range_image_mask = range_image_mask & nlz_mask\n\n range_image_cartesian = \\\n range_image_utils.extract_point_cloud_from_range_image(\n tf.expand_dims(range_image_tensor[..., 0], axis=0),\n tf.expand_dims(extrinsic, axis=0),\n tf.expand_dims(tf.convert_to_tensor(\n value=beam_inclinations), axis=0),\n pixel_pose=pixel_pose_local,\n frame_pose=frame_pose_local)\n\n range_image_cartesian = tf.squeeze(range_image_cartesian, axis=0)\n points_tensor = tf.gather_nd(range_image_cartesian,\n tf.compat.v1.where(range_image_mask))\n\n cp = camera_projections[c.name][ri_index]\n cp_tensor = tf.reshape(\n tf.convert_to_tensor(value=cp.data), cp.shape.dims)\n cp_points_tensor = tf.gather_nd(\n cp_tensor, tf.compat.v1.where(range_image_mask))\n points.append(points_tensor.numpy())\n cp_points.append(cp_points_tensor.numpy())\n\n intensity_tensor = tf.gather_nd(range_image_tensor[..., 1],\n tf.where(range_image_mask))\n intensity.append(intensity_tensor.numpy())\n\n elongation_tensor = tf.gather_nd(range_image_tensor[..., 2],\n tf.where(range_image_mask))\n elongation.append(elongation_tensor.numpy())\n\n return points, cp_points, intensity, elongation\n\n def cart_to_homo(self, mat):\n \"\"\"Convert transformation matrix in Cartesian coordinates to\n homogeneous format.\n\n Args:\n mat (np.ndarray): Transformation matrix in Cartesian.\n The input matrix shape is 3x3 or 3x4.\n\n Returns:\n np.ndarray: Transformation matrix in homogeneous format.\n The matrix shape is 4x4.\n \"\"\"\n ret = np.eye(4)\n if mat.shape == (3, 3):\n ret[:3, :3] = mat\n elif mat.shape == (3, 4):\n ret[:3, :] = mat\n else:\n raise ValueError(mat.shape)\n 
return ret\n",
"import mmcv\nimport numpy as np\nimport os\nimport tempfile\nimport torch\nfrom mmcv.utils import print_log\nfrom os import path as osp\n\nfrom mmdet.datasets import DATASETS\nfrom ..core.bbox import Box3DMode, points_cam2img\nfrom .kitti_dataset import KittiDataset\n\n\[email protected]_module()\nclass WaymoDataset(KittiDataset):\n \"\"\"Waymo Dataset.\n\n This class serves as the API for experiments on the Waymo Dataset.\n\n Please refer to `<https://waymo.com/open/download/>`_for data downloading.\n It is recommended to symlink the dataset root to $MMDETECTION3D/data and\n organize them as the doc shows.\n\n Args:\n data_root (str): Path of dataset root.\n ann_file (str): Path of annotation file.\n split (str): Split of input data.\n pts_prefix (str, optional): Prefix of points files.\n Defaults to 'velodyne'.\n pipeline (list[dict], optional): Pipeline used for data processing.\n Defaults to None.\n classes (tuple[str], optional): Classes used in the dataset.\n Defaults to None.\n modality (dict, optional): Modality to specify the sensor data used\n as input. Defaults to None.\n box_type_3d (str, optional): Type of 3D box of this dataset.\n Based on the `box_type_3d`, the dataset will encapsulate the box\n to its original format then converted them to `box_type_3d`.\n Defaults to 'LiDAR' in this dataset. Available options includes\n\n - 'LiDAR': box in LiDAR coordinates\n - 'Depth': box in depth coordinates, usually for indoor dataset\n - 'Camera': box in camera coordinates\n filter_empty_gt (bool, optional): Whether to filter empty GT.\n Defaults to True.\n test_mode (bool, optional): Whether the dataset is in test mode.\n Defaults to False.\n pcd_limit_range (list): The range of point cloud used to filter\n invalid predicted boxes. Default: [-85, -85, -5, 85, 85, 5].\n \"\"\"\n\n CLASSES = ('Car', 'Cyclist', 'Pedestrian')\n\n def __init__(self,\n data_root,\n ann_file,\n split,\n pts_prefix='velodyne',\n pipeline=None,\n classes=None,\n modality=None,\n box_type_3d='LiDAR',\n filter_empty_gt=True,\n test_mode=False,\n load_interval=1,\n pcd_limit_range=[-85, -85, -5, 85, 85, 5]):\n super().__init__(\n data_root=data_root,\n ann_file=ann_file,\n split=split,\n pts_prefix=pts_prefix,\n pipeline=pipeline,\n classes=classes,\n modality=modality,\n box_type_3d=box_type_3d,\n filter_empty_gt=filter_empty_gt,\n test_mode=test_mode,\n pcd_limit_range=pcd_limit_range)\n\n # to load a subset, just set the load_interval in the dataset config\n self.data_infos = self.data_infos[::load_interval]\n if hasattr(self, 'flag'):\n self.flag = self.flag[::load_interval]\n\n def _get_pts_filename(self, idx):\n pts_filename = osp.join(self.root_split, self.pts_prefix,\n f'{idx:07d}.bin')\n return pts_filename\n\n def get_data_info(self, index):\n \"\"\"Get data info according to the given index.\n\n Args:\n index (int): Index of the sample data to get.\n\n Returns:\n dict: Standard input_dict consists of the\n data information.\n\n - sample_idx (str): sample index\n - pts_filename (str): filename of point clouds\n - img_prefix (str | None): prefix of image files\n - img_info (dict): image info\n - lidar2img (list[np.ndarray], optional): transformations from\n lidar to different cameras\n - ann_info (dict): annotation info\n \"\"\"\n info = self.data_infos[index]\n sample_idx = info['image']['image_idx']\n img_filename = os.path.join(self.data_root,\n info['image']['image_path'])\n\n # TODO: consider use torch.Tensor only\n rect = info['calib']['R0_rect'].astype(np.float32)\n Trv2c = 
info['calib']['Tr_velo_to_cam'].astype(np.float32)\n P0 = info['calib']['P0'].astype(np.float32)\n lidar2img = P0 @ rect @ Trv2c\n\n pts_filename = self._get_pts_filename(sample_idx)\n input_dict = dict(\n sample_idx=sample_idx,\n pts_filename=pts_filename,\n img_prefix=None,\n img_info=dict(filename=img_filename),\n lidar2img=lidar2img)\n\n if not self.test_mode:\n annos = self.get_ann_info(index)\n input_dict['ann_info'] = annos\n\n return input_dict\n\n def format_results(self,\n outputs,\n pklfile_prefix=None,\n submission_prefix=None,\n data_format='waymo'):\n \"\"\"Format the results to pkl file.\n\n Args:\n outputs (list[dict]): Testing results of the dataset.\n pklfile_prefix (str | None): The prefix of pkl files. It includes\n the file path and the prefix of filename, e.g., \"a/b/prefix\".\n If not specified, a temp file will be created. Default: None.\n submission_prefix (str | None): The prefix of submitted files. It\n includes the file path and the prefix of filename, e.g.,\n \"a/b/prefix\". If not specified, a temp file will be created.\n Default: None.\n data_format (str | None): Output data format. Default: 'waymo'.\n Another supported choice is 'kitti'.\n\n Returns:\n tuple: (result_files, tmp_dir), result_files is a dict containing\n the json filepaths, tmp_dir is the temporal directory created\n for saving json files when jsonfile_prefix is not specified.\n \"\"\"\n if pklfile_prefix is None:\n tmp_dir = tempfile.TemporaryDirectory()\n pklfile_prefix = osp.join(tmp_dir.name, 'results')\n else:\n tmp_dir = None\n\n assert ('waymo' in data_format or 'kitti' in data_format), \\\n f'invalid data_format {data_format}'\n\n if (not isinstance(outputs[0], dict)) or 'img_bbox' in outputs[0]:\n raise TypeError('Not supported type for reformat results.')\n elif 'pts_bbox' in outputs[0]:\n result_files = dict()\n for name in outputs[0]:\n results_ = [out[name] for out in outputs]\n pklfile_prefix_ = pklfile_prefix + name\n if submission_prefix is not None:\n submission_prefix_ = f'{submission_prefix}_{name}'\n else:\n submission_prefix_ = None\n result_files_ = self.bbox2result_kitti(results_, self.CLASSES,\n pklfile_prefix_,\n submission_prefix_)\n result_files[name] = result_files_\n else:\n result_files = self.bbox2result_kitti(outputs, self.CLASSES,\n pklfile_prefix,\n submission_prefix)\n if 'waymo' in data_format:\n from ..core.evaluation.waymo_utils.prediction_kitti_to_waymo import \\\n KITTI2Waymo # noqa\n waymo_root = osp.join(\n self.data_root.split('kitti_format')[0], 'waymo_format')\n if self.split == 'training':\n waymo_tfrecords_dir = osp.join(waymo_root, 'validation')\n prefix = '1'\n elif self.split == 'testing':\n waymo_tfrecords_dir = osp.join(waymo_root, 'testing')\n prefix = '2'\n else:\n raise ValueError('Not supported split value.')\n save_tmp_dir = tempfile.TemporaryDirectory()\n waymo_results_save_dir = save_tmp_dir.name\n waymo_results_final_path = f'{pklfile_prefix}.bin'\n if 'pts_bbox' in result_files:\n converter = KITTI2Waymo(result_files['pts_bbox'],\n waymo_tfrecords_dir,\n waymo_results_save_dir,\n waymo_results_final_path, prefix)\n else:\n converter = KITTI2Waymo(result_files, waymo_tfrecords_dir,\n waymo_results_save_dir,\n waymo_results_final_path, prefix)\n converter.convert()\n save_tmp_dir.cleanup()\n\n return result_files, tmp_dir\n\n def evaluate(self,\n results,\n metric='waymo',\n logger=None,\n pklfile_prefix=None,\n submission_prefix=None,\n show=False,\n out_dir=None,\n pipeline=None):\n \"\"\"Evaluation in KITTI protocol.\n\n Args:\n 
results (list[dict]): Testing results of the dataset.\n metric (str | list[str]): Metrics to be evaluated.\n Default: 'waymo'. Another supported metric is 'kitti'.\n logger (logging.Logger | str | None): Logger used for printing\n related information during evaluation. Default: None.\n pklfile_prefix (str | None): The prefix of pkl files. It includes\n the file path and the prefix of filename, e.g., \"a/b/prefix\".\n If not specified, a temp file will be created. Default: None.\n submission_prefix (str | None): The prefix of submission datas.\n If not specified, the submission data will not be generated.\n show (bool): Whether to visualize.\n Default: False.\n out_dir (str): Path to save the visualization results.\n Default: None.\n pipeline (list[dict], optional): raw data loading for showing.\n Default: None.\n\n Returns:\n dict[str: float]: results of each evaluation metric\n \"\"\"\n assert ('waymo' in metric or 'kitti' in metric), \\\n f'invalid metric {metric}'\n if 'kitti' in metric:\n result_files, tmp_dir = self.format_results(\n results,\n pklfile_prefix,\n submission_prefix,\n data_format='kitti')\n from mmdet3d.core.evaluation import kitti_eval\n gt_annos = [info['annos'] for info in self.data_infos]\n\n if isinstance(result_files, dict):\n ap_dict = dict()\n for name, result_files_ in result_files.items():\n eval_types = ['bev', '3d']\n ap_result_str, ap_dict_ = kitti_eval(\n gt_annos,\n result_files_,\n self.CLASSES,\n eval_types=eval_types)\n for ap_type, ap in ap_dict_.items():\n ap_dict[f'{name}/{ap_type}'] = float(\n '{:.4f}'.format(ap))\n\n print_log(\n f'Results of {name}:\\n' + ap_result_str, logger=logger)\n\n else:\n ap_result_str, ap_dict = kitti_eval(\n gt_annos,\n result_files,\n self.CLASSES,\n eval_types=['bev', '3d'])\n print_log('\\n' + ap_result_str, logger=logger)\n if 'waymo' in metric:\n waymo_root = osp.join(\n self.data_root.split('kitti_format')[0], 'waymo_format')\n if pklfile_prefix is None:\n eval_tmp_dir = tempfile.TemporaryDirectory()\n pklfile_prefix = osp.join(eval_tmp_dir.name, 'results')\n else:\n eval_tmp_dir = None\n result_files, tmp_dir = self.format_results(\n results,\n pklfile_prefix,\n submission_prefix,\n data_format='waymo')\n import subprocess\n ret_bytes = subprocess.check_output(\n 'mmdet3d/core/evaluation/waymo_utils/' +\n f'compute_detection_metrics_main {pklfile_prefix}.bin ' +\n f'{waymo_root}/gt.bin',\n shell=True)\n ret_texts = ret_bytes.decode('utf-8')\n print_log(ret_texts)\n # parse the text to get ap_dict\n ap_dict = {\n 'Vehicle/L1 mAP': 0,\n 'Vehicle/L1 mAPH': 0,\n 'Vehicle/L2 mAP': 0,\n 'Vehicle/L2 mAPH': 0,\n 'Pedestrian/L1 mAP': 0,\n 'Pedestrian/L1 mAPH': 0,\n 'Pedestrian/L2 mAP': 0,\n 'Pedestrian/L2 mAPH': 0,\n 'Sign/L1 mAP': 0,\n 'Sign/L1 mAPH': 0,\n 'Sign/L2 mAP': 0,\n 'Sign/L2 mAPH': 0,\n 'Cyclist/L1 mAP': 0,\n 'Cyclist/L1 mAPH': 0,\n 'Cyclist/L2 mAP': 0,\n 'Cyclist/L2 mAPH': 0,\n 'Overall/L1 mAP': 0,\n 'Overall/L1 mAPH': 0,\n 'Overall/L2 mAP': 0,\n 'Overall/L2 mAPH': 0\n }\n mAP_splits = ret_texts.split('mAP ')\n mAPH_splits = ret_texts.split('mAPH ')\n for idx, key in enumerate(ap_dict.keys()):\n split_idx = int(idx / 2) + 1\n if idx % 2 == 0: # mAP\n ap_dict[key] = float(mAP_splits[split_idx].split(']')[0])\n else: # mAPH\n ap_dict[key] = float(mAPH_splits[split_idx].split(']')[0])\n ap_dict['Overall/L1 mAP'] = \\\n (ap_dict['Vehicle/L1 mAP'] + ap_dict['Pedestrian/L1 mAP'] +\n ap_dict['Cyclist/L1 mAP']) / 3\n ap_dict['Overall/L1 mAPH'] = \\\n (ap_dict['Vehicle/L1 mAPH'] + ap_dict['Pedestrian/L1 mAPH'] +\n 
ap_dict['Cyclist/L1 mAPH']) / 3\n ap_dict['Overall/L2 mAP'] = \\\n (ap_dict['Vehicle/L2 mAP'] + ap_dict['Pedestrian/L2 mAP'] +\n ap_dict['Cyclist/L2 mAP']) / 3\n ap_dict['Overall/L2 mAPH'] = \\\n (ap_dict['Vehicle/L2 mAPH'] + ap_dict['Pedestrian/L2 mAPH'] +\n ap_dict['Cyclist/L2 mAPH']) / 3\n if eval_tmp_dir is not None:\n eval_tmp_dir.cleanup()\n\n if tmp_dir is not None:\n tmp_dir.cleanup()\n\n if show:\n self.show(results, out_dir, pipeline=pipeline)\n return ap_dict\n\n def bbox2result_kitti(self,\n net_outputs,\n class_names,\n pklfile_prefix=None,\n submission_prefix=None):\n \"\"\"Convert results to kitti format for evaluation and test submission.\n\n Args:\n net_outputs (List[np.ndarray]): list of array storing the\n bbox and score\n class_nanes (List[String]): A list of class names\n pklfile_prefix (str | None): The prefix of pkl file.\n submission_prefix (str | None): The prefix of submission file.\n\n Returns:\n List[dict]: A list of dict have the kitti 3d format\n \"\"\"\n assert len(net_outputs) == len(self.data_infos), \\\n 'invalid list length of network outputs'\n if submission_prefix is not None:\n mmcv.mkdir_or_exist(submission_prefix)\n\n det_annos = []\n print('\\nConverting prediction to KITTI format')\n for idx, pred_dicts in enumerate(\n mmcv.track_iter_progress(net_outputs)):\n annos = []\n info = self.data_infos[idx]\n sample_idx = info['image']['image_idx']\n # image_shape = info['image']['image_shape'][:2]\n\n box_dict = self.convert_valid_bboxes(pred_dicts, info)\n if len(box_dict['bbox']) > 0:\n box_2d_preds = box_dict['bbox']\n box_preds = box_dict['box3d_camera']\n scores = box_dict['scores']\n box_preds_lidar = box_dict['box3d_lidar']\n label_preds = box_dict['label_preds']\n\n anno = {\n 'name': [],\n 'truncated': [],\n 'occluded': [],\n 'alpha': [],\n 'bbox': [],\n 'dimensions': [],\n 'location': [],\n 'rotation_y': [],\n 'score': []\n }\n\n for box, box_lidar, bbox, score, label in zip(\n box_preds, box_preds_lidar, box_2d_preds, scores,\n label_preds):\n # bbox[2:] = np.minimum(bbox[2:], image_shape[::-1])\n # bbox[2:] = np.minimum(bbox[2:], (1280, 1920))\n bbox[:2] = np.maximum(bbox[:2], [0, 0])\n anno['name'].append(class_names[int(label)])\n anno['truncated'].append(0.0)\n anno['occluded'].append(0)\n anno['alpha'].append(\n -np.arctan2(-box_lidar[1], box_lidar[0]) + box[6])\n anno['bbox'].append(bbox)\n anno['dimensions'].append(box[3:6])\n anno['location'].append(box[:3])\n anno['rotation_y'].append(box[6])\n anno['score'].append(score)\n\n anno = {k: np.stack(v) for k, v in anno.items()}\n annos.append(anno)\n\n if submission_prefix is not None:\n curr_file = f'{submission_prefix}/{sample_idx:07d}.txt'\n with open(curr_file, 'w') as f:\n bbox = anno['bbox']\n loc = anno['location']\n dims = anno['dimensions'] # lhw -> hwl\n\n for idx in range(len(bbox)):\n print(\n '{} -1 -1 {:.4f} {:.4f} {:.4f} {:.4f} '\n '{:.4f} {:.4f} {:.4f} '\n '{:.4f} {:.4f} {:.4f} {:.4f} {:.4f} {:.4f}'.\n format(anno['name'][idx], anno['alpha'][idx],\n bbox[idx][0], bbox[idx][1],\n bbox[idx][2], bbox[idx][3],\n dims[idx][1], dims[idx][2],\n dims[idx][0], loc[idx][0], loc[idx][1],\n loc[idx][2], anno['rotation_y'][idx],\n anno['score'][idx]),\n file=f)\n else:\n annos.append({\n 'name': np.array([]),\n 'truncated': np.array([]),\n 'occluded': np.array([]),\n 'alpha': np.array([]),\n 'bbox': np.zeros([0, 4]),\n 'dimensions': np.zeros([0, 3]),\n 'location': np.zeros([0, 3]),\n 'rotation_y': np.array([]),\n 'score': np.array([]),\n })\n annos[-1]['sample_idx'] = np.array(\n 
[sample_idx] * len(annos[-1]['score']), dtype=np.int64)\n\n det_annos += annos\n\n if pklfile_prefix is not None:\n if not pklfile_prefix.endswith(('.pkl', '.pickle')):\n out = f'{pklfile_prefix}.pkl'\n mmcv.dump(det_annos, out)\n mmcv.dump(det_annos, \"work_dirs/hv_pointpillars_second_fpn_pillar_supervise_160e_kitti-3d-3class/result_kitti.pkl\")\n print(f'Result is saved to {out}.')\n\n return det_annos\n\n def convert_valid_bboxes(self, box_dict, info):\n \"\"\"Convert the boxes into valid format.\n\n Args:\n box_dict (dict): Bounding boxes to be converted.\n\n - boxes_3d (:obj:``LiDARInstance3DBoxes``): 3D bounding boxes.\n - scores_3d (np.ndarray): Scores of predicted boxes.\n - labels_3d (np.ndarray): Class labels of predicted boxes.\n info (dict): Dataset information dictionary.\n\n Returns:\n dict: Valid boxes after conversion.\n\n - bbox (np.ndarray): 2D bounding boxes (in camera 0).\n - box3d_camera (np.ndarray): 3D boxes in camera coordinates.\n - box3d_lidar (np.ndarray): 3D boxes in lidar coordinates.\n - scores (np.ndarray): Scores of predicted boxes.\n - label_preds (np.ndarray): Class labels of predicted boxes.\n - sample_idx (np.ndarray): Sample index.\n \"\"\"\n # TODO: refactor this function\n box_preds = box_dict['boxes_3d']\n scores = box_dict['scores_3d']\n labels = box_dict['labels_3d']\n sample_idx = info['image']['image_idx']\n # TODO: remove the hack of yaw\n box_preds.limit_yaw(offset=0.5, period=np.pi * 2)\n\n if len(box_preds) == 0:\n return dict(\n bbox=np.zeros([0, 4]),\n box3d_camera=np.zeros([0, 7]),\n box3d_lidar=np.zeros([0, 7]),\n scores=np.zeros([0]),\n label_preds=np.zeros([0, 4]),\n sample_idx=sample_idx)\n\n rect = info['calib']['R0_rect'].astype(np.float32)\n Trv2c = info['calib']['Tr_velo_to_cam'].astype(np.float32)\n P0 = info['calib']['P0'].astype(np.float32)\n P0 = box_preds.tensor.new_tensor(P0)\n\n box_preds_camera = box_preds.convert_to(Box3DMode.CAM, rect @ Trv2c)\n\n box_corners = box_preds_camera.corners\n box_corners_in_image = points_cam2img(box_corners, P0)\n # box_corners_in_image: [N, 8, 2]\n minxy = torch.min(box_corners_in_image, dim=1)[0]\n maxxy = torch.max(box_corners_in_image, dim=1)[0]\n box_2d_preds = torch.cat([minxy, maxxy], dim=1)\n # Post-processing\n # check box_preds\n limit_range = box_preds.tensor.new_tensor(self.pcd_limit_range)\n valid_pcd_inds = ((box_preds.center > limit_range[:3]) &\n (box_preds.center < limit_range[3:]))\n valid_inds = valid_pcd_inds.all(-1)\n\n if valid_inds.sum() > 0:\n return dict(\n bbox=box_2d_preds[valid_inds, :].numpy(),\n box3d_camera=box_preds_camera[valid_inds].tensor.numpy(),\n box3d_lidar=box_preds[valid_inds].tensor.numpy(),\n scores=scores[valid_inds].numpy(),\n label_preds=labels[valid_inds].numpy(),\n sample_idx=sample_idx,\n )\n else:\n return dict(\n bbox=np.zeros([0, 4]),\n box3d_camera=np.zeros([0, 7]),\n box3d_lidar=np.zeros([0, 7]),\n scores=np.zeros([0]),\n label_preds=np.zeros([0, 4]),\n sample_idx=sample_idx,\n )\n",
"import numpy as np\nfrom mmcv.parallel import DataContainer as DC\n\nfrom mmdet3d.core.bbox import BaseInstance3DBoxes\nfrom mmdet3d.core.points import BasePoints\nfrom mmdet.datasets.builder import PIPELINES\nfrom mmdet.datasets.pipelines import to_tensor\n\nPIPELINES._module_dict.pop('DefaultFormatBundle')\n\n\[email protected]_module()\nclass DefaultFormatBundle(object):\n \"\"\"Default formatting bundle.\n\n It simplifies the pipeline of formatting common fields, including \"img\",\n \"proposals\", \"gt_bboxes\", \"gt_labels\", \"gt_masks\" and \"gt_semantic_seg\".\n These fields are formatted as follows.\n\n - img: (1)transpose, (2)to tensor, (3)to DataContainer (stack=True)\n - proposals: (1)to tensor, (2)to DataContainer\n - gt_bboxes: (1)to tensor, (2)to DataContainer\n - gt_bboxes_ignore: (1)to tensor, (2)to DataContainer\n - gt_labels: (1)to tensor, (2)to DataContainer\n - gt_masks: (1)to tensor, (2)to DataContainer (cpu_only=True)\n - gt_semantic_seg: (1)unsqueeze dim-0 (2)to tensor, \\\n (3)to DataContainer (stack=True)\n \"\"\"\n\n def __init__(self, ):\n return\n\n def __call__(self, results):\n \"\"\"Call function to transform and format common fields in results.\n\n Args:\n results (dict): Result dict contains the data to convert.\n\n Returns:\n dict: The result dict contains the data that is formatted with\n default bundle.\n \"\"\"\n if 'img' in results:\n if isinstance(results['img'], list):\n # process multiple imgs in single frame\n imgs = [img.transpose(2, 0, 1) for img in results['img']]\n imgs = np.ascontiguousarray(np.stack(imgs, axis=0))\n results['img'] = DC(to_tensor(imgs), stack=True)\n else:\n img = np.ascontiguousarray(results['img'].transpose(2, 0, 1))\n results['img'] = DC(to_tensor(img), stack=True)\n for key in [\n 'proposals', 'gt_bboxes', 'gt_bboxes_ignore', 'gt_labels',\n 'gt_labels_3d', 'attr_labels', 'pts_instance_mask',\n 'pts_semantic_mask', 'centers2d', 'depths'\n ]:\n if key not in results:\n continue\n if isinstance(results[key], list):\n results[key] = DC([to_tensor(res) for res in results[key]])\n else:\n results[key] = DC(to_tensor(results[key]))\n if 'gt_bboxes_3d' in results:\n if isinstance(results['gt_bboxes_3d'], BaseInstance3DBoxes):\n results['gt_bboxes_3d'] = DC(\n results['gt_bboxes_3d'], cpu_only=True)\n else:\n results['gt_bboxes_3d'] = DC(\n to_tensor(results['gt_bboxes_3d']))\n\n if 'gt_masks' in results:\n results['gt_masks'] = DC(results['gt_masks'], cpu_only=True)\n if 'gt_semantic_seg' in results:\n results['gt_semantic_seg'] = DC(\n to_tensor(results['gt_semantic_seg'][None, ...]), stack=True)\n if 'bev_seg_image' in results:\n results['bev_seg_image'] = DC(to_tensor(results['bev_seg_image'][None, ...]), stack=True)\n return results\n\n def __repr__(self):\n return self.__class__.__name__\n\n\[email protected]_module()\nclass Collect3D(object):\n \"\"\"Collect data from the loader relevant to the specific task.\n\n This is usually the last stage of the data loader pipeline. Typically keys\n is set to some subset of \"img\", \"proposals\", \"gt_bboxes\",\n \"gt_bboxes_ignore\", \"gt_labels\", and/or \"gt_masks\".\n\n The \"img_meta\" item is always populated. The contents of the \"img_meta\"\n dictionary depends on \"meta_keys\". By default this includes:\n\n - 'img_shape': shape of the image input to the network as a tuple \\\n (h, w, c). 
Note that images may be zero padded on the \\\n bottom/right if the batch tensor is larger than this shape.\n - 'scale_factor': a float indicating the preprocessing scale\n - 'flip': a boolean indicating if image flip transform was used\n - 'filename': path to the image file\n - 'ori_shape': original shape of the image as a tuple (h, w, c)\n - 'pad_shape': image shape after padding\n - 'lidar2img': transform from lidar to image\n - 'pcd_horizontal_flip': a boolean indicating if point cloud is \\\n flipped horizontally\n - 'pcd_vertical_flip': a boolean indicating if point cloud is \\\n flipped vertically\n - 'box_mode_3d': 3D box mode\n - 'box_type_3d': 3D box type\n - 'img_norm_cfg': a dict of normalization information:\n\n - mean: per channel mean subtraction\n - std: per channel std divisor\n - to_rgb: bool indicating if bgr was converted to rgb\n - 'rect': rectification matrix\n - 'Trv2c': transformation from velodyne to camera coordinate\n - 'P2': transformation betweeen cameras\n - 'pcd_trans': point cloud transformations\n - 'sample_idx': sample index\n - 'pcd_scale_factor': point cloud scale factor\n - 'pcd_rotation': rotation applied to point cloud\n - 'pts_filename': path to point cloud file.\n\n Args:\n keys (Sequence[str]): Keys of results to be collected in ``data``.\n meta_keys (Sequence[str], optional): Meta keys to be converted to\n ``mmcv.DataContainer`` and collected in ``data[img_metas]``.\n Default: ('filename', 'ori_shape', 'img_shape', 'lidar2img', \\\n 'pad_shape', 'scale_factor', 'flip', 'pcd_horizontal_flip', \\\n 'pcd_vertical_flip', 'box_mode_3d', 'box_type_3d', \\\n 'img_norm_cfg', 'rect', 'Trv2c', 'P2', 'pcd_trans', \\\n 'sample_idx', 'pcd_scale_factor', 'pcd_rotation', 'pts_filename')\n \"\"\"\n\n def __init__(self,\n keys,\n meta_keys=('filename', 'ori_shape', 'img_shape', 'lidar2img',\n 'pad_shape', 'scale_factor', 'flip',\n 'cam_intrinsic', 'pcd_horizontal_flip',\n 'pcd_vertical_flip', 'box_mode_3d', 'box_type_3d',\n 'img_norm_cfg', 'rect', 'Trv2c', 'P2', 'pcd_trans',\n 'sample_idx', 'pcd_scale_factor', 'pcd_rotation',\n 'pts_filename', 'transformation_3d_flow')):\n self.keys = keys\n self.meta_keys = meta_keys\n\n def __call__(self, results):\n \"\"\"Call function to collect keys in results. 
The keys in ``meta_keys``\n will be converted to :obj:`mmcv.DataContainer`.\n\n Args:\n results (dict): Result dict contains the data to collect.\n\n Returns:\n dict: The result dict contains the following keys\n - keys in ``self.keys``\n - ``img_metas``\n \"\"\"\n data = {}\n img_metas = {}\n for key in self.meta_keys:\n if key in results:\n img_metas[key] = results[key]\n\n data['img_metas'] = DC(img_metas, cpu_only=True)\n for key in self.keys:\n data[key] = results[key]\n return data\n\n def __repr__(self):\n \"\"\"str: Return a string that describes the module.\"\"\"\n return self.__class__.__name__ + \\\n f'(keys={self.keys}, meta_keys={self.meta_keys})'\n\n\[email protected]_module()\nclass DefaultFormatBundle3D(DefaultFormatBundle):\n \"\"\"Default formatting bundle.\n\n It simplifies the pipeline of formatting common fields for voxels,\n including \"proposals\", \"gt_bboxes\", \"gt_labels\", \"gt_masks\" and\n \"gt_semantic_seg\".\n These fields are formatted as follows.\n\n - img: (1)transpose, (2)to tensor, (3)to DataContainer (stack=True)\n - proposals: (1)to tensor, (2)to DataContainer\n - gt_bboxes: (1)to tensor, (2)to DataContainer\n - gt_bboxes_ignore: (1)to tensor, (2)to DataContainer\n - gt_labels: (1)to tensor, (2)to DataContainer\n \"\"\"\n\n def __init__(self, class_names, with_gt=True, with_label=True):\n super(DefaultFormatBundle3D, self).__init__()\n self.class_names = class_names\n self.with_gt = with_gt\n self.with_label = with_label\n\n def __call__(self, results):\n \"\"\"Call function to transform and format common fields in results.\n\n Args:\n results (dict): Result dict contains the data to convert.\n\n Returns:\n dict: The result dict contains the data that is formatted with\n default bundle.\n \"\"\"\n # Format 3D data\n if 'points' in results:\n assert isinstance(results['points'], BasePoints)\n results['points'] = DC(results['points'].tensor)\n\n for key in ['voxels', 'coors', 'voxel_centers', 'num_points']:\n if key not in results:\n continue\n results[key] = DC(to_tensor(results[key]), stack=False)\n\n if self.with_gt:\n # Clean GT bboxes in the final\n if 'gt_bboxes_3d_mask' in results:\n gt_bboxes_3d_mask = results['gt_bboxes_3d_mask']\n results['gt_bboxes_3d'] = results['gt_bboxes_3d'][\n gt_bboxes_3d_mask]\n if 'gt_names_3d' in results:\n results['gt_names_3d'] = results['gt_names_3d'][\n gt_bboxes_3d_mask]\n if 'centers2d' in results:\n results['centers2d'] = results['centers2d'][\n gt_bboxes_3d_mask]\n if 'depths' in results:\n results['depths'] = results['depths'][gt_bboxes_3d_mask]\n if 'gt_bboxes_mask' in results:\n gt_bboxes_mask = results['gt_bboxes_mask']\n if 'gt_bboxes' in results:\n results['gt_bboxes'] = results['gt_bboxes'][gt_bboxes_mask]\n results['gt_names'] = results['gt_names'][gt_bboxes_mask]\n if self.with_label:\n if 'gt_names' in results and len(results['gt_names']) == 0:\n results['gt_labels'] = np.array([], dtype=np.int64)\n results['attr_labels'] = np.array([], dtype=np.int64)\n elif 'gt_names' in results and isinstance(\n results['gt_names'][0], list):\n # gt_labels might be a list of list in multi-view setting\n results['gt_labels'] = [\n np.array([self.class_names.index(n) for n in res],\n dtype=np.int64) for res in results['gt_names']\n ]\n elif 'gt_names' in results:\n results['gt_labels'] = np.array([\n self.class_names.index(n) for n in results['gt_names']\n ],\n dtype=np.int64)\n # we still assume one pipeline for one frame LiDAR\n # thus, the 3D name is list[string]\n if 'gt_names_3d' in results:\n 
results['gt_labels_3d'] = np.array([\n self.class_names.index(n)\n for n in results['gt_names_3d']\n ],\n dtype=np.int64)\n results = super(DefaultFormatBundle3D, self).__call__(results)\n return results\n\n def __repr__(self):\n \"\"\"str: Return a string that describes the module.\"\"\"\n repr_str = self.__class__.__name__\n repr_str += f'(class_names={self.class_names}, '\n repr_str += f'with_gt={self.with_gt}, with_label={self.with_label})'\n return repr_str\n",
"\"\"\"Second FPN with Residual attention\"\"\"\n\nimport numpy as np\nimport torch\nfrom mmcv.cnn import (build_conv_layer, build_norm_layer, build_upsample_layer,\n constant_init, is_norm, kaiming_init)\nfrom mmcv.runner import auto_fp16\nfrom torch import nn as nn\nimport torch.nn.functional as F\n\nfrom mmdet.models import NECKS\n\n\[email protected]_module()\nclass SECONDFPN_RAN(nn.Module):\n \"\"\"FPN used in SECOND/PointPillars/PartA2/MVXNet.\n\n Args:\n in_channels (list[int]): Input channels of multi-scale feature maps.\n out_channels (list[int]): Output channels of feature maps.\n upsample_strides (list[int]): Strides used to upsample the\n feature maps.\n norm_cfg (dict): Config dict of normalization layers.\n upsample_cfg (dict): Config dict of upsample layers.\n conv_cfg (dict): Config dict of conv layers.\n use_conv_for_no_stride (bool): Whether to use conv when stride is 1.\n \"\"\"\n\n def __init__(self,\n in_channels=[128, 128, 256],\n out_channels=[256, 256, 256],\n upsample_strides=[1, 2, 4],\n norm_cfg=dict(type='BN', eps=1e-3, momentum=0.01),\n upsample_cfg=dict(type='deconv', bias=False),\n conv_cfg=dict(type='Conv2d', bias=False),\n use_conv_for_no_stride=False):\n # if for GroupNorm,\n # cfg is dict(type='GN', num_groups=num_groups, eps=1e-3, affine=True)\n super(SECONDFPN_RAN, self).__init__()\n assert len(out_channels) == len(upsample_strides) == len(in_channels)\n self.in_channels = in_channels\n self.out_channels = out_channels\n self.fp16_enabled = False\n\n deblocks = []\n spitals = []\n channel_blocks = []\n for i, out_channel in enumerate(out_channels):\n stride = upsample_strides[i]\n if stride > 1 or (stride == 1 and not use_conv_for_no_stride):\n upsample_layer = build_upsample_layer(\n upsample_cfg,\n in_channels=in_channels[i],\n out_channels=out_channel,\n kernel_size=upsample_strides[i],\n stride=upsample_strides[i])\n conv_1 = build_upsample_layer(\n upsample_cfg,\n in_channels=in_channels[i],\n out_channels=out_channel,\n kernel_size=upsample_strides[i],\n stride=upsample_strides[i])\n else:\n stride = np.round(1 / stride).astype(np.int64)\n upsample_layer = build_conv_layer(\n conv_cfg,\n in_channels=in_channels[i],\n out_channels=out_channel,\n kernel_size=stride,\n stride=stride)\n conv_1 = build_conv_layer(\n conv_cfg,\n in_channels=in_channels[i],\n out_channels=out_channel,\n kernel_size=stride,\n stride=stride)\n\n deblock = nn.Sequential(upsample_layer,\n build_norm_layer(norm_cfg, out_channel)[1],\n nn.ReLU(inplace=True))\n deblocks.append(deblock)\n\n conv_2 = build_conv_layer(conv_cfg, in_channels=out_channel, out_channels=out_channel,\n kernel_size=1, stride=1)\n spital = nn.Sequential(conv_1, build_norm_layer(norm_cfg, out_channel)[1], nn.ReLU(inplace=True),\n conv_2, nn.Sigmoid())\n # [64, 64, 128]\n conv_c = build_conv_layer(conv_cfg, in_channels=in_channels[i], out_channels=out_channel,\n kernel_size=1, stride=1)\n channel_layer = nn.Sequential(conv_c, nn.Sigmoid())\n spitals.append(spital)\n channel_blocks.append(channel_layer)\n\n self.deblocks = nn.ModuleList(deblocks)\n self.spitals = nn.ModuleList(spitals)\n self.channel_blocks = nn.ModuleList(channel_blocks)\n\n def init_weights(self):\n \"\"\"Initialize weights of FPN.\"\"\"\n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n kaiming_init(m)\n elif is_norm(m):\n constant_init(m, 1)\n\n @auto_fp16()\n def forward(self, x, seg_mask=None):\n \"\"\"Forward function.\n\n Args:\n x (torch.Tensor): 4D Tensor in (N, C, H, W) shape.\n\n Returns:\n list[torch.Tensor]: 
Multi-level feature maps.\n \"\"\"\n assert len(x) == len(self.in_channels)\n ups = [deblock(x[i]) for i, deblock in enumerate(self.deblocks)]\n if seg_mask is None:\n ras = [spital(x[i]) for i, spital in enumerate(self.spitals)]\n elif len(seg_mask) == 3:\n # ras = [spital(seg_mask[i]) for i, spital in enumerate(self.spitals)]\n ras = [seg_mask[0],\n F.interpolate(seg_mask[1], scale_factor=2, mode='bilinear'),\n F.interpolate(seg_mask[2], scale_factor=4, mode='bilinear')]\n ras = [channel_block(ras[i]) for i, channel_block in enumerate(self.channel_blocks)]\n else:\n if isinstance(seg_mask, np.ndarray):\n seg_mask = torch.from_numpy(seg_mask).to(x[0].device).float()\n seg_mask = seg_mask.unsqueeze(1)\n if seg_mask.size(2) != ups[0].size(2):\n scale_factor = ups[0].size(2) / seg_mask.size(2)\n ras = [F.interpolate(seg_mask, scale_factor=scale_factor, mode='bilinear')]\n else:\n ras = [seg_mask]\n if len(ups) > 1:\n out = torch.cat(ups, dim=1)\n att = torch.cat(ras, dim=1)\n else:\n out = ups[0]\n att = ras[0]\n out = torch.mul(out, att) + out\n return [out]\n"
] | [
[
"torch.nn.Sequential",
"torch.nn.ModuleList",
"torch.log",
"torch.nn.ReLU",
"torch.pow"
],
[
"tensorflow.convert_to_tensor",
"tensorflow.reverse",
"tensorflow.enable_eager_execution",
"numpy.ones_like",
"tensorflow.constant",
"tensorflow.__version__.split",
"numpy.linalg.inv",
"tensorflow.data.TFRecordDataset",
"numpy.eye",
"tensorflow.squeeze",
"tensorflow.expand_dims",
"numpy.concatenate",
"tensorflow.where",
"numpy.column_stack",
"numpy.array",
"numpy.zeros",
"tensorflow.compat.v1.where"
],
[
"numpy.maximum",
"torch.max",
"torch.cat",
"torch.min",
"numpy.stack",
"numpy.arctan2",
"numpy.array",
"numpy.zeros"
],
[
"numpy.array",
"numpy.stack"
],
[
"torch.cat",
"torch.nn.ModuleList",
"torch.from_numpy",
"torch.nn.Sigmoid",
"numpy.round",
"torch.mul",
"torch.nn.functional.interpolate",
"torch.nn.ReLU"
]
] |
ZJU-lishuang/mmaction2 | [
"ee34d952e792fd1adea2c2e397b29faff68eaec9"
] | [
"tests/test_models/test_recognizers/test_recognizer2d.py"
] | [
"import torch\n\nfrom mmaction.models import build_recognizer\nfrom ..base import generate_recognizer_demo_inputs, get_recognizer_cfg\n\n\ndef test_tsn():\n config = get_recognizer_cfg('tsn/tsn_r50_1x1x3_100e_kinetics400_rgb.py')\n config.model['backbone']['pretrained'] = None\n\n recognizer = build_recognizer(config.model)\n\n input_shape = (1, 3, 3, 32, 32)\n demo_inputs = generate_recognizer_demo_inputs(input_shape)\n\n imgs = demo_inputs['imgs']\n gt_labels = demo_inputs['gt_labels']\n\n losses = recognizer(imgs, gt_labels)\n assert isinstance(losses, dict)\n\n # Test forward test\n with torch.no_grad():\n img_list = [img[None, :] for img in imgs]\n for one_img in img_list:\n recognizer(one_img, None, return_loss=False)\n\n # Test forward gradcam\n recognizer(imgs, gradcam=True)\n for one_img in img_list:\n recognizer(one_img, gradcam=True)\n\n\ndef test_tsm():\n config = get_recognizer_cfg('tsm/tsm_r50_1x1x8_50e_kinetics400_rgb.py')\n config.model['backbone']['pretrained'] = None\n\n recognizer = build_recognizer(config.model)\n\n input_shape = (1, 8, 3, 32, 32)\n demo_inputs = generate_recognizer_demo_inputs(input_shape)\n\n imgs = demo_inputs['imgs']\n gt_labels = demo_inputs['gt_labels']\n\n losses = recognizer(imgs, gt_labels)\n assert isinstance(losses, dict)\n\n # Test forward test\n with torch.no_grad():\n img_list = [img[None, :] for img in imgs]\n for one_img in img_list:\n recognizer(one_img, None, return_loss=False)\n\n # test twice sample + 3 crops\n input_shape = (2, 48, 3, 32, 32)\n demo_inputs = generate_recognizer_demo_inputs(input_shape)\n imgs = demo_inputs['imgs']\n\n config.model.test_cfg = dict(average_clips='prob')\n recognizer = build_recognizer(config.model)\n\n # Test forward test\n with torch.no_grad():\n img_list = [img[None, :] for img in imgs]\n for one_img in img_list:\n recognizer(one_img, None, return_loss=False)\n\n # Test forward gradcam\n recognizer(imgs, gradcam=True)\n for one_img in img_list:\n recognizer(one_img, gradcam=True)\n\n\ndef test_tpn():\n config = get_recognizer_cfg('tpn/tpn_tsm_r50_1x1x8_150e_sthv1_rgb.py')\n config.model['backbone']['pretrained'] = None\n\n recognizer = build_recognizer(config.model)\n\n input_shape = (1, 8, 3, 224, 224)\n demo_inputs = generate_recognizer_demo_inputs(input_shape)\n\n imgs = demo_inputs['imgs']\n gt_labels = demo_inputs['gt_labels']\n\n losses = recognizer(imgs, gt_labels)\n assert isinstance(losses, dict)\n assert 'loss_aux' in losses and 'loss_cls' in losses\n\n # Test forward test\n with torch.no_grad():\n img_list = [img[None, :] for img in imgs]\n for one_img in img_list:\n recognizer(one_img, None, return_loss=False)\n\n # Test forward gradcam\n recognizer(imgs, gradcam=True)\n for one_img in img_list:\n recognizer(one_img, gradcam=True)\n\n # Test forward dummy\n with torch.no_grad():\n _recognizer = build_recognizer(config.model)\n img_list = [img[None, :] for img in imgs]\n if hasattr(_recognizer, 'forward_dummy'):\n _recognizer.forward = _recognizer.forward_dummy\n for one_img in img_list:\n _recognizer(one_img)\n\n\ndef test_tanet():\n config = get_recognizer_cfg(\n 'tanet/tanet_r50_dense_1x1x8_100e_kinetics400_rgb.py')\n config.model['backbone']['pretrained'] = None\n\n recognizer = build_recognizer(config.model)\n\n input_shape = (1, 8, 3, 32, 32)\n demo_inputs = generate_recognizer_demo_inputs(input_shape)\n\n imgs = demo_inputs['imgs']\n gt_labels = demo_inputs['gt_labels']\n\n losses = recognizer(imgs, gt_labels)\n assert isinstance(losses, dict)\n\n # Test forward test\n with 
torch.no_grad():\n img_list = [img[None, :] for img in imgs]\n for one_img in img_list:\n recognizer(one_img, None, return_loss=False)\n\n # test twice sample + 3 crops\n input_shape = (2, 48, 3, 32, 32)\n demo_inputs = generate_recognizer_demo_inputs(input_shape)\n imgs = demo_inputs['imgs']\n\n config.model.test_cfg = dict(average_clips='prob')\n recognizer = build_recognizer(config.model)\n\n # Test forward test\n with torch.no_grad():\n img_list = [img[None, :] for img in imgs]\n for one_img in img_list:\n recognizer(one_img, None, return_loss=False)\n\n # Test forward gradcam\n recognizer(imgs, gradcam=True)\n for one_img in img_list:\n recognizer(one_img, gradcam=True)\n"
] | [
[
"torch.no_grad"
]
] |
yikir/mmdetection | [
"dfceb61b0252f81b010f550f2acbe46c7dad6ef6"
] | [
"port.py"
] | [
"#!/usr/bin/env python\n# -*- coding: UTF-8 -*-\n\n# n(net) o(oil) h(hang) r(rust) 检测模块\nimport os\nimport sys\ncurrent_dir = os.path.dirname(os.path.abspath(__file__))\nsys.path.append(current_dir)\n\nfrom mmdet.models import build_detector\nimport mmcv\nimport torch\nimport cv2\nimport time\nimport json\nfrom mmcv.runner import load_checkpoint\nimport PIL.Image as Image\nimport numpy as np\nfrom torchvision.transforms import transforms\nimport pycocotools.mask as maskUtils\n\ncurrent_dir = os.path.dirname(os.path.abspath(__file__))\nconfig_file = os.path.join(current_dir, 'configs','config_cascade_rcnn.py')\nweight_file = '/home/kilox/weights/nohr_best.pth'\n# weight_file = '/Weights/verified/oil_detection_v1/oil_best.pth'\n\nclass Object(object):\n def __init__(self):\n self.class_name = \"Unknown\"\n self.trust = 0.0\n self.rank = 0\n \n def to_json(self):\n return json.dumps(self.__dict__)\n\n\nclass Port:\n def __init__(self):\n self.cfg = mmcv.Config.fromfile(config_file)\n # 创建模型 , test_cfg 是rpn rcnn的nms等配置\n self.detector = build_detector(self.cfg.model, train_cfg=None, test_cfg=self.cfg.test_cfg)\n # 加载权重\n load_checkpoint(self.detector, weight_file, map_location='cpu')\n self.detector = self.detector.to('cuda')\n self.detector.eval()\n self.class_names = ('油污','鸟巢','锈蚀','飘挂物')\n \n def process(self, image,save=None):\n \"\"\"\n :param image: PIL.Image 输入图像\n \"\"\"\n np_image = np.asarray(image)\n img, img_meta = self.prepare_single(np_image)\n # forward\n with torch.no_grad():\n # 传入rescale则代表返回的mask是原图的\n result = self.detector.simple_test(img, [img_meta], proposals=None, rescale=True)\n # 将mask 以及bbox画在图上\n img = self.draw_image(np_image, img_meta, result)\n real_time = time.strftime('%Y_%m_%d_%H_%M_%S', time.localtime(time.time()))\n output_file_name = os.path.join(real_time + '.jpg')\n cv2.imwrite(output_file_name, img)\n return False,None,output_file_name\n\n # 将图片添加meta的函数\n def prepare_single(self,img):\n img_info = {'height': img.shape[0], 'width': img.shape[1]}\n img_norm_cfg = self.cfg.img_norm_cfg\n size_divisor = self.cfg.data.test.size_divisor\n \n img, scale_factor = mmcv.imrescale(img, (4014,2400), return_scale=True)\n img_shape = img.shape\n \n img = mmcv.imnormalize(img, img_norm_cfg.mean, img_norm_cfg.std, img_norm_cfg.to_rgb)\n img = mmcv.impad_to_multiple(img, size_divisor)\n pad_shape = img.shape\n _img = transforms.ToTensor()(img).float()\n _img = _img.unsqueeze(0)\n _img_meta = dict(\n ori_shape=(img_info['height'], img_info['width'], 3),\n img_shape=img_shape,\n pad_shape=pad_shape,\n scale_factor=scale_factor,\n flip=False)\n _img = _img.to('cuda')\n return _img, _img_meta,\n\n def draw_image(self,img, meta, result, score_thr=0.9):\n def tensor2imgs(tensor, mean=(0, 0, 0), std=(1, 1, 1), to_rgb=True):\n num_imgs = tensor.size(0)\n mean = np.array(mean, dtype=np.float32)\n std = np.array(std, dtype=np.float32)\n imgs = []\n for img_id in range(num_imgs):\n img = tensor[img_id, ...].cpu().numpy().transpose(1, 2, 0)\n img = mmcv.imdenormalize(\n img, mean, std, to_bgr=to_rgb).astype(np.uint8)\n imgs.append(np.ascontiguousarray(img))\n return imgs\n \n if isinstance(result, tuple):\n bbox_result, segm_result = result\n else:\n bbox_result, segm_result = result, None\n \n h, w, _ = meta['ori_shape']\n img_show = img[:h, :w, :].copy()\n \n bboxes = np.vstack(bbox_result)\n # 画mask\n # # draw segmentation masks\n # if segm_result is not None:\n # segms = mmcv.concat_list(segm_result)\n # inds = np.where(bboxes[:, -1] > score_thr)[0]\n # for i in inds:\n # 
color_mask = np.random.randint(\n # 0, 256, (1, 3), dtype=np.uint8)\n # mask = maskUtils.decode(segms[i]).astype(np.bool)\n # # todo fix dimension not equal\n # img_check_shape = tuple(img_show.shape[0:2])\n # if mask.shape != img_check_shape:\n # width_diff = mask.shape[1] - img_check_shape[1]\n # if mask.shape[1] < img_check_shape[1]:\n # mask = np.pad(mask, (0, width_diff), mode='constant', constant_values=False)\n # np.insert(mask, False, )\n # else:\n # mask = mask[:, :-width_diff]\n # img_show[mask] = img_show[mask] * 0.5 + color_mask * 0.5\n # 画bbox\n labels = [\n np.full(bbox.shape[0], i, dtype=np.int32)\n for i, bbox in enumerate(bbox_result)\n ]\n labels = np.concatenate(labels)\n assert bboxes.shape[1] == 5\n scores = bboxes[:, -1]\n inds = scores > score_thr\n bboxes = bboxes[inds, :]\n labels = labels[inds]\n \n for bbox, label in zip(bboxes, labels):\n bbox_int = bbox.astype(np.int32)\n left_top = (bbox_int[0], bbox_int[1])\n right_bottom = (bbox_int[2], bbox_int[3])\n cv2.rectangle(\n img_show, left_top, right_bottom, (0, 255, 0), thickness=2)\n label_text = self.class_names[\n label] if self.class_names is not None else 'cls {}'.format(label)\n if len(bbox) > 4:\n label_text += '|{:.02f}'.format(bbox[-1])\n cv2.putText(img_show, label_text, (bbox_int[0], bbox_int[1] - 2),\n cv2.FONT_HERSHEY_COMPLEX, 2, (0, 255, 0))\n \n return img_show\n\ndef test():\n pass\n\n\nif __name__ == '__main__':\n im = Image.open('/home/kilox/3.jpg')\n port = Port()\n print(port.process(im,True))\n"
] | [
[
"numpy.asarray",
"numpy.ascontiguousarray",
"numpy.full",
"numpy.concatenate",
"torch.no_grad",
"numpy.array",
"numpy.vstack"
]
] |
balakrishnan273818/AdvancedLaneDetection | [
"c0993aa9422654258a41fe9616ab4e24b29e6a7a"
] | [
"examples/unwanted/example.py"
] | [
"'''\ndef warper(img, src, dst):\n\n # Compute and apply perpective transform\n img_size = (img.shape[1], img.shape[0])\n M = cv2.getPerspectiveTransform(src, dst)\n warped = cv2.warpPerspective(img, M, img_size, flags=cv2.INTER_NEAREST) # keep same size as input image\n\n return warped\n'''\n\nimport numpy as np\nimport cv2\nimport glob\nimport matplotlib.pyplot as plt\n#%matplotlib qt\n\n# prepare object points, like (0,0,0), (1,0,0), (2,0,0) ....,(6,5,0)\nobjp = np.zeros((6*9,3), np.float32)\nobjp[:,:2] = np.mgrid[0:9,0:6].T.reshape(-1,2)\n\n# Arrays to store object points and image points from all the images.\nobjpoints = [] # 3d points in real world space\nimgpoints = [] # 2d points in image plane.\n\n# Make a list of calibration images\nimages = glob.glob('../camera_cal/*.jpg')\n\n# Step through the list and search for chessboard corners\nfor fname in images:\n img = cv2.imread(fname)\n gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)\n\n # Find the chessboard corners\n ret, corners = cv2.findChessboardCorners(gray, (9,6),None)\n\n # If found, add object points, image points\n if ret == True:\n objpoints.append(objp)\n imgpoints.append(corners)\n\n # Draw and display the corners\n img = cv2.drawChessboardCorners(img, (9,6), corners, ret)\n cv2.imshow('img',img)\n cv2.waitKey(30)\n\ncv2.destroyAllWindows()"
] | [
[
"numpy.zeros"
]
] |
DanielSun94/kgenlu | [
"bbf377c6740040cb1a8b656785e7c5bfdb8371d5"
] | [
"src/test/rnn_test.py"
] | [
"# -*- coding: utf-8 -*-\n\"\"\"\nNLP From Scratch: Translation with a Sequence to Sequence Network and Attention\n*******************************************************************************\n**Author**: `Sean Robertson <https://github.com/spro/practical-pytorch>`_\n\nThis is the third and final tutorial on doing \"NLP From Scratch\", where we\nwrite our own classes and functions to preprocess the data to do our NLP\nmodeling tasks. We hope after you complete this tutorial that you'll proceed to\nlearn how `torchtext` can handle much of this preprocessing for you in the\nthree tutorials immediately following this one.\n\nIn this project we will be teaching a neural network to translate from\nFrench to English.\n\n::\n\n [KEY: > input, = target, < output]\n\n > il est en train de peindre un tableau .\n = he is painting a picture .\n < he is painting a picture .\n\n > pourquoi ne pas essayer ce vin delicieux ?\n = why not try that delicious wine ?\n < why not try that delicious wine ?\n\n > elle n est pas poete mais romanciere .\n = she is not a poet but a novelist .\n < she not not a poet but a novelist .\n\n > vous etes trop maigre .\n = you re too skinny .\n < you re all alone .\n\n... to varying degrees of success.\n\nThis is made possible by the simple but powerful idea of the `sequence\nto sequence network <https://arxiv.org/abs/1409.3215>`__, in which two\nrecurrent neural networks work together to transform one sequence to\nanother. An encoder network condenses an input sequence into a vector,\nand a decoder network unfolds that vector into a new sequence.\n\n.. figure:: /_static/img/seq-seq-images/seq2seq.png\n :alt:\n\nTo improve upon this model we'll use an `attention\nmechanism <https://arxiv.org/abs/1409.0473>`__, which lets the decoder\nlearn to focus over a specific range of the input sequence.\n\n**Recommended Reading:**\n\nI assume you have at least installed PyTorch, know Python, and\nunderstand Tensors:\n\n- https://pytorch.org/ For installation instructions\n- :doc:`/beginner/deep_learning_60min_blitz` to get started with PyTorch in general\n- :doc:`/beginner/pytorch_with_examples` for a wide and deep overview\n- :doc:`/beginner/former_torchies_tutorial` if you are former Lua Torch user\n\n\nIt would also be useful to know about Sequence to Sequence networks and\nhow they work:\n\n- `Learning Phrase Representations using RNN Encoder-Decoder for\n Statistical Machine Translation <https://arxiv.org/abs/1406.1078>`__\n- `Sequence to Sequence Learning with Neural\n Networks <https://arxiv.org/abs/1409.3215>`__\n- `Neural Machine Translation by Jointly Learning to Align and\n Translate <https://arxiv.org/abs/1409.0473>`__\n- `A Neural Conversational Model <https://arxiv.org/abs/1506.05869>`__\n\nYou will also find the previous tutorials on\n:doc:`/intermediate/char_rnn_classification_tutorial`\nand :doc:`/intermediate/char_rnn_generation_tutorial`\nhelpful as those concepts are very similar to the Encoder and Decoder\nmodels, respectively.\n\n**Requirements**\n\"\"\"\nfrom __future__ import unicode_literals, print_function, division\nfrom io import open\nimport unicodedata\nimport string\nimport re\nimport random\n\nimport torch\nimport torch.nn as nn\nfrom torch import optim\nimport torch.nn.functional as F\n\ndevice = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\n######################################################################\n# Loading data files\n# ==================\n#\n# The data for this project is a set of many thousands of English 
to\n# French translation pairs.\n#\n# `This question on Open Data Stack\n# Exchange <https://opendata.stackexchange.com/questions/3888/dataset-of-sentences-translated-into-many-languages>`__\n# pointed me to the open translation site https://tatoeba.org/ which has\n# downloads available at https://tatoeba.org/eng/downloads - and better\n# yet, someone did the extra work of splitting language pairs into\n# individual text files here: https://www.manythings.org/anki/\n#\n# The English to French pairs are too big to include in the repo, so\n# download to ``data/eng-fra.txt`` before continuing. The file is a tab\n# separated list of translation pairs:\n#\n# ::\n#\n# I am cold. J'ai froid.\n#\n# .. Note::\n# Download the data from\n# `here <https://download.pytorch.org/tutorial/data.zip>`_\n# and extract it to the current directory.\n\n######################################################################\n# Similar to the character encoding used in the character-level RNN\n# tutorials, we will be representing each word in a language as a one-hot\n# vector, or giant vector of zeros except for a single one (at the index\n# of the word). Compared to the dozens of characters that might exist in a\n# language, there are many many more words, so the encoding vector is much\n# larger. We will however cheat a bit and trim the data to only use a few\n# thousand words per language.\n#\n# .. figure:: /_static/img/seq-seq-images/word-encoding.png\n# :alt:\n#\n#\n\n\n######################################################################\n# We'll need a unique index per word to use as the inputs and targets of\n# the networks later. To keep track of all this we will use a helper class\n# called ``Lang`` which has word → index (``word2index``) and index → word\n# (``index2word``) dictionaries, as well as a count of each word\n# ``word2count`` which will be used to replace rare words later.\n#\n\nSOS_token = 0\nEOS_token = 1\n\n\nclass Lang:\n def __init__(self, name):\n self.name = name\n self.word2index = {}\n self.word2count = {}\n self.index2word = {0: \"SOS\", 1: \"EOS\"}\n self.n_words = 2 # Count SOS and EOS\n\n def addSentence(self, sentence):\n for word in sentence.split(' '):\n self.addWord(word)\n\n def addWord(self, word):\n if word not in self.word2index:\n self.word2index[word] = self.n_words\n self.word2count[word] = 1\n self.index2word[self.n_words] = word\n self.n_words += 1\n else:\n self.word2count[word] += 1\n\n\n######################################################################\n# The files are all in Unicode, to simplify we will turn Unicode\n# characters to ASCII, make everything lowercase, and trim most\n# punctuation.\n#\n\n# Turn a Unicode string to plain ASCII, thanks to\n# https://stackoverflow.com/a/518232/2809427\ndef unicodeToAscii(s):\n return ''.join(\n c for c in unicodedata.normalize('NFD', s)\n if unicodedata.category(c) != 'Mn'\n )\n\n# Lowercase, trim, and remove non-letter characters\n\n\ndef normalizeString(s):\n s = unicodeToAscii(s.lower().strip())\n s = re.sub(r\"([.!?])\", r\" \\1\", s)\n s = re.sub(r\"[^a-zA-Z.!?]+\", r\" \", s)\n return s\n\n\n######################################################################\n# To read the data file we will split the file into lines, and then split\n# lines into pairs. 
The files are all English → Other Language, so if we\n# want to translate from Other Language → English I added the ``reverse``\n# flag to reverse the pairs.\n#\n\ndef readLangs(lang1, lang2, reverse=False):\n print(\"Reading lines...\")\n\n # Read the file and split into lines\n lines = open('data/%s-%s.txt' % (lang1, lang2), encoding='utf-8').\\\n read().strip().split('\\n')\n\n # Split every line into pairs and normalize\n pairs = [[normalizeString(s) for s in l.split('\\t')] for l in lines]\n\n # Reverse pairs, make Lang instances\n if reverse:\n pairs = [list(reversed(p)) for p in pairs]\n input_lang = Lang(lang2)\n output_lang = Lang(lang1)\n else:\n input_lang = Lang(lang1)\n output_lang = Lang(lang2)\n\n return input_lang, output_lang, pairs\n\n\n######################################################################\n# Since there are a *lot* of example sentences and we want to train\n# something quickly, we'll trim the data set to only relatively short and\n# simple sentences. Here the maximum length is 10 words (that includes\n# ending punctuation) and we're filtering to sentences that translate to\n# the form \"I am\" or \"He is\" etc. (accounting for apostrophes replaced\n# earlier).\n#\n\nMAX_LENGTH = 10\n\neng_prefixes = (\n \"i am \", \"i m \",\n \"he is\", \"he s \",\n \"she is\", \"she s \",\n \"you are\", \"you re \",\n \"we are\", \"we re \",\n \"they are\", \"they re \"\n)\n\n\ndef filterPair(p):\n return len(p[0].split(' ')) < MAX_LENGTH and \\\n len(p[1].split(' ')) < MAX_LENGTH and \\\n p[1].startswith(eng_prefixes)\n\n\ndef filterPairs(pairs):\n return [pair for pair in pairs if filterPair(pair)]\n\n\n######################################################################\n# The full process for preparing the data is:\n#\n# - Read text file and split into lines, split lines into pairs\n# - Normalize text, filter by length and content\n# - Make word lists from sentences in pairs\n#\n\ndef prepareData(lang1, lang2, reverse=False):\n input_lang, output_lang, pairs = readLangs(lang1, lang2, reverse)\n print(\"Read %s sentence pairs\" % len(pairs))\n pairs = filterPairs(pairs)\n print(\"Trimmed to %s sentence pairs\" % len(pairs))\n print(\"Counting words...\")\n for pair in pairs:\n input_lang.addSentence(pair[0])\n output_lang.addSentence(pair[1])\n print(\"Counted words:\")\n print(input_lang.name, input_lang.n_words)\n print(output_lang.name, output_lang.n_words)\n return input_lang, output_lang, pairs\n\n\ninput_lang, output_lang, pairs = prepareData('eng', 'fra', True)\nprint(random.choice(pairs))\n\n\n######################################################################\n# The Seq2Seq Model\n# =================\n#\n# A Recurrent Neural Network, or RNN, is a network that operates on a\n# sequence and uses its own output as input for subsequent steps.\n#\n# A `Sequence to Sequence network <https://arxiv.org/abs/1409.3215>`__, or\n# seq2seq network, or `Encoder Decoder\n# network <https://arxiv.org/pdf/1406.1078v3.pdf>`__, is a model\n# consisting of two RNNs called the encoder and decoder. The encoder reads\n# an input sequence and outputs a single vector, and the decoder reads\n# that vector to produce an output sequence.\n#\n# .. 
figure:: /_static/img/seq-seq-images/seq2seq.png\n# :alt:\n#\n# Unlike sequence prediction with a single RNN, where every input\n# corresponds to an output, the seq2seq model frees us from sequence\n# length and order, which makes it ideal for translation between two\n# languages.\n#\n# Consider the sentence \"Je ne suis pas le chat noir\" → \"I am not the\n# black cat\". Most of the words in the input sentence have a direct\n# translation in the output sentence, but are in slightly different\n# orders, e.g. \"chat noir\" and \"black cat\". Because of the \"ne/pas\"\n# construction there is also one more word in the input sentence. It would\n# be difficult to produce a correct translation directly from the sequence\n# of input words.\n#\n# With a seq2seq model the encoder creates a single vector which, in the\n# ideal case, encodes the \"meaning\" of the input sequence into a single\n# vector — a single point in some N dimensional space of sentences.\n#\n\n\n######################################################################\n# The Encoder\n# -----------\n#\n# The encoder of a seq2seq network is a RNN that outputs some value for\n# every word from the input sentence. For every input word the encoder\n# outputs a vector and a hidden state, and uses the hidden state for the\n# next input word.\n#\n# .. figure:: /_static/img/seq-seq-images/encoder-network.png\n# :alt:\n#\n#\n\nclass EncoderRNN(nn.Module):\n def __init__(self, input_size, hidden_size):\n super(EncoderRNN, self).__init__()\n self.hidden_size = hidden_size\n\n self.embedding = nn.Embedding(input_size, hidden_size)\n self.gru = nn.GRU(hidden_size, hidden_size)\n\n def forward(self, input, hidden):\n embedded = self.embedding(input).view(1, 1, -1)\n output = embedded\n output, hidden = self.gru(output, hidden)\n return output, hidden\n\n def initHidden(self):\n return torch.zeros(1, 1, self.hidden_size, device=device)\n\n######################################################################\n# The Decoder\n# -----------\n#\n# The decoder is another RNN that takes the encoder output vector(s) and\n# outputs a sequence of words to create the translation.\n#\n\n\n######################################################################\n# Simple Decoder\n# ^^^^^^^^^^^^^^\n#\n# In the simplest seq2seq decoder we use only last output of the encoder.\n# This last output is sometimes called the *context vector* as it encodes\n# context from the entire sequence. This context vector is used as the\n# initial hidden state of the decoder.\n#\n# At every step of decoding, the decoder is given an input token and\n# hidden state. The initial input token is the start-of-string ``<SOS>``\n# token, and the first hidden state is the context vector (the encoder's\n# last hidden state).\n#\n# .. 
figure:: /_static/img/seq-seq-images/decoder-network.png\n# :alt:\n#\n#\n\nclass DecoderRNN(nn.Module):\n def __init__(self, hidden_size, output_size):\n super(DecoderRNN, self).__init__()\n self.hidden_size = hidden_size\n\n self.embedding = nn.Embedding(output_size, hidden_size)\n self.gru = nn.GRU(hidden_size, hidden_size)\n self.out = nn.Linear(hidden_size, output_size)\n self.softmax = nn.LogSoftmax(dim=1)\n\n def forward(self, input, hidden):\n output = self.embedding(input).view(1, 1, -1)\n output = F.relu(output)\n output, hidden = self.gru(output, hidden)\n output = self.softmax(self.out(output[0]))\n return output, hidden\n\n def initHidden(self):\n return torch.zeros(1, 1, self.hidden_size, device=device)\n\n######################################################################\n# I encourage you to train and observe the results of this model, but to\n# save space we'll be going straight for the gold and introducing the\n# Attention Mechanism.\n#\n\n\n######################################################################\n# Attention Decoder\n# ^^^^^^^^^^^^^^^^^\n#\n# If only the context vector is passed between the encoder and decoder,\n# that single vector carries the burden of encoding the entire sentence.\n#\n# Attention allows the decoder network to \"focus\" on a different part of\n# the encoder's outputs for every step of the decoder's own outputs. First\n# we calculate a set of *attention weights*. These will be multiplied by\n# the encoder output vectors to create a weighted combination. The result\n# (called ``attn_applied`` in the code) should contain information about\n# that specific part of the input sequence, and thus help the decoder\n# choose the right output words.\n#\n# .. figure:: https://i.imgur.com/1152PYf.png\n# :alt:\n#\n# Calculating the attention weights is done with another feed-forward\n# layer ``attn``, using the decoder's input and hidden state as inputs.\n# Because there are sentences of all sizes in the training data, to\n# actually create and train this layer we have to choose a maximum\n# sentence length (input length, for encoder outputs) that it can apply\n# to. Sentences of the maximum length will use all the attention weights,\n# while shorter sentences will only use the first few.\n#\n# .. 
figure:: /_static/img/seq-seq-images/attention-decoder-network.png\n# :alt:\n#\n#\n\nclass AttnDecoderRNN(nn.Module):\n def __init__(self, hidden_size, output_size, dropout_p=0.1, max_length=MAX_LENGTH):\n super(AttnDecoderRNN, self).__init__()\n self.hidden_size = hidden_size\n self.output_size = output_size\n self.dropout_p = dropout_p\n self.max_length = max_length\n\n self.embedding = nn.Embedding(self.output_size, self.hidden_size)\n self.attn = nn.Linear(self.hidden_size * 2, self.max_length)\n self.attn_combine = nn.Linear(self.hidden_size * 2, self.hidden_size)\n self.dropout = nn.Dropout(self.dropout_p)\n self.gru = nn.GRU(self.hidden_size, self.hidden_size)\n self.out = nn.Linear(self.hidden_size, self.output_size)\n\n def forward(self, input, hidden, encoder_outputs):\n embedded = self.embedding(input).view(1, 1, -1)\n embedded = self.dropout(embedded)\n\n attn_weights = F.softmax(\n self.attn(torch.cat((embedded[0], hidden[0]), 1)), dim=1)\n attn_applied = torch.bmm(attn_weights.unsqueeze(0),\n encoder_outputs.unsqueeze(0))\n\n output = torch.cat((embedded[0], attn_applied[0]), 1)\n output = self.attn_combine(output).unsqueeze(0)\n\n output = F.relu(output)\n output, hidden = self.gru(output, hidden)\n\n output = F.log_softmax(self.out(output[0]), dim=1)\n return output, hidden, attn_weights\n\n def initHidden(self):\n return torch.zeros(1, 1, self.hidden_size, device=device)\n\n\n######################################################################\n# .. note:: There are other forms of attention that work around the length\n# limitation by using a relative position approach. Read about \"local\n# attention\" in `Effective Approaches to Attention-based Neural Machine\n# Translation <https://arxiv.org/abs/1508.04025>`__.\n#\n# Training\n# ========\n#\n# Preparing Training Data\n# -----------------------\n#\n# To train, for each pair we will need an input tensor (indexes of the\n# words in the input sentence) and target tensor (indexes of the words in\n# the target sentence). While creating these vectors we will append the\n# EOS token to both sequences.\n#\n\ndef indexesFromSentence(lang, sentence):\n return [lang.word2index[word] for word in sentence.split(' ')]\n\n\ndef tensorFromSentence(lang, sentence):\n indexes = indexesFromSentence(lang, sentence)\n indexes.append(EOS_token)\n return torch.tensor(indexes, dtype=torch.long, device=device).view(-1, 1)\n\n\ndef tensorsFromPair(pair):\n input_tensor = tensorFromSentence(input_lang, pair[0])\n target_tensor = tensorFromSentence(output_lang, pair[1])\n return (input_tensor, target_tensor)\n\n\n######################################################################\n# Training the Model\n# ------------------\n#\n# To train we run the input sentence through the encoder, and keep track\n# of every output and the latest hidden state. 
Then the decoder is given\n# the ``<SOS>`` token as its first input, and the last hidden state of the\n# encoder as its first hidden state.\n#\n# \"Teacher forcing\" is the concept of using the real target outputs as\n# each next input, instead of using the decoder's guess as the next input.\n# Using teacher forcing causes it to converge faster but `when the trained\n# network is exploited, it may exhibit\n# instability <http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.378.4095&rep=rep1&type=pdf>`__.\n#\n# You can observe outputs of teacher-forced networks that read with\n# coherent grammar but wander far from the correct translation -\n# intuitively it has learned to represent the output grammar and can \"pick\n# up\" the meaning once the teacher tells it the first few words, but it\n# has not properly learned how to create the sentence from the translation\n# in the first place.\n#\n# Because of the freedom PyTorch's autograd gives us, we can randomly\n# choose to use teacher forcing or not with a simple if statement. Turn\n# ``teacher_forcing_ratio`` up to use more of it.\n#\n\nteacher_forcing_ratio = 0.5\n\n\ndef train(input_tensor, target_tensor, encoder, decoder, encoder_optimizer, decoder_optimizer, criterion, max_length=MAX_LENGTH):\n encoder_hidden = encoder.initHidden()\n\n encoder_optimizer.zero_grad()\n decoder_optimizer.zero_grad()\n\n input_length = input_tensor.size(0)\n target_length = target_tensor.size(0)\n\n encoder_outputs = torch.zeros(max_length, encoder.hidden_size, device=device)\n\n loss = 0\n\n for ei in range(input_length):\n encoder_output, encoder_hidden = encoder(\n input_tensor[ei], encoder_hidden)\n encoder_outputs[ei] = encoder_output[0, 0]\n\n decoder_input = torch.tensor([[SOS_token]], device=device)\n\n decoder_hidden = encoder_hidden\n\n use_teacher_forcing = True if random.random() < teacher_forcing_ratio else False\n\n if use_teacher_forcing:\n # Teacher forcing: Feed the target as the next input\n for di in range(target_length):\n decoder_output, decoder_hidden, decoder_attention = decoder(\n decoder_input, decoder_hidden, encoder_outputs)\n loss += criterion(decoder_output, target_tensor[di])\n decoder_input = target_tensor[di] # Teacher forcing\n\n else:\n # Without teacher forcing: use its own predictions as the next input\n for di in range(target_length):\n decoder_output, decoder_hidden, decoder_attention = decoder(\n decoder_input, decoder_hidden, encoder_outputs)\n topv, topi = decoder_output.topk(1)\n decoder_input = topi.squeeze().detach() # detach from history as input\n\n loss += criterion(decoder_output, target_tensor[di])\n if decoder_input.item() == EOS_token:\n break\n\n loss.backward()\n\n encoder_optimizer.step()\n decoder_optimizer.step()\n\n return loss.item() / target_length\n\n\n######################################################################\n# This is a helper function to print time elapsed and estimated time\n# remaining given the current time and progress %.\n#\n\nimport time\nimport math\n\n\ndef asMinutes(s):\n m = math.floor(s / 60)\n s -= m * 60\n return '%dm %ds' % (m, s)\n\n\ndef timeSince(since, percent):\n now = time.time()\n s = now - since\n es = s / (percent)\n rs = es - s\n return '%s (- %s)' % (asMinutes(s), asMinutes(rs))\n\n\n######################################################################\n# The whole training process looks like this:\n#\n# - Start a timer\n# - Initialize optimizers and criterion\n# - Create set of training pairs\n# - Start empty losses array for plotting\n#\n# Then we call 
``train`` many times and occasionally print the progress (%\n# of examples, time so far, estimated time) and average loss.\n#\n\ndef trainIters(encoder, decoder, n_iters, print_every=1000, plot_every=100, learning_rate=0.01):\n start = time.time()\n plot_losses = []\n print_loss_total = 0 # Reset every print_every\n plot_loss_total = 0 # Reset every plot_every\n\n encoder_optimizer = optim.SGD(encoder.parameters(), lr=learning_rate)\n decoder_optimizer = optim.SGD(decoder.parameters(), lr=learning_rate)\n training_pairs = [tensorsFromPair(random.choice(pairs))\n for i in range(n_iters)]\n criterion = nn.NLLLoss()\n\n for iter in range(1, n_iters + 1):\n training_pair = training_pairs[iter - 1]\n input_tensor = training_pair[0]\n target_tensor = training_pair[1]\n\n loss = train(input_tensor, target_tensor, encoder,\n decoder, encoder_optimizer, decoder_optimizer, criterion)\n print_loss_total += loss\n plot_loss_total += loss\n\n if iter % print_every == 0:\n print_loss_avg = print_loss_total / print_every\n print_loss_total = 0\n print('%s (%d %d%%) %.4f' % (timeSince(start, iter / n_iters),\n iter, iter / n_iters * 100, print_loss_avg))\n\n if iter % plot_every == 0:\n plot_loss_avg = plot_loss_total / plot_every\n plot_losses.append(plot_loss_avg)\n plot_loss_total = 0\n\n showPlot(plot_losses)\n\n\n######################################################################\n# Plotting results\n# ----------------\n#\n# Plotting is done with matplotlib, using the array of loss values\n# ``plot_losses`` saved while training.\n#\n\nimport matplotlib.pyplot as plt\nplt.switch_backend('agg')\nimport matplotlib.ticker as ticker\nimport numpy as np\n\n\ndef showPlot(points):\n plt.figure()\n fig, ax = plt.subplots()\n # this locator puts ticks at regular intervals\n loc = ticker.MultipleLocator(base=0.2)\n ax.yaxis.set_major_locator(loc)\n plt.plot(points)\n\n\n######################################################################\n# Evaluation\n# ==========\n#\n# Evaluation is mostly the same as training, but there are no targets so\n# we simply feed the decoder's predictions back to itself for each step.\n# Every time it predicts a word we add it to the output string, and if it\n# predicts the EOS token we stop there. 
We also store the decoder's\n# attention outputs for display later.\n#\n\ndef evaluate(encoder, decoder, sentence, max_length=MAX_LENGTH):\n with torch.no_grad():\n input_tensor = tensorFromSentence(input_lang, sentence)\n input_length = input_tensor.size()[0]\n encoder_hidden = encoder.initHidden()\n\n encoder_outputs = torch.zeros(max_length, encoder.hidden_size, device=device)\n\n for ei in range(input_length):\n encoder_output, encoder_hidden = encoder(input_tensor[ei],\n encoder_hidden)\n encoder_outputs[ei] += encoder_output[0, 0]\n\n decoder_input = torch.tensor([[SOS_token]], device=device) # SOS\n\n decoder_hidden = encoder_hidden\n\n decoded_words = []\n decoder_attentions = torch.zeros(max_length, max_length)\n\n for di in range(max_length):\n decoder_output, decoder_hidden, decoder_attention = decoder(\n decoder_input, decoder_hidden, encoder_outputs)\n decoder_attentions[di] = decoder_attention.data\n topv, topi = decoder_output.data.topk(1)\n if topi.item() == EOS_token:\n decoded_words.append('<EOS>')\n break\n else:\n decoded_words.append(output_lang.index2word[topi.item()])\n\n decoder_input = topi.squeeze().detach()\n\n return decoded_words, decoder_attentions[:di + 1]\n\n\n######################################################################\n# We can evaluate random sentences from the training set and print out the\n# input, target, and output to make some subjective quality judgements:\n#\n\ndef evaluateRandomly(encoder, decoder, n=10):\n for i in range(n):\n pair = random.choice(pairs)\n print('>', pair[0])\n print('=', pair[1])\n output_words, attentions = evaluate(encoder, decoder, pair[0])\n output_sentence = ' '.join(output_words)\n print('<', output_sentence)\n print('')\n\n\n######################################################################\n# Training and Evaluating\n# =======================\n#\n# With all these helper functions in place (it looks like extra work, but\n# it makes it easier to run multiple experiments) we can actually\n# initialize a network and start training.\n#\n# Remember that the input sentences were heavily filtered. For this small\n# dataset we can use relatively small networks of 256 hidden nodes and a\n# single GRU layer. After about 40 minutes on a MacBook CPU we'll get some\n# reasonable results.\n#\n# .. Note::\n# If you run this notebook you can train, interrupt the kernel,\n# evaluate, and continue training later. Comment out the lines where the\n# encoder and decoder are initialized and run ``trainIters`` again.\n#\n\nhidden_size = 256\nencoder1 = EncoderRNN(input_lang.n_words, hidden_size).to(device)\nattn_decoder1 = AttnDecoderRNN(hidden_size, output_lang.n_words, dropout_p=0.1).to(device)\n\ntrainIters(encoder1, attn_decoder1, 75000, print_every=5000)\n\n######################################################################\n#\n\nevaluateRandomly(encoder1, attn_decoder1)\n\n\n######################################################################\n# Visualizing Attention\n# ---------------------\n#\n# A useful property of the attention mechanism is its highly interpretable\n# outputs. 
Because it is used to weight specific encoder outputs of the\n# input sequence, we can imagine looking where the network is focused most\n# at each time step.\n#\n# You could simply run ``plt.matshow(attentions)`` to see attention output\n# displayed as a matrix, with the columns being input steps and rows being\n# output steps:\n#\n\noutput_words, attentions = evaluate(\n encoder1, attn_decoder1, \"je suis trop froid .\")\nplt.matshow(attentions.numpy())\n\n\n######################################################################\n# For a better viewing experience we will do the extra work of adding axes\n# and labels:\n#\n\ndef showAttention(input_sentence, output_words, attentions):\n # Set up figure with colorbar\n fig = plt.figure()\n ax = fig.add_subplot(111)\n cax = ax.matshow(attentions.numpy(), cmap='bone')\n fig.colorbar(cax)\n\n # Set up axes\n ax.set_xticklabels([''] + input_sentence.split(' ') +\n ['<EOS>'], rotation=90)\n ax.set_yticklabels([''] + output_words)\n\n # Show label at every tick\n ax.xaxis.set_major_locator(ticker.MultipleLocator(1))\n ax.yaxis.set_major_locator(ticker.MultipleLocator(1))\n\n plt.show()\n\n\ndef evaluateAndShowAttention(input_sentence):\n output_words, attentions = evaluate(\n encoder1, attn_decoder1, input_sentence)\n print('input =', input_sentence)\n print('output =', ' '.join(output_words))\n showAttention(input_sentence, output_words, attentions)\n\n\nevaluateAndShowAttention(\"elle a cinq ans de moins que moi .\")\n\nevaluateAndShowAttention(\"elle est trop petit .\")\n\nevaluateAndShowAttention(\"je ne crains pas de mourir .\")\n\nevaluateAndShowAttention(\"c est un jeune directeur plein de talent .\")\n\n\n######################################################################\n# Exercises\n# =========\n#\n# - Try with a different dataset\n#\n# - Another language pair\n# - Human → Machine (e.g. IOT commands)\n# - Chat → Response\n# - Question → Answer\n#\n# - Replace the embeddings with pre-trained word embeddings such as word2vec or\n# GloVe\n# - Try with more layers, more hidden units, and more sentences. Compare\n# the training time and results.\n# - If you use a translation file where pairs have two of the same phrase\n# (``I am test \\t I am test``), you can use this as an autoencoder. Try\n# this:\n#\n# - Train as an autoencoder\n# - Save only the Encoder network\n# - Train a new Decoder for translation from there\n#"
] | [
[
"torch.nn.NLLLoss",
"matplotlib.ticker.MultipleLocator",
"torch.nn.LogSoftmax",
"torch.nn.Dropout",
"torch.zeros",
"torch.cat",
"matplotlib.pyplot.switch_backend",
"torch.nn.GRU",
"matplotlib.pyplot.subplots",
"torch.nn.Embedding",
"torch.tensor",
"matplotlib.pyplot.plot",
"torch.nn.Linear",
"torch.nn.functional.relu",
"torch.no_grad",
"torch.cuda.is_available",
"matplotlib.pyplot.show",
"matplotlib.pyplot.figure"
]
] |
theKasra/14-puzzle-problem-bidirectionalsearch | [
"f6fe4e0d8a1db1b1675933d8b2461981ac08686b"
] | [
"Project_2.py"
] | [
"from copy import deepcopy\r\nfrom collections import deque\r\nimport time\r\nimport numpy as np\r\n\r\nclass Node:\r\n def __init__(self, parent, grid):\r\n self.parent = parent\r\n self.grid = grid\r\n\r\ndef print_answer(p1, p2):\r\n initial_to_middle = []\r\n while p1:\r\n initial_to_middle.insert(0, p1.grid)\r\n p1 = p1.parent\r\n print(\"\\nStep by step solution:\\n\")\r\n for i in initial_to_middle:\r\n print(np.matrix(i), \"\\n\")\r\n print(\"-----------middle--------------\", \"\\n\")\r\n while p2:\r\n print(np.matrix(p2.grid), \"\\n\")\r\n p2 = p2.parent\r\n\r\ndef search(node, frontier):\r\n frontier_len = len(frontier)\r\n \r\n for i in range(frontier_len):\r\n if frontier[i].grid == node.grid:\r\n return frontier[i]\r\n return None\r\n\r\ndef check_grid(grid, frontier, explored):\r\n frontier_len = len(frontier)\r\n if frontier_len == 0:\r\n if grid not in explored:\r\n return True\r\n else:\r\n if grid not in explored:\r\n for i in range(frontier_len):\r\n if frontier[i].grid == grid:\r\n return False\r\n else:\r\n return False\r\n return True\r\n\r\ndef expand(node, frontier, explored):\r\n first_0 = [None, None]\r\n second_0 = [None, None]\r\n\r\n found_first_0 = False\r\n found_all_0 = False\r\n for i in range(4):\r\n if not found_all_0:\r\n for j in range(4):\r\n if node.grid[i][j] == 0:\r\n if not found_first_0:\r\n first_0 = [i, j]\r\n found_first_0 = True\r\n else:\r\n second_0 = [i, j]\r\n found_all_0 = True\r\n break\r\n else:\r\n break\r\n \r\n move_left(node, first_0, frontier, explored)\r\n move_left(node, second_0, frontier, explored)\r\n move_right(node, first_0, frontier, explored)\r\n move_right(node, second_0, frontier, explored)\r\n move_up(node, first_0, frontier, explored)\r\n move_up(node, second_0, frontier, explored)\r\n move_down(node, first_0, frontier, explored)\r\n move_down(node, second_0, frontier, explored)\r\n\r\ndef add_to_frontier(node, child_grid, frontier):\r\n child = Node(node, child_grid)\r\n frontier.append(child)\r\n\r\ndef move_left(node, coordinate, frontier, explored):\r\n i, j = coordinate[0], coordinate[1]\r\n if j == 0 or node.grid[i][j-1] == 0:\r\n pass\r\n else:\r\n child_grid = deepcopy(node.grid)\r\n child_grid[i][j], child_grid[i][j-1] = child_grid[i][j-1], child_grid[i][j]\r\n if check_grid(child_grid, frontier, explored):\r\n add_to_frontier(node, child_grid, frontier)\r\n\r\ndef move_right(node, coordinate, frontier, explored):\r\n i, j = coordinate[0], coordinate[1]\r\n if j == 3 or node.grid[i][j+1] == 0:\r\n pass\r\n else:\r\n child_grid = deepcopy(node.grid)\r\n child_grid[i][j], child_grid[i][j+1] = child_grid[i][j+1], child_grid[i][j]\r\n if check_grid(child_grid, frontier, explored):\r\n add_to_frontier(node, child_grid, frontier)\r\n\r\ndef move_up(node, coordinate, frontier, explored):\r\n i, j = coordinate[0], coordinate[1]\r\n if i == 0 or node.grid[i-1][j] == 0:\r\n pass\r\n else:\r\n child_grid = deepcopy(node.grid)\r\n child_grid[i][j], child_grid[i-1][j] = child_grid[i-1][j], child_grid[i][j]\r\n if check_grid(child_grid, frontier, explored):\r\n add_to_frontier(node, child_grid, frontier)\r\n\r\ndef move_down(node, coordinate, frontier, explored):\r\n i, j = coordinate[0], coordinate[1]\r\n if i == 3 or node.grid[i+1][j] == 0:\r\n pass\r\n else:\r\n child_grid = deepcopy(node.grid)\r\n child_grid[i][j], child_grid[i+1][j] = child_grid[i+1][j], child_grid[i][j]\r\n if check_grid(child_grid, frontier, explored):\r\n add_to_frontier(node, child_grid, frontier)\r\n\r\ndef bidirectional_search(frontier_initial, 
explored_initial, frontier_goal, explored_goal):\r\n while frontier_initial and frontier_goal:\r\n node_initial = deque.popleft(frontier_initial)\r\n result_initial = search(node_initial, frontier_goal)\r\n if result_initial:\r\n p1 = node_initial\r\n p2 = result_initial\r\n break\r\n else:\r\n explored_initial.append(node_initial.grid)\r\n expand(node_initial, frontier_initial, explored_initial)\r\n \r\n node_goal = deque.popleft(frontier_goal)\r\n result_goal = search(node_goal, frontier_initial)\r\n if result_goal:\r\n p1 = result_goal\r\n p2 = node_goal\r\n break\r\n else:\r\n explored_goal.append(node_goal.grid)\r\n expand(node_goal, frontier_goal, explored_goal)\r\n print_answer(p1, p2)\r\n\r\ndef read_input_file(filename, grid):\r\n numbers = \"\"\r\n numbers_counter = 0\r\n\r\n f = open(filename, \"r\")\r\n numbers = f.readline().split(\" \")\r\n f.close()\r\n\r\n for i in range(4):\r\n for j in range(4):\r\n grid[i][j] = int(numbers[numbers_counter])\r\n numbers_counter += 1\r\n \r\n return grid\r\n\r\ngrid = [[None for _ in range(4)] for _ in range(4)]\r\ngrid = read_input_file(\"input.txt\", grid)\r\n\r\ninitial = Node(None, grid)\r\nfrontier_initial = deque()\r\nfrontier_initial.append(initial)\r\nexplored_initial = []\r\n\r\ngoal_grid = [[1, 2, 3, 4],\r\n [5, 6, 7, 8],\r\n [9, 10, 11, 12],\r\n [13, 14, 0, 0]]\r\ngoal = Node(None, goal_grid)\r\nfrontier_goal = deque()\r\nfrontier_goal.append(goal)\r\nexplored_goal = []\r\n\r\nstart_time = time.time()\r\n\r\nbidirectional_search(frontier_initial, explored_initial, frontier_goal, explored_goal)\r\n\r\nprint(\"Initial side\")\r\nprint(\"frontier: \", len(frontier_initial))\r\nprint(\"explored: \", len(explored_initial), \"\\n\")\r\nprint(\"Goal side\")\r\nprint(\"frontier: \", len(frontier_goal))\r\nprint(\"explored: \", len(explored_goal))\r\n\r\nprint(\"--- %s seconds ---\" % (time.time() - start_time))\r\n"
] | [
[
"numpy.matrix"
]
] |
cloudcomputinghust/IoT | [
"5db3f9078be427fa23549add1747a067c2add767"
] | [
"test-component/draw_graph_2.py"
] | [
"import matplotlib.pyplot as plt\nimport numpy as np\nfrom influxdb import InfluxDBClient\nimport time\nimport datetime\nimport collections\n\n\ntime_min = '2017-04-03 16:35:00'\ntime_max = '2017-04-03 22:35:00'\ntime_min_2 = '2017-04-06 09:30:00'\ntime_max_2 = '2017-04-06 14:30:00'\n# time_min = '2017-03-25 00:00:00'\n# time_max = '2017-03-25 11:28:16'\n\ntime_grouped = '30s'\ntime_step = 5\nonem2m = ['onem2m-1', 'onem2m-2', 'onem2m-3']\nonem2m_naming = {'onem2m-1': '10 messages/m', 'onem2m-2': '20 messages/m', 'onem2m-3': '40 messages/m'}\nopenhab = ['openhab-1', 'openhab-2', 'openhab-3']\nopenhab_naming = {'openhab-1': '10 messages/m', 'openhab-2': '20 messages/m', 'openhab-3': '40 messages/m'}\ncluster = ['128.199.91.17', '139.59.98.138', '139.59.98.157']\nfog_mqtt = ['mqtt']\ncloud_mqtt = ['mqtt']\ncloud_processing = 'measure-data-rate'\ntime_range = 'AND time >\\'' + time_min + '\\' AND time < \\'' + time_max + '\\' '\nfog_namespace = 'kube-system'\ncloud_namespace = 'cloud-kube-system'\n# sensing_topic = ['onem2m_pf_1/temperature', 'onem2m_pf_6/temperature', 'onem2m_pf_11/temperature',\n# 'openhab_pf_1/temperature', 'openhab_pf_6/temperature', 'openhab_pf_11/temperature']\nsensing_topic = ['onem2m_pf_1/temperature','onem2m_pf_6/temperature', 'onem2m_pf_11/temperature', 'openhab_pf_1/temperature', 'openhab_pf_6/temperature', 'openhab_pf_11/temperature']\n\ndef cpu_cluster_query(_cluster_name):\n return 'SELECT sum(\"value\")/20 FROM \"cpu/usage_rate\" WHERE \"type\" = \\'node\\' AND \"nodename\"=\\'' + _cluster_name + '\\' AND time >\\'' + \\\n time_min + '\\' AND time < \\'' + time_max + '\\' GROUP BY time(' + str(\n time_grouped) + '), \"nodename\" fill(null);'\n\ndef memory_cluster_query(_cluster_name):\n return 'SELECT sum(\"value\")*100/(1024*1.95) FROM \"memory/usage\" WHERE \"type\" = \\'node\\' ' +time_range+\\\n ' AND \"nodename\"=\\''+_cluster_name+'\\' ' +\\\n 'GROUP BY time('+time_grouped+'), \"nodename\" fill(null);'\n\ndef net_cluster_query(_cluster_name):\n return 'SELECT sum(\"value\") FROM \"network/tx_rate\" WHERE \"type\" = \\'node\\' '+\\\n time_range + ' AND \"nodename\"=\\''+_cluster_name+'\\' ' + \\\n ' GROUP BY time('+time_grouped+'), \"nodename\" fill(null);'\n\ndef cpu_query(_pod_name, _namespace):\n return 'SELECT sum(\"value\") FROM \"cpu/usage_rate\" WHERE \"type\" = \\'pod_container\\' AND \"namespace_name\" = \\''+_namespace+'\\' AND \"pod_name\" = \\'{pod_name}\\' AND time >\\''.format(\n pod_name=_pod_name) + \\\n time_min + '\\' AND time < \\'' + time_max + '\\' GROUP BY time({time_grouped}), \"container_name\" fill(null);'.format(\n time_grouped=time_grouped)\n\ndef _cpu_query(_namespace):\n return 'SELECT sum(\"value\")/10 FROM \"cpu/usage_rate\" WHERE \"type\" = \\'pod_container\\' AND \"namespace_name\" = \\''+_namespace+'\\' AND time >\\'' + \\\n time_min + '\\' AND time < \\'' + time_max + '\\' GROUP BY time({time_grouped}), \"container_name\" fill(null);'.format(\n time_grouped=time_grouped)\n\ndef _mem_query(_namespace):\n return 'SELECT sum(\"value\")/(1024*1024) FROM \"memory/usage\" WHERE \"type\" = \\'pod_container\\' AND \"namespace_name\" = \\''+_namespace+'\\' AND time >\\'' + \\\n time_min + '\\' AND time < \\'' + time_max + '\\' GROUP BY time({time_grouped}), \"container_name\" fill(null);'.format(\n time_grouped=time_grouped)\n\ndef _mem_query_2(_namespace):\n return 'SELECT * FROM \"memory/usage\" WHERE \"type\" = \\'pod_container\\' AND \"namespace_name\" = \\''+_namespace+'\\' AND \"container_name\"=\\'onem2m-1\\' AND time =\\'' 
+ \\\n time_min + '\\' ;'.format(\n time_grouped=time_grouped)\n\ndef _net_query(_namespace, _group_by):\n return 'SELECT sum(\"value\")/1024 FROM \"network/tx_rate\" WHERE \"type\" = \\'pod\\' AND \"namespace_name\" = \\''+_namespace+'\\' AND time >\\'' + \\\n time_min + '\\' AND time < \\'' + time_max + '\\' GROUP BY time({time_grouped}), \"{group_by}\" fill(null);'.format(\n time_grouped=time_grouped, group_by=_group_by)\n\ndef mem_query(_pod_name, _namespace):\n return 'SELECT sum(\"value\")/(1024*1024) FROM \"memory/usage\" WHERE \"type\" = \\'pod_container\\' AND \"namespace_name\" = \\''+_namespace+'\\' AND \"pod_name\" = \\'{pod_name}\\' AND time >\\''.format(\n pod_name=_pod_name) + \\\n time_min + '\\' AND time < \\'' + time_max + '\\' GROUP BY time({time_grouped}), \"container_name\" fill(null);'.format(\n time_grouped=time_grouped)\n\n\ndef net_query(_pod_name, _namespace):\n return 'SELECT sum(\"value\")/1024 FROM \"network/tx_rate\" WHERE \"type\" = \\'pod\\' AND \"namespace_name\" = \\''+_namespace+'\\' AND \"pod_name\" = \\'{pod_name}\\' AND time >\\''.format(\n pod_name=_pod_name) + \\\n time_min + '\\' AND time < \\'' + time_max + '\\' GROUP BY time({time_grouped}) fill(null);'.format(\n time_grouped=time_grouped)\n\n\ndef data_rate_query():\n return 'SELECT sum(\"num_of_message\") FROM \"data_collect_rate\" WHERE time >\\'' + time_min + '\\' AND time < \\'' + time_max + '\\' GROUP BY time({time_grouped});'.format(\n time_grouped=time_grouped)\n\ndef data_sensing_query():\n return 'SELECT mean(\"value\") FROM \"data_collect_rate\" WHERE time >\\'' + time_min_2 + '\\' AND time < \\'' + time_max_2 + '\\' GROUP BY time({time_grouped}), \"topic_id\" fill(null);'.format(\n time_grouped=time_grouped)\n\ndef data_deplay_query(select_field):\n return 'SELECT mean(\"'+select_field+'\") FROM \"data_collect_rate\" WHERE time >\\'' + time_min_2 + '\\' AND time < \\'' + time_max_2 + '\\' GROUP BY \"num_of_sensor\" fill(null);'\n\n# def query_metric(_query):\n# result = client.query(_query)\n# x_val = list()\n# y_val = list()\n# for k, v in result.items():\n# _list = list(v)\n# _time_start = time.mktime(datetime.datetime.strptime(_list[0]['time'], \"%Y-%m-%dT%H:%M:%SZ\").timetuple())\n# for item in _list:\n# val = 0\n# if len(y_val) > 0:\n# val = y_val[len(y_val) - 1]\n# if item['sum']:\n# val = item['sum']\n# time_stamp = time.mktime(datetime.datetime.strptime(item['time'], \"%Y-%m-%dT%H:%M:%SZ\").timetuple())\n# x_val.append((time_stamp - _time_start) / 60)\n# y_val.append(val)\n# break\n# time.sleep(2)\n# return {'x': x_val, 'y': y_val}\n\ndef query_metric(_query, _group_by=None, _aggre_metric=None):\n if (not _group_by) and (not _aggre_metric):\n result = client.query(_query)\n x_val = list()\n y_val = list()\n for k, v in result.items():\n _list = list(v)\n _time_start = time.mktime(datetime.datetime.strptime(_list[0]['time'], \"%Y-%m-%dT%H:%M:%SZ\").timetuple())\n for item in _list:\n # val = 0\n # if len(y_val) > 0:\n # val = y_val[len(y_val) - 1]\n val = None\n if item['sum']:\n val = item['sum']\n time_stamp = time.mktime(datetime.datetime.strptime(item['time'], \"%Y-%m-%dT%H:%M:%SZ\").timetuple())\n x_val.append((time_stamp - _time_start) / 60)\n y_val.append(val)\n break\n time.sleep(2)\n return {'x': x_val, 'y': y_val}\n result = client.query(_query)\n lines = dict()\n for k, v in result.items():\n _list = list(v)\n _time_start = time.mktime(datetime.datetime.strptime(_list[0]['time'], \"%Y-%m-%dT%H:%M:%SZ\").timetuple())\n for item in _list:\n # val = 0\n val = None\n if 
item[_aggre_metric]:\n val = item[_aggre_metric]\n time_stamp = time.mktime(datetime.datetime.strptime(item['time'], \"%Y-%m-%dT%H:%M:%SZ\").timetuple())\n if not lines.get(k[1][_group_by]):\n lines[k[1][_group_by]] = {'x': list(), 'y': list()}\n lines.get(k[1][_group_by]).get('x').append((time_stamp - _time_start) / 60)\n lines.get(k[1][_group_by]).get('y').append(val)\n time.sleep(2)\n return lines\n\ndef mean_values(values, field_1='x', field_2='y'):\n result = []\n result_2 = []\n min_len = len(values[0][field_2])\n if len(values[0][field_1]) > len(values[1][field_1]):\n min_len = len(values[1][field_2])\n if min_len > len(values[2][field_2]):\n min_len = len(values[2][field_2])\n for index in range(0, min_len):\n if values[0][field_2][index] and values[1][field_2][index] and values[2][field_2][index]:\n result.append((values[0][field_2][index] + values[1][field_2][index] + values[2][field_2][index]) / 3)\n else:\n result.append(None)\n result_2.append(values[0][field_1][index])\n return {field_1: result_2, field_2: result}\n\ndef gen_plot_by_row(plt, data, y_index,num_col, num_row, row_label, titles, line_type, marker=None, scale=False):\n # num_of_col = len(data)\n x_index = 0\n for item in data:\n if x_index == 0:\n gen_plot(plt=plt, data=item, index=(x_index+y_index*num_col+1), line_type=line_type, y_label=row_label,\n title=titles[x_index], num_col=num_col, nul_row=num_row, marker=marker, scale=scale)\n else:\n gen_plot(plt=plt, data=item, index=(x_index + y_index * num_col + 1), line_type=line_type,\n title=titles[x_index], num_col=num_col, nul_row=num_row, marker=marker, scale=scale)\n x_index += 1\n\ndef gen_plot(plt, data, index, line_type, num_col, nul_row,y_label=None, x_label='time(s)', title=None, marker=None, scale=False):\n plt.subplot(int('{}{}{}'.format(nul_row, num_col, index)))\n if isinstance(data, list):\n for line in data:\n plt.plot(line['x'], line['y'])\n elif isinstance(data, dict):\n if data.get('x', 0) == 0:\n count = 0\n temp = dict()\n keys = data.keys()\n sorted(keys)\n for k in keys:\n temp[k] = data[k]\n for _key_group, _values in temp.items():\n series1 = np.array(_values['y']).astype(np.double)\n s1mask = np.isfinite(series1)\n series = np.array(_values['x'])\n if len(data) > 3:\n # plt.plot(series[s1mask], series1[s1mask], marker=marker[count], linewidth=1)\n plt.plot(series[s1mask], series1[s1mask], linewidth=2, linestyle = line_type[count])\n else:\n plt.plot(series[s1mask], series1[s1mask], linewidth=1)\n if scale:\n plt.yscale('log')\n count += 1\n # plt.plot(_values['x'], _values['y'])\n # plt.legend(data.keys(), ncol=int(len(data.keys())/3), loc='upper left')\n plt.legend(data.keys(), ncol=int(len(data.keys())/3), loc='upper right', columnspacing=1.5, labelspacing=0.0,\n handletextpad=0.0, handlelength=1.0, fontsize='small')\n else:\n plt.plot(data['x'], data['y'], line_type[0])\n if y_label:\n plt.ylabel(y_label)\n if x_label:\n plt.xlabel(x_label)\n plt.title(title)\n plt.grid(True)\n plt.xticks(np.arange(0, 360 + 1, 30.0))\n # plt.xticks(np.arange(0, 120 + 1, 10.0))\n\ndef draw_graps(data=dict()):\n line_type = ['-', '-.', '--', ':', '-.', '--']\n marker = ['.', 'o', 'v', 'x', '+', '<', '*']\n # plot with various axes scales\n plt.figure(1)\n # cpu\n # col_1 = {onem2m_naming[k]: data['fog']['cpu'][k] for k in onem2m}\n # # col_1['mean'] = mean_values(list(col_1.values()))\n # col_2 = {openhab_naming[k]: data['fog']['cpu'][k] for k in openhab}\n # # col_2['mean'] = mean_values(list(col_2.values()))\n # col_3 = {k: data['fog']['cpu'][k] for k 
in fog_mqtt}\n # rows = [col_1, col_2, col_3]\n # titles = ['ONEM2M CPU USAGE', 'OPENHAB CPU USAGE', 'MQTT CPU USAGE']\n # gen_plot_by_row(plt=plt, data=rows, y_index=0, row_label='cpu_usage(%)', titles=titles, num_col=len(data['fog']), num_row=3,\n # line_type=line_type)\n #\n # col_1 = {onem2m_naming[k]: data['fog']['memory'][k] for k in onem2m}\n # # col_1['mean'] = mean_values(list(col_1.values()))\n # col_2 = {openhab_naming[k]: data['fog']['memory'][k] for k in openhab}\n # # col_2['mean'] = mean_values(list(col_2.values()))\n # col_3 = {k: data['fog']['memory'][k] for k in fog_mqtt}\n # rows = [col_1, col_2, col_3]\n # titles = ['ONEM2M MEM USAGE', 'OPENHAB MEM USAGE', 'MQTT MEM USAGE']\n # gen_plot_by_row(plt=plt, data=rows, y_index=1, row_label='memory_usage(MB)', titles=titles, num_col=len(data['fog']), num_row=3,\n # line_type=line_type)\n #\n # col_1 = {onem2m_naming[k]: data['fog']['network'].get('app:{}'.format(k)) for k in onem2m}\n # # col_1['mean'] = mean_values(list(col_1.values()))\n # col_2 = {openhab_naming[k]: data['fog']['network'].get('app:{}'.format(k)) for k in openhab}\n # # col_2['mean'] = mean_values(list(col_2.values()))\n # col_3 = {k: data['fog']['network'].get('app:{}'.format(k)) for k in fog_mqtt}\n # rows = [col_1, col_2, col_3]\n # titles = ['ONEM2M NET USAGE', 'OPENHAB NET USAGE', 'MQTT NET USAGE']\n # gen_plot_by_row(plt=plt, data=rows, y_index=2, row_label='network_usage(kBps)', titles=titles, num_col=len(data['fog']), num_row=3,\n # line_type=line_type)\n # plt.subplots_adjust(top=0.93, bottom=0.07, left=0.05, right=0.96, hspace=0.51,\n # wspace=0.19)\n # plt.show()\n # #\n # # ################\n # plt.figure(2)\n # col_1 = {cloud_processing: data['cloud']['cpu'][cloud_processing]}\n # # col_2 = {cloud_mqtt: data['cloud']['cpu'][cloud_mqtt]}\n # col_2 = {k: data['cloud']['cpu'][k] for k in cloud_mqtt}\n # rows = [col_1, col_2]\n # titles = ['DATA_PROCESSING CPU USAGE', 'CLOUD MQTT CPU USAGE']\n # gen_plot_by_row(plt=plt, data=rows, y_index=0, row_label='cpu_usage(%)', titles=titles, num_col=2, num_row=3,\n # line_type=line_type)\n #\n # col_1 = {cloud_processing: data['cloud']['memory'][cloud_processing]}\n # # col_2 = {cloud_mqtt: data['cloud']['memory'][cloud_mqtt]}\n # col_2 = {k: data['cloud']['memory'][k] for k in cloud_mqtt}\n # rows = [col_1, col_2]\n # # rows = [data['cloud']['memory'][cloud_processing], data['cloud']['memory'][cloud_mqtt]]\n # titles = ['DATA_PROCESSING MEM USAGE', 'CLOUD MQTT MEM USAGE']\n # gen_plot_by_row(plt=plt, data=rows, y_index=1, row_label='memory_usage(MB)', titles=titles, num_col=2, num_row=3,\n # line_type=line_type)\n #\n # col_1 = {cloud_processing: data['cloud']['network'][cloud_processing]}\n # # col_2 = {cloud_mqtt: data['cloud']['network'][cloud_mqtt]}\n # col_2 = {k: data['cloud']['network'][k] for k in cloud_mqtt}\n # rows = [col_1, col_2]\n # # rows = [data['cloud']['network'][cloud_processing], data['cloud']['network'][cloud_mqtt]]\n # titles = ['DATA_PROCESSING NET USAGE', 'CLOUD MQTT NET USAGE']\n # gen_plot_by_row(plt=plt, data=rows, y_index=2, row_label='network_usage(kBps)', titles=titles, num_col=2, num_row=3,\n # line_type=line_type)\n # plt.show()\n\n #################\n plt.figure(3)\n\n rows = [{k: data['cloud']['sensing_data'][k] for k in sensing_topic}]\n titles = ['SENSING DATA']\n gen_plot_by_row(plt=plt, data=rows, y_index=0, row_label='Value', titles=titles, num_col=1,\n num_row=1,\n line_type=line_type, marker=marker)\n\n # show\n plt.subplots_adjust(top=0.93, bottom=0.07, left=0.05, 
right=0.99, hspace=0.85,\n wspace=0.19)\n plt.show()\n return\n\n\nclient = InfluxDBClient('188.166.238.158', 32485, 'root', 'root', 'k8s')\ndata = dict()\n\n# get metric\npod_names = {'fog': {'onem2m': onem2m, 'openhab': openhab, 'mqtt': fog_mqtt}, 'cloud': {'mqtt': cloud_mqtt, 'processing': cloud_processing}}\nnamespaces = {'fog': fog_namespace, 'cloud': cloud_namespace}\nresource_metrics = {'cpu', 'memory', 'network'}\nresource_query = {'cpu': _cpu_query, 'memory': _mem_query, 'network': _net_query}\ndata['fog'] = dict()\ndata['cloud'] = dict()\n# data['fog']['cpu'] = query_metric(_cpu_query(namespaces['fog']), 'container_name', 'sum')\n# data['fog']['memory'] = query_metric(_mem_query(namespaces['fog']), 'container_name', 'sum')\n# data['fog']['network'] = query_metric(_net_query(namespaces['fog'], 'labels'), 'labels', 'sum')\n# temp = dict(data['fog']['network'])\n# for key, value in temp.items():\n# for check_key in onem2m:\n# if key.find(check_key) >= 0:\n# data['fog']['network'][check_key] = value\n# continue\n# for check_key in openhab:\n# if key.find(check_key) >= 0:\n# data['fog']['network'][check_key] = value\n# continue\n# for check_key in fog_mqtt:\n# if key.find(check_key) >= 0:\n# data['fog']['network'][check_key] = value\n# continue\n#\n# print('query fog done')\n# data['cloud']['cpu'] = query_metric(_cpu_query(namespaces['cloud']), 'container_name', 'sum')\n# data['cloud']['memory'] = query_metric(_mem_query(namespaces['cloud']), 'container_name', 'sum')\n# data['cloud']['network'] = query_metric(_net_query(namespaces['cloud'], 'pod_name'), 'pod_name', 'sum')\n# temp = dict(data['cloud']['network'])\n# for key, value in temp.items():\n# for check_key in cloud_mqtt:\n# if key.find(check_key) >= 0:\n# data['cloud']['network'][check_key] = value\n# continue\n# if key.find(cloud_processing) >= 0:\n# data['cloud']['network'][cloud_processing] = value\n# continue\n# data['cloud']['sensing_data'] = query_metric(data_sensing_query(), 'topic_id', 'mean')\n# for k,v in data['cloud']['sensing_data'].items():\n# print(k)\n# print(v)\nprint('query cloud done')\n# draw_graps(data)\n\n# _data = client.query(data_deplay_query('round_trip_3'))\n# for k, v in _data.items():\n# print(k[1]['num_of_sensor'])\n# print(list(v)[0]['mean'])\n# print('-----------------------------------------------')\n\n# _data_1 = client.query(data_deplay_query('time_send_cloud'))\n# series_1 = {'x': list(), 'y': list()}\n# for k, v in _data_1.items():\n# # series_1['x'].append(int(k[1]['num_of_sensor']))\n# # series_1['y'].append(float(list(v)[0]['mean']))\n# print(k[1]['num_of_sensor'])\n# print(list(v)[0])\n # print(list(v)[0]['mean'])\n#\n_data_1 = client.query(data_deplay_query('round_trip_1'))\nseries_1 = {'x': list(), 'y': list()}\nfor k, v in _data_1.items():\n series_1['x'].append(int(k[1]['num_of_sensor']))\n series_1['y'].append(float(list(v)[0]['mean']))\n # print(k[1]['num_of_sensor'])\n # print(list(v)[0]['mean'])\nprint('-----------------------------------------------')\nseries_2 = {'x': list(), 'y': list()}\n_data_2 = client.query(data_deplay_query('round_trip_2'))\n\nfor k, v in _data_2.items():\n # print(k[1]['num_of_sensor'])\n # print(list(v)[0]['mean'])\n series_2['x'].append(int(k[1]['num_of_sensor']))\n series_2['y'].append(float(list(v)[0]['mean']+1))\n\nprint(series_1)\nprint(series_2)\n\nwidth = 1 # the width of the bars: can also be len(x) sequence\n\np1 = plt.bar(series_1['x'], series_1['y'], width, color='#d62728')\np2 = plt.bar(series_2['x'], series_2['y'], width,\n 
bottom=series_1['y'])\n\n\nplt.ylabel('Transmission Time (seconds)')\nplt.xlabel('Number of sensors per platform (on 5 platforms)')\nplt.title('Tranmission time by number of sensor')\nplt.xticks(series_1['x'])\n# plt.yticks(np.arange(0, 300, 10))\nplt.legend((p1[0], p2[0]), ('Sensor - Platform Transmission Time', 'Platform - Cloud Transmission Time'))\n\n# def autolabel(rects):\n# \"\"\"\n# Attach a text label above each bar displaying its height\n# \"\"\"\n# for rect in rects:\n# height = rect.get_height()\n# plt.text(rect.get_x() + rect.get_width()/2., 1.05*height,\n# '%d' % int(height),\n# ha='center', va='bottom')\n\nplt.show()\n"
] | [
[
"numpy.array",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.title",
"numpy.isfinite",
"matplotlib.pyplot.figure",
"numpy.arange",
"matplotlib.pyplot.yscale",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.subplots_adjust",
"matplotlib.pyplot.bar",
"matplotlib.pyplot.grid",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.xticks",
"matplotlib.pyplot.show",
"matplotlib.pyplot.ylabel"
]
] |
slipperlobster/flipper | [
"527952a74bc76f76cf3a2d25755386f8db285885",
"527952a74bc76f76cf3a2d25755386f8db285885",
"527952a74bc76f76cf3a2d25755386f8db285885",
"527952a74bc76f76cf3a2d25755386f8db285885",
"8482edd77604fcec2ea08913f1748c21be80dac7",
"527952a74bc76f76cf3a2d25755386f8db285885"
] | [
"jesse/indicators/gatorosc.py",
"jesse/indicators/voss.py",
"jesse/indicators/gauss.py",
"jesse/factories/candle_factory.py",
"jesse/indicators/vwap.py",
"jesse/indicators/mwdx.py"
] | [
"from collections import namedtuple\n\nimport numpy as np\nimport talib\n\nfrom jesse.helpers import get_candle_source, np_shift\nfrom jesse.helpers import slice_candles\n\nGATOR = namedtuple('GATOR', ['upper', 'lower', 'upper_change', 'lower_change'])\n\n\ndef gatorosc(candles: np.ndarray, source_type: str = \"close\", sequential: bool = False) -> GATOR:\n \"\"\"\n Gator Oscillator by Bill M. Williams\n\n :param candles: np.ndarray\n :param source_type: str - default: \"close\"\n :param sequential: bool - default: False\n\n :return: GATOR(upper, lower, upper_change, lower_change)\n \"\"\"\n\n candles = slice_candles(candles, sequential)\n\n source = get_candle_source(candles, source_type=source_type)\n\n jaw = np_shift(numpy_ewma(source, 13), 8, fill_value=np.nan)\n teeth = np_shift(numpy_ewma(source, 8), 5, fill_value=np.nan)\n lips = np_shift(numpy_ewma(source, 5), 3, fill_value=np.nan)\n\n upper = np.abs(jaw - teeth)\n lower = -np.abs(teeth - lips)\n\n upper_change = talib.MOM(upper, timeperiod=1)\n lower_change = -talib.MOM(lower, timeperiod=1)\n\n if sequential:\n return GATOR(upper, lower, upper_change, lower_change)\n else:\n return GATOR(upper[-1], lower[-1], upper_change[-1], lower_change[-1])\n\n\ndef numpy_ewma(data, window):\n \"\"\"\n\n :param data:\n :param window:\n :return:\n \"\"\"\n alpha = 1 / window\n # scale = 1 / (1 - alpha)\n n = data.shape[0]\n scale_arr = (1 - alpha) ** (-1 * np.arange(n))\n weights = (1 - alpha) ** np.arange(n)\n pw0 = (1 - alpha) ** (n - 1)\n mult = data * pw0 * scale_arr\n cumsums = mult.cumsum()\n return cumsums * scale_arr[::-1] / weights.cumsum()\n",
"from collections import namedtuple\n\nimport numpy as np\ntry:\n from numba import njit\nexcept ImportError:\n njit = lambda a : a\n\nfrom jesse.helpers import get_candle_source, slice_candles\n\nVossFilter = namedtuple('VossFilter', ['voss', 'filt'])\n\n\ndef voss(candles: np.ndarray, period: int = 20, predict: int = 3, bandwith: float = 0.25, source_type: str = \"close\",\n sequential: bool = False) -> VossFilter:\n \"\"\"\n Voss indicator by John F. Ehlers\n\n :param candles: np.ndarray\n :param period: int - default: 20\n :param predict: int - default: 3\n :param bandwith: float - default: 0.25\n :param source_type: str - default: \"close\"\n :param sequential: bool - default: False\n\n :return: float | np.ndarray\n \"\"\"\n\n candles = slice_candles(candles, sequential)\n\n source = get_candle_source(candles, source_type=source_type)\n voss_val, filt = voss_fast(source, period, predict, bandwith)\n\n if sequential:\n return VossFilter(voss_val, filt)\n else:\n return VossFilter(voss_val[-1], filt[-1])\n\n\n@njit\ndef voss_fast(source, period, predict, bandwith):\n voss = np.full_like(source, 0)\n filt = np.full_like(source, 0)\n\n pi = np.pi\n\n order = 3 * predict\n f1 = np.cos(2 * pi / period)\n g1 = np.cos(bandwith * 2 * pi / period)\n s1 = 1 / g1 - np.sqrt(1 / (g1 * g1) - 1)\n\n for i in range(source.shape[0]):\n if i > period and i > 5 and i > order:\n filt[i] = 0.5 * (1 - s1) * (source[i] - source[i - 2]) + f1 * (1 + s1) * filt[i - 1] - s1 * filt[i - 2]\n\n for i in range(source.shape[0]):\n if not (i <= period or i <= 5 or i <= order):\n sumc = 0\n for count in range(order):\n sumc = sumc + ((count + 1) / float(order)) * voss[i - (order - count)]\n voss[i] = ((3 + order) / 2) * filt[i] - sumc\n return voss, filt\n",
"from typing import Union\n\nimport numpy as np\ntry:\n from numba import njit\nexcept ImportError:\n njit = lambda a : a\n\nfrom jesse.helpers import get_candle_source, slice_candles\n\n\ndef gauss(candles: np.ndarray, period: int = 14, poles: int = 4, source_type: str = \"close\",\n sequential: bool = False) -> Union[float, np.ndarray]:\n \"\"\"\n Gaussian Filter\n\n :param candles: np.ndarray\n :param period: int - default: 14\n :param poles: int - default: 4\n :param source_type: str - default: \"close\"\n :param sequential: bool - default: False\n\n :return: float | np.ndarray\n \"\"\"\n\n if len(candles.shape) == 1:\n source = candles\n else:\n candles = slice_candles(candles, sequential)\n source = get_candle_source(candles, source_type=source_type)\n\n fil, to_fill = gauss_fast(source, period, poles)\n\n if to_fill != 0:\n res = np.insert(fil[poles:], 0, np.repeat(np.nan, to_fill))\n else:\n res = fil[poles:]\n\n return res if sequential else res[-1]\n\n\n@njit\ndef gauss_fast(source, period, poles):\n N = source.size\n source = source[~np.isnan(source)]\n to_fill = N - source.size\n PI = np.pi\n beta = (1 - np.cos(2 * PI / period)) / (np.power(2, 1 / poles) - 1)\n alpha = -beta + np.sqrt(np.power(beta, 2) + 2 * beta)\n\n fil = np.zeros(poles + source.size)\n if poles == 1:\n coeff = np.array([alpha, (1 - alpha)])\n elif poles == 2:\n coeff = np.array([alpha ** 2, 2 * (1 - alpha), -(1 - alpha) ** 2])\n elif poles == 3:\n coeff = np.array([alpha ** 3, 3 * (1 - alpha), -3 * (1 - alpha) ** 2, (1 - alpha) ** 3])\n elif poles == 4:\n coeff = np.array([alpha ** 4, 4 * (1 - alpha), -6 * (1 - alpha) ** 2, 4 * (1 - alpha) ** 3, -(1 - alpha) ** 4])\n\n for i in range(source.size):\n if poles == 1:\n val = np.array([source[i].item(), fil[i]])\n elif poles == 2:\n val = np.array([source[i].item(), fil[1 + i], fil[i]])\n elif poles == 3:\n val = np.array([source[i].item(), fil[2 + i], fil[1 + i], fil[i]])\n elif poles == 4:\n val = np.array([source[i].item(), fil[3 + i], fil[2 + i], fil[1 + i], fil[i]])\n\n fil[poles + i] = np.dot(coeff, val)\n\n return fil, to_fill\n",
"from random import randint\nfrom typing import Union\n\nimport numpy as np\n\nfirst_timestamp = 1552309186171\nopen_price = randint(40, 100)\nclose_price = randint(open_price, 110) if randint(0, 1) else randint(\n 30, open_price)\nmax_price = max(open_price, close_price)\nhigh_price = max_price if randint(0, 1) else randint(max_price, max_price + 10)\nmin_price = min(open_price, close_price)\nlow_price = min_price if randint(0, 1) else randint(min_price, min_price + 10)\n\n\ndef fake_range_candle(count: int) -> np.ndarray:\n fake_candle(reset=True)\n arr = np.zeros((count, 6))\n for i in range(count):\n arr[i] = fake_candle()\n return arr\n\n\ndef fake_range_candle_from_range_prices(prices: Union[list, range]) -> np.ndarray:\n fake_candle(reset=True)\n global first_timestamp\n arr = []\n prev_p = np.nan\n for p in prices:\n # first prev_p\n if np.isnan(prev_p):\n prev_p = p - 0.5\n\n first_timestamp += 60000\n open_p = prev_p\n close_p = p\n high_p = max(open_p, close_p)\n low_p = min(open_p, close_p)\n vol = randint(0, 200)\n\n arr.append([first_timestamp, open_p, close_p, high_p, low_p, vol])\n\n # save prev_p for next candle\n prev_p = p\n\n return np.array(arr)\n\n\ndef fake_candle(attributes: dict = None, reset: bool = False) -> np.ndarray:\n global first_timestamp\n global open_price\n global close_price\n global max_price\n global high_price\n global min_price\n global low_price\n\n if reset:\n first_timestamp = 1552309186171\n open_price = randint(40, 100)\n close_price = randint(open_price, 110)\n high_price = max(open_price, close_price)\n low_price = min(open_price, close_price)\n\n if attributes is None:\n attributes = {}\n\n first_timestamp += 60000\n open_price = close_price\n close_price += randint(1, 8)\n high_price = max(open_price, close_price)\n low_price = min(open_price - 1, close_price)\n volume = randint(1, 100)\n timestamp = first_timestamp\n\n return np.array([\n attributes.get('timestamp', timestamp),\n attributes.get('open', open_price),\n attributes.get('close', close_price),\n attributes.get('high', high_price),\n attributes.get('low', low_price),\n attributes.get('volume', volume)\n ], dtype=np.float64)\n",
"from typing import Union\n\nimport numpy as np\ntry:\n from numba import njit\n from numpy_groupies import aggregate_nb as aggregate\nexcept ImportError:\n from numpy_groupies import aggregate\n\nfrom jesse.helpers import get_candle_source, slice_candles\n\n\ndef vwap(\n candles: np.ndarray, source_type: str = \"hlc3\", anchor: str = \"D\", sequential: bool = False\n) -> Union[float, np.ndarray]:\n \"\"\"\n VWAP\n\n :param candles: np.ndarray\n :param source_type: str - default: \"close\"\n :param anchor: str - default: \"D\"\n :param sequential: bool - default: False\n\n :return: float | np.ndarray\n \"\"\"\n candles = slice_candles(candles, sequential)\n\n source = get_candle_source(candles, source_type=source_type)\n\n group_idx = candles[:, 0].astype('datetime64[ms]').astype(f'datetime64[{anchor}]').astype('int')\n vwap_values = aggregate(group_idx, candles[:, 5] * source, func='cumsum')\n vwap_values /= aggregate(group_idx, candles[:, 5], func='cumsum')\n\n if sequential:\n return vwap_values\n else:\n return None if np.isnan(vwap_values[-1]) else vwap_values[-1]\n",
"from typing import Union\n\nimport numpy as np\n\ntry:\n from numba import njit\nexcept ImportError:\n njit = lambda a: a\n\nfrom jesse.helpers import get_candle_source, slice_candles\n\n\ndef mwdx(candles: np.ndarray, factor: float = 0.2, source_type: str = \"close\", sequential: bool = False) -> Union[\n float, np.ndarray]:\n \"\"\"\n MWDX Average\n\n :param candles: np.ndarray\n :param factor: float - default: 0.2\n :param source_type: str - default: \"close\"\n :param sequential: bool - default: False\n\n :return: float | np.ndarray\n \"\"\"\n\n # Accept normal array too.\n if len(candles.shape) == 1:\n source = candles\n else:\n candles = slice_candles(candles, sequential)\n source = get_candle_source(candles, source_type=source_type)\n\n val2 = (2 / factor) - 1\n fac = 2 / (val2 + 1)\n\n res = mwdx_fast(source, fac)\n\n return res if sequential else res[-1]\n\n\n@njit\ndef mwdx_fast(source, fac):\n newseries = np.copy(source)\n for i in range(1, source.shape[0]):\n newseries[i] = (fac * source[i]) + ((1 - fac) * newseries[i - 1])\n return newseries\n"
] | [
[
"numpy.arange",
"numpy.abs"
],
[
"numpy.full_like",
"numpy.sqrt",
"numpy.cos"
],
[
"numpy.dot",
"numpy.power",
"numpy.isnan",
"numpy.cos",
"numpy.repeat",
"numpy.array",
"numpy.zeros"
],
[
"numpy.isnan",
"numpy.array",
"numpy.zeros"
],
[
"numpy.isnan"
],
[
"numpy.copy"
]
] |
legend-of-zyda/LuxPythonEnvGym | [
"7d818b5943dad1b7fae3c66b612aae93c743bd0e"
] | [
"examples/agent_policy.py"
] | [
"import sys\nimport time\nfrom functools import partial # pip install functools\nimport copy\nimport random\n\nimport numpy as np\nfrom gym import spaces\n\nfrom luxai2021.env.agent import Agent, AgentWithModel\nfrom luxai2021.game.actions import *\nfrom luxai2021.game.game_constants import GAME_CONSTANTS\nfrom luxai2021.game.position import Position\n\n\n# https://codereview.stackexchange.com/questions/28207/finding-the-closest-point-to-a-list-of-points\ndef closest_node(node, nodes):\n dist_2 = np.sum((nodes - node) ** 2, axis=1)\n return np.argmin(dist_2)\ndef furthest_node(node, nodes):\n dist_2 = np.sum((nodes - node) ** 2, axis=1)\n return np.argmax(dist_2)\n\ndef smart_transfer_to_nearby(game, team, unit_id, unit, target_type_restriction=None, **kwarg):\n \"\"\"\n Smart-transfers from the specified unit to a nearby neighbor. Prioritizes any\n nearby carts first, then any worker. Transfers the resource type which the unit\n has most of. Picks which cart/worker based on choosing a target that is most-full\n but able to take the most amount of resources.\n\n Args:\n team ([type]): [description]\n unit_id ([type]): [description]\n\n Returns:\n Action: Returns a TransferAction object, even if the request is an invalid\n transfer. Use TransferAction.is_valid() to check validity.\n \"\"\"\n\n # Calculate how much resources could at-most be transferred\n resource_type = None\n resource_amount = 0\n target_unit = None\n\n if unit != None:\n for type, amount in unit.cargo.items():\n if amount > resource_amount:\n resource_type = type\n resource_amount = amount\n\n # Find the best nearby unit to transfer to\n unit_cell = game.map.get_cell_by_pos(unit.pos)\n adjacent_cells = game.map.get_adjacent_cells(unit_cell)\n\n \n for c in adjacent_cells:\n for id, u in c.units.items():\n # Apply the unit type target restriction\n if target_type_restriction == None or u.type == target_type_restriction:\n if u.team == team:\n # This unit belongs to our team, set it as the winning transfer target\n # if it's the best match.\n if target_unit is None:\n target_unit = u\n else:\n # Compare this unit to the existing target\n if target_unit.type == u.type:\n # Transfer to the target with the least capacity, but can accept\n # all of our resources\n if( u.get_cargo_space_left() >= resource_amount and \n target_unit.get_cargo_space_left() >= resource_amount ):\n # Both units can accept all our resources. Prioritize one that is most-full.\n if u.get_cargo_space_left() < target_unit.get_cargo_space_left():\n # This new target it better, it has less space left and can take all our\n # resources\n target_unit = u\n \n elif( target_unit.get_cargo_space_left() >= resource_amount ):\n # Don't change targets. 
Current one is best since it can take all\n # the resources, but new target can't.\n pass\n \n elif( u.get_cargo_space_left() > target_unit.get_cargo_space_left() ):\n # Change targets, because neither target can accept all our resources and \n # this target can take more resources.\n target_unit = u\n elif u.type == Constants.UNIT_TYPES.CART:\n # Transfer to this cart instead of the current worker target\n target_unit = u\n \n # Build the transfer action request\n target_unit_id = None\n if target_unit is not None:\n target_unit_id = target_unit.id\n\n # Update the transfer amount based on the room of the target\n if target_unit.get_cargo_space_left() < resource_amount:\n resource_amount = target_unit.get_cargo_space_left()\n \n return TransferAction(team, unit_id, target_unit_id, resource_type, resource_amount)\n\n########################################################################################################################\n# This is the Agent that you need to design for the competition\n########################################################################################################################\nclass AgentPolicy(AgentWithModel):\n def __init__(self, mode=\"train\", model=None) -> None:\n \"\"\"\n Arguments:\n mode: \"train\" or \"inference\", which controls if this agent is for training or not.\n model: The pretrained model, or if None it will operate in training mode.\n \"\"\"\n super().__init__(mode, model)\n\n # Define action and observation space\n # They must be gym.spaces objects\n # Example when using discrete actions:\n self.actions_units = [\n partial(MoveAction, direction=Constants.DIRECTIONS.CENTER), # This is the do-nothing action\n partial(MoveAction, direction=Constants.DIRECTIONS.NORTH),\n partial(MoveAction, direction=Constants.DIRECTIONS.WEST),\n partial(MoveAction, direction=Constants.DIRECTIONS.SOUTH),\n partial(MoveAction, direction=Constants.DIRECTIONS.EAST),\n partial(smart_transfer_to_nearby, target_type_restriction=Constants.UNIT_TYPES.CART), # Transfer to nearby cart\n partial(smart_transfer_to_nearby, target_type_restriction=Constants.UNIT_TYPES.WORKER), # Transfer to nearby worker\n SpawnCityAction,\n PillageAction,\n ]\n self.actions_cities = [\n SpawnWorkerAction,\n SpawnCartAction,\n ResearchAction,\n ]\n self.action_space = spaces.Discrete(max(len(self.actions_units), len(self.actions_cities)))\n\n # Observation space: (Basic minimum for a miner agent)\n # Object:\n # 1x is worker\n # 1x is cart\n # 1x is citytile\n #\n # 5x direction_nearest_wood\n # 1x distance_nearest_wood\n # 1x amount\n #\n # 5x direction_nearest_coal\n # 1x distance_nearest_coal\n # 1x amount\n #\n # 5x direction_nearest_uranium\n # 1x distance_nearest_uranium\n # 1x amount\n #\n # 5x direction_nearest_city\n # 1x distance_nearest_city\n # 1x amount of fuel\n #\n # 28x (the same as above, but direction, distance, and amount to the furthest of each)\n #\n # 5x direction_nearest_worker\n # 1x distance_nearest_worker\n # 1x amount of cargo\n # Unit:\n # 1x cargo size\n # State:\n # 1x is night\n # 1x percent of game done\n # 2x citytile counts [cur player, opponent]\n # 2x worker counts [cur player, opponent]\n # 2x cart counts [cur player, opponent]\n # 1x research points [cur player]\n # 1x researched coal [cur player]\n # 1x researched uranium [cur player]\n self.observation_shape = (3 + 7 * 5 * 2 + 1 + 1 + 1 + 2 + 2 + 2 + 3,)\n self.observation_space = spaces.Box(low=0, high=1, shape=\n self.observation_shape, dtype=np.float16)\n\n self.object_nodes = {}\n\n 
def get_agent_type(self):\n \"\"\"\n Returns the type of agent. Use AGENT for inference, and LEARNING for training a model.\n \"\"\"\n if self.mode == \"train\":\n return Constants.AGENT_TYPE.LEARNING\n else:\n return Constants.AGENT_TYPE.AGENT\n\n def get_observation(self, game, unit, city_tile, team, is_new_turn):\n \"\"\"\n Implements getting a observation from the current game for this unit or city\n \"\"\"\n observation_index = 0\n if is_new_turn:\n # It's a new turn this event. This flag is set True for only the first observation from each turn.\n # Update any per-turn fixed observation space that doesn't change per unit/city controlled.\n\n # Build a list of object nodes by type for quick distance-searches\n self.object_nodes = {}\n\n # Add resources\n for cell in game.map.resources:\n if cell.resource.type not in self.object_nodes:\n self.object_nodes[cell.resource.type] = np.array([[cell.pos.x, cell.pos.y]])\n else:\n self.object_nodes[cell.resource.type] = np.concatenate(\n (\n self.object_nodes[cell.resource.type],\n [[cell.pos.x, cell.pos.y]]\n ),\n axis=0\n )\n\n # Add your own and opponent units\n for t in [team, (team + 1) % 2]:\n for u in game.state[\"teamStates\"][team][\"units\"].values():\n key = str(u.type)\n if t != team:\n key = str(u.type) + \"_opponent\"\n\n if key not in self.object_nodes:\n self.object_nodes[key] = np.array([[u.pos.x, u.pos.y]])\n else:\n self.object_nodes[key] = np.concatenate(\n (\n self.object_nodes[key],\n [[u.pos.x, u.pos.y]]\n )\n , axis=0\n )\n\n # Add your own and opponent cities\n for city in game.cities.values():\n for cells in city.city_cells:\n key = \"city\"\n if city.team != team:\n key = \"city_opponent\"\n\n if key not in self.object_nodes:\n self.object_nodes[key] = np.array([[cells.pos.x, cells.pos.y]])\n else:\n self.object_nodes[key] = np.concatenate(\n (\n self.object_nodes[key],\n [[cells.pos.x, cells.pos.y]]\n )\n , axis=0\n )\n\n # Observation space: (Basic minimum for a miner agent)\n # Object:\n # 1x is worker\n # 1x is cart\n # 1x is citytile\n # 5x direction_nearest_wood\n # 1x distance_nearest_wood\n # 1x amount\n #\n # 5x direction_nearest_coal\n # 1x distance_nearest_coal\n # 1x amount\n #\n # 5x direction_nearest_uranium\n # 1x distance_nearest_uranium\n # 1x amount\n #\n # 5x direction_nearest_city\n # 1x distance_nearest_city\n # 1x amount of fuel\n #\n # 5x direction_nearest_worker\n # 1x distance_nearest_worker\n # 1x amount of cargo\n #\n # 28x (the same as above, but direction, distance, and amount to the furthest of each)\n #\n # Unit:\n # 1x cargo size\n # State:\n # 1x is night\n # 1x percent of game done\n # 2x citytile counts [cur player, opponent]\n # 2x worker counts [cur player, opponent]\n # 2x cart counts [cur player, opponent]\n # 1x research points [cur player]\n # 1x researched coal [cur player]\n # 1x researched uranium [cur player]\n obs = np.zeros(self.observation_shape)\n \n # Update the type of this object\n # 1x is worker\n # 1x is cart\n # 1x is citytile\n observation_index = 0\n if unit is not None:\n if unit.type == Constants.UNIT_TYPES.WORKER:\n obs[observation_index] = 1.0 # Worker\n else:\n obs[observation_index+1] = 1.0 # Cart\n if city_tile is not None:\n obs[observation_index+2] = 1.0 # CityTile\n observation_index += 3\n \n pos = None\n if unit is not None:\n pos = unit.pos\n else:\n pos = city_tile.pos\n\n if pos is None:\n observation_index += 7 * 5 * 2\n else:\n # Encode the direction to the nearest objects\n # 5x direction_nearest\n # 1x distance\n for distance_function in 
[closest_node, furthest_node]:\n for key in [\n Constants.RESOURCE_TYPES.WOOD,\n Constants.RESOURCE_TYPES.COAL,\n Constants.RESOURCE_TYPES.URANIUM,\n \"city\",\n str(Constants.UNIT_TYPES.WORKER)]:\n # Process the direction to and distance to this object type\n\n # Encode the direction to the nearest object (excluding itself)\n # 5x direction\n # 1x distance\n if key in self.object_nodes:\n if (\n (key == \"city\" and city_tile is not None) or\n (unit is not None and str(unit.type) == key and len(game.map.get_cell_by_pos(unit.pos).units) <= 1 )\n ):\n # Filter out the current unit from the closest-search\n closest_index = closest_node((pos.x, pos.y), self.object_nodes[key])\n filtered_nodes = np.delete(self.object_nodes[key], closest_index, axis=0)\n else:\n filtered_nodes = self.object_nodes[key]\n\n if len(filtered_nodes) == 0:\n # No other object of this type\n obs[observation_index + 5] = 1.0\n else:\n # There is another object of this type\n closest_index = distance_function((pos.x, pos.y), filtered_nodes)\n\n if closest_index is not None and closest_index >= 0:\n closest = filtered_nodes[closest_index]\n closest_position = Position(closest[0], closest[1])\n direction = pos.direction_to(closest_position)\n mapping = {\n Constants.DIRECTIONS.CENTER: 0,\n Constants.DIRECTIONS.NORTH: 1,\n Constants.DIRECTIONS.WEST: 2,\n Constants.DIRECTIONS.SOUTH: 3,\n Constants.DIRECTIONS.EAST: 4,\n }\n obs[observation_index + mapping[direction]] = 1.0 # One-hot encoding direction\n\n # 0 to 1 distance\n distance = pos.distance_to(closest_position)\n obs[observation_index + 5] = min(distance / 20.0, 1.0)\n\n # 0 to 1 value (amount of resource, cargo for unit, or fuel for city)\n if key == \"city\":\n # City fuel as % of upkeep for 200 turns\n c = game.cities[game.map.get_cell_by_pos(closest_position).city_tile.city_id]\n obs[observation_index + 6] = min(\n c.fuel / (c.get_light_upkeep() * 200.0),\n 1.0\n )\n elif key in [Constants.RESOURCE_TYPES.WOOD, Constants.RESOURCE_TYPES.COAL,\n Constants.RESOURCE_TYPES.URANIUM]:\n # Resource amount\n obs[observation_index + 6] = min(\n game.map.get_cell_by_pos(closest_position).resource.amount / 500,\n 1.0\n )\n else:\n # Unit cargo\n obs[observation_index + 6] = min(\n next(iter(game.map.get_cell_by_pos(\n closest_position).units.values())).get_cargo_space_left() / 100,\n 1.0\n )\n\n observation_index += 7\n\n if unit is not None:\n # Encode the cargo space\n # 1x cargo size\n obs[observation_index] = unit.get_cargo_space_left() / GAME_CONSTANTS[\"PARAMETERS\"][\"RESOURCE_CAPACITY\"][\n \"WORKER\"]\n observation_index += 1\n else:\n observation_index += 1\n\n # Game state observations\n\n # 1x is night\n obs[observation_index] = game.is_night()\n observation_index += 1\n\n # 1x percent of game done\n obs[observation_index] = game.state[\"turn\"] / GAME_CONSTANTS[\"PARAMETERS\"][\"MAX_DAYS\"]\n observation_index += 1\n\n # 2x citytile counts [cur player, opponent]\n # 2x worker counts [cur player, opponent]\n # 2x cart counts [cur player, opponent]\n max_count = 30\n for key in [\"city\", str(Constants.UNIT_TYPES.WORKER), str(Constants.UNIT_TYPES.CART)]:\n if key in self.object_nodes:\n obs[observation_index] = len(self.object_nodes[key]) / max_count\n if (key + \"_opponent\") in self.object_nodes:\n obs[observation_index + 1] = len(self.object_nodes[(key + \"_opponent\")]) / max_count\n observation_index += 2\n\n # 1x research points [cur player]\n # 1x researched coal [cur player]\n # 1x researched uranium [cur player]\n obs[observation_index] = 
game.state[\"teamStates\"][team][\"researchPoints\"] / 200.0\n obs[observation_index+1] = float(game.state[\"teamStates\"][team][\"researched\"][\"coal\"])\n obs[observation_index+2] = float(game.state[\"teamStates\"][team][\"researched\"][\"uranium\"])\n\n return obs\n\n def action_code_to_action(self, action_code, game, unit=None, city_tile=None, team=None):\n \"\"\"\n Takes an action in the environment according to actionCode:\n action_code: Index of action to take into the action array.\n Returns: An action.\n \"\"\"\n # Map action_code index into to a constructed Action object\n try:\n x = None\n y = None\n if city_tile is not None:\n x = city_tile.pos.x\n y = city_tile.pos.y\n elif unit is not None:\n x = unit.pos.x\n y = unit.pos.y\n \n if city_tile != None:\n action = self.actions_cities[action_code%len(self.actions_cities)](\n game=game,\n unit_id=unit.id if unit else None,\n unit=unit,\n city_id=city_tile.city_id if city_tile else None,\n citytile=city_tile,\n team=team,\n x=x,\n y=y\n )\n else:\n action = self.actions_units[action_code%len(self.actions_units)](\n game=game,\n unit_id=unit.id if unit else None,\n unit=unit,\n city_id=city_tile.city_id if city_tile else None,\n citytile=city_tile,\n team=team,\n x=x,\n y=y\n )\n \n return action\n except Exception as e:\n # Not a valid action\n print(e)\n return None\n\n def take_action(self, action_code, game, unit=None, city_tile=None, team=None):\n \"\"\"\n Takes an action in the environment according to actionCode:\n actionCode: Index of action to take into the action array.\n \"\"\"\n action = self.action_code_to_action(action_code, game, unit, city_tile, team)\n self.match_controller.take_action(action)\n\n def game_start(self, game):\n \"\"\"\n This function is called at the start of each game. Use this to\n reset and initialize per game. Note that self.team may have\n been changed since last game. The game map has been created\n and starting units placed.\n\n Args:\n game ([type]): Game.\n \"\"\"\n self.units_last = 0\n self.city_tiles_last = 0\n self.fuel_collected_last = 0\n\n def get_reward(self, game, is_game_finished, is_new_turn, is_game_error):\n \"\"\"\n Returns the reward function for this step of the game. Reward should be a\n delta increment to the reward, not the total current reward.\n \"\"\"\n if is_game_error:\n # Game environment step failed, assign a game lost reward to not incentivise this\n print(\"Game failed due to error\")\n return -1.0\n\n if not is_new_turn and not is_game_finished:\n # Only apply rewards at the start of each turn or at game end\n return 0\n\n # Get some basic stats\n unit_count = len(game.state[\"teamStates\"][self.team][\"units\"])\n\n city_count = 0\n city_count_opponent = 0\n city_tile_count = 0\n city_tile_count_opponent = 0\n for city in game.cities.values():\n if city.team == self.team:\n city_count += 1\n else:\n city_count_opponent += 1\n\n for cell in city.city_cells:\n if city.team == self.team:\n city_tile_count += 1\n else:\n city_tile_count_opponent += 1\n \n rewards = {}\n \n # Give a reward for unit creation/death. 0.05 reward per unit.\n rewards[\"rew/r_units\"] = (unit_count - self.units_last) * 0.05\n self.units_last = unit_count\n\n # Give a reward for city creation/death. 
0.1 reward per city.\n rewards[\"rew/r_city_tiles\"] = (city_tile_count - self.city_tiles_last) * 0.1\n self.city_tiles_last = city_tile_count\n\n # Reward collecting fuel\n fuel_collected = game.stats[\"teamStats\"][self.team][\"fuelGenerated\"]\n rewards[\"rew/r_fuel_collected\"] = ( (fuel_collected - self.fuel_collected_last) / 20000 )\n self.fuel_collected_last = fuel_collected\n \n # Give a reward of 1.0 per city tile alive at the end of the game\n rewards[\"rew/r_city_tiles_end\"] = 0\n if is_game_finished:\n self.is_last_turn = True\n rewards[\"rew/r_city_tiles_end\"] = city_tile_count\n\n '''\n # Example of a game win/loss reward instead\n if game.get_winning_team() == self.team:\n rewards[\"rew/r_game_win\"] = 100.0 # Win\n else:\n rewards[\"rew/r_game_win\"] = -100.0 # Loss\n '''\n \n reward = 0\n for name, value in rewards.items():\n reward += value\n\n return reward\n\n def turn_heurstics(self, game, is_first_turn):\n \"\"\"\n This is called pre-observation actions to allow for hardcoded heuristics\n to control a subset of units. Any unit or city that gets an action from this\n callback, will not create an observation+action.\n\n Args:\n game ([type]): Game in progress\n is_first_turn (bool): True if it's the first turn of a game.\n \"\"\"\n return\n\n \n\n"
] | [
[
"numpy.concatenate",
"numpy.delete",
"numpy.argmax",
"numpy.argmin",
"numpy.array",
"numpy.zeros",
"numpy.sum"
]
] |
pritesh-mehta/dwi-utilities | [
"f1e307fcf51ef4e4cc95ac311f031e3521c1fbbf"
] | [
"dwi_utilities/comp_high_b.py"
] | [
"#!/usr/bin/env python\n\n\"\"\"\n@author: pritesh-mehta\n\"\"\"\n\nimport numpy as np\nfrom scipy.optimize import curve_fit\nfrom pathlib import Path\nfrom argparse import ArgumentParser\n\nfrom dwi_utilities.monoexponential_decay import log_func, func\nimport dwi_utilities.nifti_utilities as nutil\n\ndef comp_high_b_case(case_dir, target_bval, save_case=False, output_dir=None, extension='.nii.gz'):\n \"\"\"Generate high b-value DWI using low b-value DWI (case)\n \"\"\"\n eps = 1e-8 \n \n data_stack = []\n bval_list = []\n filepaths = nutil.path_generator(case_dir)\n for path in filepaths:\n name, nii, data = nutil.load(path)\n data_stack.append(data)\n bval_list.append(name.replace('.nii.gz','').replace('b',''))\n \n # order data stack in order of ascending b-value\n bval_list, data_stack = \\\n zip(*sorted(zip(bval_list, data_stack)))\n \n # generate high b-value\n bval_list = np.array(bval_list)\n data = np.array(data_stack)\n \n shape = np.shape(data[0])\n highb_data = np.zeros(shape)\n \n for i in range(shape[0]):\n for j in range(shape[1]):\n for k in range(shape[2]):\n y = []\n for array in data:\n y.append(array[i][j][k])\n x = bval_list\n y = np.array(y) + eps\n z = np.log(y)\n popt, pcov = curve_fit(log_func, x, z)\n if popt[1] < 0:\n highb_data[i][j][k] = 0\n else:\n highb_data[i][j][k] = func(target_bval, np.exp(popt[0]), popt[1]) \n \n if save_case:\n case_name = Path(case_dir).parts[-1]\n save_path = Path(output_dir) / (case_name + extension)\n nutil.save(save_path, nii, highb_data)\n \n return highb_data\n\ndef comp_high_b_dir(cases_dir, target_bval, output_dir, extension='.nii.gz'):\n \"\"\"Generate high b-value DWI using low b-value DWI (directory)\n \"\"\"\n for case_dir in Path(cases_dir).iterdir():\n print(\"Processing:\", case_dir)\n comp_high_b_case(case_dir, target_bval, save_case=True, output_dir=output_dir, extension=extension)\n return None\n\ndef process():\n parser = ArgumentParser()\n parser.add_argument('--input_dir', required=True, type=str)\n parser.add_argument('--target_bval', required=True, type=int)\n parser.add_argument('--output_dir', required=True, type=str)\n parser.add_argument('--case', required=False, action=\"store_true\")\n parser.add_argument('--extension', required=False, type=str, default='.nii.gz')\n \n args = parser.parse_args()\n \n if args.case:\n comp_high_b_case(args.input_dir, args.target_bval, save_case=True, output_dir=args.output_dir, \n extension=args.extension)\n else:\n comp_high_b_dir(args.input_dir, args.target_bval, args.output_dir,\n extension=args.extension)\n \nif __name__ == \"__main__\":\n process()\n"
] | [
[
"numpy.log",
"numpy.shape",
"numpy.exp",
"numpy.array",
"scipy.optimize.curve_fit",
"numpy.zeros"
]
] |
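
comp_high_b_case above fits a monoexponential decay per voxel in log space and extrapolates the signal to the target b-value, zeroing voxels where the fitted decay rate is negative. The sketch below shows that per-voxel fit on made-up numbers; since the source of dwi_utilities.monoexponential_decay is not shown, the log_func / func definitions here are assumed stand-ins for the usual S(b) = S0 * exp(-ADC * b) model.

# Assumed model: S(b) = S0 * exp(-ADC * b), so log S = log S0 - ADC * b.
import numpy as np
from scipy.optimize import curve_fit

def log_func(b, log_s0, adc):        # stand-in for the imported log_func
    return log_s0 - adc * b

def func(b, s0, adc):                # stand-in for the imported func
    return s0 * np.exp(-adc * b)

bvals = np.array([0.0, 150.0, 500.0, 900.0])                # acquired b-values
signal = np.array([1000.0, 900.0, 650.0, 480.0]) + 1e-8     # one voxel

popt, _ = curve_fit(log_func, bvals, np.log(signal))
log_s0, adc = popt
s_high = func(1400.0, np.exp(log_s0), adc) if adc >= 0 else 0.0
print(round(float(s_high), 1))       # extrapolated signal at b = 1400
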
SarderLab/HistomicsTK_PodoSighter | [
"9a75302f645bfb3dfd9688d247388c9948f4eadb"
] | [
"histomicstk/deeplab/utils/get_dataset_colormap.py"
] | [
"# Lint as: python2, python3\n# Copyright 2018 The TensorFlow Authors All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Visualizes the segmentation results via specified color map.\n\nVisualizes the semantic segmentation results by the color map\ndefined by the different datasets. Supported colormaps are:\n\n* ADE20K (http://groups.csail.mit.edu/vision/datasets/ADE20K/).\n\n* Cityscapes dataset (https://www.cityscapes-dataset.com).\n\n* Mapillary Vistas (https://research.mapillary.com).\n\n* PASCAL VOC 2012 (http://host.robots.ox.ac.uk/pascal/VOC/).\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nimport numpy as np\nfrom six.moves import range\n\n# Dataset names.\n_ADE20K = 'ade20k'\n_CITYSCAPES = 'cityscapes'\n_MAPILLARY_VISTAS = 'mapillary_vistas'\n_PASCAL = 'pascal'\n_PC1 = 'PC1'\n\n# Max number of entries in the colormap for each dataset.\n_DATASET_MAX_ENTRIES = {\n _ADE20K: 151,\n _CITYSCAPES: 256,\n _MAPILLARY_VISTAS: 66,\n _PASCAL: 512,\n _PC1: 256,\n}\n\ndef create_pc1_label_colormap():\n \"\"\"Creates a label colormap used in PC1 segmentation benchmark.\n\n Returns:\n A colormap for visualizing segmentation results.\n \"\"\"\n colormap = np.zeros((256, 3), dtype=np.uint8)\n colormap[0] = [128, 64, 128]\n colormap[1] = [244, 35, 232]\n colormap[2] = [70, 70, 70]\n colormap[3] = [102, 102, 156]\n return colormap\n\ndef create_ade20k_label_colormap():\n \"\"\"Creates a label colormap used in ADE20K segmentation benchmark.\n\n Returns:\n A colormap for visualizing segmentation results.\n \"\"\"\n return np.asarray([\n [0, 0, 0],\n [120, 120, 120],\n [180, 120, 120],\n [6, 230, 230],\n [80, 50, 50],\n [4, 200, 3],\n [120, 120, 80],\n [140, 140, 140],\n [204, 5, 255],\n [230, 230, 230],\n [4, 250, 7],\n [224, 5, 255],\n [235, 255, 7],\n [150, 5, 61],\n [120, 120, 70],\n [8, 255, 51],\n [255, 6, 82],\n [143, 255, 140],\n [204, 255, 4],\n [255, 51, 7],\n [204, 70, 3],\n [0, 102, 200],\n [61, 230, 250],\n [255, 6, 51],\n [11, 102, 255],\n [255, 7, 71],\n [255, 9, 224],\n [9, 7, 230],\n [220, 220, 220],\n [255, 9, 92],\n [112, 9, 255],\n [8, 255, 214],\n [7, 255, 224],\n [255, 184, 6],\n [10, 255, 71],\n [255, 41, 10],\n [7, 255, 255],\n [224, 255, 8],\n [102, 8, 255],\n [255, 61, 6],\n [255, 194, 7],\n [255, 122, 8],\n [0, 255, 20],\n [255, 8, 41],\n [255, 5, 153],\n [6, 51, 255],\n [235, 12, 255],\n [160, 150, 20],\n [0, 163, 255],\n [140, 140, 140],\n [250, 10, 15],\n [20, 255, 0],\n [31, 255, 0],\n [255, 31, 0],\n [255, 224, 0],\n [153, 255, 0],\n [0, 0, 255],\n [255, 71, 0],\n [0, 235, 255],\n [0, 173, 255],\n [31, 0, 255],\n [11, 200, 200],\n [255, 82, 0],\n [0, 255, 245],\n [0, 61, 255],\n [0, 255, 112],\n [0, 255, 133],\n [255, 0, 0],\n [255, 163, 0],\n [255, 102, 0],\n [194, 255, 0],\n [0, 143, 255],\n [51, 255, 0],\n [0, 82, 255],\n [0, 255, 41],\n [0, 255, 173],\n [10, 0, 255],\n [173, 255, 0],\n [0, 
255, 153],\n [255, 92, 0],\n [255, 0, 255],\n [255, 0, 245],\n [255, 0, 102],\n [255, 173, 0],\n [255, 0, 20],\n [255, 184, 184],\n [0, 31, 255],\n [0, 255, 61],\n [0, 71, 255],\n [255, 0, 204],\n [0, 255, 194],\n [0, 255, 82],\n [0, 10, 255],\n [0, 112, 255],\n [51, 0, 255],\n [0, 194, 255],\n [0, 122, 255],\n [0, 255, 163],\n [255, 153, 0],\n [0, 255, 10],\n [255, 112, 0],\n [143, 255, 0],\n [82, 0, 255],\n [163, 255, 0],\n [255, 235, 0],\n [8, 184, 170],\n [133, 0, 255],\n [0, 255, 92],\n [184, 0, 255],\n [255, 0, 31],\n [0, 184, 255],\n [0, 214, 255],\n [255, 0, 112],\n [92, 255, 0],\n [0, 224, 255],\n [112, 224, 255],\n [70, 184, 160],\n [163, 0, 255],\n [153, 0, 255],\n [71, 255, 0],\n [255, 0, 163],\n [255, 204, 0],\n [255, 0, 143],\n [0, 255, 235],\n [133, 255, 0],\n [255, 0, 235],\n [245, 0, 255],\n [255, 0, 122],\n [255, 245, 0],\n [10, 190, 212],\n [214, 255, 0],\n [0, 204, 255],\n [20, 0, 255],\n [255, 255, 0],\n [0, 153, 255],\n [0, 41, 255],\n [0, 255, 204],\n [41, 0, 255],\n [41, 255, 0],\n [173, 0, 255],\n [0, 245, 255],\n [71, 0, 255],\n [122, 0, 255],\n [0, 255, 184],\n [0, 92, 255],\n [184, 255, 0],\n [0, 133, 255],\n [255, 214, 0],\n [25, 194, 194],\n [102, 255, 0],\n [92, 0, 255],\n ])\n\n\ndef create_cityscapes_label_colormap():\n \"\"\"Creates a label colormap used in CITYSCAPES segmentation benchmark.\n\n Returns:\n A colormap for visualizing segmentation results.\n \"\"\"\n colormap = np.zeros((256, 3), dtype=np.uint8)\n colormap[0] = [128, 64, 128]\n colormap[1] = [244, 35, 232]\n colormap[2] = [70, 70, 70]\n colormap[3] = [102, 102, 156]\n colormap[4] = [190, 153, 153]\n colormap[5] = [153, 153, 153]\n colormap[6] = [250, 170, 30]\n colormap[7] = [220, 220, 0]\n colormap[8] = [107, 142, 35]\n colormap[9] = [152, 251, 152]\n colormap[10] = [70, 130, 180]\n colormap[11] = [220, 20, 60]\n colormap[12] = [255, 0, 0]\n colormap[13] = [0, 0, 142]\n colormap[14] = [0, 0, 70]\n colormap[15] = [0, 60, 100]\n colormap[16] = [0, 80, 100]\n colormap[17] = [0, 0, 230]\n colormap[18] = [119, 11, 32]\n return colormap\n\n\ndef create_mapillary_vistas_label_colormap():\n \"\"\"Creates a label colormap used in Mapillary Vistas segmentation benchmark.\n\n Returns:\n A colormap for visualizing segmentation results.\n \"\"\"\n return np.asarray([\n [165, 42, 42],\n [0, 192, 0],\n [196, 196, 196],\n [190, 153, 153],\n [180, 165, 180],\n [102, 102, 156],\n [102, 102, 156],\n [128, 64, 255],\n [140, 140, 200],\n [170, 170, 170],\n [250, 170, 160],\n [96, 96, 96],\n [230, 150, 140],\n [128, 64, 128],\n [110, 110, 110],\n [244, 35, 232],\n [150, 100, 100],\n [70, 70, 70],\n [150, 120, 90],\n [220, 20, 60],\n [255, 0, 0],\n [255, 0, 0],\n [255, 0, 0],\n [200, 128, 128],\n [255, 255, 255],\n [64, 170, 64],\n [128, 64, 64],\n [70, 130, 180],\n [255, 255, 255],\n [152, 251, 152],\n [107, 142, 35],\n [0, 170, 30],\n [255, 255, 128],\n [250, 0, 30],\n [0, 0, 0],\n [220, 220, 220],\n [170, 170, 170],\n [222, 40, 40],\n [100, 170, 30],\n [40, 40, 40],\n [33, 33, 33],\n [170, 170, 170],\n [0, 0, 142],\n [170, 170, 170],\n [210, 170, 100],\n [153, 153, 153],\n [128, 128, 128],\n [0, 0, 142],\n [250, 170, 30],\n [192, 192, 192],\n [220, 220, 0],\n [180, 165, 180],\n [119, 11, 32],\n [0, 0, 142],\n [0, 60, 100],\n [0, 0, 142],\n [0, 0, 90],\n [0, 0, 230],\n [0, 80, 100],\n [128, 64, 64],\n [0, 0, 110],\n [0, 0, 70],\n [0, 0, 192],\n [32, 32, 32],\n [0, 0, 0],\n [0, 0, 0],\n ])\n\n\ndef create_pascal_label_colormap():\n \"\"\"Creates a label colormap used in PASCAL VOC segmentation benchmark.\n\n 
Returns:\n A colormap for visualizing segmentation results.\n \"\"\"\n colormap = np.zeros((_DATASET_MAX_ENTRIES[_PASCAL], 3), dtype=int)\n ind = np.arange(_DATASET_MAX_ENTRIES[_PASCAL], dtype=int)\n\n for shift in reversed(list(range(8))):\n for channel in range(3):\n colormap[:, channel] |= bit_get(ind, channel) << shift\n ind >>= 3\n\n return colormap\n\n\ndef get_ade20k_name():\n return _ADE20K\n\n\ndef get_cityscapes_name():\n return _CITYSCAPES\n\n\ndef get_mapillary_vistas_name():\n return _MAPILLARY_VISTAS\n\n\ndef get_pascal_name():\n return _PASCAL\n\ndef get_pc1_name():\n return _PC1\n\n\ndef bit_get(val, idx):\n \"\"\"Gets the bit value.\n\n Args:\n val: Input value, int or numpy int array.\n idx: Which bit of the input val.\n\n Returns:\n The \"idx\"-th bit of input val.\n \"\"\"\n return (val >> idx) & 1\n\n\ndef create_label_colormap(dataset=_PC1):\n \"\"\"Creates a label colormap for the specified dataset.\n\n Args:\n dataset: The colormap used in the dataset.\n\n Returns:\n A numpy array of the dataset colormap.\n\n Raises:\n ValueError: If the dataset is not supported.\n \"\"\"\n if dataset == _ADE20K:\n return create_ade20k_label_colormap()\n elif dataset == _CITYSCAPES:\n return create_cityscapes_label_colormap()\n elif dataset == _MAPILLARY_VISTAS:\n return create_mapillary_vistas_label_colormap()\n elif dataset == _PASCAL:\n return create_pascal_label_colormap()\n elif dataset == _PC1:\n return create_pc1_label_colormap()\n else:\n raise ValueError('Unsupported dataset.')\n\n\ndef label_to_color_image(label, dataset=_PC1):\n \"\"\"Adds color defined by the dataset colormap to the label.\n\n Args:\n label: A 2D array with integer type, storing the segmentation label.\n dataset: The colormap used in the dataset.\n\n Returns:\n result: A 2D array with floating type. The element of the array\n is the color indexed by the corresponding element in the input label\n to the dataset color map.\n\n Raises:\n ValueError: If label is not of rank 2 or its value is larger than color\n map maximum entry.\n \"\"\"\n if label.ndim != 2:\n raise ValueError('Expect 2-D input label. Got {}'.format(label.shape))\n\n if np.max(label) >= _DATASET_MAX_ENTRIES[dataset]:\n raise ValueError(\n 'label value too large: {} >= {}.'.format(\n np.max(label), _DATASET_MAX_ENTRIES[dataset]))\n\n colormap = create_label_colormap(dataset)\n return colormap[label]\n\n\ndef get_dataset_colormap_max_entries(dataset):\n return _DATASET_MAX_ENTRIES[dataset]\n"
] | [
[
"numpy.asarray",
"numpy.arange",
"numpy.max",
"numpy.zeros"
]
] |
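
create_pascal_label_colormap in the entry above derives each class colour by scattering the bits of the label index across the R, G, B channels, three bits per round. The same computation for a single label, written without the numpy vectorisation (the helper name pascal_color is illustrative):

def pascal_color(label):
    color = [0, 0, 0]
    ind = label
    for shift in reversed(range(8)):
        for channel in range(3):
            # bit_get(ind, channel) << shift, inlined
            color[channel] |= ((ind >> channel) & 1) << shift
        ind >>= 3
    return color

print(pascal_color(1))   # [128, 0, 0]
print(pascal_color(2))   # [0, 128, 0]
print(pascal_color(21))  # [128, 64, 128]
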
Jasonandy/Python-X | [
"2f02b9a17bd5495dd1f8746b191f11ec2d7bccbe",
"2f02b9a17bd5495dd1f8746b191f11ec2d7bccbe"
] | [
"cn/opencv/finger/finger.py",
"cn/PyTorch/demo_1/demo.py"
] | [
"import cv2 as cv\nimport numpy as np\nimport math\nimport time\n\ncapture = cv.VideoCapture(0)\n\n# video = \"http://admin:[email protected]:8081/\" # admin是账号:admin是密码 后面是局域网\n# capture = cv.VideoCapture(video)\n\n\n# 获得欧几里距离\ndef _get_eucledian_distance(vect1, vect2):\n distant = vect1[0] - vect2[0]\n dist = np.sqrt(np.sum(np.square(distant)))\n # 或者用numpy内建方法\n # vect1 = list(vect1)\n # vect2 = list(vect2)\n # dist = np.linalg.norm(vect1 - vect2)\n return dist\n\n\ndef gesture_recognition():\n\n while True:\n ret, frame = capture.read() # 读取摄像头\n # frame = cv.flip(frame, 1)\n fgbg = cv.createBackgroundSubtractorMOG2() # 利用BackgroundSubtractorMOG2算法消除背景\n # fgmask = bgModel.apply(frame)\n fgmask = fgbg.apply(frame)\n # kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3))\n # res = cv2.morphologyEx(fgmask, cv2.MORPH_OPEN, kernel)\n kernel = np.ones((5, 5), np.uint8)\n fgmask = cv.erode(fgmask, kernel, iterations=1) # 膨胀\n res = cv.bitwise_and(frame, frame, mask=fgmask)\n ycrcb = cv.cvtColor(res, cv.COLOR_BGR2YCrCb) # 分解为YUV图像,得到CR分量\n (_, cr, _) = cv.split(ycrcb)\n cr1 = cv.GaussianBlur(cr, (5, 5), 0) # 高斯滤波\n _, skin = cv.threshold(cr1, 0, 255, cv.THRESH_BINARY + cv.THRESH_OTSU) # OTSU图像二值化\n # dst = cv.GaussianBlur(frame, (3, 3), 0)\n # gray = cv.cvtColor(dst, cv.COLOR_BGR2GRAY)\n # ret, binary = cv.threshold(gray, 0, 255, cv.THRESH_BINARY | cv.THRESH_OTSU)\n # cv.imshow(\"binary_image\", binary)\n # hsv = cv.cvtColor(frame, cv.COLOR_BGR2HSV) # hsv 色彩空间 分割肤色\n # ycrcb = cv.cvtColor(frame, cv.COLOR_BGR2YCrCb) # Ycrcb 色彩空间 分割肤色\n # # lower_hsv = np.array([0, 15, 0])\n # # upper_hsv = np.array([17, 170, 255])\n # lower_ycrcb = np.array([0, 135, 85])\n # upper_ycrcb = np.array([255, 180, 135])\n # # mask = cv.inRange(hsv, lowerb=lower_hsv, upperb=upper_hsv) # hsv 掩码\n # mask = cv.inRange(ycrcb, lowerb=lower_ycrcb, upperb=upper_ycrcb) # ycrcb 掩码\n # dst = cv.GaussianBlur(mask, (11, 11), 0) # 高斯去噪\n # gray = cv.cvtColor(dst, cv.COLOR_BGR2GRAY)\n\n # edge_output = cv.Canny(gray, 50, 150) # 图像边缘提取\n # kernel = cv.getStructuringElement(cv.MORPH_RECT, (3, 3)) # 获取图像结构化元素\n # # dst = cv.morphologyEx(binary, cv.MORPH_OPEN, kernel) # 开操作\n # dst = cv.erode(skin, kernel) # 膨胀操作\n gesture_roi = skin[0:350, 380:700]\n cv.imshow(\"dst_demo\", skin)\n # cv.imshow(\"gesture_roi\", gesture_roi)\n contours, heriachy = cv.findContours(gesture_roi, cv.RETR_EXTERNAL, cv.CHAIN_APPROX_SIMPLE) # 获取轮廓点集(坐标)\n # if contours[0] < [30, 260]:\n # cnt = contours[0]\n # elif 270 <= contours[0] < [60, 260]:\n # cnt = contours[1]\n # else:\n # cnt = contours[2]\n # cnt = contours[0]\n # print(cnt)\n # print(contours)\n # cnt = contours[0]\n for i, contour in enumerate(contours): # 获取轮廓\n cv.drawContours(frame[0:350, 380:700], contours, i, (255, 0, 0), 1) # 绘制轮廓\n # 得到面积\n # area = cv.contourArea(contour)\n # 得到外接矩形\n # x, y, w, h = cv.boundingRect(contour)\n # 得到的几何距是字典类型的\n # mm = cv.moments(contour)\n # cx = mm['m10']/mm['m00']\n # cy = mm['m01']/mm['m00']\n # center, radius = cv.minEnclosingCircle(contour)\n # center = (int(x), int(y))\n # radius = int(radius)\n # cv.circle(frame, center, radius, (0, 255, 255), 2)\n # cv.rectangle(frame, (x, y), (x+w, y+h), (0, 0, 255), 2)\n # print(i)\n # cv.imshow(\"measure_contures\", frame)\n x, y, w, h = cv.boundingRect(contour)\n # center = (int(x), int(y))\n cv.rectangle(frame[0:350, 380:700], (x, y), (x + w, y + h), (100, 100, 0), 1)\n # approxcurve = cv.approxPolyDP(contour, 4, False)\n # if approxcurve.shape[0] < 5:\n # cv.drawContours(frame, contours, -1, (0, 255, 
0), 3)\n\n hull = cv.convexHull(contour, True, returnPoints=False) # 获得凸包点 x, y坐标\n defects = cv.convexityDefects(contour, hull) # 计算轮廓的凹点\n # print(hull, defects)\n # cv.polylines(frame[0:350, 380:700], [hull], True, (0, 255, 0), 3)\n \"\"\"\n defect反馈的是Nx4的数组,\n 第一列表示的是起点(轮廓集合中点的编号)\n 第二列表示的是终点(轮廓集合中点的编号)\n 第三列表示的是最远点(轮廓集合中点的编号)\n 第四列表示的是最远点到凸轮廓的最短距离\n \"\"\"\n # cv.drawContours(frame[0:350, 380:700], hull, -1, (255, 0, 0), 5, 8) # 绘制凸包\n\n # dist = np.sqrt(np.sum(np.square(vect1 - vect2)))\n ndefects = 0\n if defects is not None: # 重要!\n\n for i in range(defects.shape[0]):\n s, e, f, d = defects[i, 0]\n # float(s)\n # float(e)\n # float(f)\n # float(d)\n start = tuple(contour[s][0]) # 起点\n end = tuple(contour[e][0]) # 终点\n far = tuple(contour[f][0]) # 最远点\n a = _get_eucledian_distance(start, end)\n b = _get_eucledian_distance(start, far)\n c = _get_eucledian_distance(end, far)\n angle = math.acos((b ** 2 + c ** 2 - a ** 2) / (2 * b * c))\n cv.line(frame[0:350, 380:700], start, end, [255, 255, 0], 2)\n cv.circle(frame[0:350, 380:700], far, 5, [0, 0, 255], -1)\n if angle <= math.pi / 5: # <30度:\n ndefects = ndefects + 1\n print(\"数字 = %f\" % ndefects)\n\n\n # cv.polylines(frame[50:350, 380:700], [hull], True, (0, 255, 0), 2)\n # retval = cv.pointPolygonTest(contour, center, True)\n # cv.drawContours(frame, defects, -1, (0, 255, 0), 3)\n # cv.imshow(\"defects\", defects)\n cv.imshow(\"video\", frame)\n c = cv.waitKey(50)\n if c == 27:\n\n break\n\n\ndef gesture_recognition_two():\n img = cv.imread(\"E:/pictureprocessing/practice/picture/practice_one.png\")\n img = cv.flip(img, 1)\n # dst = cv.GaussianBlur(frame, (3, 3), 0)\n # gray = cv.cvtColor(dst, cv.COLOR_BGR2GRAY)\n # ret, binary = cv.threshold(gray, 0, 255, cv.THRESH_BINARY | cv.THRESH_OTSU)\n # cv.imshow(\"binary_image\", binary)\n # hsv = cv.cvtColor(img, cv.COLOR_BGR2HSV) # 通过hsv将颜色过滤出来\n # lower_hsv = np.array([100, 43, 46])\n # upper_hsv = np.array([124, 255, 255])\n # mask = cv.inRange(hsv, lowerb=lower_hsv, upperb=upper_hsv)\n gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)\n ret, binary = cv.threshold(gray, 0, 255, cv.THRESH_BINARY | cv.THRESH_OTSU)\n dst = cv.GaussianBlur(binary, (1, 1), 0) # 高斯去噪\n # cv.imshow(\"dst_demo\", dst)\n contours, heriachy = cv.findContours(dst, cv.RETR_EXTERNAL, cv.CHAIN_APPROX_SIMPLE) # 获取轮廓本身\n for i, contour in enumerate(contours): # 获取轮廓\n cv.drawContours(img, contours, i, (0, 255, 0), 3) # 绘制轮廓\n print(i)\n\n cv.imshow(\"img_demo\", img)\n\n\ncv.namedWindow(\"video\")\ngesture_recognition()\n# gesture_recognition_two()\n\ncv.waitKey(0)\ncapture.release()\ncv.destroyAllWindows()",
"# coding=utf-8\nimport torch\nimport torch.nn as nn\nfrom torch.autograd import Variable\n\nm = nn.LeakyReLU(0.1)\ninput = Variable(torch.randn(2))\nprint(input)\nprint\n'---' * 10\nprint(m(input))\nprint\n'--' * 10\nr = nn.ReLU()\nprint\nr(input)\n\nm = nn.Threshold(0.1, 20)\ninput = Variable(torch.randn(2))\nprint(input)\nprint(m(input))\n\nm = nn.Sigmoid()\ninput = Variable(torch.randn(2))\nprint(input)\nprint(m(input))\n\nm = nn.BatchNorm1d(3, affine=False)\ninput = Variable(torch.randn([[1, 2, 3], [4, 5, 6]]))\noutput = m(input)\nprint\noutput\nprint\ninput\n\nrnn = nn.RNN(10, 20, 2) # input_size,hidden_size,num_layers\ninput = Variable(torch.randn(5, 3, 10)) # seq_len,batch,input_size\nh0 = Variable(torch.randn(2, 3, 20)) # num_layers * num_direction,batch,hidden_size\noutput, hn = rnn(input, h0)\nprint\noutput # 5*3*20 seq_len ,batch , hidden_size * num_directions\nprint\n'--' * 10\nprint\nhn\n"
] | [
[
"numpy.square",
"numpy.ones"
],
[
"torch.nn.BatchNorm1d",
"torch.randn",
"torch.nn.RNN",
"torch.nn.Sigmoid",
"torch.nn.LeakyReLU",
"torch.nn.ReLU",
"torch.nn.Threshold"
]
] |
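
The finger-counting loop in finger.py keeps a convexity defect only when the angle at the far point, computed with the law of cosines over the start/end/far triangle, is below the snippet's math.pi / 5 threshold. A standalone check of that angle test, with a made-up helper name and points:

import math

def angle_at_far(start, end, far):
    a = math.dist(start, end)   # chord between the two hull points
    b = math.dist(start, far)
    c = math.dist(end, far)
    return math.acos((b ** 2 + c ** 2 - a ** 2) / (2 * b * c))

angle = angle_at_far((0, 0), (10, 0), (5, 2))
print(round(math.degrees(angle), 1))   # 136.4: obtuse at the far point
print(angle <= math.pi / 5)            # False -> not counted as a finger gap
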
winnerineast/pythia | [
"b6fe288405490f6e02a3e59dbf32a181aee35645",
"b6fe288405490f6e02a3e59dbf32a181aee35645"
] | [
"pythia/utils/general.py",
"pythia/modules/metrics.py"
] | [
"# Copyright (c) Facebook, Inc. and its affiliates.\nimport collections\nimport gc\nimport os\nfrom bisect import bisect\n\nimport requests\nimport torch\nimport tqdm\nimport yaml\nfrom torch import nn\n\n\ndef lr_lambda_update(i_iter, cfg):\n if (\n cfg[\"training_parameters\"][\"use_warmup\"] is True\n and i_iter <= cfg[\"training_parameters\"][\"warmup_iterations\"]\n ):\n alpha = float(i_iter) / float(cfg[\"training_parameters\"][\"warmup_iterations\"])\n return cfg[\"training_parameters\"][\"warmup_factor\"] * (1.0 - alpha) + alpha\n else:\n idx = bisect(cfg[\"training_parameters\"][\"lr_steps\"], i_iter)\n return pow(cfg[\"training_parameters\"][\"lr_ratio\"], idx)\n\n\ndef clip_gradients(model, i_iter, writer, config):\n # TODO: Fix question model retrieval\n max_grad_l2_norm = config[\"training_parameters\"][\"max_grad_l2_norm\"]\n clip_norm_mode = config[\"training_parameters\"][\"clip_norm_mode\"]\n\n if max_grad_l2_norm is not None:\n if clip_norm_mode == \"all\":\n norm = nn.utils.clip_grad_norm_(model.parameters(), max_grad_l2_norm)\n\n writer.add_scalars({\"grad_norm\": norm}, i_iter)\n\n elif clip_norm_mode == \"question\":\n question_embedding = model.module.question_embedding_module\n norm = nn.utils.clip_grad_norm(\n question_embedding.parameters(), max_grad_l2_norm\n )\n\n writer.add_scalars({\"question_grad_norm\": norm}, i_iter)\n else:\n raise NotImplementedError(\n \"Clip norm mode %s not implemented\" % clip_norm_mode\n )\n\n\ndef ckpt_name_from_core_args(config):\n return \"%s_%s_%s_%d\" % (\n config[\"tasks\"],\n config[\"datasets\"],\n config[\"model\"],\n config[\"training_parameters\"][\"seed\"],\n )\n\n\ndef foldername_from_config_override(args):\n cfg_override = None\n if hasattr(args, \"config_override\"):\n cfg_override = args.config_override\n elif \"config_override\" in args:\n cfg_override = args[\"config_override\"]\n\n folder_name = \"\"\n if cfg_override is not None and len(cfg_override) > 0:\n folder_name = yaml.safe_dump(cfg_override, default_flow_style=True)\n folder_name = folder_name.replace(\":\", \".\").replace(\"\\n\", \" \")\n folder_name = folder_name.replace(\"/\", \"_\")\n folder_name = \" \".join(folder_name.split())\n folder_name = folder_name.replace(\". 
\", \".\").replace(\" \", \"_\")\n folder_name = \"_\" + folder_name\n return folder_name\n\n\ndef get_pythia_root():\n from pythia.common.registry import registry\n\n pythia_root = registry.get(\"pythia_root\", no_warning=True)\n if pythia_root is None:\n pythia_root = os.path.dirname(os.path.abspath(__file__))\n pythia_root = os.path.abspath(os.path.join(pythia_root, \"..\"))\n registry.register(\"pythia_root\", pythia_root)\n return pythia_root\n\n\ndef download_file(url, output_dir=\".\", filename=\"\"):\n if len(filename) == 0:\n filename = os.path.join(\".\", url.split(\"/\")[-1])\n\n os.makedirs(output_dir, exist_ok=True)\n\n filename = os.path.join(output_dir, filename)\n r = requests.get(url, stream=True)\n\n file_size = int(r.headers[\"Content-Length\"])\n chunk_size = 1024 * 1024\n num_bars = int(file_size / chunk_size)\n\n with open(filename, \"wb\") as fh:\n for chunk in tqdm.tqdm(\n r.iter_content(chunk_size=chunk_size),\n total=num_bars,\n unit=\"MB\",\n desc=filename,\n leave=True,\n ):\n fh.write(chunk)\n\n\ndef get_optimizer_parameters(model, config):\n parameters = model.parameters()\n\n has_custom = hasattr(model, \"get_optimizer_parameters\")\n if has_custom:\n parameters = model.get_optimizer_parameters(config)\n\n is_parallel = isinstance(model, nn.DataParallel)\n\n if is_parallel and hasattr(model.module, \"get_optimizer_parameters\"):\n parameters = model.module.get_optimizer_parameters(config)\n\n return parameters\n\n\ndef dict_to_string(dictionary):\n logs = []\n if dictionary is None:\n return \"\"\n for key, val in dictionary.items():\n if hasattr(val, \"item\"):\n val = val.item()\n # if key.count('_') == 2:\n # key = key[key.find('_') + 1:]\n logs.append(\"%s: %.4f\" % (key, val))\n\n return \", \".join(logs)\n\n\ndef get_overlap_score(candidate, target):\n \"\"\"Takes a candidate word and a target word and returns the overlap\n score between the two.\n\n Parameters\n ----------\n candidate : str\n Candidate word whose overlap has to be detected.\n target : str\n Target word against which the overlap will be detected\n\n Returns\n -------\n float\n Overlap score betwen candidate and the target.\n\n \"\"\"\n if len(candidate) < len(target):\n temp = candidate\n candidate = target\n target = temp\n overlap = 0.0\n while len(target) >= 2:\n if target in candidate:\n overlap = len(target)\n return overlap * 1.0 / len(candidate)\n else:\n target = target[:-1]\n return 0.0\n\n\ndef updir(d, n):\n \"\"\"Given path d, go up n dirs from d and return that path\"\"\"\n ret_val = d\n for _ in range(n):\n ret_val = os.path.dirname(ret_val)\n return ret_val\n\n\ndef print_cuda_usage():\n print(\"Memory Allocated:\", torch.cuda.memory_allocated() / (1024 * 1024))\n print(\"Max Memory Allocated:\", torch.cuda.max_memory_allocated() / (1024 * 1024))\n print(\"Memory Cached:\", torch.cuda.memory_cached() / (1024 * 1024))\n print(\"Max Memory Cached:\", torch.cuda.max_memory_cached() / (1024 * 1024))\n\n\ndef get_current_tensors():\n for obj in gc.get_objects():\n try:\n if torch.is_tensor(obj) or (\n hasattr(obj, \"data\") and torch.is_tensor(obj.data)\n ):\n print(type(obj), obj.size())\n except:\n pass\n",
"# Copyright (c) Facebook, Inc. and its affiliates.\n\"\"\"\nThe metrics module contains implementations of various metrics used commonly to\nunderstand how well our models are performing. For e.g. accuracy, vqa_accuracy,\nr@1 etc.\n\nFor implementing your own metric, you need to follow these steps:\n\n1. Create your own metric class and inherit ``BaseMetric`` class.\n2. In the ``__init__`` function of your class, make sure to call\n ``super().__init__('name')`` where 'name' is the name of your metric. If\n you require any parameters in your ``__init__`` function, you can use\n keyword arguments to represent them and metric constructor will take care of\n providing them to your class from config.\n3. Implement a ``calculate`` function which takes in ``SampleList`` and\n `model_output` as input and return back a float tensor/number.\n4. Register your metric with a key 'name' by using decorator,\n ``@registry.register_metric('name')``.\n\nExample::\n\n import torch\n\n from pythia.common.registry import registry\n from pythia.modules.metrics import BaseMetric\n\n @registry.register_metric(\"some\")\n class SomeMetric(BaseMetric):\n def __init__(self, some_param=None):\n super().__init__(\"some\")\n ....\n\n def calculate(self, sample_list, model_output):\n metric = torch.tensor(2, dtype=torch.float)\n return metric\n\nExample config for above metric::\n\n model_attributes:\n pythia:\n metrics:\n - type: some\n params:\n some_param: a\n\"\"\"\n\nimport collections\n\nimport torch\n\nfrom pythia.common.registry import registry\n\n\nclass Metrics:\n \"\"\"Internally used by Pythia, Metrics acts as wrapper for handling\n calculation of metrics over various metrics specified by the model in\n the config. It initializes all of the metrics and when called it runs\n calculate on each of them one by one and returns back a dict with proper\n naming back. For e.g. 
an example dict returned by Metrics class:\n ``{'val/vqa_accuracy': 0.3, 'val/r@1': 0.8}``\n\n Args:\n metric_list (List[ConfigNode]): List of ConfigNodes where each ConfigNode\n specifies name and parameters of the\n metrics used.\n \"\"\"\n\n def __init__(self, metric_list):\n if not isinstance(metric_list, list):\n metrics_list = [metric_list]\n\n self.writer = registry.get(\"writer\")\n self.metrics = self._init_metrics(metric_list)\n\n def _init_metrics(self, metric_list):\n metrics = {}\n for metric in metric_list:\n params = {}\n if isinstance(metric, collections.abc.Mapping):\n if not hasattr(metric, \"type\"):\n raise ValueError(\n \"Metric {} needs to have 'type' attribute\".format(metric)\n )\n metric = metric.type\n params = getattr(metric, \"params\", {})\n else:\n if not isinstance(metric, str):\n raise TypeError(\n \"Metric {} has inappropriate type\"\n \"'dict' or 'str' allowed\".format(metric)\n )\n\n metric_cls = registry.get_metric_class(metric)\n if metric_cls is None:\n raise ValueError(\n \"No metric named {} registered to registry\".format(metric)\n )\n metrics[metric] = metric_cls(**params)\n\n return metrics\n\n def __call__(self, sample_list, model_output, *args, **kwargs):\n values = {}\n if not hasattr(sample_list, \"targets\"):\n return values\n\n dataset_type = sample_list.dataset_type\n\n with torch.no_grad():\n for metric_name, metric_object in self.metrics.items():\n key = \"{}/{}\".format(dataset_type, metric_name)\n values[key] = metric_object._calculate_with_checks(\n sample_list, model_output, *args, **kwargs\n )\n\n if not isinstance(values[key], torch.Tensor):\n values[key] = torch.tensor(values[key], dtype=torch.float)\n\n if values[key].dim() == 0:\n values[key] = values[key].view(1)\n\n registry.register(\n \"{}.{}.{}\".format(\"metrics\", sample_list.dataset_name, dataset_type), values\n )\n\n return values\n\n\nclass BaseMetric:\n \"\"\"Base class to be inherited by all metrics registered to Pythia. See\n the description on top of the file for more information. Child class must\n implement ``calculate`` function.\n\n Args:\n name (str): Name of the metric.\n\n \"\"\"\n\n def __init__(self, name, *args, **kwargs):\n self.name = name\n\n def calculate(self, sample_list, model_output, *args, **kwargs):\n \"\"\"Abstract method to be implemented by the child class. 
Takes\n in a ``SampleList`` and a dict returned by model as output and\n returns back a float tensor/number indicating value for this metric.\n\n Args:\n sample_list (SampleList): SampleList provided by the dataloader for the\n current iteration.\n model_output (Dict): Output dict from the model for the current\n SampleList\n\n Returns:\n torch.Tensor|float: Value of the metric.\n\n \"\"\"\n # Override in your child class\n raise NotImplementedError(\n \"'calculate' must be implemented in the child class\"\n )\n\n def __call__(self, *args, **kwargs):\n return self.calculate(*args, **kwargs)\n\n def _calculate_with_checks(self, *args, **kwargs):\n value = self.calculate(*args, **kwargs)\n return value\n\n\[email protected]_metric(\"accuracy\")\nclass Accuracy(BaseMetric):\n \"\"\"Metric for calculating accuracy.\n\n **Key:** ``accuracy``\n \"\"\"\n\n def __init__(self):\n super().__init__(\"accuracy\")\n\n def calculate(self, sample_list, model_output, *args, **kwargs):\n \"\"\"Calculate accuracy and return it back.\n\n Args:\n sample_list (SampleList): SampleList provided by DataLoader for\n current iteration\n model_output (Dict): Dict returned by model.\n\n Returns:\n torch.FloatTensor: accuracy.\n\n \"\"\"\n output = model_output[\"scores\"]\n expected = sample_list[\"targets\"]\n output = torch.max(output, 1)[1]\n\n correct = (expected == output.squeeze()).sum()\n\n correct = correct\n total = len(expected)\n\n value = correct / total\n return value\n\n\[email protected]_metric(\"caption_bleu4\")\nclass CaptionBleu4Metric(BaseMetric):\n \"\"\"Metric for calculating caption accuracy using BLEU4 Score.\n\n **Key:** ``caption_bleu4``\n \"\"\"\n\n import nltk.translate.bleu_score as bleu_score\n\n def __init__(self):\n super().__init__(\"caption_bleu4\")\n self.caption_processor = registry.get(\"coco_caption_processor\")\n\n def calculate(self, sample_list, model_output, *args, **kwargs):\n \"\"\"Calculate accuracy and return it back.\n\n Args:\n sample_list (SampleList): SampleList provided by DataLoader for\n current iteration\n model_output (Dict): Dict returned by model.\n\n Returns:\n torch.FloatTensor: bleu4 score.\n\n \"\"\"\n # Create reference and hypotheses captions.\n references = []\n hypotheses = []\n\n # References\n targets = sample_list.answers\n for j, p in enumerate(targets):\n img_captions = [\n self.caption_processor(c)[\"tokens\"] for c in targets[j].tolist()\n ]\n references.append(img_captions)\n\n # Hypotheses\n scores = torch.max(model_output[\"scores\"], dim=-1)[1]\n scores = scores.tolist()\n predictions = []\n for j, p in enumerate(scores):\n caption = self.caption_processor(scores[j])[\"tokens\"]\n predictions.append(caption)\n hypotheses.extend(predictions)\n\n assert len(references) == len(hypotheses)\n\n bleu4 = self.bleu_score.corpus_bleu(references, hypotheses)\n\n return targets.new_tensor(bleu4, dtype=torch.float)\n\n\[email protected]_metric(\"vqa_accuracy\")\nclass VQAAccuracy(BaseMetric):\n \"\"\"\n Calculate VQAAccuracy. Find more information here_\n\n **Key**: ``vqa_accuracy``.\n\n .. 
_here: https://visualqa.org/evaluation.html\n \"\"\"\n\n def __init__(self):\n super().__init__(\"vqa_accuracy\")\n\n def _masked_unk_softmax(self, x, dim, mask_idx):\n x1 = torch.nn.functional.softmax(x, dim=dim)\n x1[:, mask_idx] = 0\n x1_sum = torch.sum(x1, dim=1, keepdim=True)\n y = x1 / x1_sum\n return y\n\n def calculate(self, sample_list, model_output, *args, **kwargs):\n \"\"\"Calculate vqa accuracy and return it back.\n\n Args:\n sample_list (SampleList): SampleList provided by DataLoader for\n current iteration\n model_output (Dict): Dict returned by model.\n\n Returns:\n torch.FloatTensor: VQA Accuracy\n\n \"\"\"\n output = model_output[\"scores\"]\n expected = sample_list[\"targets\"]\n\n output = self._masked_unk_softmax(output, 1, 0)\n output = output.argmax(dim=1) # argmax\n\n one_hots = expected.new_zeros(*expected.size())\n one_hots.scatter_(1, output.view(-1, 1), 1)\n scores = one_hots * expected\n accuracy = torch.sum(scores) / expected.size(0)\n\n return accuracy\n\n\nclass RecallAtK(BaseMetric):\n def __init__(self, name=\"recall@k\"):\n super().__init__(name)\n\n def score_to_ranks(self, scores):\n # sort in descending order - largest score gets highest rank\n sorted_ranks, ranked_idx = scores.sort(1, descending=True)\n\n # convert from ranked_idx to ranks\n ranks = ranked_idx.clone().fill_(0)\n for i in range(ranked_idx.size(0)):\n for j in range(100):\n ranks[i][ranked_idx[i][j]] = j\n ranks += 1\n return ranks\n\n def get_gt_ranks(self, ranks, ans_ind):\n _, ans_ind = ans_ind.max(dim=1)\n ans_ind = ans_ind.view(-1)\n gt_ranks = torch.LongTensor(ans_ind.size(0))\n\n for i in range(ans_ind.size(0)):\n gt_ranks[i] = int(ranks[i, ans_ind[i].long()])\n return gt_ranks\n\n def get_ranks(self, sample_list, model_output, *args, **kwargs):\n output = model_output[\"scores\"]\n expected = sample_list[\"targets\"]\n\n ranks = self.score_to_ranks(output)\n gt_ranks = self.get_gt_ranks(ranks, expected)\n\n ranks = self.process_ranks(gt_ranks)\n return ranks.float()\n\n def calculate(self, sample_list, model_output, k, *args, **kwargs):\n ranks = self.get_ranks(sample_list, model_output)\n recall = float(torch.sum(torch.le(ranks, k))) / ranks.size(0)\n return recall\n\n\[email protected]_metric(\"r@1\")\nclass RecallAt1(RecallAtK):\n \"\"\"\n Calculate Recall@1 which specifies how many time the chosen candidate\n was rank 1.\n\n **Key**: ``r@1``.\n \"\"\"\n\n def __init__(self):\n super().__init__(\"r@1\")\n\n def calculate(self, sample_list, model_output, *args, **kwargs):\n \"\"\"Calculate Recall@1 and return it back.\n\n Args:\n sample_list (SampleList): SampleList provided by DataLoader for\n current iteration\n model_output (Dict): Dict returned by model.\n\n Returns:\n torch.FloatTensor: Recall@1\n\n \"\"\"\n return self.calculate(sample_list, model_output, k=1)\n\n\[email protected]_metric(\"r@5\")\nclass RecallAt5(RecallAtK):\n \"\"\"\n Calculate Recall@5 which specifies how many time the chosen candidate\n was among first 5 rank.\n\n **Key**: ``r@5``.\n \"\"\"\n\n def __init__(self):\n super().__init__(\"r@5\")\n\n def calculate(self, sample_list, model_output, *args, **kwargs):\n \"\"\"Calculate Recall@5 and return it back.\n\n Args:\n sample_list (SampleList): SampleList provided by DataLoader for\n current iteration\n model_output (Dict): Dict returned by model.\n\n Returns:\n torch.FloatTensor: Recall@5\n\n \"\"\"\n return self.calculate(sample_list, model_output, k=5)\n\n\[email protected]_metric(\"r@10\")\nclass RecallAt10(RecallAtK):\n \"\"\"\n Calculate 
Recall@10 which specifies how many time the chosen candidate\n was among first 10 ranks.\n\n **Key**: ``r@10``.\n \"\"\"\n\n def __init__(self):\n super().__init__(\"r@10\")\n\n def calculate(self, sample_list, model_output, *args, **kwargs):\n \"\"\"Calculate Recall@10 and return it back.\n\n Args:\n sample_list (SampleList): SampleList provided by DataLoader for\n current iteration\n model_output (Dict): Dict returned by model.\n\n Returns:\n torch.FloatTensor: Recall@10\n\n \"\"\"\n return self.calculate(sample_list, model_output, k=10)\n\n\[email protected]_metric(\"mean_r\")\nclass MeanRank(RecallAtK):\n \"\"\"\n Calculate MeanRank which specifies what was the average rank of the chosen\n candidate.\n\n **Key**: ``mean_r``.\n \"\"\"\n\n def __init__(self):\n super().__init__(\"mean_r\")\n\n def calculate(self, sample_list, model_output, *args, **kwargs):\n \"\"\"Calculate Mean Rank and return it back.\n\n Args:\n sample_list (SampleList): SampleList provided by DataLoader for\n current iteration\n model_output (Dict): Dict returned by model.\n\n Returns:\n torch.FloatTensor: mean rank\n\n \"\"\"\n ranks = self.get_ranks(sample_list, model_output)\n return torch.mean(ranks)\n\n\[email protected]_metric(\"mean_rr\")\nclass MeanReciprocalRank(RecallAtK):\n \"\"\"\n Calculate reciprocal of mean rank..\n\n **Key**: ``mean_rr``.\n \"\"\"\n\n def __init__(self):\n super().__init__(\"mean_rr\")\n\n def calculate(self, sample_list, model_output, *args, **kwargs):\n \"\"\"Calculate Mean Reciprocal Rank and return it back.\n\n Args:\n sample_list (SampleList): SampleList provided by DataLoader for\n current iteration\n model_output (Dict): Dict returned by model.\n\n Returns:\n torch.FloatTensor: Mean Reciprocal Rank\n\n \"\"\"\n ranks = self.get_ranks(sample_list, model_output)\n return torch.mean(ranks.reciprocal())\n"
] | [
[
"torch.is_tensor",
"torch.cuda.max_memory_allocated",
"torch.cuda.max_memory_cached",
"torch.cuda.memory_allocated",
"torch.cuda.memory_cached"
],
[
"torch.mean",
"torch.nn.functional.softmax",
"torch.max",
"torch.sum",
"torch.tensor",
"torch.le",
"torch.no_grad"
]
] |
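
lr_lambda_update in pythia/utils/general.py above produces a learning-rate multiplier: linear warmup from warmup_factor to 1.0 over warmup_iterations, then a step decay by lr_ratio at every boundary in lr_steps (via bisect). A standalone sketch of that schedule, with the nested training_parameters config flattened and hypothetical values:

from bisect import bisect

cfg = {"use_warmup": True, "warmup_iterations": 1000, "warmup_factor": 0.2,
       "lr_steps": [5000, 8000], "lr_ratio": 0.1}

def lr_multiplier(i_iter, cfg):
    if cfg["use_warmup"] and i_iter <= cfg["warmup_iterations"]:
        alpha = i_iter / cfg["warmup_iterations"]
        return cfg["warmup_factor"] * (1.0 - alpha) + alpha
    return cfg["lr_ratio"] ** bisect(cfg["lr_steps"], i_iter)

for it in (0, 500, 1000, 4000, 6000, 9000):
    print(it, round(lr_multiplier(it, cfg), 3))
# 0 0.2 | 500 0.6 | 1000 1.0 | 4000 1.0 | 6000 0.1 | 9000 0.01
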
Qianna00/mmdetection | [
"31e7dff4c61000002d27117543b85e68d2619b4c",
"31e7dff4c61000002d27117543b85e68d2619b4c",
"31e7dff4c61000002d27117543b85e68d2619b4c",
"31e7dff4c61000002d27117543b85e68d2619b4c"
] | [
"mmdet/models/detectors/two_stage_with_MetaEmbedding.py",
"mmdet/models/roi_heads/bbox_heads/bbox_head_separate.py",
"mmdet/mmcv/multi_optim_runner.py",
"mmdet/models/detectors/two_stage_extra_backbone.py"
] | [
"import torch\nimport torch.nn as nn\nimport numpy as np\n\n# from mmdet.core import bbox2result, bbox2roi, build_assigner, build_sampler\nfrom ..builder import DETECTORS, build_backbone, build_head, build_neck\nfrom .base import BaseDetector\nfrom tqdm import tqdm\nfrom mmdet.datasets import build_dataloader, build_dataset\nfrom mmcv import Config\nfrom mmdet.core import bbox2roi\nfrom functools import partial\nfrom torch.utils.data.dataloader import DataLoader\n\n\[email protected]_module()\nclass TwoStageDetectorMetaEmbedding(BaseDetector):\n \"\"\"Base class for two-stage detectors.\n\n Two-stage detectors typically consisting of a region proposal network and a\n task-specific regression head.\n \"\"\"\n\n def __init__(self,\n backbone,\n neck=None,\n rpn_head=None,\n roi_head=None,\n train_cfg=None,\n test_cfg=None,\n init_centroids=False,\n pretrained=None):\n super(TwoStageDetectorMetaEmbedding, self).__init__()\n self.backbone = build_backbone(backbone)\n self.device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')\n self.init_centroids = init_centroids\n\n if neck is not None:\n self.neck = build_neck(neck)\n\n if rpn_head is not None:\n rpn_train_cfg = train_cfg.rpn if train_cfg is not None else None\n rpn_head_ = rpn_head.copy()\n rpn_head_.update(train_cfg=rpn_train_cfg, test_cfg=test_cfg.rpn)\n self.rpn_head = build_head(rpn_head_)\n \"\"\"if self.init_centroids:\n for p in self.parameters():\n p.requires_grad = False\"\"\"\n\n if roi_head is not None:\n # update train and test cfg here for now\n # TODO: refactor assigner & sampler\n rcnn_train_cfg = train_cfg.rcnn if train_cfg is not None else None\n roi_head.update(train_cfg=rcnn_train_cfg)\n roi_head.update(test_cfg=test_cfg.rcnn)\n self.roi_head = build_head(roi_head)\n\n if self.init_centroids:\n self.centroids = self.roi_head.loss_feat.centroids.data\n else:\n self.centroids = None\n\n self.train_cfg = train_cfg\n self.test_cfg = test_cfg\n\n self.init_weights(pretrained=pretrained)\n if roi_head[\"type\"] == \"MetaEmbedding_RoIHead\":\n # calculate init_centroids using training dataset\n if self.train_cfg is not None:\n if init_centroids:\n cfg = Config.fromfile(\n \"/mmdetection/configs/faster_rcnn_meta/faster_rcnn_r50_c4_meta_smd_stage2.py\")\n dataset = build_dataset(cfg.centroids_cal)\n # data = build_dataloader(dataset, samples_per_gpu=1, workers_per_gpu=0, num_gpus=1, shuffle=False)\n # print(data[0])\n self.roi_head.loss_feat.centroids.data = self.centroids_cal(dataset)\n\n @property\n def with_rpn(self):\n return hasattr(self, 'rpn_head') and self.rpn_head is not None\n\n @property\n def with_roi_head(self):\n return hasattr(self, 'roi_head') and self.roi_head is not None\n\n def init_weights(self, pretrained=None):\n super(TwoStageDetectorMetaEmbedding, self).init_weights(pretrained)\n self.backbone.init_weights(pretrained=pretrained)\n if self.with_neck:\n if isinstance(self.neck, nn.Sequential):\n for m in self.neck:\n m.init_weights()\n else:\n self.neck.init_weights()\n if self.with_rpn:\n self.rpn_head.init_weights()\n if self.with_roi_head:\n self.roi_head.init_weights(pretrained)\n\n def extract_feat(self, img):\n \"\"\"Directly extract features from the backbone+neck\n \"\"\"\n x = self.backbone(img)\n if self.with_neck:\n x = self.neck(x)\n return x\n\n def forward_dummy(self, img):\n \"\"\"Used for computing network flops.\n\n See `mmdetection/tools/get_flops.py`\n \"\"\"\n outs = ()\n # backbone\n x = self.extract_feat(img)\n # rpn\n if self.with_rpn:\n rpn_outs = self.rpn_head(x)\n 
outs = outs + (rpn_outs, )\n proposals = torch.randn(1000, 4).to(img.device)\n # roi_head\n roi_outs = self.roi_head.forward_dummy(x, proposals)\n outs = outs + (roi_outs, )\n return outs\n\n def forward_train(self,\n img,\n img_metas,\n gt_bboxes,\n gt_labels,\n gt_bboxes_ignore=None,\n gt_masks=None,\n proposals=None,\n **kwargs):\n \"\"\"\n Args:\n img (Tensor): of shape (N, C, H, W) encoding input images.\n Typically these should be mean centered and std scaled.\n\n img_metas (list[dict]): list of image info dict where each dict\n has: 'img_shape', 'scale_factor', 'flip', and may also contain\n 'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'.\n For details on the values of these keys see\n `mmdet/datasets/pipelines/formatting.py:Collect`.\n\n gt_bboxes (list[Tensor]): each item are the truth boxes for each\n image in [tl_x, tl_y, br_x, br_y] format.\n\n gt_labels (list[Tensor]): class indices corresponding to each box\n\n gt_bboxes_ignore (None | list[Tensor]): specify which bounding\n boxes can be ignored when computing the loss.\n\n gt_masks (None | Tensor) : true segmentation masks for each box\n used if the architecture supports a segmentation task.\n\n proposals : override rpn proposals with custom proposals. Use when\n `with_rpn` is False.\n\n Returns:\n dict[str, Tensor]: a dictionary of loss components\n \"\"\"\n x = self.extract_feat(img)\n\n losses = dict()\n\n # RPN forward and loss\n if self.with_rpn:\n proposal_cfg = self.train_cfg.get('rpn_proposal',\n self.test_cfg.rpn)\n rpn_losses, proposal_list = self.rpn_head.forward_train(\n x,\n img_metas,\n gt_bboxes,\n gt_labels=None,\n gt_bboxes_ignore=gt_bboxes_ignore,\n proposal_cfg=proposal_cfg)\n losses.update(rpn_losses)\n else:\n proposal_list = proposals\n\n \"\"\"roi_losses = self.roi_head.forward_train(x, img_metas, proposal_list,\n gt_bboxes, gt_labels,\n gt_bboxes_ignore, gt_masks,\n **kwargs)\"\"\"\n\n\n roi_losses = self.roi_head(x,\n centroids=self.centroids,\n img_metas=img_metas,\n proposal_list=proposal_list,\n gt_bboxes=gt_bboxes,\n gt_labels=gt_labels,\n gt_bboxes_ignore=gt_bboxes_ignore,\n gt_masks=gt_masks,\n test=False,\n **kwargs)\n losses.update(roi_losses)\n\n return losses\n\n async def async_simple_test(self,\n img,\n img_meta,\n proposals=None,\n rescale=False):\n \"\"\"Async test without augmentation.\"\"\"\n assert self.with_bbox, 'Bbox head must be implemented.'\n x = self.extract_feat(img)\n\n if proposals is None:\n proposal_list = await self.rpn_head.async_simple_test_rpn(\n x, img_meta)\n else:\n proposal_list = proposals\n\n return await self.roi_head.async_simple_test(\n x, proposal_list, img_meta, rescale=rescale)\n\n def simple_test(self, img, img_metas, proposals=None, rescale=False):\n \"\"\"Test without augmentation.\"\"\"\n # assert self.with_bbox, 'Bbox head must be implemented.'\n\n x = self.extract_feat(img)\n\n if proposals is None:\n proposal_list = self.rpn_head.simple_test_rpn(x, img_metas)\n else:\n proposal_list = proposals\n\n return self.roi_head(x,\n centroids=self.centroids,\n proposal_list=proposal_list,\n img_metas=img_metas,\n test=True)\n\n def aug_test(self, imgs, img_metas, rescale=False):\n \"\"\"Test with augmentations.\n\n If rescale is False, then returned bboxes and masks will fit the scale\n of imgs[0].\n \"\"\"\n # recompute feats to save memory\n x = self.extract_feats(imgs)\n proposal_list = self.rpn_head.aug_test_rpn(x, img_metas)\n return self.roi_head.aug_test(\n x, proposal_list, img_metas, rescale=rescale)\n\n def centroids_cal(self, data):\n\n 
centroids = torch.zeros(self.roi_head.num_classes,\n self.roi_head.feat_dim,\n 14,\n 14).cuda()\n\n print('Calculating centroids.')\n\n # Calculate initial centroids only on training data.\n with torch.set_grad_enabled(False):\n self.backbone.cuda()\n self.rpn_head.cuda()\n self.roi_head.cuda()\n class_data_num = [0, 0, 0, 0, 0, 0]\n # class_data_num = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]\n for i in tqdm(range(len(data))):\n \"\"\"imgs, gt_labels, gt_bboxes, img_metas = inputs[\"img\"], \\\n inputs[\"gt_labels\"], \\\n inputs[\"gt_bboxes\"],\\\n inputs[\"img_metas\"]\"\"\"\n imgs, gt_labels, gt_bboxes, img_metas = \\\n torch.unsqueeze(data[i]['img'], 0).to(next(self.backbone.parameters()).device), \\\n [data[i]['gt_labels'].to(next(self.backbone.parameters()).device)], \\\n [data[i]['gt_bboxes'].to(next(self.backbone.parameters()).device)], \\\n [data[i]['img_metas']]\n # Calculate Features of each training data\n feats = self.backbone(imgs)\n \"\"\"proposal_list = self.rpn_head.simple_test_rpn(feats, img_metas)\n num_imgs = len(img_metas)\n # if gt_bboxes_ignore is None:\n gt_bboxes_ignore = [None for _ in range(num_imgs)]\n sampling_results = []\n for i in range(num_imgs):\n assign_result = self.roi_head.std_roi_head.bbox_assigner.assign(\n proposal_list[i], gt_bboxes[i], gt_bboxes_ignore[i],\n gt_labels[i])\n sampling_result = self.roi_head.std_roi_head.bbox_sampler.sample(\n assign_result,\n proposal_list[i],\n gt_bboxes[i],\n gt_labels[i],\n feats=[lvl_feat[i][None] for lvl_feat in feats])\n sampling_results.append(sampling_result)\n\n rois = bbox2roi([res.bboxes for res in sampling_results])\"\"\"\n rois = bbox2roi(gt_bboxes)\n bbox_feats = self.roi_head.std_roi_head.bbox_roi_extractor(\n feats[:self.roi_head.std_roi_head.bbox_roi_extractor.num_inputs], rois)\n\n \"\"\"labels = self.roi_head.std_roi_head.bbox_head.get_targets(sampling_results, gt_bboxes,\n gt_labels, self.train_cfg.rcnn)[0]\n # Add all calculated features to center tensor\n for i in range(len(labels)):\n label = labels[i]\n if label < self.roi_head.num_classes:\n centroids[label] += bbox_feats[i]\n class_data_num[label] += 1\"\"\"\n for j in range(len(gt_labels[0])):\n label = gt_labels[0][j]\n centroids[label] += bbox_feats[j]\n class_data_num[label] += 1\n for i in range(len(class_data_num)):\n if class_data_num[i] == 0:\n class_data_num[i] = 1\n\n # Average summed features with class count\n centroids /= torch.tensor(class_data_num).float().unsqueeze(1).unsqueeze(2).\\\n unsqueeze(3).repeat(1, 1024, 14, 14).cuda()\n\n return centroids\n\n\ndef class_count(data):\n labels = np.array(data.dataset.labels)\n class_data_num = []\n for l in np.unique(labels):\n class_data_num.append(len(labels[labels == l]))\n return class_data_num",
"import torch.nn as nn\nfrom mmcv.cnn import ConvModule\n\nfrom mmdet.models.builder import HEADS\nfrom .bbox_head import BBoxHead\n\n\[email protected]_module()\nclass ConvFCBBoxHeadSeparate(BBoxHead):\n r\"\"\"More general bbox head, with shared conv and fc layers and two optional\n separated branches.\n\n .. code-block:: none\n\n /-> cls convs -> cls fcs -> cls\n shared convs -> shared fcs\n \\-> reg convs -> reg fcs -> reg\n \"\"\" # noqa: W605\n\n def __init__(self,\n num_shared_convs=0,\n num_shared_fcs=0,\n num_cls_convs=0,\n num_cls_fcs=0,\n num_reg_convs=0,\n num_reg_fcs=0,\n conv_out_channels=256,\n fc_out_channels=1024,\n conv_cfg=None,\n norm_cfg=None,\n *args,\n **kwargs):\n super(ConvFCBBoxHeadSeparate, self).__init__(*args, **kwargs)\n assert (num_shared_convs + num_shared_fcs + num_cls_convs +\n num_cls_fcs + num_reg_convs + num_reg_fcs > 0)\n if num_cls_convs > 0 or num_reg_convs > 0:\n assert num_shared_fcs == 0\n if not self.with_cls:\n assert num_cls_convs == 0 and num_cls_fcs == 0\n if not self.with_reg:\n assert num_reg_convs == 0 and num_reg_fcs == 0\n self.num_shared_convs = num_shared_convs\n self.num_shared_fcs = num_shared_fcs\n self.num_cls_convs = num_cls_convs\n self.num_cls_fcs = num_cls_fcs\n self.num_reg_convs = num_reg_convs\n self.num_reg_fcs = num_reg_fcs\n self.conv_out_channels = conv_out_channels\n self.fc_out_channels = fc_out_channels\n self.conv_cfg = conv_cfg\n self.norm_cfg = norm_cfg\n\n # add shared convs and fcs\n self.shared_convs, self.shared_fcs, last_layer_dim = \\\n self._add_conv_fc_branch(\n self.num_shared_convs, self.num_shared_fcs, self.in_channels,\n True)\n self.shared_out_channels = last_layer_dim\n\n # add cls specific branch\n self.cls_convs, self.cls_fcs, self.cls_last_dim = \\\n self._add_conv_fc_branch(\n self.num_cls_convs, self.num_cls_fcs, self.shared_out_channels)\n\n # add reg specific branch\n self.reg_convs, self.reg_fcs, self.reg_last_dim = \\\n self._add_conv_fc_branch(\n self.num_reg_convs, self.num_reg_fcs, self.shared_out_channels)\n\n if self.num_shared_fcs == 0 and not self.with_avg_pool:\n if self.num_cls_fcs == 0:\n self.cls_last_dim *= self.roi_feat_area\n if self.num_reg_fcs == 0:\n self.reg_last_dim *= self.roi_feat_area\n\n self.relu = nn.ReLU(inplace=True)\n # reconstruct fc_cls and fc_reg since input channels are changed\n if self.with_cls:\n self.fc_cls = nn.Linear(self.cls_last_dim, self.num_classes + 1)\n if self.with_reg:\n out_dim_reg = (4 if self.reg_class_agnostic else 4 *\n self.num_classes)\n self.fc_reg = nn.Linear(self.reg_last_dim, out_dim_reg)\n\n def _add_conv_fc_branch(self,\n num_branch_convs,\n num_branch_fcs,\n in_channels,\n is_shared=False):\n \"\"\"Add shared or separable branch\n\n convs -> avg pool (optional) -> fcs\n \"\"\"\n last_layer_dim = in_channels\n # add branch specific conv layers\n branch_convs = nn.ModuleList()\n if num_branch_convs > 0:\n for i in range(num_branch_convs):\n conv_in_channels = (\n last_layer_dim if i == 0 else self.conv_out_channels)\n branch_convs.append(\n ConvModule(\n conv_in_channels,\n self.conv_out_channels,\n 3,\n padding=1,\n conv_cfg=self.conv_cfg,\n norm_cfg=self.norm_cfg))\n last_layer_dim = self.conv_out_channels\n # add branch specific fc layers\n branch_fcs = nn.ModuleList()\n if num_branch_fcs > 0:\n # for shared branch, only consider self.with_avg_pool\n # for separated branches, also consider self.num_shared_fcs\n if (is_shared\n or self.num_shared_fcs == 0) and not self.with_avg_pool:\n last_layer_dim *= self.roi_feat_area\n for i in 
range(num_branch_fcs):\n fc_in_channels = (\n last_layer_dim if i == 0 else self.fc_out_channels)\n branch_fcs.append(\n nn.Linear(fc_in_channels, self.fc_out_channels))\n last_layer_dim = self.fc_out_channels\n return branch_convs, branch_fcs, last_layer_dim\n\n def init_weights(self):\n super(ConvFCBBoxHeadSeparate, self).init_weights()\n # conv layers are already initialized by ConvModule\n for module_list in [self.shared_fcs, self.cls_fcs, self.reg_fcs]:\n for m in module_list.modules():\n if isinstance(m, nn.Linear):\n nn.init.xavier_uniform_(m.weight)\n nn.init.constant_(m.bias, 0)\n\n def forward(self, x_cat, x):\n # shared part\n if self.num_shared_convs > 0:\n for conv in self.shared_convs:\n x = conv(x)\n\n if self.num_shared_fcs > 0:\n if self.with_avg_pool:\n x = self.avg_pool(x)\n\n x = x.flatten(1)\n\n for fc in self.shared_fcs:\n x = self.relu(fc(x))\n # separate branches\n x_cls = x_cat\n x_reg = x\n\n for conv in self.cls_convs:\n x_cls = conv(x_cls)\n if x_cls.dim() > 2:\n if self.with_avg_pool:\n x_cls = self.avg_pool(x_cls)\n x_cls = x_cls.flatten(1)\n for fc in self.cls_fcs:\n x_cls = self.relu(fc(x_cls))\n\n for conv in self.reg_convs:\n x_reg = conv(x_reg)\n if x_reg.dim() > 2:\n if self.with_avg_pool:\n x_reg = self.avg_pool(x_reg)\n x_reg = x_reg.flatten(1)\n for fc in self.reg_fcs:\n x_reg = self.relu(fc(x_reg))\n\n cls_score = self.fc_cls(x_cls) if self.with_cls else None\n bbox_pred = self.fc_reg(x_reg) if self.with_reg else None\n return cls_score, bbox_pred\n",
"import mmcv\nfrom mmcv.runner import EpochBasedRunner\nfrom mmcv.runner.hooks import IterTimerHook, HOOKS, Hook\nfrom mmcv.runner.priority import get_priority\nimport torch\nimport os.path as osp\nfrom mmcv.runner.checkpoint import save_checkpoint\nimport time\nfrom mmcv.parallel import is_module_wrapper\nfrom torch.optim import Optimizer\nfrom mmcv.runner.checkpoint import weights_to_cpu\n\n\nclass MultiOptimRunner(EpochBasedRunner):\n\n def __init__(self,\n model,\n optimizer_b=None,\n optimizer_g=None,\n optimizer_d=None,\n work_dir=None,\n logger=None,\n meta=None):\n self.optimizer_b = optimizer_b\n self.optimizer_g = optimizer_g\n self.optimizer_d = optimizer_d\n # optimizer = {\"optimizer_b\": optimizer_b, \"optimizer_g\": optimizer_g, \"optimizer_d\": optimizer_d}\n super(MultiOptimRunner, self).__init__(model=model, work_dir=work_dir, logger=logger, meta=meta)\n\n def current_lr(self):\n \"\"\"Get current learning rates.\n\n Returns:\n list[float] | dict[str, list[float]]: Current learning rates of all\n param groups. If the runner has a dict of optimizers, this\n method will return a dict.\n \"\"\"\n if isinstance(self.optimizer_b, torch.optim.Optimizer):\n lr = [group['lr'] for group in self.optimizer_b.param_groups]\n elif isinstance(self.optimizer_b, dict):\n lr = dict()\n for name, optim in self.optimizer_b.items():\n lr[name] = [group['lr'] for group in optim.param_groups]\n else:\n raise RuntimeError(\n 'lr is not applicable because optimizer does not exist.')\n return lr\n\n def register_hook(self, hook, priority='HIGH'):\n \"\"\"Register a hook into the hook list.\n\n The hook will be inserted into a priority queue, with the specified\n priority (See :cls:`Priority` for details of priorities).\n For hooks with the same priority, they will be triggered in the same\n order as they are registered.\n\n Args:\n hook (:obj:`Hook`): The hook to be registered.\n priority (int or str or :obj:`Priority`): Hook priority.\n Lower value means higher priority.\n \"\"\"\n assert isinstance(hook, Hook)\n if hasattr(hook, 'priority'):\n raise ValueError('\"priority\" is a reserved attribute for hooks')\n priority = get_priority(priority)\n hook.priority = priority\n # insert the hook to a sorted list\n inserted = False\n for i in range(len(self._hooks) - 1, -1, -1):\n if priority >= self._hooks[i].priority:\n self._hooks.insert(i + 1, hook)\n inserted = True\n break\n if not inserted:\n self._hooks.insert(0, hook)\n\n def register_lr_hook(self, lr_config, type='B'):\n if isinstance(lr_config, dict):\n assert 'policy' in lr_config\n policy_type = lr_config.pop('policy')\n # If the type of policy is all in lower case, e.g., 'cyclic',\n # then its first letter will be capitalized, e.g., to be 'Cyclic'.\n # This is for the convenient usage of Lr updater.\n # Since this is not applicable for `CosineAnealingLrUpdater`,\n # the string will not be changed if it contains capital letters.\n if policy_type == policy_type.lower():\n policy_type = policy_type.title()\n hook_type = policy_type + 'LrUpdaterHook'\n lr_config['type'] = hook_type + type\n hook = mmcv.build_from_cfg(lr_config, HOOKS)\n else:\n hook = lr_config\n self.register_hook(hook)\n\n def register_momentum_hook(self, momentum_config):\n if momentum_config is None:\n return\n if isinstance(momentum_config, dict):\n assert 'policy' in momentum_config\n policy_type = momentum_config.pop('policy')\n # If the type of policy is all in lower case, e.g., 'cyclic',\n # then its first letter will be capitalized, e.g., to be 'Cyclic'.\n # This 
is for the convenient usage of momentum updater.\n # Since this is not applicable for `CosineAnealingMomentumUpdater`,\n # the string will not be changed if it contains capital letters.\n if policy_type == policy_type.lower():\n policy_type = policy_type.title()\n hook_type = policy_type + 'MomentumUpdaterHook'\n momentum_config['type'] = hook_type\n hook = mmcv.build_from_cfg(momentum_config, HOOKS)\n else:\n hook = momentum_config\n self.register_hook(hook)\n\n def register_optimizer_hook(self, optimizer_config, priority='NORMAL', optim_type=\"OptimHookB\"):\n if optimizer_config is None:\n return\n if isinstance(optimizer_config, dict):\n optimizer_config.setdefault('type', optim_type)\n hook = mmcv.build_from_cfg(optimizer_config, HOOKS)\n else:\n hook = optimizer_config\n self.register_hook(hook, priority)\n\n def register_training_hooks(self,\n lr_config_b,\n lr_config_g=None,\n lr_config_d=None,\n optimizer_b_config=None,\n optimizer_g_config=None,\n optimizer_d_config=None,\n checkpoint_config=None,\n log_config=None,\n momentum_config=None,\n e2e_training=False):\n self.register_lr_hook(lr_config_b, type='B')\n self.register_lr_hook(lr_config_g, type='G')\n self.register_lr_hook(lr_config_d, type='D')\n self.register_momentum_hook(momentum_config)\n if e2e_training:\n self.register_optimizer_hook(optimizer_b_config, priority=\"HIGH\", optim_type=\"OptimHookB\")\n self.register_optimizer_hook(optimizer_g_config, priority=\"NORMAL\", optim_type=\"OptimHookG\")\n self.register_optimizer_hook(optimizer_d_config, priority=\"LOW\", optim_type=\"OptimHookD\")\n # self.register_optimizer_hook(optimizer_b_config, priority=\"NORMAL\", optim_type=\"MultiOptimHook\")\n self.register_checkpoint_hook(checkpoint_config)\n self.register_hook(IterTimerHook())\n self.register_logger_hooks(log_config)\n\n def save_checkpoint(self,\n out_dir,\n filename_tmpl='epoch_{}.pth',\n save_optimizer=True,\n meta=None,\n create_symlink=True):\n \"\"\"Save the checkpoint.\n\n Args:\n out_dir (str): The directory that checkpoints are saved.\n filename_tmpl (str, optional): The checkpoint filename template,\n which contains a placeholder for the epoch number.\n Defaults to 'epoch_{}.pth'.\n save_optimizer (bool, optional): Whether to save the optimizer to\n the checkpoint. Defaults to True.\n meta (dict, optional): The meta information to be saved in the\n checkpoint. Defaults to None.\n create_symlink (bool, optional): Whether to create a symlink\n \"latest.pth\" to point to the latest checkpoint.\n Defaults to True.\n \"\"\"\n if meta is None:\n meta = dict(epoch=self.epoch + 1, iter=self.iter)\n else:\n meta.update(epoch=self.epoch + 1, iter=self.iter)\n\n filename = filename_tmpl.format(self.epoch + 1)\n filepath = osp.join(out_dir, filename)\n optimizer_b = self.optimizer_b if save_optimizer else None\n optimizer_g = self.optimizer_g if save_optimizer else None\n optimizer_d = self.optimizer_g if save_optimizer else None\n _save_checkpoint(self.model, filepath, optimizer_b=optimizer_b, optimizer_g=optimizer_g,\n optimizer_d=optimizer_d, meta=meta)\n # in some environments, `os.symlink` is not supported, you may need to\n # set `create_symlink` to False\n if create_symlink:\n mmcv.symlink(filename, osp.join(out_dir, 'latest.pth'))\n\ndef _save_checkpoint(model, filename, optimizer_b=None, optimizer_g=None, optimizer_d=None, meta=None):\n \"\"\"Save checkpoint to file.\n\n The checkpoint will have 3 fields: ``meta``, ``state_dict`` and\n ``optimizer``. 
By default ``meta`` will contain version and time info.\n\n Args:\n model (Module): Module whose params are to be saved.\n filename (str): Checkpoint filename.\n optimizer (:obj:`Optimizer`, optional): Optimizer to be saved.\n meta (dict, optional): Metadata to be saved in checkpoint.\n \"\"\"\n if meta is None:\n meta = {}\n elif not isinstance(meta, dict):\n raise TypeError(f'meta must be a dict or None, but got {type(meta)}')\n meta.update(mmcv_version=mmcv.__version__, time=time.asctime())\n\n mmcv.mkdir_or_exist(osp.dirname(filename))\n if is_module_wrapper(model):\n model = model.module\n\n checkpoint = {\n 'meta': meta,\n 'state_dict': weights_to_cpu(model.state_dict())\n }\n # save optimizer state dict in the checkpoint\n if isinstance(optimizer_b, Optimizer):\n checkpoint['optimizer_b'] = optimizer_b.state_dict()\n elif isinstance(optimizer_b, dict):\n checkpoint['optimizer_b'] = {}\n for name, optim in optimizer_b.items():\n checkpoint['optimizer_b'][name] = optim.state_dict()\n if isinstance(optimizer_g, Optimizer):\n checkpoint['optimizer_g'] = optimizer_g.state_dict()\n elif isinstance(optimizer_g, dict):\n checkpoint['optimizer_g'] = {}\n for name, optim in optimizer_g.items():\n checkpoint['optimizer_g'][name] = optim.state_dict()\n if isinstance(optimizer_d, Optimizer):\n checkpoint['optimizer_d'] = optimizer_d.state_dict()\n elif isinstance(optimizer_d, dict):\n checkpoint['optimizer_d'] = {}\n for name, optim in optimizer_d.items():\n checkpoint['optimizer_d'][name] = optim.state_dict()\n # immediately flush buffer\n with open(filename, 'wb') as f:\n torch.save(checkpoint, f)\n f.flush()\n",
"import torch\nimport torch.nn as nn\n\n# from mmdet.core import bbox2result, bbox2roi, build_assigner, build_sampler\nfrom ..builder import DETECTORS, build_backbone, build_head, build_neck\nfrom .base import BaseDetector\nfrom mmcv.cnn import kaiming_init\n\n\[email protected]_module()\nclass TwoStageDetectorWithExtraBackbone(BaseDetector):\n \"\"\"Base class for two-stage detectors.\n\n Two-stage detectors typically consisting of a region proposal network and a\n task-specific regression head.\n \"\"\"\n\n def __init__(self,\n backbone,\n extra_backbone=None,\n neck=None,\n rpn_head=None,\n roi_head=None,\n train_cfg=None,\n test_cfg=None,\n pretrained=None,\n pretrained_extra=None):\n super(TwoStageDetectorWithExtraBackbone, self).__init__()\n self.backbone = build_backbone(backbone)\n if extra_backbone is not None:\n self.extra_backbone = build_backbone(extra_backbone)\n\n if neck is not None:\n self.neck = build_neck(neck)\n\n if rpn_head is not None:\n rpn_train_cfg = train_cfg.rpn if train_cfg is not None else None\n rpn_head_ = rpn_head.copy()\n rpn_head_.update(train_cfg=rpn_train_cfg, test_cfg=test_cfg.rpn)\n self.rpn_head = build_head(rpn_head_)\n\n if roi_head is not None:\n # update train and test cfg here for now\n # TODO: refactor assigner & sampler\n rcnn_train_cfg = train_cfg.rcnn if train_cfg is not None else None\n roi_head.update(train_cfg=rcnn_train_cfg)\n roi_head.update(test_cfg=test_cfg.rcnn)\n self.roi_head = build_head(roi_head)\n\n self.train_cfg = train_cfg\n self.test_cfg = test_cfg\n\n self.init_weights(pretrained=pretrained, pretrained_extra=pretrained_extra)\n\n @property\n def with_rpn(self):\n return hasattr(self, 'rpn_head') and self.rpn_head is not None\n\n @property\n def with_roi_head(self):\n return hasattr(self, 'roi_head') and self.roi_head is not None\n\n @property\n def with_extra_backbone(self):\n return hasattr(self, 'extra_backbone') and self.extra_backbone is not None\n\n def init_weights(self, pretrained=None, pretrained_extra=None):\n # super(TwoStageDetectorWithExtraBackbone, self).init_weights(pretrained, pretrained_extra)\n self.backbone.init_weights(pretrained=pretrained)\n if self.with_neck:\n if isinstance(self.neck, nn.Sequential):\n for m in self.neck:\n m.init_weights()\n else:\n self.neck.init_weights()\n if self.with_rpn:\n self.rpn_head.init_weights()\n if self.with_roi_head:\n self.roi_head.init_weights(pretrained)\n if self.with_extra_backbone:\n self.extra_backbone.init_weights(pretrained=pretrained_extra)\n\n def extract_feat(self, img):\n \"\"\"Directly extract features from the backbone+neck\n \"\"\"\n x = self.backbone(img)\n if self.with_neck:\n x = self.neck(x)\n return x\n\n def extract_extra_feats(self, img):\n x = self.extra_backbone(img)\n if self.with_neck:\n x = self.neck(x)\n x_new = []\n for i in x:\n x_new.append(i.detach())\n return tuple(x)\n\n def forward_dummy(self, img):\n \"\"\"Used for computing network flops.\n\n See `mmdetection/tools/get_flops.py`\n \"\"\"\n outs = ()\n # backbone\n x = self.extract_feat(img)\n # rpn\n if self.with_rpn:\n rpn_outs = self.rpn_head(x)\n outs = outs + (rpn_outs, )\n proposals = torch.randn(1000, 4).to(img.device)\n # roi_head\n roi_outs = self.roi_head.forward_dummy(x, proposals)\n outs = outs + (roi_outs, )\n return outs\n\n def forward_train(self,\n img,\n img_metas,\n gt_bboxes,\n gt_labels,\n gt_bboxes_ignore=None,\n gt_masks=None,\n proposals=None,\n **kwargs):\n \"\"\"\n Args:\n img (Tensor): of shape (N, C, H, W) encoding input images.\n Typically these should be 
mean centered and std scaled.\n\n img_metas (list[dict]): list of image info dict where each dict\n has: 'img_shape', 'scale_factor', 'flip', and may also contain\n 'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'.\n For details on the values of these keys see\n `mmdet/datasets/pipelines/formatting.py:Collect`.\n\n gt_bboxes (list[Tensor]): each item are the truth boxes for each\n image in [tl_x, tl_y, br_x, br_y] format.\n\n gt_labels (list[Tensor]): class indices corresponding to each box\n\n gt_bboxes_ignore (None | list[Tensor]): specify which bounding\n boxes can be ignored when computing the loss.\n\n gt_masks (None | Tensor) : true segmentation masks for each box\n used if the architecture supports a segmentation task.\n\n proposals : override rpn proposals with custom proposals. Use when\n `with_rpn` is False.\n\n Returns:\n dict[str, Tensor]: a dictionary of loss components\n \"\"\"\n x = self.extract_feat(img)\n if self.with_extra_backbone:\n x_extra = self.extract_extra_feats(img)\n\n losses = dict()\n\n # RPN forward and loss\n if self.with_rpn:\n proposal_cfg = self.train_cfg.get('rpn_proposal',\n self.test_cfg.rpn)\n rpn_losses, proposal_list = self.rpn_head.forward_train(\n x,\n img_metas,\n gt_bboxes,\n gt_labels=None,\n gt_bboxes_ignore=gt_bboxes_ignore,\n proposal_cfg=proposal_cfg)\n losses.update(rpn_losses)\n else:\n proposal_list = proposals\n\n if self.with_extra_backbone:\n roi_losses = self.roi_head.forward_train(x, x_extra, img_metas, proposal_list,\n gt_bboxes, gt_labels,\n gt_bboxes_ignore, gt_masks,\n **kwargs)\n else:\n roi_losses = self.roi_head.forward_train(x, img_metas, proposal_list,\n gt_bboxes, gt_labels,\n gt_bboxes_ignore, gt_masks,\n **kwargs)\n losses.update(roi_losses)\n\n return losses\n\n async def async_simple_test(self,\n img,\n img_meta,\n proposals=None,\n rescale=False):\n \"\"\"Async test without augmentation.\"\"\"\n assert self.with_bbox, 'Bbox head must be implemented.'\n x = self.extract_feat(img)\n\n if proposals is None:\n proposal_list = await self.rpn_head.async_simple_test_rpn(\n x, img_meta)\n else:\n proposal_list = proposals\n\n return await self.roi_head.async_simple_test(\n x, proposal_list, img_meta, rescale=rescale)\n\n def simple_test(self, img, img_metas, proposals=None, rescale=False):\n \"\"\"Test without augmentation.\"\"\"\n assert self.with_bbox, 'Bbox head must be implemented.'\n\n x = self.extract_feat(img)\n if self.with_extra_backbone:\n x_extra = self.extract_extra_feats(img)\n\n if proposals is None:\n proposal_list = self.rpn_head.simple_test_rpn(x, img_metas)\n else:\n proposal_list = proposals\n\n return self.roi_head.simple_test(\n x, x_extra, proposal_list, img_metas, rescale=rescale)\n\n def aug_test(self, imgs, img_metas, rescale=False):\n \"\"\"Test with augmentations.\n\n If rescale is False, then returned bboxes and masks will fit the scale\n of imgs[0].\n \"\"\"\n # recompute feats to save memory\n x = self.extract_feats(imgs)\n proposal_list = self.rpn_head.aug_test_rpn(x, img_metas)\n return self.roi_head.aug_test(\n x, proposal_list, img_metas, rescale=rescale)\n"
] | [
[
"numpy.unique",
"torch.zeros",
"torch.randn",
"torch.unsqueeze",
"torch.tensor",
"torch.set_grad_enabled",
"torch.cuda.is_available",
"numpy.array"
],
[
"torch.nn.init.constant_",
"torch.nn.ModuleList",
"torch.nn.Linear",
"torch.nn.init.xavier_uniform_",
"torch.nn.ReLU"
],
[
"torch.save"
],
[
"torch.randn"
]
] |
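Two details in the runner/detector code quoted in the row above look like slips worth flagging: `MultiOptimRunner.save_checkpoint` assigns `optimizer_d = self.optimizer_g if save_optimizer else None`, so the discriminator optimizer state is never actually saved, and `TwoStageDetectorWithExtraBackbone.extract_extra_feats` builds a list of detached tensors (`x_new`) but returns `tuple(x)`, so the detach has no effect. A minimal sketch of the presumably intended optimizer selection follows; the attribute names come from the quoted class, while the helper function itself is hypothetical and only illustrates the fix.

```python
def checkpoint_optimizers(runner, save_optimizer=True):
    """Presumed intent: take each optimizer from its own attribute."""
    optimizer_b = runner.optimizer_b if save_optimizer else None
    optimizer_g = runner.optimizer_g if save_optimizer else None
    optimizer_d = runner.optimizer_d if save_optimizer else None  # quoted code uses runner.optimizer_g here
    return optimizer_b, optimizer_g, optimizer_d
```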
BUPT-GAMMA/GammaGL | [
"2b9f32e1ac3533cb75a063243e8a2fa654466d18",
"2b9f32e1ac3533cb75a063243e8a2fa654466d18"
] | [
"profiler/mpops/ms_gpu.py",
"gammagl/utils/coalesce.py"
] | [
"# !/usr/bin/env python3\n# -*- coding:utf-8 -*-\n\n# @Time : 2022/04/14 08:36\n# @Author : clear\n# @FileName: ms_gpu.py\n\nimport os\nos.environ['TL_BACKEND'] = 'mindspore'\nos.environ[\"CUDA_VISIBLE_DEVICES\"] = \"1\"\n\nimport sys\nsys.path.insert(0, os.path.abspath('../../'))\nimport time\nimport numpy as np\nimport tensorlayerx as tlx\nfrom gammagl.mpops import *\n\nedge_index = np.load('edge_index.npy')\nnum_nodes = int(np.max(edge_index))+1\nsrc = edge_index[0,:]\ndst = edge_index[1,:]\nsrc = tlx.convert_to_tensor(src, tlx.int32)\ndst = tlx.convert_to_tensor(dst, tlx.int32)\nmsg = tlx.convert_to_tensor(np.random.randn(edge_index.shape[1], 500), dtype=tlx.float32)\n\n\nstart_t = time.time()\nfor j in range(200):\n # msg = tlx.gather(x, src)\n # unsorted_segment_sum(msg, dst, num_nodes)\n # unsorted_segment_mean(msg, dst, num_nodes)\n unsorted_segment_max(msg, dst, num_nodes)\nprint(\"{:.3f}\".format(time.time()-start_t))\n# pf.stop()\n# print(pf.output_text(unicode=True, color=True))\n\n\ndst = tlx.convert_to_numpy(dst)\nidx = np.argsort(dst)\ndst = tlx.gather(tlx.convert_to_tensor(dst, dtype=tlx.int32), tlx.convert_to_tensor(idx,dtype=tlx.int32))\n\n# pf.start()\nstart_t = time.time()\nfor j in range(200):\n # msg = tlx.gather(x, src)\n # segment_sum(msg, dst, num_nodes)\n # segment_mean(msg, dst, num_nodes)\n segment_max(msg, dst, num_nodes)\nprint(\"{:.3f}\".format(time.time()-start_t))\n# pf.stop()\n# print(pf.output_text(unicode=True, color=True))",
"import numpy as np\nimport tensorlayerx as tlx\nimport gammagl.mpops as mpops\nfrom .num_nodes import maybe_num_nodes\nfrom .check import check_is_numpy\n\n\ndef coalesce(edge_index, edge_attr=None, num_nodes=None, reduce=\"add\", is_sorted=False, sort_by_row=True):\n \"\"\"Row-wise sorts :obj:`edge_index` and removes its duplicated entries.\n Duplicate entries in :obj:`edge_attr` are merged by scattering them\n together according to the given :obj:`reduce` option.\n\n Args:\n edge_index (LongTensor): The edge indices.\n edge_attr (Tensor or List[Tensor], optional): Edge weights or multi-\n dimensional edge features.\n If given as a list, will re-shuffle and remove duplicates for all\n its entries. (default: :obj:`None`)\n num_nodes (int, optional): The number of nodes, *i.e.*\n :obj:`max_val + 1` of :attr:`edge_index`. (default: :obj:`None`)\n reduce (string, optional): The reduce operation to use for merging edge\n features (:obj:`\"add\"`, :obj:`\"mean\"`, :obj:`\"min\"`, :obj:`\"max\"`,\n :obj:`\"mul\"`). (default: :obj:`\"add\"`)\n is_sorted (bool, optional): If set to :obj:`True`, will expect\n :obj:`edge_index` to be already sorted row-wise.\n sort_by_row (bool, optional): If set to :obj:`False`, will sort\n :obj:`edge_index` column-wise.\n\n :rtype: :class:`LongTensor` if :attr:`edge_attr` is :obj:`None`, else\n (:class:`LongTensor`, :obj:`Tensor` or :obj:`List[Tensor]]`)\n \"\"\"\n if tlx.is_tensor(edge_index):\n edge_index = tlx.convert_to_numpy(edge_index)\n nnz = edge_index.shape[1]\n\n num_nodes = maybe_num_nodes(edge_index, num_nodes)\n idx = np.zeros(nnz+1)\n idx[0] = -1\n idx[1:] = edge_index[1 - int(sort_by_row)]\n idx[1:] = (np.add(np.multiply(idx[1:], num_nodes), edge_index[int(sort_by_row)]))\n\n if not is_sorted:\n perm = np.argsort(idx[1:])\n idx[1:] = np.sort(idx[1:])\n edge_index = edge_index[:, perm]\n if edge_attr is not None and tlx.ops.is_tensor(edge_attr):\n edge_attr = tlx.gather(edge_attr, tlx.convert_to_tensor(perm), axis=0)\n elif edge_attr is not None and check_is_numpy(edge_attr):\n edge_attr = edge_attr[perm]\n elif edge_attr is not None: # edge_attr is List.\n edge_attr = [tlx.gather(e, perm, axis=0) for e in edge_attr]\n\n mask = idx[1:] > idx[:-1]\n\n # Only perform expensive merging in case there exists duplicates:\n if mask.all():\n edge_index = tlx.convert_to_tensor(edge_index, dtype=tlx.int64)\n return edge_index if edge_attr is None else (edge_index, edge_attr)\n\n edge_index = edge_index[:, mask]\n edge_index = tlx.convert_to_tensor(edge_index, dtype=tlx.int64)\n if edge_attr is None:\n return edge_index\n\n idx = np.arange(0, nnz)\n idx = tlx.convert_to_tensor(idx - (1 - mask).cumsum(axis=0))\n\n if tlx.ops.is_tensor(edge_attr):\n edge_attr = mpops.segment_sum(edge_attr, idx)\n\n return edge_index, edge_attr"
] | [
[
"numpy.argsort",
"numpy.load",
"numpy.max",
"numpy.random.randn"
],
[
"numpy.multiply",
"numpy.arange",
"numpy.sort",
"numpy.argsort",
"numpy.zeros"
]
] |
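The `coalesce` utility quoted in the row above sorts `edge_index` row-wise and merges duplicate edges by segment-summing their attributes. Below is a minimal usage sketch, assuming the module path given in the file_path column (`gammagl/utils/coalesce.py`) and a backend where `gammagl.mpops.segment_sum` is available; the toy edge list and the expected values in the comments are illustrative, not taken from the repository.

```python
import numpy as np
import tensorlayerx as tlx
# module path taken from the file_path column; a top-level re-export may also exist
from gammagl.utils.coalesce import coalesce

# Edge (0 -> 1) appears twice; with the default reduce="add" its weights are merged.
edge_index = tlx.convert_to_tensor(np.array([[0, 0, 1], [1, 1, 2]]), dtype=tlx.int64)
edge_attr = tlx.convert_to_tensor(np.array([[1.0], [2.0], [3.0]]), dtype=tlx.float32)

new_index, new_attr = coalesce(edge_index, edge_attr, num_nodes=3)
# expected: new_index -> [[0, 1], [1, 2]], new_attr -> [[3.0], [3.0]]
```

Passing `edge_attr` as a backend tensor matters here: the quoted implementation only reduces duplicate attributes when `tlx.ops.is_tensor(edge_attr)` is true; a plain NumPy array is permuted but not merged.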
karimul/ebm-sampling | [
"c8c8565a21fde52ac71598f20625857c4ccb8b67",
"c8c8565a21fde52ac71598f20625857c4ccb8b67"
] | [
"sampling/resgld.py",
"models/mnist_model.py"
] | [
"import torch\nimport numpy as np\nfrom autograd.numpy import sqrt\n\ndef gen_image_resgld(label, FLAGS, model, im_neg, num_steps, sample=False):\n\n im_noise = torch.randn_like(im_neg).detach()\n\n T_multiply=0.9\n T = 0.9\n var=0.1\n resgld_beta_high = im_neg\n resgld_beta_low = im_neg\n swaps = 0\n\n noise_scale = sqrt(2e-6 * FLAGS.step_lr * T)\n\n print(\"noise_scale : \", noise_scale)\n print(\"noise_scale * T_multiply: \", noise_scale* T_multiply)\n\n im_negs_samples = []\n\n for i in range(num_steps):\n im_noise.normal_()\n\n resgld_beta_low = resgld_beta_low + noise_scale * im_noise\n resgld_beta_high = resgld_beta_high + noise_scale * T_multiply * im_noise\n\n resgld_beta_high.requires_grad_(requires_grad=True)\n energy_high = model.forward(resgld_beta_high, label)\n\n resgld_beta_low.requires_grad_(requires_grad=True)\n energy_low = model.forward(resgld_beta_low, label)\n\n im_grad_low = torch.autograd.grad([energy_low.sum()], [resgld_beta_low])[0]\n im_grad_high = torch.autograd.grad([energy_high.sum()], [resgld_beta_high])[0]\n \n if i == num_steps - 1:\n im_neg_orig = resgld_beta_low\n resgld_beta_low = resgld_beta_low - FLAGS.step_lr * im_grad_low \n resgld_beta_high = resgld_beta_high - FLAGS.step_lr * im_grad_high \n\n if FLAGS.dataset in (\"cifar10\", \"celeba\", \"cats\"):\n n = 128\n elif FLAGS.dataset == \"celebahq\":\n # Save space\n n = 128\n elif FLAGS.dataset == \"lsun\":\n # Save space\n n = 32\n elif FLAGS.dataset == \"object\":\n # Save space\n n = 32\n elif FLAGS.dataset == \"mnist\":\n n = 128\n elif FLAGS.dataset == \"imagenet\":\n n = 32\n elif FLAGS.dataset == \"stl\":\n n = 32\n\n im_neg_kl = im_neg_orig[:n]\n if sample:\n pass\n else:\n energy = model.forward(im_neg_kl, label)\n im_grad = torch.autograd.grad([energy.sum()], [im_neg_kl], create_graph=True)[0]\n\n im_neg_kl = im_neg_kl - FLAGS.step_lr * im_grad[:n]\n im_neg_kl = torch.clamp(im_neg_kl, 0, 1)\n else:\n resgld_beta_low = resgld_beta_low - FLAGS.step_lr * im_grad_low\n resgld_beta_high = resgld_beta_high - FLAGS.step_lr * im_grad_high * T_multiply\n\n dT = 1 / T - 1 / (T * T_multiply)\n swap_rate = torch.exp(dT * (energy_low - energy_high - dT * var))\n intensity_r = 0.1\n # print(\"swap_rate\", swap_rate)\n swap_rate = swap_rate.mean().item()\n print(\"swap_rate\", swap_rate)\n random = np.random.uniform(0, 1)\n print(\"random\", random)\n if random < intensity_r * swap_rate:\n resgld_beta_high, resgld_beta_low = resgld_beta_low, resgld_beta_high\n swaps += 1\n print(\"swaps : \", swaps)\n\n im_neg = resgld_beta_low.detach()\n\n if sample:\n im_negs_samples.append(im_neg)\n\n im_neg = torch.clamp(im_neg, 0, 1)\n\n if sample:\n return im_neg, im_neg_kl, im_negs_samples, np.abs(im_grad_low.detach().cpu().numpy()).mean()\n else:\n return im_neg, im_neg_kl, np.abs(im_grad_low.detach().cpu().numpy()).mean()",
"import torch.nn as nn\nfrom models.network import swish, CondResBlock\n\nclass MNISTModel(nn.Module):\n def __init__(self, args):\n super(MNISTModel, self).__init__()\n self.act = swish\n # self.relu = torch.nn.ReLU(inplace=True)\n\n self.args = args\n self.filter_dim = args.filter_dim\n self.init_main_model()\n self.init_label_map()\n self.filter_dim = args.filter_dim\n\n # self.act = self.relu\n self.cond = args.cond\n self.sigmoid = args.sigmoid\n\n\n def init_main_model(self):\n args = self.args\n filter_dim = self.filter_dim\n im_size = 28\n self.conv1 = nn.Conv2d(1, filter_dim, kernel_size=3, stride=1, padding=1)\n self.res1 = CondResBlock(args, filters=filter_dim, latent_dim=1, im_size=im_size)\n self.res2 = CondResBlock(args, filters=2*filter_dim, latent_dim=1, im_size=im_size)\n\n self.res3 = CondResBlock(args, filters=4*filter_dim, latent_dim=1, im_size=im_size)\n self.energy_map = nn.Linear(filter_dim*8, 1)\n\n\n def init_label_map(self):\n args = self.args\n\n self.map_fc1 = nn.Linear(10, 256)\n self.map_fc2 = nn.Linear(256, 256)\n\n def main_model(self, x, latent):\n x = x.view(-1, 1, 28, 28)\n x = self.act(self.conv1(x))\n x = self.res1(x, latent)\n x = self.res2(x, latent)\n x = self.res3(x, latent)\n x = self.act(x)\n x = x.mean(dim=2).mean(dim=2)\n energy = self.energy_map(x)\n\n return energy\n\n def label_map(self, latent):\n x = self.act(self.map_fc1(latent))\n x = self.map_fc2(x)\n\n return x\n\n def forward(self, x, latent):\n args = self.args\n x = x.view(x.size(0), -1)\n\n if self.cond:\n latent = self.label_map(latent)\n else:\n latent = None\n\n energy = self.main_model(x, latent)\n\n return energy"
] | [
[
"torch.exp",
"numpy.random.uniform",
"torch.clamp",
"torch.randn_like"
],
[
"torch.nn.Linear",
"torch.nn.Conv2d"
]
] |
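The `gen_image_resgld` sampler quoted in the row above runs two Langevin chains at different temperatures and occasionally swaps their states (replica-exchange SGLD). The swap criterion can be isolated into a small self-contained sketch; the default constants mirror the quoted code, while the helper function and the toy energies are illustrative.

```python
import torch

def should_swap(energy_low, energy_high, T=0.9, T_multiply=0.9, var=0.1, intensity_r=0.1):
    """Replica-exchange test: swap the chains with probability ~ intensity_r * swap_rate."""
    dT = 1.0 / T - 1.0 / (T * T_multiply)
    swap_rate = torch.exp(dT * (energy_low - energy_high - dT * var)).mean().item()
    return torch.rand(1).item() < intensity_r * swap_rate

# toy energies for the low- and high-temperature chains
print(should_swap(torch.tensor([1.2]), torch.tensor([0.8])))
```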
jkkummerfeld/lamb | [
"769adaa76394dc74746c2fd8d31afe9c3ca7895b",
"769adaa76394dc74746c2fd8d31afe9c3ca7895b"
] | [
"lamb/nascell.py",
"lamb/training.py"
] | [
"# Copyright 2018 DeepMind Technologies Limited. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\n\"\"\"rnn_cell.NASCell adapted to support transforms.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport tensorflow.compat.v1 as tf\n\n\nclass NASCell(tf.nn.rnn_cell.RNNCell):\n \"\"\"Neural Architecture Search (NAS) recurrent network cell.\n\n This implements the recurrent cell from the paper:\n\n https://arxiv.org/abs/1611.01578\n\n Barret Zoph and Quoc V. Le.\n \"Neural Architecture Search with Reinforcement Learning\" Proc. ICLR 2017.\n\n The class uses an optional projection layer.\n \"\"\"\n\n def __init__(self, num_units, num_proj=None,\n use_biases=False, reuse=None,\n initializer=None,\n input_transform=None,\n state_transform=None,\n update_transform=None):\n \"\"\"Initialize the parameters for a NAS cell.\n\n Args:\n num_units: int, The number of units in the NAS cell\n num_proj: (optional) int, The output dimensionality for the projection\n matrices. If None, no projection is performed.\n use_biases: (optional) bool, If True then use biases within the cell. This\n is False by default.\n reuse: (optional) Python boolean describing whether to reuse variables\n in an existing scope. If not `True`, and the existing scope already has\n the given variables, an error is raised.\n initializer: Initializer for the variables.\n input_transform: None, or a function of one argument that\n massages the input in some way. 
For example, variational\n dropout can be implemted by passing a Dropout object here.\n state_transform: Similar to input_transform, this is\n applied to the recurrent state.\n update_transform: Similar to input_transform, this is\n applied to the proposed update ('j').\n \"\"\"\n super(NASCell, self).__init__(_reuse=reuse)\n self._num_units = num_units\n self._num_proj = num_proj\n self._use_biases = use_biases\n self._reuse = reuse\n\n if num_proj is not None:\n self._state_size = tf.nn.rnn_cell.LSTMStateTuple(num_units, num_proj)\n self._output_size = num_proj\n else:\n self._state_size = tf.nn.rnn_cell.LSTMStateTuple(num_units, num_units)\n self._output_size = num_units\n self._initializer = initializer\n self._input_transform = input_transform\n self._state_transform = state_transform\n assert update_transform is None\n\n @property\n def state_size(self):\n return self._state_size\n\n @property\n def output_size(self):\n return self._output_size\n\n def call(self, inputs, state):\n \"\"\"Run one step of NAS Cell.\n\n Args:\n inputs: input Tensor, 2D, batch x num_units.\n state: This must be a tuple of state Tensors, both `2-D`, with column\n sizes `c_state` and `m_state`.\n\n Returns:\n A tuple containing:\n - A `2-D, [batch x output_dim]`, Tensor representing the output of the\n NAS Cell after reading `inputs` when previous state was `state`.\n Here output_dim is:\n num_proj if num_proj was set,\n num_units otherwise.\n - Tensor(s) representing the new state of NAS Cell after reading `inputs`\n when the previous state was `state`. Same type and shape(s) as `state`.\n\n Raises:\n ValueError: If input size cannot be inferred from inputs via\n static shape inference.\n \"\"\"\n sigmoid = tf.sigmoid\n tanh = tf.tanh\n relu = tf.nn.relu\n\n num_proj = self._num_units if self._num_proj is None else self._num_proj\n\n def maybe_transform(transform, x):\n if transform is None:\n return x\n else:\n return transform(x)\n\n (c_prev, m_prev) = state\n m_prev = maybe_transform(self._state_transform, m_prev)\n\n dtype = inputs.dtype\n input_size = inputs.get_shape().with_rank(2)[1]\n inputs = maybe_transform(self._input_transform, inputs)\n if input_size.value is None:\n raise ValueError(\"Could not infer input size from inputs.get_shape()[-1]\")\n # Variables for the NAS cell. 
W_m is all matrices multiplying the\n # hiddenstate and W_inputs is all matrices multiplying the inputs.\n concat_w_m = tf.get_variable(\n \"recurrent_kernel\", [num_proj, 8 * self._num_units],\n initializer=self._initializer, dtype=dtype)\n concat_w_inputs = tf.get_variable(\n \"kernel\", [input_size.value, 8 * self._num_units],\n initializer=self._initializer, dtype=dtype)\n\n m_matrix = tf.matmul(m_prev, concat_w_m)\n inputs_matrix = tf.matmul(inputs, concat_w_inputs)\n\n if self._use_biases:\n b = tf.get_variable(\n \"bias\",\n shape=[8 * self._num_units],\n initializer=tf.zeros_initializer(),\n dtype=dtype)\n m_matrix = tf.nn.bias_add(m_matrix, b)\n\n # The NAS cell branches into 8 different splits for both the hiddenstate\n # and the input\n m_matrix_splits = tf.split(axis=1, num_or_size_splits=8,\n value=m_matrix)\n inputs_matrix_splits = tf.split(axis=1, num_or_size_splits=8,\n value=inputs_matrix)\n\n # First layer\n layer1_0 = sigmoid(inputs_matrix_splits[0] + m_matrix_splits[0])\n layer1_1 = relu(inputs_matrix_splits[1] + m_matrix_splits[1])\n layer1_2 = sigmoid(inputs_matrix_splits[2] + m_matrix_splits[2])\n layer1_3 = relu(inputs_matrix_splits[3] * m_matrix_splits[3])\n layer1_4 = tanh(inputs_matrix_splits[4] + m_matrix_splits[4])\n layer1_5 = sigmoid(inputs_matrix_splits[5] + m_matrix_splits[5])\n layer1_6 = tanh(inputs_matrix_splits[6] + m_matrix_splits[6])\n layer1_7 = sigmoid(inputs_matrix_splits[7] + m_matrix_splits[7])\n\n # Second layer\n l2_0 = tanh(layer1_0 * layer1_1)\n l2_1 = tanh(layer1_2 + layer1_3)\n l2_2 = tanh(layer1_4 * layer1_5)\n l2_3 = sigmoid(layer1_6 + layer1_7)\n\n # Inject the cell\n l2_0 = tanh(l2_0 + c_prev)\n\n # Third layer\n l3_0_pre = l2_0 * l2_1\n new_c = l3_0_pre # create new cell\n l3_0 = l3_0_pre\n l3_1 = tanh(l2_2 + l2_3)\n\n # Final layer\n new_m = tanh(l3_0 * l3_1)\n\n # Projection layer if specified\n if self._num_proj is not None:\n concat_w_proj = tf.get_variable(\n \"projection_weights\", [self._num_units, self._num_proj],\n dtype)\n new_m = tf.matmul(new_m, concat_w_proj)\n\n new_state = tf.nn.rnn_cell.LSTMStateTuple(new_c, new_m)\n return new_m, new_state\n",
"# Copyright 2018 DeepMind Technologies Limited. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\n\"\"\"The training loop.\"\"\"\n\n# pylint: disable=missing-docstring\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\nimport random\nimport time\n\nfrom absl import flags\nfrom absl import logging\nfrom lamb import corpus\nfrom lamb import evaluation\nfrom lamb import lamb_flags\nfrom lamb import lm\nfrom lamb import monitoring\nfrom lamb import utils\nfrom lamb.averaged import Averaged\nfrom lamb.dyneval import Dyneval\nimport numpy as np\nimport six\nimport tensorflow.compat.v1 as tf\nfrom tensorflow.contrib import framework as contrib_framework\n\nnest = contrib_framework.nest\nFLAGS = flags.FLAGS\n\n\ndef _load_checkpoint(checkpoint_filename, extra_vars, trainable_only=False):\n if tf.gfile.IsDirectory(checkpoint_filename):\n checkpoint_filename = tf.train.latest_checkpoint(checkpoint_filename)\n logging.info('Loading checkpoint %s', checkpoint_filename)\n saveables = (tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES) +\n tf.get_collection(tf.GraphKeys.SAVEABLE_OBJECTS))\n if trainable_only:\n saveables = list(set(saveables) & set(tf.trainable_variables()))\n # Try to restore all saveables, if that fails try without extra_vars.\n try:\n saver = tf.train.Saver(var_list=saveables)\n saver.restore(tf.get_default_session(), checkpoint_filename)\n except (ValueError, tf.errors.NotFoundError):\n logging.info('Missing key in checkpoint. Trying old checkpoint format.')\n saver = tf.train.Saver(var_list=list(set(saveables) - set(extra_vars)))\n saver.restore(tf.get_default_session(), checkpoint_filename)\n\n\ndef train(tuner, data, vocab, config, experiment_dir, seed=None):\n \"\"\"Main training loop.\n\n Args:\n tuner: .\n data: .\n vocab: .\n config: A config object (see get_config()).\n experiment_dir: Path of a directory where to log training events.\n seed: suitable for tf.set_random_seed\n\n Returns:\n The second return value of _maybe_report_measure.\n \"\"\"\n\n if FLAGS.save_config:\n config.save(os.path.join(experiment_dir, 'config'))\n\n session_config = tf.ConfigProto(\n log_device_placement=FLAGS.log_device_placement)\n with tf.Graph().as_default():\n tf.set_random_seed(seed)\n logging.info('Creating the model.')\n config = lamb_flags.handle_config_defaults(config, lm.LM.num_params)\n model = lm.LM(config)\n logging.info('Model created.')\n\n if FLAGS.trigger_averaging_turns >= 0:\n averaged = Averaged(tf.trainable_variables())\n else:\n averaged = None\n\n # The monitor and the lr scheduler have some state that we need to\n # checkpoint in case of preemption. 
We do that by serializing them into the\n # graph.\n training_state = utils.TFSerializer('training_state')\n def sync_training_state_from_graph():\n state = training_state.retrieve()\n logging.info('Loaded training state: %s', state)\n if state.get('monitor_state', None):\n monitor.set_state(state['monitor_state'])\n if state.get('learning_rate_state', None):\n lr_scheduler.set_state(state['learning_rate_state'])\n def sync_training_state_to_graph():\n state = {\n # To help maintain backwards compatibility.\n 'state_version': 1,\n 'monitor_state': monitor.state(),\n 'learning_rate_state': lr_scheduler.state()\n }\n training_state.store(state)\n\n # Checkpoint saving.\n logging.info('Creating savers.')\n best_turn_saver = tf.train.Saver(max_to_keep=1, save_relative_paths=True)\n last_turn_saver = tf.train.Saver(max_to_keep=1, save_relative_paths=True)\n best_checkpoint_dir = os.path.join(experiment_dir, 'best/')\n last_checkpoint_dir = os.path.join(experiment_dir, 'last/')\n best_checkpoint_filename = os.path.join(best_checkpoint_dir, 'model.ckpt')\n last_checkpoint_filename = os.path.join(last_checkpoint_dir, 'model.ckpt')\n # Upon resuming from a checkpoint the saver won't count the old checkpoints\n # against max_to_keep. Recover its state.\n best_checkpoint_states = tf.train.get_checkpoint_state(best_checkpoint_dir)\n last_checkpoint_states = tf.train.get_checkpoint_state(last_checkpoint_dir)\n if best_checkpoint_states is not None:\n logging.info('Previous best checkpoint paths: %s',\n best_checkpoint_states.all_model_checkpoint_paths)\n best_turn_saver.recover_last_checkpoints(\n best_checkpoint_states.all_model_checkpoint_paths)\n if last_checkpoint_states is not None:\n logging.info('Previous last checkpoint paths: %s',\n last_checkpoint_states.all_model_checkpoint_paths)\n last_turn_saver.recover_last_checkpoints(\n last_checkpoint_states.all_model_checkpoint_paths)\n def maybe_save_checkpoint(saver, filename):\n if FLAGS.save_checkpoints:\n logging.info('Saving checkpoint %s', filename)\n sync_training_state_to_graph()\n saver.save(tf.get_default_session(), filename,\n global_step=model.global_step())\n # Callback for monitor.\n def save_best_checkpoint():\n maybe_save_checkpoint(best_turn_saver, best_checkpoint_filename)\n # Callback for train_loop.\n def save_last_checkpoint():\n maybe_save_checkpoint(last_turn_saver, last_checkpoint_filename)\n\n # The monitor keeps track of the best result so far, does early stopping.\n monitor = monitoring.TrainingMonitor(\n max_turns=config.turns,\n tuner=tuner,\n new_best_fn=save_best_checkpoint,\n es_turns=FLAGS.early_stopping_turns,\n es_rampup_turns=FLAGS.early_stopping_rampup_turns,\n es_slowest_rate=FLAGS.early_stopping_slowest_rate)\n # Set up the learning rate scheduler\n lr_scheduler = monitoring.LearningRateScheduler(\n base_learning_rate=config.learning_rate,\n monitor=monitor,\n drop_multiplier=config.drop_learning_rate_multiplier,\n drop_turns=config.drop_learning_rate_turns,\n drop_at_turn_at_the_latest=config.drop_learning_rate_at_the_latest)\n\n with tf.Session(config=session_config) as sess:\n logging.info('Initializing model.')\n sess.run(tf.global_variables_initializer())\n\n # Load the checkpoint specified by the user or try to resume from last.\n if FLAGS.load_checkpoint:\n checkpoint_filename = os.path.join(experiment_dir,\n FLAGS.load_checkpoint)\n _load_checkpoint(checkpoint_filename, training_state.variables(),\n not FLAGS.load_optimizer_state)\n if FLAGS.load_optimizer_state:\n sync_training_state_from_graph()\n if 
averaged and FLAGS.load_averaged:\n averaged.switch_to_average()\n averaged.reset()\n else:\n try:\n _load_checkpoint(last_checkpoint_dir, training_state.variables())\n sync_training_state_from_graph()\n # TODO(melisgl): The training iterator state and last_state are not\n # saved currently. They should be, of course, but failing that random\n # initialization of dataset iterators ensures that there is no bias\n # introduced if training is repeatedly interrupted and continued from\n # a checkpoint. So use a random seed in this case.\n random.seed()\n np.random.seed()\n except (ValueError, tf.errors.NotFoundError):\n logging.info('Last checkpoint file %s does not exist.',\n last_checkpoint_filename)\n\n # Takes a lot of space. Disabled for now.\n # summary_writer = tf.summary.FileWriter(\n # experiment_dir, graph=sess.graph,\n # flush_secs=FLAGS.summary_flush_secs)\n summary_writer = None\n\n if FLAGS.dyneval:\n dyneval = Dyneval(model.clipped_grads_and_vars,\n learning_rate=FLAGS.dyneval_learning_rate,\n decay_rate=FLAGS.dyneval_decay_rate,\n epsilon=FLAGS.dyneval_epsilon)\n else:\n dyneval = None\n\n if config.turns > 0:\n logging.info('Starting training.')\n else:\n logging.info('Starting testing.')\n metrics = _train_loop(\n monitor, lr_scheduler, averaged, dyneval, model, data, vocab, config,\n summary_writer, save_last_checkpoint)\n logging.info('Training finished.')\n\n return metrics, monitor.turn()\n\n\ndef _train_loop(monitor, lr_scheduler, averaged, dyneval, model,\n data, vocab, config, summary_writer, save_last_checkpoint_fn):\n source_iterator = corpus.get_batches(\n data['training'], vocab,\n config.batch_size,\n config.max_time_steps,\n num_samples=config.num_training_samples,\n episodic=FLAGS.episodic,\n deterministic=False,\n conditioning_separator=config.conditioning_separator)\n last_state = None\n steps_per_sec = 0.0\n\n def munge_max_batches_flag_value(max_batches):\n if max_batches == -1:\n return None\n else:\n return max_batches\n\n def evaluate0():\n # KLUDGE: This depends on monitor calling this function before using the\n # worst target.\n monitor.set_es_worst_target(es_worst_target())\n global_step = model.global_step()\n logging.info('turn: %s (eval), step: %d (opt) (%.2f/s)',\n monitor.turn(), global_step, steps_per_sec)\n if config.accum_batch_size == -1:\n eval_batch_size = config.batch_size\n else:\n eval_batch_size = config.accum_batch_size\n training_xe, valid_xe, test_xe = evaluation.evaluate_all(\n model, data, vocab, eval_batch_size, config.max_time_steps,\n FLAGS.min_non_episodic_eval_examples_per_stripe,\n munge_max_batches_flag_value(FLAGS.max_training_eval_batches),\n munge_max_batches_flag_value(FLAGS.max_eval_eval_batches),\n munge_max_batches_flag_value(FLAGS.max_test_eval_batches),\n FLAGS.episodic,\n config.eval_softmax_temperature,\n config.eval_softmax_temperature_estimation_num_tokens,\n config.eval_method,\n config.num_eval_samples,\n config.eval_power_mean_power,\n config.eval_dropout_multiplier,\n config.validation_prediction_file,\n dyneval,\n conditioning_separator=config.conditioning_separator)\n return valid_xe, {'training_xe': training_xe,\n 'test_xe': test_xe,\n 'global_step': global_step}\n\n def evaluate():\n if monitor.averaging_triggered():\n with averaged:\n logging.info('Evaluating with averaged parameters.')\n return evaluate0()\n else:\n return evaluate0()\n\n def add_summary(summary_str):\n if summary_writer is not None:\n summary_writer.add_summary(summary_str, model.global_step())\n\n def add_summaries_for_metrics():\n 
metrics = monitor.metrics()\n summary = tf.Summary()\n for key in metrics:\n summary.value.add(tag=key, simple_value=metrics[key])\n add_summary(summary)\n\n # Compute the early stopping worst target. It may change when the learning\n # rate is dropped.\n def es_worst_target():\n if FLAGS.early_stopping_worst_xe_target is None:\n return -1.0\n else:\n targets_for_lr_drops = [\n float(string) for string\n in FLAGS.early_stopping_worst_xe_target.split(',')\n if string\n ]\n num_drops = lr_scheduler.num_drops()\n if targets_for_lr_drops:\n return targets_for_lr_drops[min(num_drops, len(targets_for_lr_drops)-1)]\n else:\n return None\n\n def log_summaries(summary):\n utils.log_scalar_summaries(summary)\n add_summary(summary)\n\n while monitor.next_turn(evaluate):\n\n logging.info('metrics: %r', monitor.metrics())\n logging.info(\n 'early stopping: turns: %s, worst xe target: %s, best expected xe: %s',\n monitor.effective_es_turns(), monitor.es_worst_target(),\n monitor.best_expected_xe())\n add_summaries_for_metrics()\n\n # If enough turns passed without improvement, turn on averaging.\n best_turn = monitor.best_xe_turn() or 0\n num_tuns_since_best = monitor.turn() - best_turn\n if (averaged and\n ((monitor.turn() > 0 and\n num_tuns_since_best >= FLAGS.trigger_averaging_turns) or\n (FLAGS.trigger_averaging_at_the_latest >= 0 and\n monitor.turn() >= FLAGS.trigger_averaging_at_the_latest))):\n monitor.set_averaging_triggered(True)\n\n start_time = time.time()\n sum_cost = 0.0\n sum_tokens = 0\n for _ in range(FLAGS.steps_per_turn):\n cost, summary, last_state, num_tokens = train_1(\n model, source_iterator, last_state,\n learning_rate=lr_scheduler.learning_rate(),\n accum_batch_size=model.config.accum_batch_size)\n if monitor.averaging_triggered():\n averaged.take_sample()\n sum_cost += cost\n sum_tokens += num_tokens\n # Log summaries at the very beginning of training to make it easier to\n # debug initialization problems.\n if (model.global_step() == 1 or\n (model.global_step()+1) %\n FLAGS.print_training_stats_every_num_steps == 1):\n log_summaries(summary)\n logging.info('avg training cost at step %d: %.5f',\n model.global_step(), sum_cost / sum_tokens)\n sum_cost = 0.0\n sum_tokens = 0\n steps_per_sec = FLAGS.steps_per_turn / (time.time()-start_time)\n\n # TODO(melisgl): Is this the right frequency for saving?\n save_last_checkpoint_fn()\n\n metrics = monitor.metrics()\n logging.info('Finished at turn %d for reason: %s',\n monitor.turn(), monitor.finished_reason())\n logging.info('Best XE was %5.5f at turn %d',\n metrics['best_xe'], metrics['best_xe_turn'])\n return metrics\n\n\ndef train_1(model, source_iterator, last_state,\n learning_rate, extra_feed=None, accum_batch_size=-1):\n \"\"\"Trains model for a a single iteration.\"\"\"\n if accum_batch_size == -1:\n cond, cond_len, source, source_len, target = next(source_iterator)\n feed = _make_train_feed(model, cond, cond_len, source, source_len, target,\n last_state, learning_rate, extra_feed)\n batch_size = feed[model.source_len].shape[0]\n num_tokens = feed[model.source_len].sum()\n cost, summary, last_state = model.fit(feed)\n return cost*batch_size, summary, last_state, num_tokens\n else:\n return _train_1_with_accum(model, source_iterator, last_state,\n learning_rate, extra_feed, accum_batch_size)\n\n\ndef _train_1_with_accum(model, source_iterator, last_state,\n learning_rate, extra_feed, accum_batch_size):\n \"\"\"Trains model for a a single iteration.\"\"\"\n cond, cond_len, source, source_len, target = next(source_iterator)\n 
(conds, cond_lens, sources, source_lens,\n targets, last_states) = _maybe_split_batch(\n cond, cond_len, source, source_len, target, last_state, accum_batch_size)\n num_accum_batches = len(sources)\n cost = 0.0\n new_last_states = []\n batch_size = 0\n num_tokens = 0\n for i in six.moves.range(num_accum_batches):\n cond = conds[i] if cond is not None else None\n cond_len = cond_lens[i] if cond_len is not None else None\n source = sources[i]\n source_len = source_lens[i]\n target = targets[i]\n if last_states is not None:\n last_state = last_states[i]\n else:\n last_state = None\n feed = _make_train_feed(model, cond, cond_len, source, source_len, target,\n last_state, learning_rate, extra_feed)\n batch_size1 = feed[model.source_len].shape[0]\n batch_size += batch_size1\n num_tokens += feed[model.source_len].sum()\n cost1, summary1, last_state1 = model.accumulate_gradients(feed)\n cost += cost1*batch_size1\n new_last_states.append(last_state1)\n model.fit_accumulated(feed)\n last_state = _concat_last_states(new_last_states)\n return cost, summary1, last_state, num_tokens\n\n\ndef _make_train_feed(model, cond, cond_len, source, source_len, target,\n last_state, learning_rate, extra_feed=None):\n feed = {}\n model.add_input_to_feed(feed, cond, cond_len, source, source_len, target)\n model.add_dropout_to_feed(feed)\n feed.update({\n model.num_samples: model.config.num_training_samples,\n model.learning_rate: learning_rate\n })\n if extra_feed:\n feed.update(extra_feed)\n if not FLAGS.episodic and last_state is not None:\n # At test time we start from zero state, so let's forget the\n # current state during training too. Simply not feeding the\n # previous state back would be simpler, but it distorts the\n # objective too much.\n if model.config.drop_state_probability > 0.0:\n mask = [None]\n def ensure_mask(x):\n if mask[0] is None:\n mask[0] = np.random.binomial(\n 1, 1.0-model.config.drop_state_probability,\n size=[x.shape[0]*model.config.num_training_samples, 1])\n return mask[0]\n last_state = utils.map_nested(lambda x: ensure_mask(x)*x, last_state)\n feed.update({model.initial_state: last_state})\n return feed\n\n\ndef _maybe_split_batch(cond, cond_len, source, source_len, target, last_state,\n accum_batch_size):\n batch_size = source_len.shape[0]\n assert batch_size % accum_batch_size == 0\n n = batch_size // accum_batch_size\n return (np.split(cond, n, axis=1) if cond is not None else None,\n np.split(cond_len, n, axis=0) if cond_len is not None else None,\n np.split(source, n, axis=1),\n np.split(source_len, n, axis=0),\n np.split(target, n, axis=1),\n _split_last_state(last_state, n) if last_state is not None else None)\n\n\ndef _split_last_state(last_state, n):\n list_of_split_arrays = [np.split(array, n)\n for array in nest.flatten(last_state)]\n list_of_split_states = zip(*list_of_split_arrays)\n return [nest.pack_sequence_as(last_state, split_state)\n for split_state in list_of_split_states]\n\n\ndef _concat_last_states(last_states):\n list_of_flat_states = [nest.flatten(last_state) for last_state in last_states]\n flat_list_of_states = zip(*list_of_flat_states)\n flat_state = [np.concatenate(list_of_states, axis=0) for list_of_states\n in flat_list_of_states]\n return nest.pack_sequence_as(last_states[0], flat_state)\n"
] | [
[
"tensorflow.compat.v1.get_variable",
"tensorflow.compat.v1.split",
"tensorflow.compat.v1.zeros_initializer",
"tensorflow.compat.v1.matmul",
"tensorflow.compat.v1.nn.rnn_cell.LSTMStateTuple",
"tensorflow.compat.v1.nn.bias_add"
],
[
"numpy.split",
"tensorflow.compat.v1.ConfigProto",
"numpy.random.seed",
"tensorflow.compat.v1.train.get_checkpoint_state",
"tensorflow.compat.v1.get_default_session",
"tensorflow.compat.v1.trainable_variables",
"tensorflow.compat.v1.global_variables_initializer",
"numpy.concatenate",
"tensorflow.compat.v1.get_collection",
"tensorflow.compat.v1.Session",
"tensorflow.compat.v1.Graph",
"tensorflow.compat.v1.gfile.IsDirectory",
"numpy.random.binomial",
"tensorflow.compat.v1.set_random_seed",
"tensorflow.compat.v1.train.latest_checkpoint",
"tensorflow.compat.v1.train.Saver",
"tensorflow.compat.v1.Summary"
]
] |
sert121/ivy | [
"286f86e487b0c83d46a3ef8d30aa96316337db32",
"286f86e487b0c83d46a3ef8d30aa96316337db32"
] | [
"ivy/functional/backends/tensorflow/array_api/linear_algebra.py",
"ivy/functional/backends/numpy/core/general.py"
] | [
"# global\nimport tensorflow as tf\nfrom tensorflow.python.types.core import Tensor\nfrom typing import Union, Optional, Tuple, Literal\n\n# local\nfrom ivy import inf\n\n\n# noinspection PyUnusedLocal,PyShadowingBuiltins\ndef vector_norm(x: Tensor,\n axis: Optional[Union[int, Tuple[int]]] = None, \n keepdims: bool = False,\n ord: Union[int, float, Literal[inf, - inf]] = 2)\\\n -> Tensor:\n\n if ord == -float('inf'):\n tn_normalized_vector = tf.reduce_min(tf.abs(x), axis, keepdims)\n elif ord == -1:\n tn_normalized_vector = tf.reduce_sum(tf.abs(x)**ord, axis, keepdims)**(1./ord)\n\n elif ord == 0:\n tn_normalized_vector = tf.reduce_sum(tf.cast(x != 0, 'float32'), axis, keepdims).numpy()\n\n else:\n tn_normalized_vector = tf.linalg.norm(x, ord, axis, keepdims)\n\n if tn_normalized_vector.shape == tuple():\n return tf.expand_dims(tn_normalized_vector, 0)\n return tn_normalized_vector\n",
"\"\"\"\nCollection of Numpy general functions, wrapped to fit Ivy syntax and signature.\n\"\"\"\n\n# global\nimport logging\nimport numpy as _np\nimport math as _math\nfrom operator import mul as _mul\nfrom functools import reduce as _reduce\nimport multiprocessing as _multiprocessing\n\n# local\nimport ivy\nfrom ivy.functional.ivy.core import default_dtype\nfrom ivy.functional.backends.numpy.core.device import _dev_callable\n\n\nDTYPE_TO_STR = {_np.dtype('int8'): 'int8',\n _np.dtype('int16'): 'int16',\n _np.dtype('int32'): 'int32',\n _np.dtype('int64'): 'int64',\n _np.dtype('uint8'): 'uint8',\n _np.dtype('uint16'): 'uint16',\n _np.dtype('uint32'): 'uint32',\n _np.dtype('uint64'): 'uint64',\n 'bfloat16': 'bfloat16',\n _np.dtype('float16'): 'float16',\n _np.dtype('float32'): 'float32',\n _np.dtype('float64'): 'float64',\n _np.dtype('bool'): 'bool',\n\n _np.int8: 'int8',\n _np.int16: 'int16',\n _np.int32: 'int32',\n _np.int64: 'int64',\n _np.uint8: 'uint8',\n _np.uint16: 'uint16',\n _np.uint32: 'uint32',\n _np.uint64: 'uint64',\n _np.float16: 'float16',\n _np.float32: 'float32',\n _np.float64: 'float64',\n _np.bool_: 'bool'}\n\nDTYPE_FROM_STR = {'int8': _np.dtype('int8'),\n 'int16': _np.dtype('int16'),\n 'int32': _np.dtype('int32'),\n 'int64': _np.dtype('int64'),\n 'uint8': _np.dtype('uint8'),\n 'uint16': _np.dtype('uint16'),\n 'uint32': _np.dtype('uint32'),\n 'uint64': _np.dtype('uint64'),\n 'bfloat16': 'bfloat16',\n 'float16': _np.dtype('float16'),\n 'float32': _np.dtype('float32'),\n 'float64': _np.dtype('float64'),\n 'bool': _np.dtype('bool')}\n\n\n# Helpers #\n# --------#\n\ndef _to_dev(x, dev):\n if dev is not None:\n if 'gpu' in dev:\n raise Exception('Native Numpy does not support GPU placement, consider using Jax instead')\n elif 'cpu' in dev:\n pass\n else:\n raise Exception('Invalid device specified, must be in the form [ \"cpu:idx\" | \"gpu:idx\" ],'\n 'but found {}'.format(dev))\n return x\n\n\ndef _flat_array_to_1_dim_array(x):\n return x.reshape((1,)) if x.shape == () else x\n\n\n# API #\n# ----#\n\n# noinspection PyShadowingNames\ndef array(object_in, dtype=None, dev=None):\n return _to_dev(_np.array(object_in, dtype=default_dtype(dtype, object_in)), dev)\n\n\nasarray = array\n\n\ndef is_array(x, exclusive=False):\n if isinstance(x, _np.ndarray):\n return True\n return False\n\n\nequal = lambda x1, x2: x1 == x2\n\n\ndef dtype_bits(dtype_in):\n dtype_str = dtype_to_str(dtype_in)\n if 'bool' in dtype_str:\n return 1\n return int(dtype_str.replace('uint', '').replace('int', '').replace('bfloat', '').replace('float', ''))\n\n\nequal.__name__ = 'equal'\ncopy_array = lambda x: x.copy()\narray_equal = _np.array_equal\nto_numpy = lambda x: x\nto_numpy.__name__ = 'to_numpy'\nto_scalar = lambda x: x.item()\nto_scalar.__name__ = 'to_scalar'\nto_list = lambda x: x.tolist()\nto_list.__name__ = 'to_list'\nshape = lambda x, as_tensor=False: _np.asarray(_np.shape(x)) if as_tensor else x.shape\nshape.__name__ = 'shape'\nget_num_dims = lambda x, as_tensor=False: _np.asarray(len(_np.shape(x))) if as_tensor else len(x.shape)\nminimum = _np.minimum\nmaximum = _np.maximum\nclip = lambda x, x_min, x_max: _np.asarray(_np.clip(x, x_min, x_max))\nround = lambda x: _np.asarray(_np.round(x))\nfloormod = lambda x, y: _np.asarray(x % y)\nfloor = lambda x: _np.asarray(_np.floor(x))\nceil = lambda x: _np.asarray(_np.ceil(x))\nabs = lambda x: _np.asarray(_np.absolute(x))\n\ndef argmax(x, axis=0):\n ret = _np.asarray(_np.argmax(x, axis))\n if ret.shape == ():\n return ret.reshape(-1)\n return ret\n\n\ndef 
argmin(x, axis=0):\n ret = _np.asarray(_np.argmin(x, axis))\n if ret.shape == ():\n return ret.reshape(-1)\n return ret\n\n\ndef cast(x, dtype):\n return x.astype(dtype_from_str(dtype))\n\n\nastype = cast\n\n\n# noinspection PyShadowingNames\ndef arange(stop, start=0, step=1, dtype=None, dev=None):\n if dtype:\n dtype = dtype_from_str(dtype)\n res = _to_dev(_np.arange(start, stop, step=step, dtype=dtype), dev)\n if not dtype:\n if res.dtype == _np.float64:\n return res.astype(_np.float32)\n elif res.dtype == _np.int64:\n return res.astype(_np.int32)\n return res\n\n\ndef linspace(start, stop, num, axis=None, dev=None):\n if axis is None:\n axis = -1\n return _to_dev(_np.linspace(start, stop, num, axis=axis), dev)\n\n\ndef logspace(start, stop, num, base=10., axis=None, dev=None):\n if axis is None:\n axis = -1\n return _to_dev(_np.logspace(start, stop, num, base=base, axis=axis), dev)\n\n\ndef concatenate(xs, axis=-1):\n if xs[0].shape == ():\n return _np.concatenate([_np.expand_dims(x, 0) for x in xs], axis)\n return _np.concatenate(xs, axis)\n\n\nstack = _np.stack\n\n\ndef unstack(x, axis, keepdims=False):\n if x.shape == ():\n return [x]\n x_split = _np.split(x, x.shape[axis], axis)\n if keepdims:\n return x_split\n return [_np.squeeze(item, axis) for item in x_split]\n\n\ndef split(x, num_or_size_splits=None, axis=0, with_remainder=False):\n if x.shape == ():\n if num_or_size_splits is not None and num_or_size_splits != 1:\n raise Exception('input array had no shape, but num_sections specified was {}'.format(num_or_size_splits))\n return [x]\n if num_or_size_splits is None:\n num_or_size_splits = x.shape[axis]\n elif isinstance(num_or_size_splits, int) and with_remainder:\n num_chunks = x.shape[axis] / num_or_size_splits\n num_chunks_int = _math.floor(num_chunks)\n remainder = num_chunks - num_chunks_int\n if remainder != 0:\n num_or_size_splits = [num_or_size_splits]*num_chunks_int + [int(remainder*num_or_size_splits)]\n if isinstance(num_or_size_splits, (list, tuple)):\n num_or_size_splits = _np.cumsum(num_or_size_splits[:-1])\n return _np.split(x, num_or_size_splits, axis)\n\n\nrepeat = _np.repeat\ntile = _np.tile\nconstant_pad = lambda x, pad_width, value=0: _np.pad(_flat_array_to_1_dim_array(x), pad_width, constant_values=value)\nzero_pad = lambda x, pad_width: _np.pad(_flat_array_to_1_dim_array(x), pad_width)\nswapaxes = _np.swapaxes\n\n\ndef transpose(x, axes=None):\n if axes is None:\n num_dims = len(x.shape)\n axes = list(range(num_dims))\n axes.reverse()\n return _np.transpose(x, axes)\n\n\nexpand_dims = _np.expand_dims\nwhere = lambda condition, x1, x2: _np.where(condition, x1, x2)\n\n\ndef indices_where(x):\n where_x = _np.where(x)\n if len(where_x) == 1:\n return _np.expand_dims(where_x[0], -1)\n res = _np.concatenate([_np.expand_dims(item, -1) for item in where_x], -1)\n return res\n\n\nisinf = _np.isinf\n\n\nreshape = _np.reshape\nbroadcast_to = _np.broadcast_to\n\n\ndef squeeze(x, axis=None):\n if x.shape == ():\n if axis is None or axis == 0 or axis == -1:\n return x\n raise Exception('tried to squeeze a zero-dimensional input by axis {}'.format(axis))\n return _np.squeeze(x, axis)\n\n\n\n\n# noinspection PyShadowingNames\ndef zeros_like(x, dtype=None, dev=None):\n if dtype:\n dtype = 'bool_' if dtype == 'bool' else dtype\n dtype = _np.__dict__[dtype]\n else:\n dtype = x.dtype\n return _to_dev(_np.zeros_like(x, dtype=dtype), dev)\n\n\ndef full(shape, fill_value, dtype=None, device=None):\n return _to_dev(_np.full(shape, fill_value, dtype_from_str(default_dtype(dtype, 
fill_value))), device)\n\n\n# noinspection PyShadowingNames\ndef ones_like(x, dtype=None, dev=None):\n if dtype:\n dtype = 'bool_' if dtype == 'bool' else dtype\n dtype = _np.__dict__[dtype]\n else:\n dtype = x.dtype\n return _to_dev(_np.ones_like(x, dtype=dtype), dev)\n\n\n# noinspection PyUnusedLocal\ndef one_hot(indices, depth, dev=None):\n # from https://stackoverflow.com/questions/38592324/one-hot-encoding-using-numpy\n res = _np.eye(depth)[_np.array(indices).reshape(-1)]\n return res.reshape(list(indices.shape) + [depth])\n\n\ncross = _np.cross\nmatmul = lambda x1, x2: _np.matmul(x1, x2)\ncumsum = _np.cumsum\n\n\ndef cumprod(x, axis=0, exclusive=False):\n if exclusive:\n x = _np.swapaxes(x, axis, -1)\n x = _np.concatenate((_np.ones_like(x[..., -1:]), x[..., :-1]), -1)\n res = _np.cumprod(x, -1)\n return _np.swapaxes(res, axis, -1)\n return _np.cumprod(x, axis)\n\n\n# noinspection PyShadowingNames\ndef identity(n, dtype='float32', batch_shape=None, dev=None):\n dtype = 'bool_' if dtype == 'bool' else dtype\n dtype = _np.__dict__[dtype]\n mat = _np.identity(n, dtype=dtype)\n if batch_shape is None:\n return_mat = mat\n else:\n reshape_dims = [1] * len(batch_shape) + [n, n]\n tile_dims = list(batch_shape) + [1, 1]\n return_mat = _np.tile(_np.reshape(mat, reshape_dims), tile_dims)\n return _to_dev(return_mat, dev)\n\n\nmeshgrid = lambda *xs, indexing='ij': _np.meshgrid(*xs, indexing=indexing)\n\n\ndef scatter_flat(indices, updates, size=None, tensor=None, reduction='sum', dev=None):\n target = tensor\n target_given = ivy.exists(target)\n if ivy.exists(size) and ivy.exists(target):\n assert len(target.shape) == 1 and target.shape[0] == size\n if dev is None:\n dev = _dev_callable(updates)\n if reduction == 'sum':\n if not target_given:\n target = _np.zeros([size], dtype=updates.dtype)\n _np.add.at(target, indices, updates)\n elif reduction == 'replace':\n if not target_given:\n target = _np.zeros([size], dtype=updates.dtype)\n target = _np.asarray(target).copy()\n target.setflags(write=1)\n target[indices] = updates\n elif reduction == 'min':\n if not target_given:\n target = _np.ones([size], dtype=updates.dtype) * 1e12\n _np.minimum.at(target, indices, updates)\n if not target_given:\n target = _np.where(target == 1e12, 0., target)\n elif reduction == 'max':\n if not target_given:\n target = _np.ones([size], dtype=updates.dtype) * -1e12\n _np.maximum.at(target, indices, updates)\n if not target_given:\n target = _np.where(target == -1e12, 0., target)\n else:\n raise Exception('reduction is {}, but it must be one of \"sum\", \"min\" or \"max\"'.format(reduction))\n return _to_dev(target, dev)\n\n\n# noinspection PyShadowingNames\ndef scatter_nd(indices, updates, shape=None, tensor=None, reduction='sum', dev=None):\n target = tensor\n target_given = ivy.exists(target)\n if ivy.exists(shape) and ivy.exists(target):\n assert ivy.shape_to_tuple(target.shape) == ivy.shape_to_tuple(shape)\n if dev is None:\n dev = _dev_callable(updates)\n shape = list(shape) if ivy.exists(shape) else list(tensor.shape)\n indices_flat = indices.reshape(-1, indices.shape[-1]).T\n indices_tuple = tuple(indices_flat) + (Ellipsis,)\n if reduction == 'sum':\n if not target_given:\n target = _np.zeros(shape, dtype=updates.dtype)\n _np.add.at(target, indices_tuple, updates)\n elif reduction == 'replace':\n if not target_given:\n target = _np.zeros(shape, dtype=updates.dtype)\n target = _np.asarray(target).copy()\n target.setflags(write=1)\n target[indices_tuple] = updates\n elif reduction == 'min':\n if not 
target_given:\n target = _np.ones(shape, dtype=updates.dtype) * 1e12\n _np.minimum.at(target, indices_tuple, updates)\n if not target_given:\n target = _np.where(target == 1e12, 0., target)\n elif reduction == 'max':\n if not target_given:\n target = _np.ones(shape, dtype=updates.dtype) * -1e12\n _np.maximum.at(target, indices_tuple, updates)\n if not target_given:\n target = _np.where(target == -1e12, 0., target)\n else:\n raise Exception('reduction is {}, but it must be one of \"sum\", \"min\" or \"max\"'.format(reduction))\n return _to_dev(target, dev)\n\n\ndef gather(params, indices, axis=-1, dev=None):\n if dev is None:\n dev = _dev_callable(params)\n return _to_dev(_np.take_along_axis(params, indices, axis), dev)\n\n\ndef gather_nd(params, indices, dev=None):\n if dev is None:\n dev = _dev_callable(params)\n indices_shape = indices.shape\n params_shape = params.shape\n num_index_dims = indices_shape[-1]\n result_dim_sizes_list = [_reduce(_mul, params_shape[i + 1:], 1) for i in range(len(params_shape) - 1)] + [1]\n result_dim_sizes = _np.array(result_dim_sizes_list)\n implicit_indices_factor = int(result_dim_sizes[num_index_dims - 1].item())\n flat_params = _np.reshape(params, (-1,))\n new_shape = [1] * (len(indices_shape) - 1) + [num_index_dims]\n indices_scales = _np.reshape(result_dim_sizes[0:num_index_dims], new_shape)\n indices_for_flat_tiled = _np.tile(_np.reshape(_np.sum(indices * indices_scales, -1, keepdims=True), (-1, 1)), (1, implicit_indices_factor))\n implicit_indices = _np.tile(_np.expand_dims(_np.arange(implicit_indices_factor), 0), (indices_for_flat_tiled.shape[0], 1))\n indices_for_flat = indices_for_flat_tiled + implicit_indices\n flat_indices_for_flat = _np.reshape(indices_for_flat, (-1,)).astype(_np.int32)\n flat_gather = _np.take(flat_params, flat_indices_for_flat, 0)\n new_shape = list(indices_shape[:-1]) + list(params_shape[num_index_dims:])\n res = _np.reshape(flat_gather, new_shape)\n return _to_dev(res, dev)\n\n\ndef linear_resample(x, num_samples, axis=-1):\n x_shape = list(x.shape)\n num_x_dims = len(x_shape)\n axis = axis % num_x_dims\n x_pre_shape = x_shape[0:axis]\n x_pre_size = _reduce(_mul, x_pre_shape) if x_pre_shape else 1\n num_pre_dims = len(x_pre_shape)\n num_vals = x.shape[axis]\n x_post_shape = x_shape[axis+1:]\n x_post_size = _reduce(_mul, x_post_shape) if x_post_shape else 1\n num_post_dims = len(x_post_shape)\n xp = _np.reshape(_np.arange(num_vals*x_pre_size*x_post_size), x_shape)\n x_coords = _np.arange(num_samples) * ((num_vals-1)/(num_samples-1)) * x_post_size\n x_coords = _np.reshape(x_coords, [1]*num_pre_dims + [num_samples] + [1]*num_post_dims)\n x_coords = _np.broadcast_to(x_coords, x_pre_shape + [num_samples] + x_post_shape)\n slc = [slice(None)] * num_x_dims\n slc[axis] = slice(0, 1, 1)\n x_coords = x_coords + xp[tuple(slc)]\n x = _np.reshape(x, (-1,))\n xp = _np.reshape(xp, (-1,))\n x_coords = _np.reshape(x_coords, (-1,))\n ret = _np.interp(x_coords, xp, x)\n return _np.reshape(ret, x_pre_shape + [num_samples] + x_post_shape)\n\n\ndef dtype(x, as_str=False):\n dt = x.dtype\n if as_str:\n return dtype_to_str(dt)\n return dt\n\n\ndef dtype_to_str(dtype_in):\n if isinstance(dtype_in, str):\n return dtype_in\n return DTYPE_TO_STR[dtype_in]\n\n\ndef dtype_from_str(dtype_in):\n if not isinstance(dtype_in, str):\n return dtype_in\n return DTYPE_FROM_STR[dtype_in]\n\n\n# noinspection PyUnusedLocal\ndef compile(func, dynamic=True, example_inputs=None, static_argnums=None, static_argnames=None):\n logging.warning('Numpy does not support 
compiling functions.\\n'\n 'Now returning the unmodified function.')\n return func\n\n\ncurrent_framework_str = lambda: 'numpy'\ncurrent_framework_str.__name__ = 'current_framework_str'\nmultiprocessing = lambda context=None: _multiprocessing if context is None else _multiprocessing.get_context(context)\ncontainer_types = lambda: []\n\n\ndef inplace_update(x, val):\n x.data = val\n return x\n\n\ndef inplace_decrement(x, val):\n x -= val\n return x\n\n\ndef inplace_increment(x, val):\n x += val\n return x\n\n\ninplace_arrays_supported = lambda: True\ninplace_variables_supported = lambda: True\n"
] | [
[
"tensorflow.cast",
"tensorflow.expand_dims",
"tensorflow.abs",
"tensorflow.linalg.norm"
],
[
"numpy.take_along_axis",
"numpy.split",
"numpy.expand_dims",
"numpy.take",
"numpy.linspace",
"numpy.asarray",
"numpy.squeeze",
"numpy.cumsum",
"numpy.dtype",
"numpy.concatenate",
"numpy.round",
"numpy.maximum.at",
"numpy.argmin",
"numpy.zeros_like",
"numpy.where",
"numpy.swapaxes",
"numpy.ones_like",
"numpy.clip",
"numpy.reshape",
"numpy.arange",
"numpy.eye",
"numpy.matmul",
"numpy.ceil",
"numpy.argmax",
"numpy.interp",
"numpy.zeros",
"numpy.logspace",
"numpy.minimum.at",
"numpy.cumprod",
"numpy.identity",
"numpy.floor",
"numpy.transpose",
"numpy.array",
"numpy.meshgrid",
"numpy.sum",
"numpy.add.at",
"numpy.absolute",
"numpy.ones",
"numpy.shape",
"numpy.broadcast_to"
]
] |
ddboline/kaggle_predict_west_nile | [
"b4dbb3eed450beaf2b73d2a772e0fb3266926418"
] | [
"feature_extraction.py"
] | [
"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri May 1 16:28:06 2015\n\n@author: ddboline\n\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nimport csv\nimport gzip\n\nimport numpy as np\nimport pandas as pd\n\nfrom dateutil.parser import parse\n\nWEATHER_VARS_WITH_M_T = (u'Tmax', u'Tmin', u'Tavg', u'Depart', u'DewPoint',\n u'WetBulb', u'Heat', u'Cool', u'Snowfall',\n u'PrecipTotal', u'StnPressure', u'SeaLevel',\n u'ResultSpeed', u'ResultDir', u'AvgSpeed', u'Water1')\n\nWEATHER_PHENOMENA = ('BCFG', 'BLDU', 'BLSN', 'BR', 'DU', 'DZ', 'FG', 'FG+',\n 'FU', 'FZDZ', 'FZFG', 'FZRA', 'GR', 'GS', 'HZ', 'MIFG',\n 'PL', 'PRFG', 'RA', 'SG', 'SN', 'SQ', 'TS', 'TSRA',\n 'TSSN', 'UP', 'VCFG', 'VCTS')\n\ndef haversine_distance(lat1, lon1, lat2, lon2):\n r_earth = 6371.\n dlat = np.abs(lat1-lat2)*np.pi/180.\n dlon = np.abs(lon1-lon2)*np.pi/180.\n lat1 *= np.pi/180.\n lat2 *= np.pi/180.\n dist = 2. * r_earth * np.arcsin(\n np.sqrt(\n np.sin(dlat/2.)**2 +\n np.cos(lat1) * np.cos(lat2) *\n np.sin(dlon/2.)**2))\n return dist\n\ndef lat_lon_box(lat, lon, dist):\n r_earth = 6371.\n d_2r = dist/(2.*r_earth)\n dlat = 2. * (d_2r)\n dlon = 2. * np.arcsin((np.sin(d_2r))/(np.cos(lat)))\n dlat *= 180./np.pi\n dlon *= 180./np.pi\n return abs(dlat), abs(dlon)\n\ndef feature_extraction():\n spray_df = pd.read_csv('spray.csv.gz', compression='gzip')\n\n spray_lat_lon_list = []\n for idx, row in spray_df.iterrows():\n spray_lat_lon_list.append((row['Latitude'], row['Longitude']))\n\n weather_features = []\n cumu_labels = ('Tmax', 'Tmin', 'PrecipTotal')\n cumu_features = {}\n cumu_total = 0\n current_year = -1\n with gzip.open('weather.csv.gz', 'r') as wfile:\n wcsv = csv.reader(wfile)\n weather_labels = next(wcsv)\n for row in wcsv:\n rowdict = dict(zip(weather_labels, row))\n rowdict['Date'] = parse(rowdict['Date'])\n current_date = rowdict['Date']\n if current_date.year != current_year:\n current_year = current_date.year\n cumu_features = {k: 0 for k in cumu_labels}\n cumu_total = 0\n for k in WEATHER_VARS_WITH_M_T:\n if k in rowdict:\n rowdict[k] = rowdict[k].replace('M', 'nan')\n rowdict[k] = rowdict[k].replace('T', '0.0')\n for k in rowdict:\n if rowdict[k] == '-':\n rowdict[k] = 'nan'\n if type(rowdict[k]) == str:\n rowdict[k] = rowdict[k].strip()\n for ph in WEATHER_PHENOMENA:\n rowdict['wp%s' % ph] = '0'\n for ph in rowdict['CodeSum'].split():\n if ph in WEATHER_PHENOMENA:\n rowdict['wp%s' % ph] = '1'\n for lab in cumu_labels:\n _tmp = float(rowdict[lab])\n if not np.isnan(_tmp):\n cumu_features[lab] += _tmp\n cumu_total += 1\n for lab in ('Tmax', 'Tmin', 'PrecipTotal'):\n rowdict['%s_cumu' % lab] = cumu_features[lab] / cumu_total\n weather_features.append(rowdict)\n# print('\\n'.join(['%s: %s' % (k, rowdict[k]) for k in rowdict]))\n# exit(0)\n for ph in WEATHER_PHENOMENA:\n weather_labels.append('wp%s' % ph)\n for lab in cumu_labels:\n weather_labels.append('%s_cumu' % lab)\n\n\n for prefix in 'train', 'test':\n with gzip.open('%s.csv.gz' % prefix, 'rb') as csvfile:\n outfile = gzip.open('%s_full.csv.gz' % prefix, 'wb')\n csv_reader = csv.reader(csvfile)\n labels = next(csv_reader)\n\n out_labels = labels +\\\n ['n_spray_%d' % x for x in range(1,11)]\n for lab in weather_labels:\n if lab == 'Date':\n continue\n out_labels.append(lab)\n\n csv_writer = csv.writer(outfile)\n csv_writer.writerow(out_labels)\n\n for idx, row in enumerate(csv_reader):\n if idx % 1000 == 0:\n print('processed 
%d' % idx)\n# if idx > 100:\n# exit(0)\n row_dict = dict(zip(labels, row))\n\n current_date = parse(row_dict['Date'])\n cur_lat = float(row_dict['Latitude'])\n cur_lon = float(row_dict['Longitude'])\n\n for idx in range(1, 11):\n row_dict['n_spray_%d' % idx] = 0\n dlat, dlon = lat_lon_box(cur_lat, cur_lon, 1.5)\n for slat, slon in spray_lat_lon_list:\n# print(dlat, dlon, abs(slat-cur_lat), abs(slon-cur_lon))\n if abs(slat-cur_lat) > dlat or abs(slon-cur_lon) > dlon:\n continue\n sdist = haversine_distance(cur_lat, cur_lon, slat, slon)\n for idx in range(1,11):\n if sdist < idx/10.0:\n row_dict['n_spray_%d' % idx] += 1\n\n for lab in ['Tmax_cumu', 'Tmin_cumu', 'PrecipTotal_cumu']:\n row_dict[lab] = 0\n most_recent = 1000000\n most_recent_w = weather_features[0]\n for wfeat in weather_features:\n wdate = wfeat['Date']\n if current_date.year != wdate.year:\n continue\n wdur = abs((current_date - wdate).days)\n if wdur < most_recent:\n most_recent = wdur\n most_recent_w = wfeat\n for lab in weather_labels:\n if lab == 'Date':\n continue\n row_dict[lab] = most_recent_w[lab]\n row_val = [row_dict[col] for col in out_labels]\n csv_writer.writerow(row_val)\n# outfile.flush()\n# print('\\n'.join(['%s: %s' % (k, row_dict[k]) for k in row_dict]))\n# exit(0)\n return\n\nif __name__ == '__main__':\n feature_extraction()\n"
] | [
[
"pandas.read_csv",
"numpy.abs",
"numpy.isnan",
"numpy.cos",
"numpy.sin"
]
] |
neulab/idiomata-bot | [
"f397e49fb9d1d59b9b74e0e528a72307637a18e9"
] | [
"lang_id.py"
] | [
"import numpy as np\nimport iso639\nfrom collections import defaultdict\n\nall_langs = ('cay', 'dan', 'deu', 'eng', 'fra', 'kwk', 'see', 'swe')\n\ncodelang = [('cay', 'Cayuga'), ('see', 'Seneca'), ('other', 'Other')]\ncode2lang_dict = {c:l for (c,l) in codelang}\nlang2code_dict = {l:c for (c,l) in codelang}\n\ndef code2lang(code):\n if code in code2lang_dict:\n return code2lang_dict[code]\n elif code in iso639.languages.terminology:\n return iso639.languages.terminology[code].inverted\n else:\n return None\n\ndef lang2code(lang):\n if lang in lang2code_dict:\n return lang2code_dict[lang]\n elif lang in iso639.languages.inverted:\n return iso639.languages.inverted[lang].terminology\n else:\n return None\n\nclass LanguageID(object):\n\n def __init__(self, langs=all_langs):\n \"\"\"\n Create a language identifier for the specified languages.\n\n Args:\n langs: The ISO-639 lexographic language codes for each language.\n Defaults to all_langs.\n \"\"\"\n self.langs = langs\n raise NotImplementedError('Need to implement in a subclass')\n\n def predict_word(word):\n \"\"\"\n Calculate the log probability of a word belonging to a particular language specified in `langs`. If `langs` is not specified, it will use `all_langs`.\n\n Args:\n word: A single word string\n\n Returns:\n A numpy array with the log probability of each language\n \"\"\"\n raise NotImplementedError('Need to implement in a subclass')\n\n def predict_words(self, words):\n \"\"\"\n Calculate the log probability of words in a sentence belonging to a particular language specified in `langs`. If `langs` is not specified, it will use `all_langs`.\n\n Args:\n words: A tokenized list of word strings\n langs: A list of three-letter language codes\n\n Returns:\n A numpy array with the log probability of each word (rows) for each language or other (columns)\n \"\"\"\n ret = np.zeros( (len(words), len(self.langs)+1) )\n for i, word in enumerate(words):\n ret[i] = self.predict_word(word)\n return ret\n\n def id_words(self, words, id_type='pos'):\n ret = list(np.argmax(self.predict_words(words), axis=1))\n if id_type == 'pos': return ret\n ret = ['other' if pos == len(self.langs) else self.langs[pos] for pos in ret]\n if id_type == 'code': return ret\n ret = [code2lang(code) for code in ret]\n return ret\n\n\nclass WordCountBasedLanguageID(LanguageID):\n\n def __init__(self, langs=all_langs, other_alpha=1.0e-9, lang_alpha=1.0e-10):\n self.langs = langs\n self.other_alpha = other_alpha\n self.lang_alpha = lang_alpha\n self.counts = [self.load_counts(lang) for lang in langs]\n\n def load_counts(self, lang):\n counts = {}\n with open(f'data/word_counts/{lang}.txt', 'r') as f:\n for line in f:\n word, count = line.strip().split()\n counts[word.lower()] = int(count)\n my_sum = float(sum(counts.values()))\n counts = {word: count/my_sum for (word, count) in counts.items()}\n return counts\n\n def predict_word(self, word):\n my_counts = np.zeros(len(self.langs)+1)\n my_counts[len(self.langs)] = self.other_alpha\n for i, counts in enumerate(self.counts):\n my_counts[i] = counts.get(word.lower(), self.lang_alpha)\n return np.log(my_counts/np.sum(my_counts))\n\nclass WordClassifierLanguageID(LanguageID):\n\n def __init__(self, langs=all_langs, alpha=0.5, ns=(3,4,5), other_bias=1):\n self.langs = langs\n self.alpha = alpha\n self.other_bias = other_bias\n self.ns = ns\n self.ngram_probs = defaultdict(lambda: np.zeros(len(langs)+1) + alpha)\n for i, lang in enumerate(langs):\n with open(f'data/word_counts/{lang}.txt', 'r') as f:\n for line in f:\n word, 
count = line.strip().split()\n for ngram in self.get_ngrams(word):\n self.ngram_probs[ngram][i] += 1\n for k, v in self.ngram_probs.items():\n self.ngram_probs[k] = np.log(v/np.sum(v))\n\n def predict_word(self, word):\n my_counts = np.zeros(len(self.langs)+1)\n my_counts[len(self.langs)] = self.other_bias\n for ngram in self.get_ngrams(word):\n if ngram in self.ngram_probs:\n my_counts += self.ngram_probs[ngram]\n my_counts -= np.max(my_counts)\n my_counts -= np.log(np.sum(np.exp(my_counts)))\n print(my_counts)\n return my_counts\n\n def get_ngrams(self, word):\n word = word.lower()\n for n in self.ns:\n for i in range(len(word)-n+1):\n yield word[i:i+n]\n\nif __name__ == \"__main__\":\n my_lid = WordClassifierLanguageID()\n words = 'Danke , Bonjour'.split()\n print(' '.join([str(x) for x in my_lid.id_words(words, id_type='name')]))\n"
] | [
[
"numpy.max",
"numpy.exp",
"numpy.sum"
]
] |
drat/Neural-Voice-Cloning-With-Few-Samples | [
"4febde43ccc143fc88d74d5fa0c5a117636778b4"
] | [
"Modules/Attention.py"
] | [
"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport sys\n\nfrom Modules.MultiHeadAttention import MultiHeadAttention\n\nclass Attention(nn.Module):\n def __init__(self, dim):\n super(Attention, self).__init__()\n\n self.encoders = self._build_model(dim)\n\n def _build_model(self, dim):\n layers = []\n dim = dim\n layers.append(MultiHeadAttention(dim, dim, dim))\n\n return nn.ModuleList(layers)\n\n def forward(self, inputs):\n net_inputs = inputs\n net_inputs.contiguous()\n for enc in self.encoders:\n net_inputs = enc(net_inputs, net_inputs)\n return net_inputs\n"
] | [
[
"torch.nn.ModuleList"
]
] |
galvinw/fairmotdocker | [
"032d50a4025788b97ca36b0d97b7df15ddb5986c",
"032d50a4025788b97ca36b0d97b7df15ddb5986c"
] | [
"monoloco/monoloco/visuals/plot_3d_box.py",
"monofair2/monoloco/monoloco/eval/eval_kitti.py"
] | [
"\nimport numpy as np\n\n\ndef correct_boxes(boxes, hwls, xyzs, yaws, path_calib):\n\n with open(path_calib, \"r\") as ff:\n file = ff.readlines()\n p2_str = file[2].split()[1:]\n p2_list = [float(xx) for xx in p2_str]\n P = np.array(p2_list).reshape(3, 4)\n boxes_new = []\n for idx in range(boxes):\n hwl = hwls[idx]\n xyz = xyzs[idx]\n yaw = yaws[idx]\n corners_2d, _ = compute_box_3d(hwl, xyz, yaw, P)\n box_new = project_8p_to_4p(corners_2d).reshape(-1).tolist()\n boxes_new.append(box_new)\n return boxes_new\n\n\ndef compute_box_3d(hwl, xyz, ry, P):\n \"\"\" Takes an object and a projection matrix (P) and projects the 3d\n bounding box into the image plane.\n Returns:\n corners_2d: (8,2) array in left image coord.\n corners_3d: (8,3) array in in rect camera coord.\n \"\"\"\n # compute rotational matrix around yaw axis\n R = roty(ry)\n\n # 3d bounding box dimensions\n l = hwl[2]\n w = hwl[1]\n h = hwl[0]\n\n # 3d bounding box corners\n x_corners = [l / 2, l / 2, -l / 2, -l / 2, l / 2, l / 2, -l / 2, -l / 2]\n y_corners = [0, 0, 0, 0, -h, -h, -h, -h]\n z_corners = [w / 2, -w / 2, -w / 2, w / 2, w / 2, -w / 2, -w / 2, w / 2]\n\n # rotate and translate 3d bounding box\n corners_3d = np.dot(R, np.vstack([x_corners, y_corners, z_corners]))\n # print corners_3d.shape\n corners_3d[0, :] = corners_3d[0, :] + xyz[0]\n corners_3d[1, :] = corners_3d[1, :] + xyz[1]\n corners_3d[2, :] = corners_3d[2, :] + xyz[2]\n # print 'cornsers_3d: ', corners_3d\n # only draw 3d bounding box for objs in front of the camera\n if np.any(corners_3d[2, :] < 0.1):\n corners_2d = None\n return corners_2d, np.transpose(corners_3d)\n\n # project the 3d bounding box into the image plane\n corners_2d = project_to_image(np.transpose(corners_3d), P)\n # print 'corners_2d: ', corners_2d\n return corners_2d, np.transpose(corners_3d)\n\n\ndef roty(t):\n \"\"\" Rotation about the y-axis. \"\"\"\n c = np.cos(t)\n s = np.sin(t)\n return np.array([[c, 0, s], [0, 1, 0], [-s, 0, c]])\n\n\ndef project_to_image(pts_3d, P):\n \"\"\" Project 3d points to image plane.\n Usage: pts_2d = projectToImage(pts_3d, P)\n input: pts_3d: nx3 matrix\n P: 3x4 projection matrix\n output: pts_2d: nx2 matrix\n P(3x4) dot pts_3d_extended(4xn) = projected_pts_2d(3xn)\n => normalize projected_pts_2d(2xn)\n <=> pts_3d_extended(nx4) dot P'(4x3) = projected_pts_2d(nx3)\n => normalize projected_pts_2d(nx2)\n \"\"\"\n n = pts_3d.shape[0]\n pts_3d_extend = np.hstack((pts_3d, np.ones((n, 1))))\n # print(('pts_3d_extend shape: ', pts_3d_extend.shape))\n pts_2d = np.dot(pts_3d_extend, np.transpose(P)) # nx3\n pts_2d[:, 0] /= pts_2d[:, 2]\n pts_2d[:, 1] /= pts_2d[:, 2]\n return pts_2d[:, 0:2]\n\n\ndef project_8p_to_4p(pts_2d):\n x0 = np.min(pts_2d[:, 0])\n x1 = np.max(pts_2d[:, 0])\n y0 = np.min(pts_2d[:, 1])\n y1 = np.max(pts_2d[:, 1])\n x0 = max(0, x0)\n y0 = max(0, y0)\n return np.array([x0, y0, x1, y1])\n",
"\"\"\"\nEvaluate MonStereo code on KITTI dataset using ALE metric\n\"\"\"\n\n# pylint: disable=attribute-defined-outside-init\n\nimport os\nimport math\nimport logging\nimport copy\nimport datetime\nfrom collections import defaultdict\n\nimport numpy as np\ntry:\n import tabulate\n TABULATE = copy.copy(tabulate.tabulate)\nexcept ImportError:\n TABULATE = None\n\nfrom ..utils import get_iou_matches, get_task_error, get_pixel_error, check_conditions, \\\n get_difficulty, split_training, get_iou_matches_matrix, average, find_cluster\nfrom ..prep import parse_ground_truth\nfrom ..visuals import show_results, show_spread, show_task_error, show_box_plot\n\n\nclass EvalKitti:\n\n logging.basicConfig(level=logging.INFO)\n logger = logging.getLogger(__name__)\n CLUSTERS = ('easy', 'moderate', 'hard', 'all', '3', '5', '7', '9', '11', '13', '15', '17', '19', '21', '23', '25',\n '27', '29', '31', '49')\n ALP_THRESHOLDS = ('<0.5m', '<1m', '<2m')\n OUR_METHODS = ['geometric', 'monoloco', 'monoloco_pp', 'pose', 'reid', 'monstereo']\n METHODS_MONO = ['m3d', 'monopsr', 'smoke', 'monodis']\n METHODS_STEREO = ['3dop', 'psf', 'pseudo-lidar', 'e2e', 'oc-stereo']\n BASELINES = ['task_error', 'pixel_error']\n HEADERS = ('method', '<0.5', '<1m', '<2m', 'easy', 'moderate', 'hard', 'all')\n CATEGORIES = ('pedestrian',) # extendable with person_sitting and/or cyclists\n methods = OUR_METHODS + METHODS_MONO + METHODS_STEREO\n\n # Set directories\n main_dir = os.path.join('data', 'kitti')\n dir_gt = os.path.join(main_dir, 'gt')\n path_train = os.path.join('splits', 'kitti_train.txt')\n path_val = os.path.join('splits', 'kitti_val.txt')\n dir_logs = os.path.join('data', 'logs')\n assert os.path.exists(dir_logs), \"No directory to save final statistics\"\n dir_fig = os.path.join('figures', 'results')\n\n # Set thresholds to obtain comparable recalls\n thresh_iou_monoloco = 0.3\n thresh_iou_base = 0.3\n thresh_conf_monoloco = 0.2\n thresh_conf_base = 0.5\n\n def __init__(self, args):\n self.mode = args.mode\n assert self.mode in ('mono', 'stereo'), \"mode not recognized\"\n self.net = 'monstereo' if self.mode == 'stereo' else 'monoloco_pp'\n self.verbose = args.verbose\n self.save = args.save\n self.show = args.show\n\n now = datetime.datetime.now()\n now_time = now.strftime(\"%Y%m%d-%H%M\")[2:]\n self.path_results = os.path.join(self.dir_logs, 'eval-' + now_time + '.json')\n\n # Set thresholds for comparable recalls\n self.dic_thresh_iou = {method: (self.thresh_iou_monoloco if method in self.OUR_METHODS\n else self.thresh_iou_base)\n for method in self.methods}\n self.dic_thresh_conf = {method: (self.thresh_conf_monoloco if method in self.OUR_METHODS\n else self.thresh_conf_base)\n for method in self.methods}\n\n # Set thresholds to obtain comparable recall\n self.dic_thresh_conf['monopsr'] += 0.4\n self.dic_thresh_conf['e2e-pl'] = -100\n self.dic_thresh_conf['oc-stereo'] = -100\n self.dic_thresh_conf['smoke'] = -100\n self.dic_thresh_conf['monodis'] = -100\n\n # Extract validation images for evaluation\n names_gt = tuple(os.listdir(self.dir_gt))\n _, self.set_val = split_training(names_gt, self.path_train, self.path_val)\n\n # self.set_val = ('002282.txt', )\n\n # Define variables to save statistics\n self.dic_methods = self.errors = self.dic_stds = self.dic_stats = self.dic_cnt = self.cnt_gt = self.category \\\n = None\n self.cnt = 0\n\n # Filter methods with empty or non existent directory\n filter_directories(self.main_dir, self.methods)\n\n def run(self):\n \"\"\"Evaluate Monoloco performances on ALP and ALE 
metrics\"\"\"\n\n for self.category in self.CATEGORIES:\n # Initialize variables\n self.errors = defaultdict(lambda: defaultdict(list))\n self.dic_stds = defaultdict(lambda: defaultdict(lambda: defaultdict(list)))\n self.dic_stats = defaultdict(lambda: defaultdict(lambda: defaultdict(lambda: defaultdict(float))))\n self.dic_cnt = defaultdict(int)\n self.cnt_gt = defaultdict(int)\n\n # Iterate over each ground truth file in the training set\n # self.set_val = ('000063.txt',)\n for name in self.set_val:\n path_gt = os.path.join(self.dir_gt, name)\n self.name = name\n\n # Iterate over each line of the gt file and save box location and distances\n out_gt = parse_ground_truth(path_gt, self.category)\n methods_out = defaultdict(tuple) # Save all methods for comparison\n\n # Count ground_truth:\n boxes_gt, _, truncs_gt, occs_gt, _ = out_gt # pylint: disable=unbalanced-tuple-unpacking\n for idx, box in enumerate(boxes_gt):\n mode = get_difficulty(box, truncs_gt[idx], occs_gt[idx])\n self.cnt_gt[mode] += 1\n self.cnt_gt['all'] += 1\n\n if out_gt[0]:\n for method in self.methods:\n # Extract annotations\n dir_method = os.path.join(self.main_dir, method)\n path_method = os.path.join(dir_method, name)\n methods_out[method] = self._parse_txts(path_method, method=method)\n\n # Compute the error with ground truth\n self._estimate_error(out_gt, methods_out[method], method=method)\n\n # Update statistics of errors and uncertainty\n for key in self.errors:\n add_true_negatives(self.errors[key], self.cnt_gt['all'])\n for clst in self.CLUSTERS[:-1]:\n\n try:\n get_statistics(self.dic_stats['test'][key][clst],\n self.errors[key][clst],\n self.dic_stds[key][clst], key)\n except ZeroDivisionError:\n print('\\n'+'-'*100 + '\\n'+f'ERROR: method {key} at cluster {clst} is empty' + '\\n'+'-'*100+'\\n')\n raise\n\n # Show statistics\n print('\\n' + self.category.upper() + ':')\n self.show_statistics()\n\n def printer(self):\n if self.save:\n os.makedirs(self.dir_fig, exist_ok=True)\n if self.save or self.show:\n print('-' * 100)\n show_results(self.dic_stats, self.CLUSTERS, self.net, self.dir_fig, show=self.show, save=self.save)\n show_spread(self.dic_stats, self.CLUSTERS, self.net, self.dir_fig, show=self.show, save=self.save)\n if self.net == 'monstereo':\n show_box_plot(self.errors, self.CLUSTERS, self.dir_fig, show=self.show, save=self.save)\n else:\n show_task_error(self.dir_fig, show=self.show, save=self.save)\n\n def _parse_txts(self, path, method):\n\n boxes = []\n dds = []\n cat = []\n\n if method == 'psf':\n path = os.path.splitext(path)[0] + '.png.txt'\n if method in self.OUR_METHODS:\n bis, epis = [], []\n output = (boxes, dds, cat, bis, epis)\n else:\n output = (boxes, dds, cat)\n try:\n with open(path, \"r\") as ff:\n for line_str in ff:\n if method == 'psf':\n line = line_str.split(\", \")\n box = [float(x) for x in line[4:8]]\n boxes.append(box)\n loc = ([float(x) for x in line[11:14]])\n dd = math.sqrt(loc[0] ** 2 + loc[1] ** 2 + loc[2] ** 2)\n dds.append(dd)\n cat.append('Pedestrian')\n else:\n line = line_str.split()\n if check_conditions(line,\n category='pedestrian',\n method=method,\n thresh=self.dic_thresh_conf[method]):\n box = [float(x) for x in line[4:8]]\n box.append(float(line[15])) # Add confidence\n loc = ([float(x) for x in line[11:14]])\n dd = math.sqrt(loc[0] ** 2 + loc[1] ** 2 + loc[2] ** 2)\n cat.append(line[0])\n boxes.append(box)\n dds.append(dd)\n if method in self.OUR_METHODS:\n bis.append(float(line[16]))\n epis.append(float(line[17]))\n self.dic_cnt[method] += 1\n\n return 
output\n except FileNotFoundError:\n return output\n\n def _estimate_error(self, out_gt, out, method):\n \"\"\"Estimate localization error\"\"\"\n\n boxes_gt, ys, truncs_gt, occs_gt, _ = out_gt\n\n if method in self.OUR_METHODS:\n boxes, dds, cat, bis, epis = out\n else:\n boxes, dds, cat = out\n\n if method == 'psf':\n matches = get_iou_matches_matrix(boxes, boxes_gt, self.dic_thresh_iou[method])\n else:\n matches = get_iou_matches(boxes, boxes_gt, self.dic_thresh_iou[method])\n\n for (idx, idx_gt) in matches:\n # Update error if match is found\n dd_gt = ys[idx_gt][3]\n zz_gt = ys[idx_gt][2]\n mode = get_difficulty(boxes_gt[idx_gt], truncs_gt[idx_gt], occs_gt[idx_gt])\n\n if cat[idx].lower() in (self.category, 'pedestrian'):\n self.update_errors(dds[idx], dd_gt, mode, self.errors[method])\n if method == 'monoloco':\n dd_task_error = dd_gt + (get_task_error(zz_gt))**2\n dd_pixel_error = dd_gt + get_pixel_error(zz_gt)\n self.update_errors(dd_task_error, dd_gt, mode, self.errors['task_error'])\n self.update_errors(dd_pixel_error, dd_gt, mode, self.errors['pixel_error'])\n if method in self.OUR_METHODS:\n epi = max(epis[idx], bis[idx])\n self.update_uncertainty(bis[idx], epi, dds[idx], dd_gt, mode, self.dic_stds[method])\n\n def update_errors(self, dd, dd_gt, cat, errors):\n \"\"\"Compute and save errors between a single box and the gt box which match\"\"\"\n diff = abs(dd - dd_gt)\n clst = find_cluster(dd_gt, self.CLUSTERS[4:])\n errors['all'].append(diff)\n errors[cat].append(diff)\n errors[clst].append(diff)\n\n # Check if the distance is less than one or 2 meters\n if diff <= 0.5:\n errors['<0.5m'].append(1)\n else:\n errors['<0.5m'].append(0)\n\n if diff <= 1:\n errors['<1m'].append(1)\n else:\n errors['<1m'].append(0)\n\n if diff <= 2:\n errors['<2m'].append(1)\n else:\n errors['<2m'].append(0)\n\n def update_uncertainty(self, std_ale, std_epi, dd, dd_gt, mode, dic_stds):\n\n clst = find_cluster(dd_gt, self.CLUSTERS[4:])\n dic_stds['all']['ale'].append(std_ale)\n dic_stds[clst]['ale'].append(std_ale)\n dic_stds[mode]['ale'].append(std_ale)\n dic_stds['all']['epi'].append(std_epi)\n dic_stds[clst]['epi'].append(std_epi)\n dic_stds[mode]['epi'].append(std_epi)\n dic_stds['all']['epi_rel'].append(std_epi / dd)\n dic_stds[clst]['epi_rel'].append(std_epi / dd)\n dic_stds[mode]['epi_rel'].append(std_epi / dd)\n\n # Number of annotations inside the confidence interval\n std = std_epi if std_epi > 0 else std_ale # consider aleatoric uncertainty if epistemic is not calculated\n if abs(dd - dd_gt) <= std:\n dic_stds['all']['interval'].append(1)\n dic_stds[clst]['interval'].append(1)\n dic_stds[mode]['interval'].append(1)\n else:\n dic_stds['all']['interval'].append(0)\n dic_stds[clst]['interval'].append(0)\n dic_stds[mode]['interval'].append(0)\n\n # Annotations at risk inside the confidence interval\n if dd_gt <= dd:\n dic_stds['all']['at_risk'].append(1)\n dic_stds[clst]['at_risk'].append(1)\n dic_stds[mode]['at_risk'].append(1)\n\n if abs(dd - dd_gt) <= std_epi:\n dic_stds['all']['at_risk-interval'].append(1)\n dic_stds[clst]['at_risk-interval'].append(1)\n dic_stds[mode]['at_risk-interval'].append(1)\n else:\n dic_stds['all']['at_risk-interval'].append(0)\n dic_stds[clst]['at_risk-interval'].append(0)\n dic_stds[mode]['at_risk-interval'].append(0)\n\n else:\n dic_stds['all']['at_risk'].append(0)\n dic_stds[clst]['at_risk'].append(0)\n dic_stds[mode]['at_risk'].append(0)\n\n # Precision of uncertainty\n eps = 1e-4\n task_error = get_task_error(dd)\n prec_1 = abs(dd - dd_gt) / (std_epi + 
eps)\n\n prec_2 = abs(std_epi - task_error)\n dic_stds['all']['prec_1'].append(prec_1)\n dic_stds[clst]['prec_1'].append(prec_1)\n dic_stds[mode]['prec_1'].append(prec_1)\n dic_stds['all']['prec_2'].append(prec_2)\n dic_stds[clst]['prec_2'].append(prec_2)\n dic_stds[mode]['prec_2'].append(prec_2)\n\n def show_statistics(self):\n\n all_methods = self.methods + self.BASELINES\n print('-'*90)\n self.summary_table(all_methods)\n\n # Uncertainty\n for net in ('monoloco_pp', 'monstereo'):\n print(('-'*100))\n print(net.upper())\n for clst in ('easy', 'moderate', 'hard', 'all'):\n print(\" Annotations in clst {}: {:.0f}, Recall: {:.1f}. Precision: {:.2f}, Relative size is {:.1f} %\"\n .format(clst,\n self.dic_stats['test'][net][clst]['cnt'],\n self.dic_stats['test'][net][clst]['interval']*100,\n self.dic_stats['test'][net][clst]['prec_1'],\n self.dic_stats['test'][net][clst]['epi_rel']*100))\n\n if self.verbose:\n for key in all_methods:\n print(key.upper())\n for clst in self.CLUSTERS[:4]:\n print(\" {} Average error in cluster {}: {:.2f} with a max error of {:.1f}, \"\n \"for {} annotations\"\n .format(key, clst, self.dic_stats['test'][key][clst]['mean'],\n self.dic_stats['test'][key][clst]['max'],\n self.dic_stats['test'][key][clst]['cnt']))\n\n for perc in self.ALP_THRESHOLDS:\n print(\"{} Instances with error {}: {:.2f} %\"\n .format(key, perc, 100 * average(self.errors[key][perc])))\n\n print(\"\\nMatched annotations: {:.1f} %\".format(self.errors[key]['matched']))\n print(\" Detected annotations : {}/{} \".format(self.dic_cnt[key], self.cnt_gt['all']))\n print(\"-\" * 100)\n\n print(\"precision 1: {:.2f}\".format(self.dic_stats['test']['monoloco']['all']['prec_1']))\n print(\"precision 2: {:.2f}\".format(self.dic_stats['test']['monoloco']['all']['prec_2']))\n\n def summary_table(self, all_methods):\n \"\"\"Tabulate table for ALP and ALE metrics\"\"\"\n\n alp = [[str(100 * average(self.errors[key][perc]))[:5]\n for perc in ['<0.5m', '<1m', '<2m']]\n for key in all_methods]\n\n ale = [[str(round(self.dic_stats['test'][key][clst]['mean'], 2))[:4] + ' [' +\n str(round(self.dic_stats['test'][key][clst]['cnt'] / self.cnt_gt[clst] * 100))[:2] + '%]'\n for clst in self.CLUSTERS[:4]]\n for key in all_methods]\n\n results = [[key] + alp[idx] + ale[idx] for idx, key in enumerate(all_methods)]\n print(TABULATE(results, headers=self.HEADERS))\n print('-' * 90 + '\\n')\n\n def stats_height(self):\n heights = []\n for name in self.set_val:\n path_gt = os.path.join(self.dir_gt, name)\n self.name = name\n # Iterate over each line of the gt file and save box location and distances\n out_gt = parse_ground_truth(path_gt, 'pedestrian')\n for label in out_gt[1]:\n heights.append(label[4])\n tail1, tail2 = np.nanpercentile(np.array(heights), [5, 95])\n print(average(heights))\n print(len(heights))\n print(tail1, tail2)\n\n\ndef get_statistics(dic_stats, errors, dic_stds, key):\n \"\"\"Update statistics of a cluster\"\"\"\n\n try:\n dic_stats['mean'] = average(errors)\n dic_stats['max'] = max(errors)\n dic_stats['cnt'] = len(errors)\n except ValueError:\n dic_stats['mean'] = - 1\n dic_stats['max'] = - 1\n dic_stats['cnt'] = - 1\n\n if key in ('monoloco', 'monoloco_pp', 'monstereo'):\n dic_stats['std_ale'] = average(dic_stds['ale'])\n dic_stats['std_epi'] = average(dic_stds['epi'])\n dic_stats['epi_rel'] = average(dic_stds['epi_rel'])\n dic_stats['interval'] = average(dic_stds['interval'])\n dic_stats['at_risk'] = average(dic_stds['at_risk'])\n dic_stats['prec_1'] = average(dic_stds['prec_1'])\n 
dic_stats['prec_2'] = average(dic_stds['prec_2'])\n\n\ndef add_true_negatives(err, cnt_gt):\n \"\"\"Update errors statistics of a specific method with missing detections\"\"\"\n\n matched = len(err['all'])\n missed = cnt_gt - matched\n zeros = [0] * missed\n err['<0.5m'].extend(zeros)\n err['<1m'].extend(zeros)\n err['<2m'].extend(zeros)\n err['matched'] = 100 * matched / cnt_gt\n\n\ndef extract_indices(idx_to_check, *args):\n \"\"\"\n Look if a given index j_gt is present in all the other series of indices (_, j)\n and return the corresponding one for argument\n\n idx_check --> gt index to check for correspondences in other method\n idx_method --> index corresponding to the method\n idx_gt --> index gt of the method\n idx_pred --> index of the predicted box of the method\n indices --> list of predicted indices for each method corresponding to the ground truth index to check\n \"\"\"\n\n checks = [False]*len(args)\n indices = []\n for idx_method, method in enumerate(args):\n for (idx_pred, idx_gt) in method:\n if idx_gt == idx_to_check:\n checks[idx_method] = True\n indices.append(idx_pred)\n return all(checks), indices\n\n\ndef filter_directories(main_dir, methods):\n for method in methods:\n dir_method = os.path.join(main_dir, method)\n if not os.path.exists(dir_method):\n methods.remove(method)\n print(f\"\\nMethod {method}. No directory found. Skipping it..\")\n elif not os.listdir(dir_method):\n methods.remove(method)\n print(f\"\\nMethod {method}. Directory is empty. Skipping it..\")\n"
] | [
[
"numpy.min",
"numpy.cos",
"numpy.sin",
"numpy.ones",
"numpy.max",
"numpy.any",
"numpy.transpose",
"numpy.array",
"numpy.vstack"
],
[
"numpy.array"
]
] |
Ditskih/Project | [
"87170245e55e615b0a14966d60afe41caece0434"
] | [
"processingfcmsvd.py"
] | [
"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Thu Feb 21 15:38:52 2019\r\n\r\n@author: Ditskih\r\n\"\"\"\r\nimport os\r\nimport json\r\nimport re\r\nimport csv\r\nfrom nltk.tokenize import word_tokenize\r\nfrom nltk.stem import WordNetLemmatizer\r\nfrom sklearn.feature_extraction.text import CountVectorizer\r\n#from sklearn.feature_extraction.text import ENGLISH_STOP_WORDS\r\nfrom sklearn.feature_extraction.text import TfidfVectorizer\r\nfrom sklearn.random_projection import GaussianRandomProjection as GRP\r\nimport numpy as np\r\nimport sys\r\nsys.path.insert(0, 'FCMeans')\r\nfrom fcmeans import fcmeans\r\nfrom sklearn.decomposition import TruncatedSVD\r\nfrom scipy.sparse import csr_matrix\r\nimport pandas as pd\r\n\r\ndef my_preprocessor(tweet):\r\n #Convert to lower case\r\n tweet = tweet.lower()\r\n #Convert www.* or https?://* to URL\r\n tweet = re.sub('((www\\.[^\\s]+)|(https?://[^\\s]+))','URL',tweet)\r\n #Convert @username to AT_USER\r\n tweet = re.sub('@[^\\s]+','AT_USER',tweet)\r\n #Remove additional white spaces\r\n tweet = re.sub('[\\s]+', ' ', tweet)\r\n #Replace #word with word\r\n tweet = re.sub(r'#([^\\s]+)', r'\\1', tweet)\r\n #trim\r\n tweet = tweet.strip('\\'\"')\r\n return tweet\r\n\r\ndef my_tokenizer(tweet):\r\n words = word_tokenize(tweet)\r\n tokens=[]\r\n for w in words:\r\n #replace two or more with two occurrences\r\n pattern = re.compile(r\"(.)\\1{1,}\", re.DOTALL)\r\n w = pattern.sub(r\"\\1\\1\", w)\r\n #strip punctuation\r\n w = w.strip('\\'\"?,.')\r\n #choose words with a pattern\r\n val = re.search(r\"^[a-zA-Z0-9][a-zA-Z0-9]*$\", w)\r\n #add tokens\r\n if(w in ['AT_USER','URL'] or val is None):\r\n continue\r\n else:\r\n tokens.append(w.lower())\r\n\r\n return tokens\r\n\r\n\r\nfor i in range (1):\r\n\r\n # -------\r\n # Loading\r\n # -------\r\n print (\"Loading dataset .... 
\")\r\n df = csv.reader(open(\"D:\\\\Private Property\\\\Data Kuliah\\\\Akademis\\\\Skripsweet\\\\program\\\\Program1\\\\Program\\\\nyoba\\\\dicobaduluajafix.csv\", encoding=\"utf8\"))\r\n data = []\r\n for column in df:\r\n data.append(column[0].strip() + ' ' + column[1].strip())\r\n\r\n # -----------\r\n # Vectorizing : Preprocessing, Tokenizing, Filtering, Weighting\r\n # -----------\r\n print (\"Vectorizing .....\")\r\n\r\n data_file = csv.reader(open('D:\\Private Property\\Data Kuliah\\Akademis\\Skripsweet\\program\\Program1\\Program\\\\nyoba\\\\stopwords_id.csv'))\r\n stopwords = []\r\n for column in data_file:\r\n stopwords.append(column[0])\r\n my_stop_words = stopwords + ['untuk','toko','nya','false','none''0', '01', '02', '0223', '03', '04', '05', '06', '07', '08', '09',\r\n '0pertandingan', '1', '10', '100', '1001', '101', '102', '1020', '103', '104', '105', '106', '108', '109',\r\n '10th', '11', '110', '112', '113', '115', '12', '120', '121', '122', '123', '125', '129', '13', '130', '131',\r\n '132', '135', '136', '137', '138', '139', '14', '140', '141', '142', '145', '148', '15', '150', '1500',\r\n '152', '153', '154', '155', '157', '16', '160', '161', '162', '165', '166', '168', '17', '170', '1700',\r\n '172', '1731', '175', '1763', '18', '180', '1800', '181', '184', '1848', '185', '187', '19', '190',\r\n '1906', '191', '1930', '1936', '1945', '1947', '1948', '1949', '1950', '1954', '1955', '1958', '196',\r\n '1961', '1962', '1964', '1965', '1967', '1968', '1972', '1973', '1974', '1984', '1985', '1987', '199',\r\n '1990', '1991', '1992', '1993', '1994', '1995', '1996', '1997', '1998', '1a', '1musim', '1st', '2', '20',\r\n '200', '2001', '2002', '2003', '2004', '2005', '2006', '2007', '2008', '2009', '200cc', '201', '2010',\r\n '2011', '2012', '2013', '2014', '2015', '2016', '2017', '2018', '2019', '2020', '2021', '2022', '2025',\r\n '2041', '2045', '205', '2050', '207', '21', '210', '211', '215', '22', '221', '223', '225', '227', '229',\r\n '23', '230', '234', '235', '238', '239', '24', '240', '241', '25', '250', '250cc', '2560x1440', '258', '259',\r\n '26', '260', '263', '265', '267', '268', '27', '278', '28', '280', '282', '283', '284', '286', '29',\r\n '2pm', '3', '30', '300', '306', '308', '31', '310', '315', '32', '33', '330', '34', '345', '35', '350',\r\n '359', '36', '360', '369', '37', '370', '378', '38', '386', '387', '39', '399', '3c', '3d', '3s', '4',\r\n '40', '400', '407', '41', '410', '42', '43', '44', '45', '450', '46', '4640', '47', '4720', '48', '480',\r\n '49', '4g', '4minute', '4x2', '4x4', '5', '50', '500', '500c', '508', '50mp', '51', '52', '53', '54', '55',\r\n '550', '56', '560', '57', '58', '59', '595', '5c', '5g', '5s', '5th', '6', '60', '600', '61', '62', '623',\r\n '625', '63', '634', '64', '640', '65', '650', '656', '66', '67', '68', '69', '69053', '6a', '6x6', '7', '70',\r\n '700', '71', '72', '720', '73', '737', '74', '7442', '75', '750', '7569', '76', '77', '78', '79', '8', '80',\r\n '800', '80an', '81', '814', '816', '82', '83', '84', '85', '8500', '86', '865', '86th', '87', '88', '889',\r\n '89', '8gb', '9', '90', '900', '91', '911', '92', '93', '94', '95', '96', '97', '98', '99', 'a', 'a3', 'a320', 'a66s', 'aa']\r\n\r\n vectorizer = TfidfVectorizer(preprocessor=my_preprocessor,tokenizer=my_tokenizer,\r\n stop_words=my_stop_words,min_df=2,max_df=0.95)\r\n data = vectorizer.fit_transform(data)\r\n feature_names = vectorizer.get_feature_names()\r\n \r\n #print (feature_names)\r\n #break\r\n #print (data)\r\n\r\n # 
------------------------------------------\r\n # Model to Transform Data into a Lower Space\r\n # ------------------------------------------\r\n grps = GRP(n_components = 5)\r\n new_data = grps.fit_transform(data)\r\n\r\n # Learning\r\n # --------\r\n for n_topics in range(100,110,10):\r\n print (\"Learning ....\" + str(n_topics))\r\n \r\n #membership (u) calculation in the lower space\r\n m=1.5\r\n cntr, u= fcmeans(new_data.T, n_topics, m, error=0.005, maxiter=1000)\r\n\r\n #centroid (cntr) calculation in the original space\r\n temp = csr_matrix(np.ones((data.shape[1],1)).dot(np.atleast_2d(u.sum(axis=1))).T)\r\n u = csr_matrix(u)\r\n cntr = np.asarray(u.dot(data) / temp)\r\n \r\n ''' \r\n # Find centroids for initialization\r\n svd = TruncatedSVD(n_components = n_topics)\r\n svd.fit(new_data)\r\n cntr = svd.components_\r\n #cntr[cntr<0.001]=0.0\r\n \r\n # Find centroids by FCM\r\n cntr, u = fcmeans(new_data.T, n_topics, m=1.5, error=0.005, maxiter=1000, init=cntr.T)\r\n cntr = np.asarray(cntr)\r\n ''' \r\n # Prints topics\r\n n_top_words = 10\r\n hasil = open('D:\\\\Private Property\\\\Data Kuliah\\\\Akademis\\\\Skripsweet\\\\program\\\\Program1\\\\Program\\\\nyoba\\\\topikgrp' + str(n_topics) + \".txt\", 'w')\r\n for topic_idx, topic in enumerate(cntr):\r\n print(\"Topic \" + str(topic_idx) + \" : \" + \" \".join([feature_names[i] for i in topic.argsort()[:-n_top_words - 1:-1]]))\r\n hasil.write(\"\"+\" \".join([feature_names[i] for i in topic.argsort()[:-n_top_words - 1:-1]]) + \"\\n\")\r\n hasil.close()\r\n"
] | [
[
"sklearn.random_projection.GaussianRandomProjection",
"sklearn.feature_extraction.text.TfidfVectorizer",
"scipy.sparse.csr_matrix",
"numpy.ones"
]
] |
hengma1001/molecules | [
"c6694cc77ef1eb246f3fdab1f201481d1bcaa07c"
] | [
"molecules/utils/callback.py"
] | [
"import os\nimport time\nimport torch\n\ndevice = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\nclass Callback:\n def __init__(self): pass\n def on_train_begin(self, logs): pass\n def on_train_end(self, logs): pass\n def on_epoch_begin(self, epoch, logs): pass\n def on_epoch_end(self, epoch, logs): pass\n def on_batch_begin(self, batch, epoch, logs): pass\n def on_batch_end(self, batch, epoch, logs): pass\n\n\n# TODO: need way to share SummaryWriter among multiple callbacks for a model\n# could make writer global variable\n\nclass LossCallback(Callback):\n def on_train_begin(self, logs):\n #from torch.utils.tensorboard import SummaryWriter\n #self.writer = SummaryWriter()\n\n self.train_losses = []\n self.valid_losses = []\n\n def on_epoch_end(self, epoch, logs):\n\n # self.writer.add_scalar('epoch training loss',\n # logs['train_loss'],\n # logs['global_step'])\n # self.writer.add_scalar('epoch validation loss',\n # logs['valid_loss'],\n # logs['global_step'])\n\n self.train_losses.append(logs['train_loss'])\n self.valid_losses.append(logs['valid_loss'])\n\n def save(self, path):\n \"\"\"\n Save train and validation loss from the end of each epoch.\n\n Parameters\n ----------\n path: str\n Path to save train and validation loss history\n \"\"\"\n torch.save({'loss': self.train_losses, 'valid': self.valid_losses}, path)\n\nclass CheckpointCallback(Callback):\n def __init__(self, interval=0,\n directory=os.path.join('.', 'checkpoints')):\n \"\"\"\n Checkpoint interface for saving dictionary objects to disk\n during training. Typically used to save model state_dict\n and optimizer state_dict in order to resume training and\n record model weight history.\n\n Parameters\n ----------\n directory : str\n Directory to store checkpoint files.\n Files are named 'epoch-{e}-%Y%m%d-%H%M%S.pt'\n\n interval : int\n Checkpoints model every interval batches, default is once per epoch.\n \"\"\"\n\n if interval < 0:\n raise ValueError('Checkpoint interval must be non-negative')\n\n os.makedirs(directory, exist_ok=True)\n\n self.interval = interval\n self.directory = directory\n\n def on_batch_end(self, batch, epoch, logs):\n if self.interval and batch % self.interval == 0:\n self._save(epoch, logs)\n\n def on_epoch_end(self, epoch, logs):\n if not self.interval:\n self._save(epoch, logs)\n\n def _save(self, epoch, logs):\n \"\"\"Saves optimizer state and encoder/decoder weights.\"\"\"\n\n checkpoint = {\n 'encoder_state_dict': logs['model'].encoder.state_dict(),\n 'decoder_state_dict': logs['model'].decoder.state_dict(),\n 'optimizer_state_dict': logs['optimizer'].state_dict(),\n 'epoch': epoch\n }\n\n time_stamp = time.strftime(f'epoch-{epoch}-%Y%m%d-%H%M%S.pt')\n path = os.path.join(self.directory, time_stamp)\n torch.save(checkpoint, path)\n\n\nclass EmbeddingCallback(Callback):\n \"\"\"\n Saves embeddings of random samples.\n\n Parameters\n ----------\n data : torch.Tensor\n Dataset from which to sample for embeddings.\n\n \"\"\"\n def __init__(self, data):\n self.data = data\n\n def on_train_begin(self, logs):\n self.embeddings = []\n self.data_index = []\n\n def on_epoch_end(self, epoch, logs):\n # TODO: may need to change the torch device\n idx = torch.randint(len(self.data), (1,))\n embedding = logs['model'].encode(self.data[idx].to(device))\n self.data_index.append(idx)\n self.embeddings.append(embedding)\n\n def save(self, path):\n \"\"\"\n Save embeddings and index of associated data point.\n\n Parameters\n ----------\n path: str\n Path to save embeddings and indices\n\n 
\"\"\"\n\n torch.save({'embeddings': self.embeddings, 'indices': self.data_index}, path)\n"
] | [
[
"torch.cuda.is_available",
"torch.save"
]
] |
bemrdo/CTF-2019 | [
"424512f7c43278d72091aa737da78907c14f9fc1"
] | [
"watevrCTF-2019/challenges/web/NewPwd/train.py"
] | [
"import requests\nimport urllib.parse\nimport base64\nimport json\nimport io\nimport numpy as np\nfrom PIL import Image\nimport cv2.cv2 as cv\nfrom solve import *\n\ndef combine_and_show_alphabet():\n imgTop = np.empty((50, 0))\n imgBottom = np.empty((50, 0))\n for char in alphabet[:16]:\n imgTop = np.append(imgTop, np.min(trained_key[char], axis=0), axis=1)\n for char in alphabet[16:]:\n imgBottom = np.append(imgBottom, np.min(trained_key[char], axis=0), axis=1)\n img = np.rot90(np.append(np.rot90(imgTop), np.rot90(imgBottom), axis=1), 3)\n cv.imshow(\"alphabet\", img)\n\n\ncombine_and_show_alphabet()\n\nlastchar = 0\ncount = 0\ncheat_amount = 0\n\nwhile True:\n captcha = get_captcha()\n solution = list(captcha[2])\n captcha_no_overlay = remove_overlay(captcha)\n chars = []\n for i in range(5):\n chars.append(captcha_no_overlay[:, i * 40 : (i + 1) * 40])\n\n while len(chars) != 0:\n cv.imshow(\"character\", chars[0])\n if cheat_amount <= 0:\n key = cv.waitKey(0)\n else:\n key = ord(solution[0].lower())\n if key not in [ord(char) for char in alphabet.lower()] + [8, 13, 27, 225]:\n continue\n if key == 8: # backspace\n trained_key[lastchar].pop()\n combine_and_show_alphabet()\n elif key == 27: # escape\n for char in alphabet:\n cv.imwrite(\"training/%s.png\" % char, np.min(trained_key[char], axis=0))\n cv.destroyAllWindows()\n exit()\n elif key == 13: # enter\n for char in alphabet:\n cv.imwrite(\"training/%s.png\" % char, np.min(trained_key[char], axis=0))\n elif key == 225: # left shift\n key = ord(solution[0].lower())\n cheat_amount = 10\n if key not in [8, 13, 27, 225]:\n trained_key[chr(key).upper()].append(chars[0])\n chars.pop(0)\n solution.pop(0)\n lastchar = chr(key).upper()\n combine_and_show_alphabet()\n count += 1\n cheat_amount -= 1\n print(count)\n"
] | [
[
"numpy.rot90",
"numpy.empty",
"numpy.min"
]
] |
ToddSmall/beanmachine | [
"85768bd1785bf6a8b3760a04f37a8fca69b4e4ca"
] | [
"src/beanmachine/ppl/inference/tests/inference_test.py"
] | [
"# Copyright (c) Meta Platforms, Inc. and affiliates.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\nimport math\nimport sys\n\nimport beanmachine.ppl as bm\nimport pytest\nimport torch\nimport torch.distributions as dist\nfrom beanmachine.ppl.inference.proposer.base_proposer import (\n BaseProposer,\n)\nfrom beanmachine.ppl.world import World, init_from_prior\n\n\nclass SampleModel:\n @bm.random_variable\n def foo(self):\n return dist.Normal(0.0, 1.0)\n\n @bm.random_variable\n def bar(self):\n return dist.Normal(self.foo(), 1.0)\n\n @bm.functional\n def baz(self):\n return self.bar() * 2.0\n\n\nclass SampleDoubleModel:\n @bm.random_variable\n def foo(self):\n return dist.Normal(torch.tensor(0.0).double(), torch.tensor(1.0).double())\n\n @bm.random_variable\n def bar(self):\n return dist.Normal(self.foo(), torch.tensor(1.0).double())\n\n\[email protected](\"multiprocess\", [False, True])\ndef test_inference(multiprocess):\n if multiprocess and sys.platform.startswith(\"win\"):\n pytest.skip(\n \"Windows does not support fork-based multiprocessing (which is necessary \"\n \"for running parallel inference within pytest.\"\n )\n\n model = SampleModel()\n mh = bm.SingleSiteAncestralMetropolisHastings()\n queries = [model.foo(), model.baz()]\n observations = {model.bar(): torch.tensor(0.5)}\n num_samples = 30\n num_chains = 2\n samples = mh.infer(\n queries,\n observations,\n num_samples,\n num_adaptive_samples=num_samples,\n num_chains=num_chains,\n run_in_parallel=multiprocess,\n mp_context=\"fork\",\n )\n\n assert model.foo() in samples\n assert isinstance(samples[model.foo()], torch.Tensor)\n assert samples[model.foo()].shape == (num_chains, num_samples)\n assert samples.get_num_samples(include_adapt_steps=True) == num_samples * 2\n # make sure that the RNG state for each chain is different\n assert not torch.equal(\n samples.get_chain(0)[model.foo()], samples.get_chain(1)[model.foo()]\n )\n\n\ndef test_get_proposers():\n world = World()\n model = SampleModel()\n world.call(model.bar())\n nuts = bm.GlobalNoUTurnSampler()\n proposers = nuts.get_proposers(world, world.latent_nodes, 10)\n assert all(isinstance(proposer, BaseProposer) for proposer in proposers)\n\n\ndef test_initialize_world():\n model = SampleModel()\n nuts = bm.GlobalNoUTurnSampler()\n world = nuts._initialize_world([model.bar()], {})\n assert model.foo() in world\n assert model.bar() in world\n\n\ndef test_initialize_from_prior():\n mh = bm.SingleSiteAncestralMetropolisHastings()\n model = SampleModel()\n queries = [model.foo()]\n\n samples_from_prior = []\n for _ in range(10000):\n world = mh._initialize_world(queries, {}, init_from_prior)\n val = world.get(model.foo())\n samples_from_prior.append(val.item())\n\n assert samples_from_prior[0] != samples_from_prior[1]\n assert math.isclose(sum(samples_from_prior) / 10000.0, 0.0, abs_tol=1e-2)\n\n\ndef test_initialization_resampling():\n mh = bm.SingleSiteAncestralMetropolisHastings()\n\n @bm.random_variable\n def foo():\n return dist.Uniform(3.0, 5.0)\n\n # verify that the method re-sample as expected\n retries = 0\n\n def init_after_three_tries(d: dist.Distribution):\n nonlocal retries\n retries += 1\n return torch.tensor(float(\"nan\")) if retries < 3 else d.sample()\n\n sampler = mh.sampler(\n [foo()], {}, num_samples=10, initialize_fn=init_after_three_tries\n )\n for world in sampler:\n assert not torch.isinf(world.log_prob()) and not torch.isnan(world.log_prob())\n\n # an extreme case 
where the init value is always out of the support\n def init_to_zero(d: dist.Distribution):\n return torch.zeros_like(d.sample())\n\n with pytest.raises(ValueError, match=\"Cannot find a valid initialization\"):\n mh.infer([foo()], {}, num_samples=10, initialize_fn=init_to_zero)\n\n\[email protected](\n \"algorithm\",\n [\n bm.GlobalNoUTurnSampler(),\n bm.GlobalHamiltonianMonteCarlo(trajectory_length=1.0),\n bm.SingleSiteAncestralMetropolisHastings(),\n bm.SingleSiteNewtonianMonteCarlo(),\n bm.SingleSiteUniformMetropolisHastings(),\n ],\n)\ndef test_inference_with_double_dtype(algorithm):\n model = SampleDoubleModel()\n queries = [model.foo()]\n bar_val = torch.tensor(0.5).double()\n # make sure that the inference can run successfully\n samples = algorithm.infer(\n queries,\n {model.bar(): bar_val},\n num_samples=20,\n num_chains=1,\n )\n assert samples[model.foo()].dtype == bar_val.dtype\n"
] | [
[
"torch.tensor",
"torch.distributions.Uniform",
"torch.distributions.Normal"
]
] |
hzyjerry/InfoGAIL | [
"89bf3bee42242f4a8a41401d17296773294e6b6a"
] | [
"wgail_info_2/preprocess.py"
] | [
"from keras.applications.resnet50 import ResNet50\nfrom keras.preprocessing import image\nfrom keras.applications.resnet50 import preprocess_input\nfrom keras.models import Model\nimport numpy as np\nimport time\nimport cv2\n\n\ndef collect_demo(path, num_patch, aux_dim, action_dim):\n\n for i in range(num_patch):\n path_patch = path + str(i) + \"/\"\n demo_name = path_patch + \"demo.txt\"\n demo_raw = open(demo_name, 'r').readlines()\n state_name = path_patch + \"states.txt\"\n state_raw = open(state_name, 'r').readlines()\n\n pa = np.zeros(6, dtype=np.float32)\n\n print(\"Loading patch %d ...\" % i)\n for j in range(0, len(demo_raw)):\n action_data = np.array(demo_raw[j].strip().split(\" \")).astype(np.float32)\n state_data = np.array(state_raw[j].strip().split(\" \")).astype(np.float32)\n\n aux = np.expand_dims([state_data[-3], state_data[-1]], axis=0).astype(np.float32)\n action = np.expand_dims(action_data[:], axis=0).astype(np.float32)\n \n img_path = path_patch + str(j) + \".jpg\"\n img = image.load_img(img_path)\n img = image.img_to_array(img)\n img = cv2.resize(img, (256, 256))\n #img = img[40:, :, :]\n\n '''\n if j < 130 and i == 1:\n img_cv2 = cv2.imread(img_path)\n img_cv2 = cv2.resize(img_cv2, (200, 150))\n img_cv2 = img_cv2[40:, :, :]\n cv2.imshow('image', cv2.cvtColor(img, cv2.COLOR_RGB2BGR)/255.0)\n cv2.waitKey(0)\n '''\n img = np.expand_dims(img, axis=0).astype(np.uint8)\n\n\n if j == 0:\n auxs_tmp = aux\n actions_tmp = action\n imgs_tmp = img\n else:\n auxs_tmp = np.concatenate((auxs_tmp, aux), axis=0)\n actions_tmp = np.concatenate((actions_tmp, action), axis=0)\n imgs_tmp = np.concatenate((imgs_tmp, img), axis=0)\n\n if i == 0:\n auxs = auxs_tmp\n actions = actions_tmp\n imgs = imgs_tmp\n else:\n auxs = np.concatenate((auxs, auxs_tmp), axis=0)\n actions = np.concatenate((actions, actions_tmp), axis=0)\n imgs = np.concatenate((imgs, imgs_tmp), axis=0)\n\n print(\"Current total:\", imgs.shape, auxs.shape, actions.shape)\n\n print(\"Images:\", imgs.shape, \"Auxs:\", auxs.shape, \"Actions:\", actions.shape)\n\n return imgs, auxs, actions\n\n\ndef normalize(x):\n x[:, 0:4] /= 200.\n return x\n\n\ndef main():\n aux_dim = 66\n action_dim = 3\n num_patch = 240\n #demo_path = \"/home/yunzhu/Desktop/human_low_case_1/demo_\"\n demo_path = \"/home/zhiyang/Desktop/intention/reacher/rl_demo/demo_\"\n\n imgs, auxs, actions = collect_demo(demo_path, num_patch, aux_dim, action_dim)\n auxs = normalize(auxs)\n\n #np.savez_compressed(\"/home/zhiyang/Desktop/intention/reacher/rl_demo/demo.npz\",\n #imgs=imgs, auxs=auxs, actions=actions)\n print(\"Finished.\")\n\n\nif __name__ == \"__main__\":\n main()\n"
] | [
[
"numpy.concatenate",
"numpy.expand_dims",
"numpy.zeros"
]
] |
anlavandier/dask-image | [
"a858c61ac5beb7de7d7644d7e85714b5c16c2a7a",
"a858c61ac5beb7de7d7644d7e85714b5c16c2a7a"
] | [
"tests/test_dask_image/test_ndfilters/test__conv.py",
"tests/test_dask_image/test_ndfilters/test__gaussian.py"
] | [
"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nimport pytest\nimport numpy as np\nimport scipy.ndimage\n\nimport dask.array as da\n\nimport dask_image.ndfilters\n\n\[email protected](\n \"da_func\",\n [\n (dask_image.ndfilters.convolve),\n (dask_image.ndfilters.correlate),\n ]\n)\[email protected](\n \"err_type, weights, origin\",\n [\n (ValueError, np.ones((1,)), 0),\n (ValueError, np.ones((1, 0)), 0),\n (RuntimeError, np.ones((1, 1)), (0,)),\n (RuntimeError, np.ones((1, 1)), [(0,)]),\n (ValueError, np.ones((1, 1)), 1),\n (TypeError, np.ones((1, 1)), 0.0),\n (TypeError, np.ones((1, 1)), (0.0, 0.0)),\n (TypeError, np.ones((1, 1)), 1+0j),\n (TypeError, np.ones((1, 1)), (0+0j, 1+0j)),\n ]\n)\ndef test_convolutions_params(da_func,\n err_type,\n weights,\n origin):\n a = np.arange(140.0).reshape(10, 14)\n d = da.from_array(a, chunks=(5, 7))\n\n with pytest.raises(err_type):\n da_func(d,\n weights,\n origin=origin)\n\n\[email protected](\n \"da_func\",\n [\n dask_image.ndfilters.convolve,\n dask_image.ndfilters.correlate,\n ]\n)\ndef test_convolutions_shape_type(da_func):\n weights = np.ones((1, 1))\n\n a = np.arange(140.0).reshape(10, 14)\n d = da.from_array(a, chunks=(5, 7))\n\n assert all([(type(s) is int) for s in d.shape])\n\n d2 = da_func(d, weights)\n\n assert all([(type(s) is int) for s in d2.shape])\n\n\[email protected](\n \"da_func\",\n [\n dask_image.ndfilters.convolve,\n dask_image.ndfilters.correlate,\n ]\n)\ndef test_convolutions_comprehensions(da_func):\n np.random.seed(0)\n\n a = np.random.random((3, 12, 14))\n d = da.from_array(a, chunks=(3, 6, 7))\n\n weights = np.ones((1, 1))\n\n l2s = [da_func(d[i], weights) for i in range(len(d))]\n l2c = [da_func(d[i], weights)[None] for i in range(len(d))]\n\n da.utils.assert_eq(np.stack(l2s), da.stack(l2s))\n da.utils.assert_eq(np.concatenate(l2c), da.concatenate(l2c))\n\n\[email protected](\n \"sp_func, da_func\",\n [\n (scipy.ndimage.filters.convolve, dask_image.ndfilters.convolve),\n (scipy.ndimage.filters.correlate, dask_image.ndfilters.correlate),\n ]\n)\[email protected](\n \"weights\",\n [\n np.ones((1, 1)),\n ]\n)\ndef test_convolutions_identity(sp_func,\n da_func,\n weights):\n a = np.arange(140.0).reshape(10, 14)\n d = da.from_array(a, chunks=(5, 7))\n\n da.utils.assert_eq(\n d, da_func(d, weights)\n )\n\n da.utils.assert_eq(\n sp_func(a, weights),\n da_func(d, weights)\n )\n\n\[email protected](\n \"sp_func, da_func\",\n [\n (scipy.ndimage.filters.convolve, dask_image.ndfilters.convolve),\n (scipy.ndimage.filters.correlate, dask_image.ndfilters.correlate),\n ]\n)\[email protected](\n \"weights, origin\",\n [\n (np.ones((2, 2)), 0),\n (np.ones((2, 3)), 0),\n (np.ones((2, 3)), (0, 1)),\n (np.ones((2, 3)), (0, -1)),\n ((np.mgrid[-2: 2+1, -2: 2+1]**2).sum(axis=0) < 2.5**2, 0),\n ((np.mgrid[-2: 2+1, -2: 2+1]**2).sum(axis=0) < 2.5**2, (1, 2)),\n ((np.mgrid[-2: 2+1, -2: 2+1]**2).sum(axis=0) < 2.5**2, (-1, -2)),\n (np.ones((5, 5)), 0),\n (np.ones((7, 7)), 0),\n (np.ones((8, 8)), 0),\n (np.ones((10, 10)), 0),\n (np.ones((5, 5)), 2),\n (np.ones((5, 5)), -2),\n ]\n)\ndef test_convolutions_compare(sp_func,\n da_func,\n weights,\n origin):\n a = np.arange(140.0).reshape(10, 14)\n d = da.from_array(a, chunks=(5, 7))\n\n da.utils.assert_eq(\n sp_func(\n a, weights, origin=origin\n ),\n da_func(\n d, weights, origin=origin\n )\n )\[email protected](\n \"sp_func, da_func\",\n [\n (scipy.ndimage.filters.convolve, dask_image.ndfilters.convolve),\n (scipy.ndimage.filters.correlate, dask_image.ndfilters.correlate),\n ]\n)\[email protected](\n 
\"weights\",\n [\n np.ones((1,5)),\n np.ones((5,1)),\n ]\n)\[email protected](\n \"mode\",\n [\"reflect\",\"wrap\",\"nearest\",\"constant\",\"mirror\"])\ndef test_convolutions_modes(sp_func,\n da_func,\n weights,\n mode):\n a = np.arange(140).reshape(10,14)\n d = da.from_array(a,chunks =(5, 7))\n \n da.utils.assert_eq(\n sp_func(\n a, weights, mode = mode\n ),\n da_func(\n d, weights, mode = mode\n )\n )",
"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nimport pytest\nimport numpy as np\nimport scipy.ndimage\n\nimport dask.array as da\n\nimport dask_image.ndfilters\n\n\[email protected](\n \"err_type, sigma, truncate\",\n [\n (RuntimeError, [[1.0]], 4.0),\n (RuntimeError, [1.0], 4.0),\n (TypeError, 1.0 + 0.0j, 4.0),\n (TypeError, 1.0, 4.0 + 0.0j),\n ]\n)\[email protected](\n \"da_func\",\n [\n dask_image.ndfilters.gaussian_filter,\n dask_image.ndfilters.gaussian_gradient_magnitude,\n dask_image.ndfilters.gaussian_laplace,\n ]\n)\ndef test_gaussian_filters_params(da_func, err_type, sigma, truncate):\n a = np.arange(140.0).reshape(10, 14)\n d = da.from_array(a, chunks=(5, 7))\n\n with pytest.raises(err_type):\n da_func(d, sigma, truncate=truncate)\n\n\[email protected](\n \"sigma, truncate\",\n [\n (0.0, 0.0),\n (0.0, 1.0),\n (0.0, 4.0),\n (1.0, 0.0),\n ]\n)\[email protected](\n \"order\", [0, 1, 2, 3]\n)\[email protected](\n \"sp_func, da_func\",\n [\n (scipy.ndimage.filters.gaussian_filter, dask_image.ndfilters.gaussian_filter), # noqa: E501\n ]\n)\ndef test_gaussian_filters_identity(sp_func, da_func, order, sigma, truncate):\n a = np.arange(140.0).reshape(10, 14)\n d = da.from_array(a, chunks=(5, 7))\n\n if order % 2 == 1 and sigma != 0 and truncate == 0:\n pytest.skip(\n \"SciPy zeros the result of a Gaussian filter with odd derivatives\"\n \" when sigma is non-zero, truncate is zero, and derivative is odd.\"\n \"\\n\\nxref: https://github.com/scipy/scipy/issues/7364\"\n )\n\n da.utils.assert_eq(\n d, da_func(d, sigma, order, truncate=truncate)\n )\n\n da.utils.assert_eq(\n sp_func(a, sigma, order, truncate=truncate),\n da_func(d, sigma, order, truncate=truncate)\n )\n\n\[email protected](\n \"da_func\",\n [\n dask_image.ndfilters.gaussian_filter,\n dask_image.ndfilters.gaussian_gradient_magnitude,\n dask_image.ndfilters.gaussian_laplace,\n ]\n)\ndef test_gaussian_filter_shape_type(da_func):\n sigma = 1.0\n truncate = 4.0\n\n a = np.arange(140.0).reshape(10, 14)\n d = da.from_array(a, chunks=(5, 7))\n\n assert all([(type(s) is int) for s in d.shape])\n\n d2 = da_func(d, sigma=sigma, truncate=truncate)\n\n assert all([(type(s) is int) for s in d2.shape])\n\n\[email protected](\n \"da_func\",\n [\n dask_image.ndfilters.gaussian_filter,\n dask_image.ndfilters.gaussian_gradient_magnitude,\n dask_image.ndfilters.gaussian_laplace,\n ]\n)\ndef test_gaussian_filter_comprehensions(da_func):\n da_wfunc = lambda arr: da_func(arr, 1.0, truncate=4.0) # noqa: E731\n\n np.random.seed(0)\n\n a = np.random.random((3, 12, 14))\n d = da.from_array(a, chunks=(3, 6, 7))\n\n l2s = [da_wfunc(d[i]) for i in range(len(d))]\n l2c = [da_wfunc(d[i])[None] for i in range(len(d))]\n\n da.utils.assert_eq(np.stack(l2s), da.stack(l2s))\n da.utils.assert_eq(np.concatenate(l2c), da.concatenate(l2c))\n\n\[email protected](\n \"sigma, truncate\",\n [\n (1.0, 2.0),\n (1.0, 4.0),\n (2.0, 2.0),\n (2.0, 4.0),\n ((1.0, 2.0), 4.0),\n ]\n)\[email protected](\n \"sp_func, da_func\",\n [\n (scipy.ndimage.filters.gaussian_filter,\n dask_image.ndfilters.gaussian_filter),\n (scipy.ndimage.filters.gaussian_gradient_magnitude,\n dask_image.ndfilters.gaussian_gradient_magnitude),\n (scipy.ndimage.filters.gaussian_laplace,\n dask_image.ndfilters.gaussian_laplace),\n ]\n)\ndef test_gaussian_filters_compare(sp_func, da_func, sigma, truncate):\n s = (100, 110)\n a = np.arange(float(np.prod(s))).reshape(s)\n d = da.from_array(a, chunks=(50, 55))\n\n da.utils.assert_eq(\n sp_func(a, sigma, truncate=truncate),\n da_func(d, sigma, 
truncate=truncate)\n )\n\n\n@pytest.mark.parametrize(\n \"sigma, truncate\",\n [\n (0.0, 0.0),\n (1.0, 0.0),\n (0.0, 1.0),\n (1.0, 2.0),\n (1.0, 4.0),\n (2.0, 2.0),\n (2.0, 4.0),\n ((1.0, 2.0), 4.0),\n ]\n)\n@pytest.mark.parametrize(\n \"order\", [\n 0,\n 1,\n 2,\n 3,\n (0, 1),\n (2, 3),\n ]\n)\n@pytest.mark.parametrize(\n \"sp_func, da_func\",\n [\n (scipy.ndimage.filters.gaussian_filter, dask_image.ndfilters.gaussian_filter), # noqa: E501\n ]\n)\ndef test_gaussian_derivative_filters_compare(sp_func, da_func,\n order, sigma, truncate):\n s = (100, 110)\n a = np.arange(float(np.prod(s))).reshape(s)\n d = da.from_array(a, chunks=(50, 55))\n\n da.utils.assert_eq(\n sp_func(a, sigma, order, truncate=truncate),\n da_func(d, sigma, order, truncate=truncate)\n )\n"
] | [
[
"numpy.random.random",
"numpy.random.seed",
"numpy.arange",
"numpy.stack",
"numpy.ones",
"numpy.concatenate"
],
[
"numpy.random.random",
"numpy.random.seed",
"numpy.arange",
"numpy.stack",
"numpy.concatenate",
"numpy.prod"
]
] |
ViniViniAntunes/Prevendo_Valor_de_Aluguel_em_SP | [
"e37d54da0b2c8ce3c6ddb4ec45191b069834427c"
] | [
"Previsao_valor_aluguel/app.py"
] | [
"# Importando as bibliotecas necessárias\nimport pandas as pd\nimport streamlit as st\nimport plotly.express as px\nfrom sklearn.ensemble import RandomForestRegressor\n\n# Criando uma função para carregar o dataset\n#@st.cache # Notação para ficar em cache\ndef get_data():\n return pd.read_csv(\"model/data_deploy.csv\")\n\n# Criando uma função para treinar o nosso modelo\ndef train_model():\n data = get_data()\n X = data.drop([\"valor\", \"bairro\"], axis=1)\n y = data[\"valor\"]\n rf_regressor = RandomForestRegressor(n_estimators=100)\n rf_regressor.fit(X, y)\n return rf_regressor\n\ndef get_villages_and_id():\n data = get_data()\n names_and_id = dict(zip(data['bairro'], data['bairro_id']))\n return names_and_id\n\ndef return_id_village(village):\n return get_villages_and_id()[village]\n\n# Armazenando o dataframe na variável 'data'\ndata = get_data().drop(\"bairro_id\", axis=1)\n\n# Treinando o modelo\nmodel = train_model()\n\n# Configurando o título do Data App\nst.title(\"Data App - Prevendo Valores de Imóveis\")\n\n# Configurando o subtítulo do data app\nst.markdown(\"Este é um Data App utilizado para exibir a solução de Machine Learning que prevê valores de aluguel de apartamentos na capital de São Paulo.\")\nst.markdown('Criado por: Vini Antunes')\nst.markdown('LinkedIn: https://www.linkedin.com/in/vini-antunes')\n\n# Verificando o dataset\nst.subheader(\"Selecionando apenas um pequeno conjunto de atributos\")\n\n# Selecionando atributos para serem exibidos por padrão\ndefault_cols = [\"quartos\",\"bairro\",\"valor\"]\n\n# Defindo atributos a partir do multiselect\ncols = st.multiselect(\"Atributos\", data.columns.tolist(), default=default_cols)\n\n# Exibindo os top 10 registros do DataFrame\nst.dataframe(data[cols].head(10))\n\n# Configurando outro subtítulo\nst.subheader(\"Distribuição de imóveis por preço do aluguel\")\n\n# Definindo a faixa de valores\nfaixa_valores = st.slider(\"Faixa de preço\", float(data['valor'].min()), float(data['valor'].max()), (1000.0, 2000.0))\n\n# Filtrando os dados\nfiltered_data = data[data['valor'].between(left=faixa_valores[0], right=faixa_valores[1])]\n\n# Plotando a distribuição dos dados\nf = px.histogram(filtered_data, x=\"valor\", nbins=20, title=\"Distribuição de Preços do Aluguel\")\nf.update_xaxes(title=\"valor\")\nf.update_yaxes(title=\"Total Imóveis\")\nst.plotly_chart(f)\n\n# Configurando subtítulo da lateral\nst.sidebar.subheader(\"Defina os atributos do imóvel para predição\")\n\n####### Mapeando dados #######\n# Armazena os nomes dos bairros e seus respectivos ids\nvillages = get_villages_and_id().keys()\n\n# Selecionando o bairro\nvillage = st.sidebar.selectbox(\"Em qual bairro?\", sorted(list(villages)))\n\n# Trocando o nome do bairro' pelo seus respectivo id\nid_village = return_id_village(village)\n\n# Selecionando a área do apartamento\narea = st.sidebar.number_input(\"Área (em m²)?\", min_value=float(data['area'].min()), max_value=float(data['area'].max()), step=1.0, format=\"%.0f\")\n\n# Selecionando a quantidade de quartos\nrooms = st.sidebar.number_input(\"Quantos quartos?\", min_value=float(data['quartos'].min()), max_value=float(data['quartos'].max()), step=1.0, format=\"%.0f\")\n\n# Selecionando a quantidade de suites\nsuites = st.sidebar.number_input(\"Quantas suítes?\", min_value=float(data['suites'].min()), max_value=float(data['suites'].max()), step=1.0, format=\"%.0f\")\n\n# Selecionando a quantidade de vagas de garagem\nparking_spaces = st.sidebar.number_input(\"Quantas vagas de garagem?\", 
min_value=float(data['vagas'].min()), max_value=float(data['vagas'].max()), step=1.0, format=\"%.0f\")\n\n# inserindo um botão na tela\nbtn_predict = st.sidebar.button(\"Realizar Predição\")\n\n# verifica se o botão foi acionado\nif btn_predict:\n result = model.predict([[area, rooms, suites, parking_spaces, id_village]])\n st.sidebar.subheader(\"O valor previsto para do aluguel é:\")\n st.sidebar.subheader(\"\")\n result = f\"R$ {str(round(result[0], 2))}\"\n st.sidebar.subheader(result)"
] | [
[
"sklearn.ensemble.RandomForestRegressor",
"pandas.read_csv"
]
] |
LucasLorenc/tensorflow | [
"10a7b61cdf55d13c85c2a3cc5ca669e3d9ea8e11"
] | [
"tensorflow/python/keras/layers/core.py"
] | [
"# Copyright 2015 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Core Keras layers.\n\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport copy\nimport sys\nimport types as python_types\nimport warnings\n\nimport numpy as np\n\nfrom tensorflow.python.eager import context\nfrom tensorflow.python.framework import constant_op\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.framework import tensor_shape\nfrom tensorflow.python.keras import activations\nfrom tensorflow.python.keras import backend as K\nfrom tensorflow.python.keras import constraints\nfrom tensorflow.python.keras import initializers\nfrom tensorflow.python.keras import regularizers\nfrom tensorflow.python.keras.engine.base_layer import Layer\nfrom tensorflow.python.keras.engine.input_spec import InputSpec\nfrom tensorflow.python.keras.utils import conv_utils\nfrom tensorflow.python.keras.utils import generic_utils\nfrom tensorflow.python.keras.utils import tf_utils\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import gen_math_ops\nfrom tensorflow.python.ops import math_ops\nfrom tensorflow.python.ops import nn\nfrom tensorflow.python.ops import sparse_ops\nfrom tensorflow.python.ops import standard_ops\nfrom tensorflow.python.ops import variable_scope\nfrom tensorflow.python.util import nest\nfrom tensorflow.python.util import tf_inspect\nfrom tensorflow.python.util.tf_export import keras_export\n\n\n@keras_export('keras.layers.Masking')\nclass Masking(Layer):\n \"\"\"Masks a sequence by using a mask value to skip timesteps.\n\n For each timestep in the input tensor (dimension #1 in the tensor),\n if all values in the input tensor at that timestep\n are equal to `mask_value`, then the timestep will be masked (skipped)\n in all downstream layers (as long as they support masking).\n\n If any downstream layer does not support masking yet receives such\n an input mask, an exception will be raised.\n\n Example:\n\n Consider a Numpy data array `x` of shape `(samples, timesteps, features)`,\n to be fed to an LSTM layer. You want to mask timestep #3 and #5 because you\n lack data for these timesteps. 
You can:\n\n - Set `x[:, 3, :] = 0.` and `x[:, 5, :] = 0.`\n - Insert a `Masking` layer with `mask_value=0.` before the LSTM layer:\n\n ```python\n samples, timesteps, features = 32, 10, 8\n inputs = np.random.random([samples, timesteps, features]).astype(np.float32)\n inputs[:, 3, :] = 0.\n inputs[:, 5, :] = 0.\n\n model = tf.keras.models.Sequential()\n model.add(tf.keras.layers.Masking(mask_value=0.,\n input_shape=(timesteps, features)))\n model.add(tf.keras.layers.LSTM(32))\n\n output = model(inputs)\n # The time step 3 and 5 will be skipped from LSTM calculation.\n ```\n\n See [the masking and padding\n guide](https://www.tensorflow.org/guide/keras/masking_and_padding)\n for more details.\n \"\"\"\n\n def __init__(self, mask_value=0., **kwargs):\n super(Masking, self).__init__(**kwargs)\n self.supports_masking = True\n self.mask_value = mask_value\n self._compute_output_and_mask_jointly = True\n\n def compute_mask(self, inputs, mask=None):\n return K.any(math_ops.not_equal(inputs, self.mask_value), axis=-1)\n\n def call(self, inputs):\n boolean_mask = K.any(\n math_ops.not_equal(inputs, self.mask_value), axis=-1, keepdims=True)\n outputs = inputs * math_ops.cast(boolean_mask, inputs.dtype)\n # Compute the mask and outputs simultaneously.\n outputs._keras_mask = array_ops.squeeze(boolean_mask, axis=-1) # pylint: disable=protected-access\n return outputs\n\n def compute_output_shape(self, input_shape):\n return input_shape\n\n def get_config(self):\n config = {'mask_value': self.mask_value}\n base_config = super(Masking, self).get_config()\n return dict(list(base_config.items()) + list(config.items()))\n\n\n@keras_export('keras.layers.Dropout')\nclass Dropout(Layer):\n \"\"\"Applies Dropout to the input.\n\n Dropout consists in randomly setting\n a fraction `rate` of input units to 0 at each update during training time,\n which helps prevent overfitting.\n\n Arguments:\n rate: Float between 0 and 1. 
Fraction of the input units to drop.\n noise_shape: 1D integer tensor representing the shape of the\n binary dropout mask that will be multiplied with the input.\n For instance, if your inputs have shape\n `(batch_size, timesteps, features)` and\n you want the dropout mask to be the same for all timesteps,\n you can use `noise_shape=(batch_size, 1, features)`.\n seed: A Python integer to use as random seed.\n\n Call arguments:\n inputs: Input tensor (of any rank).\n training: Python boolean indicating whether the layer should behave in\n training mode (adding dropout) or in inference mode (doing nothing).\n \"\"\"\n\n def __init__(self, rate, noise_shape=None, seed=None, **kwargs):\n super(Dropout, self).__init__(**kwargs)\n self.rate = rate\n self.noise_shape = noise_shape\n self.seed = seed\n self.supports_masking = True\n\n def _get_noise_shape(self, inputs):\n # Subclasses of `Dropout` may implement `_get_noise_shape(self, inputs)`,\n # which will override `self.noise_shape`, and allows for custom noise\n # shapes with dynamically sized inputs.\n if self.noise_shape is None:\n return None\n\n concrete_inputs_shape = array_ops.shape(inputs)\n noise_shape = []\n for i, value in enumerate(self.noise_shape):\n noise_shape.append(concrete_inputs_shape[i] if value is None else value)\n return ops.convert_to_tensor(noise_shape)\n\n def call(self, inputs, training=None):\n if training is None:\n training = K.learning_phase()\n\n def dropped_inputs():\n return nn.dropout(\n inputs,\n noise_shape=self._get_noise_shape(inputs),\n seed=self.seed,\n rate=self.rate)\n\n output = tf_utils.smart_cond(training,\n dropped_inputs,\n lambda: array_ops.identity(inputs))\n return output\n\n def compute_output_shape(self, input_shape):\n return input_shape\n\n def get_config(self):\n config = {\n 'rate': self.rate,\n 'noise_shape': self.noise_shape,\n 'seed': self.seed\n }\n base_config = super(Dropout, self).get_config()\n return dict(list(base_config.items()) + list(config.items()))\n\n\n@keras_export('keras.layers.SpatialDropout1D')\nclass SpatialDropout1D(Dropout):\n \"\"\"Spatial 1D version of Dropout.\n\n This version performs the same function as Dropout, however it drops\n entire 1D feature maps instead of individual elements. If adjacent frames\n within feature maps are strongly correlated (as is normally the case in\n early convolution layers) then regular dropout will not regularize the\n activations and will otherwise just result in an effective learning rate\n decrease. In this case, SpatialDropout1D will help promote independence\n between feature maps and should be used instead.\n\n Arguments:\n rate: Float between 0 and 1. 
Fraction of the input units to drop.\n\n Call arguments:\n inputs: A 3D tensor.\n training: Python boolean indicating whether the layer should behave in\n training mode (adding dropout) or in inference mode (doing nothing).\n\n Input shape:\n 3D tensor with shape:\n `(samples, timesteps, channels)`\n\n Output shape:\n Same as input.\n\n References:\n - [Efficient Object Localization Using Convolutional\n Networks](https://arxiv.org/abs/1411.4280)\n \"\"\"\n\n def __init__(self, rate, **kwargs):\n super(SpatialDropout1D, self).__init__(rate, **kwargs)\n self.input_spec = InputSpec(ndim=3)\n\n def _get_noise_shape(self, inputs):\n input_shape = array_ops.shape(inputs)\n noise_shape = (input_shape[0], 1, input_shape[2])\n return noise_shape\n\n\n@keras_export('keras.layers.SpatialDropout2D')\nclass SpatialDropout2D(Dropout):\n \"\"\"Spatial 2D version of Dropout.\n\n This version performs the same function as Dropout, however it drops\n entire 2D feature maps instead of individual elements. If adjacent pixels\n within feature maps are strongly correlated (as is normally the case in\n early convolution layers) then regular dropout will not regularize the\n activations and will otherwise just result in an effective learning rate\n decrease. In this case, SpatialDropout2D will help promote independence\n between feature maps and should be used instead.\n\n Arguments:\n rate: Float between 0 and 1. Fraction of the input units to drop.\n data_format: 'channels_first' or 'channels_last'.\n In 'channels_first' mode, the channels dimension\n (the depth) is at index 1,\n in 'channels_last' mode is it at index 3.\n It defaults to the `image_data_format` value found in your\n Keras config file at `~/.keras/keras.json`.\n If you never set it, then it will be \"channels_last\".\n\n Call arguments:\n inputs: A 4D tensor.\n training: Python boolean indicating whether the layer should behave in\n training mode (adding dropout) or in inference mode (doing nothing).\n\n Input shape:\n 4D tensor with shape:\n `(samples, channels, rows, cols)` if data_format='channels_first'\n or 4D tensor with shape:\n `(samples, rows, cols, channels)` if data_format='channels_last'.\n\n Output shape:\n Same as input.\n\n References:\n - [Efficient Object Localization Using Convolutional\n Networks](https://arxiv.org/abs/1411.4280)\n \"\"\"\n\n def __init__(self, rate, data_format=None, **kwargs):\n super(SpatialDropout2D, self).__init__(rate, **kwargs)\n if data_format is None:\n data_format = K.image_data_format()\n if data_format not in {'channels_last', 'channels_first'}:\n raise ValueError('data_format must be in '\n '{\"channels_last\", \"channels_first\"}')\n self.data_format = data_format\n self.input_spec = InputSpec(ndim=4)\n\n def _get_noise_shape(self, inputs):\n input_shape = array_ops.shape(inputs)\n if self.data_format == 'channels_first':\n return (input_shape[0], input_shape[1], 1, 1)\n elif self.data_format == 'channels_last':\n return (input_shape[0], 1, 1, input_shape[3])\n\n\n@keras_export('keras.layers.SpatialDropout3D')\nclass SpatialDropout3D(Dropout):\n \"\"\"Spatial 3D version of Dropout.\n\n This version performs the same function as Dropout, however it drops\n entire 3D feature maps instead of individual elements. If adjacent voxels\n within feature maps are strongly correlated (as is normally the case in\n early convolution layers) then regular dropout will not regularize the\n activations and will otherwise just result in an effective learning rate\n decrease. 
In this case, SpatialDropout3D will help promote independence\n between feature maps and should be used instead.\n\n Arguments:\n rate: Float between 0 and 1. Fraction of the input units to drop.\n data_format: 'channels_first' or 'channels_last'.\n In 'channels_first' mode, the channels dimension (the depth)\n is at index 1, in 'channels_last' mode is it at index 4.\n It defaults to the `image_data_format` value found in your\n Keras config file at `~/.keras/keras.json`.\n If you never set it, then it will be \"channels_last\".\n\n Call arguments:\n inputs: A 5D tensor.\n training: Python boolean indicating whether the layer should behave in\n training mode (adding dropout) or in inference mode (doing nothing).\n\n Input shape:\n 5D tensor with shape:\n `(samples, channels, dim1, dim2, dim3)` if data_format='channels_first'\n or 5D tensor with shape:\n `(samples, dim1, dim2, dim3, channels)` if data_format='channels_last'.\n\n Output shape:\n Same as input.\n\n References:\n - [Efficient Object Localization Using Convolutional\n Networks](https://arxiv.org/abs/1411.4280)\n \"\"\"\n\n def __init__(self, rate, data_format=None, **kwargs):\n super(SpatialDropout3D, self).__init__(rate, **kwargs)\n if data_format is None:\n data_format = K.image_data_format()\n if data_format not in {'channels_last', 'channels_first'}:\n raise ValueError('data_format must be in '\n '{\"channels_last\", \"channels_first\"}')\n self.data_format = data_format\n self.input_spec = InputSpec(ndim=5)\n\n def _get_noise_shape(self, inputs):\n input_shape = array_ops.shape(inputs)\n if self.data_format == 'channels_first':\n return (input_shape[0], input_shape[1], 1, 1, 1)\n elif self.data_format == 'channels_last':\n return (input_shape[0], 1, 1, 1, input_shape[4])\n\n\n@keras_export('keras.layers.Activation')\nclass Activation(Layer):\n \"\"\"Applies an activation function to an output.\n\n Arguments:\n activation: Activation function, such as `tf.nn.relu`, or string name of\n built-in activation function, such as \"relu\".\n\n Input shape:\n Arbitrary. Use the keyword argument `input_shape`\n (tuple of integers, does not include the samples axis)\n when using this layer as the first layer in a model.\n\n Output shape:\n Same shape as input.\n \"\"\"\n\n def __init__(self, activation, **kwargs):\n super(Activation, self).__init__(**kwargs)\n self.supports_masking = True\n self.activation = activations.get(activation)\n\n def call(self, inputs):\n return self.activation(inputs)\n\n def compute_output_shape(self, input_shape):\n return input_shape\n\n def get_config(self):\n config = {'activation': activations.serialize(self.activation)}\n base_config = super(Activation, self).get_config()\n return dict(list(base_config.items()) + list(config.items()))\n\n\n@keras_export('keras.layers.Reshape')\nclass Reshape(Layer):\n \"\"\"Reshapes an output to a certain shape.\n\n Arguments:\n target_shape: Target shape. 
Tuple of integers,\n does not include the samples dimension (batch size).\n\n Input shape:\n Arbitrary, although all dimensions in the input shaped must be fixed.\n Use the keyword argument `input_shape`\n (tuple of integers, does not include the samples axis)\n when using this layer as the first layer in a model.\n\n Output shape:\n `(batch_size,) + target_shape`\n\n Example:\n\n ```python\n # as first layer in a Sequential model\n model = Sequential()\n model.add(Reshape((3, 4), input_shape=(12,)))\n # now: model.output_shape == (None, 3, 4)\n # note: `None` is the batch dimension\n\n # as intermediate layer in a Sequential model\n model.add(Reshape((6, 2)))\n # now: model.output_shape == (None, 6, 2)\n\n # also supports shape inference using `-1` as dimension\n model.add(Reshape((-1, 2, 2)))\n # now: model.output_shape == (None, None, 2, 2)\n ```\n \"\"\"\n\n def __init__(self, target_shape, **kwargs):\n super(Reshape, self).__init__(**kwargs)\n self.target_shape = tuple(target_shape)\n\n def _fix_unknown_dimension(self, input_shape, output_shape):\n \"\"\"Find and replace a missing dimension in an output shape.\n\n This is a near direct port of the internal Numpy function\n `_fix_unknown_dimension` in `numpy/core/src/multiarray/shape.c`\n\n Arguments:\n input_shape: Shape of array being reshaped\n output_shape: Desired shape of the array with at most\n a single -1 which indicates a dimension that should be\n derived from the input shape.\n\n Returns:\n The new output shape with a -1 replaced with its computed value.\n\n Raises:\n ValueError: If the total array size of the output_shape is\n different than the input_shape, or more than one unknown dimension\n is specified.\n \"\"\"\n output_shape = list(output_shape)\n msg = 'total size of new array must be unchanged'\n\n known, unknown = 1, None\n for index, dim in enumerate(output_shape):\n if dim < 0:\n if unknown is None:\n unknown = index\n else:\n raise ValueError('Can only specify one unknown dimension.')\n else:\n known *= dim\n\n original = np.prod(input_shape, dtype=int)\n if unknown is not None:\n if known == 0 or original % known != 0:\n raise ValueError(msg)\n output_shape[unknown] = original // known\n elif original != known:\n raise ValueError(msg)\n return output_shape\n\n def compute_output_shape(self, input_shape):\n input_shape = tensor_shape.TensorShape(input_shape).as_list()\n if None in input_shape[1:]:\n output_shape = [input_shape[0]]\n # input shape (partially) unknown? replace -1's with None's\n output_shape += tuple(s if s != -1 else None for s in self.target_shape)\n else:\n output_shape = [input_shape[0]]\n output_shape += self._fix_unknown_dimension(input_shape[1:],\n self.target_shape)\n return tensor_shape.TensorShape(output_shape)\n\n def call(self, inputs):\n return array_ops.reshape(inputs,\n (array_ops.shape(inputs)[0],) + self.target_shape)\n\n def get_config(self):\n config = {'target_shape': self.target_shape}\n base_config = super(Reshape, self).get_config()\n return dict(list(base_config.items()) + list(config.items()))\n\n\n@keras_export('keras.layers.Permute')\nclass Permute(Layer):\n \"\"\"Permutes the dimensions of the input according to a given pattern.\n\n Useful for e.g. connecting RNNs and convnets together.\n\n Example:\n\n ```python\n model = Sequential()\n model.add(Permute((2, 1), input_shape=(10, 64)))\n # now: model.output_shape == (None, 64, 10)\n # note: `None` is the batch dimension\n ```\n\n Arguments:\n dims: Tuple of integers. 
Permutation pattern, does not include the\n samples dimension. Indexing starts at 1.\n For instance, `(2, 1)` permutes the first and second dimensions\n of the input.\n\n Input shape:\n Arbitrary. Use the keyword argument `input_shape`\n (tuple of integers, does not include the samples axis)\n when using this layer as the first layer in a model.\n\n Output shape:\n Same as the input shape, but with the dimensions re-ordered according\n to the specified pattern.\n \"\"\"\n\n def __init__(self, dims, **kwargs):\n super(Permute, self).__init__(**kwargs)\n self.dims = tuple(dims)\n if sorted(dims) != list(range(1, len(dims) + 1)):\n raise ValueError(\n 'Invalid permutation `dims` for Permute Layer: %s. '\n 'The set of indices in `dims` must be consecutive and start from 1.' %\n (dims,))\n self.input_spec = InputSpec(ndim=len(self.dims) + 1)\n\n def compute_output_shape(self, input_shape):\n input_shape = tensor_shape.TensorShape(input_shape).as_list()\n output_shape = copy.copy(input_shape)\n for i, dim in enumerate(self.dims):\n target_dim = input_shape[dim]\n output_shape[i + 1] = target_dim\n return tensor_shape.TensorShape(output_shape)\n\n def call(self, inputs):\n return array_ops.transpose(inputs, perm=(0,) + self.dims)\n\n def get_config(self):\n config = {'dims': self.dims}\n base_config = super(Permute, self).get_config()\n return dict(list(base_config.items()) + list(config.items()))\n\n\n@keras_export('keras.layers.Flatten')\nclass Flatten(Layer):\n \"\"\"Flattens the input. Does not affect the batch size.\n\n If inputs are shaped `(batch,)` without a channel dimension, then flattening\n adds an extra channel dimension and output shapes are `(batch, 1)`.\n\n Arguments:\n data_format: A string,\n one of `channels_last` (default) or `channels_first`.\n The ordering of the dimensions in the inputs.\n `channels_last` corresponds to inputs with shape\n `(batch, ..., channels)` while `channels_first` corresponds to\n inputs with shape `(batch, channels, ...)`.\n It defaults to the `image_data_format` value found in your\n Keras config file at `~/.keras/keras.json`.\n If you never set it, then it will be \"channels_last\".\n\n Example:\n\n ```python\n model = Sequential()\n model.add(Convolution2D(64, 3, 3,\n border_mode='same',\n input_shape=(3, 32, 32)))\n # now: model.output_shape == (None, 64, 32, 32)\n\n model.add(Flatten())\n # now: model.output_shape == (None, 65536)\n ```\n \"\"\"\n\n def __init__(self, data_format=None, **kwargs):\n super(Flatten, self).__init__(**kwargs)\n self.data_format = conv_utils.normalize_data_format(data_format)\n self.input_spec = InputSpec(min_ndim=1)\n\n def call(self, inputs):\n if (self.data_format == 'channels_first'\n and K.ndim(inputs) is not None and K.ndim(inputs) > 1):\n permutation = [0]\n permutation.extend([i for i in\n range(2, K.ndim(inputs))])\n permutation.append(1)\n inputs = array_ops.transpose(inputs, perm=permutation)\n\n input_shape = inputs.shape\n if input_shape[1:].is_fully_defined():\n flattened_dim = tensor_shape.dimension_value(\n np.prod(input_shape[1:], dtype=int))\n # Temporary fix for integer overflow issue.\n if flattened_dim > np.iinfo(np.int32).max:\n shape_dtype = dtypes.int64\n else:\n shape_dtype = dtypes.int32\n outputs = array_ops.reshape(\n inputs, constant_op.constant((-1, flattened_dim), dtype=shape_dtype))\n else:\n batch_size = tensor_shape.dimension_value(inputs.shape[0])\n if batch_size:\n # Temporary fix for integer overflow issue.\n if batch_size > np.iinfo(np.int32).max:\n shape_dtype = dtypes.int64\n 
else:\n shape_dtype = dtypes.int32\n outputs = array_ops.reshape(\n inputs, constant_op.constant((batch_size, -1), dtype=shape_dtype))\n else:\n outputs = array_ops.reshape(inputs, (array_ops.shape(inputs)[0], -1))\n if not context.executing_eagerly():\n outputs.set_shape(self.compute_output_shape(inputs.shape))\n return outputs\n\n def compute_output_shape(self, input_shape):\n input_shape = tensor_shape.as_shape(input_shape).as_list()\n if not input_shape:\n output_shape = tensor_shape.TensorShape([1])\n else:\n output_shape = [input_shape[0]]\n if all(input_shape[1:]):\n output_shape += [np.prod(input_shape[1:], dtype=int)]\n else:\n output_shape += [None]\n return tensor_shape.TensorShape(output_shape)\n\n def get_config(self):\n config = {'data_format': self.data_format}\n base_config = super(Flatten, self).get_config()\n return dict(list(base_config.items()) + list(config.items()))\n\n\n@keras_export('keras.layers.RepeatVector')\nclass RepeatVector(Layer):\n \"\"\"Repeats the input n times.\n\n Example:\n\n ```python\n model = Sequential()\n model.add(Dense(32, input_dim=32))\n # now: model.output_shape == (None, 32)\n # note: `None` is the batch dimension\n\n model.add(RepeatVector(3))\n # now: model.output_shape == (None, 3, 32)\n ```\n\n Arguments:\n n: Integer, repetition factor.\n\n Input shape:\n 2D tensor of shape `(num_samples, features)`.\n\n Output shape:\n 3D tensor of shape `(num_samples, n, features)`.\n \"\"\"\n\n def __init__(self, n, **kwargs):\n super(RepeatVector, self).__init__(**kwargs)\n self.n = n\n self.input_spec = InputSpec(ndim=2)\n\n def compute_output_shape(self, input_shape):\n input_shape = tensor_shape.TensorShape(input_shape).as_list()\n return tensor_shape.TensorShape([input_shape[0], self.n, input_shape[1]])\n\n def call(self, inputs):\n return K.repeat(inputs, self.n)\n\n def get_config(self):\n config = {'n': self.n}\n base_config = super(RepeatVector, self).get_config()\n return dict(list(base_config.items()) + list(config.items()))\n\n\n@keras_export('keras.layers.Lambda')\nclass Lambda(Layer):\n \"\"\"Wraps arbitrary expressions as a `Layer` object.\n\n The `Lambda` layer exists so that arbitrary TensorFlow functions\n can be used when constructing `Sequential` and Functional API\n models. `Lambda` layers are best suited for simple operations or\n quick experimentation. For more advanced usecases, follow \n [this guide](https://www.tensorflow.org/alpha/guide/keras/custom_layers_and_models) \n for subclassing `tf.keras.layers.Layer`. \n \n The main reason to subclass `tf.keras.layers.Layer` instead of using a \n `Lambda` layer is saving and inspecting a Model. `Lambda` layers \n are saved by serializing the Python bytecode, whereas subclassed \n Layers can be saved via overriding their `get_config` method. Overriding \n `get_config` improves the portability of Models. Models that rely on \n subclassed Layers are also often easier to visualize and reason about.\n\n Examples:\n\n ```python\n # add a x -> x^2 layer\n model.add(Lambda(lambda x: x ** 2))\n ```\n ```python\n # add a layer that returns the concatenation\n # of the positive part of the input and\n # the opposite of the negative part\n\n def antirectifier(x):\n x -= K.mean(x, axis=1, keepdims=True)\n x = K.l2_normalize(x, axis=1)\n pos = K.relu(x)\n neg = K.relu(-x)\n return K.concatenate([pos, neg], axis=1)\n\n model.add(Lambda(antirectifier))\n ```\n\n Variables can be created within a `Lambda` layer. 
Like with\n other layers, these variables will be created only once and reused\n if the `Lambda` layer is called on new inputs. If creating more\n than one variable in a given `Lambda` instance, be sure to use\n a different name for each variable. Note that calling sublayers\n from within a `Lambda` is not supported.\n\n Example of variable creation:\n\n ```python\n def linear_transform(x):\n v1 = tf.Variable(1., name='multiplier')\n v2 = tf.Variable(0., name='bias')\n return x*v1 + v2\n\n linear_layer = Lambda(linear_transform)\n model.add(linear_layer)\n model.add(keras.layers.Dense(10, activation='relu'))\n model.add(linear_layer) # Reuses existing Variables\n ```\n\n Note that creating two instances of `Lambda` using the same function\n will *not* share Variables between the two instances. Each instance of\n `Lambda` will create and manage its own weights.\n\n Arguments:\n function: The function to be evaluated. Takes input tensor as first\n argument.\n output_shape: Expected output shape from function. This argument can be\n inferred if not explicitly provided. Can be a tuple or function. If a\n tuple, it only specifies the first dimension onward;\n sample dimension is assumed either the same as the input: `output_shape =\n (input_shape[0], ) + output_shape` or, the input is `None` and\n the sample dimension is also `None`: `output_shape = (None, ) +\n output_shape` If a function, it specifies the entire shape as a function\n of the\n input shape: `output_shape = f(input_shape)`\n mask: Either None (indicating no masking) or a callable with the same\n signature as the `compute_mask` layer method, or a tensor that will be\n returned as output mask regardless what the input is.\n arguments: Optional dictionary of keyword arguments to be passed to the\n function.\n Input shape: Arbitrary. Use the keyword argument input_shape (tuple of\n integers, does not include the samples axis) when using this layer as the\n first layer in a model.\n Output shape: Specified by `output_shape` argument\n \"\"\"\n\n def __init__(self, function, output_shape=None, mask=None, arguments=None,\n **kwargs):\n super(Lambda, self).__init__(**kwargs)\n self.function = function\n self.arguments = arguments if arguments else {}\n if mask is not None:\n self.supports_masking = True\n self.mask = mask\n self._supports_ragged_inputs = True\n self._output_shape = output_shape\n self._variable_dict = {}\n # These attributes are inherited from `Layer`.\n self._trainable_weights = []\n self._non_trainable_weights = []\n\n function_args = tf_inspect.getfullargspec(self.function).args\n self._fn_expects_training_arg = 'training' in function_args\n self._fn_expects_mask_arg = 'mask' in function_args\n\n @tf_utils.shape_type_conversion\n def compute_output_shape(self, input_shape):\n if self._output_shape is None:\n # Make use of existing autocomputation but provide Lambda-specific\n # error message. This is always safe to run even when the outer context\n # is Graph mode because Lambda layers don't have side effects such as\n # `add_loss`.\n with context.eager_mode():\n try:\n return super(Lambda, self).compute_output_shape(input_shape)\n except NotImplementedError:\n raise NotImplementedError(\n 'We could not automatically infer the shape of the Lambda\\'s '\n 'output. 
Please specify `output_shape` for this Lambda.')\n\n if callable(self._output_shape):\n output_shapes = self._output_shape(input_shape)\n return tf_utils.convert_shapes(output_shapes, to_tuples=False)\n\n # Output shapes are passed directly and don't include batch dimension.\n input_tensor_shape = tf_utils.convert_shapes(input_shape, to_tuples=False)\n batch_size = nest.flatten(input_tensor_shape)[0][0] if input_shape else None\n\n def _add_batch(shape):\n return tensor_shape.TensorShape([batch_size] + shape.as_list())\n\n output_shapes = tf_utils.convert_shapes(self._output_shape, to_tuples=False)\n return nest.map_structure(_add_batch, output_shapes)\n\n def call(self, inputs, mask=None, training=None):\n arguments = self.arguments\n if self._fn_expects_mask_arg:\n arguments['mask'] = mask\n if self._fn_expects_training_arg:\n arguments['training'] = training\n with variable_scope.variable_creator_scope(self._variable_creator):\n return self.function(inputs, **arguments)\n\n def _variable_creator(self, next_creator, **kwargs):\n name = kwargs['name']\n if name in self._variable_dict:\n return self._variable_dict[name]\n var = next_creator(**kwargs)\n self._variable_dict[name] = var\n if var.trainable:\n self._trainable_weights.append(var)\n else:\n self._non_trainable_weights.append(var)\n K.track_variable(var)\n return var\n\n def compute_mask(self, inputs, mask=None):\n if callable(self.mask):\n return self.mask(inputs, mask)\n return self.mask\n\n def get_config(self):\n function_config = self._serialize_function_to_config(self.function)\n output_shape_config = self._serialize_function_to_config(self._output_shape,\n allow_raw=True)\n config = {\n 'function': function_config[0],\n 'function_type': function_config[1],\n 'module': function_config[2],\n 'output_shape': output_shape_config[0],\n 'output_shape_type': output_shape_config[1],\n 'output_shape_module': output_shape_config[2],\n }\n if self.mask is not None:\n mask_config = self._serialize_function_to_config(self.mask)\n config.update({\n 'mask': mask_config[0],\n 'mask_type': mask_config[1],\n 'mask_module': mask_config[2]\n })\n config['arguments'] = self.arguments\n\n base_config = super(Lambda, self).get_config()\n return dict(list(base_config.items()) + list(config.items()))\n\n def _serialize_function_to_config(self, inputs, allow_raw=False):\n if isinstance(inputs, python_types.LambdaType):\n output = generic_utils.func_dump(inputs)\n output_type = 'lambda'\n module = inputs.__module__\n elif callable(inputs):\n output = inputs.__name__\n output_type = 'function'\n module = inputs.__module__\n elif allow_raw:\n output = inputs\n output_type = 'raw'\n module = None\n else:\n raise ValueError(\n 'Invalid input for serialization, type: %s ' % type(inputs))\n\n return output, output_type, module\n\n @classmethod\n def from_config(cls, config, custom_objects=None):\n config = config.copy()\n function = cls._parse_function_from_config(\n config, custom_objects, 'function', 'module', 'function_type')\n\n output_shape = cls._parse_function_from_config(\n config, custom_objects, 'output_shape', 'output_shape_module',\n 'output_shape_type')\n if 'mask' in config:\n mask = cls._parse_function_from_config(\n config, custom_objects, 'mask', 'mask_module', 'mask_type')\n else:\n mask = None\n\n config['function'] = function\n config['output_shape'] = output_shape\n config['mask'] = mask\n\n # If arguments were numpy array, they have been saved as\n # list. 
We need to recover the ndarray\n if 'arguments' in config:\n for key in config['arguments']:\n if isinstance(config['arguments'][key], dict):\n arg_dict = config['arguments'][key]\n if 'type' in arg_dict and arg_dict['type'] == 'ndarray':\n # Overwrite the argument with its numpy translation\n config['arguments'][key] = np.array(arg_dict['value'])\n\n return cls(**config)\n\n @classmethod\n def _parse_function_from_config(\n cls, config, custom_objects, func_attr_name, module_attr_name,\n func_type_attr_name):\n globs = globals()\n module = config.pop(module_attr_name, None)\n if module in sys.modules:\n globs.update(sys.modules[module].__dict__)\n elif module is not None:\n # Note: we don't know the name of the function if it's a lambda.\n warnings.warn('{} is not loaded, but a Lambda layer uses it. '\n 'It may cause errors.'.format(module)\n , UserWarning)\n if custom_objects:\n globs.update(custom_objects)\n function_type = config.pop(func_type_attr_name)\n if function_type == 'function':\n # Simple lookup in custom objects\n function = generic_utils.deserialize_keras_object(\n config[func_attr_name],\n custom_objects=custom_objects,\n printable_module_name='function in Lambda layer')\n elif function_type == 'lambda':\n # Unsafe deserialization from bytecode\n function = generic_utils.func_load(\n config[func_attr_name], globs=globs)\n elif function_type == 'raw':\n function = config[func_attr_name]\n else:\n raise TypeError('Unknown function type:', function_type)\n return function\n\n\n@keras_export('keras.layers.Dense')\nclass Dense(Layer):\n \"\"\"Just your regular densely-connected NN layer.\n\n `Dense` implements the operation:\n `output = activation(dot(input, kernel) + bias)`\n where `activation` is the element-wise activation function\n passed as the `activation` argument, `kernel` is a weights matrix\n created by the layer, and `bias` is a bias vector created by the layer\n (only applicable if `use_bias` is `True`).\n\n Note: If the input to the layer has a rank greater than 2, then\n it is flattened prior to the initial dot product with `kernel`.\n\n Example:\n\n ```python\n # as first layer in a sequential model:\n model = Sequential()\n model.add(Dense(32, input_shape=(16,)))\n # now the model will take as input arrays of shape (*, 16)\n # and output arrays of shape (*, 32)\n\n # after the first layer, you don't need to specify\n # the size of the input anymore:\n model.add(Dense(32))\n ```\n\n Arguments:\n units: Positive integer, dimensionality of the output space.\n activation: Activation function to use.\n If you don't specify anything, no activation is applied\n (ie. 
\"linear\" activation: `a(x) = x`).\n use_bias: Boolean, whether the layer uses a bias vector.\n kernel_initializer: Initializer for the `kernel` weights matrix.\n bias_initializer: Initializer for the bias vector.\n kernel_regularizer: Regularizer function applied to\n the `kernel` weights matrix.\n bias_regularizer: Regularizer function applied to the bias vector.\n activity_regularizer: Regularizer function applied to\n the output of the layer (its \"activation\")..\n kernel_constraint: Constraint function applied to\n the `kernel` weights matrix.\n bias_constraint: Constraint function applied to the bias vector.\n\n Input shape:\n N-D tensor with shape: `(batch_size, ..., input_dim)`.\n The most common situation would be\n a 2D input with shape `(batch_size, input_dim)`.\n\n Output shape:\n N-D tensor with shape: `(batch_size, ..., units)`.\n For instance, for a 2D input with shape `(batch_size, input_dim)`,\n the output would have shape `(batch_size, units)`.\n \"\"\"\n\n def __init__(self,\n units,\n activation=None,\n use_bias=True,\n kernel_initializer='glorot_uniform',\n bias_initializer='zeros',\n kernel_regularizer=None,\n bias_regularizer=None,\n activity_regularizer=None,\n kernel_constraint=None,\n bias_constraint=None,\n **kwargs):\n if 'input_shape' not in kwargs and 'input_dim' in kwargs:\n kwargs['input_shape'] = (kwargs.pop('input_dim'),)\n\n super(Dense, self).__init__(\n activity_regularizer=regularizers.get(activity_regularizer), **kwargs)\n\n self.units = int(units) if not isinstance(units, int) else units\n self.activation = activations.get(activation)\n self.use_bias = use_bias\n self.kernel_initializer = initializers.get(kernel_initializer)\n self.bias_initializer = initializers.get(bias_initializer)\n self.kernel_regularizer = regularizers.get(kernel_regularizer)\n self.bias_regularizer = regularizers.get(bias_regularizer)\n self.kernel_constraint = constraints.get(kernel_constraint)\n self.bias_constraint = constraints.get(bias_constraint)\n\n self.supports_masking = True\n self.input_spec = InputSpec(min_ndim=2)\n\n def build(self, input_shape):\n dtype = dtypes.as_dtype(self.dtype or K.floatx())\n if not (dtype.is_floating or dtype.is_complex):\n raise TypeError('Unable to build `Dense` layer with non-floating point '\n 'dtype %s' % (dtype,))\n input_shape = tensor_shape.TensorShape(input_shape)\n if tensor_shape.dimension_value(input_shape[-1]) is None:\n raise ValueError('The last dimension of the inputs to `Dense` '\n 'should be defined. 
Found `None`.')\n last_dim = tensor_shape.dimension_value(input_shape[-1])\n self.input_spec = InputSpec(min_ndim=2,\n axes={-1: last_dim})\n self.kernel = self.add_weight(\n 'kernel',\n shape=[last_dim, self.units],\n initializer=self.kernel_initializer,\n regularizer=self.kernel_regularizer,\n constraint=self.kernel_constraint,\n dtype=self.dtype,\n trainable=True)\n if self.use_bias:\n self.bias = self.add_weight(\n 'bias',\n shape=[self.units,],\n initializer=self.bias_initializer,\n regularizer=self.bias_regularizer,\n constraint=self.bias_constraint,\n dtype=self.dtype,\n trainable=True)\n else:\n self.bias = None\n self.built = True\n\n def call(self, inputs):\n rank = len(inputs.shape)\n if rank > 2:\n # Broadcasting is required for the inputs.\n outputs = standard_ops.tensordot(inputs, self.kernel, [[rank - 1], [0]])\n # Reshape the output back to the original ndim of the input.\n if not context.executing_eagerly():\n shape = inputs.shape.as_list()\n output_shape = shape[:-1] + [self.units]\n outputs.set_shape(output_shape)\n else:\n inputs = math_ops.cast(inputs, self._compute_dtype)\n if K.is_sparse(inputs):\n outputs = sparse_ops.sparse_tensor_dense_matmul(inputs, self.kernel)\n else:\n outputs = gen_math_ops.mat_mul(inputs, self.kernel)\n if self.use_bias:\n outputs = nn.bias_add(outputs, self.bias)\n if self.activation is not None:\n return self.activation(outputs) # pylint: disable=not-callable\n return outputs\n\n def compute_output_shape(self, input_shape):\n input_shape = tensor_shape.TensorShape(input_shape)\n input_shape = input_shape.with_rank_at_least(2)\n if tensor_shape.dimension_value(input_shape[-1]) is None:\n raise ValueError(\n 'The innermost dimension of input_shape must be defined, but saw: %s'\n % input_shape)\n return input_shape[:-1].concatenate(self.units)\n\n def get_config(self):\n config = {\n 'units': self.units,\n 'activation': activations.serialize(self.activation),\n 'use_bias': self.use_bias,\n 'kernel_initializer': initializers.serialize(self.kernel_initializer),\n 'bias_initializer': initializers.serialize(self.bias_initializer),\n 'kernel_regularizer': regularizers.serialize(self.kernel_regularizer),\n 'bias_regularizer': regularizers.serialize(self.bias_regularizer),\n 'activity_regularizer':\n regularizers.serialize(self.activity_regularizer),\n 'kernel_constraint': constraints.serialize(self.kernel_constraint),\n 'bias_constraint': constraints.serialize(self.bias_constraint)\n }\n base_config = super(Dense, self).get_config()\n return dict(list(base_config.items()) + list(config.items()))\n\n\n@keras_export('keras.layers.ActivityRegularization')\nclass ActivityRegularization(Layer):\n \"\"\"Layer that applies an update to the cost function based input activity.\n\n Arguments:\n l1: L1 regularization factor (positive float).\n l2: L2 regularization factor (positive float).\n\n Input shape:\n Arbitrary. 
Use the keyword argument `input_shape`\n (tuple of integers, does not include the samples axis)\n when using this layer as the first layer in a model.\n\n Output shape:\n Same shape as input.\n \"\"\"\n\n def __init__(self, l1=0., l2=0., **kwargs):\n super(ActivityRegularization, self).__init__(\n activity_regularizer=regularizers.L1L2(l1=l1, l2=l2), **kwargs)\n self.supports_masking = True\n self.l1 = l1\n self.l2 = l2\n\n def compute_output_shape(self, input_shape):\n return input_shape\n\n def get_config(self):\n config = {'l1': self.l1, 'l2': self.l2}\n base_config = super(ActivityRegularization, self).get_config()\n return dict(list(base_config.items()) + list(config.items()))\n\n\n@keras_export('keras.layers.DropConnectDense')\nclass DropConnectDense(Layer):\n \"\"\"Just your regular densely-connected NN layer.\n\n `Dense` implements the operation:\n `output = activation(dot(input, kernel) + bias)`\n where `activation` is the element-wise activation function\n passed as the `activation` argument, `kernel` is a weights matrix\n created by the layer, and `bias` is a bias vector created by the layer\n (only applicable if `use_bias` is `True`).\n\n Note: If the input to the layer has a rank greater than 2, then\n it is flattened prior to the initial dot product with `kernel`.\n\n Example:\n\n ```python\n # as first layer in a sequential model:\n model = Sequential()\n model.add(Dense(32, input_shape=(16,)))\n # now the model will take as input arrays of shape (*, 16)\n # and output arrays of shape (*, 32)\n\n # after the first layer, you don't need to specify\n # the size of the input anymore:\n model.add(Dense(32))\n ```\n\n Arguments:\n units: Positive integer, dimensionality of the output space.\n activation: Activation function to use.\n If you don't specify anything, no activation is applied\n (ie. 
\"linear\" activation: `a(x) = x`).\n use_bias: Boolean, whether the layer uses a bias vector.\n kernel_initializer: Initializer for the `kernel` weights matrix.\n bias_initializer: Initializer for the bias vector.\n kernel_regularizer: Regularizer function applied to\n the `kernel` weights matrix.\n bias_regularizer: Regularizer function applied to the bias vector.\n activity_regularizer: Regularizer function applied to\n the output of the layer (its \"activation\")..\n kernel_constraint: Constraint function applied to\n the `kernel` weights matrix.\n bias_constraint: Constraint function applied to the bias vector.\n kernel_dropout: Float between 0 and 1.\n Fraction of the weight units to drop.\n unit_dropout: Float between 0 and 1.\n Fraction of the inputs to drop.\n use_mc_dropout: Bool when True layer always acts like in \"train mode\"\n so dropout can be applied also in inference mode\n\n Input shape:\n N-D tensor with shape: `(batch_size, ..., input_dim)`.\n The most common situation would be\n a 2D input with shape `(batch_size, input_dim)`.\n\n Output shape:\n N-D tensor with shape: `(batch_size, ..., units)`.\n For instance, for a 2D input with shape `(batch_size, input_dim)`,\n the output would have shape `(batch_size, units)`.\n \"\"\"\n\n def __init__(self,\n units,\n activation=None,\n use_bias=True,\n kernel_initializer='glorot_uniform',\n bias_initializer='zeros',\n kernel_regularizer=None,\n bias_regularizer=None,\n activity_regularizer=None,\n kernel_constraint=None,\n bias_constraint=None,\n kernel_dropout=0.,\n unit_dropout=0.,\n use_mc_dropout=False,\n **kwargs):\n if 'input_shape' not in kwargs and 'input_dim' in kwargs:\n kwargs['input_shape'] = (kwargs.pop('input_dim'),)\n\n super(DropConnectDense, self).__init__(\n activity_regularizer=regularizers.get(activity_regularizer), **kwargs)\n\n self.units = int(units) if not isinstance(units, int) else units\n self.activation = activations.get(activation)\n self.use_bias = use_bias\n self.kernel_initializer = initializers.get(kernel_initializer)\n self.bias_initializer = initializers.get(bias_initializer)\n self.kernel_regularizer = regularizers.get(kernel_regularizer)\n self.bias_regularizer = regularizers.get(bias_regularizer)\n self.kernel_constraint = constraints.get(kernel_constraint)\n self.bias_constraint = constraints.get(bias_constraint)\n self.kernel_dropout = min(1., max(0., kernel_dropout))\n self.unit_dropout = min(1., max(0., unit_dropout))\n self.use_mc_dropout = use_mc_dropout\n\n self.supports_masking = True\n self.input_spec = InputSpec(min_ndim=2)\n\n def build(self, input_shape):\n dtype = dtypes.as_dtype(self.dtype or K.floatx())\n if not (dtype.is_floating or dtype.is_complex):\n raise TypeError('Unable to build `Dense` layer with non-floating point '\n 'dtype %s' % (dtype,))\n input_shape = tensor_shape.TensorShape(input_shape)\n if tensor_shape.dimension_value(input_shape[-1]) is None:\n raise ValueError('The last dimension of the inputs to `Dense` '\n 'should be defined. 
Found `None`.')\n last_dim = tensor_shape.dimension_value(input_shape[-1])\n self.input_spec = InputSpec(min_ndim=2,\n axes={-1: last_dim})\n self.kernel = self.add_weight(\n 'kernel',\n shape=[last_dim, self.units],\n initializer=self.kernel_initializer,\n regularizer=self.kernel_regularizer,\n constraint=self.kernel_constraint,\n dtype=self.dtype,\n trainable=True)\n if self.use_bias:\n self.bias = self.add_weight(\n 'bias',\n shape=[self.units,],\n initializer=self.bias_initializer,\n regularizer=self.bias_regularizer,\n constraint=self.bias_constraint,\n dtype=self.dtype,\n trainable=True)\n else:\n self.bias = None\n self.built = True\n\n def call(self, inputs, training=None):\n if training is None:\n training = K.learning_phase()\n if self.use_mc_dropout:\n training = True\n\n #units dropout\n def drop_inputs():\n return K.dropout(inputs, self.unit_dropout)\n if 0. < self.unit_dropout < 1.:\n inputs = K.in_train_phase(drop_inputs, inputs, training=training)\n\n #kernel dropout\n ones = array_ops.ones_like(self.kernel)\n def dropped_weight_connections():\n return K.dropout(ones, self.kernel_dropout) * (1 - self.kernel_dropout)\n if 0. < self.kernel_dropout < 1.:\n kern_dp_mask = K.in_train_phase(dropped_weight_connections, ones, training=training)\n else:\n kern_dp_mask = ones\n\n rank = len(inputs.shape)\n if rank > 2:\n # Broadcasting is required for the inputs.\n outputs = standard_ops.tensordot(inputs, self.kernel * kern_dp_mask, [[rank - 1], [0]])\n # Reshape the output back to the original ndim of the input.\n if not context.executing_eagerly():\n shape = inputs.shape.as_list()\n output_shape = shape[:-1] + [self.units]\n outputs.set_shape(output_shape)\n else:\n inputs = math_ops.cast(inputs, self._compute_dtype)\n if K.is_sparse(inputs):\n outputs = sparse_ops.sparse_tensor_dense_matmul(inputs, self.kernel * kern_dp_mask)\n else:\n outputs = gen_math_ops.mat_mul(inputs, self.kernel * kern_dp_mask)\n if self.use_bias:\n outputs = nn.bias_add(outputs, self.bias)\n if self.activation is not None:\n return self.activation(outputs) # pylint: disable=not-callable\n return outputs\n\n def compute_output_shape(self, input_shape):\n input_shape = tensor_shape.TensorShape(input_shape)\n input_shape = input_shape.with_rank_at_least(2)\n if tensor_shape.dimension_value(input_shape[-1]) is None:\n raise ValueError(\n 'The innermost dimension of input_shape must be defined, but saw: %s'\n % input_shape)\n return input_shape[:-1].concatenate(self.units)\n\n def get_config(self):\n config = {\n 'units': self.units,\n 'activation': activations.serialize(self.activation),\n 'use_bias': self.use_bias,\n 'kernel_initializer': initializers.serialize(self.kernel_initializer),\n 'bias_initializer': initializers.serialize(self.bias_initializer),\n 'kernel_regularizer': regularizers.serialize(self.kernel_regularizer),\n 'bias_regularizer': regularizers.serialize(self.bias_regularizer),\n 'activity_regularizer':\n regularizers.serialize(self.activity_regularizer),\n 'kernel_constraint': constraints.serialize(self.kernel_constraint),\n 'bias_constraint': constraints.serialize(self.bias_constraint),\n 'kernel_dropout': self.kernel_dropout,\n 'unit_dropout': self.unit_dropout,\n 'use_mc_dropout': self.use_mc_dropout\n }\n base_config = super(DropConnectDense, self).get_config()\n return dict(list(base_config.items()) + list(config.items()))"
] | [
[
"tensorflow.python.framework.tensor_shape.TensorShape",
"tensorflow.python.ops.array_ops.shape",
"tensorflow.python.ops.variable_scope.variable_creator_scope",
"tensorflow.python.util.tf_inspect.getfullargspec",
"tensorflow.python.ops.gen_math_ops.mat_mul",
"tensorflow.python.ops.array_ops.squeeze",
"tensorflow.python.keras.regularizers.get",
"tensorflow.python.ops.math_ops.not_equal",
"tensorflow.python.ops.standard_ops.tensordot",
"numpy.iinfo",
"tensorflow.python.eager.context.executing_eagerly",
"tensorflow.python.ops.array_ops.identity",
"tensorflow.python.ops.array_ops.transpose",
"tensorflow.python.keras.backend.track_variable",
"tensorflow.python.keras.backend.image_data_format",
"tensorflow.python.util.tf_export.keras_export",
"tensorflow.python.keras.utils.tf_utils.convert_shapes",
"tensorflow.python.keras.backend.floatx",
"tensorflow.python.keras.regularizers.L1L2",
"tensorflow.python.framework.tensor_shape.dimension_value",
"tensorflow.python.keras.utils.generic_utils.func_load",
"tensorflow.python.util.nest.map_structure",
"tensorflow.python.ops.math_ops.cast",
"tensorflow.python.keras.constraints.get",
"tensorflow.python.eager.context.eager_mode",
"tensorflow.python.ops.sparse_ops.sparse_tensor_dense_matmul",
"tensorflow.python.keras.activations.get",
"tensorflow.python.keras.backend.in_train_phase",
"tensorflow.python.keras.utils.generic_utils.deserialize_keras_object",
"tensorflow.python.framework.ops.convert_to_tensor",
"tensorflow.python.ops.nn.bias_add",
"numpy.array",
"tensorflow.python.keras.regularizers.serialize",
"tensorflow.python.ops.array_ops.ones_like",
"tensorflow.python.keras.activations.serialize",
"tensorflow.python.keras.backend.dropout",
"tensorflow.python.keras.backend.repeat",
"tensorflow.python.keras.utils.generic_utils.func_dump",
"tensorflow.python.keras.constraints.serialize",
"tensorflow.python.keras.engine.input_spec.InputSpec",
"tensorflow.python.keras.backend.is_sparse",
"tensorflow.python.keras.utils.conv_utils.normalize_data_format",
"numpy.prod",
"tensorflow.python.keras.backend.learning_phase",
"tensorflow.python.keras.backend.ndim",
"tensorflow.python.keras.initializers.get",
"tensorflow.python.framework.tensor_shape.as_shape",
"tensorflow.python.keras.initializers.serialize",
"tensorflow.python.util.nest.flatten",
"tensorflow.python.framework.constant_op.constant"
]
] |
QuantumQuadrate/Rearrangement | [
"5f8d64bd18a471a488747ed8d17b00304b4ab293"
] | [
"PythonRearrangement/setup.py"
] | [
"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Aug 22 13:22:01 2018\n\n@author: Cody\n\"\"\"\n\nfrom setuptools import setup\nfrom setuptools import Extension\nfrom Cython.Distutils import build_ext\nimport numpy as np\n\n\nsetup(\n cmdclass = {'build_ext': build_ext},\n ext_modules = [Extension(\"Rearranger\", sources= [\"pyRearranger.pyx\",\"../CPPrearrangement/Rearrangement.cpp\"],language='c++',include_dirs=[np.get_include()])])\n"
] | [
[
"numpy.get_include"
]
] |
Lechatelia/Welding_Joints | [
"7cb5b8ac4c961c4080e1590934c24130bfde3a26"
] | [
"ceshi.py"
] | [
"import cv2\r\nimport tensorflow as tf\r\nimport numpy as np\r\nimport random\r\n\r\ny = tf.constant([1,2,3,4,5,6], name='y',dtype=tf.float32)\r\ny_ = tf.constant([0,1,2,3,4,5], name='Y_',dtype=tf.float32)\r\ny = tf.reshape(y,[2,3])\r\ny_ = tf.reshape(y_,[2,3])\r\nz= tf.constant([1,2], name='z',dtype=tf.float32)\r\nz=tf.reshape(z,[2,-1])\r\n\r\n\r\nresult=[]\r\nresult.append(tf.subtract(y,y_))\r\nresult.append(tf.multiply(y,y_))\r\nresult.append(tf.multiply(y,z))\r\nwith tf.Session() as sess:\r\n result=sess.run(result)\r\n for i in result:\r\n\r\n print(i)\r\n # result=sess.run(multiply)\r\n # print(result)\r\n# y = tf.constant(0.5, shape=[7],name='y',dtype=tf.float32)\r\n# y_ = tf.constant([0.6, 0.3,0.4,0.6,0.6,0.5,0.8], name='Y_',dtype=tf.float32)\r\n# y_ = tf.constant([[9, 8], [7, 6], [10, 11]], name='x')\r\n# b = tf.constant(1, name='b')\r\n\r\n# a = tf.Variable(tf.zeros([3,3]))\r\n# result=tf.zeros(y.get_shape().as_list()[0])\r\n\r\n# result = tf.where(tf.greater(tf.abs((y-y_),\"abs\"),tf.constant(0.15,shape=y.get_shape(),dtype=tf.float32)),tf.constant(0,shape=y.get_shape(),dtype=tf.float32),tf.constant(1,shape=y.get_shape(),dtype=tf.float32))\r\n# y=23\r\n# y_=24\r\n# # result = tf.where(tf.greater(y,y_),tf.abs(y-y_)*10,tf.abs(y-y_))\r\n# result = tf.where(tf.greater(y,y_),y,y_)\r\n# z = tf.where(tf.greater(y,y_),y_,y)\r\n# z1=tf.to_int32(z)\r\n# z2=tf.to_int32(result)\r\n# #\r\n#\r\n# # result_mean=tf.reduce_mean(result)\r\n# # Create a session to compute\r\n# with tf.Session() as sess:\r\n# result=sess.run(result)\r\n# z=sess.run(z)\r\n# print(result)\r\n# # print(sess.run(result_mean))\r\n# print(z)\r\n\r\n# img = cv2.imread(\"test.jpg\")\r\n#\r\n# # img=cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\r\n# img = np.float32(img)/255\r\n# cv2.imshow(\"Image\",img)\r\n# mask = np.zeros((50,50,1),dtype=np.float32)\r\n# for i in range(20):\r\n# for j in range(20):\r\n# mask[i][j]=-0.5\r\n# mask[i+30][j+30]=0.5\r\n#\r\n# mask = cv2.resize(mask,(658,832))\r\n#\r\n# mask=cv2.cvtColor(mask,cv2.COLOR_GRAY2RGB)\r\n# cv2.imshow(\"a\",mask)\r\n# cv2.addWeighted(img,0.5,mask,0.5,0,mask)\r\n# cv2.imshow('hunh',mask)\r\n# cv2.waitKey(0)\r\n\r\n\r\n# cv2.destroyAllWindows()\r\n# for i in range(10):\r\n# print(random.randint(0, 1))\r\n#\r\n# a=[[[i*j*k for i in range(0,3)]for j in range(0,3)] for k in range(0,3)]\r\n# # b=[[j*i for i in range(0,3)]for j in range(0,3)]\r\n# print(a)\r\n# # print(b)\r\n# a=np.array(a)\r\n# # b=np.array(b)\r\n# print((list(a.shape)))\r\n# # print(a+b);\r\n# for n in a:\r\n# print(n)\r\n# np.random.shuffle(a)\r\n#\r\n# print(len(a))\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n#\r\n# print(random.randint(0, 2))\r\n# print(random.randint(0, 2))\r\n# print(random.randint(0, 2))\r\n# print(random.randint(0, 2))\r\n# print(random.randint(0, 2))\r\n\r\n\r\n# c=[i for i in range(7)]\r\n# print(c[-2:])\r\n\r\nr1 = np.array([1.2, 2, 3, 4],dtype=np.float32)\r\nr2 = np.array([1.1, 1.8, 3.3, 4.4],dtype=np.float32)\r\ncha = r1 - r2\r\nprint(cha)\r\nerror = np.mean(np.abs(cha), axis=0)\r\nprint(error)"
] | [
[
"tensorflow.multiply",
"tensorflow.constant",
"numpy.abs",
"tensorflow.reshape",
"tensorflow.subtract",
"tensorflow.Session",
"numpy.array"
]
] |
WeiyuCheng/FIA-KDD-19 | [
"18f29f8babbf1c505973a8a62ac48c6ca34ccd8a"
] | [
"src/scripts/RQ1.py"
] | [
"from __future__ import division\nfrom __future__ import print_function\nfrom __future__ import absolute_import\nfrom __future__ import unicode_literals\nimport numpy as np\nimport argparse\nimport os\nfrom scipy.stats import pearsonr\nimport sys\n\nsys.path.append(\"..\")\nfrom scripts.load_movielens import load_movielens\nfrom scripts.load_yelp import load_yelp\nimport influence.experiments as experiments\nfrom influence.matrix_factorization import MF\nfrom influence.NCF import NCF\n\n\ndef parse_args():\n parser = argparse.ArgumentParser()\n parser.add_argument('--avextol', type=float, default=1e-3,\n help='threshold for optimization in influence function')\n parser.add_argument('--damping', type=float, default=1e-6,\n help='damping term in influence function')\n parser.add_argument('--weight_decay', type=float, default=1e-3,\n help='l2 regularization term for training MF or NCF model')\n parser.add_argument('--lr', type=float, default=1e-3,\n help='initial learning rate for training MF or NCF model')\n parser.add_argument('--embed_size', type=int, default=16,\n help='embedding size')\n parser.add_argument('--maxinf', type=int, default=1,\n help='remove type of train indices')\n parser.add_argument('--dataset', type=str, default='movielens',\n help='name of dataset: movielens or yelp')\n parser.add_argument('--model', type=str, default='NCF',\n help='model type: MF or NCF')\n parser.add_argument('--num_test', type=int, default=5,\n help='number of test points of retraining')\n parser.add_argument('--num_steps_train', type=int, default=180000,\n help='training steps')\n parser.add_argument('--num_steps_retrain', type=int, default=27000,\n help='retraining steps')\n parser.add_argument('--reset_adam', type=int, default=0)\n parser.add_argument('--load_checkpoint', type=int, default=1)\n parser.add_argument('--retrain_times', type=int, default=4)\n parser.add_argument('--sort_test_case', type=int, default=0)\n return parser.parse_args()\n\n\nargs = parse_args()\nif args.dataset == 'movielens':\n data_sets = load_movielens('../../data')\n batch_size = 3020\nelif args.dataset == 'yelp':\n data_sets = load_yelp('../../data')\n batch_size = 3009\nelse:\n raise NotImplementedError\nweight_decay = args.weight_decay\ninitial_learning_rate = args.lr\nnum_users = int(np.max(data_sets.train._x[:, 0])+1)\nnum_items = int(np.max(data_sets.train._x[:, 1])+1)\nprint(\"number of users: %d\" % num_users)\nprint(\"number of items: %d\" % num_items)\nprint(\"number of training examples: %d\" % data_sets.train._x.shape[0])\nprint(\"number of testing examples: %d\" % data_sets.test._x.shape[0])\navextol = args.avextol\ndamping = args.damping\nprint(\"Using avextol of %.0e\" % avextol)\nprint(\"Using damping of %.0e\" % damping)\nprint(\"Using embedding size of %d\" % args.embed_size)\nif args.model == 'MF':\n Model = MF\nelif args.model == 'NCF':\n Model = NCF\nelse:\n raise NotImplementedError\n\nmodel = Model(\n num_users=num_users,\n num_items=num_items,\n embedding_size=args.embed_size,\n weight_decay=weight_decay,\n num_classes=1,\n batch_size=batch_size,\n data_sets=data_sets,\n initial_learning_rate=initial_learning_rate,\n damping=damping,\n decay_epochs=[10000, 20000],\n mini_batch=True,\n train_dir='output',\n log_dir='log',\n avextol=avextol,\n model_name='%s_%s_explicit_damping%.0e_avextol%.0e_embed%d_maxinf%d_wd%.0e' % (\n args.dataset, args.model, damping, avextol, args.embed_size, args.maxinf, weight_decay))\nprint(f'Model name is: {model.model_name}')\n\nnum_steps = 
args.num_steps_train\niter_to_load = num_steps - 1\nif os.path.isfile(\"%s-%s.index\" % (model.checkpoint_file, iter_to_load)):\n print('Checkpoint found, loading...')\n model.load_checkpoint(iter_to_load=iter_to_load)\nelse:\n print('Checkpoint not found, start training...')\n model.train(\n num_steps=num_steps)\n model.saver.save(model.sess, model.checkpoint_file, global_step=num_steps - 1)\n\nif args.maxinf:\n remove_type = 'maxinf'\nelse:\n remove_type = 'random'\n\ntest_size = data_sets.test.num_examples\nnum_test = args.num_test\ntest_indices = np.random.choice(test_size, num_test, replace=False)\nif args.sort_test_case:\n num_related_ratings = []\n for i in range(test_size):\n num_related_ratings += [model.get_train_indices_of_test_case([i]).shape[0]]\n test_indices = np.argsort(np.array(num_related_ratings))[:num_test]\n\nactual_y_diff = np.zeros(num_test)\npredicted_y_diff = np.zeros(num_test)\nremoved_indices = np.zeros(num_test)\n\nfor i, test_idx in enumerate(test_indices):\n print(f'test point====={i}=====')\n actual_y_diffs, predicted_y_diffs, indices_to_remove = experiments.test_retraining(\n model,\n test_idx=test_idx,\n iter_to_load=iter_to_load,\n retrain_times=args.retrain_times,\n num_to_remove=1,\n num_steps=args.num_steps_retrain,\n remove_type=remove_type,\n force_refresh=True,\n reset_adam=args.reset_adam,\n load_checkpoint=args.load_checkpoint)\n actual_y_diff[i] = actual_y_diffs[0]\n predicted_y_diff[i] = predicted_y_diffs[0]\n removed_indices[i] = indices_to_remove[0]\n\nnp.savez(\n 'output/RQ1-%s-%s.npz' % (args.model, args.dataset),\n actual_loss_diffs=actual_y_diff,\n predicted_loss_diffs=predicted_y_diff,\n indices_to_remove=removed_indices\n)\nprint('Correlation is %s' % pearsonr(actual_y_diff, predicted_y_diff)[0])\n"
] | [
[
"numpy.savez",
"numpy.random.choice",
"scipy.stats.pearsonr",
"numpy.max",
"numpy.array",
"numpy.zeros"
]
] |
eyyupdirek/Rawpythn | [
"e66f0adcb8be514f349796d8ecad0a398412409d"
] | [
"rawpython.py"
] | [
"import pandas as pd\nimport numpy as np\nts = pd.Series(np.random.randn(1000),index=pd.date_range('1/1/2000', periods=1000))\n\nts\n"
] | [
[
"numpy.random.randn",
"pandas.date_range"
]
] |